From 9f3884814c1a8fa3dc89c973144bc25ecb51bec6 Mon Sep 17 00:00:00 2001 From: Gennady Lipenkov Date: Tue, 26 Mar 2019 15:48:37 +0300 Subject: [PATCH] add dependencies to vendor/ --- vendor/github.com/c2h5oh/datasize/LICENSE | 21 + vendor/github.com/c2h5oh/datasize/README.md | 66 + vendor/github.com/c2h5oh/datasize/datasize.go | 217 ++ vendor/github.com/ghodss/yaml/LICENSE | 50 + vendor/github.com/ghodss/yaml/README.md | 121 + vendor/github.com/ghodss/yaml/fields.go | 501 +++ vendor/github.com/ghodss/yaml/go.mod | 3 + vendor/github.com/ghodss/yaml/go.sum | 3 + vendor/github.com/ghodss/yaml/yaml.go | 326 ++ vendor/github.com/ghodss/yaml/yaml_go110.go | 14 + .../golang/protobuf/jsonpb/jsonpb.go | 1271 ++++++ .../golang/protobuf/proto/decode.go | 1 - .../golang/protobuf/proto/deprecated.go | 63 + .../github.com/golang/protobuf/proto/equal.go | 3 +- .../golang/protobuf/proto/extensions.go | 78 +- .../github.com/golang/protobuf/proto/lib.go | 38 +- .../golang/protobuf/proto/message_set.go | 137 +- .../golang/protobuf/proto/pointer_reflect.go | 5 +- .../golang/protobuf/proto/pointer_unsafe.go | 15 +- .../golang/protobuf/proto/properties.go | 31 +- .../golang/protobuf/proto/table_marshal.go | 45 +- .../golang/protobuf/proto/table_unmarshal.go | 74 +- .../golang/protobuf/ptypes/empty/empty.pb.go | 83 + .../golang/protobuf/ptypes/empty/empty.proto | 52 + .../protobuf/ptypes/struct/struct.pb.go | 336 ++ .../protobuf/ptypes/struct/struct.proto | 96 + .../protobuf/ptypes/wrappers/wrappers.pb.go | 461 +++ .../protobuf/ptypes/wrappers/wrappers.proto | 118 + .../yandex-cloud/go-genproto/LICENSE | 21 + .../go-genproto/yandex/api/operation.pb.go | 109 + .../yandex/cloud/access/access.pb.go | 560 +++ .../yandex/cloud/compute/v1/disk.pb.go | 357 ++ .../cloud/compute/v1/disk_service.pb.go | 1118 ++++++ .../yandex/cloud/compute/v1/disk_type.pb.go | 100 + .../cloud/compute/v1/disk_type_service.pb.go | 325 ++ .../yandex/cloud/compute/v1/image.pb.go | 324 ++ .../cloud/compute/v1/image_service.pb.go | 1278 ++++++ .../yandex/cloud/compute/v1/instance.pb.go | 744 ++++ .../cloud/compute/v1/instance_service.pb.go | 2766 +++++++++++++ .../yandex/cloud/compute/v1/snapshot.pb.go | 238 ++ .../cloud/compute/v1/snapshot_service.pb.go | 976 +++++ .../yandex/cloud/compute/v1/zone.pb.go | 133 + .../cloud/compute/v1/zone_service.pb.go | 324 ++ .../yandex/cloud/endpoint/api_endpoint.pb.go | 87 + .../cloud/endpoint/api_endpoint_service.pb.go | 298 ++ .../iam/v1/awscompatibility/access_key.pb.go | 129 + .../awscompatibility/access_key_service.pb.go | 565 +++ .../cloud/iam/v1/iam_token_service.pb.go | 326 ++ .../go-genproto/yandex/cloud/iam/v1/key.pb.go | 266 ++ .../yandex/cloud/iam/v1/key_service.pb.go | 624 +++ .../yandex/cloud/iam/v1/role.pb.go | 90 + .../yandex/cloud/iam/v1/role_service.pb.go | 336 ++ .../yandex/cloud/iam/v1/service_account.pb.go | 127 + .../iam/v1/service_account_service.pb.go | 1041 +++++ .../yandex/cloud/iam/v1/user_account.pb.go | 221 ++ .../cloud/iam/v1/user_account_service.pb.go | 169 + ...yandex_passport_user_account_service.pb.go | 170 + .../cloud/mdb/clickhouse/v1/backup.pb.go | 137 + .../mdb/clickhouse/v1/backup_service.pb.go | 335 ++ .../cloud/mdb/clickhouse/v1/cluster.pb.go | 1079 ++++++ .../mdb/clickhouse/v1/cluster_service.pb.go | 3437 +++++++++++++++++ .../mdb/clickhouse/v1/config/clickhouse.pb.go | 1867 +++++++++ .../cloud/mdb/clickhouse/v1/database.pb.go | 137 + .../mdb/clickhouse/v1/database_service.pb.go | 630 +++ .../mdb/clickhouse/v1/resource_preset.pb.go | 112 + 
.../v1/resource_preset_service.pb.go | 325 ++ .../yandex/cloud/mdb/clickhouse/v1/user.pb.go | 210 + .../mdb/clickhouse/v1/user_service.pb.go | 1099 ++++++ .../yandex/cloud/mdb/mongodb/v1/backup.pb.go | 127 + .../cloud/mdb/mongodb/v1/backup_service.pb.go | 331 ++ .../yandex/cloud/mdb/mongodb/v1/cluster.pb.go | 987 +++++ .../mdb/mongodb/v1/cluster_service.pb.go | 2712 +++++++++++++ .../mdb/mongodb/v1/config/mongodb3_6.pb.go | 600 +++ .../cloud/mdb/mongodb/v1/database.pb.go | 137 + .../mdb/mongodb/v1/database_service.pb.go | 627 +++ .../mdb/mongodb/v1/resource_preset.pb.go | 112 + .../mongodb/v1/resource_preset_service.pb.go | 324 ++ .../yandex/cloud/mdb/mongodb/v1/user.pb.go | 220 ++ .../cloud/mdb/mongodb/v1/user_service.pb.go | 1097 ++++++ .../cloud/mdb/postgresql/v1/backup.pb.go | 127 + .../mdb/postgresql/v1/backup_service.pb.go | 335 ++ .../cloud/mdb/postgresql/v1/cluster.pb.go | 1331 +++++++ .../mdb/postgresql/v1/cluster_service.pb.go | 3216 +++++++++++++++ .../mdb/postgresql/v1/config/host10.pb.go | 945 +++++ .../mdb/postgresql/v1/config/host11.pb.go | 935 +++++ .../mdb/postgresql/v1/config/host9_6.pb.go | 955 +++++ .../postgresql/v1/config/postgresql10.pb.go | 1301 +++++++ .../postgresql/v1/config/postgresql11.pb.go | 1291 +++++++ .../postgresql/v1/config/postgresql9_6.pb.go | 1312 +++++++ .../cloud/mdb/postgresql/v1/database.pb.go | 275 ++ .../mdb/postgresql/v1/database_service.pb.go | 792 ++++ .../mdb/postgresql/v1/resource_preset.pb.go | 112 + .../v1/resource_preset_service.pb.go | 324 ++ .../yandex/cloud/mdb/postgresql/v1/user.pb.go | 456 +++ .../mdb/postgresql/v1/user_service.pb.go | 1122 ++++++ .../cloud/mdb/redis/v1alpha/backup.pb.go | 128 + .../mdb/redis/v1alpha/backup_service.pb.go | 334 ++ .../cloud/mdb/redis/v1alpha/cluster.pb.go | 874 +++++ .../mdb/redis/v1alpha/cluster_service.pb.go | 2615 +++++++++++++ .../mdb/redis/v1alpha/config/redis5_0.pb.go | 240 ++ .../mdb/redis/v1alpha/resource_preset.pb.go | 112 + .../v1alpha/resource_preset_service.pb.go | 321 ++ .../yandex/cloud/operation/operation.pb.go | 274 ++ .../cloud/operation/operation_service.pb.go | 245 ++ .../cloud/resourcemanager/v1/cloud.pb.go | 114 + .../resourcemanager/v1/cloud_service.pb.go | 616 +++ .../cloud/resourcemanager/v1/folder.pb.go | 182 + .../resourcemanager/v1/folder_service.pb.go | 1071 +++++ .../yandex/cloud/validation/validation.pb.go | 178 + .../yandex/cloud/vpc/v1/network.pb.go | 139 + .../yandex/cloud/vpc/v1/network_service.pb.go | 1109 ++++++ .../yandex/cloud/vpc/v1/route_table.pb.go | 328 ++ .../cloud/vpc/v1/route_table_service.pb.go | 982 +++++ .../yandex/cloud/vpc/v1/subnet.pb.go | 193 + .../yandex/cloud/vpc/v1/subnet_service.pb.go | 1016 +++++ vendor/github.com/yandex-cloud/go-sdk/AUTHORS | 11 + .../yandex-cloud/go-sdk/CONTRIBUTING.md | 35 + vendor/github.com/yandex-cloud/go-sdk/LICENSE | 21 + .../github.com/yandex-cloud/go-sdk/README.md | 31 + .../yandex-cloud/go-sdk/credentials.go | 142 + .../yandex-cloud/go-sdk/dial/LICENSE | 202 + .../yandex-cloud/go-sdk/dial/dialer.go | 41 + .../yandex-cloud/go-sdk/dial/proxy.go | 183 + .../yandex-cloud/go-sdk/errdetails.go | 10 + .../go-sdk/gen/apiendpoint/apiendpoint.go | 38 + .../go-sdk/gen/apiendpoint/endpoint_group.go | 24 + .../go-sdk/gen/compute/compute_group.go | 49 + .../yandex-cloud/go-sdk/gen/compute/disk.go | 75 + .../go-sdk/gen/compute/disktype.go | 38 + .../yandex-cloud/go-sdk/gen/compute/image.go | 84 + .../go-sdk/gen/compute/instance.go | 138 + .../go-sdk/gen/compute/snapshot.go | 75 + .../yandex-cloud/go-sdk/gen/compute/zone.go | 38 + 
.../gen/iam/awscompatibility/accesskey.go | 57 + .../awscompatibility_group.go | 24 + .../go-sdk/gen/iam/iam_compatibility_group.go | 12 + .../yandex-cloud/go-sdk/gen/iam/iam_group.go | 49 + .../yandex-cloud/go-sdk/gen/iam/iamtoken.go | 29 + .../yandex-cloud/go-sdk/gen/iam/key.go | 57 + .../yandex-cloud/go-sdk/gen/iam/role.go | 38 + .../go-sdk/gen/iam/serviceaccount.go | 103 + .../go-sdk/gen/iam/useraccount.go | 29 + .../gen/iam/yandexpassportuseraccount.go | 29 + .../go-sdk/gen/mdb/clickhouse/backup.go | 38 + .../gen/mdb/clickhouse/clickhouse_group.go | 44 + .../go-sdk/gen/mdb/clickhouse/cluster.go | 201 + .../go-sdk/gen/mdb/clickhouse/database.go | 57 + .../gen/mdb/clickhouse/resourcepreset.go | 38 + .../go-sdk/gen/mdb/clickhouse/user.go | 84 + .../go-sdk/gen/mdb/mongodb/backup.go | 38 + .../go-sdk/gen/mdb/mongodb/cluster.go | 156 + .../go-sdk/gen/mdb/mongodb/database.go | 57 + .../go-sdk/gen/mdb/mongodb/mongodb_group.go | 44 + .../go-sdk/gen/mdb/mongodb/resourcepreset.go | 38 + .../go-sdk/gen/mdb/mongodb/user.go | 84 + .../go-sdk/gen/mdb/postgresql/backup.go | 38 + .../go-sdk/gen/mdb/postgresql/cluster.go | 165 + .../go-sdk/gen/mdb/postgresql/database.go | 66 + .../gen/mdb/postgresql/postgresql_group.go | 44 + .../gen/mdb/postgresql/resourcepreset.go | 38 + .../go-sdk/gen/mdb/postgresql/user.go | 84 + .../go-sdk/gen/mdb/redis/backup.go | 38 + .../go-sdk/gen/mdb/redis/cluster.go | 156 + .../go-sdk/gen/mdb/redis/redis_group.go | 34 + .../go-sdk/gen/mdb/redis/resourcepreset.go | 38 + .../go-sdk/gen/operation/operation.go | 38 + .../go-sdk/gen/operation/operation_group.go | 24 + .../go-sdk/gen/resourcemanager/cloud.go | 76 + .../go-sdk/gen/resourcemanager/folder.go | 103 + .../resourcemanager/resourcemanager_group.go | 29 + .../yandex-cloud/go-sdk/gen/vpc/network.go | 84 + .../yandex-cloud/go-sdk/gen/vpc/subnet.go | 75 + .../yandex-cloud/go-sdk/gen/vpc/vpc_group.go | 29 + .../go-sdk/iamkey/generate_proto.sh | 8 + .../yandex-cloud/go-sdk/iamkey/key.go | 133 + .../yandex-cloud/go-sdk/iamkey/key.pb.go | 245 ++ .../yandex-cloud/go-sdk/iamkey/key.proto | 42 + vendor/github.com/yandex-cloud/go-sdk/mdb.go | 38 + .../go-sdk/operation/operation.go | 187 + .../yandex-cloud/go-sdk/operation/utils.go | 23 + .../go-sdk/pkg/grpcclient/conn_context.go | 169 + .../go-sdk/pkg/requestid/interceptor.go | 108 + .../go-sdk/pkg/sdkerrors/message.go | 35 + .../go-sdk/pkg/sdkerrors/multierr.go | 79 + .../go-sdk/pkg/singleflight/LICENSE | 191 + .../go-sdk/pkg/singleflight/singleflight.go | 101 + .../yandex-cloud/go-sdk/resolver.go | 17 + .../yandex-cloud/go-sdk/rpc_credentials.go | 121 + vendor/github.com/yandex-cloud/go-sdk/sdk.go | 276 ++ .../rpc/errdetails/error_details.pb.go | 765 ++++ .../protobuf/field_mask/field_mask.pb.go | 280 ++ 191 files changed, 66866 insertions(+), 245 deletions(-) create mode 100644 vendor/github.com/c2h5oh/datasize/LICENSE create mode 100644 vendor/github.com/c2h5oh/datasize/README.md create mode 100644 vendor/github.com/c2h5oh/datasize/datasize.go create mode 100644 vendor/github.com/ghodss/yaml/LICENSE create mode 100644 vendor/github.com/ghodss/yaml/README.md create mode 100644 vendor/github.com/ghodss/yaml/fields.go create mode 100644 vendor/github.com/ghodss/yaml/go.mod create mode 100644 vendor/github.com/ghodss/yaml/go.sum create mode 100644 vendor/github.com/ghodss/yaml/yaml.go create mode 100644 vendor/github.com/ghodss/yaml/yaml_go110.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go create mode 100644 
vendor/github.com/golang/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto create mode 100644 vendor/github.com/yandex-cloud/go-genproto/LICENSE create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/api/operation.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/access/access.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/iam_token_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/yandex_passport_user_account_service.pb.go create mode 100644 
vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config/mongodb3_6.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host10.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host11.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host9_6.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql10.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql11.pb.go create mode 100644 
vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql9_6.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config/redis5_0.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/validation/validation.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet_service.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/AUTHORS create mode 100644 vendor/github.com/yandex-cloud/go-sdk/CONTRIBUTING.md create mode 100644 vendor/github.com/yandex-cloud/go-sdk/LICENSE create mode 100644 vendor/github.com/yandex-cloud/go-sdk/README.md create mode 100644 vendor/github.com/yandex-cloud/go-sdk/credentials.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/dial/LICENSE create mode 100644 vendor/github.com/yandex-cloud/go-sdk/dial/dialer.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/dial/proxy.go create mode 100644 
vendor/github.com/yandex-cloud/go-sdk/errdetails.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/apiendpoint.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/endpoint_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/compute_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/disk.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/disktype.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/image.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/instance.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/snapshot.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/compute/zone.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/accesskey.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/awscompatibility_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_compatibility_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/iamtoken.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/key.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/role.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/serviceaccount.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/useraccount.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/iam/yandexpassportuseraccount.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/backup.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/clickhouse_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/cluster.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/database.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/resourcepreset.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/user.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/backup.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/cluster.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/database.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/mongodb_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/resourcepreset.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/user.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/backup.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/cluster.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/database.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/postgresql_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/resourcepreset.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/user.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/backup.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/cluster.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/redis_group.go create mode 100644 
vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/resourcepreset.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/cloud.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/folder.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/resourcemanager_group.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/vpc/network.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/vpc/subnet.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/gen/vpc/vpc_group.go create mode 100755 vendor/github.com/yandex-cloud/go-sdk/iamkey/generate_proto.sh create mode 100644 vendor/github.com/yandex-cloud/go-sdk/iamkey/key.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/iamkey/key.pb.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/iamkey/key.proto create mode 100644 vendor/github.com/yandex-cloud/go-sdk/mdb.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/operation/operation.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/operation/utils.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/pkg/grpcclient/conn_context.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/pkg/requestid/interceptor.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/message.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/multierr.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/LICENSE create mode 100644 vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/singleflight.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/resolver.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/rpc_credentials.go create mode 100644 vendor/github.com/yandex-cloud/go-sdk/sdk.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go diff --git a/vendor/github.com/c2h5oh/datasize/LICENSE b/vendor/github.com/c2h5oh/datasize/LICENSE new file mode 100644 index 000000000..f2ba916e6 --- /dev/null +++ b/vendor/github.com/c2h5oh/datasize/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Maciej Lisiewski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/c2h5oh/datasize/README.md b/vendor/github.com/c2h5oh/datasize/README.md new file mode 100644 index 000000000..d21f136b4 --- /dev/null +++ b/vendor/github.com/c2h5oh/datasize/README.md @@ -0,0 +1,66 @@ +# datasize [![Build Status](https://travis-ci.org/c2h5oh/datasize.svg?branch=master)](https://travis-ci.org/c2h5oh/datasize) + +Golang helpers for data sizes + + +### Constants +Just like the `time` package provides `time.Second` and `time.Hour` constants, `datasize` provides: +* `datasize.B` 1 byte +* `datasize.KB` 1 kilobyte +* `datasize.MB` 1 megabyte +* `datasize.GB` 1 gigabyte +* `datasize.TB` 1 terabyte +* `datasize.PB` 1 petabyte +* `datasize.EB` 1 exabyte + +### Helpers +Just like the `time` package provides `Duration.Nanoseconds() int64` and `Duration.Hours() float64` helpers, `datasize` has +* `ByteSize.Bytes() uint64` +* `ByteSize.KBytes() float64` +* `ByteSize.MBytes() float64` +* `ByteSize.GBytes() float64` +* `ByteSize.TBytes() float64` +* `ByteSize.PBytes() float64` +* `ByteSize.EBytes() float64` + +Warning: see the limitations at the end of this document about possible precision loss + +### Parsing strings +`datasize.ByteSize` implements the `TextUnmarshaler` interface and will automatically parse human-readable strings into correct values where it is used: +* `"10 MB"` -> `10 * datasize.MB` +* `"10240 g"` -> `10 * datasize.TB` +* `"2000"` -> `2000 * datasize.B` +* `"1tB"` -> `datasize.TB` +* `"5 peta"` -> `5 * datasize.PB` +* `"28 kilobytes"` -> `28 * datasize.KB` +* `"1 gigabyte"` -> `1 * datasize.GB` + +You can also do it manually: +```go +var v datasize.ByteSize +err := v.UnmarshalText([]byte("100 mb")) +``` + +### Printing +`ByteSize.String()` uses the largest unit that allows an integer value: + * `(102400 * datasize.MB).String()` -> `"100GB"` + * `(datasize.MB + datasize.KB).String()` -> `"1025KB"` + +Use the `%d` format string to get the value in bytes without a unit + +### JSON and other encoding +Both `TextMarshaler` and `TextUnmarshaler` interfaces are implemented - JSON will just work. Other encoders will work provided they use those interfaces.
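For example, a struct field of type `datasize.ByteSize` round-trips through `encoding/json` with no extra code - a minimal sketch (the `Config` type, its `cache_size` field, and the sizes used here are illustrative, not part of the library):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/c2h5oh/datasize"
)

// Config is an illustrative struct; any field of type datasize.ByteSize
// is encoded and decoded through TextMarshaler/TextUnmarshaler.
type Config struct {
	CacheSize datasize.ByteSize `json:"cache_size"`
}

func main() {
	// Decoding: the human-readable string is parsed into bytes.
	var c Config
	if err := json.Unmarshal([]byte(`{"cache_size": "512 mb"}`), &c); err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(c.CacheSize.Bytes()) // 536870912

	// Encoding: the value is rendered with the largest integer unit.
	out, err := json.Marshal(c)
	if err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(string(out)) // {"cache_size":"512MB"}
}
```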
+ +### Human readable +`ByteSize.HumanReadable()` or `ByteSize.HR()` returns a string with 1-3 integer digits, followed by 1 decimal place, a space, and a unit big enough to keep the value at 1-3 digits + + * `(102400 * datasize.MB).HumanReadable()` -> `"100.0 GB"` + * `(datasize.MB + 512 * datasize.KB).HumanReadable()` -> `"1.5 MB"` + +### Limitations +* The underlying data type for `datasize.ByteSize` is `uint64`, so values outside the 0 to 2^64-1 range will overflow +* size helper functions (like `ByteSize.KBytes()`) return `float64`, which can't represent all possible values of `uint64` accurately: + * if the returned value is supposed to have no fraction (i.e. `(10 * datasize.MB).KBytes()`), accuracy loss happens when the value is more than 2^53 times the unit: `.KBytes()` over 8 petabytes, `.MBytes()` over 8 exabytes + * if the returned value is supposed to have a fraction (i.e. `(datasize.PB + datasize.B).MBytes()`), in addition to the above, accuracy loss may occur in the fractional part too - a larger integer part leaves fewer bits for the fractional part, and the smaller the remainder is relative to the unit, the more bits are required to store it +* Parsing a string with `Mb`, `Tb`, etc. units will return an error, because a capital letter followed by a lower-case `b` is commonly used for bits, not bytes +* Parsing a string with a value exceeding 2^64-1 bytes will return 2^64-1 and an out of range error diff --git a/vendor/github.com/c2h5oh/datasize/datasize.go b/vendor/github.com/c2h5oh/datasize/datasize.go new file mode 100644 index 000000000..675478816 --- /dev/null +++ b/vendor/github.com/c2h5oh/datasize/datasize.go @@ -0,0 +1,217 @@ +package datasize + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +type ByteSize uint64 + +const ( + B ByteSize = 1 + KB = B << 10 + MB = KB << 10 + GB = MB << 10 + TB = GB << 10 + PB = TB << 10 + EB = PB << 10 + + fnUnmarshalText string = "UnmarshalText" + maxUint64 uint64 = (1 << 64) - 1 + cutoff uint64 = maxUint64 / 10 +) + +var ErrBits = errors.New("unit with capital unit prefix and lower case unit (b) - bits, not bytes ") + +func (b ByteSize) Bytes() uint64 { + return uint64(b) +} + +func (b ByteSize) KBytes() float64 { + v := b / KB + r := b % KB + return float64(v) + float64(r)/float64(KB) +} + +func (b ByteSize) MBytes() float64 { + v := b / MB + r := b % MB + return float64(v) + float64(r)/float64(MB) +} + +func (b ByteSize) GBytes() float64 { + v := b / GB + r := b % GB + return float64(v) + float64(r)/float64(GB) +} + +func (b ByteSize) TBytes() float64 { + v := b / TB + r := b % TB + return float64(v) + float64(r)/float64(TB) +} + +func (b ByteSize) PBytes() float64 { + v := b / PB + r := b % PB + return float64(v) + float64(r)/float64(PB) +} + +func (b ByteSize) EBytes() float64 { + v := b / EB + r := b % EB + return float64(v) + float64(r)/float64(EB) +} + +func (b ByteSize) String() string { + switch { + case b == 0: + return fmt.Sprint("0B") + case b%EB == 0: + return fmt.Sprintf("%dEB", b/EB) + case b%PB == 0: + return fmt.Sprintf("%dPB", b/PB) + case b%TB == 0: + return fmt.Sprintf("%dTB", b/TB) + case b%GB == 0: + return fmt.Sprintf("%dGB", b/GB) + case b%MB == 0: + return fmt.Sprintf("%dMB", b/MB) + case b%KB == 0: + return fmt.Sprintf("%dKB", b/KB) + default: + return fmt.Sprintf("%dB", b) + } +} + +func (b ByteSize) HR() string { + return b.HumanReadable() +} + +func (b ByteSize) HumanReadable() string { + switch { + case b > EB: + return fmt.Sprintf("%.1f EB", b.EBytes()) + case b > PB: + return fmt.Sprintf("%.1f PB", b.PBytes()) + case b > TB: + return
fmt.Sprintf("%.1f TB", b.TBytes()) + case b > GB: + return fmt.Sprintf("%.1f GB", b.GBytes()) + case b > MB: + return fmt.Sprintf("%.1f MB", b.MBytes()) + case b > KB: + return fmt.Sprintf("%.1f KB", b.KBytes()) + default: + return fmt.Sprintf("%d B", b) + } +} + +func (b ByteSize) MarshalText() ([]byte, error) { + return []byte(b.String()), nil +} + +func (b *ByteSize) UnmarshalText(t []byte) error { + var val uint64 + var unit string + + // copy for error message + t0 := t + + var c byte + var i int + +ParseLoop: + for i < len(t) { + c = t[i] + switch { + case '0' <= c && c <= '9': + if val > cutoff { + goto Overflow + } + + c = c - '0' + val *= 10 + + if val > val+uint64(c) { + // val+v overflows + goto Overflow + } + val += uint64(c) + i++ + + default: + if i == 0 { + goto SyntaxError + } + break ParseLoop + } + } + + unit = strings.TrimSpace(string(t[i:])) + switch unit { + case "Kb", "Mb", "Gb", "Tb", "Pb", "Eb": + goto BitsError + } + unit = strings.ToLower(unit) + switch unit { + case "", "b", "byte": + // do nothing - already in bytes + + case "k", "kb", "kilo", "kilobyte", "kilobytes": + if val > maxUint64/uint64(KB) { + goto Overflow + } + val *= uint64(KB) + + case "m", "mb", "mega", "megabyte", "megabytes": + if val > maxUint64/uint64(MB) { + goto Overflow + } + val *= uint64(MB) + + case "g", "gb", "giga", "gigabyte", "gigabytes": + if val > maxUint64/uint64(GB) { + goto Overflow + } + val *= uint64(GB) + + case "t", "tb", "tera", "terabyte", "terabytes": + if val > maxUint64/uint64(TB) { + goto Overflow + } + val *= uint64(TB) + + case "p", "pb", "peta", "petabyte", "petabytes": + if val > maxUint64/uint64(PB) { + goto Overflow + } + val *= uint64(PB) + + case "E", "EB", "e", "eb", "eB": + if val > maxUint64/uint64(EB) { + goto Overflow + } + val *= uint64(EB) + + default: + goto SyntaxError + } + + *b = ByteSize(val) + return nil + +Overflow: + *b = ByteSize(maxUint64) + return &strconv.NumError{fnUnmarshalText, string(t0), strconv.ErrRange} + +SyntaxError: + *b = 0 + return &strconv.NumError{fnUnmarshalText, string(t0), strconv.ErrSyntax} + +BitsError: + *b = 0 + return &strconv.NumError{fnUnmarshalText, string(t0), ErrBits} +} diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 000000000..7805d36de --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 000000000..0200f75b4 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. 
This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 000000000..586007402 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. 
+type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
+ nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
+ kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. 
+func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/go.mod b/vendor/github.com/ghodss/yaml/go.mod new file mode 100644 index 000000000..8d9ad7b64 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/go.mod @@ -0,0 +1,3 @@ +module github.com/ghodss/yaml + +require gopkg.in/yaml.v2 v2.2.2 diff --git a/vendor/github.com/ghodss/yaml/go.sum b/vendor/github.com/ghodss/yaml/go.sum new file mode 100644 index 000000000..bd555a333 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/go.sum @@ -0,0 +1,3 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 000000000..dfd264d6c --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,326 @@ +// Package yaml provides a wrapper around go-yaml designed to enable a better +// way of handling YAML when marshaling to and from structs. +// +// In short, this package first converts YAML to JSON using go-yaml and then +// uses json.Marshal and json.Unmarshal to convert to or from the struct. This +// means that it effectively reuses the JSON struct tags as well as the custom +// JSON methods MarshalJSON and UnmarshalJSON unlike go-yaml. +// +// See also http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang +// +package yaml // import "github.com/ghodss/yaml" + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + return unmarshal(yaml.Unmarshal, y, o, opts) +} + +// UnmarshalStrict is like Unmarshal except that any mapping keys that are +// duplicates will result in an error. +// To also be strict about unknown fields, add the DisallowUnknownFields option. 
+func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { + return unmarshal(yaml.UnmarshalStrict, y, o, opts) +} + +func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o interface{}, opts []JSONOpt) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo, f) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yamlUnmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilities happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. 
+ return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. 
+ jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go new file mode 100644 index 000000000..ab3e06a22 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 000000000..ada2b78e8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1271 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. 
+// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." 
+ s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + var err error + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. 
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. 
+type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. 
+ if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. 
+ if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index d9aa3c42d..63b0f08be 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index d4db5a1c1..f9b6e41b3 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1, m2 := e1.value, e2.value + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 816a3b9d6..fa88add30 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,9 +185,25 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. value interface{} - enc []byte + + // enc is the raw bytes for the extension field. + enc []byte } // SetRawExtension is for testing only. 
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return e.value, nil + return extensionAsLegacyType(e.value), nil } if extension.ExtensionType == nil { @@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = v + e.value = extensionAsStorageType(v) e.desc = extension e.enc = nil emap[extension.Field] = e - return e.value, nil + return extensionAsLegacyType(e.value), nil } // defaultExtensionValue returns the default value for extension. @@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } @@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. 
+ if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 75565cc6d..fdd328bb7 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -341,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index 3b6ca41d5..f48a75676 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,13 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. 
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. 
- -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index b6cad9083..94fa9194a 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,10 +79,13 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) + if deref { + u = u.Elem() + } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index d55a335d9..dbfffe071 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,16 +85,21 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p } // valToPointer converts v to a pointer. v must be of pointer type. diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 50b99b83a..79668ff5c 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. 
func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - + if len(oots) > 0 { // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index b16794496..5cb11fa95 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,6 +87,7 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, + deref: deref, } // update cache @@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. 
fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. 
ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } return n @@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index ebf1caa56..acee2fc52 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break } } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..b4eb03ecc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/empty.proto + +package empty + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} + +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto new file mode 100644 index 000000000..03cacd233 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. 
All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000..33daa73dd --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} + +func (*Value) XXX_WellKnownType() string { return "Value" } + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} + +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 
0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 000000000..7d7808e7f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
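The struct.pb.go file added above defines Struct, Value (a oneof over null, number, string, bool, struct and list kinds) and ListValue. A minimal sketch of building such a value by hand, assuming only the vendored structpb and proto packages:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Each map entry picks one variant of the Value.Kind oneof.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"name":    {Kind: &structpb.Value_StringValue{StringValue: "disk-1"}},
			"size_gb": {Kind: &structpb.Value_NumberValue{NumberValue: 100}},
			"ssd":     {Kind: &structpb.Value_BoolValue{BoolValue: true}},
			"comment": {Kind: &structpb.Value_NullValue{NullValue: structpb.NullValue_NULL_VALUE}},
		},
	}
	fmt.Println(proto.MarshalTextString(s))
}

The text form shows each field tagged with whichever oneof variant was set; the generated accessors (GetNumberValue, GetStringValue, and so on) return the zero value when a different variant is stored.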
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 000000000..add19a1ad --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,461 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrappers + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} + +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} + +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} + +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} + +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} + +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} + +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} + +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} + +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} + +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 
0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 000000000..01947639a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. 
+message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/LICENSE b/vendor/github.com/yandex-cloud/go-genproto/LICENSE new file mode 100644 index 000000000..0cd74fabf --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 YANDEX LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/api/operation.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/api/operation.pb.go new file mode 100644 index 000000000..e066b63e0 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/api/operation.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/api/operation.proto + +package api // import "github.com/yandex-cloud/go-genproto/yandex/api" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Operation is annotation for rpc that returns longrunning operation, describes +// message types that will be returned in metadata [google.protobuf.Any], and +// in response [google.protobuf.Any] (for successful operation). +type Operation struct { + // Optional. 
If present, rpc returns operation which metadata field will + // contains message of specified type. + Metadata string `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Required. rpc returns operation, in case of success response will contains message of + // specified field. + Response string `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_operation_743b45b46a739ce6, []int{0} +} +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (dst *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(dst, src) +} +func (m *Operation) XXX_Size() int { + return xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) +} + +var xxx_messageInfo_Operation proto.InternalMessageInfo + +func (m *Operation) GetMetadata() string { + if m != nil { + return m.Metadata + } + return "" +} + +func (m *Operation) GetResponse() string { + if m != nil { + return m.Response + } + return "" +} + +var E_Operation = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 87334, + Name: "yandex.api.operation", + Tag: "bytes,87334,opt,name=operation", + Filename: "yandex/api/operation.proto", +} + +func init() { + proto.RegisterType((*Operation)(nil), "yandex.api.Operation") + proto.RegisterExtension(E_Operation) +} + +func init() { + proto.RegisterFile("yandex/api/operation.proto", fileDescriptor_operation_743b45b46a739ce6) +} + +var fileDescriptor_operation_743b45b46a739ce6 = []byte{ + // 217 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0x31, 0x4b, 0xc4, 0x40, + 0x10, 0x85, 0x89, 0xa0, 0x98, 0xb5, 0x0b, 0x08, 0x21, 0x85, 0x04, 0xab, 0x6b, 0x6e, 0x16, 0x4e, + 0x2b, 0xed, 0xb4, 0x96, 0x83, 0x03, 0x1b, 0xbb, 0x49, 0x76, 0xdc, 0x5b, 0xb8, 0xdb, 0x19, 0x76, + 0x37, 0xa0, 0x7f, 0xc8, 0xc2, 0x5f, 0x79, 0x24, 0x4b, 0x92, 0xf2, 0xcd, 0xf7, 0x78, 0xef, 0x31, + 0xaa, 0xf9, 0x45, 0x6f, 0xe8, 0x47, 0xa3, 0x38, 0xcd, 0x42, 0x01, 0x93, 0x63, 0x0f, 0x12, 0x38, + 0x71, 0xa5, 0x32, 0x03, 0x14, 0xd7, 0xb4, 0x96, 0xd9, 0x9e, 0x48, 0x4f, 0xa4, 0x1b, 0xbe, 0xb5, + 0xa1, 0xd8, 0x07, 0x27, 0x89, 0x43, 0x76, 0x3f, 0xbe, 0xab, 0x72, 0x3f, 0x07, 0x54, 0x8d, 0xba, + 0x3d, 0x53, 0x42, 0x83, 0x09, 0xeb, 0xa2, 0x2d, 0x36, 0xe5, 0x61, 0xd1, 0x23, 0x0b, 0x14, 0x85, + 0x7d, 0xa4, 0xfa, 0x2a, 0xb3, 0x59, 0xbf, 0x7c, 0xaa, 0x72, 0x59, 0x51, 0x3d, 0x40, 0x2e, 0x85, + 0xb9, 0x14, 0x3e, 0x28, 0x1d, 0xd9, 0xec, 0x65, 0xc4, 0xb1, 0xfe, 0xfb, 0xbf, 0x6e, 0x8b, 0xcd, + 0xdd, 0xee, 0x1e, 0xd6, 0xa1, 0xb0, 0x6c, 0x38, 0xac, 0x49, 0x6f, 0xcf, 0x5f, 0x3b, 0xeb, 0xd2, + 0x71, 0xe8, 0xa0, 0xe7, 0xb3, 0xce, 0xee, 0x6d, 0x7f, 0xe2, 0xc1, 0x68, 0xcb, 0x5b, 0x4b, 0x7e, + 0x6a, 0xd0, 0xeb, 0x2f, 0x5e, 0x51, 0x5c, 0x77, 0x33, 0x5d, 0x9f, 0x2e, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x47, 0x3d, 0x10, 0x6e, 0x24, 0x01, 0x00, 0x00, +} diff --git 
a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/access/access.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/access/access.pb.go new file mode 100644 index 000000000..fb2d2d642 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/access/access.pb.go @@ -0,0 +1,560 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/access/access.proto + +package access // import "github.com/yandex-cloud/go-genproto/yandex/cloud/access" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AccessBindingAction int32 + +const ( + AccessBindingAction_ACCESS_BINDING_ACTION_UNSPECIFIED AccessBindingAction = 0 + // Addition of an access binding. + AccessBindingAction_ADD AccessBindingAction = 1 + // Removal of an access binding. + AccessBindingAction_REMOVE AccessBindingAction = 2 +) + +var AccessBindingAction_name = map[int32]string{ + 0: "ACCESS_BINDING_ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", +} +var AccessBindingAction_value = map[string]int32{ + "ACCESS_BINDING_ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, +} + +func (x AccessBindingAction) String() string { + return proto.EnumName(AccessBindingAction_name, int32(x)) +} +func (AccessBindingAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{0} +} + +type Subject struct { + // ID of the subject. + // + // It can contain one of the following values: + // * `allAuthenticatedUsers`: A special system identifier that represents anyone + // who is authenticated. It can be used only if the [type] is `system`. + // + // * ``: An identifier that represents a user account. + // It can be used only if the [type] is `userAccount` or `serviceAccount`. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Type of the subject. + // + // It can contain one of the following values: + // * `system`: System group. This type represents several accounts with a common system identifier. + // * `userAccount`: An user account (for example, "alice.the.girl@yandex.ru"). This type represents the [yandex.cloud.iam.v1.UserAccount] resource. + // * `serviceAccount`: A service account. This type represents the [yandex.cloud.iam.v1.ServiceAccount] resource. + // + // For more information, see [Subject to which the role is assigned](/docs/iam/concepts/access-control/#subject). 
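+ //
+ // Editorial illustration (not from the upstream comment): a subject referring to a
+ // service account could be built as
+ //
+ //   subject := &Subject{Id: "exampleServiceAccountId", Type: "serviceAccount"}
+ //
+ // where the ID is a placeholder, not a real resource ID.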
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Subject) Reset() { *m = Subject{} } +func (m *Subject) String() string { return proto.CompactTextString(m) } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{0} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Subject.Unmarshal(m, b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Subject.Marshal(b, m, deterministic) +} +func (dst *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(dst, src) +} +func (m *Subject) XXX_Size() int { + return xxx_messageInfo_Subject.Size(m) +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *Subject) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Subject) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +type AccessBinding struct { + // ID of the [yandex.cloud.iam.v1.Role] that is assigned to the [subject]. + RoleId string `protobuf:"bytes,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"` + // Identity for which access binding is being created. + // It can represent an account with a unique ID or several accounts with a system identifier. + Subject *Subject `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AccessBinding) Reset() { *m = AccessBinding{} } +func (m *AccessBinding) String() string { return proto.CompactTextString(m) } +func (*AccessBinding) ProtoMessage() {} +func (*AccessBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{1} +} +func (m *AccessBinding) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AccessBinding.Unmarshal(m, b) +} +func (m *AccessBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AccessBinding.Marshal(b, m, deterministic) +} +func (dst *AccessBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccessBinding.Merge(dst, src) +} +func (m *AccessBinding) XXX_Size() int { + return xxx_messageInfo_AccessBinding.Size(m) +} +func (m *AccessBinding) XXX_DiscardUnknown() { + xxx_messageInfo_AccessBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_AccessBinding proto.InternalMessageInfo + +func (m *AccessBinding) GetRoleId() string { + if m != nil { + return m.RoleId + } + return "" +} + +func (m *AccessBinding) GetSubject() *Subject { + if m != nil { + return m.Subject + } + return nil +} + +type ListAccessBindingsRequest struct { + // ID of the resource to list access bindings for. + // + // To get the resource ID, use a corresponding List request. + // For example, use the [yandex.cloud.resourcemanager.v1.CloudService.List] request to get the Cloud resource ID. + ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + // The maximum number of results per page that should be returned. 
If the number of available + // results is larger than [page_size], + // the service returns a [ListAccessBindingsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. Set [page_token] + // to the [ListAccessBindingsResponse.next_page_token] + // returned by a previous list request to get the next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAccessBindingsRequest) Reset() { *m = ListAccessBindingsRequest{} } +func (m *ListAccessBindingsRequest) String() string { return proto.CompactTextString(m) } +func (*ListAccessBindingsRequest) ProtoMessage() {} +func (*ListAccessBindingsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{2} +} +func (m *ListAccessBindingsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAccessBindingsRequest.Unmarshal(m, b) +} +func (m *ListAccessBindingsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAccessBindingsRequest.Marshal(b, m, deterministic) +} +func (dst *ListAccessBindingsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAccessBindingsRequest.Merge(dst, src) +} +func (m *ListAccessBindingsRequest) XXX_Size() int { + return xxx_messageInfo_ListAccessBindingsRequest.Size(m) +} +func (m *ListAccessBindingsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListAccessBindingsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAccessBindingsRequest proto.InternalMessageInfo + +func (m *ListAccessBindingsRequest) GetResourceId() string { + if m != nil { + return m.ResourceId + } + return "" +} + +func (m *ListAccessBindingsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListAccessBindingsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListAccessBindingsResponse struct { + // List of access bindings for the specified resource. + AccessBindings []*AccessBinding `protobuf:"bytes,1,rep,name=access_bindings,json=accessBindings,proto3" json:"access_bindings,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListAccessBindingsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListAccessBindingsRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
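+ //
+ // Editorial illustration (assumes a service client exposing a ListAccessBindings RPC;
+ // the resource ID is a placeholder):
+ //
+ //   req := &ListAccessBindingsRequest{ResourceId: "exampleResourceId", PageSize: 100}
+ //   for {
+ //       resp, err := client.ListAccessBindings(ctx, req)
+ //       if err != nil {
+ //           // handle the error and stop
+ //       }
+ //       // ... consume resp.AccessBindings ...
+ //       if resp.NextPageToken == "" {
+ //           break
+ //       }
+ //       req.PageToken = resp.NextPageToken
+ //   }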
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAccessBindingsResponse) Reset() { *m = ListAccessBindingsResponse{} } +func (m *ListAccessBindingsResponse) String() string { return proto.CompactTextString(m) } +func (*ListAccessBindingsResponse) ProtoMessage() {} +func (*ListAccessBindingsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{3} +} +func (m *ListAccessBindingsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAccessBindingsResponse.Unmarshal(m, b) +} +func (m *ListAccessBindingsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAccessBindingsResponse.Marshal(b, m, deterministic) +} +func (dst *ListAccessBindingsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAccessBindingsResponse.Merge(dst, src) +} +func (m *ListAccessBindingsResponse) XXX_Size() int { + return xxx_messageInfo_ListAccessBindingsResponse.Size(m) +} +func (m *ListAccessBindingsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListAccessBindingsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAccessBindingsResponse proto.InternalMessageInfo + +func (m *ListAccessBindingsResponse) GetAccessBindings() []*AccessBinding { + if m != nil { + return m.AccessBindings + } + return nil +} + +func (m *ListAccessBindingsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type SetAccessBindingsRequest struct { + // ID of the resource for which access bindings are being set. + // + // To get the resource ID, use a corresponding List request. + ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + // Access bindings to be set. For more information, see [Access Bindings](/docs/iam/concepts/access-control/#access-bindings). 
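+ //
+ // Editorial illustration (role and IDs are placeholders): replacing the bindings of a
+ // resource with a single binding:
+ //
+ //   req := &SetAccessBindingsRequest{
+ //       ResourceId: "exampleResourceId",
+ //       AccessBindings: []*AccessBinding{{
+ //           RoleId:  "viewer",
+ //           Subject: &Subject{Id: "exampleUserAccountId", Type: "userAccount"},
+ //       }},
+ //   }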
+ AccessBindings []*AccessBinding `protobuf:"bytes,2,rep,name=access_bindings,json=accessBindings,proto3" json:"access_bindings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetAccessBindingsRequest) Reset() { *m = SetAccessBindingsRequest{} } +func (m *SetAccessBindingsRequest) String() string { return proto.CompactTextString(m) } +func (*SetAccessBindingsRequest) ProtoMessage() {} +func (*SetAccessBindingsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{4} +} +func (m *SetAccessBindingsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetAccessBindingsRequest.Unmarshal(m, b) +} +func (m *SetAccessBindingsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetAccessBindingsRequest.Marshal(b, m, deterministic) +} +func (dst *SetAccessBindingsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetAccessBindingsRequest.Merge(dst, src) +} +func (m *SetAccessBindingsRequest) XXX_Size() int { + return xxx_messageInfo_SetAccessBindingsRequest.Size(m) +} +func (m *SetAccessBindingsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetAccessBindingsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetAccessBindingsRequest proto.InternalMessageInfo + +func (m *SetAccessBindingsRequest) GetResourceId() string { + if m != nil { + return m.ResourceId + } + return "" +} + +func (m *SetAccessBindingsRequest) GetAccessBindings() []*AccessBinding { + if m != nil { + return m.AccessBindings + } + return nil +} + +type SetAccessBindingsMetadata struct { + // ID of the resource for which access bindings are being set. + ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetAccessBindingsMetadata) Reset() { *m = SetAccessBindingsMetadata{} } +func (m *SetAccessBindingsMetadata) String() string { return proto.CompactTextString(m) } +func (*SetAccessBindingsMetadata) ProtoMessage() {} +func (*SetAccessBindingsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{5} +} +func (m *SetAccessBindingsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetAccessBindingsMetadata.Unmarshal(m, b) +} +func (m *SetAccessBindingsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetAccessBindingsMetadata.Marshal(b, m, deterministic) +} +func (dst *SetAccessBindingsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetAccessBindingsMetadata.Merge(dst, src) +} +func (m *SetAccessBindingsMetadata) XXX_Size() int { + return xxx_messageInfo_SetAccessBindingsMetadata.Size(m) +} +func (m *SetAccessBindingsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_SetAccessBindingsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_SetAccessBindingsMetadata proto.InternalMessageInfo + +func (m *SetAccessBindingsMetadata) GetResourceId() string { + if m != nil { + return m.ResourceId + } + return "" +} + +type UpdateAccessBindingsRequest struct { + // ID of the resource for which access bindings are being updated. + ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + // Updates to access bindings. 
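+ //
+ // Editorial illustration (role and IDs are placeholders): adding one binding via a delta:
+ //
+ //   req := &UpdateAccessBindingsRequest{
+ //       ResourceId: "exampleResourceId",
+ //       AccessBindingDeltas: []*AccessBindingDelta{{
+ //           Action: AccessBindingAction_ADD,
+ //           AccessBinding: &AccessBinding{
+ //               RoleId:  "editor",
+ //               Subject: &Subject{Id: "exampleServiceAccountId", Type: "serviceAccount"},
+ //           },
+ //       }},
+ //   }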
+ AccessBindingDeltas []*AccessBindingDelta `protobuf:"bytes,2,rep,name=access_binding_deltas,json=accessBindingDeltas,proto3" json:"access_binding_deltas,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateAccessBindingsRequest) Reset() { *m = UpdateAccessBindingsRequest{} } +func (m *UpdateAccessBindingsRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAccessBindingsRequest) ProtoMessage() {} +func (*UpdateAccessBindingsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{6} +} +func (m *UpdateAccessBindingsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateAccessBindingsRequest.Unmarshal(m, b) +} +func (m *UpdateAccessBindingsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateAccessBindingsRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateAccessBindingsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateAccessBindingsRequest.Merge(dst, src) +} +func (m *UpdateAccessBindingsRequest) XXX_Size() int { + return xxx_messageInfo_UpdateAccessBindingsRequest.Size(m) +} +func (m *UpdateAccessBindingsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateAccessBindingsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateAccessBindingsRequest proto.InternalMessageInfo + +func (m *UpdateAccessBindingsRequest) GetResourceId() string { + if m != nil { + return m.ResourceId + } + return "" +} + +func (m *UpdateAccessBindingsRequest) GetAccessBindingDeltas() []*AccessBindingDelta { + if m != nil { + return m.AccessBindingDeltas + } + return nil +} + +type UpdateAccessBindingsMetadata struct { + // ID of the resource for which access bindings are being updated. + ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateAccessBindingsMetadata) Reset() { *m = UpdateAccessBindingsMetadata{} } +func (m *UpdateAccessBindingsMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateAccessBindingsMetadata) ProtoMessage() {} +func (*UpdateAccessBindingsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{7} +} +func (m *UpdateAccessBindingsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateAccessBindingsMetadata.Unmarshal(m, b) +} +func (m *UpdateAccessBindingsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateAccessBindingsMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateAccessBindingsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateAccessBindingsMetadata.Merge(dst, src) +} +func (m *UpdateAccessBindingsMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateAccessBindingsMetadata.Size(m) +} +func (m *UpdateAccessBindingsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateAccessBindingsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateAccessBindingsMetadata proto.InternalMessageInfo + +func (m *UpdateAccessBindingsMetadata) GetResourceId() string { + if m != nil { + return m.ResourceId + } + return "" +} + +type AccessBindingDelta struct { + // The action that is being performed on an access binding. 
+ Action AccessBindingAction `protobuf:"varint,1,opt,name=action,proto3,enum=yandex.cloud.access.AccessBindingAction" json:"action,omitempty"` + // Access binding. For more information, see [Access Bindings](/docs/iam/concepts/access-control/#access-bindings). + AccessBinding *AccessBinding `protobuf:"bytes,2,opt,name=access_binding,json=accessBinding,proto3" json:"access_binding,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AccessBindingDelta) Reset() { *m = AccessBindingDelta{} } +func (m *AccessBindingDelta) String() string { return proto.CompactTextString(m) } +func (*AccessBindingDelta) ProtoMessage() {} +func (*AccessBindingDelta) Descriptor() ([]byte, []int) { + return fileDescriptor_access_6c04a92fd5da6f4f, []int{8} +} +func (m *AccessBindingDelta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AccessBindingDelta.Unmarshal(m, b) +} +func (m *AccessBindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AccessBindingDelta.Marshal(b, m, deterministic) +} +func (dst *AccessBindingDelta) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccessBindingDelta.Merge(dst, src) +} +func (m *AccessBindingDelta) XXX_Size() int { + return xxx_messageInfo_AccessBindingDelta.Size(m) +} +func (m *AccessBindingDelta) XXX_DiscardUnknown() { + xxx_messageInfo_AccessBindingDelta.DiscardUnknown(m) +} + +var xxx_messageInfo_AccessBindingDelta proto.InternalMessageInfo + +func (m *AccessBindingDelta) GetAction() AccessBindingAction { + if m != nil { + return m.Action + } + return AccessBindingAction_ACCESS_BINDING_ACTION_UNSPECIFIED +} + +func (m *AccessBindingDelta) GetAccessBinding() *AccessBinding { + if m != nil { + return m.AccessBinding + } + return nil +} + +func init() { + proto.RegisterType((*Subject)(nil), "yandex.cloud.access.Subject") + proto.RegisterType((*AccessBinding)(nil), "yandex.cloud.access.AccessBinding") + proto.RegisterType((*ListAccessBindingsRequest)(nil), "yandex.cloud.access.ListAccessBindingsRequest") + proto.RegisterType((*ListAccessBindingsResponse)(nil), "yandex.cloud.access.ListAccessBindingsResponse") + proto.RegisterType((*SetAccessBindingsRequest)(nil), "yandex.cloud.access.SetAccessBindingsRequest") + proto.RegisterType((*SetAccessBindingsMetadata)(nil), "yandex.cloud.access.SetAccessBindingsMetadata") + proto.RegisterType((*UpdateAccessBindingsRequest)(nil), "yandex.cloud.access.UpdateAccessBindingsRequest") + proto.RegisterType((*UpdateAccessBindingsMetadata)(nil), "yandex.cloud.access.UpdateAccessBindingsMetadata") + proto.RegisterType((*AccessBindingDelta)(nil), "yandex.cloud.access.AccessBindingDelta") + proto.RegisterEnum("yandex.cloud.access.AccessBindingAction", AccessBindingAction_name, AccessBindingAction_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/access/access.proto", fileDescriptor_access_6c04a92fd5da6f4f) +} + +var fileDescriptor_access_6c04a92fd5da6f4f = []byte{ + // 579 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6e, 0xd3, 0x4c, + 0x14, 0xc5, 0x3f, 0x27, 0xfd, 0x92, 0xe6, 0x86, 0xa4, 0xd1, 0x44, 0x48, 0x6e, 0x29, 0x22, 0xb5, + 0x04, 0x8d, 0x90, 0xe2, 0xfc, 0x41, 0x88, 0x05, 0x29, 0x10, 0x27, 0x29, 0xb2, 0xa0, 0x49, 0x6b, + 0xb7, 0x2c, 0xd8, 0x58, 0x13, 0xcf, 0x28, 0x18, 0x82, 0x6d, 0x32, 0x13, 0xd4, 0xf6, 0x11, 0xba, + 0x63, 0x0f, 0x8f, 0x80, 0x78, 0x8c, 0xf6, 0x51, 0x78, 0x06, 0x56, 0xc8, 0x63, 0xa7, 0x8a, 0x89, + 
0xa5, 0x66, 0xd1, 0xd5, 0x58, 0xbe, 0xe7, 0x9e, 0xfb, 0x3b, 0x33, 0x9a, 0x81, 0xca, 0x19, 0x76, + 0x09, 0x3d, 0xad, 0xdb, 0x13, 0x6f, 0x46, 0xea, 0xd8, 0xb6, 0x29, 0x63, 0xd1, 0xa2, 0xfa, 0x53, + 0x8f, 0x7b, 0xa8, 0x1c, 0x2a, 0x54, 0xa1, 0x50, 0xc3, 0xd2, 0xd6, 0xfd, 0x58, 0xdb, 0x57, 0x3c, + 0x71, 0x08, 0xe6, 0x8e, 0xe7, 0x86, 0x3d, 0xca, 0x33, 0xc8, 0x9a, 0xb3, 0xd1, 0x47, 0x6a, 0x73, + 0x24, 0x43, 0xca, 0x21, 0xb2, 0x54, 0x91, 0xaa, 0x39, 0x6d, 0xfd, 0xe2, 0xaa, 0xb9, 0xd6, 0xde, + 0x7b, 0xda, 0x30, 0x52, 0x0e, 0x41, 0x08, 0xd6, 0xf8, 0x99, 0x4f, 0xe5, 0x54, 0x50, 0x33, 0xc4, + 0xb7, 0xe2, 0x43, 0xa1, 0x23, 0x26, 0x68, 0x8e, 0x4b, 0x1c, 0x77, 0x8c, 0x76, 0x20, 0x3b, 0xf5, + 0x26, 0xd4, 0x4a, 0xf0, 0xc8, 0x04, 0x05, 0x9d, 0xa0, 0x36, 0x64, 0x59, 0x38, 0x4c, 0x58, 0xe5, + 0x5b, 0xdb, 0x6a, 0x02, 0xb2, 0x1a, 0x01, 0x69, 0x6b, 0xbf, 0x2f, 0x9b, 0x92, 0x31, 0x6f, 0x51, + 0x7e, 0x48, 0xb0, 0xf9, 0xd6, 0x61, 0x3c, 0x36, 0x96, 0x19, 0xf4, 0xcb, 0x8c, 0x32, 0x8e, 0x6a, + 0x90, 0x9f, 0x52, 0xe6, 0xcd, 0xa6, 0xf6, 0x02, 0xc2, 0x9d, 0xc0, 0xe1, 0x1a, 0x03, 0xe6, 0x02, + 0x9d, 0xa0, 0x5d, 0xc8, 0xf9, 0x78, 0x4c, 0x2d, 0xe6, 0x9c, 0x87, 0xb9, 0xd2, 0x1a, 0xfc, 0xb9, + 0x6c, 0x66, 0xda, 0x7b, 0xcd, 0x46, 0xa3, 0x61, 0xac, 0x07, 0x45, 0xd3, 0x39, 0xa7, 0xa8, 0x0a, + 0x20, 0x84, 0xdc, 0xfb, 0x44, 0x5d, 0x39, 0x2d, 0x6c, 0x73, 0x17, 0x57, 0xcd, 0xff, 0x85, 0xd2, + 0x10, 0x2e, 0xc7, 0x41, 0x4d, 0xf9, 0x26, 0xc1, 0x56, 0x12, 0x1f, 0xf3, 0x3d, 0x97, 0x51, 0xf4, + 0x06, 0x36, 0xc2, 0x7c, 0xd6, 0x28, 0x2a, 0xc9, 0x52, 0x25, 0x5d, 0xcd, 0xb7, 0x94, 0xc4, 0x4d, + 0x88, 0xb9, 0x18, 0x45, 0x1c, 0x33, 0x45, 0x8f, 0x60, 0xc3, 0xa5, 0xa7, 0xdc, 0x5a, 0x40, 0x0b, + 0x0f, 0xa7, 0x10, 0xfc, 0x3e, 0xbc, 0x66, 0xfa, 0x2e, 0x81, 0x6c, 0xd2, 0xdb, 0xd9, 0xb2, 0xa3, + 0xe5, 0x00, 0xa9, 0x55, 0x03, 0x44, 0x67, 0xf9, 0x4f, 0x0c, 0xa5, 0x0d, 0x9b, 0x4b, 0x74, 0x07, + 0x94, 0x63, 0x82, 0x39, 0x46, 0x0f, 0x12, 0xf0, 0x16, 0x81, 0x94, 0x5f, 0x12, 0xdc, 0x3b, 0xf1, + 0x09, 0xe6, 0xf4, 0x56, 0xf2, 0x61, 0xb8, 0x1b, 0xcf, 0x67, 0x11, 0x3a, 0xe1, 0x78, 0x9e, 0x72, + 0xf7, 0xe6, 0x94, 0xbd, 0x40, 0x1f, 0x45, 0x2d, 0xe3, 0xa5, 0x0a, 0x53, 0x5e, 0xc2, 0x76, 0x12, + 0xf0, 0xea, 0x91, 0x7f, 0x4a, 0x80, 0x96, 0x47, 0xa2, 0x7d, 0xc8, 0x60, 0x3b, 0xb8, 0xd5, 0xa2, + 0xa5, 0xd8, 0xaa, 0xde, 0xcc, 0xda, 0x11, 0xfa, 0x08, 0x36, 0xea, 0x46, 0x43, 0x28, 0xc6, 0xb7, + 0x20, 0xba, 0xa7, 0xab, 0x9f, 0x70, 0x21, 0x16, 0xfb, 0xf1, 0x11, 0x94, 0x13, 0xa6, 0xa2, 0x87, + 0xb0, 0xd3, 0xe9, 0x76, 0xfb, 0xa6, 0x69, 0x69, 0xfa, 0xa0, 0xa7, 0x0f, 0x5e, 0x5b, 0x9d, 0xee, + 0xb1, 0x3e, 0x1c, 0x58, 0x27, 0x03, 0xf3, 0xb0, 0xdf, 0xd5, 0xf7, 0xf5, 0x7e, 0xaf, 0xf4, 0x1f, + 0xca, 0x42, 0xba, 0xd3, 0xeb, 0x95, 0x24, 0x04, 0x90, 0x31, 0xfa, 0x07, 0xc3, 0x77, 0xfd, 0x52, + 0x4a, 0x7b, 0xf5, 0xfe, 0xc5, 0xd8, 0xe1, 0x1f, 0x66, 0x23, 0xd5, 0xf6, 0x3e, 0xd7, 0x43, 0xae, + 0x5a, 0xf8, 0xba, 0x8d, 0xbd, 0xda, 0x98, 0xba, 0xe2, 0x61, 0xab, 0x27, 0xbc, 0x96, 0xcf, 0xc3, + 0x65, 0x94, 0x11, 0x8a, 0x27, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc3, 0xce, 0x12, 0xcf, 0x52, + 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk.pb.go new file mode 100644 index 000000000..a5d663606 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk.pb.go @@ -0,0 +1,357 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/compute/v1/disk.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Disk_Status int32 + +const ( + Disk_STATUS_UNSPECIFIED Disk_Status = 0 + // Disk is being created. + Disk_CREATING Disk_Status = 1 + // Disk is ready to use. + Disk_READY Disk_Status = 2 + // Disk encountered a problem and cannot operate. + Disk_ERROR Disk_Status = 3 + // Disk is being deleted. + Disk_DELETING Disk_Status = 4 +) + +var Disk_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "CREATING", + 2: "READY", + 3: "ERROR", + 4: "DELETING", +} +var Disk_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, + "ERROR": 3, + "DELETING": 4, +} + +func (x Disk_Status) String() string { + return proto.EnumName(Disk_Status_name, int32(x)) +} +func (Disk_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_disk_d27a2bc800477bf5, []int{0, 0} +} + +// A Disk resource. For more information, see [Disks](/docs/compute/concepts/disk). +type Disk struct { + // ID of the disk. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the disk belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the disk. 1-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the disk. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Maximum of 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the disk type. + TypeId string `protobuf:"bytes,7,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + // ID of the availability zone where the disk resides. + ZoneId string `protobuf:"bytes,8,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // Size of the disk, specified in bytes. + Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"` + // License IDs that indicate which licenses are attached to this resource. + // License IDs are used to calculate additional charges for the use of the virtual machine. + // + // The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + // + // If you know the license IDs, specify them when you create the image. 
+ // For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + // You can specify them in the [yandex.cloud.compute.v1.ImageService.Create] request. + ProductIds []string `protobuf:"bytes,10,rep,name=product_ids,json=productIds,proto3" json:"product_ids,omitempty"` + // Current status of the disk. + Status Disk_Status `protobuf:"varint,11,opt,name=status,proto3,enum=yandex.cloud.compute.v1.Disk_Status" json:"status,omitempty"` + // Types that are valid to be assigned to Source: + // *Disk_SourceImageId + // *Disk_SourceSnapshotId + Source isDisk_Source `protobuf_oneof:"source"` + // Array of instances to which the disk is attached. + InstanceIds []string `protobuf:"bytes,14,rep,name=instance_ids,json=instanceIds,proto3" json:"instance_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Disk) Reset() { *m = Disk{} } +func (m *Disk) String() string { return proto.CompactTextString(m) } +func (*Disk) ProtoMessage() {} +func (*Disk) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_d27a2bc800477bf5, []int{0} +} +func (m *Disk) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Disk.Unmarshal(m, b) +} +func (m *Disk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Disk.Marshal(b, m, deterministic) +} +func (dst *Disk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Disk.Merge(dst, src) +} +func (m *Disk) XXX_Size() int { + return xxx_messageInfo_Disk.Size(m) +} +func (m *Disk) XXX_DiscardUnknown() { + xxx_messageInfo_Disk.DiscardUnknown(m) +} + +var xxx_messageInfo_Disk proto.InternalMessageInfo + +func (m *Disk) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Disk) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Disk) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Disk) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Disk) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Disk) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Disk) GetTypeId() string { + if m != nil { + return m.TypeId + } + return "" +} + +func (m *Disk) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Disk) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *Disk) GetProductIds() []string { + if m != nil { + return m.ProductIds + } + return nil +} + +func (m *Disk) GetStatus() Disk_Status { + if m != nil { + return m.Status + } + return Disk_STATUS_UNSPECIFIED +} + +type isDisk_Source interface { + isDisk_Source() +} + +type Disk_SourceImageId struct { + SourceImageId string `protobuf:"bytes,12,opt,name=source_image_id,json=sourceImageId,proto3,oneof"` +} + +type Disk_SourceSnapshotId struct { + SourceSnapshotId string `protobuf:"bytes,13,opt,name=source_snapshot_id,json=sourceSnapshotId,proto3,oneof"` +} + +func (*Disk_SourceImageId) isDisk_Source() {} + +func (*Disk_SourceSnapshotId) isDisk_Source() {} + +func (m *Disk) GetSource() isDisk_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Disk) GetSourceImageId() string { + if x, ok := m.GetSource().(*Disk_SourceImageId); ok { + return x.SourceImageId + } + return "" +} + +func (m *Disk) 
GetSourceSnapshotId() string { + if x, ok := m.GetSource().(*Disk_SourceSnapshotId); ok { + return x.SourceSnapshotId + } + return "" +} + +func (m *Disk) GetInstanceIds() []string { + if m != nil { + return m.InstanceIds + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Disk) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Disk_OneofMarshaler, _Disk_OneofUnmarshaler, _Disk_OneofSizer, []interface{}{ + (*Disk_SourceImageId)(nil), + (*Disk_SourceSnapshotId)(nil), + } +} + +func _Disk_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Disk) + // source + switch x := m.Source.(type) { + case *Disk_SourceImageId: + b.EncodeVarint(12<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SourceImageId) + case *Disk_SourceSnapshotId: + b.EncodeVarint(13<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SourceSnapshotId) + case nil: + default: + return fmt.Errorf("Disk.Source has unexpected type %T", x) + } + return nil +} + +func _Disk_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Disk) + switch tag { + case 12: // source.source_image_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Disk_SourceImageId{x} + return true, err + case 13: // source.source_snapshot_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Disk_SourceSnapshotId{x} + return true, err + default: + return false, nil + } +} + +func _Disk_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Disk) + // source + switch x := m.Source.(type) { + case *Disk_SourceImageId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.SourceImageId))) + n += len(x.SourceImageId) + case *Disk_SourceSnapshotId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.SourceSnapshotId))) + n += len(x.SourceSnapshotId) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Disk)(nil), "yandex.cloud.compute.v1.Disk") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.Disk.LabelsEntry") + proto.RegisterEnum("yandex.cloud.compute.v1.Disk_Status", Disk_Status_name, Disk_Status_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/disk.proto", fileDescriptor_disk_d27a2bc800477bf5) +} + +var fileDescriptor_disk_d27a2bc800477bf5 = []byte{ + // 533 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x41, 0x4f, 0xdb, 0x3e, + 0x18, 0xc6, 0x49, 0xd3, 0x86, 0xe6, 0x0d, 0xf0, 0x8f, 0xac, 0xbf, 0x46, 0xc4, 0x0e, 0x64, 0x68, + 0x87, 0xec, 0x40, 0x22, 0xd8, 0x65, 0x6c, 0xbb, 0x14, 0x9a, 0x6d, 0x91, 0x10, 0x9b, 0xdc, 0x72, + 0xd8, 0x2e, 0x55, 0x1a, 0x9b, 0x60, 0x35, 0x8d, 0xa3, 0xd8, 0xa9, 0x56, 0x3e, 0xce, 0x3e, 0xe9, + 0x64, 0x3b, 0x95, 0xb8, 0xb0, 0xdb, 0xeb, 0xe7, 0xf9, 0xd9, 0xef, 0xf3, 0x5a, 0x36, 0x9c, 0x6d, + 0xf3, 0x9a, 0xd0, 0xdf, 0x49, 0x51, 0xf1, 0x8e, 0x24, 0x05, 0x5f, 0x37, 0x9d, 0xa4, 0xc9, 0xe6, + 0x22, 0x21, 0x4c, 0xac, 0xe2, 0xa6, 0xe5, 0x92, 0xa3, 0x63, 0xc3, 0xc4, 0x9a, 0x89, 0x7b, 0x26, + 0xde, 0x5c, 0x9c, 0x9c, 0x96, 0x9c, 0x97, 0x15, 0x4d, 0x34, 0xb6, 0xec, 0x1e, 0x12, 0xc9, 0xd6, + 0x54, 0xc8, 0x7c, 0xdd, 0x98, 0x9d, 0x67, 0x7f, 0x46, 
0x30, 0x9c, 0x32, 0xb1, 0x42, 0x47, 0x30, + 0x60, 0x24, 0xb0, 0x42, 0x2b, 0x72, 0xf1, 0x80, 0x11, 0xf4, 0x1a, 0xdc, 0x07, 0x5e, 0x11, 0xda, + 0x2e, 0x18, 0x09, 0x06, 0x5a, 0x1e, 0x1b, 0x21, 0x23, 0xe8, 0x0a, 0xa0, 0x68, 0x69, 0x2e, 0x29, + 0x59, 0xe4, 0x32, 0xb0, 0x43, 0x2b, 0xf2, 0x2e, 0x4f, 0x62, 0xd3, 0x2b, 0xde, 0xf5, 0x8a, 0xe7, + 0xbb, 0x5e, 0xd8, 0xed, 0xe9, 0x89, 0x44, 0x08, 0x86, 0x75, 0xbe, 0xa6, 0xc1, 0x50, 0x1f, 0xa9, + 0x6b, 0x14, 0x82, 0x47, 0xa8, 0x28, 0x5a, 0xd6, 0x48, 0xc6, 0xeb, 0x60, 0xa4, 0xad, 0xe7, 0x12, + 0x9a, 0x80, 0x53, 0xe5, 0x4b, 0x5a, 0x89, 0xc0, 0x09, 0xed, 0xc8, 0xbb, 0x7c, 0x17, 0xbf, 0x30, + 0x71, 0xac, 0x86, 0x89, 0x6f, 0x35, 0x9b, 0xd6, 0xb2, 0xdd, 0xe2, 0x7e, 0x23, 0x3a, 0x86, 0x7d, + 0xb9, 0x6d, 0xa8, 0x1a, 0x67, 0x5f, 0x37, 0x70, 0xd4, 0x32, 0x23, 0xca, 0x78, 0xe2, 0xb5, 0x36, + 0xc6, 0xc6, 0x50, 0xcb, 0x8c, 0xa8, 0xa8, 0x82, 0x3d, 0xd1, 0xc0, 0x0d, 0xad, 0xc8, 0xc6, 0xba, + 0x46, 0xa7, 0xe0, 0x35, 0x2d, 0x27, 0x5d, 0x21, 0x17, 0x8c, 0x88, 0x00, 0x42, 0x3b, 0x72, 0x31, + 0xf4, 0x52, 0x46, 0x04, 0xfa, 0x0c, 0x8e, 0x90, 0xb9, 0xec, 0x44, 0xe0, 0x85, 0x56, 0x74, 0x74, + 0xf9, 0xf6, 0xdf, 0x49, 0x67, 0x9a, 0xc5, 0xfd, 0x1e, 0x14, 0xc1, 0x7f, 0x82, 0x77, 0x6d, 0x41, + 0x17, 0x6c, 0x9d, 0x97, 0x3a, 0xd3, 0x81, 0xca, 0xf4, 0x6d, 0x0f, 0x1f, 0x1a, 0x23, 0x53, 0x7a, + 0x46, 0x50, 0x0c, 0xa8, 0x27, 0x45, 0x9d, 0x37, 0xe2, 0x91, 0xab, 0x40, 0xc1, 0x61, 0x0f, 0xfb, + 0xc6, 0x9b, 0xf5, 0x56, 0x46, 0xd0, 0x1b, 0x38, 0x60, 0xb5, 0x90, 0x79, 0xad, 0xce, 0x26, 0x22, + 0x38, 0xd2, 0xc9, 0xbd, 0x9d, 0x96, 0x11, 0x71, 0x72, 0x05, 0xde, 0xb3, 0x8b, 0x43, 0x3e, 0xd8, + 0x2b, 0xba, 0xed, 0x9f, 0x84, 0x2a, 0xd1, 0xff, 0x30, 0xda, 0xe4, 0x55, 0x47, 0xfb, 0xf7, 0x60, + 0x16, 0x1f, 0x07, 0x1f, 0xac, 0x33, 0x0c, 0x8e, 0x99, 0x04, 0xbd, 0x02, 0x34, 0x9b, 0x4f, 0xe6, + 0xf7, 0xb3, 0xc5, 0xfd, 0xdd, 0xec, 0x47, 0x7a, 0x93, 0x7d, 0xc9, 0xd2, 0xa9, 0xbf, 0x87, 0x0e, + 0x60, 0x7c, 0x83, 0xd3, 0xc9, 0x3c, 0xbb, 0xfb, 0xea, 0x5b, 0xc8, 0x85, 0x11, 0x4e, 0x27, 0xd3, + 0x9f, 0xfe, 0x40, 0x95, 0x29, 0xc6, 0xdf, 0xb1, 0x6f, 0x2b, 0x66, 0x9a, 0xde, 0xa6, 0x9a, 0x19, + 0x5e, 0x8f, 0xc1, 0x31, 0x53, 0x5c, 0xa7, 0xbf, 0x6e, 0x4a, 0x26, 0x1f, 0xbb, 0xa5, 0xba, 0xbe, + 0xc4, 0xdc, 0xe7, 0xb9, 0xf9, 0x0f, 0x25, 0x3f, 0x2f, 0x69, 0xad, 0x9f, 0x5c, 0xf2, 0xc2, 0x47, + 0xf9, 0xd4, 0x97, 0x4b, 0x47, 0x63, 0xef, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xe9, 0x4b, + 0xc4, 0x52, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_service.pb.go new file mode 100644 index 000000000..02bd073bd --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_service.pb.go @@ -0,0 +1,1118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/compute/v1/disk_service.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetDiskRequest struct { + // ID of the Disk resource to return. + // To get the disk ID use a [DiskService.List] request. + DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDiskRequest) Reset() { *m = GetDiskRequest{} } +func (m *GetDiskRequest) String() string { return proto.CompactTextString(m) } +func (*GetDiskRequest) ProtoMessage() {} +func (*GetDiskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{0} +} +func (m *GetDiskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDiskRequest.Unmarshal(m, b) +} +func (m *GetDiskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDiskRequest.Marshal(b, m, deterministic) +} +func (dst *GetDiskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDiskRequest.Merge(dst, src) +} +func (m *GetDiskRequest) XXX_Size() int { + return xxx_messageInfo_GetDiskRequest.Size(m) +} +func (m *GetDiskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDiskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDiskRequest proto.InternalMessageInfo + +func (m *GetDiskRequest) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type ListDisksRequest struct { + // ID of the folder to list disks in. + // To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListDisksResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListDisksResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. 
The field name. Currently you can use filtering only on the [Disk.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z]([-a-z0-9]{,61}[a-z0-9])?$`. + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDisksRequest) Reset() { *m = ListDisksRequest{} } +func (m *ListDisksRequest) String() string { return proto.CompactTextString(m) } +func (*ListDisksRequest) ProtoMessage() {} +func (*ListDisksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{1} +} +func (m *ListDisksRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDisksRequest.Unmarshal(m, b) +} +func (m *ListDisksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDisksRequest.Marshal(b, m, deterministic) +} +func (dst *ListDisksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDisksRequest.Merge(dst, src) +} +func (m *ListDisksRequest) XXX_Size() int { + return xxx_messageInfo_ListDisksRequest.Size(m) +} +func (m *ListDisksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDisksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDisksRequest proto.InternalMessageInfo + +func (m *ListDisksRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListDisksRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDisksRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListDisksRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListDisksResponse struct { + // List of Disk resources. + Disks []*Disk `protobuf:"bytes,1,rep,name=disks,proto3" json:"disks,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListDisksRequest.page_size], use + // the [next_page_token] as the value + // for the [ListDisksRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
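+ //
+ // Editorial illustration (the folder ID is a placeholder and the exact filter quoting is
+ // an assumption, not taken from this file):
+ //
+ //   req := &ListDisksRequest{
+ //       FolderId: "exampleFolderId",
+ //       Filter:   `name="my-disk"`,
+ //       PageSize: 100,
+ //   }
+ //   // issue the request repeatedly, copying NextPageToken into PageToken, until it is empty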
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDisksResponse) Reset() { *m = ListDisksResponse{} } +func (m *ListDisksResponse) String() string { return proto.CompactTextString(m) } +func (*ListDisksResponse) ProtoMessage() {} +func (*ListDisksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{2} +} +func (m *ListDisksResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDisksResponse.Unmarshal(m, b) +} +func (m *ListDisksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDisksResponse.Marshal(b, m, deterministic) +} +func (dst *ListDisksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDisksResponse.Merge(dst, src) +} +func (m *ListDisksResponse) XXX_Size() int { + return xxx_messageInfo_ListDisksResponse.Size(m) +} +func (m *ListDisksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDisksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDisksResponse proto.InternalMessageInfo + +func (m *ListDisksResponse) GetDisks() []*Disk { + if m != nil { + return m.Disks + } + return nil +} + +func (m *ListDisksResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateDiskRequest struct { + // ID of the folder to create a disk in. + // To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the disk. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the disk. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the disk type. + // To get a list of available disk types use the [yandex.cloud.compute.v1.DiskTypeService.List] request. + TypeId string `protobuf:"bytes,5,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + // ID of the availability zone where the disk resides. + // To get a list of available zones use the [yandex.cloud.compute.v1.ZoneService.List] request. + ZoneId string `protobuf:"bytes,6,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // Size of the disk, specified in bytes. + // If the disk was created from a image, this value should be more than the + // [yandex.cloud.compute.v1.Image.min_disk_size] value. 
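+ //
+ // Editorial illustration (IDs are placeholders; the type and zone are typical values, not
+ // taken from this file): creating a 20 GiB disk from an image:
+ //
+ //   req := &CreateDiskRequest{
+ //       FolderId: "exampleFolderId",
+ //       Name:     "boot-disk",
+ //       TypeId:   "network-hdd",
+ //       ZoneId:   "ru-central1-a",
+ //       Size:     20 << 30, // bytes
+ //       Source:   &CreateDiskRequest_ImageId{ImageId: "exampleImageId"},
+ //   }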
+ Size int64 `protobuf:"varint,7,opt,name=size,proto3" json:"size,omitempty"` + // Types that are valid to be assigned to Source: + // *CreateDiskRequest_ImageId + // *CreateDiskRequest_SnapshotId + Source isCreateDiskRequest_Source `protobuf_oneof:"source"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDiskRequest) Reset() { *m = CreateDiskRequest{} } +func (m *CreateDiskRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDiskRequest) ProtoMessage() {} +func (*CreateDiskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{3} +} +func (m *CreateDiskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDiskRequest.Unmarshal(m, b) +} +func (m *CreateDiskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDiskRequest.Marshal(b, m, deterministic) +} +func (dst *CreateDiskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDiskRequest.Merge(dst, src) +} +func (m *CreateDiskRequest) XXX_Size() int { + return xxx_messageInfo_CreateDiskRequest.Size(m) +} +func (m *CreateDiskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDiskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDiskRequest proto.InternalMessageInfo + +func (m *CreateDiskRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateDiskRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateDiskRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateDiskRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateDiskRequest) GetTypeId() string { + if m != nil { + return m.TypeId + } + return "" +} + +func (m *CreateDiskRequest) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *CreateDiskRequest) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +type isCreateDiskRequest_Source interface { + isCreateDiskRequest_Source() +} + +type CreateDiskRequest_ImageId struct { + ImageId string `protobuf:"bytes,8,opt,name=image_id,json=imageId,proto3,oneof"` +} + +type CreateDiskRequest_SnapshotId struct { + SnapshotId string `protobuf:"bytes,9,opt,name=snapshot_id,json=snapshotId,proto3,oneof"` +} + +func (*CreateDiskRequest_ImageId) isCreateDiskRequest_Source() {} + +func (*CreateDiskRequest_SnapshotId) isCreateDiskRequest_Source() {} + +func (m *CreateDiskRequest) GetSource() isCreateDiskRequest_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *CreateDiskRequest) GetImageId() string { + if x, ok := m.GetSource().(*CreateDiskRequest_ImageId); ok { + return x.ImageId + } + return "" +} + +func (m *CreateDiskRequest) GetSnapshotId() string { + if x, ok := m.GetSource().(*CreateDiskRequest_SnapshotId); ok { + return x.SnapshotId + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CreateDiskRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CreateDiskRequest_OneofMarshaler, _CreateDiskRequest_OneofUnmarshaler, _CreateDiskRequest_OneofSizer, []interface{}{ + (*CreateDiskRequest_ImageId)(nil), + (*CreateDiskRequest_SnapshotId)(nil), + } +} + +func _CreateDiskRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CreateDiskRequest) + // source + switch x := m.Source.(type) { + case *CreateDiskRequest_ImageId: + b.EncodeVarint(8<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ImageId) + case *CreateDiskRequest_SnapshotId: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SnapshotId) + case nil: + default: + return fmt.Errorf("CreateDiskRequest.Source has unexpected type %T", x) + } + return nil +} + +func _CreateDiskRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CreateDiskRequest) + switch tag { + case 8: // source.image_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &CreateDiskRequest_ImageId{x} + return true, err + case 9: // source.snapshot_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &CreateDiskRequest_SnapshotId{x} + return true, err + default: + return false, nil + } +} + +func _CreateDiskRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CreateDiskRequest) + // source + switch x := m.Source.(type) { + case *CreateDiskRequest_ImageId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ImageId))) + n += len(x.ImageId) + case *CreateDiskRequest_SnapshotId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.SnapshotId))) + n += len(x.SnapshotId) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type CreateDiskMetadata struct { + // ID of the disk that is being created. + DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDiskMetadata) Reset() { *m = CreateDiskMetadata{} } +func (m *CreateDiskMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateDiskMetadata) ProtoMessage() {} +func (*CreateDiskMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{4} +} +func (m *CreateDiskMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDiskMetadata.Unmarshal(m, b) +} +func (m *CreateDiskMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDiskMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateDiskMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDiskMetadata.Merge(dst, src) +} +func (m *CreateDiskMetadata) XXX_Size() int { + return xxx_messageInfo_CreateDiskMetadata.Size(m) +} +func (m *CreateDiskMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDiskMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDiskMetadata proto.InternalMessageInfo + +func (m *CreateDiskMetadata) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type UpdateDiskRequest struct { + // ID of the Disk resource to update. 
+ // To get the disk ID use a [DiskService.List] request. + DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + // Field mask that specifies which fields of the Disk resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the disk. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the disk. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + // + // Existing set of `` labels `` is completely replaced by the provided set. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Size of the disk, specified in bytes. + Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateDiskRequest) Reset() { *m = UpdateDiskRequest{} } +func (m *UpdateDiskRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDiskRequest) ProtoMessage() {} +func (*UpdateDiskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{5} +} +func (m *UpdateDiskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateDiskRequest.Unmarshal(m, b) +} +func (m *UpdateDiskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateDiskRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateDiskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateDiskRequest.Merge(dst, src) +} +func (m *UpdateDiskRequest) XXX_Size() int { + return xxx_messageInfo_UpdateDiskRequest.Size(m) +} +func (m *UpdateDiskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateDiskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateDiskRequest proto.InternalMessageInfo + +func (m *UpdateDiskRequest) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +func (m *UpdateDiskRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateDiskRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateDiskRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateDiskRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateDiskRequest) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +type UpdateDiskMetadata struct { + // ID of the Disk resource that is being updated. 
+ DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateDiskMetadata) Reset() { *m = UpdateDiskMetadata{} } +func (m *UpdateDiskMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateDiskMetadata) ProtoMessage() {} +func (*UpdateDiskMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{6} +} +func (m *UpdateDiskMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateDiskMetadata.Unmarshal(m, b) +} +func (m *UpdateDiskMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateDiskMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateDiskMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateDiskMetadata.Merge(dst, src) +} +func (m *UpdateDiskMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateDiskMetadata.Size(m) +} +func (m *UpdateDiskMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateDiskMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateDiskMetadata proto.InternalMessageInfo + +func (m *UpdateDiskMetadata) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type DeleteDiskRequest struct { + // ID of the disk to delete. + // To get the disk ID use a [DiskService.List] request. + DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDiskRequest) Reset() { *m = DeleteDiskRequest{} } +func (m *DeleteDiskRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDiskRequest) ProtoMessage() {} +func (*DeleteDiskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{7} +} +func (m *DeleteDiskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDiskRequest.Unmarshal(m, b) +} +func (m *DeleteDiskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDiskRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteDiskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDiskRequest.Merge(dst, src) +} +func (m *DeleteDiskRequest) XXX_Size() int { + return xxx_messageInfo_DeleteDiskRequest.Size(m) +} +func (m *DeleteDiskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDiskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDiskRequest proto.InternalMessageInfo + +func (m *DeleteDiskRequest) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type DeleteDiskMetadata struct { + // ID of the disk that is being deleted. 
+ DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDiskMetadata) Reset() { *m = DeleteDiskMetadata{} } +func (m *DeleteDiskMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteDiskMetadata) ProtoMessage() {} +func (*DeleteDiskMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{8} +} +func (m *DeleteDiskMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDiskMetadata.Unmarshal(m, b) +} +func (m *DeleteDiskMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDiskMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteDiskMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDiskMetadata.Merge(dst, src) +} +func (m *DeleteDiskMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteDiskMetadata.Size(m) +} +func (m *DeleteDiskMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDiskMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDiskMetadata proto.InternalMessageInfo + +func (m *DeleteDiskMetadata) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type ListDiskOperationsRequest struct { + // ID of the Disk resource to list operations for. + DiskId string `protobuf:"bytes,1,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListDiskOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListDiskOperationsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDiskOperationsRequest) Reset() { *m = ListDiskOperationsRequest{} } +func (m *ListDiskOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDiskOperationsRequest) ProtoMessage() {} +func (*ListDiskOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{9} +} +func (m *ListDiskOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDiskOperationsRequest.Unmarshal(m, b) +} +func (m *ListDiskOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDiskOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListDiskOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDiskOperationsRequest.Merge(dst, src) +} +func (m *ListDiskOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListDiskOperationsRequest.Size(m) +} +func (m *ListDiskOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDiskOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDiskOperationsRequest proto.InternalMessageInfo + +func (m *ListDiskOperationsRequest) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +func (m *ListDiskOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDiskOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListDiskOperationsResponse struct { + // List of operations for the specified disk. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListDiskOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListDiskOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDiskOperationsResponse) Reset() { *m = ListDiskOperationsResponse{} } +func (m *ListDiskOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDiskOperationsResponse) ProtoMessage() {} +func (*ListDiskOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_service_204a5ebc353c6801, []int{10} +} +func (m *ListDiskOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDiskOperationsResponse.Unmarshal(m, b) +} +func (m *ListDiskOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDiskOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListDiskOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDiskOperationsResponse.Merge(dst, src) +} +func (m *ListDiskOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListDiskOperationsResponse.Size(m) +} +func (m *ListDiskOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDiskOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDiskOperationsResponse proto.InternalMessageInfo + +func (m *ListDiskOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListDiskOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetDiskRequest)(nil), "yandex.cloud.compute.v1.GetDiskRequest") + proto.RegisterType((*ListDisksRequest)(nil), "yandex.cloud.compute.v1.ListDisksRequest") + proto.RegisterType((*ListDisksResponse)(nil), "yandex.cloud.compute.v1.ListDisksResponse") + proto.RegisterType((*CreateDiskRequest)(nil), "yandex.cloud.compute.v1.CreateDiskRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.CreateDiskRequest.LabelsEntry") + proto.RegisterType((*CreateDiskMetadata)(nil), "yandex.cloud.compute.v1.CreateDiskMetadata") + proto.RegisterType((*UpdateDiskRequest)(nil), "yandex.cloud.compute.v1.UpdateDiskRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.UpdateDiskRequest.LabelsEntry") + proto.RegisterType((*UpdateDiskMetadata)(nil), "yandex.cloud.compute.v1.UpdateDiskMetadata") + proto.RegisterType((*DeleteDiskRequest)(nil), "yandex.cloud.compute.v1.DeleteDiskRequest") + proto.RegisterType((*DeleteDiskMetadata)(nil), "yandex.cloud.compute.v1.DeleteDiskMetadata") + proto.RegisterType((*ListDiskOperationsRequest)(nil), "yandex.cloud.compute.v1.ListDiskOperationsRequest") + proto.RegisterType((*ListDiskOperationsResponse)(nil), "yandex.cloud.compute.v1.ListDiskOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DiskServiceClient is the client API for DiskService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DiskServiceClient interface { + // Returns the specified Disk resource. 
+ // + // To get the list of available Disk resources, make a [List] request. + Get(ctx context.Context, in *GetDiskRequest, opts ...grpc.CallOption) (*Disk, error) + // Retrieves the list of Disk resources in the specified folder. + List(ctx context.Context, in *ListDisksRequest, opts ...grpc.CallOption) (*ListDisksResponse, error) + // Creates a disk in the specified folder. + // + // You can create an empty disk or restore it from a snapshot or an image. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(ctx context.Context, in *CreateDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified disk. + Update(ctx context.Context, in *UpdateDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified disk. + // + // Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete + // any snapshots or images previously made from the disk. You must delete snapshots and images separately. + // + // It is not possible to delete a disk that is attached to an instance. + Delete(ctx context.Context, in *DeleteDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists operations for the specified disk. + ListOperations(ctx context.Context, in *ListDiskOperationsRequest, opts ...grpc.CallOption) (*ListDiskOperationsResponse, error) +} + +type diskServiceClient struct { + cc *grpc.ClientConn +} + +func NewDiskServiceClient(cc *grpc.ClientConn) DiskServiceClient { + return &diskServiceClient{cc} +} + +func (c *diskServiceClient) Get(ctx context.Context, in *GetDiskRequest, opts ...grpc.CallOption) (*Disk, error) { + out := new(Disk) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diskServiceClient) List(ctx context.Context, in *ListDisksRequest, opts ...grpc.CallOption) (*ListDisksResponse, error) { + out := new(ListDisksResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diskServiceClient) Create(ctx context.Context, in *CreateDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diskServiceClient) Update(ctx context.Context, in *UpdateDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diskServiceClient) Delete(ctx context.Context, in *DeleteDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diskServiceClient) ListOperations(ctx context.Context, in *ListDiskOperationsRequest, opts ...grpc.CallOption) (*ListDiskOperationsResponse, error) { + out := new(ListDiskOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskService/ListOperations", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// DiskServiceServer is the server API for DiskService service. +type DiskServiceServer interface { + // Returns the specified Disk resource. + // + // To get the list of available Disk resources, make a [List] request. + Get(context.Context, *GetDiskRequest) (*Disk, error) + // Retrieves the list of Disk resources in the specified folder. + List(context.Context, *ListDisksRequest) (*ListDisksResponse, error) + // Creates a disk in the specified folder. + // + // You can create an empty disk or restore it from a snapshot or an image. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(context.Context, *CreateDiskRequest) (*operation.Operation, error) + // Updates the specified disk. + Update(context.Context, *UpdateDiskRequest) (*operation.Operation, error) + // Deletes the specified disk. + // + // Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete + // any snapshots or images previously made from the disk. You must delete snapshots and images separately. + // + // It is not possible to delete a disk that is attached to an instance. + Delete(context.Context, *DeleteDiskRequest) (*operation.Operation, error) + // Lists operations for the specified disk. + ListOperations(context.Context, *ListDiskOperationsRequest) (*ListDiskOperationsResponse, error) +} + +func RegisterDiskServiceServer(s *grpc.Server, srv DiskServiceServer) { + s.RegisterService(&_DiskService_serviceDesc, srv) +} + +func _DiskService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDiskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskServiceServer).Get(ctx, req.(*GetDiskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DiskService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDisksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskServiceServer).List(ctx, req.(*ListDisksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DiskService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDiskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskServiceServer).Create(ctx, req.(*CreateDiskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DiskService_Update_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDiskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskServiceServer).Update(ctx, req.(*UpdateDiskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DiskService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDiskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskServiceServer).Delete(ctx, req.(*DeleteDiskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DiskService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDiskOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskServiceServer).ListOperations(ctx, req.(*ListDiskOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DiskService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.compute.v1.DiskService", + HandlerType: (*DiskServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _DiskService_Get_Handler, + }, + { + MethodName: "List", + Handler: _DiskService_List_Handler, + }, + { + MethodName: "Create", + Handler: _DiskService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _DiskService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _DiskService_Delete_Handler, + }, + { + MethodName: "ListOperations", + Handler: _DiskService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/compute/v1/disk_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/disk_service.proto", fileDescriptor_disk_service_204a5ebc353c6801) +} + +var fileDescriptor_disk_service_204a5ebc353c6801 = []byte{ + // 1084 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x73, 0xdb, 0x44, + 0x14, 0x46, 0xb1, 0xa2, 0xd8, 0xcf, 0x50, 0x92, 0x85, 0x52, 0x47, 0x90, 0x99, 0x44, 0x4c, 0xd3, + 0xd4, 0x45, 0x92, 0x65, 0x3b, 0xa6, 0x49, 0xdb, 0x61, 0x70, 0x13, 0x8a, 0x67, 0xda, 0x81, 0x51, + 0xe1, 0x42, 0xa6, 0x93, 0x51, 0xac, 0x8d, 0xab, 0xb1, 0x2c, 0x09, 0xad, 0xec, 0x89, 0x5d, 0x7a, + 0xe9, 0x85, 0x99, 0x70, 0xe0, 0xd0, 0xe1, 0xc8, 0x1f, 0xc0, 0x70, 0xcb, 0xf0, 0x3f, 0x38, 0xc3, + 0xb1, 0x1c, 0xf8, 0x07, 0x38, 0x70, 0xee, 0xb1, 0x27, 0x66, 0x57, 0xf2, 0xef, 0x9f, 0xa1, 0x70, + 0x93, 0xf4, 0xbe, 0xb7, 0xfa, 0xf6, 0xdb, 0xef, 0xbd, 0xb7, 0x90, 
0x6e, 0x1a, 0x8e, 0x89, 0x4f, + 0xd4, 0xb2, 0xed, 0xd6, 0x4d, 0xb5, 0xec, 0xd6, 0xbc, 0x7a, 0x80, 0xd5, 0x86, 0xa6, 0x9a, 0x16, + 0xa9, 0x1e, 0x12, 0xec, 0x37, 0xac, 0x32, 0x56, 0x3c, 0xdf, 0x0d, 0x5c, 0x74, 0x25, 0xc4, 0x2a, + 0x0c, 0xab, 0x44, 0x58, 0xa5, 0xa1, 0x89, 0x1f, 0x54, 0x5c, 0xb7, 0x62, 0x63, 0xd5, 0xf0, 0x2c, + 0xd5, 0x70, 0x1c, 0x37, 0x30, 0x02, 0xcb, 0x75, 0x48, 0x98, 0x26, 0xae, 0x47, 0x51, 0xf6, 0x76, + 0x54, 0x3f, 0x56, 0x8f, 0x2d, 0x6c, 0x9b, 0x87, 0x35, 0x83, 0x54, 0x23, 0x84, 0x18, 0x91, 0xa0, + 0xf9, 0xae, 0x87, 0x7d, 0x96, 0x1e, 0xc5, 0xa4, 0x69, 0x04, 0x23, 0xcc, 0xe6, 0x00, 0xa6, 0xbb, + 0xc2, 0xc8, 0x5a, 0x6b, 0x03, 0xb8, 0x86, 0x61, 0x5b, 0x66, 0x5f, 0x58, 0xfa, 0x18, 0x2e, 0xdd, + 0xc3, 0xc1, 0x9e, 0x45, 0xaa, 0x3a, 0xfe, 0xb6, 0x8e, 0x49, 0x80, 0xae, 0xc2, 0x12, 0xd3, 0xc1, + 0x32, 0x53, 0xdc, 0x3a, 0xb7, 0x95, 0x28, 0xbe, 0xf9, 0x77, 0x5b, 0xe3, 0x4e, 0xcf, 0x35, 0xfe, + 0xf6, 0x9d, 0xed, 0x8c, 0x2e, 0xd0, 0x60, 0xc9, 0x94, 0x7e, 0xe3, 0x60, 0xf9, 0xbe, 0x45, 0x58, + 0x2a, 0xe9, 0xe4, 0x5e, 0x87, 0xc4, 0xb1, 0x6b, 0x9b, 0xd8, 0x9f, 0x94, 0x1d, 0x0f, 0xc3, 0x25, + 0x13, 0x5d, 0x83, 0x84, 0x67, 0x54, 0xf0, 0x21, 0xb1, 0x5a, 0x38, 0xb5, 0xb0, 0xce, 0x6d, 0xc5, + 0x8a, 0xf0, 0xaa, 0xad, 0x09, 0xb7, 0xef, 0x68, 0x99, 0x4c, 0x46, 0x8f, 0xd3, 0xe0, 0x43, 0xab, + 0x85, 0xd1, 0x16, 0x00, 0x03, 0x06, 0x6e, 0x15, 0x3b, 0xa9, 0x18, 0x5b, 0x34, 0x71, 0x7a, 0xae, + 0x2d, 0x32, 0xa4, 0xce, 0x56, 0xf9, 0x8a, 0xc6, 0x90, 0x04, 0xc2, 0xb1, 0x65, 0x07, 0xd8, 0x4f, + 0xf1, 0x0c, 0x05, 0xa7, 0xe7, 0xdd, 0xf5, 0xa2, 0x88, 0xe4, 0xc1, 0x4a, 0x1f, 0x6b, 0xe2, 0xb9, + 0x0e, 0xc1, 0x28, 0x07, 0x8b, 0x74, 0x57, 0x24, 0xc5, 0xad, 0xc7, 0xb6, 0x92, 0xd9, 0x35, 0x65, + 0xc2, 0xa1, 0x2b, 0x4c, 0xa7, 0x10, 0x8b, 0x36, 0xe1, 0x6d, 0x07, 0x9f, 0x04, 0x87, 0x7d, 0xe4, + 0xe8, 0x36, 0x12, 0xfa, 0x5b, 0xf4, 0xf3, 0x97, 0x1d, 0x56, 0xd2, 0xef, 0x3c, 0xac, 0xdc, 0xf5, + 0xb1, 0x11, 0xe0, 0x7e, 0x95, 0x2f, 0xa0, 0xd4, 0x36, 0xf0, 0x8e, 0x51, 0x0b, 0x45, 0x4a, 0x14, + 0x37, 0x5e, 0xb6, 0xb5, 0xb5, 0xef, 0x0e, 0x0c, 0xb9, 0xf5, 0xe8, 0x40, 0x36, 0xe4, 0x56, 0x46, + 0xde, 0x79, 0xf4, 0x44, 0xfb, 0xa8, 0xa0, 0x3d, 0x3d, 0x88, 0xde, 0x74, 0x06, 0x47, 0x37, 0x20, + 0x69, 0x62, 0x52, 0xf6, 0x2d, 0x8f, 0x1e, 0xf7, 0xa0, 0x70, 0xd9, 0xed, 0x82, 0xde, 0x1f, 0x45, + 0x3f, 0x72, 0x20, 0xd8, 0xc6, 0x11, 0xb6, 0x49, 0x8a, 0x67, 0x1a, 0x14, 0x26, 0x6a, 0x30, 0xb2, + 0x17, 0xe5, 0x3e, 0x4b, 0xdc, 0x77, 0x02, 0xbf, 0x59, 0xfc, 0xe4, 0x65, 0x5b, 0x4b, 0x1e, 0xc8, + 0x87, 0x19, 0x79, 0x87, 0x52, 0x4c, 0x3f, 0x63, 0xfb, 0x29, 0xe4, 0xc3, 0x7d, 0x15, 0x72, 0x67, + 0xe7, 0x9a, 0x20, 0xf2, 0x9a, 0xcc, 0x9e, 0x10, 0x5a, 0x8e, 0x36, 0xd2, 0xc5, 0xeb, 0x11, 0x0d, + 0xb4, 0x01, 0x4b, 0x41, 0xd3, 0xc3, 0x54, 0x9e, 0x45, 0x46, 0x3d, 0xde, 0xb3, 0x20, 0x0d, 0x94, + 0x4c, 0xea, 0xd4, 0x96, 0xeb, 0x30, 0x88, 0x30, 0xce, 0xa9, 0x34, 0x58, 0x32, 0x91, 0x06, 0x3c, + 0x33, 0xd9, 0x12, 0x33, 0xd9, 0x1a, 0xc5, 0xbc, 0x6a, 0x6b, 0x97, 0xf3, 0xda, 0x4e, 0x3e, 0x97, + 0xc9, 0xcb, 0xf9, 0xdc, 0xce, 0xcd, 0x4c, 0xbe, 0xb0, 0xad, 0x69, 0x5a, 0x26, 0xaf, 0x33, 0x28, + 0xba, 0x0a, 0x71, 0xab, 0x46, 0xcf, 0xd5, 0x32, 0x53, 0xf1, 0xc1, 0xbf, 0x7f, 0xfe, 0x86, 0xbe, + 0xc4, 0x62, 0x25, 0x93, 0x4a, 0x4c, 0x1c, 0xc3, 0x23, 0x8f, 0xdd, 0x80, 0x22, 0x13, 0x23, 0x48, + 0xe8, 0x84, 0x4b, 0xa6, 0xb8, 0x03, 0xc9, 0x3e, 0xa1, 0xd0, 0x32, 0xc4, 0xaa, 0xb8, 0x19, 0x1e, + 0xbd, 0x4e, 0x1f, 0xd1, 0xbb, 0xb0, 0xd8, 0x30, 0xec, 0x7a, 0x74, 0xd0, 0x7a, 0xf8, 0xb2, 0xbb, + 0x70, 0x93, 0x2b, 0xc6, 0x41, 0x20, 0x6e, 0xdd, 0x2f, 0x63, 0x49, 0x06, 0xd4, 0xd3, 0xff, 
0x01, + 0x0e, 0x0c, 0xd3, 0x08, 0x0c, 0x74, 0x65, 0xa8, 0x64, 0xbb, 0x45, 0xfa, 0x67, 0x0c, 0x56, 0xbe, + 0xf6, 0xcc, 0x21, 0xef, 0xcd, 0x57, 0xe1, 0xe8, 0x16, 0x24, 0xeb, 0x2c, 0x97, 0xb5, 0x2d, 0xc6, + 0x2a, 0x99, 0x15, 0x95, 0xb0, 0xb3, 0x29, 0x9d, 0xce, 0xa6, 0x7c, 0x46, 0x3b, 0xdb, 0x03, 0x83, + 0x54, 0x75, 0x08, 0xe1, 0xf4, 0xb9, 0x6b, 0xda, 0xd8, 0x6b, 0x99, 0x96, 0x9f, 0xd7, 0xb4, 0x8b, + 0x33, 0x4c, 0x3b, 0x22, 0xc2, 0xff, 0x63, 0x5a, 0x39, 0xb2, 0x9a, 0xc0, 0xac, 0xb6, 0x3a, 0xc3, + 0x66, 0xaf, 0x61, 0x09, 0x6a, 0x84, 0xde, 0x9e, 0x66, 0x1b, 0x61, 0x17, 0x56, 0xf6, 0xb0, 0x8d, + 0xff, 0x8d, 0x0f, 0xe8, 0xaf, 0x7a, 0xb9, 0xb3, 0x7f, 0xf5, 0x13, 0x07, 0xab, 0x9d, 0x16, 0xfb, + 0x45, 0x67, 0x18, 0x91, 0x0b, 0x7a, 0xef, 0xbf, 0x9f, 0x0e, 0xd2, 0xf7, 0x1c, 0x88, 0xe3, 0x78, + 0x45, 0x33, 0xe0, 0x53, 0x80, 0xee, 0xe8, 0xec, 0x0c, 0x82, 0x8d, 0x41, 0x3f, 0xf5, 0x46, 0x6b, + 0x37, 0x5f, 0xef, 0x4b, 0x9a, 0x77, 0x22, 0x64, 0x7f, 0x58, 0x82, 0x24, 0x65, 0xf1, 0x30, 0xbc, + 0x69, 0x20, 0x1f, 0x62, 0xf7, 0x70, 0x80, 0xae, 0x4d, 0x74, 0xef, 0xe0, 0x84, 0x16, 0xa7, 0xcf, + 0x27, 0xe9, 0xc3, 0x67, 0x7f, 0xfc, 0xf5, 0x7c, 0x61, 0x0d, 0xbd, 0x3f, 0x7c, 0x73, 0x20, 0xea, + 0x93, 0x48, 0xfb, 0xa7, 0xe8, 0x04, 0x78, 0x2a, 0x06, 0xba, 0x3e, 0x71, 0xad, 0xe1, 0xe1, 0x2e, + 0xa6, 0xe7, 0x81, 0x86, 0x6a, 0x4a, 0xab, 0x8c, 0xc3, 0x3b, 0x68, 0x65, 0x84, 0x03, 0x7a, 0xce, + 0x81, 0x10, 0xf6, 0x30, 0x94, 0x9e, 0x7f, 0xc8, 0x88, 0xb3, 0xcf, 0x42, 0xda, 0x3d, 0x7b, 0x91, + 0x16, 0xc7, 0x36, 0x48, 0x9e, 0xbe, 0x31, 0x4a, 0xef, 0x49, 0xa3, 0x94, 0x76, 0xb9, 0x34, 0xfa, + 0x99, 0x03, 0x21, 0x2c, 0xa8, 0x29, 0xac, 0x46, 0xba, 0xc8, 0x3c, 0xac, 0xf6, 0x42, 0x56, 0x63, + 0xaa, 0xb5, 0xc7, 0x6a, 0x3d, 0x3b, 0xed, 0xb0, 0x28, 0xbf, 0x5f, 0x39, 0x10, 0xc2, 0x2a, 0x9c, + 0xc2, 0x6f, 0xa4, 0xc4, 0xe7, 0xe1, 0xa7, 0x9f, 0xbd, 0x48, 0xdf, 0x18, 0x5b, 0xe2, 0x97, 0x87, + 0x5b, 0xfd, 0x7e, 0xcd, 0x0b, 0x9a, 0xa1, 0xbb, 0xd2, 0x53, 0xdd, 0xf5, 0x0b, 0x07, 0x97, 0xa8, + 0x29, 0x7a, 0x75, 0x86, 0xb2, 0x33, 0xdd, 0x33, 0xd2, 0x2c, 0xc4, 0xdc, 0x85, 0x72, 0x22, 0xeb, + 0x29, 0x8c, 0xe0, 0x16, 0xda, 0x9c, 0x42, 0xb0, 0x77, 0x4b, 0x26, 0xc5, 0xfd, 0x6f, 0xee, 0x56, + 0xac, 0xe0, 0x71, 0xfd, 0x88, 0xae, 0xaf, 0x86, 0x3f, 0x94, 0xc3, 0xdb, 0x72, 0xc5, 0x95, 0x2b, + 0xd8, 0x61, 0xdb, 0x57, 0x27, 0x5c, 0xc9, 0x6f, 0x45, 0x8f, 0x47, 0x02, 0x83, 0xe5, 0xfe, 0x09, + 0x00, 0x00, 0xff, 0xff, 0x64, 0xb4, 0x34, 0x53, 0x5d, 0x0c, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type.pb.go new file mode 100644 index 000000000..3df948330 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/disk_type.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type DiskType struct { + // ID of the disk type. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Description of the disk type. 0-256 characters long. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Array of availability zones where the disk type is available. + ZoneIds []string `protobuf:"bytes,3,rep,name=zone_ids,json=zoneIds,proto3" json:"zone_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DiskType) Reset() { *m = DiskType{} } +func (m *DiskType) String() string { return proto.CompactTextString(m) } +func (*DiskType) ProtoMessage() {} +func (*DiskType) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_type_5be272c1e4d4338f, []int{0} +} +func (m *DiskType) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DiskType.Unmarshal(m, b) +} +func (m *DiskType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DiskType.Marshal(b, m, deterministic) +} +func (dst *DiskType) XXX_Merge(src proto.Message) { + xxx_messageInfo_DiskType.Merge(dst, src) +} +func (m *DiskType) XXX_Size() int { + return xxx_messageInfo_DiskType.Size(m) +} +func (m *DiskType) XXX_DiscardUnknown() { + xxx_messageInfo_DiskType.DiscardUnknown(m) +} + +var xxx_messageInfo_DiskType proto.InternalMessageInfo + +func (m *DiskType) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DiskType) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *DiskType) GetZoneIds() []string { + if m != nil { + return m.ZoneIds + } + return nil +} + +func init() { + proto.RegisterType((*DiskType)(nil), "yandex.cloud.compute.v1.DiskType") +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/disk_type.proto", fileDescriptor_disk_type_5be272c1e4d4338f) +} + +var fileDescriptor_disk_type_5be272c1e4d4338f = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xaf, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0x4f, 0xce, 0xcf, 0x2d, 0x28, 0x2d, 0x49, + 0xd5, 0x2f, 0x33, 0xd4, 0x4f, 0xc9, 0x2c, 0xce, 0x8e, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x87, 0x28, 0xd4, 0x03, 0x2b, 0xd4, 0x83, 0x2a, 0xd4, 0x2b, 0x33, + 0x54, 0x0a, 0xe7, 0xe2, 0x70, 0xc9, 0x2c, 0xce, 0x0e, 0xa9, 0x2c, 0x48, 0x15, 0xe2, 0xe3, 0x62, + 0xca, 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x62, 0xca, 0x4c, 0x11, 0x52, 0xe0, 0xe2, + 0x4e, 0x49, 0x2d, 0x4e, 0x2e, 0xca, 0x2c, 0x28, 0xc9, 0xcc, 0xcf, 0x93, 0x60, 0x02, 0x4b, 0x20, + 0x0b, 0x09, 0x49, 0x72, 0x71, 0x54, 0xe5, 0xe7, 0xa5, 0xc6, 0x67, 0xa6, 0x14, 0x4b, 0x30, 0x2b, + 0x30, 0x6b, 0x70, 0x06, 0xb1, 0x83, 0xf8, 0x9e, 0x29, 0xc5, 0x4e, 0xae, 0x51, 0xce, 0xe9, 0x99, + 0x25, 0x19, 0xa5, 0x49, 0x20, 0xdb, 0xf4, 0x21, 0xd6, 0xeb, 0x42, 0xdc, 0x99, 0x9e, 0xaf, 0x9b, + 0x9e, 0x9a, 0x07, 0x76, 0x98, 0x3e, 0x0e, 0x0f, 0x58, 0x43, 0x99, 0x49, 0x6c, 0x60, 0x65, 0xc6, + 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0x85, 0x4f, 0x91, 0xea, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type_service.pb.go new file mode 100644 index 000000000..76faa5a84 --- /dev/null +++ 
b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/disk_type_service.pb.go @@ -0,0 +1,325 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/disk_type_service.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetDiskTypeRequest struct { + // ID of the disk type to return information about. + // To get the disk type ID use a [DiskTypeService.List] request. + DiskTypeId string `protobuf:"bytes,1,opt,name=disk_type_id,json=diskTypeId,proto3" json:"disk_type_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDiskTypeRequest) Reset() { *m = GetDiskTypeRequest{} } +func (m *GetDiskTypeRequest) String() string { return proto.CompactTextString(m) } +func (*GetDiskTypeRequest) ProtoMessage() {} +func (*GetDiskTypeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_type_service_42656155dbce67c7, []int{0} +} +func (m *GetDiskTypeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDiskTypeRequest.Unmarshal(m, b) +} +func (m *GetDiskTypeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDiskTypeRequest.Marshal(b, m, deterministic) +} +func (dst *GetDiskTypeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDiskTypeRequest.Merge(dst, src) +} +func (m *GetDiskTypeRequest) XXX_Size() int { + return xxx_messageInfo_GetDiskTypeRequest.Size(m) +} +func (m *GetDiskTypeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDiskTypeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDiskTypeRequest proto.InternalMessageInfo + +func (m *GetDiskTypeRequest) GetDiskTypeId() string { + if m != nil { + return m.DiskTypeId + } + return "" +} + +type ListDiskTypesRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListDiskTypesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListDiskTypesResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDiskTypesRequest) Reset() { *m = ListDiskTypesRequest{} } +func (m *ListDiskTypesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDiskTypesRequest) ProtoMessage() {} +func (*ListDiskTypesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_type_service_42656155dbce67c7, []int{1} +} +func (m *ListDiskTypesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDiskTypesRequest.Unmarshal(m, b) +} +func (m *ListDiskTypesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDiskTypesRequest.Marshal(b, m, deterministic) +} +func (dst *ListDiskTypesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDiskTypesRequest.Merge(dst, src) +} +func (m *ListDiskTypesRequest) XXX_Size() int { + return xxx_messageInfo_ListDiskTypesRequest.Size(m) +} +func (m *ListDiskTypesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDiskTypesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDiskTypesRequest proto.InternalMessageInfo + +func (m *ListDiskTypesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDiskTypesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListDiskTypesResponse struct { + // List of disk types. + DiskTypes []*DiskType `protobuf:"bytes,1,rep,name=disk_types,json=diskTypes,proto3" json:"disk_types,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListDiskTypesRequest.page_size], use + // the [next_page_token] as the value + // for the [ListDiskTypesRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDiskTypesResponse) Reset() { *m = ListDiskTypesResponse{} } +func (m *ListDiskTypesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDiskTypesResponse) ProtoMessage() {} +func (*ListDiskTypesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_disk_type_service_42656155dbce67c7, []int{2} +} +func (m *ListDiskTypesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDiskTypesResponse.Unmarshal(m, b) +} +func (m *ListDiskTypesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDiskTypesResponse.Marshal(b, m, deterministic) +} +func (dst *ListDiskTypesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDiskTypesResponse.Merge(dst, src) +} +func (m *ListDiskTypesResponse) XXX_Size() int { + return xxx_messageInfo_ListDiskTypesResponse.Size(m) +} +func (m *ListDiskTypesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDiskTypesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDiskTypesResponse proto.InternalMessageInfo + +func (m *ListDiskTypesResponse) GetDiskTypes() []*DiskType { + if m != nil { + return m.DiskTypes + } + return nil +} + +func (m *ListDiskTypesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetDiskTypeRequest)(nil), "yandex.cloud.compute.v1.GetDiskTypeRequest") + proto.RegisterType((*ListDiskTypesRequest)(nil), "yandex.cloud.compute.v1.ListDiskTypesRequest") + proto.RegisterType((*ListDiskTypesResponse)(nil), "yandex.cloud.compute.v1.ListDiskTypesResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DiskTypeServiceClient is the client API for DiskTypeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DiskTypeServiceClient interface { + // Returns the information about specified disk type. + // + // To get the list of available disk types, make a [List] request. + Get(ctx context.Context, in *GetDiskTypeRequest, opts ...grpc.CallOption) (*DiskType, error) + // Retrieves the list of disk types for the specified folder. + List(ctx context.Context, in *ListDiskTypesRequest, opts ...grpc.CallOption) (*ListDiskTypesResponse, error) +} + +type diskTypeServiceClient struct { + cc *grpc.ClientConn +} + +func NewDiskTypeServiceClient(cc *grpc.ClientConn) DiskTypeServiceClient { + return &diskTypeServiceClient{cc} +} + +func (c *diskTypeServiceClient) Get(ctx context.Context, in *GetDiskTypeRequest, opts ...grpc.CallOption) (*DiskType, error) { + out := new(DiskType) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskTypeService/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *diskTypeServiceClient) List(ctx context.Context, in *ListDiskTypesRequest, opts ...grpc.CallOption) (*ListDiskTypesResponse, error) { + out := new(ListDiskTypesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.DiskTypeService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DiskTypeServiceServer is the server API for DiskTypeService service. +type DiskTypeServiceServer interface { + // Returns the information about specified disk type. + // + // To get the list of available disk types, make a [List] request. + Get(context.Context, *GetDiskTypeRequest) (*DiskType, error) + // Retrieves the list of disk types for the specified folder. + List(context.Context, *ListDiskTypesRequest) (*ListDiskTypesResponse, error) +} + +func RegisterDiskTypeServiceServer(s *grpc.Server, srv DiskTypeServiceServer) { + s.RegisterService(&_DiskTypeService_serviceDesc, srv) +} + +func _DiskTypeService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDiskTypeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskTypeServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskTypeService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskTypeServiceServer).Get(ctx, req.(*GetDiskTypeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DiskTypeService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDiskTypesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiskTypeServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.DiskTypeService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiskTypeServiceServer).List(ctx, req.(*ListDiskTypesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DiskTypeService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.compute.v1.DiskTypeService", + HandlerType: (*DiskTypeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _DiskTypeService_Get_Handler, + }, + { + MethodName: "List", + Handler: _DiskTypeService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/compute/v1/disk_type_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/disk_type_service.proto", fileDescriptor_disk_type_service_42656155dbce67c7) +} + +var fileDescriptor_disk_type_service_42656155dbce67c7 = []byte{ + // 427 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0xaa, 0xd3, 0x40, + 0x14, 0xc6, 0x49, 0x7b, 0xbd, 0x98, 0xa3, 0x72, 0x61, 0xf0, 0x72, 0x4b, 0xf0, 0xc2, 0x35, 0x48, + 0x5b, 0xd0, 0x66, 0x9a, 0xba, 0xb4, 0x82, 0x54, 0xa5, 0x08, 0x2e, 0x24, 0xed, 0xca, 0x4d, 0x48, + 0x9b, 0x43, 0x1c, 0x5a, 0x67, 0x62, 0x67, 0x12, 0xda, 0x8a, 0x0b, 0xff, 0xac, 0xdc, 0xba, 0xf7, + 0x75, 0xea, 0xde, 0x57, 0x70, 0xe1, 0x33, 0xb8, 0x92, 0x4c, 0x92, 0xaa, 0xb5, 0xa1, 0x77, 0x17, + 0x72, 0xbe, 0xef, 0x9c, 0xdf, 0x9c, 0x6f, 0x06, 0xe8, 
0x2a, 0xe0, 0x21, 0x2e, 0xe9, 0x74, 0x2e, + 0x92, 0x90, 0x4e, 0xc5, 0xeb, 0x38, 0x51, 0x48, 0x53, 0x97, 0x86, 0x4c, 0xce, 0x7c, 0xb5, 0x8a, + 0xd1, 0x97, 0xb8, 0x48, 0xd9, 0x14, 0x9d, 0x78, 0x21, 0x94, 0x20, 0x67, 0xb9, 0xc1, 0xd1, 0x06, + 0xa7, 0x30, 0x38, 0xa9, 0x6b, 0xdd, 0x8a, 0x84, 0x88, 0xe6, 0x48, 0x83, 0x98, 0xd1, 0x80, 0x73, + 0xa1, 0x02, 0xc5, 0x04, 0x97, 0xb9, 0xcd, 0x6a, 0x1d, 0x9c, 0x53, 0x08, 0xcf, 0xff, 0x11, 0xa6, + 0xc1, 0x9c, 0x85, 0xba, 0x51, 0x5e, 0xb6, 0xfb, 0x40, 0x86, 0xa8, 0x9e, 0x30, 0x39, 0x1b, 0xaf, + 0x62, 0xf4, 0xf0, 0x4d, 0x82, 0x52, 0x91, 0x26, 0x5c, 0xff, 0xc3, 0xcb, 0xc2, 0x86, 0x71, 0x61, + 0xb4, 0xcd, 0xc1, 0xd1, 0xcf, 0x8d, 0x6b, 0x78, 0x10, 0x16, 0xe2, 0x67, 0xa1, 0xcd, 0xe0, 0xe6, + 0x73, 0x26, 0xb7, 0x76, 0x59, 0xfa, 0x5b, 0x60, 0xc6, 0x41, 0x84, 0xbe, 0x64, 0x6b, 0xd4, 0xe6, + 0xfa, 0x00, 0x7e, 0x6d, 0xdc, 0xe3, 0xfe, 0x43, 0xb7, 0xdb, 0xed, 0x7a, 0x57, 0xb3, 0xe2, 0x88, + 0xad, 0x91, 0xb4, 0x01, 0xb4, 0x50, 0x89, 0x19, 0xf2, 0x46, 0x4d, 0x8f, 0x31, 0x3f, 0x7f, 0x73, + 0xaf, 0x68, 0xa5, 0xa7, 0xbb, 0x8c, 0xb3, 0x9a, 0xfd, 0xde, 0x80, 0xd3, 0x9d, 0x59, 0x32, 0x16, + 0x5c, 0x22, 0x79, 0x04, 0xb0, 0x85, 0x95, 0x0d, 0xe3, 0xa2, 0xde, 0xbe, 0xd6, 0xbb, 0xed, 0x54, + 0xac, 0xd5, 0xd9, 0x1e, 0xd5, 0x2c, 0xcf, 0x21, 0x49, 0x13, 0x4e, 0x38, 0x2e, 0x95, 0xbf, 0x8b, + 0xe2, 0xdd, 0xc8, 0x7e, 0xbf, 0x28, 0x19, 0x7a, 0x5f, 0x6b, 0x70, 0x52, 0xfa, 0x47, 0x79, 0x8a, + 0xe4, 0xa3, 0x01, 0xf5, 0x21, 0x2a, 0x72, 0xb7, 0x72, 0xe2, 0xff, 0xfb, 0xb5, 0x0e, 0xe3, 0xd9, + 0xf7, 0x3e, 0x7c, 0xff, 0xf1, 0xa5, 0xd6, 0x24, 0x77, 0x76, 0xc3, 0xd5, 0xc8, 0xf4, 0xed, 0xdf, + 0xf9, 0xbc, 0x23, 0x9f, 0x0c, 0x38, 0xca, 0xb6, 0x43, 0x3a, 0x95, 0x9d, 0xf7, 0x05, 0x65, 0x39, + 0x97, 0x95, 0xe7, 0xbb, 0xb6, 0xcf, 0x35, 0xd5, 0x19, 0x39, 0xdd, 0x4b, 0x35, 0x78, 0xfa, 0xf2, + 0x71, 0xc4, 0xd4, 0xab, 0x64, 0x92, 0x75, 0x2a, 0x9e, 0x42, 0x27, 0xbf, 0x79, 0x91, 0xe8, 0x44, + 0xc8, 0xf5, 0xa5, 0xab, 0x7a, 0x23, 0x0f, 0x8a, 0xcf, 0xc9, 0xb1, 0x96, 0xdd, 0xff, 0x1d, 0x00, + 0x00, 0xff, 0xff, 0xf1, 0x82, 0x37, 0x5b, 0x4d, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image.pb.go new file mode 100644 index 000000000..a58884659 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/image.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Image_Status int32 + +const ( + Image_STATUS_UNSPECIFIED Image_Status = 0 + // Image is being created. + Image_CREATING Image_Status = 1 + // Image is ready to use. + Image_READY Image_Status = 2 + // Image encountered a problem and cannot operate. + Image_ERROR Image_Status = 3 + // Image is being deleted. 
+ Image_DELETING Image_Status = 4 +) + +var Image_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "CREATING", + 2: "READY", + 3: "ERROR", + 4: "DELETING", +} +var Image_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, + "ERROR": 3, + "DELETING": 4, +} + +func (x Image_Status) String() string { + return proto.EnumName(Image_Status_name, int32(x)) +} +func (Image_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_image_b90d09089d7c657a, []int{0, 0} +} + +type Os_Type int32 + +const ( + Os_TYPE_UNSPECIFIED Os_Type = 0 + // Linux operating system. + Os_LINUX Os_Type = 1 + // Windows operating system. + Os_WINDOWS Os_Type = 2 +) + +var Os_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "LINUX", + 2: "WINDOWS", +} +var Os_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "LINUX": 1, + "WINDOWS": 2, +} + +func (x Os_Type) String() string { + return proto.EnumName(Os_Type_name, int32(x)) +} +func (Os_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_image_b90d09089d7c657a, []int{1, 0} +} + +// An Image resource. +type Image struct { + // ID of the image. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the image belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the image. 1-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the image. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Maximum of 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The name of the image family to which this image belongs. + // + // You can get the most recent image from a family by using + // the [yandex.cloud.compute.v1.ImageService.GetLatestByFamily] request + // and create the disk from this image. + Family string `protobuf:"bytes,7,opt,name=family,proto3" json:"family,omitempty"` + // The size of the image, specified in bytes. + StorageSize int64 `protobuf:"varint,8,opt,name=storage_size,json=storageSize,proto3" json:"storage_size,omitempty"` + // Minimum size of the disk which will be created from this image. + MinDiskSize int64 `protobuf:"varint,9,opt,name=min_disk_size,json=minDiskSize,proto3" json:"min_disk_size,omitempty"` + // License IDs that indicate which licenses are attached to this resource. + // License IDs are used to calculate additional charges for the use of the virtual machine. + // + // The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + // + // If you know the license IDs, specify them when you create the image. + // For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + // You can specify them in the [yandex.cloud.compute.v1.ImageService.Create] request. 
+ ProductIds []string `protobuf:"bytes,10,rep,name=product_ids,json=productIds,proto3" json:"product_ids,omitempty"` + // Current status of the image. + Status Image_Status `protobuf:"varint,11,opt,name=status,proto3,enum=yandex.cloud.compute.v1.Image_Status" json:"status,omitempty"` + // Operating system that is contained in the image. + Os *Os `protobuf:"bytes,12,opt,name=os,proto3" json:"os,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Image) Reset() { *m = Image{} } +func (m *Image) String() string { return proto.CompactTextString(m) } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_image_b90d09089d7c657a, []int{0} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Image.Unmarshal(m, b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Image.Marshal(b, m, deterministic) +} +func (dst *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(dst, src) +} +func (m *Image) XXX_Size() int { + return xxx_messageInfo_Image.Size(m) +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *Image) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Image) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Image) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Image) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Image) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Image) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Image) GetFamily() string { + if m != nil { + return m.Family + } + return "" +} + +func (m *Image) GetStorageSize() int64 { + if m != nil { + return m.StorageSize + } + return 0 +} + +func (m *Image) GetMinDiskSize() int64 { + if m != nil { + return m.MinDiskSize + } + return 0 +} + +func (m *Image) GetProductIds() []string { + if m != nil { + return m.ProductIds + } + return nil +} + +func (m *Image) GetStatus() Image_Status { + if m != nil { + return m.Status + } + return Image_STATUS_UNSPECIFIED +} + +func (m *Image) GetOs() *Os { + if m != nil { + return m.Os + } + return nil +} + +type Os struct { + // Operating system type. 
+ Type Os_Type `protobuf:"varint,1,opt,name=type,proto3,enum=yandex.cloud.compute.v1.Os_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Os) Reset() { *m = Os{} } +func (m *Os) String() string { return proto.CompactTextString(m) } +func (*Os) ProtoMessage() {} +func (*Os) Descriptor() ([]byte, []int) { + return fileDescriptor_image_b90d09089d7c657a, []int{1} +} +func (m *Os) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Os.Unmarshal(m, b) +} +func (m *Os) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Os.Marshal(b, m, deterministic) +} +func (dst *Os) XXX_Merge(src proto.Message) { + xxx_messageInfo_Os.Merge(dst, src) +} +func (m *Os) XXX_Size() int { + return xxx_messageInfo_Os.Size(m) +} +func (m *Os) XXX_DiscardUnknown() { + xxx_messageInfo_Os.DiscardUnknown(m) +} + +var xxx_messageInfo_Os proto.InternalMessageInfo + +func (m *Os) GetType() Os_Type { + if m != nil { + return m.Type + } + return Os_TYPE_UNSPECIFIED +} + +func init() { + proto.RegisterType((*Image)(nil), "yandex.cloud.compute.v1.Image") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.Image.LabelsEntry") + proto.RegisterType((*Os)(nil), "yandex.cloud.compute.v1.Os") + proto.RegisterEnum("yandex.cloud.compute.v1.Image_Status", Image_Status_name, Image_Status_value) + proto.RegisterEnum("yandex.cloud.compute.v1.Os_Type", Os_Type_name, Os_Type_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/image.proto", fileDescriptor_image_b90d09089d7c657a) +} + +var fileDescriptor_image_b90d09089d7c657a = []byte{ + // 555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdf, 0x6b, 0x9c, 0x40, + 0x10, 0xc7, 0xab, 0xf7, 0x23, 0x71, 0x4c, 0x83, 0x2c, 0x21, 0x95, 0xe4, 0x21, 0xf6, 0x4a, 0xe1, + 0x68, 0x89, 0x92, 0x6b, 0x1e, 0x9a, 0x96, 0x3e, 0x5c, 0x72, 0xb6, 0x08, 0xe1, 0x2e, 0xec, 0x19, + 0xd2, 0xf4, 0x45, 0xbc, 0xdb, 0x8d, 0x5d, 0xa2, 0xae, 0xb8, 0x6b, 0xa8, 0xf9, 0x7b, 0xfb, 0x87, + 0x14, 0x57, 0x03, 0xa1, 0x70, 0xed, 0xdb, 0xcc, 0xd7, 0xcf, 0xcc, 0x77, 0x67, 0xd7, 0x81, 0x37, + 0x75, 0x9c, 0x13, 0xfa, 0xcb, 0x5b, 0xa7, 0xbc, 0x22, 0xde, 0x9a, 0x67, 0x45, 0x25, 0xa9, 0xf7, + 0x70, 0xe2, 0xb1, 0x2c, 0x4e, 0xa8, 0x5b, 0x94, 0x5c, 0x72, 0xf4, 0xaa, 0x85, 0x5c, 0x05, 0xb9, + 0x1d, 0xe4, 0x3e, 0x9c, 0x1c, 0x1c, 0x25, 0x9c, 0x27, 0x29, 0xf5, 0x14, 0xb6, 0xaa, 0xee, 0x3c, + 0xc9, 0x32, 0x2a, 0x64, 0x9c, 0x15, 0x6d, 0xe5, 0xe8, 0x77, 0x1f, 0x06, 0x41, 0xd3, 0x09, 0xed, + 0x82, 0xce, 0x88, 0xad, 0x39, 0xda, 0xd8, 0xc0, 0x3a, 0x23, 0xe8, 0x10, 0x8c, 0x3b, 0x9e, 0x12, + 0x5a, 0x46, 0x8c, 0xd8, 0xba, 0x92, 0xb7, 0x5b, 0x21, 0x20, 0xe8, 0x0c, 0x60, 0x5d, 0xd2, 0x58, + 0x52, 0x12, 0xc5, 0xd2, 0xee, 0x39, 0xda, 0xd8, 0x9c, 0x1c, 0xb8, 0xad, 0x99, 0xfb, 0x64, 0xe6, + 0x86, 0x4f, 0x66, 0xd8, 0xe8, 0xe8, 0xa9, 0x44, 0x08, 0xfa, 0x79, 0x9c, 0x51, 0xbb, 0xaf, 0x5a, + 0xaa, 0x18, 0x39, 0x60, 0x12, 0x2a, 0xd6, 0x25, 0x2b, 0x24, 0xe3, 0xb9, 0x3d, 0x50, 0x9f, 0x9e, + 0x4b, 0xe8, 0x1c, 0x86, 0x69, 0xbc, 0xa2, 0xa9, 0xb0, 0x87, 0x4e, 0x6f, 0x6c, 0x4e, 0xde, 0xb9, + 0x1b, 0x46, 0x76, 0xd5, 0x34, 0xee, 0xa5, 0x82, 0xfd, 0x5c, 0x96, 0x35, 0xee, 0x2a, 0xd1, 0x3e, + 0x0c, 0xef, 0xe2, 0x8c, 0xa5, 0xb5, 0xbd, 0xa5, 0x0c, 0xba, 0x0c, 0xbd, 0x86, 0x1d, 0x21, 0x79, + 0x19, 0x27, 0x34, 0x12, 0xec, 0x91, 0xda, 0xdb, 0x8e, 0x36, 0xee, 0x61, 0xb3, 0xd3, 0x96, 0xec, + 0x91, 0xa2, 0x11, 0xbc, 0xcc, 0x58, 0x1e, 
0x11, 0x26, 0xee, 0x5b, 0xc6, 0x68, 0x99, 0x8c, 0xe5, + 0x33, 0x26, 0xee, 0x15, 0x73, 0x04, 0x66, 0x51, 0x72, 0x52, 0xad, 0x65, 0xc4, 0x88, 0xb0, 0xc1, + 0xe9, 0x8d, 0x0d, 0x0c, 0x9d, 0x14, 0x10, 0x81, 0xbe, 0xc0, 0x50, 0xc8, 0x58, 0x56, 0xc2, 0x36, + 0x1d, 0x6d, 0xbc, 0x3b, 0x79, 0xfb, 0x9f, 0x19, 0x96, 0x0a, 0xc6, 0x5d, 0x11, 0x7a, 0x0f, 0x3a, + 0x17, 0xf6, 0x8e, 0xba, 0xeb, 0xc3, 0x8d, 0xa5, 0x0b, 0x81, 0x75, 0x2e, 0x0e, 0xce, 0xc0, 0x7c, + 0x76, 0x05, 0xc8, 0x82, 0xde, 0x3d, 0xad, 0xbb, 0xd7, 0x6d, 0x42, 0xb4, 0x07, 0x83, 0x87, 0x38, + 0xad, 0x68, 0xf7, 0xb4, 0x6d, 0xf2, 0x49, 0xff, 0xa8, 0x8d, 0x30, 0x0c, 0x5b, 0x67, 0xb4, 0x0f, + 0x68, 0x19, 0x4e, 0xc3, 0xeb, 0x65, 0x74, 0x3d, 0x5f, 0x5e, 0xf9, 0x17, 0xc1, 0xd7, 0xc0, 0x9f, + 0x59, 0x2f, 0xd0, 0x0e, 0x6c, 0x5f, 0x60, 0x7f, 0x1a, 0x06, 0xf3, 0x6f, 0x96, 0x86, 0x0c, 0x18, + 0x60, 0x7f, 0x3a, 0xbb, 0xb5, 0xf4, 0x26, 0xf4, 0x31, 0x5e, 0x60, 0xab, 0xd7, 0x30, 0x33, 0xff, + 0xd2, 0x57, 0x4c, 0x7f, 0x54, 0x80, 0xbe, 0x10, 0xe8, 0x14, 0xfa, 0xb2, 0x2e, 0xa8, 0x3a, 0xc6, + 0xee, 0xc4, 0xf9, 0xc7, 0x0c, 0x6e, 0x58, 0x17, 0x14, 0x2b, 0x7a, 0x74, 0x0a, 0xfd, 0x26, 0x43, + 0x7b, 0x60, 0x85, 0xb7, 0x57, 0xfe, 0x5f, 0x67, 0x31, 0x60, 0x70, 0x19, 0xcc, 0xaf, 0xbf, 0x5b, + 0x1a, 0x32, 0x61, 0xeb, 0x26, 0x98, 0xcf, 0x16, 0x37, 0x4b, 0x4b, 0x3f, 0xf7, 0x7f, 0x5c, 0x24, + 0x4c, 0xfe, 0xac, 0x56, 0x4d, 0x63, 0xaf, 0x75, 0x3a, 0x6e, 0x97, 0x28, 0xe1, 0xc7, 0x09, 0xcd, + 0xd5, 0x5f, 0xea, 0x6d, 0xd8, 0xae, 0xcf, 0x5d, 0xb8, 0x1a, 0x2a, 0xec, 0xc3, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xee, 0xdc, 0xf1, 0x6f, 0x87, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image_service.pb.go new file mode 100644 index 000000000..d9a83d748 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/image_service.pb.go @@ -0,0 +1,1278 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/image_service.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetImageRequest struct { + // ID of the Image resource to return. + // To get the image ID, use a [ImageService.List] request. 
+ ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetImageRequest) Reset() { *m = GetImageRequest{} } +func (m *GetImageRequest) String() string { return proto.CompactTextString(m) } +func (*GetImageRequest) ProtoMessage() {} +func (*GetImageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{0} +} +func (m *GetImageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetImageRequest.Unmarshal(m, b) +} +func (m *GetImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetImageRequest.Marshal(b, m, deterministic) +} +func (dst *GetImageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetImageRequest.Merge(dst, src) +} +func (m *GetImageRequest) XXX_Size() int { + return xxx_messageInfo_GetImageRequest.Size(m) +} +func (m *GetImageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetImageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetImageRequest proto.InternalMessageInfo + +func (m *GetImageRequest) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +type GetImageLatestByFamilyRequest struct { + // ID of the folder to get the image from. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the image family to search for. + Family string `protobuf:"bytes,2,opt,name=family,proto3" json:"family,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetImageLatestByFamilyRequest) Reset() { *m = GetImageLatestByFamilyRequest{} } +func (m *GetImageLatestByFamilyRequest) String() string { return proto.CompactTextString(m) } +func (*GetImageLatestByFamilyRequest) ProtoMessage() {} +func (*GetImageLatestByFamilyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{1} +} +func (m *GetImageLatestByFamilyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetImageLatestByFamilyRequest.Unmarshal(m, b) +} +func (m *GetImageLatestByFamilyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetImageLatestByFamilyRequest.Marshal(b, m, deterministic) +} +func (dst *GetImageLatestByFamilyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetImageLatestByFamilyRequest.Merge(dst, src) +} +func (m *GetImageLatestByFamilyRequest) XXX_Size() int { + return xxx_messageInfo_GetImageLatestByFamilyRequest.Size(m) +} +func (m *GetImageLatestByFamilyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetImageLatestByFamilyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetImageLatestByFamilyRequest proto.InternalMessageInfo + +func (m *GetImageLatestByFamilyRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *GetImageLatestByFamilyRequest) GetFamily() string { + if m != nil { + return m.Family + } + return "" +} + +type ListImagesRequest struct { + // ID of the folder to list images in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. 
+ FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListImagesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListImagesResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [Image.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z]([-a-z0-9]{,61}[a-z0-9])?$`. + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } +func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } +func (*ListImagesRequest) ProtoMessage() {} +func (*ListImagesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{2} +} +func (m *ListImagesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListImagesRequest.Unmarshal(m, b) +} +func (m *ListImagesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListImagesRequest.Marshal(b, m, deterministic) +} +func (dst *ListImagesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListImagesRequest.Merge(dst, src) +} +func (m *ListImagesRequest) XXX_Size() int { + return xxx_messageInfo_ListImagesRequest.Size(m) +} +func (m *ListImagesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListImagesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListImagesRequest proto.InternalMessageInfo + +func (m *ListImagesRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListImagesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListImagesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListImagesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListImagesResponse struct { + // List of images. + Images []*Image `protobuf:"bytes,1,rep,name=images,proto3" json:"images,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListSnapshotsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListSnapshotsRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } +func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } +func (*ListImagesResponse) ProtoMessage() {} +func (*ListImagesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{3} +} +func (m *ListImagesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListImagesResponse.Unmarshal(m, b) +} +func (m *ListImagesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListImagesResponse.Marshal(b, m, deterministic) +} +func (dst *ListImagesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListImagesResponse.Merge(dst, src) +} +func (m *ListImagesResponse) XXX_Size() int { + return xxx_messageInfo_ListImagesResponse.Size(m) +} +func (m *ListImagesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListImagesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListImagesResponse proto.InternalMessageInfo + +func (m *ListImagesResponse) GetImages() []*Image { + if m != nil { + return m.Images + } + return nil +} + +func (m *ListImagesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateImageRequest struct { + // ID of the folder to create an image in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the image. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the image. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The name of the image family to which this image belongs. + // To get information about the most recent image from a family, use a [ImageService.GetLatestByFamily] request. + Family string `protobuf:"bytes,5,opt,name=family,proto3" json:"family,omitempty"` + // Minimum size of the disk that will be created from this image. + // Specified in bytes. Should be more than the volume of source data. + MinDiskSize int64 `protobuf:"varint,6,opt,name=min_disk_size,json=minDiskSize,proto3" json:"min_disk_size,omitempty"` + // License IDs that indicate which licenses are attached to this resource. + // License IDs are used to calculate additional charges for the use of the virtual machine. + // + // The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + // + // If you know the license IDs, specify them when you create the image. + // For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + // You can specify them in this request. 
+ ProductIds []string `protobuf:"bytes,7,rep,name=product_ids,json=productIds,proto3" json:"product_ids,omitempty"` + // Types that are valid to be assigned to Source: + // *CreateImageRequest_ImageId + // *CreateImageRequest_DiskId + // *CreateImageRequest_SnapshotId + // *CreateImageRequest_Uri + Source isCreateImageRequest_Source `protobuf_oneof:"source"` + // Operating system that is contained in the image. + Os *Os `protobuf:"bytes,12,opt,name=os,proto3" json:"os,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateImageRequest) Reset() { *m = CreateImageRequest{} } +func (m *CreateImageRequest) String() string { return proto.CompactTextString(m) } +func (*CreateImageRequest) ProtoMessage() {} +func (*CreateImageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{4} +} +func (m *CreateImageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateImageRequest.Unmarshal(m, b) +} +func (m *CreateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateImageRequest.Marshal(b, m, deterministic) +} +func (dst *CreateImageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateImageRequest.Merge(dst, src) +} +func (m *CreateImageRequest) XXX_Size() int { + return xxx_messageInfo_CreateImageRequest.Size(m) +} +func (m *CreateImageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateImageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateImageRequest proto.InternalMessageInfo + +func (m *CreateImageRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateImageRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateImageRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateImageRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateImageRequest) GetFamily() string { + if m != nil { + return m.Family + } + return "" +} + +func (m *CreateImageRequest) GetMinDiskSize() int64 { + if m != nil { + return m.MinDiskSize + } + return 0 +} + +func (m *CreateImageRequest) GetProductIds() []string { + if m != nil { + return m.ProductIds + } + return nil +} + +type isCreateImageRequest_Source interface { + isCreateImageRequest_Source() +} + +type CreateImageRequest_ImageId struct { + ImageId string `protobuf:"bytes,8,opt,name=image_id,json=imageId,proto3,oneof"` +} + +type CreateImageRequest_DiskId struct { + DiskId string `protobuf:"bytes,9,opt,name=disk_id,json=diskId,proto3,oneof"` +} + +type CreateImageRequest_SnapshotId struct { + SnapshotId string `protobuf:"bytes,10,opt,name=snapshot_id,json=snapshotId,proto3,oneof"` +} + +type CreateImageRequest_Uri struct { + Uri string `protobuf:"bytes,11,opt,name=uri,proto3,oneof"` +} + +func (*CreateImageRequest_ImageId) isCreateImageRequest_Source() {} + +func (*CreateImageRequest_DiskId) isCreateImageRequest_Source() {} + +func (*CreateImageRequest_SnapshotId) isCreateImageRequest_Source() {} + +func (*CreateImageRequest_Uri) isCreateImageRequest_Source() {} + +func (m *CreateImageRequest) GetSource() isCreateImageRequest_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *CreateImageRequest) GetImageId() string { + if x, ok := m.GetSource().(*CreateImageRequest_ImageId); ok { + return x.ImageId + } + return "" +} + 
+func (m *CreateImageRequest) GetDiskId() string { + if x, ok := m.GetSource().(*CreateImageRequest_DiskId); ok { + return x.DiskId + } + return "" +} + +func (m *CreateImageRequest) GetSnapshotId() string { + if x, ok := m.GetSource().(*CreateImageRequest_SnapshotId); ok { + return x.SnapshotId + } + return "" +} + +func (m *CreateImageRequest) GetUri() string { + if x, ok := m.GetSource().(*CreateImageRequest_Uri); ok { + return x.Uri + } + return "" +} + +func (m *CreateImageRequest) GetOs() *Os { + if m != nil { + return m.Os + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CreateImageRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CreateImageRequest_OneofMarshaler, _CreateImageRequest_OneofUnmarshaler, _CreateImageRequest_OneofSizer, []interface{}{ + (*CreateImageRequest_ImageId)(nil), + (*CreateImageRequest_DiskId)(nil), + (*CreateImageRequest_SnapshotId)(nil), + (*CreateImageRequest_Uri)(nil), + } +} + +func _CreateImageRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CreateImageRequest) + // source + switch x := m.Source.(type) { + case *CreateImageRequest_ImageId: + b.EncodeVarint(8<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ImageId) + case *CreateImageRequest_DiskId: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DiskId) + case *CreateImageRequest_SnapshotId: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SnapshotId) + case *CreateImageRequest_Uri: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uri) + case nil: + default: + return fmt.Errorf("CreateImageRequest.Source has unexpected type %T", x) + } + return nil +} + +func _CreateImageRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CreateImageRequest) + switch tag { + case 8: // source.image_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &CreateImageRequest_ImageId{x} + return true, err + case 9: // source.disk_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &CreateImageRequest_DiskId{x} + return true, err + case 10: // source.snapshot_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &CreateImageRequest_SnapshotId{x} + return true, err + case 11: // source.uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &CreateImageRequest_Uri{x} + return true, err + default: + return false, nil + } +} + +func _CreateImageRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CreateImageRequest) + // source + switch x := m.Source.(type) { + case *CreateImageRequest_ImageId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ImageId))) + n += len(x.ImageId) + case *CreateImageRequest_DiskId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DiskId))) + n += len(x.DiskId) + case *CreateImageRequest_SnapshotId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.SnapshotId))) + n += len(x.SnapshotId) + case *CreateImageRequest_Uri: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Uri))) + n += len(x.Uri) + case nil: + default: + 
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type CreateImageMetadata struct { + // ID of the image that is being created. + ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateImageMetadata) Reset() { *m = CreateImageMetadata{} } +func (m *CreateImageMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateImageMetadata) ProtoMessage() {} +func (*CreateImageMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{5} +} +func (m *CreateImageMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateImageMetadata.Unmarshal(m, b) +} +func (m *CreateImageMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateImageMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateImageMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateImageMetadata.Merge(dst, src) +} +func (m *CreateImageMetadata) XXX_Size() int { + return xxx_messageInfo_CreateImageMetadata.Size(m) +} +func (m *CreateImageMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateImageMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateImageMetadata proto.InternalMessageInfo + +func (m *CreateImageMetadata) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +type UpdateImageRequest struct { + // ID of the Image resource to update. + // To get the image ID, use a [ImageService.List] request. + ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + // Field mask that specifies which fields of the Image resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the image. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the image. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Minimum size of the disk that can be created from this image. + // Specified in bytes. Should be more than the volume of source data and more than the virtual disk size. + MinDiskSize int64 `protobuf:"varint,5,opt,name=min_disk_size,json=minDiskSize,proto3" json:"min_disk_size,omitempty"` + // Resource labels as `` key:value `` pairs. + // + // Existing set of `` labels `` is completely replaced by the provided set. 
+ Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateImageRequest) Reset() { *m = UpdateImageRequest{} } +func (m *UpdateImageRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateImageRequest) ProtoMessage() {} +func (*UpdateImageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{6} +} +func (m *UpdateImageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateImageRequest.Unmarshal(m, b) +} +func (m *UpdateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateImageRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateImageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateImageRequest.Merge(dst, src) +} +func (m *UpdateImageRequest) XXX_Size() int { + return xxx_messageInfo_UpdateImageRequest.Size(m) +} +func (m *UpdateImageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateImageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateImageRequest proto.InternalMessageInfo + +func (m *UpdateImageRequest) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +func (m *UpdateImageRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateImageRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateImageRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateImageRequest) GetMinDiskSize() int64 { + if m != nil { + return m.MinDiskSize + } + return 0 +} + +func (m *UpdateImageRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type UpdateImageMetadata struct { + // ID of the Image resource that is being updated. + ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateImageMetadata) Reset() { *m = UpdateImageMetadata{} } +func (m *UpdateImageMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateImageMetadata) ProtoMessage() {} +func (*UpdateImageMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{7} +} +func (m *UpdateImageMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateImageMetadata.Unmarshal(m, b) +} +func (m *UpdateImageMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateImageMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateImageMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateImageMetadata.Merge(dst, src) +} +func (m *UpdateImageMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateImageMetadata.Size(m) +} +func (m *UpdateImageMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateImageMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateImageMetadata proto.InternalMessageInfo + +func (m *UpdateImageMetadata) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +type DeleteImageRequest struct { + // ID of the image to delete. 
+ // To get the image ID, use a [ImageService.List] request. + ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteImageRequest) Reset() { *m = DeleteImageRequest{} } +func (m *DeleteImageRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteImageRequest) ProtoMessage() {} +func (*DeleteImageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{8} +} +func (m *DeleteImageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteImageRequest.Unmarshal(m, b) +} +func (m *DeleteImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteImageRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteImageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteImageRequest.Merge(dst, src) +} +func (m *DeleteImageRequest) XXX_Size() int { + return xxx_messageInfo_DeleteImageRequest.Size(m) +} +func (m *DeleteImageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteImageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteImageRequest proto.InternalMessageInfo + +func (m *DeleteImageRequest) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +type DeleteImageMetadata struct { + // ID of the image that is being deleted. + ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteImageMetadata) Reset() { *m = DeleteImageMetadata{} } +func (m *DeleteImageMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteImageMetadata) ProtoMessage() {} +func (*DeleteImageMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{9} +} +func (m *DeleteImageMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteImageMetadata.Unmarshal(m, b) +} +func (m *DeleteImageMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteImageMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteImageMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteImageMetadata.Merge(dst, src) +} +func (m *DeleteImageMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteImageMetadata.Size(m) +} +func (m *DeleteImageMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteImageMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteImageMetadata proto.InternalMessageInfo + +func (m *DeleteImageMetadata) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +type ListImageOperationsRequest struct { + // ID of the Image resource to list operations for. + ImageId string `protobuf:"bytes,1,opt,name=image_id,json=imageId,proto3" json:"image_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListImageOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. 
To get the next page of results, set [page_token] to the + // [ListImageOperationsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListImageOperationsRequest) Reset() { *m = ListImageOperationsRequest{} } +func (m *ListImageOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListImageOperationsRequest) ProtoMessage() {} +func (*ListImageOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{10} +} +func (m *ListImageOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListImageOperationsRequest.Unmarshal(m, b) +} +func (m *ListImageOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListImageOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListImageOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListImageOperationsRequest.Merge(dst, src) +} +func (m *ListImageOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListImageOperationsRequest.Size(m) +} +func (m *ListImageOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListImageOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListImageOperationsRequest proto.InternalMessageInfo + +func (m *ListImageOperationsRequest) GetImageId() string { + if m != nil { + return m.ImageId + } + return "" +} + +func (m *ListImageOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListImageOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListImageOperationsResponse struct { + // List of operations for the specified image. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListImageOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListImageOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListImageOperationsResponse) Reset() { *m = ListImageOperationsResponse{} } +func (m *ListImageOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListImageOperationsResponse) ProtoMessage() {} +func (*ListImageOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_image_service_6afec801ba593e67, []int{11} +} +func (m *ListImageOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListImageOperationsResponse.Unmarshal(m, b) +} +func (m *ListImageOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListImageOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListImageOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListImageOperationsResponse.Merge(dst, src) +} +func (m *ListImageOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListImageOperationsResponse.Size(m) +} +func (m *ListImageOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListImageOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListImageOperationsResponse proto.InternalMessageInfo + +func (m *ListImageOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListImageOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetImageRequest)(nil), "yandex.cloud.compute.v1.GetImageRequest") + proto.RegisterType((*GetImageLatestByFamilyRequest)(nil), "yandex.cloud.compute.v1.GetImageLatestByFamilyRequest") + proto.RegisterType((*ListImagesRequest)(nil), "yandex.cloud.compute.v1.ListImagesRequest") + proto.RegisterType((*ListImagesResponse)(nil), "yandex.cloud.compute.v1.ListImagesResponse") + proto.RegisterType((*CreateImageRequest)(nil), "yandex.cloud.compute.v1.CreateImageRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.CreateImageRequest.LabelsEntry") + proto.RegisterType((*CreateImageMetadata)(nil), "yandex.cloud.compute.v1.CreateImageMetadata") + proto.RegisterType((*UpdateImageRequest)(nil), "yandex.cloud.compute.v1.UpdateImageRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.UpdateImageRequest.LabelsEntry") + proto.RegisterType((*UpdateImageMetadata)(nil), "yandex.cloud.compute.v1.UpdateImageMetadata") + proto.RegisterType((*DeleteImageRequest)(nil), "yandex.cloud.compute.v1.DeleteImageRequest") + proto.RegisterType((*DeleteImageMetadata)(nil), "yandex.cloud.compute.v1.DeleteImageMetadata") + proto.RegisterType((*ListImageOperationsRequest)(nil), "yandex.cloud.compute.v1.ListImageOperationsRequest") + proto.RegisterType((*ListImageOperationsResponse)(nil), "yandex.cloud.compute.v1.ListImageOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ImageServiceClient is the client API for ImageService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ImageServiceClient interface { + // Returns the specified Image resource. + // + // To get the list of available Image resources, make a [List] request. + Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*Image, error) + // Returns the latest image that is part of an image family. + GetLatestByFamily(ctx context.Context, in *GetImageLatestByFamilyRequest, opts ...grpc.CallOption) (*Image, error) + // Retrieves the list of Image resources in the specified folder. + List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) + // Creates an image in the specified folder. + // + // You can create an image from a disk, snapshot, other image or URI. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified image. + Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified image. + // + // Deleting an image removes its data permanently and is irreversible. + Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists operations for the specified image. + ListOperations(ctx context.Context, in *ListImageOperationsRequest, opts ...grpc.CallOption) (*ListImageOperationsResponse, error) +} + +type imageServiceClient struct { + cc *grpc.ClientConn +} + +func NewImageServiceClient(cc *grpc.ClientConn) ImageServiceClient { + return &imageServiceClient{cc} +} + +func (c *imageServiceClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*Image, error) { + out := new(Image) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) GetLatestByFamily(ctx context.Context, in *GetImageLatestByFamilyRequest, opts ...grpc.CallOption) (*Image, error) { + out := new(Image) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/GetLatestByFamily", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { + out := new(ListImagesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) ListOperations(ctx context.Context, in *ListImageOperationsRequest, opts ...grpc.CallOption) (*ListImageOperationsResponse, error) { + out := new(ListImageOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ImageService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ImageServiceServer is the server API for ImageService service. +type ImageServiceServer interface { + // Returns the specified Image resource. + // + // To get the list of available Image resources, make a [List] request. + Get(context.Context, *GetImageRequest) (*Image, error) + // Returns the latest image that is part of an image family. + GetLatestByFamily(context.Context, *GetImageLatestByFamilyRequest) (*Image, error) + // Retrieves the list of Image resources in the specified folder. + List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) + // Creates an image in the specified folder. + // + // You can create an image from a disk, snapshot, other image or URI. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(context.Context, *CreateImageRequest) (*operation.Operation, error) + // Updates the specified image. + Update(context.Context, *UpdateImageRequest) (*operation.Operation, error) + // Deletes the specified image. + // + // Deleting an image removes its data permanently and is irreversible. + Delete(context.Context, *DeleteImageRequest) (*operation.Operation, error) + // Lists operations for the specified image. 
+ ListOperations(context.Context, *ListImageOperationsRequest) (*ListImageOperationsResponse, error) +} + +func RegisterImageServiceServer(s *grpc.Server, srv ImageServiceServer) { + s.RegisterService(&_ImageService_serviceDesc, srv) +} + +func _ImageService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).Get(ctx, req.(*GetImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_GetLatestByFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetImageLatestByFamilyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).GetLatestByFamily(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/GetLatestByFamily", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).GetLatestByFamily(ctx, req.(*GetImageLatestByFamilyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).List(ctx, req.(*ListImagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).Create(ctx, req.(*CreateImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).Update(ctx, req.(*UpdateImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ImageService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).Delete(ctx, req.(*DeleteImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImageOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ImageService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).ListOperations(ctx, req.(*ListImageOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ImageService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.compute.v1.ImageService", + HandlerType: (*ImageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ImageService_Get_Handler, + }, + { + MethodName: "GetLatestByFamily", + Handler: _ImageService_GetLatestByFamily_Handler, + }, + { + MethodName: "List", + Handler: _ImageService_List_Handler, + }, + { + MethodName: "Create", + Handler: _ImageService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _ImageService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _ImageService_Delete_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ImageService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/compute/v1/image_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/image_service.proto", fileDescriptor_image_service_6afec801ba593e67) +} + +var fileDescriptor_image_service_6afec801ba593e67 = []byte{ + // 1209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xbf, 0x6f, 0xdb, 0xd6, + 0x13, 0x0f, 0x2d, 0x89, 0x96, 0x4e, 0xce, 0xaf, 0xf7, 0x4d, 0x10, 0x85, 0x89, 0xbf, 0xb5, 0x69, + 0x24, 0x91, 0xe5, 0x48, 0x22, 0x65, 0x59, 0x8d, 0x9c, 0x18, 0x45, 0x95, 0x38, 0xb6, 0x00, 0x07, + 0x29, 0x98, 0x66, 0xa9, 0x11, 0x08, 0xb4, 0xf8, 0xac, 0x10, 0xa2, 0x48, 0x96, 0x8f, 0x12, 0x22, + 0xbb, 0x59, 0x02, 0x14, 0x28, 0x3c, 0x15, 0x28, 0xd0, 0x76, 0xee, 0x5e, 0x14, 0x30, 0xd0, 0xb1, + 0x40, 0x3b, 0xd9, 0x73, 0xfa, 0x0f, 0x74, 0xe8, 0x50, 0xa0, 0x5b, 0xc6, 0x4c, 0x05, 0x1f, 0x29, + 0x99, 0x32, 0xf5, 0x33, 0x69, 0x37, 0x52, 0xf7, 0xb9, 0x7b, 0x1f, 0xde, 0x7d, 0xee, 0xee, 0x09, + 0x96, 0xda, 0xb2, 0xae, 0xe0, 0x17, 0xd9, 0xaa, 0x66, 0x34, 0x95, 0x6c, 0xd5, 0x68, 0x98, 0x4d, + 0x1b, 0x67, 0x5b, 0x62, 0x56, 0x6d, 0xc8, 0x35, 0x5c, 0x21, 0xd8, 0x6a, 0xa9, 0x55, 0x9c, 0x31, + 0x2d, 0xc3, 0x36, 0xd0, 0x15, 0x17, 0x9c, 0xa1, 0xe0, 0x8c, 0x07, 0xce, 0xb4, 0x44, 0xee, 0x7a, + 0xcd, 0x30, 0x6a, 0x1a, 0xce, 0xca, 0xa6, 0x9a, 0x95, 0x75, 0xdd, 0xb0, 0x65, 0x5b, 0x35, 0x74, + 0xe2, 0xba, 0x71, 0x73, 0x9e, 0x95, 0xbe, 0xed, 0x34, 
0x77, 0xb3, 0xbb, 0x2a, 0xd6, 0x94, 0x4a, + 0x43, 0x26, 0x75, 0x0f, 0xc1, 0x79, 0x2c, 0x1c, 0x7f, 0xc3, 0xc4, 0x16, 0x75, 0xf7, 0x6c, 0x0b, + 0x43, 0x19, 0x7a, 0xa0, 0x9b, 0x3d, 0xa0, 0x6e, 0x88, 0x40, 0xb0, 0xd9, 0x1e, 0x5c, 0x4b, 0xd6, + 0x54, 0xc5, 0x67, 0xe6, 0x57, 0xe1, 0xfc, 0x06, 0xb6, 0xcb, 0x4e, 0x60, 0x09, 0x7f, 0xde, 0xc4, + 0xc4, 0x46, 0xb7, 0x20, 0xea, 0xa6, 0x42, 0x55, 0x12, 0xcc, 0x1c, 0x93, 0x8c, 0x95, 0x66, 0xfe, + 0x3a, 0x12, 0x99, 0x83, 0x63, 0x31, 0x7c, 0x6f, 0x6d, 0x45, 0x90, 0xa6, 0xa9, 0xb5, 0xac, 0xf0, + 0x5f, 0x32, 0x30, 0xdb, 0x71, 0xde, 0x92, 0x6d, 0x4c, 0xec, 0x52, 0xfb, 0xa1, 0xdc, 0x50, 0xb5, + 0x76, 0x27, 0xd4, 0x22, 0xc4, 0x76, 0x0d, 0x4d, 0xc1, 0xd6, 0xa0, 0x58, 0x51, 0xd7, 0x5c, 0x56, + 0x50, 0x11, 0xd8, 0x5d, 0xea, 0x9b, 0x98, 0xa2, 0xb8, 0xf9, 0x37, 0x47, 0xe2, 0xec, 0x17, 0xdb, + 0x72, 0x7a, 0xef, 0xd9, 0x76, 0x5a, 0x4e, 0xef, 0x09, 0xe9, 0xe2, 0xb3, 0x7d, 0xf1, 0x76, 0x41, + 0x7c, 0xb9, 0xed, 0xbd, 0x49, 0x9e, 0x03, 0xff, 0x33, 0x03, 0x17, 0xb7, 0x54, 0xe2, 0x12, 0x21, + 0xef, 0x70, 0xf6, 0x2d, 0x88, 0x99, 0xb4, 0xf6, 0xea, 0x1e, 0xa6, 0xc7, 0x87, 0x4a, 0xf0, 0xf6, + 0x48, 0x64, 0xef, 0xad, 0x89, 0x82, 0x20, 0x48, 0x51, 0xc7, 0xf8, 0x44, 0xdd, 0xc3, 0x28, 0x09, + 0x40, 0x81, 0xb6, 0x51, 0xc7, 0x7a, 0x22, 0x44, 0x83, 0xc6, 0x0e, 0x8e, 0xc5, 0x08, 0x45, 0x4a, + 0x34, 0xca, 0xa7, 0x8e, 0x0d, 0xf1, 0xc0, 0xee, 0xaa, 0x9a, 0x8d, 0xad, 0x44, 0x98, 0xa2, 0xe0, + 0xe0, 0xb8, 0x1b, 0xcf, 0xb3, 0xf0, 0x36, 0x20, 0x3f, 0x6d, 0x62, 0x1a, 0x3a, 0xc1, 0xa8, 0x00, + 0x2c, 0x4d, 0x30, 0x49, 0x30, 0x73, 0xa1, 0x64, 0x3c, 0xf7, 0xff, 0xcc, 0x00, 0x0d, 0x66, 0xdc, + 0xaa, 0x79, 0x68, 0x74, 0x13, 0xce, 0xeb, 0xf8, 0x85, 0x5d, 0xf1, 0x11, 0xa4, 0x99, 0x94, 0xce, + 0x3a, 0x3f, 0x7f, 0xd2, 0x61, 0xc6, 0xff, 0x11, 0x01, 0x74, 0xdf, 0xc2, 0xb2, 0x8d, 0x7b, 0xaa, + 0x3e, 0x41, 0xba, 0x56, 0x20, 0xac, 0xcb, 0x0d, 0x3c, 0x7e, 0xa1, 0x28, 0x1c, 0x2d, 0x41, 0x5c, + 0xc1, 0xa4, 0x6a, 0xa9, 0xa6, 0xa3, 0xbf, 0xde, 0xec, 0xe5, 0x56, 0x0a, 0x92, 0xdf, 0x8a, 0xbe, + 0x66, 0x80, 0xd5, 0xe4, 0x1d, 0xac, 0x91, 0x44, 0x98, 0xa6, 0xe1, 0xc3, 0x81, 0x69, 0x08, 0x7e, + 0x4c, 0x66, 0x8b, 0x7a, 0xae, 0xeb, 0xb6, 0xd5, 0x2e, 0x7d, 0xf4, 0xe6, 0x48, 0x8c, 0x6f, 0xa7, + 0x2b, 0x42, 0xba, 0xe8, 0x70, 0x4c, 0xbd, 0xa2, 0x1f, 0x54, 0xc8, 0xbb, 0x1f, 0x56, 0x58, 0x3e, + 0x3c, 0x16, 0x59, 0x2e, 0x2c, 0xa6, 0xe9, 0x13, 0x42, 0x17, 0xbc, 0x2f, 0xe9, 0xe2, 0x25, 0x8f, + 0x87, 0x4f, 0xa1, 0x91, 0x09, 0x15, 0x8a, 0xd6, 0xe0, 0x6c, 0x43, 0xd5, 0x2b, 0x8a, 0x4a, 0xea, + 0xae, 0xc8, 0x58, 0x2a, 0xb2, 0xab, 0x6f, 0x8f, 0xc4, 0xcb, 0x79, 0xb1, 0x98, 0x5f, 0x16, 0xf2, + 0xe9, 0xfc, 0x72, 0xf1, 0x8e, 0x90, 0x2f, 0xac, 0x88, 0xa2, 0x28, 0xe4, 0xa5, 0x78, 0x43, 0xd5, + 0x1f, 0xa8, 0xa4, 0x4e, 0x65, 0xb7, 0x08, 0x71, 0xd3, 0x32, 0x94, 0x66, 0xd5, 0xae, 0xa8, 0x0a, + 0x49, 0x4c, 0xcf, 0x85, 0x92, 0xb1, 0x52, 0xb4, 0x5b, 0x19, 0xf0, 0x8c, 0x65, 0x85, 0xa0, 0x1b, + 0xbe, 0xe6, 0x8d, 0x52, 0x9a, 0x5d, 0xdc, 0xe6, 0x99, 0x6e, 0xeb, 0xa2, 0x05, 0x98, 0xa6, 0x64, + 0x54, 0x25, 0x11, 0x0b, 0xa0, 0x58, 0xc7, 0x54, 0x56, 0x9c, 0x82, 0x11, 0x5d, 0x36, 0xc9, 0x73, + 0xc3, 0x39, 0x37, 0x01, 0x01, 0x20, 0x74, 0xcc, 0x65, 0x05, 0x21, 0x08, 0x35, 0x2d, 0x35, 0x11, + 0x77, 0x40, 0x9b, 0x67, 0x24, 0xe7, 0x05, 0x2d, 0xc1, 0x94, 0x41, 0x12, 0x33, 0x73, 0x4c, 0x32, + 0x9e, 0xbb, 0x36, 0xb0, 0x7e, 0x8f, 0x89, 0x34, 0x65, 0x10, 0xae, 0x08, 0x71, 0x5f, 0xd9, 0xd0, + 0x05, 0x08, 0xd5, 0x71, 0xdb, 0x55, 0xa2, 0xe4, 0x3c, 0xa2, 0x4b, 0x10, 0x69, 0xc9, 0x5a, 0xd3, + 0xd3, 0x9d, 0xe4, 0xbe, 0xac, 0x4e, 0xdd, 0x61, 0x4a, 0xe7, 0x80, 0x25, 0x46, 
0xd3, 0xaa, 0x62, + 0x14, 0xfe, 0xf5, 0x37, 0x91, 0xe1, 0x05, 0xf8, 0x9f, 0x4f, 0x14, 0x8f, 0xb0, 0x2d, 0x2b, 0xb2, + 0x2d, 0xa3, 0xab, 0xa7, 0x07, 0xdb, 0xc9, 0x28, 0xfb, 0x3b, 0x04, 0xe8, 0xa9, 0xa9, 0x9c, 0x6e, + 0x8a, 0x71, 0x47, 0x21, 0xba, 0x0b, 0xf1, 0x26, 0x75, 0xa7, 0x33, 0x9e, 0x32, 0x8c, 0xe7, 0xb8, + 0x8c, 0xbb, 0x06, 0x32, 0x9d, 0x35, 0x90, 0x79, 0xe8, 0xac, 0x81, 0x47, 0x32, 0xa9, 0x4b, 0xe0, + 0xc2, 0x9d, 0xe7, 0x6e, 0x3f, 0x85, 0xde, 0xab, 0x9f, 0xc2, 0x43, 0xfb, 0x29, 0xa0, 0xc0, 0xc8, + 0x44, 0x0a, 0xf4, 0xb5, 0x23, 0x3b, 0xa2, 0x1d, 0x83, 0x69, 0xfc, 0x4f, 0xda, 0xf1, 0x3d, 0xf4, + 0xe2, 0xe8, 0xc3, 0xc7, 0x72, 0x1c, 0x7d, 0xac, 0x01, 0x7a, 0x80, 0x35, 0xfc, 0x8e, 0xf2, 0x70, + 0x0e, 0xf4, 0xb9, 0x8f, 0x73, 0xe0, 0xf7, 0x0c, 0x70, 0xdd, 0xe5, 0xf0, 0xb8, 0xb3, 0xd3, 0xc9, + 0xc4, 0xc2, 0xfc, 0xf7, 0x57, 0x1b, 0xff, 0x15, 0x03, 0xd7, 0xfa, 0x52, 0xf3, 0x16, 0xd8, 0xc7, + 0x00, 0xdd, 0x4b, 0x48, 0x67, 0x89, 0xcd, 0xf7, 0xca, 0xe5, 0xe4, 0x92, 0xd2, 0xf5, 0x97, 0x7c, + 0x4e, 0xe3, 0xee, 0xb2, 0xdc, 0x2f, 0x51, 0x98, 0xa1, 0x34, 0x9e, 0xb8, 0xb7, 0x36, 0xd4, 0x82, + 0xd0, 0x06, 0xb6, 0x51, 0x72, 0xa0, 0x3a, 0x4f, 0x5d, 0x76, 0xb8, 0x11, 0xdb, 0x95, 0xbf, 0xf1, + 0xea, 0xf7, 0x3f, 0xbf, 0x99, 0xfa, 0x00, 0xcd, 0x06, 0xee, 0x61, 0x24, 0xbb, 0xdf, 0x29, 0xc1, + 0x4b, 0xf4, 0x2d, 0x03, 0x17, 0x37, 0xb0, 0xdd, 0x7b, 0x0b, 0x42, 0x85, 0x91, 0x34, 0xfa, 0x5e, + 0x9b, 0x46, 0x92, 0x5a, 0xa4, 0xa4, 0x16, 0xd0, 0x7c, 0x90, 0xd4, 0xaa, 0xd6, 0x4b, 0x61, 0x1f, + 0xc2, 0x4e, 0xad, 0x50, 0x6a, 0x60, 0xc8, 0xc0, 0xcd, 0x89, 0x5b, 0x1a, 0x0b, 0xeb, 0x56, 0x9b, + 0xe7, 0x28, 0x97, 0x4b, 0x08, 0x05, 0xb9, 0xa0, 0xef, 0x18, 0x60, 0xdd, 0x41, 0x8c, 0x96, 0x26, + 0x58, 0xdf, 0xdc, 0x68, 0xb5, 0xf0, 0x6b, 0x87, 0xaf, 0x53, 0xd7, 0xfb, 0x8f, 0xf9, 0x08, 0x7d, + 0xa5, 0xb4, 0xae, 0xf0, 0x7d, 0x68, 0xad, 0x32, 0x29, 0xf4, 0x03, 0x03, 0xac, 0x3b, 0x02, 0x86, + 0x30, 0x0b, 0x4e, 0xb2, 0x71, 0x98, 0x6d, 0xba, 0xcc, 0xfa, 0x0d, 0x18, 0x1f, 0x33, 0x3e, 0x37, + 0x5c, 0x51, 0x0e, 0xc9, 0x9f, 0x18, 0x60, 0xdd, 0xb1, 0x31, 0x84, 0x64, 0x70, 0x2c, 0x8d, 0x43, + 0xf2, 0xe9, 0xe1, 0xeb, 0xd4, 0xed, 0xfe, 0x43, 0xe9, 0xf2, 0xe9, 0xa5, 0xb5, 0xde, 0x30, 0xed, + 0xb6, 0xdb, 0x06, 0xa9, 0x11, 0x6d, 0xf0, 0x23, 0x03, 0xe7, 0x1c, 0x8d, 0x9c, 0x4c, 0x05, 0xb4, + 0x3c, 0x5a, 0x4c, 0x81, 0xf1, 0xc6, 0xe5, 0x27, 0x73, 0xf2, 0xa4, 0x28, 0x50, 0x92, 0x29, 0x94, + 0x1c, 0x4a, 0xf2, 0xe4, 0x1f, 0x12, 0x29, 0xad, 0x7f, 0x76, 0xbf, 0xa6, 0xda, 0xcf, 0x9b, 0x3b, + 0xce, 0x11, 0x59, 0xf7, 0xcc, 0xb4, 0xfb, 0x4f, 0xa9, 0x66, 0xa4, 0x6b, 0x58, 0xa7, 0x49, 0xc8, + 0x0e, 0xf8, 0x3f, 0x76, 0xd7, 0x7b, 0xdc, 0x61, 0x29, 0x6c, 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xad, 0x67, 0x8e, 0x3a, 0x5b, 0x0e, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance.pb.go new file mode 100644 index 000000000..6d8e17fb1 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance.pb.go @@ -0,0 +1,744 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/instance.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. 
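// Editor's sketch (not part of the vendored file): how the generated
// _ImageService_*_Handler functions and _ImageService_serviceDesc above are used
// in practice. grpc.NewServer is configured with a unary interceptor; each
// generated handler decodes the request and either calls the ImageServiceServer
// implementation directly (interceptor == nil) or hands it to the interceptor as
// the final grpc.UnaryHandler. The RegisterImageServiceServer helper referenced in
// the comment below is the registration function protoc-gen-go normally emits next
// to the serviceDesc; the concrete myImageService implementation is hypothetical.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// Log every unary RPC; info.FullMethod carries names such as
	// "/yandex.cloud.compute.v1.ImageService/Delete", taken from the
	// grpc.UnaryServerInfo built inside the generated handlers.
	logging := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler) (interface{}, error) {
		log.Printf("unary call: %s", info.FullMethod)
		return handler(ctx, req)
	}

	srv := grpc.NewServer(grpc.UnaryInterceptor(logging))
	// compute.RegisterImageServiceServer(srv, &myImageService{}) // hypothetical implementation

	lis, err := net.Listen("tcp", "localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}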
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type IpVersion int32 + +const ( + IpVersion_IP_VERSION_UNSPECIFIED IpVersion = 0 + // IPv4 address, for example 192.0.2.235. + IpVersion_IPV4 IpVersion = 1 + // IPv6 address: not available yet. + IpVersion_IPV6 IpVersion = 2 +) + +var IpVersion_name = map[int32]string{ + 0: "IP_VERSION_UNSPECIFIED", + 1: "IPV4", + 2: "IPV6", +} +var IpVersion_value = map[string]int32{ + "IP_VERSION_UNSPECIFIED": 0, + "IPV4": 1, + "IPV6": 2, +} + +func (x IpVersion) String() string { + return proto.EnumName(IpVersion_name, int32(x)) +} +func (IpVersion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{0} +} + +type Instance_Status int32 + +const ( + Instance_STATUS_UNSPECIFIED Instance_Status = 0 + // Instance is waiting for resources to be allocated. + Instance_PROVISIONING Instance_Status = 1 + // Instance is running normally. + Instance_RUNNING Instance_Status = 2 + // Instance is being stopped. + Instance_STOPPING Instance_Status = 3 + // Instance stopped. + Instance_STOPPED Instance_Status = 4 + // Instance is being started. + Instance_STARTING Instance_Status = 5 + // Instance is being restarted. + Instance_RESTARTING Instance_Status = 6 + // Instance is being updated. + Instance_UPDATING Instance_Status = 7 + // Instance encountered a problem and cannot operate. + Instance_ERROR Instance_Status = 8 + // Instance crashed and will be restarted automatically. + Instance_CRASHED Instance_Status = 9 + // Instance is being deleted. + Instance_DELETING Instance_Status = 10 +) + +var Instance_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PROVISIONING", + 2: "RUNNING", + 3: "STOPPING", + 4: "STOPPED", + 5: "STARTING", + 6: "RESTARTING", + 7: "UPDATING", + 8: "ERROR", + 9: "CRASHED", + 10: "DELETING", +} +var Instance_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PROVISIONING": 1, + "RUNNING": 2, + "STOPPING": 3, + "STOPPED": 4, + "STARTING": 5, + "RESTARTING": 6, + "UPDATING": 7, + "ERROR": 8, + "CRASHED": 9, + "DELETING": 10, +} + +func (x Instance_Status) String() string { + return proto.EnumName(Instance_Status_name, int32(x)) +} +func (Instance_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{0, 0} +} + +type AttachedDisk_Mode int32 + +const ( + AttachedDisk_MODE_UNSPECIFIED AttachedDisk_Mode = 0 + // Read-only access. + AttachedDisk_READ_ONLY AttachedDisk_Mode = 1 + // Read/Write access. + AttachedDisk_READ_WRITE AttachedDisk_Mode = 2 +) + +var AttachedDisk_Mode_name = map[int32]string{ + 0: "MODE_UNSPECIFIED", + 1: "READ_ONLY", + 2: "READ_WRITE", +} +var AttachedDisk_Mode_value = map[string]int32{ + "MODE_UNSPECIFIED": 0, + "READ_ONLY": 1, + "READ_WRITE": 2, +} + +func (x AttachedDisk_Mode) String() string { + return proto.EnumName(AttachedDisk_Mode_name, int32(x)) +} +func (AttachedDisk_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{2, 0} +} + +// An Instance resource. For more information, see [Instances](/docs/compute/concepts/vm). +type Instance struct { + // ID of the instance. 
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the instance belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the instance. 1-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the instance. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Maximum of 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the availability zone where the instance resides. + ZoneId string `protobuf:"bytes,7,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // ID of the hardware platform configuration for the instance. + PlatformId string `protobuf:"bytes,8,opt,name=platform_id,json=platformId,proto3" json:"platform_id,omitempty"` + // Computing resources of the instance such as the amount of memory and number of cores. + Resources *Resources `protobuf:"bytes,9,opt,name=resources,proto3" json:"resources,omitempty"` + // Status of the instance. + Status Instance_Status `protobuf:"varint,10,opt,name=status,proto3,enum=yandex.cloud.compute.v1.Instance_Status" json:"status,omitempty"` + // The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys. + // + // For example, you may use the metadata in order to provide your public SSH key to the instance. + // For more information, see [Metadata](/docs/compute/concepts/vm-metadata). + Metadata map[string]string `protobuf:"bytes,11,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Boot disk that is attached to the instance. + BootDisk *AttachedDisk `protobuf:"bytes,12,opt,name=boot_disk,json=bootDisk,proto3" json:"boot_disk,omitempty"` + // Array of secondary disks that are attached to the instance. + SecondaryDisks []*AttachedDisk `protobuf:"bytes,13,rep,name=secondary_disks,json=secondaryDisks,proto3" json:"secondary_disks,omitempty"` + // Array of network interfaces that are attached to the instance. + NetworkInterfaces []*NetworkInterface `protobuf:"bytes,14,rep,name=network_interfaces,json=networkInterfaces,proto3" json:"network_interfaces,omitempty"` + // A domain name of the instance. FQDN is defined by the server + // in the format `..internal` when the instance is created. + // If the hostname were not specified when the instance was created, FQDN would be `.auto.internal`. + Fqdn string `protobuf:"bytes,16,opt,name=fqdn,proto3" json:"fqdn,omitempty"` + // Scheduling policy configuration. 
+ SchedulingPolicy *SchedulingPolicy `protobuf:"bytes,17,opt,name=scheduling_policy,json=schedulingPolicy,proto3" json:"scheduling_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Instance) Reset() { *m = Instance{} } +func (m *Instance) String() string { return proto.CompactTextString(m) } +func (*Instance) ProtoMessage() {} +func (*Instance) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{0} +} +func (m *Instance) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Instance.Unmarshal(m, b) +} +func (m *Instance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Instance.Marshal(b, m, deterministic) +} +func (dst *Instance) XXX_Merge(src proto.Message) { + xxx_messageInfo_Instance.Merge(dst, src) +} +func (m *Instance) XXX_Size() int { + return xxx_messageInfo_Instance.Size(m) +} +func (m *Instance) XXX_DiscardUnknown() { + xxx_messageInfo_Instance.DiscardUnknown(m) +} + +var xxx_messageInfo_Instance proto.InternalMessageInfo + +func (m *Instance) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Instance) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Instance) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Instance) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Instance) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Instance) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Instance) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Instance) GetPlatformId() string { + if m != nil { + return m.PlatformId + } + return "" +} + +func (m *Instance) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Instance) GetStatus() Instance_Status { + if m != nil { + return m.Status + } + return Instance_STATUS_UNSPECIFIED +} + +func (m *Instance) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Instance) GetBootDisk() *AttachedDisk { + if m != nil { + return m.BootDisk + } + return nil +} + +func (m *Instance) GetSecondaryDisks() []*AttachedDisk { + if m != nil { + return m.SecondaryDisks + } + return nil +} + +func (m *Instance) GetNetworkInterfaces() []*NetworkInterface { + if m != nil { + return m.NetworkInterfaces + } + return nil +} + +func (m *Instance) GetFqdn() string { + if m != nil { + return m.Fqdn + } + return "" +} + +func (m *Instance) GetSchedulingPolicy() *SchedulingPolicy { + if m != nil { + return m.SchedulingPolicy + } + return nil +} + +type Resources struct { + // The amount of memory available to the instance, specified in bytes. + Memory int64 `protobuf:"varint,1,opt,name=memory,proto3" json:"memory,omitempty"` + // The number of cores available to the instance. + Cores int64 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` + // Baseline level of CPU performance with the ability to burst performance above that baseline level. + // This field sets baseline performance for each core. 
+ CoreFraction int64 `protobuf:"varint,3,opt,name=core_fraction,json=coreFraction,proto3" json:"core_fraction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{1} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (dst *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(dst, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func (m *Resources) GetCores() int64 { + if m != nil { + return m.Cores + } + return 0 +} + +func (m *Resources) GetCoreFraction() int64 { + if m != nil { + return m.CoreFraction + } + return 0 +} + +type AttachedDisk struct { + // Access mode to the Disk resource. + Mode AttachedDisk_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=yandex.cloud.compute.v1.AttachedDisk_Mode" json:"mode,omitempty"` + // Serial number that is reflected into the /dev/disk/by-id/ tree + // of a Linux operating system running within the instance. + // + // This value can be used to reference the device for mounting, resizing, and so on, from within the instance. + DeviceName string `protobuf:"bytes,2,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + // Specifies whether the disk will be auto-deleted when the instance is deleted. + AutoDelete bool `protobuf:"varint,3,opt,name=auto_delete,json=autoDelete,proto3" json:"auto_delete,omitempty"` + // ID of the disk that is attached to the instance. 
+ DiskId string `protobuf:"bytes,4,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachedDisk) Reset() { *m = AttachedDisk{} } +func (m *AttachedDisk) String() string { return proto.CompactTextString(m) } +func (*AttachedDisk) ProtoMessage() {} +func (*AttachedDisk) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{2} +} +func (m *AttachedDisk) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttachedDisk.Unmarshal(m, b) +} +func (m *AttachedDisk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttachedDisk.Marshal(b, m, deterministic) +} +func (dst *AttachedDisk) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedDisk.Merge(dst, src) +} +func (m *AttachedDisk) XXX_Size() int { + return xxx_messageInfo_AttachedDisk.Size(m) +} +func (m *AttachedDisk) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedDisk.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachedDisk proto.InternalMessageInfo + +func (m *AttachedDisk) GetMode() AttachedDisk_Mode { + if m != nil { + return m.Mode + } + return AttachedDisk_MODE_UNSPECIFIED +} + +func (m *AttachedDisk) GetDeviceName() string { + if m != nil { + return m.DeviceName + } + return "" +} + +func (m *AttachedDisk) GetAutoDelete() bool { + if m != nil { + return m.AutoDelete + } + return false +} + +func (m *AttachedDisk) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type NetworkInterface struct { + // The index of the network interface, generated by the server, 0,1,2... etc. + // Currently only one network interface is supported per instance. + Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` + // MAC address that is assigned to the network interface. + MacAddress string `protobuf:"bytes,2,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` + // ID of the subnet. + SubnetId string `protobuf:"bytes,3,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Primary IPv4 address that is assigned to the instance for this network interface. + PrimaryV4Address *PrimaryAddress `protobuf:"bytes,4,opt,name=primary_v4_address,json=primaryV4Address,proto3" json:"primary_v4_address,omitempty"` + // Primary IPv6 address that is assigned to the instance for this network interface. IPv6 not available yet. 
+ PrimaryV6Address *PrimaryAddress `protobuf:"bytes,5,opt,name=primary_v6_address,json=primaryV6Address,proto3" json:"primary_v6_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkInterface) Reset() { *m = NetworkInterface{} } +func (m *NetworkInterface) String() string { return proto.CompactTextString(m) } +func (*NetworkInterface) ProtoMessage() {} +func (*NetworkInterface) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{3} +} +func (m *NetworkInterface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkInterface.Unmarshal(m, b) +} +func (m *NetworkInterface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkInterface.Marshal(b, m, deterministic) +} +func (dst *NetworkInterface) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkInterface.Merge(dst, src) +} +func (m *NetworkInterface) XXX_Size() int { + return xxx_messageInfo_NetworkInterface.Size(m) +} +func (m *NetworkInterface) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkInterface.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkInterface proto.InternalMessageInfo + +func (m *NetworkInterface) GetIndex() string { + if m != nil { + return m.Index + } + return "" +} + +func (m *NetworkInterface) GetMacAddress() string { + if m != nil { + return m.MacAddress + } + return "" +} + +func (m *NetworkInterface) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *NetworkInterface) GetPrimaryV4Address() *PrimaryAddress { + if m != nil { + return m.PrimaryV4Address + } + return nil +} + +func (m *NetworkInterface) GetPrimaryV6Address() *PrimaryAddress { + if m != nil { + return m.PrimaryV6Address + } + return nil +} + +type PrimaryAddress struct { + // An IPv4 internal network address that is assigned to the instance for this network interface. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // One-to-one NAT configuration. If missing, NAT has not been set up. 
+ OneToOneNat *OneToOneNat `protobuf:"bytes,2,opt,name=one_to_one_nat,json=oneToOneNat,proto3" json:"one_to_one_nat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrimaryAddress) Reset() { *m = PrimaryAddress{} } +func (m *PrimaryAddress) String() string { return proto.CompactTextString(m) } +func (*PrimaryAddress) ProtoMessage() {} +func (*PrimaryAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{4} +} +func (m *PrimaryAddress) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrimaryAddress.Unmarshal(m, b) +} +func (m *PrimaryAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrimaryAddress.Marshal(b, m, deterministic) +} +func (dst *PrimaryAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrimaryAddress.Merge(dst, src) +} +func (m *PrimaryAddress) XXX_Size() int { + return xxx_messageInfo_PrimaryAddress.Size(m) +} +func (m *PrimaryAddress) XXX_DiscardUnknown() { + xxx_messageInfo_PrimaryAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_PrimaryAddress proto.InternalMessageInfo + +func (m *PrimaryAddress) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *PrimaryAddress) GetOneToOneNat() *OneToOneNat { + if m != nil { + return m.OneToOneNat + } + return nil +} + +type OneToOneNat struct { + // An external IP address associated with this instance. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // IP version for the external IP address. + IpVersion IpVersion `protobuf:"varint,2,opt,name=ip_version,json=ipVersion,proto3,enum=yandex.cloud.compute.v1.IpVersion" json:"ip_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneToOneNat) Reset() { *m = OneToOneNat{} } +func (m *OneToOneNat) String() string { return proto.CompactTextString(m) } +func (*OneToOneNat) ProtoMessage() {} +func (*OneToOneNat) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{5} +} +func (m *OneToOneNat) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneToOneNat.Unmarshal(m, b) +} +func (m *OneToOneNat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneToOneNat.Marshal(b, m, deterministic) +} +func (dst *OneToOneNat) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneToOneNat.Merge(dst, src) +} +func (m *OneToOneNat) XXX_Size() int { + return xxx_messageInfo_OneToOneNat.Size(m) +} +func (m *OneToOneNat) XXX_DiscardUnknown() { + xxx_messageInfo_OneToOneNat.DiscardUnknown(m) +} + +var xxx_messageInfo_OneToOneNat proto.InternalMessageInfo + +func (m *OneToOneNat) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *OneToOneNat) GetIpVersion() IpVersion { + if m != nil { + return m.IpVersion + } + return IpVersion_IP_VERSION_UNSPECIFIED +} + +type SchedulingPolicy struct { + // Set if instance is preemptible. 
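// Editor's sketch (not part of the vendored file): reading an Instance through the
// generated nil-safe getters defined above. Every Get* method tolerates a nil
// receiver or a missing sub-message, so nested access such as
// iface.GetPrimaryV4Address().GetOneToOneNat().GetAddress() needs no explicit nil
// checks.
package main

import (
	"fmt"

	compute "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
)

// describeInstance prints the essentials of an Instance obtained elsewhere
// (for example from the InstanceService Get or List calls).
func describeInstance(inst *compute.Instance) {
	fmt.Printf("%s (%s) status=%s zone=%s\n",
		inst.GetName(), inst.GetFqdn(), inst.GetStatus(), inst.GetZoneId())
	fmt.Printf("resources: %d cores, %d bytes of memory\n",
		inst.GetResources().GetCores(), inst.GetResources().GetMemory())

	for _, iface := range inst.GetNetworkInterfaces() {
		v4 := iface.GetPrimaryV4Address()
		fmt.Printf("interface %s: subnet=%s internal=%s external=%s\n",
			iface.GetIndex(), iface.GetSubnetId(), v4.GetAddress(),
			v4.GetOneToOneNat().GetAddress())
	}
}

func main() {
	// Zero-value sub-messages are handled by the getters, so this prints sensibly
	// even though resources and network interfaces are unset.
	describeInstance(&compute.Instance{Name: "example", ZoneId: "ru-central1-a"})
}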
+ Preemptible bool `protobuf:"varint,1,opt,name=preemptible,proto3" json:"preemptible,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SchedulingPolicy) Reset() { *m = SchedulingPolicy{} } +func (m *SchedulingPolicy) String() string { return proto.CompactTextString(m) } +func (*SchedulingPolicy) ProtoMessage() {} +func (*SchedulingPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_9ed228c16f2b2625, []int{6} +} +func (m *SchedulingPolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SchedulingPolicy.Unmarshal(m, b) +} +func (m *SchedulingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SchedulingPolicy.Marshal(b, m, deterministic) +} +func (dst *SchedulingPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchedulingPolicy.Merge(dst, src) +} +func (m *SchedulingPolicy) XXX_Size() int { + return xxx_messageInfo_SchedulingPolicy.Size(m) +} +func (m *SchedulingPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_SchedulingPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_SchedulingPolicy proto.InternalMessageInfo + +func (m *SchedulingPolicy) GetPreemptible() bool { + if m != nil { + return m.Preemptible + } + return false +} + +func init() { + proto.RegisterType((*Instance)(nil), "yandex.cloud.compute.v1.Instance") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.Instance.LabelsEntry") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.Instance.MetadataEntry") + proto.RegisterType((*Resources)(nil), "yandex.cloud.compute.v1.Resources") + proto.RegisterType((*AttachedDisk)(nil), "yandex.cloud.compute.v1.AttachedDisk") + proto.RegisterType((*NetworkInterface)(nil), "yandex.cloud.compute.v1.NetworkInterface") + proto.RegisterType((*PrimaryAddress)(nil), "yandex.cloud.compute.v1.PrimaryAddress") + proto.RegisterType((*OneToOneNat)(nil), "yandex.cloud.compute.v1.OneToOneNat") + proto.RegisterType((*SchedulingPolicy)(nil), "yandex.cloud.compute.v1.SchedulingPolicy") + proto.RegisterEnum("yandex.cloud.compute.v1.IpVersion", IpVersion_name, IpVersion_value) + proto.RegisterEnum("yandex.cloud.compute.v1.Instance_Status", Instance_Status_name, Instance_Status_value) + proto.RegisterEnum("yandex.cloud.compute.v1.AttachedDisk_Mode", AttachedDisk_Mode_name, AttachedDisk_Mode_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/instance.proto", fileDescriptor_instance_9ed228c16f2b2625) +} + +var fileDescriptor_instance_9ed228c16f2b2625 = []byte{ + // 1058 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x6d, 0x6f, 0xe3, 0x44, + 0x10, 0x26, 0x2f, 0x4d, 0xe3, 0x49, 0x1b, 0xdc, 0xd5, 0xa9, 0x67, 0x95, 0x0f, 0xad, 0xc2, 0x5b, + 0x39, 0xa9, 0x8e, 0xae, 0x54, 0x15, 0x47, 0x25, 0x74, 0xb9, 0xc6, 0x07, 0x16, 0x6d, 0x12, 0x6d, + 0xd2, 0xf0, 0xf2, 0x01, 0x6b, 0xe3, 0xdd, 0xe4, 0x4c, 0x6d, 0xaf, 0xb1, 0xd7, 0x81, 0xf2, 0x3b, + 0xf8, 0x19, 0xfc, 0x2e, 0xfe, 0x04, 0x5f, 0xd0, 0xee, 0xda, 0xb9, 0x5c, 0xa4, 0x70, 0x07, 0x9f, + 0xb2, 0xf3, 0xcc, 0xcc, 0x33, 0x2f, 0x99, 0x9d, 0x35, 0x7c, 0xf2, 0x40, 0x62, 0xca, 0x7e, 0xeb, + 0xfa, 0x21, 0xcf, 0x69, 0xd7, 0xe7, 0x51, 0x92, 0x0b, 0xd6, 0x5d, 0x3e, 0xed, 0x06, 0x71, 0x26, + 0x48, 0xec, 0x33, 0x3b, 0x49, 0xb9, 0xe0, 0xe8, 0xb1, 0xb6, 0xb3, 0x95, 0x9d, 0x5d, 0xd8, 0xd9, + 0xcb, 0xa7, 0x47, 0xc7, 0x0b, 0xce, 0x17, 0x21, 0xeb, 0x2a, 0xb3, 0x59, 0x3e, 0xef, 0x8a, 0x20, + 0x62, 
0x99, 0x20, 0x51, 0xa2, 0x3d, 0x3b, 0x7f, 0x37, 0xa1, 0xe9, 0x16, 0x64, 0xa8, 0x0d, 0xd5, + 0x80, 0x5a, 0x95, 0x93, 0xca, 0xa9, 0x81, 0xab, 0x01, 0x45, 0x1f, 0x80, 0x31, 0xe7, 0x21, 0x65, + 0xa9, 0x17, 0x50, 0xab, 0xaa, 0xe0, 0xa6, 0x06, 0x5c, 0x8a, 0x9e, 0x01, 0xf8, 0x29, 0x23, 0x82, + 0x51, 0x8f, 0x08, 0xab, 0x76, 0x52, 0x39, 0x6d, 0x9d, 0x1f, 0xd9, 0x3a, 0x9e, 0x5d, 0xc6, 0xb3, + 0x27, 0x65, 0x3c, 0x6c, 0x14, 0xd6, 0x3d, 0x81, 0x10, 0xd4, 0x63, 0x12, 0x31, 0xab, 0xae, 0x28, + 0xd5, 0x19, 0x9d, 0x40, 0x8b, 0xb2, 0xcc, 0x4f, 0x83, 0x44, 0x04, 0x3c, 0xb6, 0x76, 0x94, 0x6a, + 0x1d, 0x42, 0x0e, 0x34, 0x42, 0x32, 0x63, 0x61, 0x66, 0x35, 0x4e, 0x6a, 0xa7, 0xad, 0xf3, 0x33, + 0x7b, 0x4b, 0xd5, 0x76, 0x59, 0x90, 0x7d, 0xa3, 0xec, 0x9d, 0x58, 0xa4, 0x0f, 0xb8, 0x70, 0x46, + 0x8f, 0x61, 0xf7, 0x77, 0x1e, 0x33, 0x59, 0xd2, 0xae, 0x0a, 0xd2, 0x90, 0xa2, 0x4b, 0xd1, 0x31, + 0xb4, 0x92, 0x90, 0x88, 0x39, 0x4f, 0x23, 0xa9, 0x6c, 0x2a, 0x25, 0x94, 0x90, 0x4b, 0xd1, 0x73, + 0x30, 0x52, 0x96, 0xf1, 0x3c, 0xf5, 0x59, 0x66, 0x19, 0xaa, 0xe0, 0xce, 0xd6, 0x1c, 0x70, 0x69, + 0x89, 0x5f, 0x3b, 0xa1, 0xe7, 0xd0, 0xc8, 0x04, 0x11, 0x79, 0x66, 0xc1, 0x49, 0xe5, 0xb4, 0x7d, + 0x7e, 0xfa, 0xf6, 0x12, 0xc6, 0xca, 0x1e, 0x17, 0x7e, 0xe8, 0x5b, 0x68, 0x46, 0x4c, 0x10, 0x4a, + 0x04, 0xb1, 0x5a, 0xaa, 0x0d, 0xdd, 0xb7, 0x73, 0xdc, 0x16, 0x1e, 0xba, 0x11, 0x2b, 0x02, 0xf4, + 0x02, 0x8c, 0x19, 0xe7, 0xc2, 0xa3, 0x41, 0x76, 0x6f, 0xed, 0xa9, 0x82, 0x3e, 0xde, 0xca, 0xd6, + 0x13, 0x82, 0xf8, 0xaf, 0x18, 0xed, 0x07, 0xd9, 0x3d, 0x6e, 0x4a, 0x3f, 0x79, 0x42, 0x03, 0x78, + 0x3f, 0x63, 0x3e, 0x8f, 0x29, 0x49, 0x1f, 0x14, 0x51, 0x66, 0xed, 0xab, 0xbc, 0xde, 0x91, 0xa9, + 0xbd, 0xf2, 0x96, 0x62, 0x86, 0xbe, 0x07, 0x14, 0x33, 0xf1, 0x2b, 0x4f, 0xef, 0xbd, 0x20, 0x16, + 0x2c, 0x9d, 0x13, 0xd9, 0xed, 0xb6, 0xa2, 0xfc, 0x6c, 0x2b, 0xe5, 0x40, 0xbb, 0xb8, 0xa5, 0x07, + 0x3e, 0x88, 0x37, 0x90, 0x4c, 0x4e, 0xdd, 0xfc, 0x17, 0x1a, 0x5b, 0xa6, 0x9e, 0x3a, 0x79, 0x46, + 0x53, 0x38, 0xc8, 0x64, 0x2a, 0x79, 0x18, 0xc4, 0x0b, 0x2f, 0xe1, 0x61, 0xe0, 0x3f, 0x58, 0x07, + 0xaa, 0x13, 0xdb, 0x83, 0x8d, 0x57, 0x1e, 0x23, 0xe5, 0x80, 0xcd, 0x6c, 0x03, 0x39, 0x7a, 0x06, + 0xad, 0xb5, 0xd9, 0x43, 0x26, 0xd4, 0xee, 0xd9, 0x43, 0x71, 0xb3, 0xe4, 0x11, 0x3d, 0x82, 0x9d, + 0x25, 0x09, 0x73, 0x56, 0x5c, 0x2b, 0x2d, 0x7c, 0x59, 0xfd, 0xa2, 0x72, 0x74, 0x05, 0xfb, 0x6f, + 0xfc, 0x5f, 0xff, 0xc5, 0xb9, 0xf3, 0x67, 0x05, 0x1a, 0x7a, 0x62, 0xd0, 0x21, 0xa0, 0xf1, 0xa4, + 0x37, 0xb9, 0x1b, 0x7b, 0x77, 0x83, 0xf1, 0xc8, 0xb9, 0x76, 0x5f, 0xba, 0x4e, 0xdf, 0x7c, 0x0f, + 0x99, 0xb0, 0x37, 0xc2, 0xc3, 0xa9, 0x3b, 0x76, 0x87, 0x03, 0x77, 0xf0, 0xb5, 0x59, 0x41, 0x2d, + 0xd8, 0xc5, 0x77, 0x03, 0x25, 0x54, 0xd1, 0x1e, 0x34, 0xc7, 0x93, 0xe1, 0x68, 0x24, 0xa5, 0x9a, + 0x54, 0x29, 0xc9, 0xe9, 0x9b, 0x75, 0xad, 0xea, 0xe1, 0x89, 0x54, 0xed, 0xa0, 0x36, 0x00, 0x76, + 0x56, 0x72, 0x43, 0x6a, 0xef, 0x46, 0xfd, 0x9e, 0x92, 0x76, 0x91, 0x01, 0x3b, 0x0e, 0xc6, 0x43, + 0x6c, 0x36, 0x25, 0xc7, 0x35, 0xee, 0x8d, 0xbf, 0x71, 0xfa, 0xa6, 0x21, 0xad, 0xfa, 0xce, 0x8d, + 0xa3, 0xac, 0xa0, 0xf3, 0x13, 0x18, 0xab, 0x7b, 0x82, 0x0e, 0xa1, 0x11, 0xb1, 0x88, 0xa7, 0xba, + 0xd4, 0x1a, 0x2e, 0x24, 0x59, 0xad, 0xcf, 0x53, 0x96, 0xa9, 0x6a, 0x6b, 0x58, 0x0b, 0xe8, 0x43, + 0xd8, 0x97, 0x07, 0x6f, 0x9e, 0x12, 0x5f, 0x6d, 0x8c, 0x9a, 0xd2, 0xee, 0x49, 0xf0, 0x65, 0x81, + 0x75, 0xfe, 0xaa, 0xc0, 0xde, 0xfa, 0xb4, 0xa1, 0xaf, 0xa0, 0x1e, 0x71, 0xca, 0x54, 0x84, 0xf6, + 0xf9, 0x93, 0x77, 0x1a, 0x51, 0xfb, 0x96, 0x53, 0x86, 0x95, 0x9f, 0xdc, 0x11, 0x94, 0x2d, 0x03, + 0x9f, 0x79, 0x6a, 0x81, 0xe9, 
0xfe, 0x83, 0x86, 0x06, 0x72, 0x8d, 0x1d, 0x43, 0x8b, 0xe4, 0x82, + 0x7b, 0x94, 0x85, 0x4c, 0x30, 0x95, 0x54, 0x13, 0x83, 0x84, 0xfa, 0x0a, 0x91, 0xeb, 0x47, 0xde, + 0x12, 0xb9, 0x61, 0xf4, 0xfa, 0x6b, 0x48, 0xd1, 0xa5, 0x9d, 0x2b, 0xa8, 0xcb, 0x40, 0xe8, 0x11, + 0x98, 0xb7, 0xc3, 0xbe, 0xb3, 0xf1, 0xaf, 0xed, 0x83, 0x81, 0x9d, 0x5e, 0xdf, 0x1b, 0x0e, 0x6e, + 0x7e, 0x30, 0x2b, 0xba, 0xf9, 0xbd, 0xbe, 0xf7, 0x1d, 0x76, 0x27, 0x8e, 0x59, 0xed, 0xfc, 0x51, + 0x05, 0x73, 0xf3, 0x0e, 0xc8, 0xc6, 0x05, 0xb2, 0xbc, 0x62, 0x74, 0xb4, 0x20, 0x33, 0x8c, 0x88, + 0xef, 0x11, 0x4a, 0x53, 0x96, 0x65, 0x65, 0x09, 0x11, 0xf1, 0x7b, 0x1a, 0x91, 0x5b, 0x3f, 0xcb, + 0x67, 0x31, 0x13, 0x32, 0xc7, 0x9a, 0xde, 0xfa, 0x1a, 0x70, 0x29, 0xba, 0x03, 0x94, 0xa4, 0x41, + 0x24, 0x2f, 0xfb, 0xf2, 0x62, 0x45, 0x52, 0x57, 0x37, 0xe6, 0xd3, 0xad, 0xed, 0x1c, 0x69, 0x97, + 0x22, 0x02, 0x36, 0x0b, 0x8a, 0xe9, 0x45, 0x19, 0x73, 0x9d, 0xf6, 0x72, 0x45, 0xbb, 0xf3, 0x3f, + 0x69, 0x2f, 0x0b, 0xa4, 0x93, 0x43, 0xfb, 0x4d, 0x1b, 0x64, 0xc1, 0x6e, 0xc9, 0xae, 0xbb, 0x52, + 0x8a, 0xc8, 0x85, 0xb6, 0x7c, 0x16, 0x04, 0xf7, 0xe4, 0x4f, 0x4c, 0x84, 0x6a, 0x4d, 0xeb, 0xfc, + 0xa3, 0xad, 0xe1, 0x87, 0x31, 0x9b, 0xf0, 0x61, 0xcc, 0x06, 0x44, 0xe0, 0x16, 0x7f, 0x2d, 0x74, + 0x7e, 0x86, 0xd6, 0x9a, 0xee, 0x5f, 0x62, 0xf6, 0x00, 0x82, 0xc4, 0x5b, 0xb2, 0x34, 0x93, 0x13, + 0x5c, 0x55, 0x43, 0xb9, 0xfd, 0x49, 0x71, 0x93, 0xa9, 0xb6, 0xc4, 0x46, 0x50, 0x1e, 0x3b, 0x17, + 0x60, 0x6e, 0xee, 0x23, 0xf9, 0x96, 0x26, 0x29, 0x63, 0x51, 0x22, 0x82, 0x59, 0xa8, 0x87, 0xbd, + 0x89, 0xd7, 0xa1, 0x27, 0x57, 0x60, 0xac, 0xd8, 0xd0, 0x11, 0x1c, 0xba, 0x23, 0x6f, 0xea, 0x60, + 0xb9, 0x12, 0x36, 0xe6, 0xae, 0x09, 0x75, 0x77, 0x34, 0xbd, 0x30, 0x2b, 0xc5, 0xe9, 0xd2, 0xac, + 0xbe, 0x70, 0x7e, 0xbc, 0x5e, 0x04, 0xe2, 0x55, 0x3e, 0x93, 0xc9, 0x75, 0x75, 0xb6, 0x67, 0xfa, + 0x13, 0x65, 0xc1, 0xcf, 0x16, 0x2c, 0x56, 0xaf, 0x7f, 0x77, 0xcb, 0xb7, 0xcb, 0x55, 0x71, 0x9c, + 0x35, 0x94, 0xd9, 0xe7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x78, 0x8d, 0xb6, 0x7a, 0xe5, 0x08, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance_service.pb.go new file mode 100644 index 000000000..9eec9b43c --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/instance_service.pb.go @@ -0,0 +1,2766 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/instance_service.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type InstanceView int32 + +const ( + // Doesn't include the metadata field in the server response. + InstanceView_BASIC InstanceView = 0 + // Returns the full representation of the instance in the server response, including metadata. + InstanceView_FULL InstanceView = 1 +) + +var InstanceView_name = map[int32]string{ + 0: "BASIC", + 1: "FULL", +} +var InstanceView_value = map[string]int32{ + "BASIC": 0, + "FULL": 1, +} + +func (x InstanceView) String() string { + return proto.EnumName(InstanceView_name, int32(x)) +} +func (InstanceView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{0} +} + +type AttachedDiskSpec_Mode int32 + +const ( + AttachedDiskSpec_MODE_UNSPECIFIED AttachedDiskSpec_Mode = 0 + // Read-only access. + AttachedDiskSpec_READ_ONLY AttachedDiskSpec_Mode = 1 + // Read/Write access. Default value. + AttachedDiskSpec_READ_WRITE AttachedDiskSpec_Mode = 2 +) + +var AttachedDiskSpec_Mode_name = map[int32]string{ + 0: "MODE_UNSPECIFIED", + 1: "READ_ONLY", + 2: "READ_WRITE", +} +var AttachedDiskSpec_Mode_value = map[string]int32{ + "MODE_UNSPECIFIED": 0, + "READ_ONLY": 1, + "READ_WRITE": 2, +} + +func (x AttachedDiskSpec_Mode) String() string { + return proto.EnumName(AttachedDiskSpec_Mode_name, int32(x)) +} +func (AttachedDiskSpec_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{26, 0} +} + +type GetInstanceRequest struct { + // ID of the Instance resource to return. + // To get the instance ID, use a [InstanceService.List] request. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // Defines which information about the Instance resource should be returned in the server response. + View InstanceView `protobuf:"varint,2,opt,name=view,proto3,enum=yandex.cloud.compute.v1.InstanceView" json:"view,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetInstanceRequest) Reset() { *m = GetInstanceRequest{} } +func (m *GetInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceRequest) ProtoMessage() {} +func (*GetInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{0} +} +func (m *GetInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetInstanceRequest.Unmarshal(m, b) +} +func (m *GetInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetInstanceRequest.Marshal(b, m, deterministic) +} +func (dst *GetInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetInstanceRequest.Merge(dst, src) +} +func (m *GetInstanceRequest) XXX_Size() int { + return xxx_messageInfo_GetInstanceRequest.Size(m) +} +func (m *GetInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetInstanceRequest proto.InternalMessageInfo + +func (m *GetInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *GetInstanceRequest) GetView() InstanceView { + if m != nil { + return m.View + } + return InstanceView_BASIC +} + +type ListInstancesRequest struct { + // ID of the Folder to list instances in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. 
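// Editor's sketch (not part of the vendored file): fetching a single instance with
// its metadata included by setting View to InstanceView_FULL in a GetInstanceRequest
// (BASIC, the default, omits the metadata field). The NewInstanceServiceClient
// constructor and its Get method are the client-side symbols protoc-gen-go normally
// emits for this service; they are assumed here rather than shown in this part of
// the file.
package example

import (
	"context"

	compute "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
	"google.golang.org/grpc"
)

// getInstanceWithMetadata assumes conn is an already authenticated connection to
// the Compute API endpoint.
func getInstanceWithMetadata(ctx context.Context, conn *grpc.ClientConn, id string) (*compute.Instance, error) {
	client := compute.NewInstanceServiceClient(conn)
	return client.Get(ctx, &compute.GetInstanceRequest{
		InstanceId: id,
		View:       compute.InstanceView_FULL,
	})
}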
+ FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListInstancesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, + // set [page_token] to the [ListInstancesResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [Instance.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z]([-a-z0-9]{,61}[a-z0-9])?$`. + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListInstancesRequest) Reset() { *m = ListInstancesRequest{} } +func (m *ListInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstancesRequest) ProtoMessage() {} +func (*ListInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{1} +} +func (m *ListInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListInstancesRequest.Unmarshal(m, b) +} +func (m *ListInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *ListInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListInstancesRequest.Merge(dst, src) +} +func (m *ListInstancesRequest) XXX_Size() int { + return xxx_messageInfo_ListInstancesRequest.Size(m) +} +func (m *ListInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListInstancesRequest proto.InternalMessageInfo + +func (m *ListInstancesRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListInstancesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstancesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListInstancesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListInstancesResponse struct { + // List of Instance resources. + Instances []*Instance `protobuf:"bytes,1,rep,name=instances,proto3" json:"instances,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListInstancesRequest.page_size], use + // the [next_page_token] as the value + // for the [ListInstancesRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
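// Editor's sketch (not part of the vendored file): paging through all instances in
// a folder using page_size, page_token and next_page_token exactly as documented
// above, with an optional filter on the Instance.name field. The
// NewInstanceServiceClient constructor and its List method are the client-side
// symbols protoc-gen-go normally emits for this service and are assumed here; the
// precise filter syntax accepted by the API should be checked against the service
// documentation.
package example

import (
	"context"

	compute "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
	"google.golang.org/grpc"
)

// listAllInstances assumes conn is an already authenticated connection to the
// Compute API endpoint.
func listAllInstances(ctx context.Context, conn *grpc.ClientConn, folderID string) ([]*compute.Instance, error) {
	client := compute.NewInstanceServiceClient(conn)

	var all []*compute.Instance
	req := &compute.ListInstancesRequest{
		FolderId: folderID,
		PageSize: 100,
		// Filter: `name="web-1"`, // optional; only the Instance.name field is filterable
	}
	for {
		resp, err := client.List(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetInstances()...)

		// An empty next_page_token means the last page has been reached.
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}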
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListInstancesResponse) Reset() { *m = ListInstancesResponse{} } +func (m *ListInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstancesResponse) ProtoMessage() {} +func (*ListInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{2} +} +func (m *ListInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListInstancesResponse.Unmarshal(m, b) +} +func (m *ListInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *ListInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListInstancesResponse.Merge(dst, src) +} +func (m *ListInstancesResponse) XXX_Size() int { + return xxx_messageInfo_ListInstancesResponse.Size(m) +} +func (m *ListInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListInstancesResponse proto.InternalMessageInfo + +func (m *ListInstancesResponse) GetInstances() []*Instance { + if m != nil { + return m.Instances + } + return nil +} + +func (m *ListInstancesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateInstanceRequest struct { + // ID of the folder to create an instance in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the instance. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the instance. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the availability zone where the instance resides. + // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request + ZoneId string `protobuf:"bytes,5,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // ID of the hardware platform configuration for the instance. + // This field affects the available values in [resources_spec] field. + // + // Currently only one platform is available - `standard-v1`. This platform is suitable for most tasks. + // This platform allows you to create various types of instances: with a large amount of memory, + // with a large number of cores, with a burstable performance. + PlatformId string `protobuf:"bytes,6,opt,name=platform_id,json=platformId,proto3" json:"platform_id,omitempty"` + // Computing resources of the instance, such as the amount of memory and number of cores. + // To get a list of available values, see [Computing resources](/docs/compute/concepts/vm-types). + ResourcesSpec *ResourcesSpec `protobuf:"bytes,7,opt,name=resources_spec,json=resourcesSpec,proto3" json:"resources_spec,omitempty"` + // The metadata `` key:value `` pairs that will be assigned to this instance. This includes custom metadata and predefined keys. 
+ // The total size of all keys and values must be less than 512 KB. + // + // Values are free-form strings, and only have meaning as interpreted by the programs which configure the instance. + // The only restriction placed on values is that their size must be 256 KB or less. + // + // For example, you may use the metadata in order to provide your public SSH key to the instance. + // For more information, see [Metadata](/docs/compute/concepts/vm-metadata). + Metadata map[string]string `protobuf:"bytes,8,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Boot disk to attach to the instance. + BootDiskSpec *AttachedDiskSpec `protobuf:"bytes,9,opt,name=boot_disk_spec,json=bootDiskSpec,proto3" json:"boot_disk_spec,omitempty"` + // Array of secondary disks to attach to the instance. + SecondaryDiskSpecs []*AttachedDiskSpec `protobuf:"bytes,10,rep,name=secondary_disk_specs,json=secondaryDiskSpecs,proto3" json:"secondary_disk_specs,omitempty"` + // Network configuration for the instance. Specifies how the network interface is configured + // to interact with other services on the internal network and on the internet. + // Currently only one network interface is supported per instance. + NetworkInterfaceSpecs []*NetworkInterfaceSpec `protobuf:"bytes,11,rep,name=network_interface_specs,json=networkInterfaceSpecs,proto3" json:"network_interface_specs,omitempty"` + // Host name for the instance. + // This field is used to generate the [yandex.cloud.compute.v1.Instance.fqdn] value. + // The host name must be unique within the network and region. + // If not specified, the host name will be equal to [yandex.cloud.compute.v1.Instance.id] of the instance + // and FQDN will be `.auto.internal`. Otherwise FQDN will be `..internal`. + Hostname string `protobuf:"bytes,12,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Scheduling policy configuration. 
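// Editor's sketch (not part of the vendored file): a client-side check of the
// metadata limits documented above for CreateInstanceRequest — the combined size of
// all keys and values must stay under 512 KB, and each individual value must be
// 256 KB or less. Reading "KB" as 1024 bytes is an assumption, and the metadata key
// names in main are purely illustrative; the server remains the authority on both
// the limits and the recognized keys.
package main

import (
	"fmt"
	"log"
)

const (
	maxMetadataTotal = 512 * 1024 // total bytes across all keys and values
	maxMetadataValue = 256 * 1024 // bytes per individual value
)

func validateInstanceMetadata(md map[string]string) error {
	total := 0
	for k, v := range md {
		if len(v) > maxMetadataValue {
			return fmt.Errorf("metadata value for %q is %d bytes, limit is %d", k, len(v), maxMetadataValue)
		}
		total += len(k) + len(v)
	}
	if total >= maxMetadataTotal {
		return fmt.Errorf("metadata totals %d bytes, must be less than %d", total, maxMetadataTotal)
	}
	return nil
}

func main() {
	md := map[string]string{
		"ssh-keys":  "user:ssh-ed25519 AAAA... user@host", // illustrative key name and value
		"user-data": "#cloud-config\n",
	}
	if err := validateInstanceMetadata(md); err != nil {
		log.Fatal(err)
	}
	fmt.Println("metadata within documented limits")
}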
+ SchedulingPolicy *SchedulingPolicy `protobuf:"bytes,13,opt,name=scheduling_policy,json=schedulingPolicy,proto3" json:"scheduling_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateInstanceRequest) Reset() { *m = CreateInstanceRequest{} } +func (m *CreateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceRequest) ProtoMessage() {} +func (*CreateInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{3} +} +func (m *CreateInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateInstanceRequest.Unmarshal(m, b) +} +func (m *CreateInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateInstanceRequest.Marshal(b, m, deterministic) +} +func (dst *CreateInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateInstanceRequest.Merge(dst, src) +} +func (m *CreateInstanceRequest) XXX_Size() int { + return xxx_messageInfo_CreateInstanceRequest.Size(m) +} +func (m *CreateInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateInstanceRequest proto.InternalMessageInfo + +func (m *CreateInstanceRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateInstanceRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateInstanceRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateInstanceRequest) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *CreateInstanceRequest) GetPlatformId() string { + if m != nil { + return m.PlatformId + } + return "" +} + +func (m *CreateInstanceRequest) GetResourcesSpec() *ResourcesSpec { + if m != nil { + return m.ResourcesSpec + } + return nil +} + +func (m *CreateInstanceRequest) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *CreateInstanceRequest) GetBootDiskSpec() *AttachedDiskSpec { + if m != nil { + return m.BootDiskSpec + } + return nil +} + +func (m *CreateInstanceRequest) GetSecondaryDiskSpecs() []*AttachedDiskSpec { + if m != nil { + return m.SecondaryDiskSpecs + } + return nil +} + +func (m *CreateInstanceRequest) GetNetworkInterfaceSpecs() []*NetworkInterfaceSpec { + if m != nil { + return m.NetworkInterfaceSpecs + } + return nil +} + +func (m *CreateInstanceRequest) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *CreateInstanceRequest) GetSchedulingPolicy() *SchedulingPolicy { + if m != nil { + return m.SchedulingPolicy + } + return nil +} + +type CreateInstanceMetadata struct { + // ID of the instance that is being created. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateInstanceMetadata) Reset() { *m = CreateInstanceMetadata{} } +func (m *CreateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceMetadata) ProtoMessage() {} +func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{4} +} +func (m *CreateInstanceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateInstanceMetadata.Unmarshal(m, b) +} +func (m *CreateInstanceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateInstanceMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateInstanceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateInstanceMetadata.Merge(dst, src) +} +func (m *CreateInstanceMetadata) XXX_Size() int { + return xxx_messageInfo_CreateInstanceMetadata.Size(m) +} +func (m *CreateInstanceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateInstanceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateInstanceMetadata proto.InternalMessageInfo + +func (m *CreateInstanceMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type UpdateInstanceRequest struct { + // ID of the Instance resource to update. + // To get the instance ID, use a [InstanceService.List] request. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // Field mask that specifies which fields of the Instance resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the instance. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the instance. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + // + // Existing set of `` labels `` is completely replaced by the provided set. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the hardware platform configuration for the instance. + // This field affects the available values in [resources_spec] field. + // + // Currently only one platform is available - `standard-v1`. This platform is suitable for most tasks. + // This platform allows you to create various types of instances: with a large amount of memory, + // with a large number of cores, with a burstable performance. + PlatformId string `protobuf:"bytes,6,opt,name=platform_id,json=platformId,proto3" json:"platform_id,omitempty"` + // Computing resources of the instance, such as the amount of memory and number of cores. + // To get a list of available values, see [Computing resources](/docs/compute/concepts/vm-types). + ResourcesSpec *ResourcesSpec `protobuf:"bytes,7,opt,name=resources_spec,json=resourcesSpec,proto3" json:"resources_spec,omitempty"` + // The metadata `` key:value `` pairs that will be assigned to this instance. This includes custom metadata and predefined keys. + // The total size of all keys and values must be less than 512 KB. 
+ // + // Existing set of `` metadata `` is completely replaced by the provided set. + // + // Values are free-form strings, and only have meaning as interpreted by the programs which configure the instance. + // The only restriction placed on values is that their size must be 256 KB or less. + // + // For example, you may use the metadata in order to provide your public SSH key to the instance. + // For more information, see [Metadata](/docs/compute/concepts/vm-metadata). + Metadata map[string]string `protobuf:"bytes,8,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateInstanceRequest) Reset() { *m = UpdateInstanceRequest{} } +func (m *UpdateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceRequest) ProtoMessage() {} +func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{5} +} +func (m *UpdateInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateInstanceRequest.Unmarshal(m, b) +} +func (m *UpdateInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateInstanceRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateInstanceRequest.Merge(dst, src) +} +func (m *UpdateInstanceRequest) XXX_Size() int { + return xxx_messageInfo_UpdateInstanceRequest.Size(m) +} +func (m *UpdateInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateInstanceRequest proto.InternalMessageInfo + +func (m *UpdateInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *UpdateInstanceRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateInstanceRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateInstanceRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateInstanceRequest) GetPlatformId() string { + if m != nil { + return m.PlatformId + } + return "" +} + +func (m *UpdateInstanceRequest) GetResourcesSpec() *ResourcesSpec { + if m != nil { + return m.ResourcesSpec + } + return nil +} + +func (m *UpdateInstanceRequest) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +type UpdateInstanceMetadata struct { + // ID of the Instance resource that is being updated. 
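// Editor's sketch (not part of the vendored file): building an UpdateInstanceRequest
// whose update_mask restricts the change to the name and labels fields. As documented
// above, the provided labels set completely replaces the existing one, so the request
// must carry every label that should remain. The NewInstanceServiceClient constructor
// and its Update method, returning an asynchronous operation.Operation, are the
// client-side symbols protoc-gen-go normally emits for this service and are assumed
// here.
package example

import (
	"context"

	compute "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
	"github.com/yandex-cloud/go-genproto/yandex/cloud/operation"
	field_mask "google.golang.org/genproto/protobuf/field_mask"
	"google.golang.org/grpc"
)

// renameInstance assumes conn is an already authenticated connection to the
// Compute API endpoint.
func renameInstance(ctx context.Context, conn *grpc.ClientConn, id, newName string, labels map[string]string) (*operation.Operation, error) {
	client := compute.NewInstanceServiceClient(conn)
	return client.Update(ctx, &compute.UpdateInstanceRequest{
		InstanceId: id,
		// Only the fields named in the mask are updated; everything else is left untouched.
		UpdateMask: &field_mask.FieldMask{Paths: []string{"name", "labels"}},
		Name:       newName,
		Labels:     labels,
	})
}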
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateInstanceMetadata) Reset() { *m = UpdateInstanceMetadata{} } +func (m *UpdateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceMetadata) ProtoMessage() {} +func (*UpdateInstanceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{6} +} +func (m *UpdateInstanceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateInstanceMetadata.Unmarshal(m, b) +} +func (m *UpdateInstanceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateInstanceMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateInstanceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateInstanceMetadata.Merge(dst, src) +} +func (m *UpdateInstanceMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateInstanceMetadata.Size(m) +} +func (m *UpdateInstanceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateInstanceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateInstanceMetadata proto.InternalMessageInfo + +func (m *UpdateInstanceMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type DeleteInstanceRequest struct { + // ID of the instance to delete. + // To get the instance ID, use a [InstanceService.List] request. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteInstanceRequest) Reset() { *m = DeleteInstanceRequest{} } +func (m *DeleteInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteInstanceRequest) ProtoMessage() {} +func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{7} +} +func (m *DeleteInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteInstanceRequest.Unmarshal(m, b) +} +func (m *DeleteInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteInstanceRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteInstanceRequest.Merge(dst, src) +} +func (m *DeleteInstanceRequest) XXX_Size() int { + return xxx_messageInfo_DeleteInstanceRequest.Size(m) +} +func (m *DeleteInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteInstanceRequest proto.InternalMessageInfo + +func (m *DeleteInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type DeleteInstanceMetadata struct { + // ID of the instance that is being deleted. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteInstanceMetadata) Reset() { *m = DeleteInstanceMetadata{} } +func (m *DeleteInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteInstanceMetadata) ProtoMessage() {} +func (*DeleteInstanceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{8} +} +func (m *DeleteInstanceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteInstanceMetadata.Unmarshal(m, b) +} +func (m *DeleteInstanceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteInstanceMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteInstanceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteInstanceMetadata.Merge(dst, src) +} +func (m *DeleteInstanceMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteInstanceMetadata.Size(m) +} +func (m *DeleteInstanceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteInstanceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteInstanceMetadata proto.InternalMessageInfo + +func (m *DeleteInstanceMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type UpdateInstanceMetadataRequest struct { + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + Delete []string `protobuf:"bytes,2,rep,name=delete,proto3" json:"delete,omitempty"` + Upsert map[string]string `protobuf:"bytes,3,rep,name=upsert,proto3" json:"upsert,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateInstanceMetadataRequest) Reset() { *m = UpdateInstanceMetadataRequest{} } +func (m *UpdateInstanceMetadataRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceMetadataRequest) ProtoMessage() {} +func (*UpdateInstanceMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{9} +} +func (m *UpdateInstanceMetadataRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateInstanceMetadataRequest.Unmarshal(m, b) +} +func (m *UpdateInstanceMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateInstanceMetadataRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateInstanceMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateInstanceMetadataRequest.Merge(dst, src) +} +func (m *UpdateInstanceMetadataRequest) XXX_Size() int { + return xxx_messageInfo_UpdateInstanceMetadataRequest.Size(m) +} +func (m *UpdateInstanceMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateInstanceMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateInstanceMetadataRequest proto.InternalMessageInfo + +func (m *UpdateInstanceMetadataRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *UpdateInstanceMetadataRequest) GetDelete() []string { + if m != nil { + return m.Delete + } + return nil +} + +func (m *UpdateInstanceMetadataRequest) GetUpsert() map[string]string { + if m != nil { + return m.Upsert + } + return nil +} + 
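+// NOTE: Illustrative sketch, not part of the generated code. Assuming this
+// package is imported under the name "compute" and an InstanceServiceClient
+// named "client" has already been constructed over a gRPC connection, a
+// metadata update that removes one key and upserts another might look like
+// the example below; the instance ID and key names are placeholders.
+//
+//	op, err := client.UpdateMetadata(ctx, &compute.UpdateInstanceMetadataRequest{
+//		InstanceId: "<instance-id>",
+//		Delete:     []string{"obsolete-key"},
+//		Upsert:     map[string]string{"ssh-keys": "user:ssh-rsa AAAA..."},
+//	})
+//	if err != nil {
+//		// handle the RPC error
+//	}
+//	_ = op // the returned operation tracks the asynchronous metadata update
+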
+type UpdateInstanceMetadataMetadata struct { + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateInstanceMetadataMetadata) Reset() { *m = UpdateInstanceMetadataMetadata{} } +func (m *UpdateInstanceMetadataMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceMetadataMetadata) ProtoMessage() {} +func (*UpdateInstanceMetadataMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{10} +} +func (m *UpdateInstanceMetadataMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateInstanceMetadataMetadata.Unmarshal(m, b) +} +func (m *UpdateInstanceMetadataMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateInstanceMetadataMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateInstanceMetadataMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateInstanceMetadataMetadata.Merge(dst, src) +} +func (m *UpdateInstanceMetadataMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateInstanceMetadataMetadata.Size(m) +} +func (m *UpdateInstanceMetadataMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateInstanceMetadataMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateInstanceMetadataMetadata proto.InternalMessageInfo + +func (m *UpdateInstanceMetadataMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type GetInstanceSerialPortOutputRequest struct { + // ID of the instance to return the serial port output for. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetInstanceSerialPortOutputRequest) Reset() { *m = GetInstanceSerialPortOutputRequest{} } +func (m *GetInstanceSerialPortOutputRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceSerialPortOutputRequest) ProtoMessage() {} +func (*GetInstanceSerialPortOutputRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{11} +} +func (m *GetInstanceSerialPortOutputRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetInstanceSerialPortOutputRequest.Unmarshal(m, b) +} +func (m *GetInstanceSerialPortOutputRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetInstanceSerialPortOutputRequest.Marshal(b, m, deterministic) +} +func (dst *GetInstanceSerialPortOutputRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetInstanceSerialPortOutputRequest.Merge(dst, src) +} +func (m *GetInstanceSerialPortOutputRequest) XXX_Size() int { + return xxx_messageInfo_GetInstanceSerialPortOutputRequest.Size(m) +} +func (m *GetInstanceSerialPortOutputRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetInstanceSerialPortOutputRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetInstanceSerialPortOutputRequest proto.InternalMessageInfo + +func (m *GetInstanceSerialPortOutputRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type GetInstanceSerialPortOutputResponse struct { + // The contents of the serial port output, starting from the time when the instance + // started to boot. 
+ Contents string `protobuf:"bytes,1,opt,name=contents,proto3" json:"contents,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetInstanceSerialPortOutputResponse) Reset() { *m = GetInstanceSerialPortOutputResponse{} }
+func (m *GetInstanceSerialPortOutputResponse) String() string { return proto.CompactTextString(m) }
+func (*GetInstanceSerialPortOutputResponse) ProtoMessage() {}
+func (*GetInstanceSerialPortOutputResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_instance_service_80618e87048981ad, []int{12}
+}
+func (m *GetInstanceSerialPortOutputResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetInstanceSerialPortOutputResponse.Unmarshal(m, b)
+}
+func (m *GetInstanceSerialPortOutputResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetInstanceSerialPortOutputResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetInstanceSerialPortOutputResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetInstanceSerialPortOutputResponse.Merge(dst, src)
+}
+func (m *GetInstanceSerialPortOutputResponse) XXX_Size() int {
+ return xxx_messageInfo_GetInstanceSerialPortOutputResponse.Size(m)
+}
+func (m *GetInstanceSerialPortOutputResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetInstanceSerialPortOutputResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetInstanceSerialPortOutputResponse proto.InternalMessageInfo
+
+func (m *GetInstanceSerialPortOutputResponse) GetContents() string {
+ if m != nil {
+ return m.Contents
+ }
+ return ""
+}
+
+type StopInstanceRequest struct {
+ // ID of the instance to stop.
+ // To get the instance ID, use a [InstanceService.List] request.
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StopInstanceRequest) Reset() { *m = StopInstanceRequest{} }
+func (m *StopInstanceRequest) String() string { return proto.CompactTextString(m) }
+func (*StopInstanceRequest) ProtoMessage() {}
+func (*StopInstanceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_instance_service_80618e87048981ad, []int{13}
+}
+func (m *StopInstanceRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StopInstanceRequest.Unmarshal(m, b)
+}
+func (m *StopInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StopInstanceRequest.Marshal(b, m, deterministic)
+}
+func (dst *StopInstanceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StopInstanceRequest.Merge(dst, src)
+}
+func (m *StopInstanceRequest) XXX_Size() int {
+ return xxx_messageInfo_StopInstanceRequest.Size(m)
+}
+func (m *StopInstanceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StopInstanceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StopInstanceRequest proto.InternalMessageInfo
+
+func (m *StopInstanceRequest) GetInstanceId() string {
+ if m != nil {
+ return m.InstanceId
+ }
+ return ""
+}
+
+type StopInstanceMetadata struct {
+ // ID of the instance that is being stopped. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopInstanceMetadata) Reset() { *m = StopInstanceMetadata{} } +func (m *StopInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*StopInstanceMetadata) ProtoMessage() {} +func (*StopInstanceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{14} +} +func (m *StopInstanceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopInstanceMetadata.Unmarshal(m, b) +} +func (m *StopInstanceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopInstanceMetadata.Marshal(b, m, deterministic) +} +func (dst *StopInstanceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopInstanceMetadata.Merge(dst, src) +} +func (m *StopInstanceMetadata) XXX_Size() int { + return xxx_messageInfo_StopInstanceMetadata.Size(m) +} +func (m *StopInstanceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StopInstanceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StopInstanceMetadata proto.InternalMessageInfo + +func (m *StopInstanceMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type StartInstanceRequest struct { + // ID of the instance to start. + // To get the instance ID, use a [InstanceService.List] request. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartInstanceRequest) Reset() { *m = StartInstanceRequest{} } +func (m *StartInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*StartInstanceRequest) ProtoMessage() {} +func (*StartInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{15} +} +func (m *StartInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartInstanceRequest.Unmarshal(m, b) +} +func (m *StartInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartInstanceRequest.Marshal(b, m, deterministic) +} +func (dst *StartInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartInstanceRequest.Merge(dst, src) +} +func (m *StartInstanceRequest) XXX_Size() int { + return xxx_messageInfo_StartInstanceRequest.Size(m) +} +func (m *StartInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartInstanceRequest proto.InternalMessageInfo + +func (m *StartInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type StartInstanceMetadata struct { + // ID of the instance. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartInstanceMetadata) Reset() { *m = StartInstanceMetadata{} } +func (m *StartInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*StartInstanceMetadata) ProtoMessage() {} +func (*StartInstanceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{16} +} +func (m *StartInstanceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartInstanceMetadata.Unmarshal(m, b) +} +func (m *StartInstanceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartInstanceMetadata.Marshal(b, m, deterministic) +} +func (dst *StartInstanceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartInstanceMetadata.Merge(dst, src) +} +func (m *StartInstanceMetadata) XXX_Size() int { + return xxx_messageInfo_StartInstanceMetadata.Size(m) +} +func (m *StartInstanceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StartInstanceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StartInstanceMetadata proto.InternalMessageInfo + +func (m *StartInstanceMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type RestartInstanceRequest struct { + // ID of the instance to restart. + // To get the instance ID, use a [InstanceService.List] request. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestartInstanceRequest) Reset() { *m = RestartInstanceRequest{} } +func (m *RestartInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*RestartInstanceRequest) ProtoMessage() {} +func (*RestartInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{17} +} +func (m *RestartInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestartInstanceRequest.Unmarshal(m, b) +} +func (m *RestartInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestartInstanceRequest.Marshal(b, m, deterministic) +} +func (dst *RestartInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestartInstanceRequest.Merge(dst, src) +} +func (m *RestartInstanceRequest) XXX_Size() int { + return xxx_messageInfo_RestartInstanceRequest.Size(m) +} +func (m *RestartInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestartInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestartInstanceRequest proto.InternalMessageInfo + +func (m *RestartInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type RestartInstanceMetadata struct { + // ID of the instance. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestartInstanceMetadata) Reset() { *m = RestartInstanceMetadata{} } +func (m *RestartInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*RestartInstanceMetadata) ProtoMessage() {} +func (*RestartInstanceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{18} +} +func (m *RestartInstanceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestartInstanceMetadata.Unmarshal(m, b) +} +func (m *RestartInstanceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestartInstanceMetadata.Marshal(b, m, deterministic) +} +func (dst *RestartInstanceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestartInstanceMetadata.Merge(dst, src) +} +func (m *RestartInstanceMetadata) XXX_Size() int { + return xxx_messageInfo_RestartInstanceMetadata.Size(m) +} +func (m *RestartInstanceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RestartInstanceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RestartInstanceMetadata proto.InternalMessageInfo + +func (m *RestartInstanceMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type AttachInstanceDiskRequest struct { + // ID of the instance to attach the disk to. + // To get the instance ID, use a [InstanceService.List] request. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // Disk that should be attached. + AttachedDiskSpec *AttachedDiskSpec `protobuf:"bytes,2,opt,name=attached_disk_spec,json=attachedDiskSpec,proto3" json:"attached_disk_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachInstanceDiskRequest) Reset() { *m = AttachInstanceDiskRequest{} } +func (m *AttachInstanceDiskRequest) String() string { return proto.CompactTextString(m) } +func (*AttachInstanceDiskRequest) ProtoMessage() {} +func (*AttachInstanceDiskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{19} +} +func (m *AttachInstanceDiskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttachInstanceDiskRequest.Unmarshal(m, b) +} +func (m *AttachInstanceDiskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttachInstanceDiskRequest.Marshal(b, m, deterministic) +} +func (dst *AttachInstanceDiskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachInstanceDiskRequest.Merge(dst, src) +} +func (m *AttachInstanceDiskRequest) XXX_Size() int { + return xxx_messageInfo_AttachInstanceDiskRequest.Size(m) +} +func (m *AttachInstanceDiskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AttachInstanceDiskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachInstanceDiskRequest proto.InternalMessageInfo + +func (m *AttachInstanceDiskRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *AttachInstanceDiskRequest) GetAttachedDiskSpec() *AttachedDiskSpec { + if m != nil { + return m.AttachedDiskSpec + } + return nil +} + +type AttachInstanceDiskMetadata struct { + // ID of the instance. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // ID of the disk. + DiskId string `protobuf:"bytes,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachInstanceDiskMetadata) Reset() { *m = AttachInstanceDiskMetadata{} } +func (m *AttachInstanceDiskMetadata) String() string { return proto.CompactTextString(m) } +func (*AttachInstanceDiskMetadata) ProtoMessage() {} +func (*AttachInstanceDiskMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{20} +} +func (m *AttachInstanceDiskMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttachInstanceDiskMetadata.Unmarshal(m, b) +} +func (m *AttachInstanceDiskMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttachInstanceDiskMetadata.Marshal(b, m, deterministic) +} +func (dst *AttachInstanceDiskMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachInstanceDiskMetadata.Merge(dst, src) +} +func (m *AttachInstanceDiskMetadata) XXX_Size() int { + return xxx_messageInfo_AttachInstanceDiskMetadata.Size(m) +} +func (m *AttachInstanceDiskMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AttachInstanceDiskMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachInstanceDiskMetadata proto.InternalMessageInfo + +func (m *AttachInstanceDiskMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *AttachInstanceDiskMetadata) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type DetachInstanceDiskRequest struct { + // ID of the instance to detach the disk from. + // To get the instance ID, use a [InstanceService.List] request. 
+ InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // Types that are valid to be assigned to Disk: + // *DetachInstanceDiskRequest_DiskId + // *DetachInstanceDiskRequest_DeviceName + Disk isDetachInstanceDiskRequest_Disk `protobuf_oneof:"disk"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DetachInstanceDiskRequest) Reset() { *m = DetachInstanceDiskRequest{} } +func (m *DetachInstanceDiskRequest) String() string { return proto.CompactTextString(m) } +func (*DetachInstanceDiskRequest) ProtoMessage() {} +func (*DetachInstanceDiskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{21} +} +func (m *DetachInstanceDiskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DetachInstanceDiskRequest.Unmarshal(m, b) +} +func (m *DetachInstanceDiskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DetachInstanceDiskRequest.Marshal(b, m, deterministic) +} +func (dst *DetachInstanceDiskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DetachInstanceDiskRequest.Merge(dst, src) +} +func (m *DetachInstanceDiskRequest) XXX_Size() int { + return xxx_messageInfo_DetachInstanceDiskRequest.Size(m) +} +func (m *DetachInstanceDiskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DetachInstanceDiskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DetachInstanceDiskRequest proto.InternalMessageInfo + +func (m *DetachInstanceDiskRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +type isDetachInstanceDiskRequest_Disk interface { + isDetachInstanceDiskRequest_Disk() +} + +type DetachInstanceDiskRequest_DiskId struct { + DiskId string `protobuf:"bytes,2,opt,name=disk_id,json=diskId,proto3,oneof"` +} + +type DetachInstanceDiskRequest_DeviceName struct { + DeviceName string `protobuf:"bytes,3,opt,name=device_name,json=deviceName,proto3,oneof"` +} + +func (*DetachInstanceDiskRequest_DiskId) isDetachInstanceDiskRequest_Disk() {} + +func (*DetachInstanceDiskRequest_DeviceName) isDetachInstanceDiskRequest_Disk() {} + +func (m *DetachInstanceDiskRequest) GetDisk() isDetachInstanceDiskRequest_Disk { + if m != nil { + return m.Disk + } + return nil +} + +func (m *DetachInstanceDiskRequest) GetDiskId() string { + if x, ok := m.GetDisk().(*DetachInstanceDiskRequest_DiskId); ok { + return x.DiskId + } + return "" +} + +func (m *DetachInstanceDiskRequest) GetDeviceName() string { + if x, ok := m.GetDisk().(*DetachInstanceDiskRequest_DeviceName); ok { + return x.DeviceName + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*DetachInstanceDiskRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DetachInstanceDiskRequest_OneofMarshaler, _DetachInstanceDiskRequest_OneofUnmarshaler, _DetachInstanceDiskRequest_OneofSizer, []interface{}{ + (*DetachInstanceDiskRequest_DiskId)(nil), + (*DetachInstanceDiskRequest_DeviceName)(nil), + } +} + +func _DetachInstanceDiskRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DetachInstanceDiskRequest) + // disk + switch x := m.Disk.(type) { + case *DetachInstanceDiskRequest_DiskId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DiskId) + case *DetachInstanceDiskRequest_DeviceName: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DeviceName) + case nil: + default: + return fmt.Errorf("DetachInstanceDiskRequest.Disk has unexpected type %T", x) + } + return nil +} + +func _DetachInstanceDiskRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DetachInstanceDiskRequest) + switch tag { + case 2: // disk.disk_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Disk = &DetachInstanceDiskRequest_DiskId{x} + return true, err + case 3: // disk.device_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Disk = &DetachInstanceDiskRequest_DeviceName{x} + return true, err + default: + return false, nil + } +} + +func _DetachInstanceDiskRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DetachInstanceDiskRequest) + // disk + switch x := m.Disk.(type) { + case *DetachInstanceDiskRequest_DiskId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DiskId))) + n += len(x.DiskId) + case *DetachInstanceDiskRequest_DeviceName: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DeviceName))) + n += len(x.DeviceName) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type DetachInstanceDiskMetadata struct { + // ID of the instance. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // ID of the disk. 
+ DiskId string `protobuf:"bytes,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DetachInstanceDiskMetadata) Reset() { *m = DetachInstanceDiskMetadata{} } +func (m *DetachInstanceDiskMetadata) String() string { return proto.CompactTextString(m) } +func (*DetachInstanceDiskMetadata) ProtoMessage() {} +func (*DetachInstanceDiskMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{22} +} +func (m *DetachInstanceDiskMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DetachInstanceDiskMetadata.Unmarshal(m, b) +} +func (m *DetachInstanceDiskMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DetachInstanceDiskMetadata.Marshal(b, m, deterministic) +} +func (dst *DetachInstanceDiskMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DetachInstanceDiskMetadata.Merge(dst, src) +} +func (m *DetachInstanceDiskMetadata) XXX_Size() int { + return xxx_messageInfo_DetachInstanceDiskMetadata.Size(m) +} +func (m *DetachInstanceDiskMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DetachInstanceDiskMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DetachInstanceDiskMetadata proto.InternalMessageInfo + +func (m *DetachInstanceDiskMetadata) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *DetachInstanceDiskMetadata) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type ListInstanceOperationsRequest struct { + // ID of the Instance resource to list operations for. + InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListInstanceOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListInstanceOperationsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListInstanceOperationsRequest) Reset() { *m = ListInstanceOperationsRequest{} } +func (m *ListInstanceOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstanceOperationsRequest) ProtoMessage() {} +func (*ListInstanceOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{23} +} +func (m *ListInstanceOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListInstanceOperationsRequest.Unmarshal(m, b) +} +func (m *ListInstanceOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListInstanceOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListInstanceOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListInstanceOperationsRequest.Merge(dst, src) +} +func (m *ListInstanceOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListInstanceOperationsRequest.Size(m) +} +func (m *ListInstanceOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListInstanceOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListInstanceOperationsRequest proto.InternalMessageInfo + +func (m *ListInstanceOperationsRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *ListInstanceOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstanceOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListInstanceOperationsResponse struct { + // List of operations for the specified instance. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListInstanceOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListInstanceOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListInstanceOperationsResponse) Reset() { *m = ListInstanceOperationsResponse{} } +func (m *ListInstanceOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstanceOperationsResponse) ProtoMessage() {} +func (*ListInstanceOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{24} +} +func (m *ListInstanceOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListInstanceOperationsResponse.Unmarshal(m, b) +} +func (m *ListInstanceOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListInstanceOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListInstanceOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListInstanceOperationsResponse.Merge(dst, src) +} +func (m *ListInstanceOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListInstanceOperationsResponse.Size(m) +} +func (m *ListInstanceOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListInstanceOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListInstanceOperationsResponse proto.InternalMessageInfo + +func (m *ListInstanceOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListInstanceOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ResourcesSpec struct { + // The amount of memory available to the instance, specified in bytes. + Memory int64 `protobuf:"varint,1,opt,name=memory,proto3" json:"memory,omitempty"` + // The number of cores available to the instance. + Cores int64 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` + // Baseline level of CPU performance with the ability to burst performance above that baseline level. + // This field sets baseline performance for each core. + // + // For example, if you need only 5% of the CPU performance, you can set core_fraction=5. + // For more information, see [documentation](/docs/compute/concepts/vm-types#burstable-cores). 
+ CoreFraction int64 `protobuf:"varint,3,opt,name=core_fraction,json=coreFraction,proto3" json:"core_fraction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourcesSpec) Reset() { *m = ResourcesSpec{} } +func (m *ResourcesSpec) String() string { return proto.CompactTextString(m) } +func (*ResourcesSpec) ProtoMessage() {} +func (*ResourcesSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{25} +} +func (m *ResourcesSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourcesSpec.Unmarshal(m, b) +} +func (m *ResourcesSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourcesSpec.Marshal(b, m, deterministic) +} +func (dst *ResourcesSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcesSpec.Merge(dst, src) +} +func (m *ResourcesSpec) XXX_Size() int { + return xxx_messageInfo_ResourcesSpec.Size(m) +} +func (m *ResourcesSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcesSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcesSpec proto.InternalMessageInfo + +func (m *ResourcesSpec) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func (m *ResourcesSpec) GetCores() int64 { + if m != nil { + return m.Cores + } + return 0 +} + +func (m *ResourcesSpec) GetCoreFraction() int64 { + if m != nil { + return m.CoreFraction + } + return 0 +} + +type AttachedDiskSpec struct { + // The mode in which to attach this disk. + Mode AttachedDiskSpec_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=yandex.cloud.compute.v1.AttachedDiskSpec_Mode" json:"mode,omitempty"` + // Specifies a unique serial number of your choice that is reflected into the /dev/disk/by-id/ tree + // of a Linux operating system running within the instance. + // + // This value can be used to reference the device for mounting, resizing, and so on, from within the instance. + // If not specified, a random value will be generated. + DeviceName string `protobuf:"bytes,2,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + // Specifies whether the disk will be auto-deleted when the instance is deleted. 
+ AutoDelete bool `protobuf:"varint,3,opt,name=auto_delete,json=autoDelete,proto3" json:"auto_delete,omitempty"` + // Types that are valid to be assigned to Disk: + // *AttachedDiskSpec_DiskSpec_ + // *AttachedDiskSpec_DiskId + Disk isAttachedDiskSpec_Disk `protobuf_oneof:"disk"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachedDiskSpec) Reset() { *m = AttachedDiskSpec{} } +func (m *AttachedDiskSpec) String() string { return proto.CompactTextString(m) } +func (*AttachedDiskSpec) ProtoMessage() {} +func (*AttachedDiskSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{26} +} +func (m *AttachedDiskSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttachedDiskSpec.Unmarshal(m, b) +} +func (m *AttachedDiskSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttachedDiskSpec.Marshal(b, m, deterministic) +} +func (dst *AttachedDiskSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedDiskSpec.Merge(dst, src) +} +func (m *AttachedDiskSpec) XXX_Size() int { + return xxx_messageInfo_AttachedDiskSpec.Size(m) +} +func (m *AttachedDiskSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedDiskSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachedDiskSpec proto.InternalMessageInfo + +func (m *AttachedDiskSpec) GetMode() AttachedDiskSpec_Mode { + if m != nil { + return m.Mode + } + return AttachedDiskSpec_MODE_UNSPECIFIED +} + +func (m *AttachedDiskSpec) GetDeviceName() string { + if m != nil { + return m.DeviceName + } + return "" +} + +func (m *AttachedDiskSpec) GetAutoDelete() bool { + if m != nil { + return m.AutoDelete + } + return false +} + +type isAttachedDiskSpec_Disk interface { + isAttachedDiskSpec_Disk() +} + +type AttachedDiskSpec_DiskSpec_ struct { + DiskSpec *AttachedDiskSpec_DiskSpec `protobuf:"bytes,4,opt,name=disk_spec,json=diskSpec,proto3,oneof"` +} + +type AttachedDiskSpec_DiskId struct { + DiskId string `protobuf:"bytes,5,opt,name=disk_id,json=diskId,proto3,oneof"` +} + +func (*AttachedDiskSpec_DiskSpec_) isAttachedDiskSpec_Disk() {} + +func (*AttachedDiskSpec_DiskId) isAttachedDiskSpec_Disk() {} + +func (m *AttachedDiskSpec) GetDisk() isAttachedDiskSpec_Disk { + if m != nil { + return m.Disk + } + return nil +} + +func (m *AttachedDiskSpec) GetDiskSpec() *AttachedDiskSpec_DiskSpec { + if x, ok := m.GetDisk().(*AttachedDiskSpec_DiskSpec_); ok { + return x.DiskSpec + } + return nil +} + +func (m *AttachedDiskSpec) GetDiskId() string { + if x, ok := m.GetDisk().(*AttachedDiskSpec_DiskId); ok { + return x.DiskId + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AttachedDiskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttachedDiskSpec_OneofMarshaler, _AttachedDiskSpec_OneofUnmarshaler, _AttachedDiskSpec_OneofSizer, []interface{}{ + (*AttachedDiskSpec_DiskSpec_)(nil), + (*AttachedDiskSpec_DiskId)(nil), + } +} + +func _AttachedDiskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttachedDiskSpec) + // disk + switch x := m.Disk.(type) { + case *AttachedDiskSpec_DiskSpec_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DiskSpec); err != nil { + return err + } + case *AttachedDiskSpec_DiskId: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DiskId) + case nil: + default: + return fmt.Errorf("AttachedDiskSpec.Disk has unexpected type %T", x) + } + return nil +} + +func _AttachedDiskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttachedDiskSpec) + switch tag { + case 4: // disk.disk_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AttachedDiskSpec_DiskSpec) + err := b.DecodeMessage(msg) + m.Disk = &AttachedDiskSpec_DiskSpec_{msg} + return true, err + case 5: // disk.disk_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Disk = &AttachedDiskSpec_DiskId{x} + return true, err + default: + return false, nil + } +} + +func _AttachedDiskSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttachedDiskSpec) + // disk + switch x := m.Disk.(type) { + case *AttachedDiskSpec_DiskSpec_: + s := proto.Size(x.DiskSpec) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AttachedDiskSpec_DiskId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DiskId))) + n += len(x.DiskId) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type AttachedDiskSpec_DiskSpec struct { + // Name of the disk. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the disk. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // ID of the disk type. + // To get a list of available disk types, use the [yandex.cloud.compute.v1.DiskTypeService.List] request. + TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + // Size of the disk, specified in bytes. 
+ Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + // Types that are valid to be assigned to Source: + // *AttachedDiskSpec_DiskSpec_ImageId + // *AttachedDiskSpec_DiskSpec_SnapshotId + Source isAttachedDiskSpec_DiskSpec_Source `protobuf_oneof:"source"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachedDiskSpec_DiskSpec) Reset() { *m = AttachedDiskSpec_DiskSpec{} } +func (m *AttachedDiskSpec_DiskSpec) String() string { return proto.CompactTextString(m) } +func (*AttachedDiskSpec_DiskSpec) ProtoMessage() {} +func (*AttachedDiskSpec_DiskSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{26, 0} +} +func (m *AttachedDiskSpec_DiskSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttachedDiskSpec_DiskSpec.Unmarshal(m, b) +} +func (m *AttachedDiskSpec_DiskSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttachedDiskSpec_DiskSpec.Marshal(b, m, deterministic) +} +func (dst *AttachedDiskSpec_DiskSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedDiskSpec_DiskSpec.Merge(dst, src) +} +func (m *AttachedDiskSpec_DiskSpec) XXX_Size() int { + return xxx_messageInfo_AttachedDiskSpec_DiskSpec.Size(m) +} +func (m *AttachedDiskSpec_DiskSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedDiskSpec_DiskSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachedDiskSpec_DiskSpec proto.InternalMessageInfo + +func (m *AttachedDiskSpec_DiskSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AttachedDiskSpec_DiskSpec) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *AttachedDiskSpec_DiskSpec) GetTypeId() string { + if m != nil { + return m.TypeId + } + return "" +} + +func (m *AttachedDiskSpec_DiskSpec) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +type isAttachedDiskSpec_DiskSpec_Source interface { + isAttachedDiskSpec_DiskSpec_Source() +} + +type AttachedDiskSpec_DiskSpec_ImageId struct { + ImageId string `protobuf:"bytes,5,opt,name=image_id,json=imageId,proto3,oneof"` +} + +type AttachedDiskSpec_DiskSpec_SnapshotId struct { + SnapshotId string `protobuf:"bytes,6,opt,name=snapshot_id,json=snapshotId,proto3,oneof"` +} + +func (*AttachedDiskSpec_DiskSpec_ImageId) isAttachedDiskSpec_DiskSpec_Source() {} + +func (*AttachedDiskSpec_DiskSpec_SnapshotId) isAttachedDiskSpec_DiskSpec_Source() {} + +func (m *AttachedDiskSpec_DiskSpec) GetSource() isAttachedDiskSpec_DiskSpec_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *AttachedDiskSpec_DiskSpec) GetImageId() string { + if x, ok := m.GetSource().(*AttachedDiskSpec_DiskSpec_ImageId); ok { + return x.ImageId + } + return "" +} + +func (m *AttachedDiskSpec_DiskSpec) GetSnapshotId() string { + if x, ok := m.GetSource().(*AttachedDiskSpec_DiskSpec_SnapshotId); ok { + return x.SnapshotId + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AttachedDiskSpec_DiskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttachedDiskSpec_DiskSpec_OneofMarshaler, _AttachedDiskSpec_DiskSpec_OneofUnmarshaler, _AttachedDiskSpec_DiskSpec_OneofSizer, []interface{}{ + (*AttachedDiskSpec_DiskSpec_ImageId)(nil), + (*AttachedDiskSpec_DiskSpec_SnapshotId)(nil), + } +} + +func _AttachedDiskSpec_DiskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttachedDiskSpec_DiskSpec) + // source + switch x := m.Source.(type) { + case *AttachedDiskSpec_DiskSpec_ImageId: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ImageId) + case *AttachedDiskSpec_DiskSpec_SnapshotId: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SnapshotId) + case nil: + default: + return fmt.Errorf("AttachedDiskSpec_DiskSpec.Source has unexpected type %T", x) + } + return nil +} + +func _AttachedDiskSpec_DiskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttachedDiskSpec_DiskSpec) + switch tag { + case 5: // source.image_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &AttachedDiskSpec_DiskSpec_ImageId{x} + return true, err + case 6: // source.snapshot_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &AttachedDiskSpec_DiskSpec_SnapshotId{x} + return true, err + default: + return false, nil + } +} + +func _AttachedDiskSpec_DiskSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttachedDiskSpec_DiskSpec) + // source + switch x := m.Source.(type) { + case *AttachedDiskSpec_DiskSpec_ImageId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ImageId))) + n += len(x.ImageId) + case *AttachedDiskSpec_DiskSpec_SnapshotId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.SnapshotId))) + n += len(x.SnapshotId) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NetworkInterfaceSpec struct { + // ID of the subnet. + SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Primary IPv4 address that will be assigned to the instance for this network interface. + PrimaryV4AddressSpec *PrimaryAddressSpec `protobuf:"bytes,2,opt,name=primary_v4_address_spec,json=primaryV4AddressSpec,proto3" json:"primary_v4_address_spec,omitempty"` + // Primary IPv6 address that will be assigned to the instance for this network interface. IPv6 not available yet. 
+ PrimaryV6AddressSpec *PrimaryAddressSpec `protobuf:"bytes,3,opt,name=primary_v6_address_spec,json=primaryV6AddressSpec,proto3" json:"primary_v6_address_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkInterfaceSpec) Reset() { *m = NetworkInterfaceSpec{} } +func (m *NetworkInterfaceSpec) String() string { return proto.CompactTextString(m) } +func (*NetworkInterfaceSpec) ProtoMessage() {} +func (*NetworkInterfaceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{27} +} +func (m *NetworkInterfaceSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkInterfaceSpec.Unmarshal(m, b) +} +func (m *NetworkInterfaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkInterfaceSpec.Marshal(b, m, deterministic) +} +func (dst *NetworkInterfaceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkInterfaceSpec.Merge(dst, src) +} +func (m *NetworkInterfaceSpec) XXX_Size() int { + return xxx_messageInfo_NetworkInterfaceSpec.Size(m) +} +func (m *NetworkInterfaceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkInterfaceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkInterfaceSpec proto.InternalMessageInfo + +func (m *NetworkInterfaceSpec) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *NetworkInterfaceSpec) GetPrimaryV4AddressSpec() *PrimaryAddressSpec { + if m != nil { + return m.PrimaryV4AddressSpec + } + return nil +} + +func (m *NetworkInterfaceSpec) GetPrimaryV6AddressSpec() *PrimaryAddressSpec { + if m != nil { + return m.PrimaryV6AddressSpec + } + return nil +} + +type PrimaryAddressSpec struct { + // An IPv4 internal network address that is assigned to the instance for this network interface. + // If not specified by the user, an unused internal IP is assigned by the system. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // An external IP address configuration. + // If not specified, then this instance will have no external internet access. 
+ OneToOneNatSpec *OneToOneNatSpec `protobuf:"bytes,2,opt,name=one_to_one_nat_spec,json=oneToOneNatSpec,proto3" json:"one_to_one_nat_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrimaryAddressSpec) Reset() { *m = PrimaryAddressSpec{} } +func (m *PrimaryAddressSpec) String() string { return proto.CompactTextString(m) } +func (*PrimaryAddressSpec) ProtoMessage() {} +func (*PrimaryAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{28} +} +func (m *PrimaryAddressSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrimaryAddressSpec.Unmarshal(m, b) +} +func (m *PrimaryAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrimaryAddressSpec.Marshal(b, m, deterministic) +} +func (dst *PrimaryAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrimaryAddressSpec.Merge(dst, src) +} +func (m *PrimaryAddressSpec) XXX_Size() int { + return xxx_messageInfo_PrimaryAddressSpec.Size(m) +} +func (m *PrimaryAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PrimaryAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PrimaryAddressSpec proto.InternalMessageInfo + +func (m *PrimaryAddressSpec) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *PrimaryAddressSpec) GetOneToOneNatSpec() *OneToOneNatSpec { + if m != nil { + return m.OneToOneNatSpec + } + return nil +} + +type OneToOneNatSpec struct { + // External IP address version. + IpVersion IpVersion `protobuf:"varint,1,opt,name=ip_version,json=ipVersion,proto3,enum=yandex.cloud.compute.v1.IpVersion" json:"ip_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneToOneNatSpec) Reset() { *m = OneToOneNatSpec{} } +func (m *OneToOneNatSpec) String() string { return proto.CompactTextString(m) } +func (*OneToOneNatSpec) ProtoMessage() {} +func (*OneToOneNatSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_instance_service_80618e87048981ad, []int{29} +} +func (m *OneToOneNatSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneToOneNatSpec.Unmarshal(m, b) +} +func (m *OneToOneNatSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneToOneNatSpec.Marshal(b, m, deterministic) +} +func (dst *OneToOneNatSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneToOneNatSpec.Merge(dst, src) +} +func (m *OneToOneNatSpec) XXX_Size() int { + return xxx_messageInfo_OneToOneNatSpec.Size(m) +} +func (m *OneToOneNatSpec) XXX_DiscardUnknown() { + xxx_messageInfo_OneToOneNatSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_OneToOneNatSpec proto.InternalMessageInfo + +func (m *OneToOneNatSpec) GetIpVersion() IpVersion { + if m != nil { + return m.IpVersion + } + return IpVersion_IP_VERSION_UNSPECIFIED +} + +func init() { + proto.RegisterType((*GetInstanceRequest)(nil), "yandex.cloud.compute.v1.GetInstanceRequest") + proto.RegisterType((*ListInstancesRequest)(nil), "yandex.cloud.compute.v1.ListInstancesRequest") + proto.RegisterType((*ListInstancesResponse)(nil), "yandex.cloud.compute.v1.ListInstancesResponse") + proto.RegisterType((*CreateInstanceRequest)(nil), "yandex.cloud.compute.v1.CreateInstanceRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.CreateInstanceRequest.LabelsEntry") + 
proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.CreateInstanceRequest.MetadataEntry") + proto.RegisterType((*CreateInstanceMetadata)(nil), "yandex.cloud.compute.v1.CreateInstanceMetadata") + proto.RegisterType((*UpdateInstanceRequest)(nil), "yandex.cloud.compute.v1.UpdateInstanceRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.UpdateInstanceRequest.LabelsEntry") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.UpdateInstanceRequest.MetadataEntry") + proto.RegisterType((*UpdateInstanceMetadata)(nil), "yandex.cloud.compute.v1.UpdateInstanceMetadata") + proto.RegisterType((*DeleteInstanceRequest)(nil), "yandex.cloud.compute.v1.DeleteInstanceRequest") + proto.RegisterType((*DeleteInstanceMetadata)(nil), "yandex.cloud.compute.v1.DeleteInstanceMetadata") + proto.RegisterType((*UpdateInstanceMetadataRequest)(nil), "yandex.cloud.compute.v1.UpdateInstanceMetadataRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.UpdateInstanceMetadataRequest.UpsertEntry") + proto.RegisterType((*UpdateInstanceMetadataMetadata)(nil), "yandex.cloud.compute.v1.UpdateInstanceMetadataMetadata") + proto.RegisterType((*GetInstanceSerialPortOutputRequest)(nil), "yandex.cloud.compute.v1.GetInstanceSerialPortOutputRequest") + proto.RegisterType((*GetInstanceSerialPortOutputResponse)(nil), "yandex.cloud.compute.v1.GetInstanceSerialPortOutputResponse") + proto.RegisterType((*StopInstanceRequest)(nil), "yandex.cloud.compute.v1.StopInstanceRequest") + proto.RegisterType((*StopInstanceMetadata)(nil), "yandex.cloud.compute.v1.StopInstanceMetadata") + proto.RegisterType((*StartInstanceRequest)(nil), "yandex.cloud.compute.v1.StartInstanceRequest") + proto.RegisterType((*StartInstanceMetadata)(nil), "yandex.cloud.compute.v1.StartInstanceMetadata") + proto.RegisterType((*RestartInstanceRequest)(nil), "yandex.cloud.compute.v1.RestartInstanceRequest") + proto.RegisterType((*RestartInstanceMetadata)(nil), "yandex.cloud.compute.v1.RestartInstanceMetadata") + proto.RegisterType((*AttachInstanceDiskRequest)(nil), "yandex.cloud.compute.v1.AttachInstanceDiskRequest") + proto.RegisterType((*AttachInstanceDiskMetadata)(nil), "yandex.cloud.compute.v1.AttachInstanceDiskMetadata") + proto.RegisterType((*DetachInstanceDiskRequest)(nil), "yandex.cloud.compute.v1.DetachInstanceDiskRequest") + proto.RegisterType((*DetachInstanceDiskMetadata)(nil), "yandex.cloud.compute.v1.DetachInstanceDiskMetadata") + proto.RegisterType((*ListInstanceOperationsRequest)(nil), "yandex.cloud.compute.v1.ListInstanceOperationsRequest") + proto.RegisterType((*ListInstanceOperationsResponse)(nil), "yandex.cloud.compute.v1.ListInstanceOperationsResponse") + proto.RegisterType((*ResourcesSpec)(nil), "yandex.cloud.compute.v1.ResourcesSpec") + proto.RegisterType((*AttachedDiskSpec)(nil), "yandex.cloud.compute.v1.AttachedDiskSpec") + proto.RegisterType((*AttachedDiskSpec_DiskSpec)(nil), "yandex.cloud.compute.v1.AttachedDiskSpec.DiskSpec") + proto.RegisterType((*NetworkInterfaceSpec)(nil), "yandex.cloud.compute.v1.NetworkInterfaceSpec") + proto.RegisterType((*PrimaryAddressSpec)(nil), "yandex.cloud.compute.v1.PrimaryAddressSpec") + proto.RegisterType((*OneToOneNatSpec)(nil), "yandex.cloud.compute.v1.OneToOneNatSpec") + proto.RegisterEnum("yandex.cloud.compute.v1.InstanceView", InstanceView_name, InstanceView_value) + proto.RegisterEnum("yandex.cloud.compute.v1.AttachedDiskSpec_Mode", AttachedDiskSpec_Mode_name, AttachedDiskSpec_Mode_value) +} + +// Reference 
imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// InstanceServiceClient is the client API for InstanceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type InstanceServiceClient interface { + // Returns the specified Instance resource. + // + // To get the list of available Instance resources, make a [List] request. + Get(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) + // Retrieves the list of Instance resources in the specified folder. + List(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) + // Creates an instance in the specified folder. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified instance. + Update(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified instance. + Delete(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) + UpdateMetadata(ctx context.Context, in *UpdateInstanceMetadataRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Returns the serial port output of the specified Instance resource. + GetSerialPortOutput(ctx context.Context, in *GetInstanceSerialPortOutputRequest, opts ...grpc.CallOption) (*GetInstanceSerialPortOutputResponse, error) + // Stops the running instance. + // + // You can start the instance later using the [InstanceService.Start] method. + Stop(ctx context.Context, in *StopInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Starts the stopped instance. + Start(ctx context.Context, in *StartInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Restarts the running instance. + Restart(ctx context.Context, in *RestartInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Attaches the disk to the instance. + AttachDisk(ctx context.Context, in *AttachInstanceDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Detaches the disk from the instance. + DetachDisk(ctx context.Context, in *DetachInstanceDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists operations for the specified instance. + ListOperations(ctx context.Context, in *ListInstanceOperationsRequest, opts ...grpc.CallOption) (*ListInstanceOperationsResponse, error) +} + +type instanceServiceClient struct { + cc *grpc.ClientConn +} + +func NewInstanceServiceClient(cc *grpc.ClientConn) InstanceServiceClient { + return &instanceServiceClient{cc} +} + +func (c *instanceServiceClient) Get(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) { + out := new(Instance) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) List(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) { + out := new(ListInstancesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) Create(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) Update(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) Delete(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) UpdateMetadata(ctx context.Context, in *UpdateInstanceMetadataRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/UpdateMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) GetSerialPortOutput(ctx context.Context, in *GetInstanceSerialPortOutputRequest, opts ...grpc.CallOption) (*GetInstanceSerialPortOutputResponse, error) { + out := new(GetInstanceSerialPortOutputResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/GetSerialPortOutput", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) Stop(ctx context.Context, in *StopInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) Start(ctx context.Context, in *StartInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) Restart(ctx context.Context, in *RestartInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/Restart", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) AttachDisk(ctx context.Context, in *AttachInstanceDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/AttachDisk", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) DetachDisk(ctx context.Context, in *DetachInstanceDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/DetachDisk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) ListOperations(ctx context.Context, in *ListInstanceOperationsRequest, opts ...grpc.CallOption) (*ListInstanceOperationsResponse, error) { + out := new(ListInstanceOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.InstanceService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// InstanceServiceServer is the server API for InstanceService service. +type InstanceServiceServer interface { + // Returns the specified Instance resource. + // + // To get the list of available Instance resources, make a [List] request. + Get(context.Context, *GetInstanceRequest) (*Instance, error) + // Retrieves the list of Instance resources in the specified folder. + List(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) + // Creates an instance in the specified folder. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(context.Context, *CreateInstanceRequest) (*operation.Operation, error) + // Updates the specified instance. + Update(context.Context, *UpdateInstanceRequest) (*operation.Operation, error) + // Deletes the specified instance. + Delete(context.Context, *DeleteInstanceRequest) (*operation.Operation, error) + UpdateMetadata(context.Context, *UpdateInstanceMetadataRequest) (*operation.Operation, error) + // Returns the serial port output of the specified Instance resource. + GetSerialPortOutput(context.Context, *GetInstanceSerialPortOutputRequest) (*GetInstanceSerialPortOutputResponse, error) + // Stops the running instance. + // + // You can start the instance later using the [InstanceService.Start] method. + Stop(context.Context, *StopInstanceRequest) (*operation.Operation, error) + // Starts the stopped instance. + Start(context.Context, *StartInstanceRequest) (*operation.Operation, error) + // Restarts the running instance. + Restart(context.Context, *RestartInstanceRequest) (*operation.Operation, error) + // Attaches the disk to the instance. + AttachDisk(context.Context, *AttachInstanceDiskRequest) (*operation.Operation, error) + // Detaches the disk from the instance. + DetachDisk(context.Context, *DetachInstanceDiskRequest) (*operation.Operation, error) + // Lists operations for the specified instance. 
+ ListOperations(context.Context, *ListInstanceOperationsRequest) (*ListInstanceOperationsResponse, error) +} + +func RegisterInstanceServiceServer(s *grpc.Server, srv InstanceServiceServer) { + s.RegisterService(&_InstanceService_serviceDesc, srv) +} + +func _InstanceService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Get(ctx, req.(*GetInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstancesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).List(ctx, req.(*ListInstancesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Create(ctx, req.(*CreateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Update(ctx, req.(*UpdateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Delete(ctx, req.(*DeleteInstanceRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _InstanceService_UpdateMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateInstanceMetadataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).UpdateMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/UpdateMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).UpdateMetadata(ctx, req.(*UpdateInstanceMetadataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_GetSerialPortOutput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceSerialPortOutputRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).GetSerialPortOutput(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/GetSerialPortOutput", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).GetSerialPortOutput(ctx, req.(*GetInstanceSerialPortOutputRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Stop(ctx, req.(*StopInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Start(ctx, req.(*StartInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_Restart_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestartInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).Restart(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/Restart", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).Restart(ctx, req.(*RestartInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_InstanceService_AttachDisk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachInstanceDiskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).AttachDisk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/AttachDisk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).AttachDisk(ctx, req.(*AttachInstanceDiskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_DetachDisk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachInstanceDiskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).DetachDisk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/DetachDisk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).DetachDisk(ctx, req.(*DetachInstanceDiskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstanceOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.InstanceService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).ListOperations(ctx, req.(*ListInstanceOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _InstanceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.compute.v1.InstanceService", + HandlerType: (*InstanceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _InstanceService_Get_Handler, + }, + { + MethodName: "List", + Handler: _InstanceService_List_Handler, + }, + { + MethodName: "Create", + Handler: _InstanceService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _InstanceService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _InstanceService_Delete_Handler, + }, + { + MethodName: "UpdateMetadata", + Handler: _InstanceService_UpdateMetadata_Handler, + }, + { + MethodName: "GetSerialPortOutput", + Handler: _InstanceService_GetSerialPortOutput_Handler, + }, + { + MethodName: "Stop", + Handler: _InstanceService_Stop_Handler, + }, + { + MethodName: "Start", + Handler: _InstanceService_Start_Handler, + }, + { + MethodName: "Restart", + Handler: _InstanceService_Restart_Handler, + }, + { + MethodName: "AttachDisk", + Handler: _InstanceService_AttachDisk_Handler, + }, + { + MethodName: "DetachDisk", + Handler: _InstanceService_DetachDisk_Handler, + }, + { + MethodName: "ListOperations", + Handler: _InstanceService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/compute/v1/instance_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/instance_service.proto", 
fileDescriptor_instance_service_80618e87048981ad) +} + +var fileDescriptor_instance_service_80618e87048981ad = []byte{ + // 2334 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xcd, 0x4f, 0x1b, 0xdb, + 0x15, 0xcf, 0xe0, 0x0f, 0xec, 0x63, 0x20, 0xee, 0x0d, 0x04, 0x3f, 0xab, 0xbc, 0xf2, 0x26, 0x4a, + 0x4a, 0x9c, 0xf1, 0xc7, 0x18, 0x63, 0x70, 0x42, 0xf4, 0x84, 0x03, 0x24, 0x56, 0x93, 0x90, 0x0e, + 0x09, 0x6d, 0x13, 0xa5, 0xd6, 0xe0, 0xb9, 0x90, 0x11, 0xf6, 0xcc, 0x74, 0x66, 0x4c, 0x1e, 0xa4, + 0xa9, 0x2a, 0x54, 0x55, 0x6a, 0xba, 0x7c, 0xbb, 0xee, 0xda, 0xae, 0xba, 0x2c, 0xab, 0xf6, 0x55, + 0x7a, 0x6a, 0x37, 0x85, 0x55, 0x55, 0xd1, 0x4d, 0xff, 0x80, 0x2e, 0xba, 0xab, 0x94, 0x65, 0x56, + 0xd5, 0xbd, 0x33, 0x63, 0xc6, 0x66, 0xc6, 0x0c, 0x90, 0xaa, 0xab, 0x78, 0xee, 0x3d, 0xe7, 0xdc, + 0xdf, 0x39, 0xf7, 0x7c, 0x5e, 0x02, 0xb9, 0x1d, 0x51, 0x91, 0xf0, 0x17, 0xf9, 0x46, 0x53, 0x6d, + 0x4b, 0xf9, 0x86, 0xda, 0xd2, 0xda, 0x26, 0xce, 0x6f, 0xf3, 0x79, 0x59, 0x31, 0x4c, 0x51, 0x69, + 0xe0, 0xba, 0x81, 0xf5, 0x6d, 0xb9, 0x81, 0x73, 0x9a, 0xae, 0x9a, 0x2a, 0x1a, 0xb7, 0xe8, 0x73, + 0x94, 0x3e, 0x67, 0xd3, 0xe7, 0xb6, 0xf9, 0xf4, 0x37, 0x37, 0x55, 0x75, 0xb3, 0x89, 0xf3, 0xa2, + 0x26, 0xe7, 0x45, 0x45, 0x51, 0x4d, 0xd1, 0x94, 0x55, 0xc5, 0xb0, 0xd8, 0xd2, 0x93, 0xf6, 0x2e, + 0xfd, 0x5a, 0x6f, 0x6f, 0xe4, 0x37, 0x64, 0xdc, 0x94, 0xea, 0x2d, 0xd1, 0xd8, 0xb2, 0x29, 0xd2, + 0x36, 0x10, 0xc2, 0xaf, 0x6a, 0x58, 0xa7, 0xec, 0xf6, 0xde, 0x8d, 0xd3, 0x40, 0x7a, 0xd2, 0x75, + 0xa4, 0x9c, 0x90, 0x37, 0xd1, 0x45, 0xb7, 0x2d, 0x36, 0x65, 0xc9, 0xb5, 0xcd, 0xfe, 0x04, 0xd0, + 0x7d, 0x6c, 0xd6, 0x6c, 0xd9, 0x02, 0xfe, 0x51, 0x1b, 0x1b, 0x26, 0xca, 0x42, 0xa2, 0x63, 0x13, + 0x59, 0x4a, 0x31, 0x93, 0xcc, 0x54, 0xbc, 0x3a, 0xf4, 0xef, 0x03, 0x9e, 0x79, 0x77, 0xc8, 0x87, + 0xe7, 0xef, 0xce, 0x14, 0x04, 0x70, 0x08, 0x6a, 0x12, 0xaa, 0x40, 0x78, 0x5b, 0xc6, 0xaf, 0x53, + 0x03, 0x93, 0xcc, 0xd4, 0x48, 0xf1, 0x7a, 0xce, 0xc7, 0x6e, 0x39, 0xe7, 0x98, 0x35, 0x19, 0xbf, + 0x16, 0x28, 0x0b, 0xfb, 0x07, 0x06, 0x46, 0x1f, 0xca, 0x46, 0x07, 0x81, 0xe1, 0x40, 0xb8, 0x09, + 0xf1, 0x0d, 0xb5, 0x29, 0x61, 0xdd, 0x0f, 0x40, 0xcc, 0xda, 0xae, 0x49, 0xe8, 0xdb, 0x10, 0xd7, + 0xc4, 0x4d, 0x5c, 0x37, 0xe4, 0x5d, 0x4c, 0x31, 0x84, 0xaa, 0xf0, 0xe1, 0x80, 0x8f, 0xce, 0xdf, + 0xe5, 0x0b, 0x85, 0x82, 0x10, 0x23, 0x9b, 0xab, 0xf2, 0x2e, 0x46, 0x53, 0x00, 0x94, 0xd0, 0x54, + 0xb7, 0xb0, 0x92, 0x0a, 0x51, 0xa1, 0xf1, 0x77, 0x87, 0x7c, 0x84, 0x52, 0x0a, 0x54, 0xca, 0x53, + 0xb2, 0x87, 0x58, 0x88, 0x6e, 0xc8, 0x4d, 0x13, 0xeb, 0xa9, 0x30, 0xa5, 0x82, 0x77, 0x87, 0x1d, + 0x79, 0xf6, 0x0e, 0xfb, 0x53, 0x06, 0xc6, 0x7a, 0xa0, 0x1b, 0x9a, 0xaa, 0x18, 0x18, 0x7d, 0x0e, + 0x71, 0xc7, 0x3a, 0x46, 0x8a, 0x99, 0x0c, 0x4d, 0x25, 0x8a, 0x9f, 0x9d, 0x6a, 0x14, 0xe1, 0x98, + 0x07, 0xdd, 0x80, 0xcb, 0x0a, 0xfe, 0xc2, 0xac, 0xbb, 0xd0, 0x12, 0xbd, 0xe2, 0xc2, 0x30, 0x59, + 0x7e, 0xe2, 0xc0, 0x64, 0xf7, 0xe2, 0x30, 0x76, 0x4f, 0xc7, 0xa2, 0x89, 0x7b, 0x6f, 0xf0, 0x0c, + 0xe6, 0x9b, 0x81, 0xb0, 0x22, 0xb6, 0x2c, 0xcb, 0xc5, 0xab, 0x9f, 0xbd, 0x3f, 0xe0, 0x27, 0x7e, + 0xfc, 0x42, 0xcc, 0xee, 0xbe, 0x7c, 0x91, 0x15, 0xb3, 0xbb, 0x85, 0x6c, 0xe5, 0xe5, 0x1b, 0x9e, + 0x2b, 0xf3, 0x6f, 0x5f, 0xd8, 0x5f, 0x02, 0x25, 0x47, 0xb7, 0x20, 0x21, 0x61, 0xa3, 0xa1, 0xcb, + 0x1a, 0x71, 0xa7, 0x6e, 0x6b, 0x16, 0x67, 0xca, 0x82, 0x7b, 0x17, 0x7d, 0xc9, 0x40, 0xb4, 0x29, + 0xae, 0xe3, 0xa6, 0x91, 0x0a, 0x53, 0x7b, 0xdc, 0xf6, 0xb5, 0x87, 0xa7, 0x3e, 0xb9, 0x87, 0x94, + 0x79, 0x49, 0x31, 0xf5, 0x9d, 0xea, 0xe7, 0xef, 
0x0f, 0xf8, 0xc4, 0x8b, 0x6c, 0xbd, 0x90, 0xad, + 0x10, 0x98, 0x99, 0x3d, 0xaa, 0x53, 0xb9, 0x64, 0xe9, 0x56, 0x9e, 0xde, 0x3f, 0xe4, 0xa3, 0xe9, + 0x30, 0x9f, 0xa5, 0xbf, 0x10, 0x4a, 0xda, 0xca, 0x74, 0xe8, 0x05, 0x1b, 0x0a, 0xba, 0x0e, 0x83, + 0xbb, 0xaa, 0x42, 0x5d, 0x3c, 0xe2, 0x61, 0xa2, 0x28, 0xd9, 0xac, 0x49, 0xa8, 0x04, 0x09, 0xad, + 0x29, 0x9a, 0x1b, 0xaa, 0xde, 0x22, 0xa4, 0x51, 0x4a, 0x7a, 0x85, 0x90, 0x12, 0x20, 0x04, 0xa7, + 0x24, 0xea, 0x52, 0x76, 0x9b, 0x17, 0xc0, 0xa1, 0xab, 0x49, 0x68, 0x15, 0x46, 0x74, 0x6c, 0xa8, + 0x6d, 0xbd, 0x81, 0x8d, 0xba, 0xa1, 0xe1, 0x46, 0x6a, 0x70, 0x92, 0x99, 0x4a, 0x14, 0x6f, 0xf8, + 0x6a, 0x2e, 0x38, 0xe4, 0xab, 0x1a, 0x6e, 0x54, 0xc3, 0xe4, 0x00, 0x61, 0x58, 0x77, 0x2f, 0xa2, + 0xef, 0x43, 0xac, 0x85, 0x4d, 0x51, 0x12, 0x4d, 0x31, 0x15, 0xa3, 0x86, 0x9c, 0x3f, 0xa3, 0x21, + 0x1f, 0xd9, 0xec, 0xd4, 0x94, 0x42, 0x47, 0x1a, 0x7a, 0x06, 0x23, 0xeb, 0xaa, 0x6a, 0xd6, 0x25, + 0xd9, 0xd8, 0xb2, 0xe0, 0xc6, 0x29, 0xdc, 0x9b, 0xbe, 0xf2, 0x17, 0x4c, 0x53, 0x6c, 0xbc, 0xc2, + 0xd2, 0xa2, 0x6c, 0x6c, 0xb9, 0x10, 0x0f, 0x11, 0x31, 0xce, 0x1a, 0x92, 0x60, 0xd4, 0xc0, 0x0d, + 0x95, 0x58, 0x68, 0xe7, 0x58, 0xb6, 0x91, 0x02, 0x0a, 0xfe, 0x0c, 0xc2, 0x07, 0xf7, 0x0e, 0xf9, + 0xd0, 0xfc, 0xdd, 0x69, 0x01, 0x75, 0xe4, 0x39, 0x7b, 0x06, 0x6a, 0xc2, 0xb8, 0x82, 0xcd, 0xd7, + 0xaa, 0xbe, 0x55, 0x97, 0x15, 0x13, 0xeb, 0x1b, 0x22, 0x49, 0xe6, 0xf4, 0xa0, 0x04, 0x3d, 0x28, + 0xeb, 0x7b, 0xd0, 0x63, 0x8b, 0xaf, 0xe6, 0xb0, 0xd1, 0xc3, 0x22, 0x7b, 0x87, 0x3c, 0xc3, 0x0b, + 0x63, 0x8a, 0xc7, 0xa6, 0x81, 0xee, 0x42, 0xec, 0x95, 0x6a, 0x98, 0x34, 0x68, 0x86, 0x82, 0x06, + 0x4d, 0x87, 0x05, 0xad, 0xc1, 0x37, 0x0c, 0xa2, 0x5a, 0xbb, 0x29, 0x2b, 0x9b, 0x75, 0x4d, 0x6d, + 0xca, 0x8d, 0x9d, 0xd4, 0xf0, 0x29, 0xc6, 0x5e, 0xed, 0x70, 0x3c, 0xa1, 0x0c, 0x42, 0xd2, 0xe8, + 0x59, 0x49, 0x57, 0x20, 0xe1, 0x8a, 0x12, 0x94, 0x84, 0xd0, 0x16, 0xde, 0xb1, 0x62, 0x5f, 0x20, + 0x3f, 0xd1, 0x28, 0x44, 0xb6, 0xc5, 0x66, 0xdb, 0x8e, 0x74, 0xc1, 0xfa, 0xb8, 0x3d, 0x30, 0xc7, + 0xa4, 0xef, 0xc0, 0x70, 0x97, 0x5f, 0x9c, 0x85, 0x99, 0xbd, 0x07, 0x57, 0xbb, 0x5d, 0xcd, 0x11, + 0x85, 0x6e, 0x7a, 0x95, 0x91, 0x98, 0x57, 0x09, 0x61, 0xff, 0x1e, 0x81, 0xb1, 0x67, 0x9a, 0xe4, + 0x91, 0xc9, 0xce, 0x58, 0x8b, 0xee, 0x40, 0xa2, 0x4d, 0xe5, 0xd0, 0x82, 0x4b, 0xd1, 0x26, 0x8a, + 0xe9, 0x9c, 0x55, 0x93, 0x73, 0x4e, 0x4d, 0xce, 0x2d, 0x93, 0x9a, 0xfc, 0x48, 0x34, 0xb6, 0x04, + 0xb0, 0xc8, 0xc9, 0xef, 0x4e, 0x2a, 0x0c, 0x5d, 0x28, 0x15, 0x86, 0x83, 0xa6, 0xc2, 0xc8, 0x29, + 0xa9, 0xd0, 0xd3, 0x20, 0xff, 0x9b, 0x54, 0x58, 0xf0, 0xca, 0x71, 0x97, 0xfb, 0xe5, 0xb7, 0x47, + 0x17, 0xcb, 0x6f, 0x17, 0xc9, 0x6c, 0xde, 0x76, 0xf1, 0xc9, 0x6c, 0xff, 0xb7, 0xb8, 0xa8, 0xc0, + 0xd5, 0x6e, 0xa0, 0x9d, 0xb8, 0xf8, 0x96, 0x87, 0x4b, 0x77, 0x45, 0xc3, 0x32, 0x8c, 0x2d, 0xe2, + 0x26, 0xbe, 0x68, 0x30, 0x10, 0x08, 0xdd, 0x72, 0x82, 0x43, 0xf8, 0x0f, 0x03, 0x13, 0xde, 0xf0, + 0x1d, 0x2c, 0xa7, 0x89, 0x40, 0x57, 0x21, 0x2a, 0xd1, 0xd3, 0x53, 0x03, 0x93, 0xa1, 0xa9, 0xb8, + 0x60, 0x7f, 0xa1, 0xe7, 0x10, 0x6d, 0x6b, 0x06, 0xd6, 0xcd, 0x54, 0x88, 0x5e, 0x74, 0x35, 0xe0, + 0x45, 0xf7, 0x00, 0xc8, 0x3d, 0xa3, 0x42, 0xac, 0xeb, 0xb6, 0x25, 0x92, 0xcb, 0x76, 0x2d, 0x9f, + 0xe9, 0xbe, 0x16, 0xe0, 0x53, 0xef, 0xf3, 0x82, 0x1b, 0x6d, 0x15, 0x58, 0x57, 0x37, 0xbd, 0x8a, + 0x75, 0x59, 0x6c, 0x3e, 0x51, 0x75, 0x73, 0xa5, 0x6d, 0x6a, 0x6d, 0xf3, 0x9c, 0x97, 0xb8, 0x00, + 0xd7, 0xfa, 0x0a, 0xb5, 0x9b, 0xce, 0x34, 0xc4, 0x1a, 0xaa, 0x62, 0x62, 0xc5, 0x34, 0x6c, 0x64, + 0x9d, 0x6f, 0x76, 0x11, 0xae, 0xac, 0x9a, 0xaa, 0x76, 0x41, 0x6f, 0x9a, 
0x85, 0x51, 0xb7, 0x94, + 0xe0, 0x66, 0x59, 0x22, 0x8c, 0xa2, 0x7e, 0xc1, 0x31, 0x83, 0x9d, 0x83, 0xb1, 0x2e, 0x31, 0xc1, + 0x01, 0xdc, 0x87, 0xab, 0x02, 0x36, 0x3e, 0x02, 0x84, 0xdb, 0x30, 0xde, 0x23, 0x28, 0x38, 0x88, + 0xdf, 0x31, 0xf0, 0x89, 0xd5, 0xd6, 0x38, 0xbc, 0xa4, 0x81, 0x39, 0x67, 0x99, 0x7b, 0x09, 0x48, + 0xb4, 0x5b, 0x24, 0x57, 0xcb, 0x36, 0x70, 0xbe, 0x96, 0x2d, 0x29, 0xf6, 0xac, 0xb3, 0x6b, 0x90, + 0x3e, 0x09, 0x35, 0xb0, 0xaa, 0x68, 0x1c, 0x06, 0x29, 0x28, 0x59, 0xb2, 0xc3, 0x2c, 0x4a, 0x3e, + 0x6b, 0x12, 0xfb, 0x15, 0x03, 0x9f, 0x2c, 0xe2, 0x8f, 0x64, 0x83, 0x6b, 0x3d, 0xa7, 0x1c, 0xb7, + 0x16, 0x0f, 0x2e, 0x39, 0x27, 0x92, 0x7e, 0x40, 0xc2, 0x64, 0xa8, 0xaf, 0xbb, 0x2a, 0x7b, 0xea, + 0xfd, 0x01, 0x3f, 0x6a, 0xd5, 0x42, 0xab, 0x92, 0x67, 0xeb, 0x2f, 0xdf, 0x70, 0x7c, 0xe5, 0xed, + 0x83, 0x4b, 0x02, 0x58, 0xe4, 0x8f, 0xc5, 0x16, 0xae, 0x0e, 0x41, 0x98, 0x88, 0x41, 0xe1, 0x3f, + 0xff, 0x85, 0x67, 0x88, 0x51, 0x4e, 0x62, 0xff, 0x08, 0x46, 0xf9, 0x35, 0x03, 0x13, 0xee, 0x41, + 0x72, 0xc5, 0x19, 0xe1, 0x8d, 0x73, 0x1a, 0xe6, 0xe3, 0x0f, 0xc4, 0xec, 0x2f, 0x19, 0xf8, 0xd4, + 0x0f, 0xa3, 0x9d, 0x80, 0x16, 0x00, 0x3a, 0x8f, 0x0f, 0x3e, 0x63, 0xef, 0xf1, 0xe3, 0x44, 0x87, + 0x5f, 0x70, 0x31, 0x05, 0x9e, 0x7b, 0xbf, 0x66, 0x60, 0xb8, 0xab, 0x9b, 0x40, 0x39, 0x88, 0xb6, + 0x70, 0x4b, 0xd5, 0xad, 0x5c, 0x1f, 0xaa, 0x5e, 0x25, 0xc6, 0xf9, 0x70, 0xc0, 0x8f, 0xcc, 0xdf, + 0x2d, 0xce, 0x96, 0xe6, 0x66, 0x67, 0x2b, 0x85, 0x72, 0xa5, 0x54, 0x12, 0x6c, 0x2a, 0xf4, 0x1d, + 0x88, 0x34, 0x54, 0x1d, 0x1b, 0xb6, 0x79, 0x66, 0x6c, 0xf2, 0x2c, 0xcf, 0x15, 0xb9, 0x12, 0x57, + 0xe6, 0xe6, 0x38, 0xbe, 0xc0, 0xf1, 0x45, 0x8e, 0x2f, 0x71, 0x7c, 0x99, 0xe3, 0xe7, 0xb8, 0x62, + 0x81, 0x2b, 0x16, 0xb9, 0x62, 0x89, 0x2b, 0x96, 0xb9, 0xe2, 0x1c, 0x37, 0x5d, 0xe0, 0xa6, 0x8b, + 0x82, 0x25, 0x03, 0x15, 0x60, 0x98, 0xfc, 0xa8, 0x6f, 0xe8, 0x62, 0xa3, 0x33, 0x0c, 0x87, 0xaa, + 0x89, 0x0f, 0x07, 0xfc, 0x60, 0x81, 0x9b, 0xe1, 0x88, 0x2d, 0x87, 0x08, 0xc5, 0xb2, 0x4d, 0xc0, + 0xfe, 0x2a, 0x02, 0xc9, 0xde, 0x60, 0x44, 0x55, 0x08, 0xb7, 0x54, 0x09, 0x53, 0x0d, 0x46, 0x8a, + 0xb9, 0xc0, 0x51, 0x9c, 0x7b, 0xa4, 0x4a, 0x58, 0xa0, 0xbc, 0xa8, 0xd2, 0xed, 0xee, 0x03, 0xfd, + 0xdd, 0xdd, 0xed, 0xec, 0xc4, 0x81, 0xc5, 0xb6, 0xa9, 0xd6, 0xed, 0x9a, 0x4d, 0x74, 0x88, 0x09, + 0x40, 0x96, 0xac, 0x1e, 0x02, 0x7d, 0x17, 0xe2, 0xc7, 0xa9, 0x26, 0x4c, 0x53, 0x4d, 0x31, 0x38, + 0x48, 0xe7, 0xc7, 0x83, 0x4b, 0x42, 0x4c, 0x72, 0x54, 0x76, 0x85, 0x70, 0xc4, 0x2f, 0x84, 0xd3, + 0xbf, 0x19, 0x80, 0x58, 0xc7, 0x48, 0x4e, 0x8b, 0xce, 0x5c, 0xa8, 0x45, 0x1f, 0xe8, 0xdb, 0xa2, + 0x7f, 0x06, 0x83, 0xe6, 0x8e, 0x46, 0x43, 0x2d, 0xd4, 0x33, 0xb3, 0x44, 0xc9, 0x46, 0x4d, 0x42, + 0x3c, 0x84, 0x69, 0x74, 0x85, 0xe9, 0x4d, 0x4f, 0xd8, 0xee, 0x33, 0x56, 0xe2, 0x2b, 0xa5, 0xe9, + 0x42, 0x29, 0x5b, 0x9a, 0xae, 0xcc, 0x15, 0x4a, 0xe5, 0x19, 0x9e, 0xe7, 0x0b, 0x25, 0x81, 0x92, + 0xa2, 0xeb, 0x10, 0x93, 0x5b, 0xc4, 0xb1, 0x3d, 0x95, 0x1d, 0xa4, 0x7b, 0x35, 0x89, 0x20, 0x35, + 0x14, 0x51, 0x33, 0x5e, 0xa9, 0xe6, 0x71, 0x27, 0xee, 0xa6, 0x04, 0x67, 0xbb, 0x26, 0x55, 0x63, + 0x10, 0xb5, 0xa2, 0x80, 0xbd, 0x03, 0x61, 0xe2, 0x06, 0x68, 0x14, 0x92, 0x8f, 0x56, 0x16, 0x97, + 0xea, 0xcf, 0x1e, 0xaf, 0x3e, 0x59, 0xba, 0x57, 0x5b, 0xae, 0x2d, 0x2d, 0x26, 0x2f, 0xa1, 0x61, + 0x88, 0x0b, 0x4b, 0x0b, 0x8b, 0xf5, 0x95, 0xc7, 0x0f, 0x7f, 0x90, 0x64, 0xd0, 0x08, 0x00, 0xfd, + 0xfc, 0x9e, 0x50, 0x7b, 0xba, 0x94, 0x1c, 0xe8, 0xc9, 0x73, 0xbf, 0x18, 0x80, 0x51, 0xaf, 0xb1, + 0x18, 0xdd, 0x84, 0xb8, 0xd1, 0x5e, 0x57, 0xb0, 0xe9, 0xfb, 0xa8, 0x64, 0x6d, 0xd7, 0x24, 0xb4, + 
0x0e, 0xe3, 0x9a, 0x2e, 0xb7, 0xc8, 0xd4, 0xbf, 0x5d, 0xaa, 0x8b, 0x92, 0xa4, 0x63, 0xc3, 0x70, + 0x17, 0xa9, 0x5b, 0xbe, 0x9e, 0xf3, 0xc4, 0xe2, 0x5b, 0xb0, 0x78, 0xe8, 0xac, 0x30, 0x6a, 0xcb, + 0x5a, 0x2b, 0xb9, 0x56, 0xbb, 0xce, 0x28, 0x77, 0x9f, 0x11, 0xba, 0xc0, 0x19, 0x65, 0xd7, 0x2a, + 0xfb, 0x73, 0x06, 0xd0, 0x49, 0x62, 0x94, 0x82, 0x41, 0xfb, 0x3c, 0x3b, 0xd1, 0x3b, 0x9f, 0x68, + 0x0d, 0xae, 0xa8, 0x0a, 0x49, 0x5e, 0x75, 0xf2, 0x8f, 0x22, 0x9a, 0x6e, 0xa5, 0xa7, 0x7c, 0x01, + 0xad, 0x28, 0xf8, 0xa9, 0xba, 0xa2, 0xe0, 0xc7, 0xa2, 0x49, 0xd1, 0x5c, 0x56, 0xbb, 0x17, 0xd8, + 0xe7, 0x70, 0xb9, 0x87, 0x06, 0xdd, 0x07, 0x90, 0xb5, 0xfa, 0x36, 0xd6, 0x0d, 0xe2, 0xd2, 0x56, + 0xd6, 0x60, 0xfd, 0xdf, 0x19, 0xb5, 0x35, 0x8b, 0xd2, 0x2e, 0xfa, 0x71, 0xd9, 0x59, 0xc8, 0x5c, + 0x83, 0x21, 0xf7, 0xd3, 0x2c, 0x8a, 0x43, 0xa4, 0xba, 0xb0, 0x5a, 0xbb, 0x97, 0xbc, 0x84, 0x62, + 0x10, 0x5e, 0x7e, 0xf6, 0xf0, 0x61, 0x92, 0x29, 0x7e, 0x40, 0x70, 0xd9, 0xd5, 0x84, 0x92, 0xb4, + 0x81, 0xf6, 0x18, 0x08, 0xdd, 0xc7, 0x26, 0xf2, 0x37, 0xf4, 0xc9, 0xc7, 0xe5, 0xf4, 0xe9, 0x4f, + 0xa1, 0xec, 0xad, 0xbd, 0x7f, 0xfc, 0xeb, 0xcb, 0x81, 0xeb, 0xe8, 0x9a, 0xd7, 0x03, 0xb8, 0x91, + 0x7f, 0xe3, 0x2a, 0x86, 0x6f, 0xd1, 0xcf, 0x18, 0x08, 0x93, 0xd2, 0x84, 0xfc, 0x1f, 0x79, 0xbc, + 0x5e, 0x98, 0xd3, 0xb9, 0xa0, 0xe4, 0x56, 0x7d, 0x63, 0x27, 0x28, 0xa8, 0x71, 0x34, 0xe6, 0x09, + 0x0a, 0xfd, 0x96, 0x81, 0xa8, 0xf5, 0x0e, 0x82, 0x72, 0x67, 0x7b, 0x93, 0x4b, 0x9f, 0x5e, 0x25, + 0xd9, 0xe5, 0xfd, 0xa3, 0x0c, 0xeb, 0xfb, 0xd0, 0x12, 0x73, 0x56, 0x28, 0xc4, 0x34, 0xeb, 0x0d, + 0xf1, 0x36, 0x93, 0x41, 0xbf, 0x67, 0x20, 0x6a, 0x4d, 0x39, 0x7d, 0x50, 0x7a, 0xce, 0xd7, 0x41, + 0x50, 0x3e, 0xb5, 0x50, 0xfa, 0x8c, 0xbd, 0xdd, 0x28, 0xa7, 0x8a, 0x41, 0x6e, 0x97, 0x60, 0xfe, + 0x13, 0x03, 0x51, 0xbb, 0x04, 0xf9, 0x63, 0xf6, 0x9c, 0x97, 0x83, 0x60, 0xfe, 0xe1, 0xfe, 0x51, + 0x26, 0xef, 0x3b, 0x27, 0x8f, 0xf5, 0x3e, 0x1c, 0x2d, 0xb5, 0x34, 0x73, 0xc7, 0x72, 0xcf, 0x4c, + 0x20, 0xf7, 0xfc, 0x27, 0x03, 0x23, 0x96, 0x41, 0x3a, 0x42, 0xcb, 0xe7, 0x1b, 0x78, 0x83, 0x68, + 0xb3, 0xb5, 0x7f, 0x94, 0xc9, 0x9c, 0x3a, 0xc8, 0x76, 0xdf, 0xc4, 0x2c, 0x5b, 0x0c, 0xa0, 0x48, + 0xbe, 0xdd, 0xa5, 0x04, 0xb9, 0x98, 0xbf, 0x31, 0x70, 0xe5, 0x3e, 0x36, 0x7b, 0x47, 0x52, 0x74, + 0x27, 0x48, 0x3a, 0xf0, 0x99, 0x8e, 0xd3, 0xf3, 0xe7, 0x63, 0xb6, 0x83, 0x74, 0x9e, 0x6a, 0x54, + 0x46, 0xa5, 0x20, 0xbe, 0x65, 0xf4, 0x02, 0xff, 0x8a, 0x81, 0x30, 0x19, 0x71, 0x11, 0xe7, 0xff, + 0x10, 0x7b, 0x72, 0x8e, 0x0e, 0x72, 0x2f, 0xe2, 0xfe, 0x51, 0x26, 0xeb, 0x33, 0x3f, 0xf7, 0xf1, + 0xb1, 0x0c, 0x3b, 0x15, 0x48, 0x11, 0x82, 0xf9, 0x6b, 0x06, 0x22, 0x74, 0x3e, 0xee, 0x93, 0x08, + 0xbd, 0xc6, 0xf0, 0x20, 0xf0, 0x1b, 0xfb, 0x47, 0x99, 0x9c, 0xdf, 0xf8, 0xdd, 0x07, 0xff, 0x2d, + 0xf6, 0x66, 0x30, 0xfc, 0x04, 0xf6, 0x5f, 0x19, 0x18, 0xb4, 0xa7, 0x6b, 0x94, 0xef, 0xf7, 0x8a, + 0x78, 0x4e, 0x25, 0x36, 0xf7, 0x8f, 0x32, 0x05, 0xff, 0x01, 0xbe, 0x8f, 0x1a, 0x59, 0xf6, 0x56, + 0x10, 0x35, 0x74, 0x1b, 0xfc, 0x21, 0x03, 0x60, 0xf5, 0xbf, 0xa4, 0x6d, 0x45, 0xa7, 0x35, 0xc9, + 0x1e, 0xa3, 0x70, 0xd0, 0x3b, 0xb9, 0xd1, 0x77, 0x4e, 0xef, 0x0e, 0xf3, 0x69, 0x36, 0x17, 0x44, + 0x09, 0xb1, 0x03, 0x9c, 0x84, 0x38, 0x51, 0xc5, 0x1a, 0x7a, 0x4f, 0x51, 0xc5, 0x77, 0xaa, 0x3f, + 0x83, 0x2a, 0x7d, 0xa6, 0xeb, 0x73, 0xa9, 0x22, 0x61, 0xb7, 0x2a, 0x7f, 0x64, 0x60, 0x84, 0x54, + 0xf6, 0xe3, 0xd1, 0xb5, 0x4f, 0x22, 0xee, 0x3b, 0x8f, 0xa7, 0x67, 0xcf, 0xcc, 0x67, 0xa7, 0xa7, + 0x32, 0x85, 0x5f, 0x40, 0x41, 0xe0, 0x1f, 0xff, 0x29, 0xdf, 0xa8, 0x2e, 0x3d, 0xbf, 0xb7, 0x29, + 0x9b, 0xaf, 0xda, 0xeb, 
0xe4, 0xac, 0xbc, 0x75, 0x78, 0xd6, 0xfa, 0x93, 0xfe, 0xa6, 0x9a, 0xdd, + 0xc4, 0x0a, 0x75, 0xda, 0xbc, 0xcf, 0xff, 0x1d, 0xb8, 0x63, 0xff, 0x5c, 0x8f, 0x52, 0xb2, 0xe9, + 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x6d, 0x48, 0x30, 0x0a, 0x21, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot.pb.go new file mode 100644 index 000000000..17f929e4a --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot.pb.go @@ -0,0 +1,238 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/snapshot.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Snapshot_Status int32 + +const ( + Snapshot_STATUS_UNSPECIFIED Snapshot_Status = 0 + // Snapshot is being created. + Snapshot_CREATING Snapshot_Status = 1 + // Snapshot is ready to use. + Snapshot_READY Snapshot_Status = 2 + // Snapshot encountered a problem and cannot operate. + Snapshot_ERROR Snapshot_Status = 3 + // Snapshot is being deleted. + Snapshot_DELETING Snapshot_Status = 4 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "CREATING", + 2: "READY", + 3: "ERROR", + 4: "DELETING", +} +var Snapshot_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, + "ERROR": 3, + "DELETING": 4, +} + +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_snapshot_3c7ca140728f66a5, []int{0, 0} +} + +// A Snapshot resource. For more information, see [Snapshots](/docs/compute/concepts/snapshot). +type Snapshot struct { + // ID of the snapshot. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the snapshot belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the snapshot. 1-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the snapshot. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Maximum of 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Size of the snapshot, specified in bytes. 
+ StorageSize int64 `protobuf:"varint,7,opt,name=storage_size,json=storageSize,proto3" json:"storage_size,omitempty"` + // Size of the disk when the snapshot was created, specified in bytes. + DiskSize int64 `protobuf:"varint,8,opt,name=disk_size,json=diskSize,proto3" json:"disk_size,omitempty"` + // License IDs that indicate which licenses are attached to this resource. + // License IDs are used to calculate additional charges for the use of the virtual machine. + // + // The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + // + // If you know the license IDs, specify them when you create the image. + // For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + // You can specify them in the [yandex.cloud.compute.v1.ImageService.Create] request. + ProductIds []string `protobuf:"bytes,9,rep,name=product_ids,json=productIds,proto3" json:"product_ids,omitempty"` + // Current status of the snapshot. + Status Snapshot_Status `protobuf:"varint,10,opt,name=status,proto3,enum=yandex.cloud.compute.v1.Snapshot_Status" json:"status,omitempty"` + // ID of the source disk used to create this snapshot. + SourceDiskId string `protobuf:"bytes,11,opt,name=source_disk_id,json=sourceDiskId,proto3" json:"source_disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_3c7ca140728f66a5, []int{0} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Snapshot) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Snapshot) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Snapshot) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Snapshot) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Snapshot) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Snapshot) GetStorageSize() int64 { + if m != nil { + return m.StorageSize + } + return 0 +} + +func (m *Snapshot) GetDiskSize() int64 { + if m != nil { + return m.DiskSize + } + return 0 +} + +func (m *Snapshot) GetProductIds() []string { + if m != nil { + return m.ProductIds + } + return nil +} + +func (m *Snapshot) GetStatus() Snapshot_Status { + if m != nil { + return m.Status + } + return Snapshot_STATUS_UNSPECIFIED +} + +func (m *Snapshot) GetSourceDiskId() string { + if m != nil { + return m.SourceDiskId + } + return "" +} + +func init() { + 
proto.RegisterType((*Snapshot)(nil), "yandex.cloud.compute.v1.Snapshot") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.Snapshot.LabelsEntry") + proto.RegisterEnum("yandex.cloud.compute.v1.Snapshot_Status", Snapshot_Status_name, Snapshot_Status_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/snapshot.proto", fileDescriptor_snapshot_3c7ca140728f66a5) +} + +var fileDescriptor_snapshot_3c7ca140728f66a5 = []byte{ + // 484 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x8f, 0x9b, 0x3e, + 0x10, 0xc5, 0xff, 0x84, 0x24, 0xff, 0x30, 0x44, 0x11, 0xb2, 0xaa, 0x16, 0xa5, 0x87, 0xa5, 0xab, + 0xaa, 0xe2, 0x12, 0xd0, 0xa6, 0x97, 0x6e, 0x7b, 0x69, 0x9a, 0xd0, 0x0a, 0x69, 0xb5, 0xad, 0x4c, + 0xf6, 0xd0, 0x5e, 0x10, 0xc1, 0x5e, 0xd6, 0x0a, 0xc1, 0x08, 0x9b, 0xa8, 0xd9, 0x2f, 0xd8, 0xaf, + 0x55, 0x61, 0x1c, 0x69, 0x2f, 0xab, 0xde, 0x86, 0x37, 0xbf, 0xf1, 0xf3, 0xc3, 0x03, 0xef, 0x4e, + 0x59, 0x45, 0xe8, 0xef, 0x30, 0x2f, 0x79, 0x4b, 0xc2, 0x9c, 0x1f, 0xea, 0x56, 0xd2, 0xf0, 0x78, + 0x15, 0x8a, 0x2a, 0xab, 0xc5, 0x03, 0x97, 0x41, 0xdd, 0x70, 0xc9, 0xd1, 0xab, 0x9e, 0x0b, 0x14, + 0x17, 0x68, 0x2e, 0x38, 0x5e, 0xcd, 0x2f, 0x0a, 0xce, 0x8b, 0x92, 0x86, 0x0a, 0xdb, 0xb5, 0xf7, + 0xa1, 0x64, 0x07, 0x2a, 0x64, 0x76, 0xa8, 0xfb, 0xc9, 0xcb, 0x3f, 0x43, 0x98, 0x24, 0xfa, 0x30, + 0x34, 0x83, 0x01, 0x23, 0xae, 0xe1, 0x19, 0xbe, 0x85, 0x07, 0x8c, 0xa0, 0xd7, 0x60, 0xdd, 0xf3, + 0x92, 0xd0, 0x26, 0x65, 0xc4, 0x1d, 0x28, 0x79, 0xd2, 0x0b, 0x31, 0x41, 0xd7, 0x00, 0x79, 0x43, + 0x33, 0x49, 0x49, 0x9a, 0x49, 0xd7, 0xf4, 0x0c, 0xdf, 0x5e, 0xce, 0x83, 0xde, 0x2f, 0x38, 0xfb, + 0x05, 0xdb, 0xb3, 0x1f, 0xb6, 0x34, 0xbd, 0x92, 0x08, 0xc1, 0xb0, 0xca, 0x0e, 0xd4, 0x1d, 0xaa, + 0x23, 0x55, 0x8d, 0x3c, 0xb0, 0x09, 0x15, 0x79, 0xc3, 0x6a, 0xc9, 0x78, 0xe5, 0x8e, 0x54, 0xeb, + 0xa9, 0x84, 0x22, 0x18, 0x97, 0xd9, 0x8e, 0x96, 0xc2, 0x1d, 0x7b, 0xa6, 0x6f, 0x2f, 0x17, 0xc1, + 0x33, 0xa9, 0x83, 0x73, 0xa0, 0xe0, 0x46, 0xf1, 0x51, 0x25, 0x9b, 0x13, 0xd6, 0xc3, 0xe8, 0x0d, + 0x4c, 0x85, 0xe4, 0x4d, 0x56, 0xd0, 0x54, 0xb0, 0x47, 0xea, 0xfe, 0xef, 0x19, 0xbe, 0x89, 0x6d, + 0xad, 0x25, 0xec, 0x91, 0x76, 0xb9, 0x09, 0x13, 0xfb, 0xbe, 0x3f, 0x51, 0xfd, 0x49, 0x27, 0xa8, + 0xe6, 0x05, 0xd8, 0x75, 0xc3, 0x49, 0x9b, 0xcb, 0x94, 0x11, 0xe1, 0x5a, 0x9e, 0xe9, 0x5b, 0x18, + 0xb4, 0x14, 0x13, 0x81, 0x3e, 0xc3, 0x58, 0xc8, 0x4c, 0xb6, 0xc2, 0x05, 0xcf, 0xf0, 0x67, 0x4b, + 0xff, 0xdf, 0xf7, 0x4c, 0x14, 0x8f, 0xf5, 0x1c, 0x7a, 0x0b, 0x33, 0xc1, 0xdb, 0x26, 0xa7, 0xa9, + 0xba, 0x06, 0x23, 0xae, 0xad, 0x7e, 0xc7, 0xb4, 0x57, 0x37, 0x4c, 0xec, 0x63, 0x32, 0xbf, 0x06, + 0xfb, 0x49, 0x3e, 0xe4, 0x80, 0xb9, 0xa7, 0x27, 0xfd, 0x7a, 0x5d, 0x89, 0x5e, 0xc0, 0xe8, 0x98, + 0x95, 0x2d, 0xd5, 0x4f, 0xd7, 0x7f, 0x7c, 0x1c, 0x7c, 0x30, 0x2e, 0x31, 0x8c, 0x7b, 0x4b, 0xf4, + 0x12, 0x50, 0xb2, 0x5d, 0x6d, 0xef, 0x92, 0xf4, 0xee, 0x36, 0xf9, 0x11, 0xad, 0xe3, 0xaf, 0x71, + 0xb4, 0x71, 0xfe, 0x43, 0x53, 0x98, 0xac, 0x71, 0xb4, 0xda, 0xc6, 0xb7, 0xdf, 0x1c, 0x03, 0x59, + 0x30, 0xc2, 0xd1, 0x6a, 0xf3, 0xd3, 0x19, 0x74, 0x65, 0x84, 0xf1, 0x77, 0xec, 0x98, 0x1d, 0xb3, + 0x89, 0x6e, 0x22, 0xc5, 0x0c, 0xbf, 0x44, 0xbf, 0xd6, 0x05, 0x93, 0x0f, 0xed, 0xae, 0x4b, 0x18, + 0xf6, 0x91, 0x17, 0xfd, 0xe2, 0x16, 0x7c, 0x51, 0xd0, 0x4a, 0xed, 0x44, 0xf8, 0xcc, 0x46, 0x7f, + 0xd2, 0xe5, 0x6e, 0xac, 0xb0, 0xf7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf6, 0xec, 0x41, 0x99, + 0xfb, 0x02, 0x00, 0x00, +} diff --git 
a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot_service.pb.go new file mode 100644 index 000000000..b802cfe7d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/snapshot_service.pb.go @@ -0,0 +1,976 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/snapshot_service.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetSnapshotRequest struct { + // ID of the Snapshot resource to return. + // To get the snapshot ID, use a [SnapshotService.List] request. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSnapshotRequest) Reset() { *m = GetSnapshotRequest{} } +func (m *GetSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*GetSnapshotRequest) ProtoMessage() {} +func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{0} +} +func (m *GetSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSnapshotRequest.Unmarshal(m, b) +} +func (m *GetSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *GetSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSnapshotRequest.Merge(dst, src) +} +func (m *GetSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_GetSnapshotRequest.Size(m) +} +func (m *GetSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSnapshotRequest proto.InternalMessageInfo + +func (m *GetSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type ListSnapshotsRequest struct { + // ID of the folder to list snapshots in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. 
If the number of available + // results is larger than [page_size], + // the service returns a [ListSnapshotsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListSnapshotsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [Snapshot.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z]([-a-z0-9]{,61}[a-z0-9])?$`. + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{1} +} +func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) +} +func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) +} +func (m *ListSnapshotsRequest) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsRequest.Size(m) +} +func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo + +func (m *ListSnapshotsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListSnapshotsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSnapshotsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListSnapshotsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListSnapshotsResponse struct { + // List of snapshots. + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListSnapshotsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListSnapshotsRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results.
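The page_size/page_token contract documented above is typically consumed in a loop. A minimal sketch, not part of the generated file: it reuses the imports from the earlier sketch and assumes a SnapshotServiceClient with a List method is generated further down in this file, following the same pattern as InstanceServiceClient.

// listAllSnapshots pages through every snapshot in a folder by feeding each
// response's NextPageToken back into the next request's PageToken until the
// returned token is empty.
func listAllSnapshots(ctx context.Context, conn *grpc.ClientConn, folderID string) ([]*compute.Snapshot, error) {
	client := compute.NewSnapshotServiceClient(conn) // assumed constructor, same shape as NewInstanceServiceClient
	var all []*compute.Snapshot
	req := &compute.ListSnapshotsRequest{FolderId: folderID, PageSize: 100}
	for {
		resp, err := client.List(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetSnapshots()...)
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}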
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{2} +} +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) +} +func (m *ListSnapshotsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse.Size(m) +} +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo + +func (m *ListSnapshotsResponse) GetSnapshots() []*Snapshot { + if m != nil { + return m.Snapshots + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateSnapshotRequest struct { + // ID of the folder to create a snapshot in. + // To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // ID of the disk to create the snapshot from. + // To get the disk ID use a [yandex.cloud.compute.v1.DiskService.List] request. + DiskId string `protobuf:"bytes,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + // Name of the snapshot. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the snapshot. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. 
+ Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{3} +} +func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) +} +func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) +} +func (m *CreateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotRequest.Size(m) +} +func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo + +func (m *CreateSnapshotRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateSnapshotRequest) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateSnapshotRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type CreateSnapshotMetadata struct { + // ID of the snapshot that is being created. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // ID of the source disk used to create this snapshot. 
+ DiskId string `protobuf:"bytes,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotMetadata) Reset() { *m = CreateSnapshotMetadata{} } +func (m *CreateSnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotMetadata) ProtoMessage() {} +func (*CreateSnapshotMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{4} +} +func (m *CreateSnapshotMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotMetadata.Unmarshal(m, b) +} +func (m *CreateSnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotMetadata.Merge(dst, src) +} +func (m *CreateSnapshotMetadata) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotMetadata.Size(m) +} +func (m *CreateSnapshotMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotMetadata proto.InternalMessageInfo + +func (m *CreateSnapshotMetadata) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *CreateSnapshotMetadata) GetDiskId() string { + if m != nil { + return m.DiskId + } + return "" +} + +type UpdateSnapshotRequest struct { + // ID of the Snapshot resource to update. + // To get the snapshot ID use a [SnapshotService.List] request. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Field mask that specifies which fields of the Snapshot resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the snapshot. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the snapshot. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + // + // Existing set of `` labels `` is completely replaced by the provided set. 
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateSnapshotRequest) Reset() { *m = UpdateSnapshotRequest{} } +func (m *UpdateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateSnapshotRequest) ProtoMessage() {} +func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{5} +} +func (m *UpdateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateSnapshotRequest.Unmarshal(m, b) +} +func (m *UpdateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateSnapshotRequest.Merge(dst, src) +} +func (m *UpdateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_UpdateSnapshotRequest.Size(m) +} +func (m *UpdateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateSnapshotRequest proto.InternalMessageInfo + +func (m *UpdateSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *UpdateSnapshotRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateSnapshotRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateSnapshotRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type UpdateSnapshotMetadata struct { + // ID of the Snapshot resource that is being updated. 
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateSnapshotMetadata) Reset() { *m = UpdateSnapshotMetadata{} } +func (m *UpdateSnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateSnapshotMetadata) ProtoMessage() {} +func (*UpdateSnapshotMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{6} +} +func (m *UpdateSnapshotMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateSnapshotMetadata.Unmarshal(m, b) +} +func (m *UpdateSnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateSnapshotMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateSnapshotMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateSnapshotMetadata.Merge(dst, src) +} +func (m *UpdateSnapshotMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateSnapshotMetadata.Size(m) +} +func (m *UpdateSnapshotMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateSnapshotMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateSnapshotMetadata proto.InternalMessageInfo + +func (m *UpdateSnapshotMetadata) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type DeleteSnapshotRequest struct { + // ID of the snapshot to delete. + // To get the snapshot ID, use a [SnapshotService.List] request. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{7} +} +func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) +} +func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) +} +func (m *DeleteSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotRequest.Size(m) +} +func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo + +func (m *DeleteSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type DeleteSnapshotMetadata struct { + // ID of the snapshot that is being deleted. 
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotMetadata) Reset() { *m = DeleteSnapshotMetadata{} } +func (m *DeleteSnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotMetadata) ProtoMessage() {} +func (*DeleteSnapshotMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{8} +} +func (m *DeleteSnapshotMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotMetadata.Unmarshal(m, b) +} +func (m *DeleteSnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotMetadata.Merge(dst, src) +} +func (m *DeleteSnapshotMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotMetadata.Size(m) +} +func (m *DeleteSnapshotMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotMetadata proto.InternalMessageInfo + +func (m *DeleteSnapshotMetadata) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type ListSnapshotOperationsRequest struct { + // ID of the Snapshot resource to list operations for. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListSnapshotOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListSnapshotOperationsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotOperationsRequest) Reset() { *m = ListSnapshotOperationsRequest{} } +func (m *ListSnapshotOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotOperationsRequest) ProtoMessage() {} +func (*ListSnapshotOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{9} +} +func (m *ListSnapshotOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotOperationsRequest.Unmarshal(m, b) +} +func (m *ListSnapshotOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotOperationsRequest.Merge(dst, src) +} +func (m *ListSnapshotOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListSnapshotOperationsRequest.Size(m) +} +func (m *ListSnapshotOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotOperationsRequest proto.InternalMessageInfo + +func (m *ListSnapshotOperationsRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *ListSnapshotOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSnapshotOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListSnapshotOperationsResponse struct { + // List of operations for the specified snapshot. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListSnapshotOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListSnapshotOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotOperationsResponse) Reset() { *m = ListSnapshotOperationsResponse{} } +func (m *ListSnapshotOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotOperationsResponse) ProtoMessage() {} +func (*ListSnapshotOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_snapshot_service_150bb34e00a40392, []int{10} +} +func (m *ListSnapshotOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotOperationsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotOperationsResponse.Merge(dst, src) +} +func (m *ListSnapshotOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotOperationsResponse.Size(m) +} +func (m *ListSnapshotOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotOperationsResponse proto.InternalMessageInfo + +func (m *ListSnapshotOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListSnapshotOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetSnapshotRequest)(nil), "yandex.cloud.compute.v1.GetSnapshotRequest") + proto.RegisterType((*ListSnapshotsRequest)(nil), "yandex.cloud.compute.v1.ListSnapshotsRequest") + proto.RegisterType((*ListSnapshotsResponse)(nil), "yandex.cloud.compute.v1.ListSnapshotsResponse") + proto.RegisterType((*CreateSnapshotRequest)(nil), "yandex.cloud.compute.v1.CreateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.CreateSnapshotRequest.LabelsEntry") + proto.RegisterType((*CreateSnapshotMetadata)(nil), "yandex.cloud.compute.v1.CreateSnapshotMetadata") + proto.RegisterType((*UpdateSnapshotRequest)(nil), "yandex.cloud.compute.v1.UpdateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.compute.v1.UpdateSnapshotRequest.LabelsEntry") + proto.RegisterType((*UpdateSnapshotMetadata)(nil), "yandex.cloud.compute.v1.UpdateSnapshotMetadata") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "yandex.cloud.compute.v1.DeleteSnapshotRequest") + proto.RegisterType((*DeleteSnapshotMetadata)(nil), "yandex.cloud.compute.v1.DeleteSnapshotMetadata") + proto.RegisterType((*ListSnapshotOperationsRequest)(nil), "yandex.cloud.compute.v1.ListSnapshotOperationsRequest") + proto.RegisterType((*ListSnapshotOperationsResponse)(nil), "yandex.cloud.compute.v1.ListSnapshotOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SnapshotServiceClient is the client API for SnapshotService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SnapshotServiceClient interface { + // Returns the specified Snapshot resource. + // + // To get the list of available Snapshot resources, make a [List] request. + Get(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Retrieves the list of Snapshot resources in the specified folder. + List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + // Creates a snapshot of the specified disk. + Create(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified snapshot. + // + // Values of omitted parameters are not changed. + Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified snapshot. + // + // Deleting a snapshot removes its data permanently and is irreversible. + Delete(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists operations for the specified snapshot. + ListOperations(ctx context.Context, in *ListSnapshotOperationsRequest, opts ...grpc.CallOption) (*ListSnapshotOperationsResponse, error) +} + +type snapshotServiceClient struct { + cc *grpc.ClientConn +} + +func NewSnapshotServiceClient(cc *grpc.ClientConn) SnapshotServiceClient { + return &snapshotServiceClient{cc} +} + +func (c *snapshotServiceClient) Get(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.SnapshotService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotServiceClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.SnapshotService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotServiceClient) Create(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.SnapshotService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotServiceClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.SnapshotService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotServiceClient) Delete(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.SnapshotService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotServiceClient) ListOperations(ctx context.Context, in *ListSnapshotOperationsRequest, opts ...grpc.CallOption) (*ListSnapshotOperationsResponse, error) { + out := new(ListSnapshotOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.SnapshotService/ListOperations", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// SnapshotServiceServer is the server API for SnapshotService service. +type SnapshotServiceServer interface { + // Returns the specified Snapshot resource. + // + // To get the list of available Snapshot resources, make a [List] request. + Get(context.Context, *GetSnapshotRequest) (*Snapshot, error) + // Retrieves the list of Snapshot resources in the specified folder. + List(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + // Creates a snapshot of the specified disk. + Create(context.Context, *CreateSnapshotRequest) (*operation.Operation, error) + // Updates the specified snapshot. + // + // Values of omitted parameters are not changed. + Update(context.Context, *UpdateSnapshotRequest) (*operation.Operation, error) + // Deletes the specified snapshot. + // + // Deleting a snapshot removes its data permanently and is irreversible. + Delete(context.Context, *DeleteSnapshotRequest) (*operation.Operation, error) + // Lists operations for the specified snapshot. + ListOperations(context.Context, *ListSnapshotOperationsRequest) (*ListSnapshotOperationsResponse, error) +} + +func RegisterSnapshotServiceServer(s *grpc.Server, srv SnapshotServiceServer) { + s.RegisterService(&_SnapshotService_serviceDesc, srv) +} + +func _SnapshotService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.SnapshotService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotServiceServer).Get(ctx, req.(*GetSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnapshotService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.SnapshotService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotServiceServer).List(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnapshotService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.SnapshotService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotServiceServer).Create(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnapshotService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnapshotRequest) + if err := dec(in); err != nil { + return 
nil, err + } + if interceptor == nil { + return srv.(SnapshotServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.SnapshotService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotServiceServer).Update(ctx, req.(*UpdateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnapshotService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.SnapshotService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotServiceServer).Delete(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnapshotService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.SnapshotService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotServiceServer).ListOperations(ctx, req.(*ListSnapshotOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SnapshotService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.compute.v1.SnapshotService", + HandlerType: (*SnapshotServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _SnapshotService_Get_Handler, + }, + { + MethodName: "List", + Handler: _SnapshotService_List_Handler, + }, + { + MethodName: "Create", + Handler: _SnapshotService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _SnapshotService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _SnapshotService_Delete_Handler, + }, + { + MethodName: "ListOperations", + Handler: _SnapshotService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/compute/v1/snapshot_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/snapshot_service.proto", fileDescriptor_snapshot_service_150bb34e00a40392) +} + +var fileDescriptor_snapshot_service_150bb34e00a40392 = []byte{ + // 984 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x41, 0x6f, 0xdc, 0x44, + 0x14, 0xd6, 0x64, 0x13, 0x37, 0xfb, 0x16, 0x68, 0x35, 0xea, 0x36, 0x2b, 0x8b, 0x40, 0x6a, 0xd4, + 0xb2, 0x6c, 0xb0, 0xbd, 0xde, 0x92, 0x85, 0xa4, 0xad, 0x2a, 0x12, 0x92, 0x2a, 0x52, 0x2b, 0x90, + 0x53, 0x2e, 0x44, 0x65, 0x35, 0x89, 0x27, 0x5b, 0x6b, 0x1d, 0xdb, 0xec, 0x78, 0x57, 0x4d, 0x4a, + 0x25, 0x14, 0x71, 0x0a, 0xc7, 0xde, 0x91, 0x10, 0xbf, 0x80, 0x9c, 0x8a, 0xf8, 0x01, 0xc9, 0xb9, + 0xfc, 0x05, 0x0e, 0x5c, 0xe9, 0x91, 0x13, 0xf2, 0xcc, 0x78, 0xb3, 0x9b, 0xd8, 0x8d, 0x03, 0x08, + 0x71, 0x1b, 0xfb, 0x7d, 0xef, 0xcd, 0x37, 0xef, 0xbd, 0xf9, 0xde, 0x80, 0xb1, 0x43, 0x7c, 0x87, + 0x3e, 0x36, 0x37, 0xbd, 0xa0, 0xe7, 
0x98, 0x9b, 0xc1, 0x76, 0xd8, 0x8b, 0xa8, 0xd9, 0xb7, 0x4c, + 0xe6, 0x93, 0x90, 0x3d, 0x0a, 0xa2, 0x16, 0xa3, 0xdd, 0xbe, 0xbb, 0x49, 0x8d, 0xb0, 0x1b, 0x44, + 0x01, 0x9e, 0x12, 0x78, 0x83, 0xe3, 0x0d, 0x89, 0x37, 0xfa, 0x96, 0xfa, 0x66, 0x3b, 0x08, 0xda, + 0x1e, 0x35, 0x49, 0xe8, 0x9a, 0xc4, 0xf7, 0x83, 0x88, 0x44, 0x6e, 0xe0, 0x33, 0xe1, 0xa6, 0xce, + 0x48, 0x2b, 0xff, 0xda, 0xe8, 0x6d, 0x99, 0x5b, 0x2e, 0xf5, 0x9c, 0xd6, 0x36, 0x61, 0x1d, 0x89, + 0x50, 0x25, 0x91, 0xd8, 0x3f, 0x08, 0x69, 0x97, 0xbb, 0x4b, 0xdb, 0xf5, 0xb3, 0x48, 0xa6, 0xe2, + 0x06, 0x51, 0x4e, 0xc5, 0x9b, 0x1e, 0xc1, 0xf5, 0x89, 0xe7, 0x3a, 0x43, 0x66, 0x6d, 0x09, 0xf0, + 0x5d, 0x1a, 0xad, 0xc9, 0xd8, 0x36, 0xfd, 0xaa, 0x47, 0x59, 0x84, 0x75, 0x28, 0x0d, 0x72, 0xe2, + 0x3a, 0x15, 0x34, 0x83, 0xaa, 0xc5, 0xc5, 0xd7, 0x7e, 0x3f, 0xb4, 0xd0, 0xfe, 0x91, 0x35, 0x7e, + 0xeb, 0xf6, 0x5c, 0xdd, 0x86, 0x04, 0xb0, 0xea, 0x68, 0xcf, 0x11, 0x5c, 0xbe, 0xe7, 0xb2, 0x41, + 0x18, 0x96, 0xc4, 0x79, 0x0f, 0x8a, 0x5b, 0x81, 0xe7, 0xd0, 0x6e, 0x56, 0x94, 0x49, 0x61, 0x5e, + 0x75, 0xf0, 0xbb, 0x50, 0x0c, 0x49, 0x9b, 0xb6, 0x98, 0xbb, 0x4b, 0x2b, 0x63, 0x33, 0xa8, 0x5a, + 0x58, 0x84, 0x3f, 0x0f, 0x2d, 0xe5, 0xd6, 0x6d, 0xab, 0x5e, 0xaf, 0xdb, 0x93, 0xb1, 0x71, 0xcd, + 0xdd, 0xa5, 0xb8, 0x0a, 0xc0, 0x81, 0x51, 0xd0, 0xa1, 0x7e, 0xa5, 0xc0, 0x83, 0x16, 0xf7, 0x8f, + 0xac, 0x09, 0x8e, 0xb4, 0x79, 0x94, 0x07, 0xb1, 0x0d, 0x6b, 0xa0, 0x6c, 0xb9, 0x5e, 0x44, 0xbb, + 0x95, 0x71, 0x8e, 0x82, 0xfd, 0xa3, 0x41, 0x3c, 0x69, 0xd1, 0xbe, 0x41, 0x50, 0x3e, 0x41, 0x9d, + 0x85, 0x81, 0xcf, 0x28, 0xbe, 0x03, 0xc5, 0xe4, 0x88, 0xac, 0x82, 0x66, 0x0a, 0xd5, 0x52, 0xe3, + 0xaa, 0x91, 0xd1, 0x11, 0xc6, 0x20, 0x81, 0xc7, 0x3e, 0xf8, 0x3a, 0x5c, 0xf4, 0xe9, 0xe3, 0xa8, + 0x35, 0xc4, 0x36, 0x3e, 0x57, 0xd1, 0x7e, 0x3d, 0xfe, 0xfd, 0x59, 0x42, 0x53, 0xfb, 0xbe, 0x00, + 0xe5, 0xa5, 0x2e, 0x25, 0x11, 0x3d, 0x59, 0x86, 0x73, 0xa4, 0xef, 0x1a, 0x5c, 0x70, 0x5c, 0xd6, + 0x89, 0x81, 0x63, 0x29, 0x40, 0x25, 0x36, 0xae, 0x3a, 0x78, 0x0e, 0xc6, 0x7d, 0xb2, 0x4d, 0x65, + 0xda, 0xae, 0xbe, 0x3c, 0xb4, 0xa6, 0xbf, 0x5e, 0x27, 0xfa, 0xee, 0xc3, 0x75, 0x9d, 0xe8, 0xbb, + 0x75, 0x7d, 0xfe, 0xe1, 0x13, 0xeb, 0xfd, 0xa6, 0xf5, 0x74, 0x5d, 0x7e, 0xd9, 0x1c, 0x8e, 0x67, + 0xa1, 0xe4, 0x50, 0xb6, 0xd9, 0x75, 0xc3, 0xb8, 0x75, 0x64, 0x3a, 0x65, 0xd2, 0x1b, 0x73, 0x4d, + 0x7b, 0xd8, 0x8a, 0x9f, 0x21, 0x50, 0x3c, 0xb2, 0x41, 0x3d, 0x56, 0x51, 0x78, 0xda, 0x16, 0x32, + 0xd3, 0x96, 0x7a, 0x6c, 0xe3, 0x1e, 0x77, 0x5e, 0xf6, 0xa3, 0xee, 0xce, 0xe2, 0x9d, 0x97, 0x87, + 0x56, 0x69, 0x5d, 0x6f, 0xd5, 0xf5, 0xf9, 0x98, 0x66, 0x6d, 0x8f, 0x9f, 0xa8, 0xf9, 0x81, 0x38, + 0x59, 0xf3, 0xc6, 0xc1, 0x91, 0xa5, 0xa8, 0xe3, 0x96, 0xce, 0x57, 0x18, 0x5f, 0x92, 0x87, 0x19, + 0xe0, 0x6d, 0x49, 0x45, 0x9d, 0x87, 0xd2, 0x50, 0x5c, 0x7c, 0x09, 0x0a, 0x1d, 0xba, 0x23, 0x92, + 0x6a, 0xc7, 0x4b, 0x7c, 0x19, 0x26, 0xfa, 0xc4, 0xeb, 0x51, 0x59, 0x24, 0xf1, 0xb1, 0x30, 0xf6, + 0x11, 0xd2, 0x6c, 0xb8, 0x32, 0x4a, 0xf4, 0x3e, 0x8d, 0x88, 0x43, 0x22, 0x82, 0xdf, 0x4e, 0xb9, + 0x27, 0xc3, 0x37, 0x03, 0x4f, 0x9d, 0x28, 0x4b, 0x52, 0x08, 0xed, 0x79, 0x01, 0xca, 0x9f, 0x87, + 0x4e, 0x4a, 0xd1, 0xcf, 0x77, 0xf7, 0xf0, 0x4d, 0x28, 0xf5, 0x78, 0x1c, 0x2e, 0x30, 0x7c, 0x97, + 0x52, 0x43, 0x35, 0x84, 0x06, 0x19, 0x89, 0x06, 0x19, 0x2b, 0xb1, 0x06, 0xdd, 0x27, 0xac, 0x63, + 0x83, 0x80, 0xc7, 0xeb, 0xff, 0xba, 0x1d, 0x26, 0xce, 0x68, 0x87, 0xd4, 0x84, 0xfc, 0xef, 0xda, + 0x61, 0x1e, 0xae, 0x8c, 0x12, 0xcd, 0xdd, 0x0e, 0xda, 0x0a, 0x94, 0x3f, 0xa1, 0x1e, 0xfd, 0xa7, + 0x45, 0x8f, 0x29, 0x8c, 0xc6, 0xc9, 0x4f, 0xe1, 0x07, 0x04, 
0xd3, 0xc3, 0x82, 0xf7, 0x69, 0x32, + 0x2f, 0xd8, 0xdf, 0x6c, 0xc0, 0x7f, 0x5f, 0xb8, 0xb5, 0xef, 0x10, 0xbc, 0x95, 0xc5, 0x51, 0xaa, + 0xf3, 0xc7, 0x00, 0x83, 0x49, 0x97, 0x21, 0xcf, 0xc7, 0x93, 0x70, 0xe0, 0x6f, 0x0f, 0x39, 0xe5, + 0xd5, 0xe7, 0xc6, 0x1f, 0x17, 0xe0, 0x62, 0xc2, 0x64, 0x4d, 0x3c, 0x10, 0xf0, 0x1e, 0x82, 0xc2, + 0x5d, 0x1a, 0xe1, 0xd9, 0xcc, 0x5e, 0x3e, 0x3d, 0x55, 0xd5, 0xb3, 0xc7, 0x87, 0x36, 0xbb, 0xf7, + 0xeb, 0x6f, 0xcf, 0xc6, 0xae, 0xe1, 0x77, 0xd2, 0x26, 0x3f, 0x33, 0x9f, 0x0c, 0x15, 0xe6, 0x29, + 0xfe, 0x16, 0xc1, 0x78, 0x9c, 0x26, 0xac, 0x67, 0x06, 0x4e, 0x9b, 0xca, 0xaa, 0x91, 0x17, 0x2e, + 0x72, 0xad, 0x4d, 0x73, 0x52, 0x53, 0xb8, 0x9c, 0x4a, 0x0a, 0xff, 0x88, 0x40, 0x11, 0xfa, 0x88, + 0x8d, 0xf3, 0x29, 0xbd, 0x7a, 0x76, 0xc5, 0xb4, 0x95, 0x83, 0x17, 0x35, 0x2d, 0x53, 0x80, 0x27, + 0x93, 0x3f, 0x9c, 0xa2, 0xaa, 0xa5, 0x53, 0x5c, 0x40, 0x35, 0xfc, 0x13, 0x02, 0x45, 0x5c, 0xdb, + 0x57, 0xb0, 0x4c, 0x15, 0xa0, 0x3c, 0x2c, 0x1f, 0x08, 0x96, 0x19, 0xba, 0x30, 0xca, 0xb2, 0xda, + 0xc8, 0x53, 0xdd, 0x98, 0xf3, 0x2f, 0x08, 0x14, 0x71, 0xcf, 0x5f, 0xc1, 0x39, 0x55, 0x50, 0xf2, + 0x70, 0xfe, 0xf2, 0xe0, 0x45, 0xcd, 0xcc, 0x14, 0x92, 0xf2, 0xc9, 0x09, 0xb2, 0xbc, 0x1d, 0x46, + 0x3b, 0xa2, 0x3d, 0x6b, 0xb9, 0xda, 0xf3, 0x67, 0x04, 0x6f, 0xc4, 0x0d, 0x75, 0x7c, 0x7b, 0x71, + 0x33, 0x57, 0xe7, 0x9d, 0x92, 0x24, 0xf5, 0xc3, 0x73, 0xfb, 0xc9, 0xd6, 0x6d, 0x72, 0xc2, 0x75, + 0x6c, 0xe4, 0x20, 0x7c, 0xfc, 0x74, 0x66, 0x8b, 0xcb, 0x5f, 0x2c, 0xb5, 0xdd, 0xe8, 0x51, 0x6f, + 0x23, 0xde, 0xcb, 0x14, 0x9b, 0xeb, 0xe2, 0x09, 0xdd, 0x0e, 0xf4, 0x36, 0xf5, 0x79, 0x5a, 0xcc, + 0x8c, 0xb7, 0xfa, 0x4d, 0xb9, 0xdc, 0x50, 0x38, 0xec, 0xc6, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xfd, 0xcf, 0xec, 0x9e, 0x7a, 0x0c, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone.pb.go new file mode 100644 index 000000000..e35f70673 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/zone.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Zone_Status int32 + +const ( + Zone_STATUS_UNSPECIFIED Zone_Status = 0 + // Zone is available. You can access the resources allocated in this zone. + Zone_UP Zone_Status = 1 + // Zone is not available. + Zone_DOWN Zone_Status = 2 +) + +var Zone_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "UP", + 2: "DOWN", +} +var Zone_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "UP": 1, + "DOWN": 2, +} + +func (x Zone_Status) String() string { + return proto.EnumName(Zone_Status_name, int32(x)) +} +func (Zone_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_zone_f47efd8ae474576f, []int{0, 0} +} + +// Availability zone. 
For more information, see [Availability zones](/docs/overview/concepts/geo-scope). +type Zone struct { + // ID of the zone. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the region. + RegionId string `protobuf:"bytes,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + // Status of the zone. + Status Zone_Status `protobuf:"varint,3,opt,name=status,proto3,enum=yandex.cloud.compute.v1.Zone_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Zone) Reset() { *m = Zone{} } +func (m *Zone) String() string { return proto.CompactTextString(m) } +func (*Zone) ProtoMessage() {} +func (*Zone) Descriptor() ([]byte, []int) { + return fileDescriptor_zone_f47efd8ae474576f, []int{0} +} +func (m *Zone) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Zone.Unmarshal(m, b) +} +func (m *Zone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Zone.Marshal(b, m, deterministic) +} +func (dst *Zone) XXX_Merge(src proto.Message) { + xxx_messageInfo_Zone.Merge(dst, src) +} +func (m *Zone) XXX_Size() int { + return xxx_messageInfo_Zone.Size(m) +} +func (m *Zone) XXX_DiscardUnknown() { + xxx_messageInfo_Zone.DiscardUnknown(m) +} + +var xxx_messageInfo_Zone proto.InternalMessageInfo + +func (m *Zone) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Zone) GetRegionId() string { + if m != nil { + return m.RegionId + } + return "" +} + +func (m *Zone) GetStatus() Zone_Status { + if m != nil { + return m.Status + } + return Zone_STATUS_UNSPECIFIED +} + +func init() { + proto.RegisterType((*Zone)(nil), "yandex.cloud.compute.v1.Zone") + proto.RegisterEnum("yandex.cloud.compute.v1.Zone_Status", Zone_Status_name, Zone_Status_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/zone.proto", fileDescriptor_zone_f47efd8ae474576f) +} + +var fileDescriptor_zone_f47efd8ae474576f = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xaa, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0x4f, 0xce, 0xcf, 0x2d, 0x28, 0x2d, 0x49, + 0xd5, 0x2f, 0x33, 0xd4, 0xaf, 0xca, 0xcf, 0x4b, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x87, 0xa8, 0xd1, 0x03, 0xab, 0xd1, 0x83, 0xaa, 0xd1, 0x2b, 0x33, 0x54, 0x5a, 0xca, 0xc8, 0xc5, + 0x12, 0x95, 0x9f, 0x97, 0x2a, 0xc4, 0xc7, 0xc5, 0x94, 0x99, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, + 0x19, 0xc4, 0x94, 0x99, 0x22, 0x24, 0xcd, 0xc5, 0x59, 0x94, 0x9a, 0x9e, 0x99, 0x9f, 0x17, 0x9f, + 0x99, 0x22, 0xc1, 0x04, 0x16, 0xe6, 0x80, 0x08, 0x78, 0xa6, 0x08, 0xd9, 0x70, 0xb1, 0x15, 0x97, + 0x24, 0x96, 0x94, 0x16, 0x4b, 0x30, 0x2b, 0x30, 0x6a, 0xf0, 0x19, 0xa9, 0xe8, 0xe1, 0x30, 0x5f, + 0x0f, 0x64, 0xb6, 0x5e, 0x30, 0x58, 0x6d, 0x10, 0x54, 0x8f, 0x92, 0x11, 0x17, 0x1b, 0x44, 0x44, + 0x48, 0x8c, 0x4b, 0x28, 0x38, 0xc4, 0x31, 0x24, 0x34, 0x38, 0x3e, 0xd4, 0x2f, 0x38, 0xc0, 0xd5, + 0xd9, 0xd3, 0xcd, 0xd3, 0xd5, 0x45, 0x80, 0x41, 0x88, 0x8d, 0x8b, 0x29, 0x34, 0x40, 0x80, 0x51, + 0x88, 0x83, 0x8b, 0xc5, 0xc5, 0x3f, 0xdc, 0x4f, 0x80, 0xc9, 0xc9, 0x35, 0xca, 0x39, 0x3d, 0xb3, + 0x24, 0xa3, 0x34, 0x09, 0x64, 0xb8, 0x3e, 0xc4, 0x36, 0x5d, 0x88, 0x8f, 0xd3, 0xf3, 0x75, 0xd3, + 0x53, 0xf3, 0xc0, 0xfe, 0xd4, 0xc7, 0x11, 0x14, 0xd6, 0x50, 0x66, 0x12, 0x1b, 0x58, 0x99, 0x31, + 0x20, 0x00, 0x00, 0xff, 0xff, 0xa6, 0xd7, 0x67, 0x16, 0x34, 0x01, 0x00, 0x00, +} diff --git 
a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone_service.pb.go new file mode 100644 index 000000000..653c2da44 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1/zone_service.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/compute/v1/zone_service.proto + +package compute // import "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ListZonesRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListZonesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListZonesResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} } +func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) } +func (*ListZonesRequest) ProtoMessage() {} +func (*ListZonesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_zone_service_e9c86d533e92d608, []int{0} +} +func (m *ListZonesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListZonesRequest.Unmarshal(m, b) +} +func (m *ListZonesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListZonesRequest.Marshal(b, m, deterministic) +} +func (dst *ListZonesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListZonesRequest.Merge(dst, src) +} +func (m *ListZonesRequest) XXX_Size() int { + return xxx_messageInfo_ListZonesRequest.Size(m) +} +func (m *ListZonesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListZonesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListZonesRequest proto.InternalMessageInfo + +func (m *ListZonesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListZonesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListZonesResponse struct { + // List of availability zones. + Zones []*Zone `protobuf:"bytes,1,rep,name=zones,proto3" json:"zones,omitempty"` + // This token allows you to get the next page of results for list requests. 
If the number of results + // is larger than [ListZonesRequest.page_size], use + // the [ListZonesResponse.next_page_token] as the value + // for the [ListZonesRequest.page_token] query parameter + // in the next list request. Subsequent list requests will have their own + // [ListZonesResponse.next_page_token] to continue paging through the results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} } +func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) } +func (*ListZonesResponse) ProtoMessage() {} +func (*ListZonesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_zone_service_e9c86d533e92d608, []int{1} +} +func (m *ListZonesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListZonesResponse.Unmarshal(m, b) +} +func (m *ListZonesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListZonesResponse.Marshal(b, m, deterministic) +} +func (dst *ListZonesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListZonesResponse.Merge(dst, src) +} +func (m *ListZonesResponse) XXX_Size() int { + return xxx_messageInfo_ListZonesResponse.Size(m) +} +func (m *ListZonesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListZonesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListZonesResponse proto.InternalMessageInfo + +func (m *ListZonesResponse) GetZones() []*Zone { + if m != nil { + return m.Zones + } + return nil +} + +func (m *ListZonesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type GetZoneRequest struct { + // ID of the availability zone to return information about. + ZoneId string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetZoneRequest) Reset() { *m = GetZoneRequest{} } +func (m *GetZoneRequest) String() string { return proto.CompactTextString(m) } +func (*GetZoneRequest) ProtoMessage() {} +func (*GetZoneRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_zone_service_e9c86d533e92d608, []int{2} +} +func (m *GetZoneRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetZoneRequest.Unmarshal(m, b) +} +func (m *GetZoneRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetZoneRequest.Marshal(b, m, deterministic) +} +func (dst *GetZoneRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetZoneRequest.Merge(dst, src) +} +func (m *GetZoneRequest) XXX_Size() int { + return xxx_messageInfo_GetZoneRequest.Size(m) +} +func (m *GetZoneRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetZoneRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetZoneRequest proto.InternalMessageInfo + +func (m *GetZoneRequest) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func init() { + proto.RegisterType((*ListZonesRequest)(nil), "yandex.cloud.compute.v1.ListZonesRequest") + proto.RegisterType((*ListZonesResponse)(nil), "yandex.cloud.compute.v1.ListZonesResponse") + proto.RegisterType((*GetZoneRequest)(nil), "yandex.cloud.compute.v1.GetZoneRequest") +} + +// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ZoneServiceClient is the client API for ZoneService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ZoneServiceClient interface { + // Returns the information about the specified availability zone. + // + // To get the list of availability zones, make a [List] request. + Get(ctx context.Context, in *GetZoneRequest, opts ...grpc.CallOption) (*Zone, error) + // Retrieves the list of availability zones. + List(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) +} + +type zoneServiceClient struct { + cc *grpc.ClientConn +} + +func NewZoneServiceClient(cc *grpc.ClientConn) ZoneServiceClient { + return &zoneServiceClient{cc} +} + +func (c *zoneServiceClient) Get(ctx context.Context, in *GetZoneRequest, opts ...grpc.CallOption) (*Zone, error) { + out := new(Zone) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ZoneService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *zoneServiceClient) List(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) { + out := new(ListZonesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.compute.v1.ZoneService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ZoneServiceServer is the server API for ZoneService service. +type ZoneServiceServer interface { + // Returns the information about the specified availability zone. + // + // To get the list of availability zones, make a [List] request. + Get(context.Context, *GetZoneRequest) (*Zone, error) + // Retrieves the list of availability zones. 
+ List(context.Context, *ListZonesRequest) (*ListZonesResponse, error) +} + +func RegisterZoneServiceServer(s *grpc.Server, srv ZoneServiceServer) { + s.RegisterService(&_ZoneService_serviceDesc, srv) +} + +func _ZoneService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetZoneRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ZoneServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ZoneService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ZoneServiceServer).Get(ctx, req.(*GetZoneRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ZoneService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListZonesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ZoneServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.compute.v1.ZoneService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ZoneServiceServer).List(ctx, req.(*ListZonesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ZoneService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.compute.v1.ZoneService", + HandlerType: (*ZoneServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ZoneService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ZoneService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/compute/v1/zone_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/compute/v1/zone_service.proto", fileDescriptor_zone_service_e9c86d533e92d608) +} + +var fileDescriptor_zone_service_e9c86d533e92d608 = []byte{ + // 421 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0xc9, 0x76, 0xb7, 0x9a, 0x59, 0xff, 0xed, 0x78, 0xb0, 0x46, 0x0b, 0x25, 0xa2, 0x1b, + 0x17, 0x36, 0x93, 0x6c, 0x11, 0x0f, 0xb6, 0x97, 0x88, 0x14, 0xc1, 0x83, 0xa4, 0x9e, 0x7a, 0x29, + 0x69, 0xf3, 0x12, 0x07, 0xeb, 0x4c, 0xec, 0x4c, 0x42, 0xad, 0x78, 0xf1, 0xd8, 0xab, 0x1f, 0xaa, + 0xbd, 0xfb, 0x15, 0x3c, 0xf8, 0x19, 0xf4, 0x22, 0x33, 0x13, 0xc5, 0x56, 0x52, 0xf6, 0x16, 0xf2, + 0xfe, 0xe6, 0x79, 0x9e, 0xf7, 0x0f, 0x3a, 0xfb, 0x98, 0xb0, 0x14, 0x16, 0x64, 0x3a, 0xe3, 0x45, + 0x4a, 0xa6, 0xfc, 0x7d, 0x5e, 0x48, 0x20, 0x65, 0x48, 0x96, 0x9c, 0xc1, 0x58, 0xc0, 0xbc, 0xa4, + 0x53, 0xf0, 0xf3, 0x39, 0x97, 0x1c, 0xdf, 0x31, 0xac, 0xaf, 0x59, 0xbf, 0x62, 0xfd, 0x32, 0x74, + 0xee, 0x67, 0x9c, 0x67, 0x33, 0x20, 0x49, 0x4e, 0x49, 0xc2, 0x18, 0x97, 0x89, 0xa4, 0x9c, 0x09, + 0xf3, 0xcc, 0x71, 0xf7, 0x59, 0x54, 0x4c, 0x7b, 0x8b, 0x29, 0x93, 0x19, 0x4d, 0xb5, 0x86, 0x29, + 0xbb, 0x80, 0x6e, 0xbd, 0xa2, 0x42, 0x8e, 0x38, 0x03, 0x11, 0xc3, 0x87, 0x02, 0x84, 0xc4, 0xa7, + 0xc8, 0xce, 0x93, 0x0c, 0xc6, 0x82, 0x2e, 0xa1, 0x65, 0x75, 0x2c, 0xaf, 0x11, 0xa1, 0x9f, 0xeb, + 0xb0, 0xd9, 0xeb, 0x87, 0x41, 0x10, 0xc4, 0x57, 0x55, 0x71, 0x48, 0x97, 0x80, 0x3d, 0x84, 0x34, + 0x28, 0xf9, 0x3b, 0x60, 0xad, 0x83, 0x8e, 0xe5, 0xd9, 0x91, 0xbd, 0xda, 0x84, 0x47, 0x9a, 0x8c, + 0xb5, 0xca, 0x1b, 0x55, 0x73, 0x73, 0x74, 0xf2, 0x8f, 
0x8d, 0xc8, 0x39, 0x13, 0x80, 0xbb, 0xe8, + 0x48, 0x05, 0x15, 0x2d, 0xab, 0xd3, 0xf0, 0x8e, 0x2f, 0xda, 0x7e, 0xcd, 0x14, 0x7c, 0xf5, 0x2c, + 0x36, 0x2c, 0x7e, 0x84, 0x6e, 0x32, 0x58, 0xc8, 0xf1, 0xae, 0x71, 0x7c, 0x5d, 0xfd, 0x7e, 0xfd, + 0xd7, 0xf1, 0x29, 0xba, 0x31, 0x00, 0x6d, 0xf8, 0xa7, 0xad, 0x87, 0xe8, 0x8a, 0x1e, 0x3d, 0x4d, + 0x75, 0x53, 0x76, 0x74, 0xed, 0xc7, 0x3a, 0xb4, 0x56, 0x9b, 0xf0, 0xb0, 0xd7, 0x7f, 0x12, 0xc4, + 0x4d, 0x55, 0x7c, 0x99, 0x5e, 0xfc, 0xb2, 0xd0, 0xb1, 0x7a, 0x36, 0x34, 0x1b, 0xc2, 0x73, 0xd4, + 0x18, 0x80, 0xc4, 0xa7, 0xb5, 0xe9, 0xb6, 0x6d, 0x9c, 0xfd, 0x6d, 0xb8, 0x0f, 0xbe, 0x7c, 0xfb, + 0xfe, 0xf5, 0xa0, 0x8d, 0xef, 0xed, 0xee, 0x4b, 0x90, 0x4f, 0x55, 0xbc, 0xcf, 0x78, 0x81, 0x0e, + 0xd5, 0xb8, 0xf0, 0xe3, 0x5a, 0xad, 0xdd, 0xa5, 0x39, 0x67, 0x97, 0x41, 0xcd, 0xe0, 0xdd, 0xbb, + 0x3a, 0xc3, 0x6d, 0x7c, 0xf2, 0x5f, 0x86, 0xe8, 0xc5, 0xe8, 0x79, 0x46, 0xe5, 0xdb, 0x62, 0xa2, + 0x14, 0x88, 0x91, 0x3c, 0x37, 0xb7, 0x93, 0xf1, 0xf3, 0x0c, 0x98, 0x3e, 0x1b, 0x52, 0x73, 0x78, + 0xcf, 0xaa, 0xcf, 0x49, 0x53, 0x63, 0xdd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x18, 0x19, 0x69, + 0xb4, 0x05, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint.pb.go new file mode 100644 index 000000000..9be8fe363 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint.pb.go @@ -0,0 +1,87 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/endpoint/api_endpoint.proto + +package endpoint // import "github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ApiEndpoint struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiEndpoint) Reset() { *m = ApiEndpoint{} } +func (m *ApiEndpoint) String() string { return proto.CompactTextString(m) } +func (*ApiEndpoint) ProtoMessage() {} +func (*ApiEndpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_api_endpoint_f9ce6e4a311495de, []int{0} +} +func (m *ApiEndpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiEndpoint.Unmarshal(m, b) +} +func (m *ApiEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiEndpoint.Marshal(b, m, deterministic) +} +func (dst *ApiEndpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiEndpoint.Merge(dst, src) +} +func (m *ApiEndpoint) XXX_Size() int { + return xxx_messageInfo_ApiEndpoint.Size(m) +} +func (m *ApiEndpoint) XXX_DiscardUnknown() { + xxx_messageInfo_ApiEndpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiEndpoint proto.InternalMessageInfo + +func (m *ApiEndpoint) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ApiEndpoint) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ApiEndpoint)(nil), "yandex.cloud.endpoint.ApiEndpoint") +} + +func init() { + proto.RegisterFile("yandex/cloud/endpoint/api_endpoint.proto", fileDescriptor_api_endpoint_f9ce6e4a311495de) +} + +var fileDescriptor_api_endpoint_f9ce6e4a311495de = []byte{ + // 152 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xa8, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0x4f, 0xcd, 0x4b, 0x29, 0xc8, 0xcf, 0xcc, + 0x2b, 0xd1, 0x4f, 0x2c, 0xc8, 0x8c, 0x87, 0x71, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x44, + 0x21, 0x2a, 0xf5, 0xc0, 0x2a, 0xf5, 0x60, 0x92, 0x4a, 0xe6, 0x5c, 0xdc, 0x8e, 0x05, 0x99, 0xae, + 0x50, 0xae, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x53, + 0x66, 0x8a, 0x90, 0x04, 0x17, 0x7b, 0x62, 0x4a, 0x4a, 0x51, 0x6a, 0x71, 0xb1, 0x04, 0x13, 0x58, + 0x10, 0xc6, 0x75, 0x72, 0x89, 0x72, 0x4a, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x87, 0x18, 0xae, 0x0b, 0x71, 0x46, 0x7a, 0xbe, 0x6e, 0x7a, 0x6a, 0x1e, 0xd8, 0x5a, 0x7d, + 0xac, 0xee, 0xb3, 0x86, 0x31, 0x92, 0xd8, 0xc0, 0xaa, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x42, 0x6c, 0x3b, 0xb8, 0xc8, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint_service.pb.go new file mode 100644 index 000000000..28783722e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint/api_endpoint_service.pb.go @@ -0,0 +1,298 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/endpoint/api_endpoint_service.proto + +package endpoint // import "github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetApiEndpointRequest struct { + ApiEndpointId string `protobuf:"bytes,1,opt,name=api_endpoint_id,json=apiEndpointId,proto3" json:"api_endpoint_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetApiEndpointRequest) Reset() { *m = GetApiEndpointRequest{} } +func (m *GetApiEndpointRequest) String() string { return proto.CompactTextString(m) } +func (*GetApiEndpointRequest) ProtoMessage() {} +func (*GetApiEndpointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_api_endpoint_service_3b852f01606ed782, []int{0} +} +func (m *GetApiEndpointRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetApiEndpointRequest.Unmarshal(m, b) +} +func (m *GetApiEndpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetApiEndpointRequest.Marshal(b, m, deterministic) +} +func (dst *GetApiEndpointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetApiEndpointRequest.Merge(dst, src) +} +func (m *GetApiEndpointRequest) XXX_Size() int { + return xxx_messageInfo_GetApiEndpointRequest.Size(m) +} +func (m *GetApiEndpointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetApiEndpointRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetApiEndpointRequest proto.InternalMessageInfo + +func (m *GetApiEndpointRequest) GetApiEndpointId() string { + if m != nil { + return m.ApiEndpointId + } + return "" +} + +type ListApiEndpointsRequest struct { + PageSize int64 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListApiEndpointsRequest) Reset() { *m = ListApiEndpointsRequest{} } +func (m *ListApiEndpointsRequest) String() string { return proto.CompactTextString(m) } +func (*ListApiEndpointsRequest) ProtoMessage() {} +func (*ListApiEndpointsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_api_endpoint_service_3b852f01606ed782, []int{1} +} +func (m *ListApiEndpointsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListApiEndpointsRequest.Unmarshal(m, b) +} +func (m *ListApiEndpointsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListApiEndpointsRequest.Marshal(b, m, deterministic) +} +func (dst *ListApiEndpointsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListApiEndpointsRequest.Merge(dst, src) +} +func (m *ListApiEndpointsRequest) XXX_Size() int 
{ + return xxx_messageInfo_ListApiEndpointsRequest.Size(m) +} +func (m *ListApiEndpointsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListApiEndpointsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListApiEndpointsRequest proto.InternalMessageInfo + +func (m *ListApiEndpointsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListApiEndpointsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListApiEndpointsResponse struct { + Endpoints []*ApiEndpoint `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListApiEndpointsResponse) Reset() { *m = ListApiEndpointsResponse{} } +func (m *ListApiEndpointsResponse) String() string { return proto.CompactTextString(m) } +func (*ListApiEndpointsResponse) ProtoMessage() {} +func (*ListApiEndpointsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_api_endpoint_service_3b852f01606ed782, []int{2} +} +func (m *ListApiEndpointsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListApiEndpointsResponse.Unmarshal(m, b) +} +func (m *ListApiEndpointsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListApiEndpointsResponse.Marshal(b, m, deterministic) +} +func (dst *ListApiEndpointsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListApiEndpointsResponse.Merge(dst, src) +} +func (m *ListApiEndpointsResponse) XXX_Size() int { + return xxx_messageInfo_ListApiEndpointsResponse.Size(m) +} +func (m *ListApiEndpointsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListApiEndpointsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListApiEndpointsResponse proto.InternalMessageInfo + +func (m *ListApiEndpointsResponse) GetEndpoints() []*ApiEndpoint { + if m != nil { + return m.Endpoints + } + return nil +} + +func (m *ListApiEndpointsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetApiEndpointRequest)(nil), "yandex.cloud.endpoint.GetApiEndpointRequest") + proto.RegisterType((*ListApiEndpointsRequest)(nil), "yandex.cloud.endpoint.ListApiEndpointsRequest") + proto.RegisterType((*ListApiEndpointsResponse)(nil), "yandex.cloud.endpoint.ListApiEndpointsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ApiEndpointServiceClient is the client API for ApiEndpointService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ApiEndpointServiceClient interface { + Get(ctx context.Context, in *GetApiEndpointRequest, opts ...grpc.CallOption) (*ApiEndpoint, error) + List(ctx context.Context, in *ListApiEndpointsRequest, opts ...grpc.CallOption) (*ListApiEndpointsResponse, error) +} + +type apiEndpointServiceClient struct { + cc *grpc.ClientConn +} + +func NewApiEndpointServiceClient(cc *grpc.ClientConn) ApiEndpointServiceClient { + return &apiEndpointServiceClient{cc} +} + +func (c *apiEndpointServiceClient) Get(ctx context.Context, in *GetApiEndpointRequest, opts ...grpc.CallOption) (*ApiEndpoint, error) { + out := new(ApiEndpoint) + err := c.cc.Invoke(ctx, "/yandex.cloud.endpoint.ApiEndpointService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *apiEndpointServiceClient) List(ctx context.Context, in *ListApiEndpointsRequest, opts ...grpc.CallOption) (*ListApiEndpointsResponse, error) { + out := new(ListApiEndpointsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.endpoint.ApiEndpointService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ApiEndpointServiceServer is the server API for ApiEndpointService service. +type ApiEndpointServiceServer interface { + Get(context.Context, *GetApiEndpointRequest) (*ApiEndpoint, error) + List(context.Context, *ListApiEndpointsRequest) (*ListApiEndpointsResponse, error) +} + +func RegisterApiEndpointServiceServer(s *grpc.Server, srv ApiEndpointServiceServer) { + s.RegisterService(&_ApiEndpointService_serviceDesc, srv) +} + +func _ApiEndpointService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetApiEndpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApiEndpointServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.endpoint.ApiEndpointService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApiEndpointServiceServer).Get(ctx, req.(*GetApiEndpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ApiEndpointService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListApiEndpointsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApiEndpointServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.endpoint.ApiEndpointService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApiEndpointServiceServer).List(ctx, req.(*ListApiEndpointsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ApiEndpointService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.endpoint.ApiEndpointService", + HandlerType: (*ApiEndpointServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ApiEndpointService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ApiEndpointService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/endpoint/api_endpoint_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/endpoint/api_endpoint_service.proto", fileDescriptor_api_endpoint_service_3b852f01606ed782) +} + +var 
fileDescriptor_api_endpoint_service_3b852f01606ed782 = []byte{ + // 370 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x51, 0x4b, 0x2a, 0x41, + 0x18, 0x65, 0xf5, 0x72, 0xb9, 0x7e, 0xf7, 0xca, 0x85, 0x01, 0x69, 0xd9, 0x2c, 0x64, 0x89, 0xf0, + 0x21, 0x67, 0xc2, 0x1e, 0x7b, 0xa8, 0xa4, 0x90, 0xa0, 0x87, 0xd0, 0x7a, 0xe9, 0x65, 0x59, 0xdd, + 0x8f, 0x6d, 0xc8, 0x66, 0x36, 0x67, 0x0c, 0x53, 0x7c, 0x89, 0x7e, 0x40, 0xd0, 0x4f, 0xeb, 0x2f, + 0xf4, 0x43, 0x62, 0x67, 0xda, 0xb4, 0x54, 0xf2, 0x6d, 0xf7, 0x9b, 0x73, 0xce, 0x9c, 0x39, 0xe7, + 0x83, 0xdd, 0x87, 0x50, 0x44, 0x38, 0x64, 0xdd, 0x9e, 0x1c, 0x44, 0x0c, 0x45, 0x94, 0x48, 0x2e, + 0x34, 0x0b, 0x13, 0x1e, 0x64, 0x3f, 0x81, 0xc2, 0xfe, 0x3d, 0xef, 0x22, 0x4d, 0xfa, 0x52, 0x4b, + 0x52, 0xb2, 0x0c, 0x6a, 0x18, 0x34, 0x03, 0x79, 0xe5, 0x58, 0xca, 0xb8, 0x87, 0x29, 0x93, 0x85, + 0x42, 0x48, 0x1d, 0x6a, 0x2e, 0x85, 0xb2, 0x24, 0xaf, 0xfa, 0xf3, 0x35, 0x16, 0xe9, 0x1f, 0x40, + 0xa9, 0x89, 0xfa, 0x28, 0xe1, 0x27, 0x1f, 0xf3, 0x16, 0xde, 0x0d, 0x50, 0x69, 0xb2, 0x0d, 0xff, + 0xbf, 0xb8, 0xe2, 0x91, 0xeb, 0x54, 0x9c, 0x6a, 0xa1, 0x55, 0x0c, 0xa7, 0xe0, 0xd3, 0xc8, 0xbf, + 0x84, 0xb5, 0x33, 0xae, 0x66, 0x15, 0x54, 0x26, 0xb1, 0x0e, 0x85, 0x24, 0x8c, 0x31, 0x50, 0x7c, + 0x84, 0x86, 0x9c, 0x6f, 0xfd, 0x49, 0x07, 0x6d, 0x3e, 0x42, 0xb2, 0x01, 0x60, 0x0e, 0xb5, 0xbc, + 0x41, 0xe1, 0xe6, 0x8c, 0xb4, 0x81, 0x5f, 0xa4, 0x03, 0xff, 0xc9, 0x01, 0x77, 0x5e, 0x57, 0x25, + 0x52, 0x28, 0x24, 0x87, 0x50, 0xc8, 0x7c, 0x29, 0xd7, 0xa9, 0xe4, 0xab, 0x7f, 0xeb, 0x3e, 0x5d, + 0x98, 0x13, 0x9d, 0x7d, 0xd9, 0x94, 0x94, 0xbe, 0x4e, 0xe0, 0x50, 0x07, 0x73, 0x16, 0x8a, 0xe9, + 0xf8, 0x3c, 0xb3, 0x51, 0x7f, 0xce, 0x01, 0x99, 0x91, 0x68, 0xdb, 0x6a, 0xc8, 0x04, 0xf2, 0x4d, + 0xd4, 0x64, 0x67, 0xc9, 0xa5, 0x0b, 0x13, 0xf5, 0x56, 0xb0, 0xe8, 0x6f, 0x3d, 0xbe, 0xbe, 0xbd, + 0xe4, 0x36, 0x49, 0xf9, 0xb3, 0x34, 0xc5, 0xc6, 0xdf, 0x7a, 0x98, 0x90, 0x31, 0xfc, 0x4a, 0xb3, + 0x21, 0x74, 0x89, 0xe2, 0x92, 0x42, 0x3c, 0xb6, 0x32, 0xde, 0x06, 0xed, 0x13, 0x63, 0xe7, 0x1f, + 0x81, 0xa9, 0x9d, 0xc6, 0xf1, 0x55, 0x23, 0xe6, 0xfa, 0x7a, 0xd0, 0xa1, 0x5d, 0x79, 0xcb, 0xac, + 0x60, 0xcd, 0x2e, 0x5a, 0x2c, 0x6b, 0x31, 0x0a, 0xb3, 0x58, 0x6c, 0xe1, 0x06, 0xee, 0x67, 0x1f, + 0x9d, 0xdf, 0x06, 0xb5, 0xf7, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x83, 0x68, 0x07, 0x93, 0x11, 0x03, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key.pb.go new file mode 100644 index 000000000..3d1988911 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key.pb.go @@ -0,0 +1,129 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/awscompatibility/access_key.proto + +package awscompatibility // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An AccessKey resource. +// For more information, see [AWS-compatible access keys](/docs/iam/concepts/authorization/access-key). +type AccessKey struct { + // ID of the AccessKey resource. + // It is used to manage secret credentials: an access key ID and a secret access key. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the service account that the access key belongs to. + ServiceAccountId string `protobuf:"bytes,2,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Description of the access key. 0-256 characters long. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // ID of the access key. + // The key is AWS compatible. + KeyId string `protobuf:"bytes,5,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AccessKey) Reset() { *m = AccessKey{} } +func (m *AccessKey) String() string { return proto.CompactTextString(m) } +func (*AccessKey) ProtoMessage() {} +func (*AccessKey) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_dd425dc954899590, []int{0} +} +func (m *AccessKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AccessKey.Unmarshal(m, b) +} +func (m *AccessKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AccessKey.Marshal(b, m, deterministic) +} +func (dst *AccessKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccessKey.Merge(dst, src) +} +func (m *AccessKey) XXX_Size() int { + return xxx_messageInfo_AccessKey.Size(m) +} +func (m *AccessKey) XXX_DiscardUnknown() { + xxx_messageInfo_AccessKey.DiscardUnknown(m) +} + +var xxx_messageInfo_AccessKey proto.InternalMessageInfo + +func (m *AccessKey) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *AccessKey) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *AccessKey) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *AccessKey) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *AccessKey) GetKeyId() string { + if m != nil { + return m.KeyId + } + return "" +} + +func init() { + proto.RegisterType((*AccessKey)(nil), "yandex.cloud.iam.v1.awscompatibility.AccessKey") +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/awscompatibility/access_key.proto", fileDescriptor_access_key_dd425dc954899590) +} + +var fileDescriptor_access_key_dd425dc954899590 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0x41, 0x4b, 0xfb, 0x30, + 0x18, 0xc6, 0xe9, 0xfe, 0xff, 0x0d, 0x96, 0x81, 0x48, 0x40, 0x28, 0xbb, 0x58, 0xc4, 0xc3, 0x0e, + 0x2e, 0x61, 0x8a, 0x07, 0xf1, 0x54, 0x6f, 0xc3, 0xdb, 0xf0, 0xa2, 0x1e, 0x4a, 0x9a, 0xbc, 0xd6, + 0x97, 0x36, 0x4d, 0x69, 0xd2, 0x6a, 0x3e, 0x9c, 0xdf, 0x4d, 0x4c, 0x36, 0x90, 0x9d, 0xbc, 0xbe, + 0xcf, 0xfb, 0x7b, 0x9e, 0x87, 0x87, 0xdc, 0x7a, 0xd1, 0x2a, 0xf8, 0xe4, 0xb2, 0x31, 0x83, 0xe2, + 0x28, 0x34, 0x1f, 0x37, 0x5c, 
0x7c, 0x58, 0x69, 0x74, 0x27, 0x1c, 0x96, 0xd8, 0xa0, 0xf3, 0x5c, + 0x48, 0x09, 0xd6, 0x16, 0x35, 0x78, 0xd6, 0xf5, 0xc6, 0x19, 0x7a, 0x19, 0x31, 0x16, 0x30, 0x86, + 0x42, 0xb3, 0x71, 0xc3, 0x8e, 0xb1, 0xe5, 0x79, 0x65, 0x4c, 0xd5, 0x00, 0x0f, 0x4c, 0x39, 0xbc, + 0x71, 0x87, 0x1a, 0xac, 0x13, 0xba, 0x8b, 0x36, 0x17, 0x5f, 0x09, 0x99, 0xe7, 0xc1, 0xfb, 0x11, + 0x3c, 0x3d, 0x21, 0x13, 0x54, 0x69, 0x92, 0x25, 0xab, 0xf9, 0x6e, 0x82, 0x8a, 0x5e, 0x11, 0x6a, + 0xa1, 0x1f, 0x51, 0x42, 0x21, 0xa4, 0x34, 0x43, 0xeb, 0x0a, 0x54, 0xe9, 0x24, 0xe8, 0xa7, 0x7b, + 0x25, 0x8f, 0xc2, 0x56, 0xd1, 0x3b, 0x42, 0x64, 0x0f, 0xc2, 0x81, 0x2a, 0x84, 0x4b, 0xff, 0x65, + 0xc9, 0x6a, 0x71, 0xbd, 0x64, 0xb1, 0x01, 0x3b, 0x34, 0x60, 0x4f, 0x87, 0x06, 0xbb, 0xf9, 0xfe, + 0x3b, 0x77, 0x34, 0x23, 0x0b, 0x05, 0x56, 0xf6, 0xd8, 0x39, 0x34, 0x6d, 0xfa, 0x3f, 0x24, 0xfc, + 0x3e, 0xd1, 0x33, 0x32, 0xab, 0xc1, 0xff, 0xc4, 0x4f, 0x83, 0x38, 0xad, 0xc1, 0x6f, 0xd5, 0xc3, + 0xeb, 0xcb, 0x73, 0x85, 0xee, 0x7d, 0x28, 0x99, 0x34, 0x9a, 0xc7, 0x4d, 0xd6, 0x71, 0xca, 0xca, + 0xac, 0x2b, 0x68, 0x43, 0x2e, 0xff, 0xcb, 0xc6, 0xf7, 0xc7, 0x87, 0x72, 0x16, 0xe0, 0x9b, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x37, 0x35, 0xf6, 0xa3, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key_service.pb.go new file mode 100644 index 000000000..6f829dc10 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility/access_key_service.pb.go @@ -0,0 +1,565 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/awscompatibility/access_key_service.proto + +package awscompatibility // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetAccessKeyRequest struct { + // ID of the AccessKey resource to return. + // To get the access key ID, use a [AccessKeyService.List] request. 
+ AccessKeyId string `protobuf:"bytes,1,opt,name=access_key_id,json=accessKeyId,proto3" json:"access_key_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessKeyRequest) Reset() { *m = GetAccessKeyRequest{} } +func (m *GetAccessKeyRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessKeyRequest) ProtoMessage() {} +func (*GetAccessKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_service_efeaa691b26615d7, []int{0} +} +func (m *GetAccessKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessKeyRequest.Unmarshal(m, b) +} +func (m *GetAccessKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessKeyRequest.Marshal(b, m, deterministic) +} +func (dst *GetAccessKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessKeyRequest.Merge(dst, src) +} +func (m *GetAccessKeyRequest) XXX_Size() int { + return xxx_messageInfo_GetAccessKeyRequest.Size(m) +} +func (m *GetAccessKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessKeyRequest proto.InternalMessageInfo + +func (m *GetAccessKeyRequest) GetAccessKeyId() string { + if m != nil { + return m.AccessKeyId + } + return "" +} + +type ListAccessKeysRequest struct { + // ID of the service account to list access keys for. + // To get the service account ID, use a [yandex.cloud.iam.v1.ServiceAccountService.List] request. + // If not specified, it defaults to the subject that made the request. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListAccessKeysResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] + // to the [ListAccessKeysResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAccessKeysRequest) Reset() { *m = ListAccessKeysRequest{} } +func (m *ListAccessKeysRequest) String() string { return proto.CompactTextString(m) } +func (*ListAccessKeysRequest) ProtoMessage() {} +func (*ListAccessKeysRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_service_efeaa691b26615d7, []int{1} +} +func (m *ListAccessKeysRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAccessKeysRequest.Unmarshal(m, b) +} +func (m *ListAccessKeysRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAccessKeysRequest.Marshal(b, m, deterministic) +} +func (dst *ListAccessKeysRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAccessKeysRequest.Merge(dst, src) +} +func (m *ListAccessKeysRequest) XXX_Size() int { + return xxx_messageInfo_ListAccessKeysRequest.Size(m) +} +func (m *ListAccessKeysRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListAccessKeysRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAccessKeysRequest proto.InternalMessageInfo + +func (m *ListAccessKeysRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *ListAccessKeysRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListAccessKeysRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListAccessKeysResponse struct { + // List of AccessKey resources. + AccessKeys []*AccessKey `protobuf:"bytes,1,rep,name=access_keys,json=accessKeys,proto3" json:"access_keys,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListAccessKeysRequest.page_size], use + // the [next_page_token] as the value + // for the [ListAccessKeysRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAccessKeysResponse) Reset() { *m = ListAccessKeysResponse{} } +func (m *ListAccessKeysResponse) String() string { return proto.CompactTextString(m) } +func (*ListAccessKeysResponse) ProtoMessage() {} +func (*ListAccessKeysResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_service_efeaa691b26615d7, []int{2} +} +func (m *ListAccessKeysResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAccessKeysResponse.Unmarshal(m, b) +} +func (m *ListAccessKeysResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAccessKeysResponse.Marshal(b, m, deterministic) +} +func (dst *ListAccessKeysResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAccessKeysResponse.Merge(dst, src) +} +func (m *ListAccessKeysResponse) XXX_Size() int { + return xxx_messageInfo_ListAccessKeysResponse.Size(m) +} +func (m *ListAccessKeysResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListAccessKeysResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAccessKeysResponse proto.InternalMessageInfo + +func (m *ListAccessKeysResponse) GetAccessKeys() []*AccessKey { + if m != nil { + return m.AccessKeys + } + return nil +} + +func (m *ListAccessKeysResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateAccessKeyRequest struct { + // ID of the service account to create an access key for. + // To get the service account ID, use a [yandex.cloud.iam.v1.ServiceAccountService.List] request. + // If not specified, it defaults to the subject that made the request. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // Description of the access key. 
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateAccessKeyRequest) Reset() { *m = CreateAccessKeyRequest{} } +func (m *CreateAccessKeyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAccessKeyRequest) ProtoMessage() {} +func (*CreateAccessKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_service_efeaa691b26615d7, []int{3} +} +func (m *CreateAccessKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateAccessKeyRequest.Unmarshal(m, b) +} +func (m *CreateAccessKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateAccessKeyRequest.Marshal(b, m, deterministic) +} +func (dst *CreateAccessKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateAccessKeyRequest.Merge(dst, src) +} +func (m *CreateAccessKeyRequest) XXX_Size() int { + return xxx_messageInfo_CreateAccessKeyRequest.Size(m) +} +func (m *CreateAccessKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateAccessKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateAccessKeyRequest proto.InternalMessageInfo + +func (m *CreateAccessKeyRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *CreateAccessKeyRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type CreateAccessKeyResponse struct { + // AccessKey resource. + AccessKey *AccessKey `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + // Secret access key. + // The key is AWS compatible. + Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateAccessKeyResponse) Reset() { *m = CreateAccessKeyResponse{} } +func (m *CreateAccessKeyResponse) String() string { return proto.CompactTextString(m) } +func (*CreateAccessKeyResponse) ProtoMessage() {} +func (*CreateAccessKeyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_service_efeaa691b26615d7, []int{4} +} +func (m *CreateAccessKeyResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateAccessKeyResponse.Unmarshal(m, b) +} +func (m *CreateAccessKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateAccessKeyResponse.Marshal(b, m, deterministic) +} +func (dst *CreateAccessKeyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateAccessKeyResponse.Merge(dst, src) +} +func (m *CreateAccessKeyResponse) XXX_Size() int { + return xxx_messageInfo_CreateAccessKeyResponse.Size(m) +} +func (m *CreateAccessKeyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateAccessKeyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateAccessKeyResponse proto.InternalMessageInfo + +func (m *CreateAccessKeyResponse) GetAccessKey() *AccessKey { + if m != nil { + return m.AccessKey + } + return nil +} + +func (m *CreateAccessKeyResponse) GetSecret() string { + if m != nil { + return m.Secret + } + return "" +} + +type DeleteAccessKeyRequest struct { + // ID of the access key to delete. + // To get the access key ID, use a [AccessKeyService.List] request. 
+ AccessKeyId string `protobuf:"bytes,1,opt,name=access_key_id,json=accessKeyId,proto3" json:"access_key_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteAccessKeyRequest) Reset() { *m = DeleteAccessKeyRequest{} } +func (m *DeleteAccessKeyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAccessKeyRequest) ProtoMessage() {} +func (*DeleteAccessKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_access_key_service_efeaa691b26615d7, []int{5} +} +func (m *DeleteAccessKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteAccessKeyRequest.Unmarshal(m, b) +} +func (m *DeleteAccessKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteAccessKeyRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteAccessKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteAccessKeyRequest.Merge(dst, src) +} +func (m *DeleteAccessKeyRequest) XXX_Size() int { + return xxx_messageInfo_DeleteAccessKeyRequest.Size(m) +} +func (m *DeleteAccessKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteAccessKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteAccessKeyRequest proto.InternalMessageInfo + +func (m *DeleteAccessKeyRequest) GetAccessKeyId() string { + if m != nil { + return m.AccessKeyId + } + return "" +} + +func init() { + proto.RegisterType((*GetAccessKeyRequest)(nil), "yandex.cloud.iam.v1.awscompatibility.GetAccessKeyRequest") + proto.RegisterType((*ListAccessKeysRequest)(nil), "yandex.cloud.iam.v1.awscompatibility.ListAccessKeysRequest") + proto.RegisterType((*ListAccessKeysResponse)(nil), "yandex.cloud.iam.v1.awscompatibility.ListAccessKeysResponse") + proto.RegisterType((*CreateAccessKeyRequest)(nil), "yandex.cloud.iam.v1.awscompatibility.CreateAccessKeyRequest") + proto.RegisterType((*CreateAccessKeyResponse)(nil), "yandex.cloud.iam.v1.awscompatibility.CreateAccessKeyResponse") + proto.RegisterType((*DeleteAccessKeyRequest)(nil), "yandex.cloud.iam.v1.awscompatibility.DeleteAccessKeyRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// AccessKeyServiceClient is the client API for AccessKeyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AccessKeyServiceClient interface { + // Retrieves the list of AccessKey resources for the specified service account. + List(ctx context.Context, in *ListAccessKeysRequest, opts ...grpc.CallOption) (*ListAccessKeysResponse, error) + // Returns the specified AccessKey resource. + // + // To get the list of available AccessKey resources, make a [List] request. + Get(ctx context.Context, in *GetAccessKeyRequest, opts ...grpc.CallOption) (*AccessKey, error) + // Creates an access key for the specified service account. + Create(ctx context.Context, in *CreateAccessKeyRequest, opts ...grpc.CallOption) (*CreateAccessKeyResponse, error) + // Deletes the specified access key. 
+ Delete(ctx context.Context, in *DeleteAccessKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type accessKeyServiceClient struct { + cc *grpc.ClientConn +} + +func NewAccessKeyServiceClient(cc *grpc.ClientConn) AccessKeyServiceClient { + return &accessKeyServiceClient{cc} +} + +func (c *accessKeyServiceClient) List(ctx context.Context, in *ListAccessKeysRequest, opts ...grpc.CallOption) (*ListAccessKeysResponse, error) { + out := new(ListAccessKeysResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *accessKeyServiceClient) Get(ctx context.Context, in *GetAccessKeyRequest, opts ...grpc.CallOption) (*AccessKey, error) { + out := new(AccessKey) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *accessKeyServiceClient) Create(ctx context.Context, in *CreateAccessKeyRequest, opts ...grpc.CallOption) (*CreateAccessKeyResponse, error) { + out := new(CreateAccessKeyResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *accessKeyServiceClient) Delete(ctx context.Context, in *DeleteAccessKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AccessKeyServiceServer is the server API for AccessKeyService service. +type AccessKeyServiceServer interface { + // Retrieves the list of AccessKey resources for the specified service account. + List(context.Context, *ListAccessKeysRequest) (*ListAccessKeysResponse, error) + // Returns the specified AccessKey resource. + // + // To get the list of available AccessKey resources, make a [List] request. + Get(context.Context, *GetAccessKeyRequest) (*AccessKey, error) + // Creates an access key for the specified service account. + Create(context.Context, *CreateAccessKeyRequest) (*CreateAccessKeyResponse, error) + // Deletes the specified access key. 
+ Delete(context.Context, *DeleteAccessKeyRequest) (*empty.Empty, error) +} + +func RegisterAccessKeyServiceServer(s *grpc.Server, srv AccessKeyServiceServer) { + s.RegisterService(&_AccessKeyService_serviceDesc, srv) +} + +func _AccessKeyService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAccessKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AccessKeyServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AccessKeyServiceServer).List(ctx, req.(*ListAccessKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AccessKeyService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAccessKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AccessKeyServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AccessKeyServiceServer).Get(ctx, req.(*GetAccessKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AccessKeyService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAccessKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AccessKeyServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AccessKeyServiceServer).Create(ctx, req.(*CreateAccessKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AccessKeyService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAccessKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AccessKeyServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.awscompatibility.AccessKeyService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AccessKeyServiceServer).Delete(ctx, req.(*DeleteAccessKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AccessKeyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.awscompatibility.AccessKeyService", + HandlerType: (*AccessKeyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "List", + Handler: _AccessKeyService_List_Handler, + }, + { + MethodName: "Get", + Handler: _AccessKeyService_Get_Handler, + }, + { + MethodName: "Create", + Handler: _AccessKeyService_Create_Handler, + }, + { + MethodName: "Delete", + Handler: _AccessKeyService_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"yandex/cloud/iam/v1/awscompatibility/access_key_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/awscompatibility/access_key_service.proto", fileDescriptor_access_key_service_efeaa691b26615d7) +} + +var fileDescriptor_access_key_service_efeaa691b26615d7 = []byte{ + // 649 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x95, 0xbf, 0x6e, 0x13, 0x4b, + 0x14, 0xc6, 0x35, 0x71, 0x62, 0xc5, 0xc7, 0x37, 0xba, 0xd1, 0x5c, 0x5d, 0x63, 0x19, 0x90, 0xa2, + 0x55, 0x14, 0x4c, 0x20, 0x3b, 0xde, 0x40, 0x22, 0x41, 0xec, 0x22, 0x01, 0x14, 0x05, 0x10, 0x8a, + 0x1c, 0x1a, 0xa0, 0xb0, 0xc6, 0xbb, 0x07, 0x33, 0x8a, 0xbd, 0xb3, 0x78, 0xc6, 0x4e, 0x1c, 0x94, + 0x02, 0xca, 0x94, 0xd0, 0x52, 0xf1, 0x02, 0xe9, 0x78, 0x85, 0xa4, 0xa2, 0xe1, 0x15, 0x28, 0x78, + 0x06, 0x2a, 0xb4, 0xb3, 0x6b, 0x3b, 0x7f, 0x5c, 0x6c, 0x5c, 0xee, 0x9c, 0xf9, 0xce, 0xf9, 0xe6, + 0x77, 0xce, 0xcc, 0x42, 0xa5, 0xc7, 0x7d, 0x0f, 0xf7, 0x99, 0xdb, 0x94, 0x1d, 0x8f, 0x09, 0xde, + 0x62, 0x5d, 0x87, 0xf1, 0x3d, 0xe5, 0xca, 0x56, 0xc0, 0xb5, 0xa8, 0x8b, 0xa6, 0xd0, 0x3d, 0xc6, + 0x5d, 0x17, 0x95, 0xaa, 0xed, 0x62, 0xaf, 0xa6, 0xb0, 0xdd, 0x15, 0x2e, 0xda, 0x41, 0x5b, 0x6a, + 0x49, 0xe7, 0x23, 0xb9, 0x6d, 0xe4, 0xb6, 0xe0, 0x2d, 0xbb, 0xeb, 0xd8, 0x17, 0xe5, 0x85, 0x1b, + 0x0d, 0x29, 0x1b, 0x4d, 0x64, 0x3c, 0x10, 0x8c, 0xfb, 0xbe, 0xd4, 0x5c, 0x0b, 0xe9, 0xab, 0x28, + 0x47, 0xe1, 0x7a, 0x1c, 0x35, 0x5f, 0xf5, 0xce, 0x5b, 0x86, 0xad, 0x40, 0xf7, 0xe2, 0xe0, 0xca, + 0x15, 0xfd, 0xc5, 0xb2, 0x9b, 0xe7, 0x64, 0x5d, 0xde, 0x14, 0x9e, 0xa9, 0x19, 0x85, 0xad, 0x4d, + 0xf8, 0x6f, 0x13, 0xf5, 0xba, 0x51, 0x3d, 0xc3, 0x5e, 0x15, 0xdf, 0x77, 0x50, 0x69, 0x5a, 0x82, + 0x99, 0x33, 0x27, 0x15, 0x5e, 0x9e, 0xcc, 0x91, 0x62, 0x66, 0xe3, 0x9f, 0xdf, 0x27, 0x0e, 0x39, + 0x3a, 0x75, 0x26, 0xcb, 0x95, 0x95, 0x52, 0x35, 0xcb, 0xfb, 0xb2, 0x2d, 0xcf, 0xfa, 0x46, 0xe0, + 0xff, 0xe7, 0x42, 0x0d, 0x53, 0xa9, 0x7e, 0xae, 0x55, 0xa0, 0x31, 0xaa, 0x1a, 0x77, 0x5d, 0xd9, + 0xf1, 0xf5, 0x30, 0xe1, 0xf4, 0x20, 0xd9, 0x6c, 0xbc, 0x67, 0x3d, 0xda, 0xb2, 0xe5, 0xd1, 0x5b, + 0x90, 0x09, 0x78, 0x03, 0x6b, 0x4a, 0x1c, 0x60, 0x7e, 0x62, 0x8e, 0x14, 0x53, 0x1b, 0xf0, 0xe7, + 0xc4, 0x49, 0x97, 0x2b, 0x4e, 0xa9, 0x54, 0xaa, 0x4e, 0x87, 0xc1, 0x1d, 0x71, 0x80, 0xb4, 0x08, + 0x60, 0x36, 0x6a, 0xb9, 0x8b, 0x7e, 0x3e, 0x65, 0x12, 0x67, 0x8e, 0x4e, 0x9d, 0x29, 0xb3, 0xb3, + 0x6a, 0xb2, 0xbc, 0x0c, 0x63, 0xd6, 0x67, 0x02, 0xb9, 0x8b, 0x26, 0x55, 0x20, 0x7d, 0x85, 0x74, + 0x1b, 0xb2, 0xc3, 0x13, 0xab, 0x3c, 0x99, 0x4b, 0x15, 0xb3, 0xcb, 0xcc, 0x4e, 0xd2, 0x55, 0x7b, + 0x88, 0x0f, 0x06, 0x48, 0x14, 0x5d, 0x80, 0x7f, 0x7d, 0xdc, 0xd7, 0xb5, 0x33, 0xde, 0xc2, 0x53, + 0x64, 0xaa, 0x33, 0xe1, 0xf2, 0xf6, 0xc0, 0xd4, 0x21, 0xe4, 0x1e, 0xb5, 0x91, 0x6b, 0xbc, 0xd4, + 0x85, 0x71, 0xc9, 0xdd, 0x81, 0xac, 0x87, 0xca, 0x6d, 0x8b, 0x20, 0xec, 0x74, 0x54, 0xb5, 0x4f, + 0x64, 0x79, 0x65, 0xb5, 0x7a, 0x36, 0x6a, 0x7d, 0x24, 0x70, 0xed, 0x52, 0xfd, 0x18, 0xca, 0x0b, + 0x80, 0x21, 0x14, 0x53, 0x78, 0x0c, 0x26, 0x99, 0x01, 0x13, 0x9a, 0x83, 0xb4, 0x42, 0xb7, 0x8d, + 0x3a, 0x26, 0x11, 0x7f, 0x59, 0x4f, 0x21, 0xf7, 0x18, 0x9b, 0x38, 0x02, 0xc1, 0x95, 0x07, 0x71, + 0xf9, 0xc7, 0x14, 0xcc, 0x0e, 0xd2, 0xec, 0x44, 0x68, 0xe8, 0x31, 0x81, 0xc9, 0xb0, 0xf1, 0x74, + 0x2d, 0x99, 0xfb, 0x91, 0x93, 0x5c, 0x28, 0x8f, 0x27, 0x8e, 0x60, 0x5a, 0x77, 0x3f, 0xfd, 0xfc, + 0xf5, 0x65, 0x62, 0x81, 0xce, 0x9b, 0xcb, 0xcb, 0xf7, 0xd4, 0xd2, 0xf9, 0xab, 0x1b, 0x5e, 0xe7, + 0xe1, 0xf4, 0x1c, 0x13, 0x48, 0x6d, 0xa2, 0xa6, 0x0f, 0x92, 0xd5, 0x1c, 
0x71, 0x89, 0x0b, 0x57, + 0xed, 0x94, 0x55, 0x36, 0x0e, 0x57, 0xe9, 0xfd, 0x24, 0x0e, 0xd9, 0x87, 0x73, 0x8d, 0x39, 0xa4, + 0xdf, 0x09, 0xa4, 0xa3, 0x41, 0xa2, 0x09, 0x41, 0x8d, 0x1e, 0xfb, 0x42, 0x65, 0x4c, 0x75, 0xcc, + 0x99, 0x99, 0x53, 0xdc, 0xb6, 0x12, 0x71, 0x7e, 0x48, 0x16, 0xe9, 0x57, 0x02, 0xe9, 0x68, 0xfc, + 0x92, 0x1a, 0x1f, 0x3d, 0xac, 0x85, 0x9c, 0x1d, 0x3d, 0xe0, 0x76, 0xff, 0x01, 0xb7, 0x9f, 0x84, + 0x0f, 0x78, 0x9f, 0xeb, 0xe2, 0x58, 0x5c, 0x37, 0xde, 0xbc, 0x7e, 0xd5, 0x10, 0xfa, 0x5d, 0xa7, + 0x6e, 0xbb, 0xb2, 0xc5, 0x22, 0x7f, 0x4b, 0xd1, 0x73, 0xde, 0x90, 0x4b, 0x0d, 0xf4, 0x4d, 0x35, + 0x96, 0xe4, 0xf7, 0xb0, 0x76, 0x71, 0xa1, 0x9e, 0x36, 0xe2, 0x7b, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xaa, 0xd6, 0x8e, 0x16, 0xfe, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/iam_token_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/iam_token_service.pb.go new file mode 100644 index 000000000..4ed6fc73a --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/iam_token_service.pb.go @@ -0,0 +1,326 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/iam_token_service.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CreateIamTokenRequest struct { + // Types that are valid to be assigned to Identity: + // *CreateIamTokenRequest_YandexPassportOauthToken + // *CreateIamTokenRequest_Jwt + Identity isCreateIamTokenRequest_Identity `protobuf_oneof:"identity"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateIamTokenRequest) Reset() { *m = CreateIamTokenRequest{} } +func (m *CreateIamTokenRequest) String() string { return proto.CompactTextString(m) } +func (*CreateIamTokenRequest) ProtoMessage() {} +func (*CreateIamTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_iam_token_service_021a8da7029cec45, []int{0} +} +func (m *CreateIamTokenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateIamTokenRequest.Unmarshal(m, b) +} +func (m *CreateIamTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateIamTokenRequest.Marshal(b, m, deterministic) +} +func (dst *CreateIamTokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateIamTokenRequest.Merge(dst, src) +} +func (m *CreateIamTokenRequest) XXX_Size() int { + return xxx_messageInfo_CreateIamTokenRequest.Size(m) +} +func (m *CreateIamTokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateIamTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateIamTokenRequest proto.InternalMessageInfo + +type isCreateIamTokenRequest_Identity interface { + isCreateIamTokenRequest_Identity() +} + +type CreateIamTokenRequest_YandexPassportOauthToken struct { + YandexPassportOauthToken string `protobuf:"bytes,1,opt,name=yandex_passport_oauth_token,json=yandexPassportOauthToken,proto3,oneof"` +} + +type CreateIamTokenRequest_Jwt struct { + Jwt string `protobuf:"bytes,2,opt,name=jwt,proto3,oneof"` +} + +func (*CreateIamTokenRequest_YandexPassportOauthToken) isCreateIamTokenRequest_Identity() {} + +func (*CreateIamTokenRequest_Jwt) isCreateIamTokenRequest_Identity() {} + +func (m *CreateIamTokenRequest) GetIdentity() isCreateIamTokenRequest_Identity { + if m != nil { + return m.Identity + } + return nil +} + +func (m *CreateIamTokenRequest) GetYandexPassportOauthToken() string { + if x, ok := m.GetIdentity().(*CreateIamTokenRequest_YandexPassportOauthToken); ok { + return x.YandexPassportOauthToken + } + return "" +} + +func (m *CreateIamTokenRequest) GetJwt() string { + if x, ok := m.GetIdentity().(*CreateIamTokenRequest_Jwt); ok { + return x.Jwt + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CreateIamTokenRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CreateIamTokenRequest_OneofMarshaler, _CreateIamTokenRequest_OneofUnmarshaler, _CreateIamTokenRequest_OneofSizer, []interface{}{ + (*CreateIamTokenRequest_YandexPassportOauthToken)(nil), + (*CreateIamTokenRequest_Jwt)(nil), + } +} + +func _CreateIamTokenRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CreateIamTokenRequest) + // identity + switch x := m.Identity.(type) { + case *CreateIamTokenRequest_YandexPassportOauthToken: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.YandexPassportOauthToken) + case *CreateIamTokenRequest_Jwt: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Jwt) + case nil: + default: + return fmt.Errorf("CreateIamTokenRequest.Identity has unexpected type %T", x) + } + return nil +} + +func _CreateIamTokenRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CreateIamTokenRequest) + switch tag { + case 1: // identity.yandex_passport_oauth_token + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Identity = &CreateIamTokenRequest_YandexPassportOauthToken{x} + return true, err + case 2: // identity.jwt + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Identity = &CreateIamTokenRequest_Jwt{x} + return true, err + default: + return false, nil + } +} + +func _CreateIamTokenRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CreateIamTokenRequest) + // identity + switch x := m.Identity.(type) { + case *CreateIamTokenRequest_YandexPassportOauthToken: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.YandexPassportOauthToken))) + n += len(x.YandexPassportOauthToken) + case *CreateIamTokenRequest_Jwt: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Jwt))) + n += len(x.Jwt) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type CreateIamTokenResponse struct { + // IAM token for the specified identity. + // + // You should pass the token in the `Authorization` header for any further API requests. + // For example, `Authorization: Bearer [iam_token]`. + IamToken string `protobuf:"bytes,1,opt,name=iam_token,json=iamToken,proto3" json:"iam_token,omitempty"` + // IAM token expiration time, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. 
+ ExpiresAt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateIamTokenResponse) Reset() { *m = CreateIamTokenResponse{} } +func (m *CreateIamTokenResponse) String() string { return proto.CompactTextString(m) } +func (*CreateIamTokenResponse) ProtoMessage() {} +func (*CreateIamTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_iam_token_service_021a8da7029cec45, []int{1} +} +func (m *CreateIamTokenResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateIamTokenResponse.Unmarshal(m, b) +} +func (m *CreateIamTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateIamTokenResponse.Marshal(b, m, deterministic) +} +func (dst *CreateIamTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateIamTokenResponse.Merge(dst, src) +} +func (m *CreateIamTokenResponse) XXX_Size() int { + return xxx_messageInfo_CreateIamTokenResponse.Size(m) +} +func (m *CreateIamTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateIamTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateIamTokenResponse proto.InternalMessageInfo + +func (m *CreateIamTokenResponse) GetIamToken() string { + if m != nil { + return m.IamToken + } + return "" +} + +func (m *CreateIamTokenResponse) GetExpiresAt() *timestamp.Timestamp { + if m != nil { + return m.ExpiresAt + } + return nil +} + +func init() { + proto.RegisterType((*CreateIamTokenRequest)(nil), "yandex.cloud.iam.v1.CreateIamTokenRequest") + proto.RegisterType((*CreateIamTokenResponse)(nil), "yandex.cloud.iam.v1.CreateIamTokenResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IamTokenServiceClient is the client API for IamTokenService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IamTokenServiceClient interface { + // Creates an IAM token for the specified identity. + Create(ctx context.Context, in *CreateIamTokenRequest, opts ...grpc.CallOption) (*CreateIamTokenResponse, error) +} + +type iamTokenServiceClient struct { + cc *grpc.ClientConn +} + +func NewIamTokenServiceClient(cc *grpc.ClientConn) IamTokenServiceClient { + return &iamTokenServiceClient{cc} +} + +func (c *iamTokenServiceClient) Create(ctx context.Context, in *CreateIamTokenRequest, opts ...grpc.CallOption) (*CreateIamTokenResponse, error) { + out := new(CreateIamTokenResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.IamTokenService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IamTokenServiceServer is the server API for IamTokenService service. +type IamTokenServiceServer interface { + // Creates an IAM token for the specified identity. 
+ Create(context.Context, *CreateIamTokenRequest) (*CreateIamTokenResponse, error) +} + +func RegisterIamTokenServiceServer(s *grpc.Server, srv IamTokenServiceServer) { + s.RegisterService(&_IamTokenService_serviceDesc, srv) +} + +func _IamTokenService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateIamTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IamTokenServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.IamTokenService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IamTokenServiceServer).Create(ctx, req.(*CreateIamTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _IamTokenService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.IamTokenService", + HandlerType: (*IamTokenServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _IamTokenService_Create_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/iam/v1/iam_token_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/iam_token_service.proto", fileDescriptor_iam_token_service_021a8da7029cec45) +} + +var fileDescriptor_iam_token_service_021a8da7029cec45 = []byte{ + // 382 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x4e, 0xdb, 0x30, + 0x18, 0xc7, 0x97, 0x6e, 0xaa, 0x5a, 0x4f, 0xda, 0x26, 0x57, 0x9b, 0xba, 0x74, 0xd3, 0xa6, 0x9c, + 0x50, 0xab, 0xda, 0x6a, 0x39, 0x41, 0x85, 0x10, 0xe5, 0x02, 0x27, 0x50, 0xe8, 0x89, 0x4b, 0xe4, + 0x36, 0x26, 0x35, 0x34, 0xb6, 0x89, 0xbf, 0x84, 0x56, 0x42, 0x1c, 0x78, 0x01, 0x0e, 0xbc, 0x10, + 0x3c, 0x03, 0xaf, 0xc0, 0x83, 0xa0, 0xc4, 0x09, 0x12, 0xa8, 0x07, 0x4e, 0x96, 0xf5, 0xfd, 0xfc, + 0xfd, 0xff, 0xdf, 0xdf, 0x1f, 0xea, 0xad, 0x98, 0x0c, 0xf9, 0x92, 0xce, 0x16, 0x2a, 0x0d, 0xa9, + 0x60, 0x31, 0xcd, 0x06, 0xf9, 0x11, 0x80, 0xba, 0xe0, 0x32, 0x30, 0x3c, 0xc9, 0xc4, 0x8c, 0x13, + 0x9d, 0x28, 0x50, 0xb8, 0x65, 0x61, 0x52, 0xc0, 0x44, 0xb0, 0x98, 0x64, 0x03, 0xf7, 0x4f, 0xa4, + 0x54, 0xb4, 0xe0, 0x94, 0x69, 0x41, 0x99, 0x94, 0x0a, 0x18, 0x08, 0x25, 0x8d, 0x7d, 0xe2, 0xfe, + 0x2b, 0xab, 0xc5, 0x6d, 0x9a, 0x9e, 0x51, 0x10, 0x31, 0x37, 0xc0, 0x62, 0x5d, 0x02, 0x7f, 0xdf, + 0x18, 0xc8, 0xd8, 0x42, 0x84, 0x45, 0x03, 0x5b, 0xf6, 0x6e, 0xd0, 0xcf, 0xfd, 0x84, 0x33, 0xe0, + 0x87, 0x2c, 0x9e, 0xe4, 0x96, 0x7c, 0x7e, 0x99, 0x72, 0x03, 0x78, 0x17, 0x75, 0xec, 0xcb, 0x40, + 0x33, 0x63, 0xb4, 0x4a, 0x20, 0x50, 0x2c, 0x85, 0xb9, 0x35, 0xde, 0x76, 0xfe, 0x3b, 0x1b, 0xcd, + 0x83, 0x4f, 0x7e, 0xdb, 0x42, 0xc7, 0x25, 0x73, 0x94, 0x23, 0x45, 0x1f, 0x8c, 0xd1, 0xe7, 0xf3, + 0x2b, 0x68, 0xd7, 0x4a, 0x30, 0xbf, 0x8c, 0x7f, 0xa0, 0x86, 0x08, 0xb9, 0x04, 0x01, 0x2b, 0xfc, + 0xe5, 0xe1, 0x71, 0xe0, 0x78, 0x1a, 0xfd, 0x7a, 0xaf, 0x6f, 0xb4, 0x92, 0x86, 0xe3, 0x0e, 0x6a, + 0xbe, 0xe6, 0x64, 0xe5, 0xfc, 0x86, 0x28, 0x21, 0xbc, 0x85, 0x10, 0x5f, 0x6a, 0x91, 0x70, 0x13, + 0x30, 0xab, 0xf1, 0x75, 0xe8, 0x12, 0x9b, 0x05, 0xa9, 0xb2, 0x20, 0x93, 0x2a, 0x0b, 0xbf, 0x59, + 0xd2, 0x7b, 0x30, 0xbc, 0x73, 0xd0, 0xf7, 0x4a, 0xec, 0xc4, 0xc6, 0x8f, 0xaf, 0x51, 0xdd, 0xba, + 0xc0, 0x5d, 0xb2, 0xe6, 0x0f, 0xc8, 0xda, 0x88, 0xdc, 0xde, 0x87, 0x58, 0x3b, 0x8e, 0xf7, 0xfb, + 0xf6, 0xe9, 0xf9, 0xbe, 0xd6, 0xf2, 0xbe, 0x55, 0x4b, 0x50, 0x0c, 0x66, 0xb6, 
0x9d, 0xee, 0x78, + 0xe7, 0x74, 0x14, 0x09, 0x98, 0xa7, 0x53, 0x32, 0x53, 0x31, 0xb5, 0x3d, 0xfb, 0xf6, 0xbf, 0x22, + 0xd5, 0x8f, 0xb8, 0x2c, 0x06, 0xa2, 0x6b, 0x36, 0x69, 0x24, 0x58, 0x3c, 0xad, 0x17, 0xe5, 0xcd, + 0x97, 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0xac, 0x14, 0x7c, 0x6b, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key.pb.go new file mode 100644 index 000000000..6a780526e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/key.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Key_Algorithm int32 + +const ( + Key_ALGORITHM_UNSPECIFIED Key_Algorithm = 0 + // RSA with a 2048-bit key size. Default value. + Key_RSA_2048 Key_Algorithm = 1 + // RSA with a 4096-bit key size. + Key_RSA_4096 Key_Algorithm = 2 +) + +var Key_Algorithm_name = map[int32]string{ + 0: "ALGORITHM_UNSPECIFIED", + 1: "RSA_2048", + 2: "RSA_4096", +} +var Key_Algorithm_value = map[string]int32{ + "ALGORITHM_UNSPECIFIED": 0, + "RSA_2048": 1, + "RSA_4096": 2, +} + +func (x Key_Algorithm) String() string { + return proto.EnumName(Key_Algorithm_name, int32(x)) +} +func (Key_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_key_8778cd8d03fc7198, []int{0, 0} +} + +// A Key resource. For more information, see [Authorized keys](/docs/iam/concepts/authorization/key). +type Key struct { + // ID of the Key resource. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to Subject: + // *Key_UserAccountId + // *Key_ServiceAccountId + Subject isKey_Subject `protobuf_oneof:"subject"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Description of the Key resource. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // An algorithm used to generate a key pair of the Key resource. + KeyAlgorithm Key_Algorithm `protobuf:"varint,6,opt,name=key_algorithm,json=keyAlgorithm,proto3,enum=yandex.cloud.iam.v1.Key_Algorithm" json:"key_algorithm,omitempty"` + // A public key of the Key resource. 
+ PublicKey string `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { + return fileDescriptor_key_8778cd8d03fc7198, []int{0} +} +func (m *Key) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Key.Unmarshal(m, b) +} +func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Key.Marshal(b, m, deterministic) +} +func (dst *Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_Key.Merge(dst, src) +} +func (m *Key) XXX_Size() int { + return xxx_messageInfo_Key.Size(m) +} +func (m *Key) XXX_DiscardUnknown() { + xxx_messageInfo_Key.DiscardUnknown(m) +} + +var xxx_messageInfo_Key proto.InternalMessageInfo + +func (m *Key) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type isKey_Subject interface { + isKey_Subject() +} + +type Key_UserAccountId struct { + UserAccountId string `protobuf:"bytes,2,opt,name=user_account_id,json=userAccountId,proto3,oneof"` +} + +type Key_ServiceAccountId struct { + ServiceAccountId string `protobuf:"bytes,3,opt,name=service_account_id,json=serviceAccountId,proto3,oneof"` +} + +func (*Key_UserAccountId) isKey_Subject() {} + +func (*Key_ServiceAccountId) isKey_Subject() {} + +func (m *Key) GetSubject() isKey_Subject { + if m != nil { + return m.Subject + } + return nil +} + +func (m *Key) GetUserAccountId() string { + if x, ok := m.GetSubject().(*Key_UserAccountId); ok { + return x.UserAccountId + } + return "" +} + +func (m *Key) GetServiceAccountId() string { + if x, ok := m.GetSubject().(*Key_ServiceAccountId); ok { + return x.ServiceAccountId + } + return "" +} + +func (m *Key) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Key) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Key) GetKeyAlgorithm() Key_Algorithm { + if m != nil { + return m.KeyAlgorithm + } + return Key_ALGORITHM_UNSPECIFIED +} + +func (m *Key) GetPublicKey() string { + if m != nil { + return m.PublicKey + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Key) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_OneofMarshaler, _Key_OneofUnmarshaler, _Key_OneofSizer, []interface{}{ + (*Key_UserAccountId)(nil), + (*Key_ServiceAccountId)(nil), + } +} + +func _Key_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key) + // subject + switch x := m.Subject.(type) { + case *Key_UserAccountId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.UserAccountId) + case *Key_ServiceAccountId: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ServiceAccountId) + case nil: + default: + return fmt.Errorf("Key.Subject has unexpected type %T", x) + } + return nil +} + +func _Key_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key) + switch tag { + case 2: // subject.user_account_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Subject = &Key_UserAccountId{x} + return true, err + case 3: // subject.service_account_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Subject = &Key_ServiceAccountId{x} + return true, err + default: + return false, nil + } +} + +func _Key_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key) + // subject + switch x := m.Subject.(type) { + case *Key_UserAccountId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.UserAccountId))) + n += len(x.UserAccountId) + case *Key_ServiceAccountId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ServiceAccountId))) + n += len(x.ServiceAccountId) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Key)(nil), "yandex.cloud.iam.v1.Key") + proto.RegisterEnum("yandex.cloud.iam.v1.Key_Algorithm", Key_Algorithm_name, Key_Algorithm_value) +} + +func init() { proto.RegisterFile("yandex/cloud/iam/v1/key.proto", fileDescriptor_key_8778cd8d03fc7198) } + +var fileDescriptor_key_8778cd8d03fc7198 = []byte{ + // 384 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4b, 0x6f, 0xd4, 0x30, + 0x14, 0x85, 0x9b, 0x0c, 0xb4, 0xc4, 0x7d, 0x10, 0x19, 0x21, 0x85, 0x4a, 0x15, 0xd1, 0xac, 0xb2, + 0xa9, 0xdd, 0x0e, 0x15, 0xa2, 0xaa, 0x58, 0x64, 0xa0, 0xb4, 0xd1, 0xf0, 0x52, 0x5a, 0x36, 0x6c, + 0x22, 0xc7, 0xbe, 0xa4, 0x26, 0x0f, 0x47, 0x89, 0x33, 0xc2, 0x6b, 0xfe, 0x38, 0x6a, 0x32, 0x61, + 0x66, 0x31, 0xcb, 0x7b, 0xbe, 0xcf, 0xf2, 0xd1, 0xb5, 0xd1, 0x89, 0x61, 0x95, 0x80, 0x3f, 0x94, + 0x17, 0xaa, 0x13, 0x54, 0xb2, 0x92, 0x2e, 0xcf, 0x69, 0x0e, 0x86, 0xd4, 0x8d, 0xd2, 0x0a, 0xbf, + 0x18, 0x30, 0xe9, 0x31, 0x91, 0xac, 0x24, 0xcb, 0xf3, 0xe3, 0xd7, 0x99, 0x52, 0x59, 0x01, 0xb4, + 0x57, 0xd2, 0xee, 0x17, 0xd5, 0xb2, 0x84, 0x56, 0xb3, 0xb2, 0x1e, 0x4e, 0x4d, 0xff, 0x4e, 0xd0, + 0x64, 0x01, 0x06, 0x1f, 0x21, 0x5b, 0x0a, 0xcf, 0xf2, 0xad, 0xc0, 0x89, 0x6d, 0x29, 0x70, 0x80, + 0x9e, 0x77, 0x2d, 0x34, 0x09, 0xe3, 0x5c, 0x75, 0x95, 0x4e, 0xa4, 0xf0, 0xec, 0x47, 0x78, 0xbb, + 0x13, 0x1f, 0x3e, 0x82, 0x70, 0xc8, 0x23, 0x81, 0x09, 0xc2, 0x2d, 0x34, 0x4b, 0xc9, 0x61, 0x53, + 0x9e, 0xac, 0x64, 0x77, 0xc5, 0xd6, 0xfe, 0x25, 0x42, 0xbc, 0x01, 0xa6, 0x41, 0x24, 0x4c, 0x7b, + 0x4f, 0x7c, 0x2b, 0xd8, 0x9f, 0x1d, 0x93, 0xa1, 0x27, 0x19, 0x7b, 0x92, 0xfb, 0xb1, 0x67, 
0xec, + 0xac, 0xec, 0x50, 0x63, 0x1f, 0xed, 0x0b, 0x68, 0x79, 0x23, 0x6b, 0x2d, 0x55, 0xe5, 0x3d, 0xed, + 0xdb, 0x6e, 0x46, 0xf8, 0x06, 0x1d, 0xe6, 0x60, 0x12, 0x56, 0x64, 0xaa, 0x91, 0xfa, 0xa1, 0xf4, + 0x76, 0x7d, 0x2b, 0x38, 0x9a, 0x4d, 0xc9, 0x96, 0xe5, 0x90, 0x05, 0x18, 0x12, 0x8e, 0x66, 0x7c, + 0x90, 0x83, 0xf9, 0x3f, 0xe1, 0x13, 0x84, 0xea, 0x2e, 0x2d, 0x24, 0x4f, 0x72, 0x30, 0xde, 0x5e, + 0x7f, 0x93, 0x33, 0x24, 0x0b, 0x30, 0xd3, 0x39, 0x72, 0xd6, 0xee, 0x2b, 0xf4, 0x32, 0xfc, 0x7c, + 0xf3, 0x2d, 0x8e, 0xee, 0x6f, 0xbf, 0x24, 0x3f, 0xbe, 0xde, 0x7d, 0xbf, 0xfe, 0x10, 0x7d, 0x8a, + 0xae, 0x3f, 0xba, 0x3b, 0xf8, 0x00, 0x3d, 0x8b, 0xef, 0xc2, 0x64, 0x76, 0x76, 0xf1, 0xce, 0xb5, + 0xc6, 0xe9, 0xe2, 0xec, 0xf2, 0xad, 0x6b, 0xcf, 0x1d, 0xb4, 0xd7, 0x76, 0xe9, 0x6f, 0xe0, 0x7a, + 0xfe, 0xfe, 0xe7, 0x55, 0x26, 0xf5, 0x43, 0x97, 0x12, 0xae, 0x4a, 0x3a, 0x74, 0x3d, 0x1d, 0xde, + 0x39, 0x53, 0xa7, 0x19, 0x54, 0xfd, 0x5e, 0xe8, 0x96, 0x0f, 0x70, 0x25, 0x59, 0x99, 0xee, 0xf6, + 0xf8, 0xcd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0xba, 0x65, 0x21, 0x22, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key_service.pb.go new file mode 100644 index 000000000..ff2655973 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/key_service.pb.go @@ -0,0 +1,624 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/key_service.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type KeyFormat int32 + +const ( + // Privacy-Enhanced Mail (PEM) format. Default value. + KeyFormat_PEM_FILE KeyFormat = 0 +) + +var KeyFormat_name = map[int32]string{ + 0: "PEM_FILE", +} +var KeyFormat_value = map[string]int32{ + "PEM_FILE": 0, +} + +func (x KeyFormat) String() string { + return proto.EnumName(KeyFormat_name, int32(x)) +} +func (KeyFormat) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{0} +} + +type GetKeyRequest struct { + // ID of the Key resource to return. + // To get the ID use a [KeyService.List] request. + KeyId string `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + // Output format of the key. 
+ Format KeyFormat `protobuf:"varint,2,opt,name=format,proto3,enum=yandex.cloud.iam.v1.KeyFormat" json:"format,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetKeyRequest) Reset() { *m = GetKeyRequest{} } +func (m *GetKeyRequest) String() string { return proto.CompactTextString(m) } +func (*GetKeyRequest) ProtoMessage() {} +func (*GetKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{0} +} +func (m *GetKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetKeyRequest.Unmarshal(m, b) +} +func (m *GetKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetKeyRequest.Marshal(b, m, deterministic) +} +func (dst *GetKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetKeyRequest.Merge(dst, src) +} +func (m *GetKeyRequest) XXX_Size() int { + return xxx_messageInfo_GetKeyRequest.Size(m) +} +func (m *GetKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetKeyRequest proto.InternalMessageInfo + +func (m *GetKeyRequest) GetKeyId() string { + if m != nil { + return m.KeyId + } + return "" +} + +func (m *GetKeyRequest) GetFormat() KeyFormat { + if m != nil { + return m.Format + } + return KeyFormat_PEM_FILE +} + +type ListKeysRequest struct { + // Output format of the key. + Format KeyFormat `protobuf:"varint,1,opt,name=format,proto3,enum=yandex.cloud.iam.v1.KeyFormat" json:"format,omitempty"` + // ID of the service account to list key pairs for. + // To get the service account ID, use a [yandex.cloud.iam.v1.ServiceAccountService.List] request. + // If not specified, it defaults to the subject that made the request. + ServiceAccountId string `protobuf:"bytes,2,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListKeysResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListKeysResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListKeysRequest) Reset() { *m = ListKeysRequest{} } +func (m *ListKeysRequest) String() string { return proto.CompactTextString(m) } +func (*ListKeysRequest) ProtoMessage() {} +func (*ListKeysRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{1} +} +func (m *ListKeysRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListKeysRequest.Unmarshal(m, b) +} +func (m *ListKeysRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListKeysRequest.Marshal(b, m, deterministic) +} +func (dst *ListKeysRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListKeysRequest.Merge(dst, src) +} +func (m *ListKeysRequest) XXX_Size() int { + return xxx_messageInfo_ListKeysRequest.Size(m) +} +func (m *ListKeysRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListKeysRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListKeysRequest proto.InternalMessageInfo + +func (m *ListKeysRequest) GetFormat() KeyFormat { + if m != nil { + return m.Format + } + return KeyFormat_PEM_FILE +} + +func (m *ListKeysRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *ListKeysRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListKeysRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListKeysResponse struct { + // List of Key resources. + Keys []*Key `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListKeysRequest.page_size], use + // the [next_page_token] as the value + // for the [ListKeysRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListKeysResponse) Reset() { *m = ListKeysResponse{} } +func (m *ListKeysResponse) String() string { return proto.CompactTextString(m) } +func (*ListKeysResponse) ProtoMessage() {} +func (*ListKeysResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{2} +} +func (m *ListKeysResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListKeysResponse.Unmarshal(m, b) +} +func (m *ListKeysResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListKeysResponse.Marshal(b, m, deterministic) +} +func (dst *ListKeysResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListKeysResponse.Merge(dst, src) +} +func (m *ListKeysResponse) XXX_Size() int { + return xxx_messageInfo_ListKeysResponse.Size(m) +} +func (m *ListKeysResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListKeysResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListKeysResponse proto.InternalMessageInfo + +func (m *ListKeysResponse) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +func (m *ListKeysResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateKeyRequest struct { + // ID of the service account to create a key pair for. + // To get the service account ID, use a [yandex.cloud.iam.v1.ServiceAccountService.List] request. + // If not specified, it defaults to the subject that made the request. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // Description of the key pair. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Output format of the key. + Format KeyFormat `protobuf:"varint,3,opt,name=format,proto3,enum=yandex.cloud.iam.v1.KeyFormat" json:"format,omitempty"` + // An algorithm used to generate a key pair of the Key resource. 
+ KeyAlgorithm Key_Algorithm `protobuf:"varint,4,opt,name=key_algorithm,json=keyAlgorithm,proto3,enum=yandex.cloud.iam.v1.Key_Algorithm" json:"key_algorithm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateKeyRequest) Reset() { *m = CreateKeyRequest{} } +func (m *CreateKeyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateKeyRequest) ProtoMessage() {} +func (*CreateKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{3} +} +func (m *CreateKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateKeyRequest.Unmarshal(m, b) +} +func (m *CreateKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateKeyRequest.Marshal(b, m, deterministic) +} +func (dst *CreateKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateKeyRequest.Merge(dst, src) +} +func (m *CreateKeyRequest) XXX_Size() int { + return xxx_messageInfo_CreateKeyRequest.Size(m) +} +func (m *CreateKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateKeyRequest proto.InternalMessageInfo + +func (m *CreateKeyRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *CreateKeyRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateKeyRequest) GetFormat() KeyFormat { + if m != nil { + return m.Format + } + return KeyFormat_PEM_FILE +} + +func (m *CreateKeyRequest) GetKeyAlgorithm() Key_Algorithm { + if m != nil { + return m.KeyAlgorithm + } + return Key_ALGORITHM_UNSPECIFIED +} + +type CreateKeyResponse struct { + // Key resource. + Key *Key `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // A private key of the Key resource. + // This key must be stored securely. + PrivateKey string `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateKeyResponse) Reset() { *m = CreateKeyResponse{} } +func (m *CreateKeyResponse) String() string { return proto.CompactTextString(m) } +func (*CreateKeyResponse) ProtoMessage() {} +func (*CreateKeyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{4} +} +func (m *CreateKeyResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateKeyResponse.Unmarshal(m, b) +} +func (m *CreateKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateKeyResponse.Marshal(b, m, deterministic) +} +func (dst *CreateKeyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateKeyResponse.Merge(dst, src) +} +func (m *CreateKeyResponse) XXX_Size() int { + return xxx_messageInfo_CreateKeyResponse.Size(m) +} +func (m *CreateKeyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateKeyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateKeyResponse proto.InternalMessageInfo + +func (m *CreateKeyResponse) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *CreateKeyResponse) GetPrivateKey() string { + if m != nil { + return m.PrivateKey + } + return "" +} + +type DeleteKeyRequest struct { + // ID of the key to delete. + // To get key ID use a [KeyService.List] request. 
+ KeyId string `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteKeyRequest) Reset() { *m = DeleteKeyRequest{} } +func (m *DeleteKeyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteKeyRequest) ProtoMessage() {} +func (*DeleteKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_key_service_16d84045cfb780d5, []int{5} +} +func (m *DeleteKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteKeyRequest.Unmarshal(m, b) +} +func (m *DeleteKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteKeyRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteKeyRequest.Merge(dst, src) +} +func (m *DeleteKeyRequest) XXX_Size() int { + return xxx_messageInfo_DeleteKeyRequest.Size(m) +} +func (m *DeleteKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteKeyRequest proto.InternalMessageInfo + +func (m *DeleteKeyRequest) GetKeyId() string { + if m != nil { + return m.KeyId + } + return "" +} + +func init() { + proto.RegisterType((*GetKeyRequest)(nil), "yandex.cloud.iam.v1.GetKeyRequest") + proto.RegisterType((*ListKeysRequest)(nil), "yandex.cloud.iam.v1.ListKeysRequest") + proto.RegisterType((*ListKeysResponse)(nil), "yandex.cloud.iam.v1.ListKeysResponse") + proto.RegisterType((*CreateKeyRequest)(nil), "yandex.cloud.iam.v1.CreateKeyRequest") + proto.RegisterType((*CreateKeyResponse)(nil), "yandex.cloud.iam.v1.CreateKeyResponse") + proto.RegisterType((*DeleteKeyRequest)(nil), "yandex.cloud.iam.v1.DeleteKeyRequest") + proto.RegisterEnum("yandex.cloud.iam.v1.KeyFormat", KeyFormat_name, KeyFormat_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KeyServiceClient is the client API for KeyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyServiceClient interface { + // Returns the specified Key resource. + // + // To get the list of available Key resources, make a [List] request. + Get(ctx context.Context, in *GetKeyRequest, opts ...grpc.CallOption) (*Key, error) + // Retrieves the list of Key resources for the specified service account. + List(ctx context.Context, in *ListKeysRequest, opts ...grpc.CallOption) (*ListKeysResponse, error) + // Creates a key pair for the specified service account. + Create(ctx context.Context, in *CreateKeyRequest, opts ...grpc.CallOption) (*CreateKeyResponse, error) + // Deletes the specified key pair. + Delete(ctx context.Context, in *DeleteKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type keyServiceClient struct { + cc *grpc.ClientConn +} + +func NewKeyServiceClient(cc *grpc.ClientConn) KeyServiceClient { + return &keyServiceClient{cc} +} + +func (c *keyServiceClient) Get(ctx context.Context, in *GetKeyRequest, opts ...grpc.CallOption) (*Key, error) { + out := new(Key) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.KeyService/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyServiceClient) List(ctx context.Context, in *ListKeysRequest, opts ...grpc.CallOption) (*ListKeysResponse, error) { + out := new(ListKeysResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.KeyService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyServiceClient) Create(ctx context.Context, in *CreateKeyRequest, opts ...grpc.CallOption) (*CreateKeyResponse, error) { + out := new(CreateKeyResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.KeyService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyServiceClient) Delete(ctx context.Context, in *DeleteKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.KeyService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyServiceServer is the server API for KeyService service. +type KeyServiceServer interface { + // Returns the specified Key resource. + // + // To get the list of available Key resources, make a [List] request. + Get(context.Context, *GetKeyRequest) (*Key, error) + // Retrieves the list of Key resources for the specified service account. + List(context.Context, *ListKeysRequest) (*ListKeysResponse, error) + // Creates a key pair for the specified service account. + Create(context.Context, *CreateKeyRequest) (*CreateKeyResponse, error) + // Deletes the specified key pair. + Delete(context.Context, *DeleteKeyRequest) (*empty.Empty, error) +} + +func RegisterKeyServiceServer(s *grpc.Server, srv KeyServiceServer) { + s.RegisterService(&_KeyService_serviceDesc, srv) +} + +func _KeyService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.KeyService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyServiceServer).Get(ctx, req.(*GetKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.KeyService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyServiceServer).List(ctx, req.(*ListKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.KeyService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(KeyServiceServer).Create(ctx, req.(*CreateKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.KeyService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyServiceServer).Delete(ctx, req.(*DeleteKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.KeyService", + HandlerType: (*KeyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _KeyService_Get_Handler, + }, + { + MethodName: "List", + Handler: _KeyService_List_Handler, + }, + { + MethodName: "Create", + Handler: _KeyService_Create_Handler, + }, + { + MethodName: "Delete", + Handler: _KeyService_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/iam/v1/key_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/key_service.proto", fileDescriptor_key_service_16d84045cfb780d5) +} + +var fileDescriptor_key_service_16d84045cfb780d5 = []byte{ + // 685 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x41, 0x53, 0xd3, 0x5c, + 0x14, 0xfd, 0x42, 0x4b, 0x87, 0x5e, 0x0a, 0x94, 0xf7, 0xa9, 0xd4, 0x2a, 0xda, 0x89, 0x82, 0x9d, + 0x2a, 0x49, 0x5b, 0x07, 0x9c, 0x11, 0x58, 0x80, 0x02, 0xc3, 0x14, 0x67, 0x98, 0xe0, 0xca, 0x4d, + 0x7d, 0x6d, 0x2f, 0xe5, 0x4d, 0x9b, 0xbc, 0x98, 0xbc, 0x76, 0x08, 0x8e, 0x1b, 0x97, 0x6c, 0xfd, + 0x2d, 0xfe, 0x06, 0xd8, 0xeb, 0x4f, 0x70, 0xe1, 0x4f, 0x70, 0x5c, 0x39, 0x79, 0x49, 0x4b, 0x60, + 0x52, 0x91, 0x65, 0x72, 0xcf, 0x3b, 0xe7, 0x9e, 0x73, 0xdf, 0xbb, 0xb0, 0xe0, 0x51, 0xab, 0x85, + 0xc7, 0x7a, 0xb3, 0xcb, 0x7b, 0x2d, 0x9d, 0x51, 0x53, 0xef, 0x57, 0xf4, 0x0e, 0x7a, 0x75, 0x17, + 0x9d, 0x3e, 0x6b, 0xa2, 0x66, 0x3b, 0x5c, 0x70, 0xf2, 0x7f, 0x00, 0xd3, 0x24, 0x4c, 0x63, 0xd4, + 0xd4, 0xfa, 0x95, 0xfc, 0xfd, 0x36, 0xe7, 0xed, 0x2e, 0xea, 0xd4, 0x66, 0x3a, 0xb5, 0x2c, 0x2e, + 0xa8, 0x60, 0xdc, 0x72, 0x83, 0x23, 0xf9, 0x7b, 0x61, 0x55, 0x7e, 0x35, 0x7a, 0x87, 0x3a, 0x9a, + 0xb6, 0xf0, 0xc2, 0xe2, 0xfc, 0x08, 0xd9, 0xd8, 0x72, 0x9f, 0x76, 0x59, 0x4b, 0x72, 0x07, 0x65, + 0xb5, 0x0b, 0x53, 0x3b, 0x28, 0x6a, 0xe8, 0x19, 0xf8, 0xa1, 0x87, 0xae, 0x20, 0x8f, 0x20, 0xe5, + 0xf7, 0xcc, 0x5a, 0x39, 0xa5, 0xa0, 0x14, 0xd3, 0x9b, 0x99, 0x9f, 0x67, 0x15, 0xe5, 0xf4, 0xbc, + 0x92, 0x5c, 0x5b, 0x5f, 0x2e, 0x1b, 0xe3, 0x1d, 0xf4, 0x76, 0x5b, 0x64, 0x05, 0x52, 0x87, 0xdc, + 0x31, 0xa9, 0xc8, 0x8d, 0x15, 0x94, 0xe2, 0x74, 0xf5, 0x81, 0x16, 0x63, 0x4a, 0xab, 0xa1, 0xb7, + 0x2d, 0x51, 0x46, 0x88, 0x56, 0xbf, 0x2b, 0x30, 0xb3, 0xc7, 0x5c, 0x5f, 0xcf, 0x1d, 0x08, 0x5e, + 0x70, 0x29, 0x37, 0xe1, 0x22, 0x2b, 0x40, 0xc2, 0x60, 0xeb, 0xb4, 0xd9, 0xe4, 0x3d, 0x4b, 0xf8, + 0x4d, 0x8f, 0xc9, 0xa6, 0x27, 0x86, 0x0d, 0x67, 0x43, 0xcc, 0x46, 0x00, 0xd9, 0x6d, 0x91, 0x27, + 0x90, 0xb6, 0x69, 0x1b, 0xeb, 0x2e, 0x3b, 0xc1, 0x5c, 0xa2, 0xa0, 0x14, 0x13, 0x9b, 0xf0, 0xfb, + 0xac, 0x92, 0x5a, 0x5b, 0xaf, 0x94, 0xcb, 0x65, 0x63, 0xc2, 0x2f, 0x1e, 0xb0, 0x13, 0x24, 0x45, + 0x00, 0x09, 0x14, 0xbc, 0x83, 0x56, 0x2e, 0x29, 0x89, 0xd3, 
0xa7, 0xe7, 0x95, 0x71, 0x89, 0x34, + 0x24, 0xcb, 0x5b, 0xbf, 0xa6, 0x1e, 0x41, 0xf6, 0xc2, 0x95, 0x6b, 0x73, 0xcb, 0x45, 0xf2, 0x0c, + 0x92, 0x1d, 0xf4, 0xdc, 0x9c, 0x52, 0x48, 0x14, 0x27, 0xab, 0xb9, 0x51, 0xa6, 0x0c, 0x89, 0x22, + 0x8b, 0x30, 0x63, 0xe1, 0xb1, 0xa8, 0x47, 0x04, 0xa5, 0x13, 0x63, 0xca, 0xff, 0xbd, 0x3f, 0x54, + 0xfa, 0xa5, 0x40, 0xf6, 0x95, 0x83, 0x54, 0x60, 0x64, 0x64, 0xf1, 0x49, 0x28, 0xd7, 0x26, 0xf1, + 0x14, 0x26, 0x5b, 0xe8, 0x36, 0x1d, 0x66, 0xfb, 0x17, 0x22, 0x8c, 0x2e, 0x74, 0x58, 0x5d, 0x5e, + 0x31, 0xa2, 0xd5, 0xc8, 0x98, 0x12, 0x37, 0x1a, 0xd3, 0x0e, 0x4c, 0xf9, 0xf7, 0x89, 0x76, 0xdb, + 0xdc, 0x61, 0xe2, 0xc8, 0x94, 0x41, 0x4e, 0x57, 0xd5, 0x51, 0xc7, 0xb5, 0x8d, 0x01, 0xd2, 0xc8, + 0x74, 0xd0, 0x1b, 0x7e, 0xa9, 0xef, 0x61, 0x36, 0xe2, 0x3c, 0x4c, 0xb9, 0x04, 0x89, 0x0e, 0x7a, + 0xd2, 0xeb, 0xdf, 0x42, 0xf6, 0x41, 0xe4, 0x21, 0x4c, 0xda, 0x0e, 0xeb, 0x53, 0x81, 0x75, 0xff, + 0x4c, 0x90, 0x2f, 0x84, 0xbf, 0x6a, 0xe8, 0xa9, 0x2f, 0x20, 0xfb, 0x1a, 0xbb, 0x78, 0x29, 0xdb, + 0x7f, 0x79, 0x0e, 0xa5, 0xbb, 0x90, 0x1e, 0x1a, 0x27, 0x19, 0x98, 0xd8, 0xdf, 0x7a, 0x53, 0xdf, + 0xde, 0xdd, 0xdb, 0xca, 0xfe, 0x57, 0xfd, 0x9a, 0x00, 0xa8, 0xa1, 0x77, 0x10, 0x64, 0x4f, 0x1a, + 0x90, 0xd8, 0x41, 0x41, 0xe2, 0xdd, 0x5f, 0x7a, 0x88, 0xf9, 0x91, 0x6e, 0xd4, 0xf9, 0xcf, 0xdf, + 0x7e, 0x7c, 0x19, 0x9b, 0x23, 0xb7, 0x23, 0xaf, 0xdd, 0xd5, 0x3f, 0x06, 0x7d, 0x7e, 0x22, 0x0c, + 0x92, 0xfe, 0x6d, 0x24, 0x8f, 0x63, 0x09, 0xae, 0x3c, 0xbf, 0xfc, 0xc2, 0x35, 0xa8, 0x20, 0x68, + 0xf5, 0x96, 0xd4, 0x9c, 0x26, 0x99, 0xa8, 0x26, 0xb1, 0x21, 0x15, 0xcc, 0x84, 0xc4, 0xd3, 0x5c, + 0xbd, 0xaa, 0xf9, 0xc5, 0xeb, 0x60, 0xa1, 0xdc, 0x9c, 0x94, 0x9b, 0x55, 0x2f, 0xc9, 0xbd, 0x54, + 0x4a, 0xe4, 0x10, 0x52, 0xc1, 0x8c, 0x46, 0x28, 0x5e, 0x1d, 0x60, 0xfe, 0x8e, 0x16, 0x2c, 0x4f, + 0x6d, 0xb0, 0x3c, 0xb5, 0x2d, 0x7f, 0x79, 0x0e, 0x42, 0x2c, 0xc5, 0x87, 0xb8, 0xb9, 0xfe, 0x6e, + 0xb5, 0xcd, 0xc4, 0x51, 0xaf, 0xa1, 0x35, 0xb9, 0xa9, 0x07, 0x4a, 0x4b, 0xc1, 0x0e, 0x6d, 0xf3, + 0xa5, 0x36, 0x5a, 0x92, 0x4e, 0x8f, 0xd9, 0xbd, 0xab, 0x8c, 0x9a, 0x8d, 0x94, 0x2c, 0x3f, 0xff, + 0x13, 0x00, 0x00, 0xff, 0xff, 0xca, 0x77, 0xed, 0xa5, 0x14, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role.pb.go new file mode 100644 index 000000000..7d1f65b2d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role.pb.go @@ -0,0 +1,90 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/role.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Role resource. For more information, see [Roles](/docs/iam/concepts/access-control/roles). +type Role struct { + // ID of the role. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Description of the role. 0-256 characters long. 
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_role_656bcd2fea39e7f2, []int{0} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Role.Unmarshal(m, b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Role.Marshal(b, m, deterministic) +} +func (dst *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(dst, src) +} +func (m *Role) XXX_Size() int { + return xxx_messageInfo_Role.Size(m) +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *Role) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Role) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*Role)(nil), "yandex.cloud.iam.v1.Role") +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/role.proto", fileDescriptor_role_656bcd2fea39e7f2) +} + +var fileDescriptor_role_656bcd2fea39e7f2 = []byte{ + // 155 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xab, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0xd4, + 0x2f, 0xca, 0xcf, 0x49, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, 0xc8, 0xeb, 0x81, + 0xe5, 0xf5, 0x32, 0x13, 0x73, 0xf5, 0xca, 0x0c, 0x95, 0x2c, 0xb8, 0x58, 0x82, 0xf2, 0x73, 0x52, + 0x85, 0xf8, 0xb8, 0x98, 0x32, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x32, 0x53, + 0x84, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0x24, + 0x98, 0xc0, 0x12, 0xc8, 0x42, 0x4e, 0xb6, 0x51, 0xd6, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, + 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0xb3, 0x75, 0x21, 0x76, 0xa7, 0xe7, 0xeb, 0xa6, 0xa7, 0xe6, 0x81, + 0x6d, 0xd5, 0xc7, 0xe2, 0x28, 0xeb, 0xcc, 0xc4, 0xdc, 0x24, 0x36, 0xb0, 0xb4, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0x48, 0x72, 0x28, 0xb9, 0xb6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role_service.pb.go new file mode 100644 index 000000000..51b4a8c8f --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/role_service.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/role_service.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetRoleRequest struct { + // ID of the Role resource to return. + // To get the role ID, use a [RoleService.List] request. + RoleId string `protobuf:"bytes,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRoleRequest) Reset() { *m = GetRoleRequest{} } +func (m *GetRoleRequest) String() string { return proto.CompactTextString(m) } +func (*GetRoleRequest) ProtoMessage() {} +func (*GetRoleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_role_service_e49934510c2989c5, []int{0} +} +func (m *GetRoleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRoleRequest.Unmarshal(m, b) +} +func (m *GetRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRoleRequest.Marshal(b, m, deterministic) +} +func (dst *GetRoleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRoleRequest.Merge(dst, src) +} +func (m *GetRoleRequest) XXX_Size() int { + return xxx_messageInfo_GetRoleRequest.Size(m) +} +func (m *GetRoleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRoleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRoleRequest proto.InternalMessageInfo + +func (m *GetRoleRequest) GetRoleId() string { + if m != nil { + return m.RoleId + } + return "" +} + +type ListRolesRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListRolesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] + // to the [ListRolesResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. 
+ Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListRolesRequest) Reset() { *m = ListRolesRequest{} } +func (m *ListRolesRequest) String() string { return proto.CompactTextString(m) } +func (*ListRolesRequest) ProtoMessage() {} +func (*ListRolesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_role_service_e49934510c2989c5, []int{1} +} +func (m *ListRolesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListRolesRequest.Unmarshal(m, b) +} +func (m *ListRolesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListRolesRequest.Marshal(b, m, deterministic) +} +func (dst *ListRolesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListRolesRequest.Merge(dst, src) +} +func (m *ListRolesRequest) XXX_Size() int { + return xxx_messageInfo_ListRolesRequest.Size(m) +} +func (m *ListRolesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListRolesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListRolesRequest proto.InternalMessageInfo + +func (m *ListRolesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListRolesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListRolesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListRolesResponse struct { + // List of Role resources. + Roles []*Role `protobuf:"bytes,1,rep,name=roles,proto3" json:"roles,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListRolesRequest.page_size], use + // the [next_page_token] as the value + // for the [ListRolesRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListRolesResponse) Reset() { *m = ListRolesResponse{} } +func (m *ListRolesResponse) String() string { return proto.CompactTextString(m) } +func (*ListRolesResponse) ProtoMessage() {} +func (*ListRolesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_role_service_e49934510c2989c5, []int{2} +} +func (m *ListRolesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListRolesResponse.Unmarshal(m, b) +} +func (m *ListRolesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListRolesResponse.Marshal(b, m, deterministic) +} +func (dst *ListRolesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListRolesResponse.Merge(dst, src) +} +func (m *ListRolesResponse) XXX_Size() int { + return xxx_messageInfo_ListRolesResponse.Size(m) +} +func (m *ListRolesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListRolesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListRolesResponse proto.InternalMessageInfo + +func (m *ListRolesResponse) GetRoles() []*Role { + if m != nil { + return m.Roles + } + return nil +} + +func (m *ListRolesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetRoleRequest)(nil), "yandex.cloud.iam.v1.GetRoleRequest") + proto.RegisterType((*ListRolesRequest)(nil), "yandex.cloud.iam.v1.ListRolesRequest") + proto.RegisterType((*ListRolesResponse)(nil), "yandex.cloud.iam.v1.ListRolesResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RoleServiceClient is the client API for RoleService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RoleServiceClient interface { + // Returns the specified Role resource. + // + // To get the list of available Role resources, use a [List] request. + Get(ctx context.Context, in *GetRoleRequest, opts ...grpc.CallOption) (*Role, error) + // Retrieves the list of Role resources. + List(ctx context.Context, in *ListRolesRequest, opts ...grpc.CallOption) (*ListRolesResponse, error) +} + +type roleServiceClient struct { + cc *grpc.ClientConn +} + +func NewRoleServiceClient(cc *grpc.ClientConn) RoleServiceClient { + return &roleServiceClient{cc} +} + +func (c *roleServiceClient) Get(ctx context.Context, in *GetRoleRequest, opts ...grpc.CallOption) (*Role, error) { + out := new(Role) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.RoleService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *roleServiceClient) List(ctx context.Context, in *ListRolesRequest, opts ...grpc.CallOption) (*ListRolesResponse, error) { + out := new(ListRolesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.RoleService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RoleServiceServer is the server API for RoleService service. 
+type RoleServiceServer interface { + // Returns the specified Role resource. + // + // To get the list of available Role resources, use a [List] request. + Get(context.Context, *GetRoleRequest) (*Role, error) + // Retrieves the list of Role resources. + List(context.Context, *ListRolesRequest) (*ListRolesResponse, error) +} + +func RegisterRoleServiceServer(s *grpc.Server, srv RoleServiceServer) { + s.RegisterService(&_RoleService_serviceDesc, srv) +} + +func _RoleService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RoleServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.RoleService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RoleServiceServer).Get(ctx, req.(*GetRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RoleService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRolesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RoleServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.RoleService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RoleServiceServer).List(ctx, req.(*ListRolesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RoleService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.RoleService", + HandlerType: (*RoleServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _RoleService_Get_Handler, + }, + { + MethodName: "List", + Handler: _RoleService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/iam/v1/role_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/role_service.proto", fileDescriptor_role_service_e49934510c2989c5) +} + +var fileDescriptor_role_service_e49934510c2989c5 = []byte{ + // 431 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0x3f, 0x6f, 0xd3, 0x40, + 0x14, 0x97, 0x9b, 0x36, 0x90, 0x57, 0x4a, 0xe1, 0x10, 0xc2, 0xb5, 0xf8, 0x53, 0x19, 0x35, 0x64, + 0xa9, 0xcf, 0x2e, 0x42, 0x0c, 0x6d, 0x96, 0x2c, 0x15, 0x12, 0x03, 0x72, 0x99, 0x58, 0xa2, 0x6b, + 0xfc, 0x6a, 0x4e, 0x9c, 0xef, 0x8c, 0xef, 0x62, 0x95, 0x22, 0x16, 0x36, 0xb2, 0xf2, 0xa1, 0x9a, + 0x9d, 0x8f, 0x00, 0x03, 0x9f, 0x81, 0x09, 0xf9, 0x2e, 0x45, 0x4d, 0xe5, 0x8a, 0xf5, 0x7e, 0x7f, + 0xef, 0xbd, 0x07, 0xfd, 0x4f, 0x4c, 0x66, 0x78, 0x4a, 0x27, 0x42, 0x4d, 0x33, 0xca, 0x59, 0x41, + 0xeb, 0x84, 0x56, 0x4a, 0xe0, 0x58, 0x63, 0x55, 0xf3, 0x09, 0x46, 0x65, 0xa5, 0x8c, 0x22, 0xf7, + 0x1c, 0x2f, 0xb2, 0xbc, 0x88, 0xb3, 0x22, 0xaa, 0x93, 0xe0, 0x61, 0xae, 0x54, 0x2e, 0x90, 0xb2, + 0x92, 0x53, 0x26, 0xa5, 0x32, 0xcc, 0x70, 0x25, 0xb5, 0x93, 0x04, 0x8f, 0x96, 0xac, 0x6b, 0x26, + 0x78, 0x66, 0xf1, 0x05, 0xfc, 0xf8, 0xba, 0x64, 0x87, 0x87, 0x2f, 0xe1, 0xf6, 0x21, 0x9a, 0x54, + 0x09, 0x4c, 0xf1, 0xe3, 0x14, 0xb5, 0x21, 0x3b, 0x70, 0xc3, 0x36, 0xe3, 0x99, 0xef, 0x6d, 0x7b, + 0x83, 0xde, 0xe8, 0xd6, 0xef, 0xf3, 0xc4, 0x9b, 0xcd, 0x93, 0xd5, 0x83, 0xe1, 0x8b, 0x38, 0xed, + 0x36, 0xe0, 
0xab, 0x2c, 0xfc, 0xe6, 0xc1, 0x9d, 0xd7, 0x5c, 0x5b, 0xa9, 0xbe, 0xd0, 0x3e, 0x83, + 0x5e, 0xc9, 0x72, 0x1c, 0x6b, 0x7e, 0x86, 0x56, 0xdd, 0x19, 0xc1, 0x9f, 0xf3, 0xa4, 0x7b, 0x30, + 0x4c, 0xe2, 0x38, 0x4e, 0x6f, 0x36, 0xe0, 0x11, 0x3f, 0x43, 0x32, 0x00, 0xb0, 0x44, 0xa3, 0x3e, + 0xa0, 0xf4, 0x57, 0x6c, 0x4e, 0x6f, 0x36, 0x4f, 0xd6, 0x2c, 0x33, 0xb5, 0x2e, 0x6f, 0x1b, 0x8c, + 0x84, 0xd0, 0x3d, 0xe1, 0xc2, 0x60, 0xe5, 0x77, 0x2c, 0x0b, 0x66, 0xf3, 0x7f, 0x7e, 0x0b, 0x24, + 0x14, 0x70, 0xf7, 0x52, 0x15, 0x5d, 0x2a, 0xa9, 0x91, 0x50, 0x58, 0x6b, 0xaa, 0x6a, 0xdf, 0xdb, + 0xee, 0x0c, 0xd6, 0xf7, 0xb6, 0xa2, 0x96, 0xd9, 0x46, 0xf6, 0xe3, 0x8e, 0x47, 0xfa, 0xb0, 0x29, + 0xf1, 0xd4, 0x8c, 0xaf, 0x16, 0x4b, 0x37, 0x9a, 0xe7, 0x37, 0x17, 0x8d, 0xf6, 0x7e, 0x7a, 0xb0, + 0xde, 0xe8, 0x8e, 0xdc, 0xea, 0xc8, 0x09, 0x74, 0x0e, 0xd1, 0x90, 0xa7, 0xad, 0x01, 0xcb, 0xc3, + 0x0d, 0xae, 0x6f, 0x11, 0x3e, 0xf9, 0xfa, 0xe3, 0xd7, 0xf7, 0x95, 0x2d, 0xf2, 0xe0, 0xf2, 0x96, + 0x34, 0xfd, 0xbc, 0x58, 0xc6, 0x17, 0x22, 0x60, 0xb5, 0xf9, 0x25, 0xd9, 0x69, 0xf5, 0xb8, 0xba, + 0x8b, 0xa0, 0xff, 0x3f, 0x9a, 0x9b, 0x53, 0x78, 0xdf, 0xe6, 0x6e, 0x92, 0x8d, 0xa5, 0xdc, 0xd1, + 0xf0, 0xdd, 0x7e, 0xce, 0xcd, 0xfb, 0xe9, 0x71, 0x34, 0x51, 0x05, 0x75, 0x56, 0xbb, 0xee, 0x8a, + 0x72, 0xb5, 0x9b, 0xa3, 0xb4, 0xf7, 0x43, 0x5b, 0xce, 0x6b, 0x9f, 0xb3, 0xe2, 0xb8, 0x6b, 0xe1, + 0xe7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x85, 0xbc, 0xc9, 0x90, 0xfa, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account.pb.go new file mode 100644 index 000000000..d71c9e987 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/service_account.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ServiceAccount resource. For more information, see [Service accounts](/docs/iam/concepts/users/service-accounts). +type ServiceAccount struct { + // ID of the service account. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the service account belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the service account. + // The name is unique within the cloud. 3-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the service account. 
0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (m *ServiceAccount) String() string { return proto.CompactTextString(m) } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_ff4a14d624b4e70e, []int{0} +} +func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceAccount.Unmarshal(m, b) +} +func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceAccount.Marshal(b, m, deterministic) +} +func (dst *ServiceAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccount.Merge(dst, src) +} +func (m *ServiceAccount) XXX_Size() int { + return xxx_messageInfo_ServiceAccount.Size(m) +} +func (m *ServiceAccount) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo + +func (m *ServiceAccount) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ServiceAccount) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ServiceAccount) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *ServiceAccount) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ServiceAccount) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*ServiceAccount)(nil), "yandex.cloud.iam.v1.ServiceAccount") +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/service_account.proto", fileDescriptor_service_account_ff4a14d624b4e70e) +} + +var fileDescriptor_service_account_ff4a14d624b4e70e = []byte{ + // 269 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4b, 0xc3, 0x30, + 0x18, 0xc5, 0x69, 0x9d, 0x62, 0x33, 0xd8, 0x21, 0x5e, 0x4a, 0x45, 0x2c, 0x9e, 0xe6, 0x61, 0x09, + 0xd3, 0x93, 0x0c, 0x0f, 0xf3, 0xe6, 0x75, 0x7a, 0xf2, 0x52, 0xbe, 0x26, 0xdf, 0xea, 0x07, 0x4d, + 0x53, 0xd2, 0xb4, 0xe8, 0x3f, 0xe5, 0xdf, 0x28, 0x26, 0x0e, 0x14, 0x76, 0x0b, 0xef, 0xbd, 0xe4, + 0xfd, 0x5e, 0xd8, 0xed, 0x27, 0x74, 0x1a, 0x3f, 0xa4, 0x6a, 0xed, 0xa8, 0x25, 0x81, 0x91, 0xd3, + 0x5a, 0x0e, 0xe8, 0x26, 0x52, 0x58, 0x81, 0x52, 0x76, 0xec, 0xbc, 0xe8, 0x9d, 0xf5, 0x96, 0x5f, + 0xc4, 0xa8, 0x08, 0x51, 0x41, 0x60, 0xc4, 0xb4, 0x2e, 0xae, 0x1b, 0x6b, 0x9b, 0x16, 0x65, 0x88, + 0xd4, 0xe3, 0x5e, 0x7a, 0x32, 0x38, 0x78, 0x30, 0x7d, 0xbc, 0x55, 0x5c, 0xfd, 0x2b, 0x98, 0xa0, + 0x25, 0x0d, 0x9e, 0x6c, 0x17, 0xed, 0x9b, 0xaf, 0x84, 0x2d, 0x5e, 0x62, 0xdd, 0x36, 0xb6, 0xf1, + 0x05, 0x4b, 0x49, 0xe7, 0x49, 0x99, 0x2c, 0xb3, 0x5d, 0x4a, 0x9a, 0x5f, 0xb2, 0x6c, 0x6f, 0x5b, + 0x8d, 0xae, 0x22, 0x9d, 0xa7, 0x41, 0x3e, 0x8f, 0xc2, 0xb3, 0xe6, 0x0f, 0x8c, 0x29, 0x87, 0xe0, + 0x51, 0x57, 0xe0, 0xf3, 0x93, 0x32, 0x59, 0xce, 0xef, 0x0a, 0x11, 0xa1, 0xc4, 0x01, 0x4a, 0xbc, + 0x1e, 0xa0, 0x76, 0xd9, 0x6f, 0x7a, 0xeb, 0x39, 0x67, 0xb3, 0x0e, 0x0c, 0xe6, 0xb3, 0xf0, 0x64, + 0x38, 0xf3, 0x92, 0xcd, 0x35, 0x0e, 0xca, 0x51, 0xff, 0xc3, 0x98, 0x9f, 0x06, 0xeb, 0xaf, 0xf4, + 0xf4, 0xf8, 0xb6, 0x69, 0xc8, 0xbf, 0x8f, 0xb5, 0x50, 0xd6, 0xc8, 0x38, 0x6e, 0x15, 0xc7, 0x35, + 
0x76, 0xd5, 0x60, 0x17, 0x4a, 0xe5, 0x91, 0x6f, 0xdd, 0x10, 0x98, 0xfa, 0x2c, 0xd8, 0xf7, 0xdf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x65, 0x42, 0x1f, 0xbf, 0x78, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account_service.pb.go new file mode 100644 index 000000000..233a68831 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/service_account_service.pb.go @@ -0,0 +1,1041 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/service_account_service.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import access "github.com/yandex-cloud/go-genproto/yandex/cloud/access" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetServiceAccountRequest struct { + // ID of the ServiceAccount resource to return. + // To get the service account ID, use a [ServiceAccountService.List] request. 
+ ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountRequest) Reset() { *m = GetServiceAccountRequest{} } +func (m *GetServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountRequest) ProtoMessage() {} +func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{0} +} +func (m *GetServiceAccountRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountRequest.Unmarshal(m, b) +} +func (m *GetServiceAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountRequest.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountRequest.Merge(dst, src) +} +func (m *GetServiceAccountRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountRequest.Size(m) +} +func (m *GetServiceAccountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountRequest proto.InternalMessageInfo + +func (m *GetServiceAccountRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +type ListServiceAccountsRequest struct { + // ID of the folder to list service accounts in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListServiceAccountsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100 + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] + // to the [ListServiceAccountsResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [ServiceAccount.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]$`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceAccountsRequest) Reset() { *m = ListServiceAccountsRequest{} } +func (m *ListServiceAccountsRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountsRequest) ProtoMessage() {} +func (*ListServiceAccountsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{1} +} +func (m *ListServiceAccountsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceAccountsRequest.Unmarshal(m, b) +} +func (m *ListServiceAccountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceAccountsRequest.Marshal(b, m, deterministic) +} +func (dst *ListServiceAccountsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceAccountsRequest.Merge(dst, src) +} +func (m *ListServiceAccountsRequest) XXX_Size() int { + return xxx_messageInfo_ListServiceAccountsRequest.Size(m) +} +func (m *ListServiceAccountsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceAccountsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceAccountsRequest proto.InternalMessageInfo + +func (m *ListServiceAccountsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListServiceAccountsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServiceAccountsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListServiceAccountsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListServiceAccountsResponse struct { + // List of ServiceAccount resources. + ServiceAccounts []*ServiceAccount `protobuf:"bytes,1,rep,name=service_accounts,json=serviceAccounts,proto3" json:"service_accounts,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListServiceAccountsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListServiceAccountsRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceAccountsResponse) Reset() { *m = ListServiceAccountsResponse{} } +func (m *ListServiceAccountsResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountsResponse) ProtoMessage() {} +func (*ListServiceAccountsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{2} +} +func (m *ListServiceAccountsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceAccountsResponse.Unmarshal(m, b) +} +func (m *ListServiceAccountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceAccountsResponse.Marshal(b, m, deterministic) +} +func (dst *ListServiceAccountsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceAccountsResponse.Merge(dst, src) +} +func (m *ListServiceAccountsResponse) XXX_Size() int { + return xxx_messageInfo_ListServiceAccountsResponse.Size(m) +} +func (m *ListServiceAccountsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceAccountsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceAccountsResponse proto.InternalMessageInfo + +func (m *ListServiceAccountsResponse) GetServiceAccounts() []*ServiceAccount { + if m != nil { + return m.ServiceAccounts + } + return nil +} + +func (m *ListServiceAccountsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateServiceAccountRequest struct { + // ID of the folder to create a service account in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the service account. + // The name must be unique within the cloud. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the service account. 
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateServiceAccountRequest) Reset() { *m = CreateServiceAccountRequest{} } +func (m *CreateServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceAccountRequest) ProtoMessage() {} +func (*CreateServiceAccountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{3} +} +func (m *CreateServiceAccountRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateServiceAccountRequest.Unmarshal(m, b) +} +func (m *CreateServiceAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateServiceAccountRequest.Marshal(b, m, deterministic) +} +func (dst *CreateServiceAccountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateServiceAccountRequest.Merge(dst, src) +} +func (m *CreateServiceAccountRequest) XXX_Size() int { + return xxx_messageInfo_CreateServiceAccountRequest.Size(m) +} +func (m *CreateServiceAccountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateServiceAccountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateServiceAccountRequest proto.InternalMessageInfo + +func (m *CreateServiceAccountRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateServiceAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateServiceAccountRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type CreateServiceAccountMetadata struct { + // ID of the service account that is being created. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateServiceAccountMetadata) Reset() { *m = CreateServiceAccountMetadata{} } +func (m *CreateServiceAccountMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateServiceAccountMetadata) ProtoMessage() {} +func (*CreateServiceAccountMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{4} +} +func (m *CreateServiceAccountMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateServiceAccountMetadata.Unmarshal(m, b) +} +func (m *CreateServiceAccountMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateServiceAccountMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateServiceAccountMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateServiceAccountMetadata.Merge(dst, src) +} +func (m *CreateServiceAccountMetadata) XXX_Size() int { + return xxx_messageInfo_CreateServiceAccountMetadata.Size(m) +} +func (m *CreateServiceAccountMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateServiceAccountMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateServiceAccountMetadata proto.InternalMessageInfo + +func (m *CreateServiceAccountMetadata) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +type UpdateServiceAccountRequest struct { + // ID of the ServiceAccount resource to update. 
+ // To get the service account ID, use a [ServiceAccountService.List] request. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // Field mask that specifies which fields of the ServiceAccount resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the service account. + // The name must be unique within the cloud. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the service account. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateServiceAccountRequest) Reset() { *m = UpdateServiceAccountRequest{} } +func (m *UpdateServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateServiceAccountRequest) ProtoMessage() {} +func (*UpdateServiceAccountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{5} +} +func (m *UpdateServiceAccountRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateServiceAccountRequest.Unmarshal(m, b) +} +func (m *UpdateServiceAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateServiceAccountRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateServiceAccountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateServiceAccountRequest.Merge(dst, src) +} +func (m *UpdateServiceAccountRequest) XXX_Size() int { + return xxx_messageInfo_UpdateServiceAccountRequest.Size(m) +} +func (m *UpdateServiceAccountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateServiceAccountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateServiceAccountRequest proto.InternalMessageInfo + +func (m *UpdateServiceAccountRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *UpdateServiceAccountRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateServiceAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateServiceAccountRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type UpdateServiceAccountMetadata struct { + // ID of the ServiceAccount resource that is being updated. 
+ ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateServiceAccountMetadata) Reset() { *m = UpdateServiceAccountMetadata{} } +func (m *UpdateServiceAccountMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateServiceAccountMetadata) ProtoMessage() {} +func (*UpdateServiceAccountMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{6} +} +func (m *UpdateServiceAccountMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateServiceAccountMetadata.Unmarshal(m, b) +} +func (m *UpdateServiceAccountMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateServiceAccountMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateServiceAccountMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateServiceAccountMetadata.Merge(dst, src) +} +func (m *UpdateServiceAccountMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateServiceAccountMetadata.Size(m) +} +func (m *UpdateServiceAccountMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateServiceAccountMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateServiceAccountMetadata proto.InternalMessageInfo + +func (m *UpdateServiceAccountMetadata) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +type DeleteServiceAccountRequest struct { + // ID of the service account to delete. + // To get the service account ID, use a [ServiceAccountService.List] request. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteServiceAccountRequest) Reset() { *m = DeleteServiceAccountRequest{} } +func (m *DeleteServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceAccountRequest) ProtoMessage() {} +func (*DeleteServiceAccountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{7} +} +func (m *DeleteServiceAccountRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteServiceAccountRequest.Unmarshal(m, b) +} +func (m *DeleteServiceAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteServiceAccountRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteServiceAccountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteServiceAccountRequest.Merge(dst, src) +} +func (m *DeleteServiceAccountRequest) XXX_Size() int { + return xxx_messageInfo_DeleteServiceAccountRequest.Size(m) +} +func (m *DeleteServiceAccountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteServiceAccountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteServiceAccountRequest proto.InternalMessageInfo + +func (m *DeleteServiceAccountRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +type DeleteServiceAccountMetadata struct { + // ID of the service account that is being deleted. 
+ ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteServiceAccountMetadata) Reset() { *m = DeleteServiceAccountMetadata{} } +func (m *DeleteServiceAccountMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceAccountMetadata) ProtoMessage() {} +func (*DeleteServiceAccountMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{8} +} +func (m *DeleteServiceAccountMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteServiceAccountMetadata.Unmarshal(m, b) +} +func (m *DeleteServiceAccountMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteServiceAccountMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteServiceAccountMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteServiceAccountMetadata.Merge(dst, src) +} +func (m *DeleteServiceAccountMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteServiceAccountMetadata.Size(m) +} +func (m *DeleteServiceAccountMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteServiceAccountMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteServiceAccountMetadata proto.InternalMessageInfo + +func (m *DeleteServiceAccountMetadata) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +type ListServiceAccountOperationsRequest struct { + // ID of the ServiceAccount resource to list operations for. + ServiceAccountId string `protobuf:"bytes,1,opt,name=service_account_id,json=serviceAccountId,proto3" json:"service_account_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListServiceAccountOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] + // to the [ListServiceAccountOperationsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceAccountOperationsRequest) Reset() { *m = ListServiceAccountOperationsRequest{} } +func (m *ListServiceAccountOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountOperationsRequest) ProtoMessage() {} +func (*ListServiceAccountOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{9} +} +func (m *ListServiceAccountOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceAccountOperationsRequest.Unmarshal(m, b) +} +func (m *ListServiceAccountOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceAccountOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListServiceAccountOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceAccountOperationsRequest.Merge(dst, src) +} +func (m *ListServiceAccountOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListServiceAccountOperationsRequest.Size(m) +} +func (m *ListServiceAccountOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceAccountOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceAccountOperationsRequest proto.InternalMessageInfo + +func (m *ListServiceAccountOperationsRequest) GetServiceAccountId() string { + if m != nil { + return m.ServiceAccountId + } + return "" +} + +func (m *ListServiceAccountOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServiceAccountOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListServiceAccountOperationsResponse struct { + // List of operations for the specified service account. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListServiceAccountOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListServiceAccountOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceAccountOperationsResponse) Reset() { *m = ListServiceAccountOperationsResponse{} } +func (m *ListServiceAccountOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountOperationsResponse) ProtoMessage() {} +func (*ListServiceAccountOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_service_account_service_4c6626479d4f1223, []int{10} +} +func (m *ListServiceAccountOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceAccountOperationsResponse.Unmarshal(m, b) +} +func (m *ListServiceAccountOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceAccountOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListServiceAccountOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceAccountOperationsResponse.Merge(dst, src) +} +func (m *ListServiceAccountOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListServiceAccountOperationsResponse.Size(m) +} +func (m *ListServiceAccountOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceAccountOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceAccountOperationsResponse proto.InternalMessageInfo + +func (m *ListServiceAccountOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListServiceAccountOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetServiceAccountRequest)(nil), "yandex.cloud.iam.v1.GetServiceAccountRequest") + proto.RegisterType((*ListServiceAccountsRequest)(nil), "yandex.cloud.iam.v1.ListServiceAccountsRequest") + proto.RegisterType((*ListServiceAccountsResponse)(nil), "yandex.cloud.iam.v1.ListServiceAccountsResponse") + proto.RegisterType((*CreateServiceAccountRequest)(nil), "yandex.cloud.iam.v1.CreateServiceAccountRequest") + proto.RegisterType((*CreateServiceAccountMetadata)(nil), "yandex.cloud.iam.v1.CreateServiceAccountMetadata") + proto.RegisterType((*UpdateServiceAccountRequest)(nil), "yandex.cloud.iam.v1.UpdateServiceAccountRequest") + proto.RegisterType((*UpdateServiceAccountMetadata)(nil), "yandex.cloud.iam.v1.UpdateServiceAccountMetadata") + proto.RegisterType((*DeleteServiceAccountRequest)(nil), "yandex.cloud.iam.v1.DeleteServiceAccountRequest") + proto.RegisterType((*DeleteServiceAccountMetadata)(nil), "yandex.cloud.iam.v1.DeleteServiceAccountMetadata") + proto.RegisterType((*ListServiceAccountOperationsRequest)(nil), "yandex.cloud.iam.v1.ListServiceAccountOperationsRequest") + proto.RegisterType((*ListServiceAccountOperationsResponse)(nil), "yandex.cloud.iam.v1.ListServiceAccountOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ServiceAccountServiceClient is the client API for ServiceAccountService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServiceAccountServiceClient interface { + // Returns the specified ServiceAccount resource. + // + // To get the list of available ServiceAccount resources, use a [List] request. + Get(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) + // Retrieves the list of ServiceAccount resources in the specified folder. + List(ctx context.Context, in *ListServiceAccountsRequest, opts ...grpc.CallOption) (*ListServiceAccountsResponse, error) + // Creates a service account in the specified folder. + Create(ctx context.Context, in *CreateServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified service account. + Update(ctx context.Context, in *UpdateServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified service account. + Delete(ctx context.Context, in *DeleteServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists access bindings for the specified service account. + ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) + // Sets access bindings for the service account. + SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates access bindings for the specified service account. + UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists operations for the specified service account. + ListOperations(ctx context.Context, in *ListServiceAccountOperationsRequest, opts ...grpc.CallOption) (*ListServiceAccountOperationsResponse, error) +} + +type serviceAccountServiceClient struct { + cc *grpc.ClientConn +} + +func NewServiceAccountServiceClient(cc *grpc.ClientConn) ServiceAccountServiceClient { + return &serviceAccountServiceClient{cc} +} + +func (c *serviceAccountServiceClient) Get(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) List(ctx context.Context, in *ListServiceAccountsRequest, opts ...grpc.CallOption) (*ListServiceAccountsResponse, error) { + out := new(ListServiceAccountsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) Create(ctx context.Context, in *CreateServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) Update(ctx context.Context, in *UpdateServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) Delete(ctx context.Context, in *DeleteServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) { + out := new(access.ListAccessBindingsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/ListAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/SetAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/UpdateAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceAccountServiceClient) ListOperations(ctx context.Context, in *ListServiceAccountOperationsRequest, opts ...grpc.CallOption) (*ListServiceAccountOperationsResponse, error) { + out := new(ListServiceAccountOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.ServiceAccountService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceAccountServiceServer is the server API for ServiceAccountService service. +type ServiceAccountServiceServer interface { + // Returns the specified ServiceAccount resource. + // + // To get the list of available ServiceAccount resources, use a [List] request. + Get(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) + // Retrieves the list of ServiceAccount resources in the specified folder. + List(context.Context, *ListServiceAccountsRequest) (*ListServiceAccountsResponse, error) + // Creates a service account in the specified folder. + Create(context.Context, *CreateServiceAccountRequest) (*operation.Operation, error) + // Updates the specified service account. + Update(context.Context, *UpdateServiceAccountRequest) (*operation.Operation, error) + // Deletes the specified service account. + Delete(context.Context, *DeleteServiceAccountRequest) (*operation.Operation, error) + // Lists access bindings for the specified service account. + ListAccessBindings(context.Context, *access.ListAccessBindingsRequest) (*access.ListAccessBindingsResponse, error) + // Sets access bindings for the service account. + SetAccessBindings(context.Context, *access.SetAccessBindingsRequest) (*operation.Operation, error) + // Updates access bindings for the specified service account. + UpdateAccessBindings(context.Context, *access.UpdateAccessBindingsRequest) (*operation.Operation, error) + // Lists operations for the specified service account. 
+ ListOperations(context.Context, *ListServiceAccountOperationsRequest) (*ListServiceAccountOperationsResponse, error) +} + +func RegisterServiceAccountServiceServer(s *grpc.Server, srv ServiceAccountServiceServer) { + s.RegisterService(&_ServiceAccountService_serviceDesc, srv) +} + +func _ServiceAccountService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).Get(ctx, req.(*GetServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceAccountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).List(ctx, req.(*ListServiceAccountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).Create(ctx, req.(*CreateServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).Update(ctx, req.(*UpdateServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/Delete", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).Delete(ctx, req.(*DeleteServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_ListAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.ListAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).ListAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/ListAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).ListAccessBindings(ctx, req.(*access.ListAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_SetAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.SetAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).SetAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/SetAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).SetAccessBindings(ctx, req.(*access.SetAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_UpdateAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.UpdateAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).UpdateAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/UpdateAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).UpdateAccessBindings(ctx, req.(*access.UpdateAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceAccountService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceAccountOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceAccountServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.ServiceAccountService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceAccountServiceServer).ListOperations(ctx, req.(*ListServiceAccountOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceAccountService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.ServiceAccountService", + HandlerType: (*ServiceAccountServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ServiceAccountService_Get_Handler, + }, + { + 
MethodName: "List", + Handler: _ServiceAccountService_List_Handler, + }, + { + MethodName: "Create", + Handler: _ServiceAccountService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _ServiceAccountService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _ServiceAccountService_Delete_Handler, + }, + { + MethodName: "ListAccessBindings", + Handler: _ServiceAccountService_ListAccessBindings_Handler, + }, + { + MethodName: "SetAccessBindings", + Handler: _ServiceAccountService_SetAccessBindings_Handler, + }, + { + MethodName: "UpdateAccessBindings", + Handler: _ServiceAccountService_UpdateAccessBindings_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ServiceAccountService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/iam/v1/service_account_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/service_account_service.proto", fileDescriptor_service_account_service_4c6626479d4f1223) +} + +var fileDescriptor_service_account_service_4c6626479d4f1223 = []byte{ + // 1016 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x5d, 0x6f, 0xdb, 0x54, + 0x18, 0xd6, 0x59, 0x43, 0xb4, 0xbc, 0x65, 0x1f, 0x1c, 0x98, 0x08, 0x6e, 0x27, 0x82, 0x0b, 0xa3, + 0x0b, 0x8d, 0x1d, 0x07, 0xba, 0x2d, 0xfd, 0xd0, 0x68, 0xf8, 0x98, 0x26, 0x6d, 0x80, 0xdc, 0x81, + 0x04, 0xd5, 0x14, 0x9d, 0xda, 0xa7, 0xe1, 0xa8, 0x89, 0x1d, 0x7c, 0x9c, 0x6a, 0xeb, 0x98, 0x90, + 0x90, 0xb8, 0xe9, 0x0d, 0x42, 0x88, 0xff, 0x81, 0x0a, 0x12, 0x77, 0x5c, 0xd2, 0x5e, 0x70, 0x55, + 0xfe, 0x02, 0x42, 0xbb, 0x86, 0x3b, 0x2e, 0x10, 0xf2, 0x39, 0x4e, 0x5a, 0x3b, 0x76, 0xea, 0x74, + 0xdb, 0x55, 0x6b, 0xbf, 0x1f, 0xe7, 0x79, 0x9e, 0xf3, 0x9e, 0xe7, 0xc4, 0x60, 0xdc, 0x27, 0x8e, + 0x4d, 0xef, 0xe9, 0x56, 0xdb, 0xed, 0xd9, 0x3a, 0x23, 0x1d, 0x7d, 0xcb, 0xd0, 0x39, 0xf5, 0xb6, + 0x98, 0x45, 0x9b, 0xc4, 0xb2, 0xdc, 0x9e, 0xe3, 0x37, 0xc3, 0x67, 0xad, 0xeb, 0xb9, 0xbe, 0x8b, + 0x9f, 0x97, 0x25, 0x9a, 0x28, 0xd1, 0x18, 0xe9, 0x68, 0x5b, 0x86, 0x32, 0xdd, 0x72, 0xdd, 0x56, + 0x9b, 0xea, 0xa4, 0xcb, 0x74, 0xe2, 0x38, 0xae, 0x4f, 0x7c, 0xe6, 0x3a, 0x5c, 0x96, 0x28, 0xa5, + 0x30, 0x2a, 0x9e, 0xd6, 0x7b, 0x1b, 0xfa, 0x06, 0xa3, 0x6d, 0xbb, 0xd9, 0x21, 0x7c, 0x33, 0xcc, + 0x50, 0x42, 0x1c, 0x41, 0xbd, 0xdb, 0xa5, 0x9e, 0x28, 0x0f, 0x63, 0x97, 0x33, 0x60, 0xec, 0x2f, + 0x14, 0x49, 0x25, 0x96, 0x45, 0x39, 0x0f, 0xff, 0x84, 0x19, 0x97, 0x22, 0x19, 0x83, 0xa5, 0x86, + 0x16, 0xbd, 0x18, 0xc9, 0xdb, 0x22, 0x6d, 0x66, 0x1f, 0x09, 0xab, 0x9f, 0x40, 0xf1, 0x06, 0xf5, + 0x57, 0x25, 0x88, 0x15, 0x89, 0xc1, 0xa4, 0x5f, 0xf4, 0x28, 0xf7, 0xf1, 0x02, 0xe0, 0xb8, 0x82, + 0xcc, 0x2e, 0xa2, 0x12, 0x9a, 0x2d, 0x34, 0x9e, 0x7d, 0xb4, 0x67, 0xa0, 0x9d, 0x7d, 0x23, 0xb7, + 0xb4, 0x3c, 0x5f, 0x35, 0xcf, 0xf3, 0x48, 0x83, 0x9b, 0xb6, 0xfa, 0x2b, 0x02, 0xe5, 0x16, 0xe3, + 0xb1, 0xce, 0xbc, 0xdf, 0xfa, 0x32, 0x14, 0x36, 0xdc, 0xb6, 0x4d, 0xbd, 0xb4, 0x8e, 0xa7, 0x65, + 0xf8, 0xa6, 0x8d, 0x5f, 0x87, 0x42, 0x97, 0xb4, 0x68, 0x93, 0xb3, 0x6d, 0x5a, 0x3c, 0x55, 0x42, + 0xb3, 0x13, 0x0d, 0xf8, 0x77, 0xcf, 0xc8, 0x2f, 0x2d, 0x1b, 0xd5, 0x6a, 0xd5, 0x3c, 0x1d, 0x04, + 0x57, 0xd9, 0x36, 0xc5, 0xb3, 0x00, 0x22, 0xd1, 0x77, 0x37, 0xa9, 0x53, 0x9c, 0x10, 0x4d, 0x0b, + 0x3b, 0xfb, 0xc6, 0x33, 0x22, 0xd3, 0x14, 0x5d, 0xee, 0x04, 0x31, 0xac, 0x42, 0x7e, 0x83, 0xb5, + 0x7d, 0xea, 0x15, 0x73, 0x22, 0x0b, 0x76, 0xf6, 0x07, 0xfd, 0xc2, 0x88, 0xfa, 0x03, 0x82, 0xa9, + 0x44, 0x02, 0xbc, 0xeb, 0x3a, 0x9c, 0xe2, 0x0f, 0xe0, 0x7c, 0x4c, 0x1c, 0x5e, 
0x44, 0xa5, 0x89, + 0xd9, 0xc9, 0xda, 0x8c, 0x96, 0x30, 0x58, 0x5a, 0x4c, 0xe2, 0x73, 0x51, 0xc5, 0x38, 0xbe, 0x04, + 0xe7, 0x1c, 0x7a, 0xcf, 0x6f, 0x1e, 0xa1, 0x10, 0x90, 0x2d, 0x98, 0x67, 0x82, 0xd7, 0x1f, 0xf5, + 0xb1, 0xab, 0x3f, 0x22, 0x98, 0x7a, 0xc7, 0xa3, 0xc4, 0xa7, 0xc9, 0x9b, 0x36, 0x86, 0xb2, 0x75, + 0xc8, 0x39, 0xa4, 0x23, 0x45, 0x2d, 0x34, 0x5e, 0x0b, 0xb2, 0xfe, 0xde, 0x33, 0x2e, 0x7e, 0xb9, + 0x46, 0x2a, 0xdb, 0x77, 0xd7, 0x2a, 0xa4, 0xb2, 0x5d, 0xad, 0xd4, 0xef, 0x3e, 0x30, 0xe6, 0xae, + 0x18, 0x0f, 0xd7, 0xc2, 0x27, 0x53, 0x94, 0xe0, 0x37, 0x60, 0xd2, 0xa6, 0xdc, 0xf2, 0x58, 0x37, + 0x98, 0xa5, 0xa8, 0xd8, 0xb5, 0xf9, 0x2b, 0xe6, 0xd1, 0xa8, 0x7a, 0x0b, 0xa6, 0x93, 0x10, 0xdf, + 0xa6, 0x3e, 0xb1, 0x89, 0x4f, 0xf0, 0x5c, 0xfa, 0x9c, 0x25, 0x4c, 0xd6, 0x7f, 0x08, 0xa6, 0x3e, + 0xee, 0xda, 0xa9, 0x02, 0x3c, 0xc6, 0xd4, 0xe2, 0x45, 0x98, 0xec, 0x89, 0xd6, 0xe2, 0x48, 0x0b, + 0x61, 0x26, 0x6b, 0x8a, 0x26, 0x4f, 0xbd, 0xd6, 0x3f, 0xf5, 0xda, 0xfb, 0xc1, 0xa9, 0xbf, 0x4d, + 0xf8, 0xa6, 0x09, 0x32, 0x3d, 0xf8, 0x7f, 0x20, 0xe7, 0xc4, 0x63, 0xcb, 0x99, 0x3b, 0x4e, 0xce, + 0x24, 0xfe, 0x27, 0x94, 0xf3, 0x53, 0x98, 0x7a, 0x97, 0xb6, 0xe9, 0x53, 0x50, 0x33, 0x00, 0x9a, + 0xd4, 0xfa, 0x84, 0x40, 0x7f, 0x46, 0x30, 0x33, 0x7c, 0x20, 0x3f, 0xec, 0xdb, 0x1d, 0x7f, 0x12, + 0xfb, 0xff, 0xe4, 0xbd, 0x46, 0xfd, 0x0e, 0xc1, 0xab, 0xa3, 0x61, 0x87, 0x86, 0xb2, 0x02, 0x30, + 0xf0, 0xee, 0xbe, 0x95, 0xbc, 0x12, 0xb5, 0x92, 0x43, 0x6f, 0x1f, 0xd4, 0x9b, 0x47, 0x8a, 0xb2, + 0x7a, 0x48, 0xed, 0xd1, 0x19, 0xb8, 0x10, 0xc5, 0x13, 0x3e, 0xe1, 0x6f, 0x11, 0x4c, 0xdc, 0xa0, + 0x3e, 0xae, 0x24, 0x7a, 0x58, 0xda, 0x4d, 0xa1, 0x64, 0xb1, 0x3c, 0xf5, 0xad, 0xaf, 0xff, 0xf8, + 0xf3, 0xfb, 0x53, 0x1a, 0x9e, 0x8b, 0x5d, 0x7d, 0x7d, 0x0b, 0xd4, 0x1f, 0x0c, 0xef, 0xdb, 0x43, + 0xbc, 0x83, 0x20, 0x17, 0xe8, 0x87, 0xf5, 0xc4, 0x35, 0xd2, 0xef, 0x18, 0xa5, 0x9a, 0xbd, 0x40, + 0x6e, 0x81, 0xfa, 0xb2, 0x40, 0xf8, 0x12, 0x7e, 0x31, 0x05, 0x21, 0xfe, 0x09, 0x41, 0x5e, 0x5a, + 0x19, 0x4e, 0xee, 0x3e, 0xc2, 0x99, 0x95, 0xe3, 0x37, 0x53, 0xbd, 0xb3, 0x7b, 0x50, 0xd6, 0x8e, + 0x71, 0xcb, 0xb3, 0xd1, 0xf7, 0x02, 0xf2, 0xb4, 0x9a, 0x06, 0x79, 0x01, 0x95, 0xf1, 0x3e, 0x82, + 0xbc, 0x74, 0x8c, 0x14, 0xd4, 0x23, 0xec, 0x34, 0x0b, 0x6a, 0x26, 0x51, 0x8f, 0x34, 0xa5, 0x24, + 0xd4, 0x46, 0x6d, 0xac, 0x51, 0x08, 0xa8, 0xfc, 0x8e, 0x20, 0x2f, 0x3d, 0x25, 0x85, 0xca, 0x08, + 0x2f, 0xcb, 0x42, 0xa5, 0xb3, 0x7b, 0x50, 0x9e, 0x3f, 0xc6, 0xb6, 0x2e, 0xc4, 0xaf, 0x83, 0xf7, + 0x3a, 0x5d, 0xff, 0xbe, 0x1c, 0xee, 0xf2, 0x78, 0xc3, 0xfd, 0x0b, 0x02, 0x1c, 0x0c, 0xe4, 0x8a, + 0xf8, 0x65, 0xd7, 0x60, 0x8e, 0xcd, 0x9c, 0x16, 0xc7, 0x5a, 0x14, 0x68, 0xf8, 0xbb, 0x6f, 0x38, + 0xb1, 0x4f, 0x4c, 0xcf, 0x9c, 0x1f, 0x0e, 0xfa, 0xdb, 0x02, 0xed, 0x02, 0xbe, 0x96, 0x8a, 0xd6, + 0xa3, 0xdc, 0xed, 0x79, 0x16, 0x15, 0xc2, 0xb7, 0x87, 0x21, 0xfe, 0x85, 0xe0, 0xb9, 0x55, 0x1a, + 0x7f, 0x5b, 0x49, 0x04, 0x32, 0x94, 0x37, 0xc6, 0x86, 0x7c, 0xb5, 0x7b, 0x50, 0xae, 0x43, 0x29, + 0xad, 0x53, 0x96, 0x4d, 0x59, 0x52, 0xaf, 0x66, 0xa3, 0xc9, 0xe3, 0xdd, 0x83, 0x89, 0xfb, 0x07, + 0xc1, 0x0b, 0x72, 0xb2, 0x63, 0x5c, 0xab, 0x89, 0x5c, 0x93, 0x52, 0xc7, 0xa0, 0xfb, 0x0d, 0xda, + 0x3d, 0x28, 0x2f, 0xc1, 0xcc, 0x88, 0x6e, 0x59, 0x28, 0x5f, 0x57, 0x17, 0xb2, 0x51, 0xee, 0x25, + 0x2c, 0x10, 0xb0, 0xfe, 0x0d, 0xc1, 0xd9, 0x60, 0x7e, 0x0e, 0xef, 0x29, 0x7c, 0x2d, 0xa3, 0x9d, + 0x0e, 0xdd, 0xc8, 0x4a, 0xfd, 0x04, 0x95, 0xe1, 0xa0, 0x5e, 0x17, 0x74, 0xea, 0xf8, 0xea, 0x38, + 0xc7, 0xea, 0xf0, 0x1b, 0x88, 0x37, 0x96, 0x3f, 0x5b, 0x6c, 0x31, 0xff, 0xf3, 0xde, 0xba, 0x66, + 0xb9, 
0x1d, 0x5d, 0xe2, 0xa8, 0xc8, 0x6f, 0xa1, 0x96, 0x5b, 0x69, 0x51, 0x47, 0xa8, 0xa7, 0x27, + 0x7c, 0x99, 0x2d, 0x32, 0xd2, 0x59, 0xcf, 0x8b, 0xf0, 0x9b, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0x12, 0x75, 0xb8, 0x07, 0x5f, 0x0e, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account.pb.go new file mode 100644 index 000000000..bc27832d2 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/user_account.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Currently represents only [Yandex.Passport account](/docs/iam/concepts/#passport). +type UserAccount struct { + // ID of the user account. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to UserAccount: + // *UserAccount_YandexPassportUserAccount + UserAccount isUserAccount_UserAccount `protobuf_oneof:"user_account"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAccount) Reset() { *m = UserAccount{} } +func (m *UserAccount) String() string { return proto.CompactTextString(m) } +func (*UserAccount) ProtoMessage() {} +func (*UserAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_user_account_ced378befb1b3d2f, []int{0} +} +func (m *UserAccount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserAccount.Unmarshal(m, b) +} +func (m *UserAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserAccount.Marshal(b, m, deterministic) +} +func (dst *UserAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserAccount.Merge(dst, src) +} +func (m *UserAccount) XXX_Size() int { + return xxx_messageInfo_UserAccount.Size(m) +} +func (m *UserAccount) XXX_DiscardUnknown() { + xxx_messageInfo_UserAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_UserAccount proto.InternalMessageInfo + +func (m *UserAccount) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type isUserAccount_UserAccount interface { + isUserAccount_UserAccount() +} + +type UserAccount_YandexPassportUserAccount struct { + YandexPassportUserAccount *YandexPassportUserAccount `protobuf:"bytes,2,opt,name=yandex_passport_user_account,json=yandexPassportUserAccount,proto3,oneof"` +} + +func (*UserAccount_YandexPassportUserAccount) isUserAccount_UserAccount() {} + +func (m *UserAccount) GetUserAccount() isUserAccount_UserAccount { + if m != nil { + return m.UserAccount + } + return nil +} + +func (m *UserAccount) GetYandexPassportUserAccount() *YandexPassportUserAccount { + if x, ok := 
m.GetUserAccount().(*UserAccount_YandexPassportUserAccount); ok { + return x.YandexPassportUserAccount + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*UserAccount) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _UserAccount_OneofMarshaler, _UserAccount_OneofUnmarshaler, _UserAccount_OneofSizer, []interface{}{ + (*UserAccount_YandexPassportUserAccount)(nil), + } +} + +func _UserAccount_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*UserAccount) + // user_account + switch x := m.UserAccount.(type) { + case *UserAccount_YandexPassportUserAccount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.YandexPassportUserAccount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UserAccount.UserAccount has unexpected type %T", x) + } + return nil +} + +func _UserAccount_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*UserAccount) + switch tag { + case 2: // user_account.yandex_passport_user_account + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(YandexPassportUserAccount) + err := b.DecodeMessage(msg) + m.UserAccount = &UserAccount_YandexPassportUserAccount{msg} + return true, err + default: + return false, nil + } +} + +func _UserAccount_OneofSizer(msg proto.Message) (n int) { + m := msg.(*UserAccount) + // user_account + switch x := m.UserAccount.(type) { + case *UserAccount_YandexPassportUserAccount: + s := proto.Size(x.YandexPassportUserAccount) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A YandexPassportUserAccount resource. For more information, see [Yandex.Passport account](/docs/iam/concepts/#passport). +type YandexPassportUserAccount struct { + // Login of the Yandex.Passport user account. + Login string `protobuf:"bytes,1,opt,name=login,proto3" json:"login,omitempty"` + // Default email of the Yandex.Passport user account. 
+ DefaultEmail string `protobuf:"bytes,2,opt,name=default_email,json=defaultEmail,proto3" json:"default_email,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *YandexPassportUserAccount) Reset() { *m = YandexPassportUserAccount{} } +func (m *YandexPassportUserAccount) String() string { return proto.CompactTextString(m) } +func (*YandexPassportUserAccount) ProtoMessage() {} +func (*YandexPassportUserAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_user_account_ced378befb1b3d2f, []int{1} +} +func (m *YandexPassportUserAccount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_YandexPassportUserAccount.Unmarshal(m, b) +} +func (m *YandexPassportUserAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_YandexPassportUserAccount.Marshal(b, m, deterministic) +} +func (dst *YandexPassportUserAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_YandexPassportUserAccount.Merge(dst, src) +} +func (m *YandexPassportUserAccount) XXX_Size() int { + return xxx_messageInfo_YandexPassportUserAccount.Size(m) +} +func (m *YandexPassportUserAccount) XXX_DiscardUnknown() { + xxx_messageInfo_YandexPassportUserAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_YandexPassportUserAccount proto.InternalMessageInfo + +func (m *YandexPassportUserAccount) GetLogin() string { + if m != nil { + return m.Login + } + return "" +} + +func (m *YandexPassportUserAccount) GetDefaultEmail() string { + if m != nil { + return m.DefaultEmail + } + return "" +} + +func init() { + proto.RegisterType((*UserAccount)(nil), "yandex.cloud.iam.v1.UserAccount") + proto.RegisterType((*YandexPassportUserAccount)(nil), "yandex.cloud.iam.v1.YandexPassportUserAccount") +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/user_account.proto", fileDescriptor_user_account_ced378befb1b3d2f) +} + +var fileDescriptor_user_account_ced378befb1b3d2f = []byte{ + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x50, 0xcf, 0x4b, 0xc3, 0x30, + 0x14, 0xb6, 0x05, 0x85, 0x65, 0x73, 0x87, 0xe8, 0x61, 0x13, 0x85, 0x31, 0x41, 0x76, 0x59, 0xc2, + 0xf4, 0x38, 0x3c, 0x38, 0x10, 0x3c, 0x4a, 0x41, 0x41, 0x2f, 0xe5, 0xad, 0x89, 0xf5, 0x41, 0x7e, + 0xd4, 0x36, 0x29, 0xee, 0xbf, 0xf1, 0x4f, 0x15, 0x93, 0x1e, 0x2a, 0x6c, 0xc7, 0xf7, 0xbd, 0xef, + 0x17, 0x1f, 0xb9, 0xd9, 0x81, 0x11, 0xf2, 0x9b, 0x17, 0xca, 0x7a, 0xc1, 0x11, 0x34, 0x6f, 0x57, + 0xdc, 0x37, 0xb2, 0xce, 0xa1, 0x28, 0xac, 0x37, 0x8e, 0x55, 0xb5, 0x75, 0x96, 0x9e, 0x45, 0x1e, + 0x0b, 0x3c, 0x86, 0xa0, 0x59, 0xbb, 0xba, 0xb8, 0xfa, 0x27, 0x6e, 0x41, 0xa1, 0x00, 0x87, 0xd6, + 0x44, 0xcd, 0xfc, 0x27, 0x21, 0xc3, 0x97, 0x46, 0xd6, 0x0f, 0xd1, 0x89, 0x8e, 0x49, 0x8a, 0x62, + 0x92, 0xcc, 0x92, 0xc5, 0x20, 0x4b, 0x51, 0xd0, 0x2f, 0x72, 0x19, 0x0d, 0xf2, 0x0a, 0x9a, 0xa6, + 0xb2, 0xb5, 0xcb, 0xfb, 0xc9, 0x93, 0x74, 0x96, 0x2c, 0x86, 0xb7, 0x8c, 0xed, 0x89, 0x66, 0x6f, + 0x01, 0x7b, 0xee, 0x74, 0xbd, 0x94, 0xa7, 0xa3, 0x6c, 0xba, 0x3b, 0xf4, 0xdc, 0x8c, 0xc9, 0xa8, + 0x1f, 0x31, 0x7f, 0x25, 0xd3, 0x83, 0x4e, 0xf4, 0x9c, 0x1c, 0x2b, 0x5b, 0xa2, 0xe9, 0x2a, 0xc7, + 0x83, 0x5e, 0x93, 0x53, 0x21, 0x3f, 0xc0, 0x2b, 0x97, 0x4b, 0x0d, 0xa8, 0x42, 0xcd, 0x41, 0x36, + 0xea, 0xc0, 0xc7, 0x3f, 0x6c, 0x73, 0xff, 0xbe, 0x2e, 0xd1, 0x7d, 0xfa, 0x2d, 0x2b, 0xac, 0xe6, + 0xb1, 0xcf, 0x32, 0xce, 0x54, 0xda, 0x65, 0x29, 0x4d, 0x58, 0x88, 0xef, 0x19, 0x7f, 0x8d, 0xa0, + 0xb7, 0x27, 0xe1, 0x7d, 0xf7, 0x1b, 
0x00, 0x00, 0xff, 0xff, 0xf9, 0xdc, 0xc7, 0x2e, 0x9e, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account_service.pb.go new file mode 100644 index 000000000..69ecb7ee4 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/user_account_service.pb.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/user_account_service.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetUserAccountRequest struct { + // ID of the UserAccount resource to return. + UserAccountId string `protobuf:"bytes,1,opt,name=user_account_id,json=userAccountId,proto3" json:"user_account_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUserAccountRequest) Reset() { *m = GetUserAccountRequest{} } +func (m *GetUserAccountRequest) String() string { return proto.CompactTextString(m) } +func (*GetUserAccountRequest) ProtoMessage() {} +func (*GetUserAccountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_account_service_d671b5c11a2773d8, []int{0} +} +func (m *GetUserAccountRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUserAccountRequest.Unmarshal(m, b) +} +func (m *GetUserAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUserAccountRequest.Marshal(b, m, deterministic) +} +func (dst *GetUserAccountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUserAccountRequest.Merge(dst, src) +} +func (m *GetUserAccountRequest) XXX_Size() int { + return xxx_messageInfo_GetUserAccountRequest.Size(m) +} +func (m *GetUserAccountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUserAccountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUserAccountRequest proto.InternalMessageInfo + +func (m *GetUserAccountRequest) GetUserAccountId() string { + if m != nil { + return m.UserAccountId + } + return "" +} + +func init() { + proto.RegisterType((*GetUserAccountRequest)(nil), "yandex.cloud.iam.v1.GetUserAccountRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UserAccountServiceClient is the client API for UserAccountService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UserAccountServiceClient interface { + // Returns the specified UserAccount resource. + Get(ctx context.Context, in *GetUserAccountRequest, opts ...grpc.CallOption) (*UserAccount, error) +} + +type userAccountServiceClient struct { + cc *grpc.ClientConn +} + +func NewUserAccountServiceClient(cc *grpc.ClientConn) UserAccountServiceClient { + return &userAccountServiceClient{cc} +} + +func (c *userAccountServiceClient) Get(ctx context.Context, in *GetUserAccountRequest, opts ...grpc.CallOption) (*UserAccount, error) { + out := new(UserAccount) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.UserAccountService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserAccountServiceServer is the server API for UserAccountService service. +type UserAccountServiceServer interface { + // Returns the specified UserAccount resource. + Get(context.Context, *GetUserAccountRequest) (*UserAccount, error) +} + +func RegisterUserAccountServiceServer(s *grpc.Server, srv UserAccountServiceServer) { + s.RegisterService(&_UserAccountService_serviceDesc, srv) +} + +func _UserAccountService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserAccountServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.UserAccountService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserAccountServiceServer).Get(ctx, req.(*GetUserAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UserAccountService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.UserAccountService", + HandlerType: (*UserAccountServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _UserAccountService_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/iam/v1/user_account_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/user_account_service.proto", fileDescriptor_user_account_service_d671b5c11a2773d8) +} + +var fileDescriptor_user_account_service_d671b5c11a2773d8 = []byte{ + // 284 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xab, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0xd4, + 0x2f, 0x2d, 0x4e, 0x2d, 0x8a, 0x4f, 0x4c, 0x4e, 0xce, 0x2f, 0xcd, 0x2b, 0x89, 0x2f, 0x4e, 0x2d, + 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, 0xa8, 0xd7, 0x03, + 0xab, 0xd7, 0xcb, 0x4c, 0xcc, 0xd5, 0x2b, 0x33, 0x94, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, + 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, + 0x86, 0x68, 0x91, 0x52, 0x23, 0x64, 0x05, 0x54, 0x9d, 0x2c, 0x8a, 0xba, 0xb2, 0xc4, 0x9c, 0xcc, + 0x14, 0xb0, 0x39, 0x10, 0x69, 0x25, 0x5f, 0x2e, 0x51, 0xf7, 0xd4, 0x92, 0xd0, 0xe2, 0xd4, 0x22, + 0x47, 0x88, 0xb6, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x21, 0x13, 0x2e, 0x7e, 0x14, 0x07, + 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x3a, 0xf1, 0xbc, 0x38, 0x6e, 0xc8, 0xd8, 0x75, + 0xc2, 0x90, 0xc5, 
0xc6, 0xd6, 0xd4, 0x20, 0x88, 0xb7, 0x14, 0xa1, 0xd5, 0x33, 0xc5, 0x68, 0x16, + 0x23, 0x97, 0x10, 0x92, 0x61, 0xc1, 0x10, 0x5f, 0x0a, 0x35, 0x33, 0x72, 0x31, 0xbb, 0xa7, 0x96, + 0x08, 0x69, 0xe9, 0x61, 0xf1, 0xa8, 0x1e, 0x56, 0x07, 0x48, 0x29, 0x60, 0x55, 0x8b, 0xa4, 0x50, + 0x49, 0xaf, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x1a, 0x42, 0x6a, 0xc8, 0xde, 0x87, 0x4a, 0x16, 0xeb, + 0x57, 0xa3, 0x39, 0xbf, 0xd6, 0xc9, 0x36, 0xca, 0x3a, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, + 0x39, 0x3f, 0x57, 0x1f, 0x62, 0xba, 0x2e, 0x24, 0x5c, 0xd2, 0xf3, 0x75, 0xd3, 0x53, 0xf3, 0xc0, + 0x41, 0xa2, 0x8f, 0x25, 0x60, 0xad, 0x33, 0x13, 0x73, 0x93, 0xd8, 0xc0, 0xd2, 0xc6, 0x80, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x80, 0x94, 0x59, 0x76, 0xdd, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/yandex_passport_user_account_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/yandex_passport_user_account_service.pb.go new file mode 100644 index 000000000..6be909550 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/yandex_passport_user_account_service.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/iam/v1/yandex_passport_user_account_service.proto + +package iam // import "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetUserAccountByLoginRequest struct { + // Login of the YandexPassportUserAccount resource to return. 
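+	// Illustrative usage sketch (assumes an authenticated *grpc.ClientConn named
+	// conn and a context.Context named ctx; these names are not part of this API):
+	//
+	//	client := iam.NewYandexPassportUserAccountServiceClient(conn)
+	//	acc, err := client.GetByLogin(ctx, &iam.GetUserAccountByLoginRequest{Login: "some-login"})
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	log.Printf("resolved user account: %v", acc)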
+ Login string `protobuf:"bytes,1,opt,name=login,proto3" json:"login,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUserAccountByLoginRequest) Reset() { *m = GetUserAccountByLoginRequest{} } +func (m *GetUserAccountByLoginRequest) String() string { return proto.CompactTextString(m) } +func (*GetUserAccountByLoginRequest) ProtoMessage() {} +func (*GetUserAccountByLoginRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_yandex_passport_user_account_service_fed4b1e4abf6d5f3, []int{0} +} +func (m *GetUserAccountByLoginRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUserAccountByLoginRequest.Unmarshal(m, b) +} +func (m *GetUserAccountByLoginRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUserAccountByLoginRequest.Marshal(b, m, deterministic) +} +func (dst *GetUserAccountByLoginRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUserAccountByLoginRequest.Merge(dst, src) +} +func (m *GetUserAccountByLoginRequest) XXX_Size() int { + return xxx_messageInfo_GetUserAccountByLoginRequest.Size(m) +} +func (m *GetUserAccountByLoginRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUserAccountByLoginRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUserAccountByLoginRequest proto.InternalMessageInfo + +func (m *GetUserAccountByLoginRequest) GetLogin() string { + if m != nil { + return m.Login + } + return "" +} + +func init() { + proto.RegisterType((*GetUserAccountByLoginRequest)(nil), "yandex.cloud.iam.v1.GetUserAccountByLoginRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// YandexPassportUserAccountServiceClient is the client API for YandexPassportUserAccountService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type YandexPassportUserAccountServiceClient interface { + // Returns the specified YandexPassportUserAccount resource. + GetByLogin(ctx context.Context, in *GetUserAccountByLoginRequest, opts ...grpc.CallOption) (*UserAccount, error) +} + +type yandexPassportUserAccountServiceClient struct { + cc *grpc.ClientConn +} + +func NewYandexPassportUserAccountServiceClient(cc *grpc.ClientConn) YandexPassportUserAccountServiceClient { + return &yandexPassportUserAccountServiceClient{cc} +} + +func (c *yandexPassportUserAccountServiceClient) GetByLogin(ctx context.Context, in *GetUserAccountByLoginRequest, opts ...grpc.CallOption) (*UserAccount, error) { + out := new(UserAccount) + err := c.cc.Invoke(ctx, "/yandex.cloud.iam.v1.YandexPassportUserAccountService/GetByLogin", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// YandexPassportUserAccountServiceServer is the server API for YandexPassportUserAccountService service. +type YandexPassportUserAccountServiceServer interface { + // Returns the specified YandexPassportUserAccount resource. 
+ GetByLogin(context.Context, *GetUserAccountByLoginRequest) (*UserAccount, error) +} + +func RegisterYandexPassportUserAccountServiceServer(s *grpc.Server, srv YandexPassportUserAccountServiceServer) { + s.RegisterService(&_YandexPassportUserAccountService_serviceDesc, srv) +} + +func _YandexPassportUserAccountService_GetByLogin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserAccountByLoginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(YandexPassportUserAccountServiceServer).GetByLogin(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.iam.v1.YandexPassportUserAccountService/GetByLogin", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(YandexPassportUserAccountServiceServer).GetByLogin(ctx, req.(*GetUserAccountByLoginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _YandexPassportUserAccountService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.iam.v1.YandexPassportUserAccountService", + HandlerType: (*YandexPassportUserAccountServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetByLogin", + Handler: _YandexPassportUserAccountService_GetByLogin_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/iam/v1/yandex_passport_user_account_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/iam/v1/yandex_passport_user_account_service.proto", fileDescriptor_yandex_passport_user_account_service_fed4b1e4abf6d5f3) +} + +var fileDescriptor_yandex_passport_user_account_service_fed4b1e4abf6d5f3 = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xab, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0xd4, + 0x87, 0x88, 0xc5, 0x17, 0x24, 0x16, 0x17, 0x17, 0xe4, 0x17, 0x95, 0xc4, 0x97, 0x16, 0xa7, 0x16, + 0xc5, 0x27, 0x26, 0x27, 0xe7, 0x97, 0xe6, 0x95, 0xc4, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, + 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x43, 0xd4, 0xea, 0x81, 0xf5, 0xeb, 0x65, 0x26, + 0xe6, 0xea, 0x95, 0x19, 0x4a, 0xc9, 0xa4, 0xe7, 0xe7, 0xa7, 0xe7, 0xa4, 0xea, 0x27, 0x16, 0x64, + 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0x43, 0xb4, 0x48, 0xa9, + 0x61, 0xb3, 0x12, 0xd9, 0x0a, 0xa8, 0x3a, 0x59, 0x14, 0x75, 0x65, 0x89, 0x39, 0x99, 0x29, 0x60, + 0x73, 0x20, 0xd2, 0x4a, 0x56, 0x5c, 0x32, 0xee, 0xa9, 0x25, 0xa1, 0xc5, 0xa9, 0x45, 0x8e, 0x10, + 0x6d, 0x4e, 0x95, 0x3e, 0xf9, 0xe9, 0x99, 0x79, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, + 0x52, 0x5c, 0xac, 0x39, 0x20, 0xbe, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xcb, 0x8b, 0xe3, + 0x86, 0x8c, 0x41, 0x10, 0x21, 0xa3, 0x5d, 0x8c, 0x5c, 0x0a, 0x91, 0x60, 0xd3, 0x03, 0xa0, 0x7e, + 0x44, 0x32, 0x27, 0x18, 0xe2, 0x41, 0xa1, 0xa9, 0x8c, 0x5c, 0x5c, 0xee, 0xa9, 0x30, 0x63, 0x85, + 0x0c, 0xf5, 0xb0, 0x78, 0x55, 0x0f, 0x9f, 0x13, 0xa4, 0x14, 0xb0, 0x6a, 0x41, 0x52, 0xaf, 0x64, + 0xd4, 0x74, 0xf9, 0xc9, 0x64, 0x26, 0x1d, 0x21, 0x2d, 0xd4, 0xa0, 0xc7, 0xe2, 0xaa, 0x62, 0xab, + 0x24, 0x88, 0xe1, 0x4e, 0xb6, 0x51, 0xd6, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, + 0xb9, 0x50, 0x0d, 0xba, 0x90, 0x40, 0x4a, 0xcf, 0xd7, 0x4d, 0x4f, 0xcd, 0x03, 0x87, 0x8f, 0x3e, + 0x96, 0x50, 0xb6, 0xce, 0x4c, 0xcc, 0x4d, 0x62, 0x03, 0x4b, 0x1b, 0x03, 
0x02, 0x00, 0x00, 0xff, + 0xff, 0xd5, 0x6e, 0x8b, 0x84, 0xfa, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup.pb.go new file mode 100644 index 000000000..b6791774d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/backup.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ClickHouse Backup resource. See the [Developer's Guide](/docs/managed-clickhouse/concepts) +// for more information. +type Backup struct { + // ID of the backup. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the backup belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + // (i.e. when the backup operation was completed). + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // ID of the ClickHouse cluster that the backup was created for. + SourceClusterId string `protobuf:"bytes,4,opt,name=source_cluster_id,json=sourceClusterId,proto3" json:"source_cluster_id,omitempty"` + SourceShardNames []string `protobuf:"bytes,6,rep,name=source_shard_names,json=sourceShardNames,proto3" json:"source_shard_names,omitempty"` + // Time when the backup operation was started. 
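+	// Illustrative sketch (assumes a *Backup value named b and the helper package
+	// github.com/golang/protobuf/ptypes): the timestamp fields can be converted
+	// to time.Time like this:
+	//
+	//	startedAt, err := ptypes.Timestamp(b.GetStartedAt())
+	//	if err == nil {
+	//		fmt.Println("backup started at", startedAt.UTC())
+	//	}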
+ StartedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup) Reset() { *m = Backup{} } +func (m *Backup) String() string { return proto.CompactTextString(m) } +func (*Backup) ProtoMessage() {} +func (*Backup) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_63c80d0c65f0a55c, []int{0} +} +func (m *Backup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Backup.Unmarshal(m, b) +} +func (m *Backup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Backup.Marshal(b, m, deterministic) +} +func (dst *Backup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup.Merge(dst, src) +} +func (m *Backup) XXX_Size() int { + return xxx_messageInfo_Backup.Size(m) +} +func (m *Backup) XXX_DiscardUnknown() { + xxx_messageInfo_Backup.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup proto.InternalMessageInfo + +func (m *Backup) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Backup) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Backup) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Backup) GetSourceClusterId() string { + if m != nil { + return m.SourceClusterId + } + return "" +} + +func (m *Backup) GetSourceShardNames() []string { + if m != nil { + return m.SourceShardNames + } + return nil +} + +func (m *Backup) GetStartedAt() *timestamp.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func init() { + proto.RegisterType((*Backup)(nil), "yandex.cloud.mdb.clickhouse.v1.Backup") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/backup.proto", fileDescriptor_backup_63c80d0c65f0a55c) +} + +var fileDescriptor_backup_63c80d0c65f0a55c = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xcd, 0x4e, 0x02, 0x31, + 0x14, 0x85, 0xc3, 0xa0, 0xc4, 0xa9, 0x89, 0x3f, 0x5d, 0x4d, 0x30, 0x51, 0xe2, 0x8a, 0xa8, 0xb4, + 0x41, 0x57, 0xc6, 0x15, 0xb8, 0x62, 0xa1, 0x26, 0xe8, 0xca, 0xcd, 0xa4, 0xed, 0x2d, 0x43, 0xc3, + 0x94, 0x92, 0xfe, 0x10, 0x7d, 0x00, 0xdf, 0xdb, 0x4c, 0x3b, 0x84, 0x9d, 0x2e, 0x7b, 0xee, 0x77, + 0xcf, 0x39, 0xe9, 0x45, 0xb7, 0xdf, 0x6c, 0x0d, 0xf2, 0x8b, 0x8a, 0xda, 0x04, 0xa0, 0x1a, 0x38, + 0x15, 0xb5, 0x12, 0xab, 0xa5, 0x09, 0x4e, 0xd2, 0xed, 0x98, 0x72, 0x26, 0x56, 0x61, 0x43, 0x36, + 0xd6, 0x78, 0x83, 0x2f, 0x13, 0x4c, 0x22, 0x4c, 0x34, 0x70, 0xb2, 0x87, 0xc9, 0x76, 0xdc, 0xbf, + 0xaa, 0x8c, 0xa9, 0x6a, 0x49, 0x23, 0xcd, 0xc3, 0x82, 0x7a, 0xa5, 0xa5, 0xf3, 0x4c, 0xb7, 0x06, + 0xd7, 0x3f, 0x19, 0xea, 0x4d, 0xa3, 0x23, 0x3e, 0x41, 0x99, 0x82, 0xa2, 0x33, 0xe8, 0x0c, 0xf3, + 0x79, 0xa6, 0x00, 0x5f, 0xa0, 0x7c, 0x61, 0x6a, 0x90, 0xb6, 0x54, 0x50, 0x64, 0x51, 0x3e, 0x4a, + 0xc2, 0x0c, 0xf0, 0x23, 0x42, 0xc2, 0x4a, 0xe6, 0x25, 0x94, 0xcc, 0x17, 0xdd, 0x41, 0x67, 0x78, + 0x7c, 0xdf, 0x27, 0x29, 0x8d, 0xec, 0xd2, 0xc8, 0xc7, 0x2e, 0x6d, 0x9e, 0xb7, 0xf4, 0xc4, 0xe3, + 0x1b, 0x74, 0xee, 0x4c, 0xb0, 0x42, 0x96, 0xa2, 0x0e, 0xce, 0x27, 0xff, 0x83, 0xe8, 0x7f, 0x9a, + 0x06, 0xcf, 0x49, 0x9f, 0x01, 0xbe, 0x43, 0xb8, 0x65, 0xdd, 0x92, 0x59, 0x28, 0xd7, 0x4c, 0x4b, + 0x57, 0xf4, 0x06, 0xdd, 0x61, 0x3e, 0x3f, 0x4b, 0x93, 0xf7, 0x66, 0xf0, 0xda, 0xe8, 0x4d, 0x29, + 0xe7, 0x99, 0x6d, 0x4b, 0x1d, 0xfe, 0x5f, 0xaa, 
0xa5, 0x27, 0x7e, 0xfa, 0xf6, 0xf9, 0x52, 0x29, + 0xbf, 0x0c, 0x9c, 0x08, 0xa3, 0x69, 0xfa, 0xd5, 0x51, 0x3a, 0x41, 0x65, 0x46, 0x95, 0x5c, 0xc7, + 0x75, 0xfa, 0xf7, 0x6d, 0x9e, 0xf6, 0x2f, 0xde, 0x8b, 0x0b, 0x0f, 0xbf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xcc, 0x05, 0xcb, 0xe2, 0xcf, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup_service.pb.go new file mode 100644 index 000000000..2c5853354 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/backup_service.pb.go @@ -0,0 +1,335 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/backup_service.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetBackupRequest struct { + // ID of the backup to return information about. + // To get the backup ID, use a [ClusterService.ListBackups] request. + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBackupRequest) Reset() { *m = GetBackupRequest{} } +func (m *GetBackupRequest) String() string { return proto.CompactTextString(m) } +func (*GetBackupRequest) ProtoMessage() {} +func (*GetBackupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_c0389392897d6e8b, []int{0} +} +func (m *GetBackupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetBackupRequest.Unmarshal(m, b) +} +func (m *GetBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetBackupRequest.Marshal(b, m, deterministic) +} +func (dst *GetBackupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBackupRequest.Merge(dst, src) +} +func (m *GetBackupRequest) XXX_Size() int { + return xxx_messageInfo_GetBackupRequest.Size(m) +} +func (m *GetBackupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetBackupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBackupRequest proto.InternalMessageInfo + +func (m *GetBackupRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type ListBackupsRequest struct { + // ID of the folder to list backups in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. 
If the number of available + // results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsRequest) Reset() { *m = ListBackupsRequest{} } +func (m *ListBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBackupsRequest) ProtoMessage() {} +func (*ListBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_c0389392897d6e8b, []int{1} +} +func (m *ListBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsRequest.Unmarshal(m, b) +} +func (m *ListBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsRequest.Merge(dst, src) +} +func (m *ListBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListBackupsRequest.Size(m) +} +func (m *ListBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsRequest proto.InternalMessageInfo + +func (m *ListBackupsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListBackupsResponse struct { + // List of Backup resources. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
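+	// Illustrative paging sketch (assumes a BackupServiceClient named client, a
+	// context.Context named ctx and a folder ID string folderID; these variable
+	// names are not part of this API):
+	//
+	//	var backups []*Backup
+	//	req := &ListBackupsRequest{FolderId: folderID, PageSize: 100}
+	//	for {
+	//		resp, err := client.List(ctx, req)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		backups = append(backups, resp.Backups...)
+	//		if resp.NextPageToken == "" {
+	//			break
+	//		}
+	//		req.PageToken = resp.NextPageToken
+	//	}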
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsResponse) Reset() { *m = ListBackupsResponse{} } +func (m *ListBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBackupsResponse) ProtoMessage() {} +func (*ListBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_c0389392897d6e8b, []int{2} +} +func (m *ListBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsResponse.Unmarshal(m, b) +} +func (m *ListBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsResponse.Merge(dst, src) +} +func (m *ListBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListBackupsResponse.Size(m) +} +func (m *ListBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsResponse proto.InternalMessageInfo + +func (m *ListBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetBackupRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GetBackupRequest") + proto.RegisterType((*ListBackupsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListBackupsRequest") + proto.RegisterType((*ListBackupsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListBackupsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BackupServiceClient interface { + // Returns the specified ClickHouse Backup resource. + // + // To get the list of available ClickHouse Backup resources, make a [List] request. + Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) + // Retrieves the list of Backup resources available for the specified folder. + List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) +} + +type backupServiceClient struct { + cc *grpc.ClientConn +} + +func NewBackupServiceClient(cc *grpc.ClientConn) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) { + out := new(Backup) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.BackupService/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) { + out := new(ListBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.BackupService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +type BackupServiceServer interface { + // Returns the specified ClickHouse Backup resource. + // + // To get the list of available ClickHouse Backup resources, make a [List] request. + Get(context.Context, *GetBackupRequest) (*Backup, error) + // Retrieves the list of Backup resources available for the specified folder. + List(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) +} + +func RegisterBackupServiceServer(s *grpc.Server, srv BackupServiceServer) { + s.RegisterService(&_BackupService_serviceDesc, srv) +} + +func _BackupService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.BackupService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).Get(ctx, req.(*GetBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.BackupService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).List(ctx, req.(*ListBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BackupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.clickhouse.v1.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _BackupService_Get_Handler, + }, + { + MethodName: "List", + Handler: _BackupService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/clickhouse/v1/backup_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/backup_service.proto", fileDescriptor_backup_service_c0389392897d6e8b) +} + +var fileDescriptor_backup_service_c0389392897d6e8b = []byte{ + // 466 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x3f, 0x6f, 0xd3, 0x40, + 0x14, 0xc0, 0xe5, 0x24, 0x94, 0xf8, 0xa0, 0x02, 0x1d, 0x4b, 0x14, 0x41, 0x15, 0x3c, 0x84, 0xf0, + 0x27, 0x3e, 0x3b, 0x51, 0x27, 0x5a, 0x09, 0x65, 0xa9, 0x2a, 0x81, 0x40, 0x2e, 0x13, 0x4b, 0x74, + 0xf6, 0x3d, 0xdc, 0x53, 0x9c, 0x3b, 0x93, 0x3b, 0x47, 0xa5, 0x08, 0x21, 0x31, 0x76, 0xa4, 0x03, + 0x1f, 0x87, 0xb1, 0xdd, 0xf9, 0x0a, 0x0c, 0x7c, 0x06, 0x26, 0xe4, 0x3b, 0x87, 0x50, 0x40, 0x69, + 0x18, 0x7d, 0xef, 0xfd, 0xde, 0xfb, 0xe9, 0xbd, 0x67, 0x34, 
0x7c, 0x4b, 0x05, 0x83, 0x23, 0x92, + 0x64, 0xb2, 0x60, 0x64, 0xca, 0x62, 0x92, 0x64, 0x3c, 0x99, 0x1c, 0xca, 0x42, 0x01, 0x99, 0x87, + 0x24, 0xa6, 0xc9, 0xa4, 0xc8, 0xc7, 0x0a, 0x66, 0x73, 0x9e, 0x80, 0x9f, 0xcf, 0xa4, 0x96, 0x78, + 0xcb, 0x42, 0xbe, 0x81, 0xfc, 0x29, 0x8b, 0xfd, 0x25, 0xe4, 0xcf, 0xc3, 0xf6, 0xed, 0x54, 0xca, + 0x34, 0x03, 0x42, 0x73, 0x4e, 0xa8, 0x10, 0x52, 0x53, 0xcd, 0xa5, 0x50, 0x96, 0x6e, 0x3f, 0x5c, + 0xab, 0x65, 0x95, 0x7c, 0xe7, 0x42, 0xf2, 0x9c, 0x66, 0x9c, 0x99, 0x62, 0x36, 0xec, 0x6d, 0xa3, + 0x9b, 0x7b, 0xa0, 0x47, 0x86, 0x88, 0xe0, 0x4d, 0x01, 0x4a, 0xe3, 0xbb, 0xc8, 0xad, 0xac, 0x39, + 0x6b, 0x39, 0x1d, 0xa7, 0xe7, 0x8e, 0x1a, 0xdf, 0xcf, 0x42, 0x27, 0x6a, 0xda, 0xe7, 0x7d, 0xe6, + 0x7d, 0x72, 0x10, 0x7e, 0xca, 0x55, 0x05, 0xaa, 0x05, 0x79, 0x1f, 0xb9, 0xaf, 0x65, 0xc6, 0x60, + 0xb6, 0x24, 0xaf, 0x97, 0xe4, 0xc9, 0x79, 0xd8, 0xd8, 0xd9, 0xdd, 0x0e, 0xa2, 0xa6, 0x0d, 0xef, + 0x33, 0x7c, 0x0f, 0xb9, 0x39, 0x4d, 0x61, 0xac, 0xf8, 0x31, 0xb4, 0x6a, 0x1d, 0xa7, 0x57, 0x1f, + 0xa1, 0x1f, 0x67, 0xe1, 0xc6, 0xce, 0x6e, 0x18, 0x04, 0x41, 0xd4, 0x2c, 0x83, 0x07, 0xfc, 0x18, + 0x70, 0x0f, 0x21, 0x93, 0xa8, 0xe5, 0x04, 0x44, 0xab, 0x6e, 0x8a, 0xba, 0x27, 0xe7, 0xe1, 0x15, + 0x93, 0x19, 0x99, 0x2a, 0x2f, 0xcb, 0x98, 0xf7, 0x01, 0xdd, 0xba, 0xe0, 0xa4, 0x72, 0x29, 0x14, + 0xe0, 0x27, 0xe8, 0xaa, 0xf5, 0x56, 0x2d, 0xa7, 0x53, 0xef, 0x5d, 0x1b, 0x74, 0xfd, 0xd5, 0xe3, + 0xf7, 0xab, 0x71, 0x2c, 0x30, 0xdc, 0x45, 0x37, 0x04, 0x1c, 0xe9, 0xf1, 0x6f, 0x1e, 0xa5, 0xb1, + 0x1b, 0x6d, 0x96, 0xcf, 0x2f, 0x16, 0x02, 0x83, 0x2f, 0x35, 0xb4, 0x69, 0xd9, 0x03, 0xbb, 0x6e, + 0x7c, 0xea, 0xa0, 0xfa, 0x1e, 0x68, 0x1c, 0x5c, 0xd6, 0xf2, 0xcf, 0x25, 0xb4, 0xd7, 0x94, 0xf4, + 0x06, 0x1f, 0xbf, 0x7e, 0x3b, 0xad, 0x3d, 0xc2, 0x0f, 0xc8, 0x94, 0x0a, 0x9a, 0x02, 0xeb, 0xff, + 0xeb, 0x18, 0x14, 0x79, 0xf7, 0x6b, 0xa5, 0xef, 0xf1, 0x67, 0x07, 0x35, 0xca, 0x49, 0xe1, 0xc1, + 0x65, 0x4d, 0xfe, 0xde, 0x71, 0x7b, 0xf8, 0x5f, 0x8c, 0xdd, 0x81, 0xd7, 0x35, 0x96, 0x1d, 0xbc, + 0xb5, 0xda, 0x72, 0xf4, 0xfc, 0xd5, 0xb3, 0x94, 0xeb, 0xc3, 0x22, 0xf6, 0x13, 0x39, 0x25, 0xb6, + 0x51, 0xdf, 0x9e, 0x6e, 0x2a, 0xfb, 0x29, 0x08, 0x73, 0xb5, 0x64, 0xf5, 0x0f, 0xf0, 0x78, 0xf9, + 0x15, 0x6f, 0x18, 0x60, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0xda, 0x84, 0x09, 0x05, 0xa7, 0x03, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster.pb.go new file mode 100644 index 000000000..876539875 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster.pb.go @@ -0,0 +1,1079 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/cluster.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Deployment environment. +type Cluster_Environment int32 + +const ( + Cluster_ENVIRONMENT_UNSPECIFIED Cluster_Environment = 0 + // Stable environment with a conservative update policy: + // only hotfixes are applied during regular maintenance. + Cluster_PRODUCTION Cluster_Environment = 1 + // Environment with more aggressive update policy: new versions + // are rolled out irrespective of backward compatibility. + Cluster_PRESTABLE Cluster_Environment = 2 +) + +var Cluster_Environment_name = map[int32]string{ + 0: "ENVIRONMENT_UNSPECIFIED", + 1: "PRODUCTION", + 2: "PRESTABLE", +} +var Cluster_Environment_value = map[string]int32{ + "ENVIRONMENT_UNSPECIFIED": 0, + "PRODUCTION": 1, + "PRESTABLE": 2, +} + +func (x Cluster_Environment) String() string { + return proto.EnumName(Cluster_Environment_name, int32(x)) +} +func (Cluster_Environment) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{0, 0} +} + +type Cluster_Health int32 + +const ( + // State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). + Cluster_HEALTH_UNKNOWN Cluster_Health = 0 + // Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). + Cluster_ALIVE Cluster_Health = 1 + // Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). + Cluster_DEAD Cluster_Health = 2 + // Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). + Cluster_DEGRADED Cluster_Health = 3 +) + +var Cluster_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Cluster_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Cluster_Health) String() string { + return proto.EnumName(Cluster_Health_name, int32(x)) +} +func (Cluster_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{0, 1} +} + +type Cluster_Status int32 + +const ( + // Cluster state is unknown. + Cluster_STATUS_UNKNOWN Cluster_Status = 0 + // Cluster is being created. + Cluster_CREATING Cluster_Status = 1 + // Cluster is running normally. + Cluster_RUNNING Cluster_Status = 2 + // Cluster encountered a problem and cannot operate. + Cluster_ERROR Cluster_Status = 3 + // Cluster is being updated. + Cluster_UPDATING Cluster_Status = 4 + // Cluster is stopping. + Cluster_STOPPING Cluster_Status = 5 + // Cluster stopped. + Cluster_STOPPED Cluster_Status = 6 + // Cluster is starting. + Cluster_STARTING Cluster_Status = 7 +) + +var Cluster_Status_name = map[int32]string{ + 0: "STATUS_UNKNOWN", + 1: "CREATING", + 2: "RUNNING", + 3: "ERROR", + 4: "UPDATING", + 5: "STOPPING", + 6: "STOPPED", + 7: "STARTING", +} +var Cluster_Status_value = map[string]int32{ + "STATUS_UNKNOWN": 0, + "CREATING": 1, + "RUNNING": 2, + "ERROR": 3, + "UPDATING": 4, + "STOPPING": 5, + "STOPPED": 6, + "STARTING": 7, +} + +func (x Cluster_Status) String() string { + return proto.EnumName(Cluster_Status_name, int32(x)) +} +func (Cluster_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{0, 2} +} + +type Host_Type int32 + +const ( + Host_TYPE_UNSPECIFIED Host_Type = 0 + // ClickHouse host. + Host_CLICKHOUSE Host_Type = 1 + // ZooKeeper host. 
+ Host_ZOOKEEPER Host_Type = 2 +) + +var Host_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CLICKHOUSE", + 2: "ZOOKEEPER", +} +var Host_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CLICKHOUSE": 1, + "ZOOKEEPER": 2, +} + +func (x Host_Type) String() string { + return proto.EnumName(Host_Type_name, int32(x)) +} +func (Host_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{5, 0} +} + +type Host_Health int32 + +const ( + // Health of the host is unknown. + Host_UNKNOWN Host_Health = 0 + // The host is performing all its functions normally. + Host_ALIVE Host_Health = 1 + // The host is inoperable, and cannot perform any of its essential functions. + Host_DEAD Host_Health = 2 + // The host is degraded, and can perform only some of its essential functions. + Host_DEGRADED Host_Health = 3 +) + +var Host_Health_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Host_Health_value = map[string]int32{ + "UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Host_Health) String() string { + return proto.EnumName(Host_Health_name, int32(x)) +} +func (Host_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{5, 1} +} + +type Service_Type int32 + +const ( + Service_TYPE_UNSPECIFIED Service_Type = 0 + // The host is a ClickHouse server. + Service_CLICKHOUSE Service_Type = 1 + // The host is a ZooKeeper server. + Service_ZOOKEEPER Service_Type = 2 +) + +var Service_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CLICKHOUSE", + 2: "ZOOKEEPER", +} +var Service_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CLICKHOUSE": 1, + "ZOOKEEPER": 2, +} + +func (x Service_Type) String() string { + return proto.EnumName(Service_Type_name, int32(x)) +} +func (Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{6, 0} +} + +type Service_Health int32 + +const ( + // Health of the server is unknown. + Service_UNKNOWN Service_Health = 0 + // The server is working normally. + Service_ALIVE Service_Health = 1 + // The server is dead or unresponsive. + Service_DEAD Service_Health = 2 +) + +var Service_Health_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ALIVE", + 2: "DEAD", +} +var Service_Health_value = map[string]int32{ + "UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, +} + +func (x Service_Health) String() string { + return proto.EnumName(Service_Health_name, int32(x)) +} +func (Service_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{6, 1} +} + +// A ClickHouse Cluster resource. For more information, see the +// [Cluster](/docs/managed-clickhouse/concepts) section in the Developer's Guide. +type Cluster struct { + // ID of the ClickHouse cluster. + // This ID is assigned by MDB at creation time. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the ClickHouse cluster belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the ClickHouse cluster. + // The name is unique within the folder. 1-63 characters long. 
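+	// Illustrative sketch (assumes a *Cluster value named c returned by the
+	// cluster service): the Status and Health enums declared above can be
+	// checked like this:
+	//
+	//	if c.GetStatus() == Cluster_RUNNING && c.GetHealth() == Cluster_ALIVE {
+	//		fmt.Println(c.GetName(), "is running and healthy")
+	//	}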
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the ClickHouse cluster. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the PostgreSQL cluster as `` key:value `` pairs. Maximum 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the ClickHouse cluster. + Environment Cluster_Environment `protobuf:"varint,7,opt,name=environment,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Cluster_Environment" json:"environment,omitempty"` + // Description of monitoring systems relevant to the ClickHouse cluster. + Monitoring []*Monitoring `protobuf:"bytes,8,rep,name=monitoring,proto3" json:"monitoring,omitempty"` + // Configuration of the ClickHouse cluster. + Config *ClusterConfig `protobuf:"bytes,9,opt,name=config,proto3" json:"config,omitempty"` + // ID of the network that the cluster belongs to. + NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Aggregated cluster health. + Health Cluster_Health `protobuf:"varint,11,opt,name=health,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Cluster_Health" json:"health,omitempty"` + // Current state of the cluster. + Status Cluster_Status `protobuf:"varint,12,opt,name=status,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Cluster_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{0} +} +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (dst *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(dst, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Cluster) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Cluster) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Cluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Cluster) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *Cluster) GetMonitoring() []*Monitoring { + if m != nil { + return m.Monitoring + } + return nil +} + +func (m *Cluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Cluster) GetNetworkId() string { + if m 
!= nil { + return m.NetworkId + } + return "" +} + +func (m *Cluster) GetHealth() Cluster_Health { + if m != nil { + return m.Health + } + return Cluster_HEALTH_UNKNOWN +} + +func (m *Cluster) GetStatus() Cluster_Status { + if m != nil { + return m.Status + } + return Cluster_STATUS_UNKNOWN +} + +// Monitoring system metadata. +type Monitoring struct { + // Name of the monitoring system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the monitoring system. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Link to the monitoring system charts for the ClickHouse cluster. + Link string `protobuf:"bytes,3,opt,name=link,proto3" json:"link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Monitoring) Reset() { *m = Monitoring{} } +func (m *Monitoring) String() string { return proto.CompactTextString(m) } +func (*Monitoring) ProtoMessage() {} +func (*Monitoring) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{1} +} +func (m *Monitoring) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Monitoring.Unmarshal(m, b) +} +func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Monitoring.Marshal(b, m, deterministic) +} +func (dst *Monitoring) XXX_Merge(src proto.Message) { + xxx_messageInfo_Monitoring.Merge(dst, src) +} +func (m *Monitoring) XXX_Size() int { + return xxx_messageInfo_Monitoring.Size(m) +} +func (m *Monitoring) XXX_DiscardUnknown() { + xxx_messageInfo_Monitoring.DiscardUnknown(m) +} + +var xxx_messageInfo_Monitoring proto.InternalMessageInfo + +func (m *Monitoring) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Monitoring) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Monitoring) GetLink() string { + if m != nil { + return m.Link + } + return "" +} + +type ClusterConfig struct { + // Version of the ClickHouse server software. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Configuration and resource allocation for ClickHouse hosts. + Clickhouse *ClusterConfig_Clickhouse `protobuf:"bytes,2,opt,name=clickhouse,proto3" json:"clickhouse,omitempty"` + // Configuration and resource allocation for ZooKeeper hosts. 
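+	// Illustrative sketch (assumes a *Cluster value named c): the generated
+	// getters are nil-safe, so nested resource allocations can be read directly:
+	//
+	//	chRes := c.GetConfig().GetClickhouse().GetResources()
+	//	zkRes := c.GetConfig().GetZookeeper().GetResources()
+	//	fmt.Println(chRes.GetResourcePresetId(), zkRes.GetDiskSize())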
+ Zookeeper *ClusterConfig_Zookeeper `protobuf:"bytes,3,opt,name=zookeeper,proto3" json:"zookeeper,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } +func (m *ClusterConfig) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig) ProtoMessage() {} +func (*ClusterConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{2} +} +func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterConfig.Unmarshal(m, b) +} +func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterConfig.Marshal(b, m, deterministic) +} +func (dst *ClusterConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterConfig.Merge(dst, src) +} +func (m *ClusterConfig) XXX_Size() int { + return xxx_messageInfo_ClusterConfig.Size(m) +} +func (m *ClusterConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo + +func (m *ClusterConfig) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *ClusterConfig) GetClickhouse() *ClusterConfig_Clickhouse { + if m != nil { + return m.Clickhouse + } + return nil +} + +func (m *ClusterConfig) GetZookeeper() *ClusterConfig_Zookeeper { + if m != nil { + return m.Zookeeper + } + return nil +} + +type ClusterConfig_Clickhouse struct { + // Configuration settings of a ClickHouse server. + Config *config.ClickhouseConfigSet `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // Resources allocated to ClickHouse hosts. + Resources *Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterConfig_Clickhouse) Reset() { *m = ClusterConfig_Clickhouse{} } +func (m *ClusterConfig_Clickhouse) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig_Clickhouse) ProtoMessage() {} +func (*ClusterConfig_Clickhouse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{2, 0} +} +func (m *ClusterConfig_Clickhouse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterConfig_Clickhouse.Unmarshal(m, b) +} +func (m *ClusterConfig_Clickhouse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterConfig_Clickhouse.Marshal(b, m, deterministic) +} +func (dst *ClusterConfig_Clickhouse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterConfig_Clickhouse.Merge(dst, src) +} +func (m *ClusterConfig_Clickhouse) XXX_Size() int { + return xxx_messageInfo_ClusterConfig_Clickhouse.Size(m) +} +func (m *ClusterConfig_Clickhouse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterConfig_Clickhouse.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterConfig_Clickhouse proto.InternalMessageInfo + +func (m *ClusterConfig_Clickhouse) GetConfig() *config.ClickhouseConfigSet { + if m != nil { + return m.Config + } + return nil +} + +func (m *ClusterConfig_Clickhouse) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +type ClusterConfig_Zookeeper struct { + // Resources allocated to ZooKeeper hosts. 
+ Resources *Resources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterConfig_Zookeeper) Reset() { *m = ClusterConfig_Zookeeper{} } +func (m *ClusterConfig_Zookeeper) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig_Zookeeper) ProtoMessage() {} +func (*ClusterConfig_Zookeeper) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{2, 1} +} +func (m *ClusterConfig_Zookeeper) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterConfig_Zookeeper.Unmarshal(m, b) +} +func (m *ClusterConfig_Zookeeper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterConfig_Zookeeper.Marshal(b, m, deterministic) +} +func (dst *ClusterConfig_Zookeeper) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterConfig_Zookeeper.Merge(dst, src) +} +func (m *ClusterConfig_Zookeeper) XXX_Size() int { + return xxx_messageInfo_ClusterConfig_Zookeeper.Size(m) +} +func (m *ClusterConfig_Zookeeper) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterConfig_Zookeeper.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterConfig_Zookeeper proto.InternalMessageInfo + +func (m *ClusterConfig_Zookeeper) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +type Shard struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Config *ShardConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Shard) Reset() { *m = Shard{} } +func (m *Shard) String() string { return proto.CompactTextString(m) } +func (*Shard) ProtoMessage() {} +func (*Shard) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{3} +} +func (m *Shard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Shard.Unmarshal(m, b) +} +func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Shard.Marshal(b, m, deterministic) +} +func (dst *Shard) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard.Merge(dst, src) +} +func (m *Shard) XXX_Size() int { + return xxx_messageInfo_Shard.Size(m) +} +func (m *Shard) XXX_DiscardUnknown() { + xxx_messageInfo_Shard.DiscardUnknown(m) +} + +var xxx_messageInfo_Shard proto.InternalMessageInfo + +func (m *Shard) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Shard) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *Shard) GetConfig() *ShardConfig { + if m != nil { + return m.Config + } + return nil +} + +type ShardConfig struct { + Clickhouse *ShardConfig_Clickhouse `protobuf:"bytes,1,opt,name=clickhouse,proto3" json:"clickhouse,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardConfig) Reset() { *m = ShardConfig{} } +func (m *ShardConfig) String() string { return proto.CompactTextString(m) } +func (*ShardConfig) ProtoMessage() {} +func (*ShardConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{4} +} +func (m *ShardConfig) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ShardConfig.Unmarshal(m, b) +} +func (m *ShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardConfig.Marshal(b, m, deterministic) +} +func (dst *ShardConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardConfig.Merge(dst, src) +} +func (m *ShardConfig) XXX_Size() int { + return xxx_messageInfo_ShardConfig.Size(m) +} +func (m *ShardConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ShardConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardConfig proto.InternalMessageInfo + +func (m *ShardConfig) GetClickhouse() *ShardConfig_Clickhouse { + if m != nil { + return m.Clickhouse + } + return nil +} + +type ShardConfig_Clickhouse struct { + Config *config.ClickhouseConfigSet `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Resources *Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Weight *wrappers.Int64Value `protobuf:"bytes,3,opt,name=weight,proto3" json:"weight,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardConfig_Clickhouse) Reset() { *m = ShardConfig_Clickhouse{} } +func (m *ShardConfig_Clickhouse) String() string { return proto.CompactTextString(m) } +func (*ShardConfig_Clickhouse) ProtoMessage() {} +func (*ShardConfig_Clickhouse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{4, 0} +} +func (m *ShardConfig_Clickhouse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardConfig_Clickhouse.Unmarshal(m, b) +} +func (m *ShardConfig_Clickhouse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardConfig_Clickhouse.Marshal(b, m, deterministic) +} +func (dst *ShardConfig_Clickhouse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardConfig_Clickhouse.Merge(dst, src) +} +func (m *ShardConfig_Clickhouse) XXX_Size() int { + return xxx_messageInfo_ShardConfig_Clickhouse.Size(m) +} +func (m *ShardConfig_Clickhouse) XXX_DiscardUnknown() { + xxx_messageInfo_ShardConfig_Clickhouse.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardConfig_Clickhouse proto.InternalMessageInfo + +func (m *ShardConfig_Clickhouse) GetConfig() *config.ClickhouseConfigSet { + if m != nil { + return m.Config + } + return nil +} + +func (m *ShardConfig_Clickhouse) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ShardConfig_Clickhouse) GetWeight() *wrappers.Int64Value { + if m != nil { + return m.Weight + } + return nil +} + +type Host struct { + // Name of the ClickHouse host. The host name is assigned by MDB at creation time, and cannot be changed. + // 1-63 characters long. + // + // The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the ClickHouse host. The ID is assigned by MDB at creation time. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the availability zone where the ClickHouse host resides. + ZoneId string `protobuf:"bytes,3,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // Type of the host. + Type Host_Type `protobuf:"varint,4,opt,name=type,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Host_Type" json:"type,omitempty"` + // Resources allocated to the ClickHouse host. 
+ Resources *Resources `protobuf:"bytes,5,opt,name=resources,proto3" json:"resources,omitempty"` + // Status code of the aggregated health of the host. + Health Host_Health `protobuf:"varint,6,opt,name=health,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Host_Health" json:"health,omitempty"` + // Services provided by the host. + Services []*Service `protobuf:"bytes,7,rep,name=services,proto3" json:"services,omitempty"` + // ID of the subnet that the host belongs to. + SubnetId string `protobuf:"bytes,8,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Flag showing public IP assignment status to this host. + AssignPublicIp bool `protobuf:"varint,9,opt,name=assign_public_ip,json=assignPublicIp,proto3" json:"assign_public_ip,omitempty"` + ShardName string `protobuf:"bytes,10,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Host) Reset() { *m = Host{} } +func (m *Host) String() string { return proto.CompactTextString(m) } +func (*Host) ProtoMessage() {} +func (*Host) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{5} +} +func (m *Host) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Host.Unmarshal(m, b) +} +func (m *Host) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Host.Marshal(b, m, deterministic) +} +func (dst *Host) XXX_Merge(src proto.Message) { + xxx_messageInfo_Host.Merge(dst, src) +} +func (m *Host) XXX_Size() int { + return xxx_messageInfo_Host.Size(m) +} +func (m *Host) XXX_DiscardUnknown() { + xxx_messageInfo_Host.DiscardUnknown(m) +} + +var xxx_messageInfo_Host proto.InternalMessageInfo + +func (m *Host) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Host) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *Host) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Host) GetType() Host_Type { + if m != nil { + return m.Type + } + return Host_TYPE_UNSPECIFIED +} + +func (m *Host) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Host) GetHealth() Host_Health { + if m != nil { + return m.Health + } + return Host_UNKNOWN +} + +func (m *Host) GetServices() []*Service { + if m != nil { + return m.Services + } + return nil +} + +func (m *Host) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *Host) GetAssignPublicIp() bool { + if m != nil { + return m.AssignPublicIp + } + return false +} + +func (m *Host) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type Service struct { + // Type of the service provided by the host. + Type Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Service_Type" json:"type,omitempty"` + // Status code of server availability. 
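+	// Illustrative sketch (assumes a *Host value named h): the health of each
+	// service can be inspected alongside the aggregated host health:
+	//
+	//	for _, svc := range h.GetServices() {
+	//		if svc.GetHealth() != Service_ALIVE {
+	//			fmt.Println("unhealthy service:", svc.GetType())
+	//		}
+	//	}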
+ Health Service_Health `protobuf:"varint,2,opt,name=health,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Service_Health" json:"health,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{6} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service.Unmarshal(m, b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) +} +func (dst *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(dst, src) +} +func (m *Service) XXX_Size() int { + return xxx_messageInfo_Service.Size(m) +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetType() Service_Type { + if m != nil { + return m.Type + } + return Service_TYPE_UNSPECIFIED +} + +func (m *Service) GetHealth() Service_Health { + if m != nil { + return m.Health + } + return Service_UNKNOWN +} + +type Resources struct { + // ID of the preset for computational resources available to a host (CPU, memory etc.). + // All available presets are listed in the [documentation](/docs/managed-clickhouse/concepts/instance-types) + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + // Volume of the storage available to a host, in bytes. + DiskSize int64 `protobuf:"varint,2,opt,name=disk_size,json=diskSize,proto3" json:"disk_size,omitempty"` + // Type of the storage environment for the host. + // Possible values: + // * network-hdd — network HDD drive, + // * network-nvme — network SSD drive, + // * local-nvme — local SSD storage. 
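For orientation, a minimal sketch of how the Resources message declared here is filled in: disk_size is a raw byte count and disk_type_id takes one of the string IDs listed just above. The preset ID is an assumed placeholder, not a value taken from this file.

    package example

    import (
        clickhouse "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1"
    )

    // exampleResources shows how the Resources fields line up: a preset ID,
    // a disk size in bytes, and one of the documented disk type IDs.
    func exampleResources() *clickhouse.Resources {
        return &clickhouse.Resources{
            ResourcePresetId: "s1.nano",               // assumed preset ID; see the linked docs
            DiskSize:         32 * 1024 * 1024 * 1024, // 32 GiB, expressed in bytes
            DiskTypeId:       "network-hdd",           // or "network-nvme" / "local-nvme"
        }
    }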
+ DiskTypeId string `protobuf:"bytes,3,opt,name=disk_type_id,json=diskTypeId,proto3" json:"disk_type_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_f1cb14d7e37924fe, []int{7} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (dst *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(dst, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +func (m *Resources) GetDiskSize() int64 { + if m != nil { + return m.DiskSize + } + return 0 +} + +func (m *Resources) GetDiskTypeId() string { + if m != nil { + return m.DiskTypeId + } + return "" +} + +func init() { + proto.RegisterType((*Cluster)(nil), "yandex.cloud.mdb.clickhouse.v1.Cluster") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.clickhouse.v1.Cluster.LabelsEntry") + proto.RegisterType((*Monitoring)(nil), "yandex.cloud.mdb.clickhouse.v1.Monitoring") + proto.RegisterType((*ClusterConfig)(nil), "yandex.cloud.mdb.clickhouse.v1.ClusterConfig") + proto.RegisterType((*ClusterConfig_Clickhouse)(nil), "yandex.cloud.mdb.clickhouse.v1.ClusterConfig.Clickhouse") + proto.RegisterType((*ClusterConfig_Zookeeper)(nil), "yandex.cloud.mdb.clickhouse.v1.ClusterConfig.Zookeeper") + proto.RegisterType((*Shard)(nil), "yandex.cloud.mdb.clickhouse.v1.Shard") + proto.RegisterType((*ShardConfig)(nil), "yandex.cloud.mdb.clickhouse.v1.ShardConfig") + proto.RegisterType((*ShardConfig_Clickhouse)(nil), "yandex.cloud.mdb.clickhouse.v1.ShardConfig.Clickhouse") + proto.RegisterType((*Host)(nil), "yandex.cloud.mdb.clickhouse.v1.Host") + proto.RegisterType((*Service)(nil), "yandex.cloud.mdb.clickhouse.v1.Service") + proto.RegisterType((*Resources)(nil), "yandex.cloud.mdb.clickhouse.v1.Resources") + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Cluster_Environment", Cluster_Environment_name, Cluster_Environment_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Cluster_Health", Cluster_Health_name, Cluster_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Cluster_Status", Cluster_Status_name, Cluster_Status_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Host_Type", Host_Type_name, Host_Type_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Host_Health", Host_Health_name, Host_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Service_Type", Service_Type_name, Service_Type_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.Service_Health", Service_Health_name, Service_Health_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/cluster.proto", fileDescriptor_cluster_f1cb14d7e37924fe) +} + +var fileDescriptor_cluster_f1cb14d7e37924fe = []byte{ + // 1171 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xcf, 0x6e, 0xdb, 0xc6, + 0x13, 0x0e, 0xf5, 0x9f, 0x23, 0x47, 0x20, 0x16, 0x01, 0x42, 0xc8, 0x48, 0x7e, 0x06, 0x2f, 0x3f, + 0x37, 0xb1, 0x29, 0xc4, 0x2e, 0x5c, 0x27, 0x41, 0xd1, 0x2a, 0x12, 0x63, 0xb3, 0xb6, 0x29, 0x61, + 0x45, 0xb9, 0xad, 0x2f, 0x02, 0x25, 0xae, 0x25, 0x42, 0x12, 0x49, 0x90, 0x94, 0x1c, 0xf9, 0xd2, + 0xe7, 0xe9, 0x63, 0xf4, 0xd2, 0x6b, 0x5f, 0xa0, 0xe8, 0x93, 0xf4, 0x50, 0xec, 0x2e, 0x25, 0x4a, + 0x76, 0x13, 0xc9, 0xc9, 0xa9, 0x37, 0xee, 0xec, 0x7c, 0xdf, 0xce, 0x7e, 0x33, 0xc3, 0x59, 0xd8, + 0x9b, 0x59, 0xae, 0x4d, 0x3e, 0x54, 0x7a, 0x23, 0x6f, 0x62, 0x57, 0xc6, 0x76, 0xb7, 0xd2, 0x1b, + 0x39, 0xbd, 0xe1, 0xc0, 0x9b, 0x84, 0xa4, 0x32, 0x7d, 0x55, 0xe9, 0x8d, 0x26, 0x61, 0x44, 0x02, + 0xd5, 0x0f, 0xbc, 0xc8, 0x43, 0xcf, 0xb9, 0xb7, 0xca, 0xbc, 0xd5, 0xb1, 0xdd, 0x55, 0x13, 0x6f, + 0x75, 0xfa, 0xaa, 0xfc, 0xbf, 0xbe, 0xe7, 0xf5, 0x47, 0xa4, 0xc2, 0xbc, 0xbb, 0x93, 0xeb, 0x4a, + 0xe4, 0x8c, 0x49, 0x18, 0x59, 0x63, 0x9f, 0x13, 0x94, 0x9f, 0xdf, 0x75, 0xb8, 0x09, 0x2c, 0xdf, + 0x27, 0x41, 0x18, 0xef, 0x1f, 0xad, 0x0b, 0xc7, 0x73, 0xaf, 0x9d, 0xfe, 0x92, 0x91, 0xe3, 0x94, + 0x3f, 0xf2, 0x90, 0xaf, 0xf1, 0x50, 0x51, 0x09, 0x52, 0x8e, 0x2d, 0x0b, 0x3b, 0xc2, 0xae, 0x88, + 0x53, 0x8e, 0x8d, 0xb6, 0x41, 0xbc, 0xf6, 0x46, 0x36, 0x09, 0x3a, 0x8e, 0x2d, 0xa7, 0x98, 0xb9, + 0xc0, 0x0d, 0xba, 0x8d, 0x5e, 0x03, 0xf4, 0x02, 0x62, 0x45, 0xc4, 0xee, 0x58, 0x91, 0x9c, 0xde, + 0x11, 0x76, 0x8b, 0x07, 0x65, 0x95, 0x47, 0xa9, 0xce, 0xa3, 0x54, 0xcd, 0xf9, 0x35, 0xb0, 0x18, + 0x7b, 0x57, 0x23, 0x84, 0x20, 0xe3, 0x5a, 0x63, 0x22, 0x67, 0x18, 0x25, 0xfb, 0x46, 0x3b, 0x50, + 0xb4, 0x49, 0xd8, 0x0b, 0x1c, 0x3f, 0x72, 0x3c, 0x57, 0xce, 0xb2, 0xad, 0x65, 0x13, 0x3a, 0x83, + 0xdc, 0xc8, 0xea, 0x92, 0x51, 0x28, 0xe7, 0x76, 0xd2, 0xbb, 0xc5, 0x83, 0x43, 0xf5, 0xd3, 0x9a, + 0xaa, 0xf1, 0xb5, 0xd4, 0x73, 0x86, 0xd2, 0xdc, 0x28, 0x98, 0xe1, 0x98, 0x02, 0xb5, 0xa1, 0x48, + 0xdc, 0xa9, 0x13, 0x78, 0xee, 0x98, 0xb8, 0x91, 0x9c, 0xdf, 0x11, 0x76, 0x4b, 0x9b, 0x33, 0x6a, + 0x09, 0x14, 0x2f, 0xf3, 0xa0, 0x1f, 0x00, 0xc6, 0x9e, 0xeb, 0x44, 0x5e, 0xe0, 0xb8, 0x7d, 0xb9, + 0xc0, 0xe2, 0x7c, 0xb1, 0x8e, 0xf5, 0x62, 0x81, 0xc0, 0x4b, 0x68, 0xa4, 0x41, 0x8e, 0x27, 0x4d, + 0x16, 0x99, 0xb8, 0xfb, 0x1b, 0x46, 0x57, 0x63, 0x20, 0x1c, 0x83, 0xd1, 0x33, 0x00, 0x97, 0x44, + 0x37, 0x5e, 0x30, 0xa4, 0x59, 0x04, 0xa6, 0xab, 0x18, 0x5b, 0x74, 0x1b, 0xbd, 0x87, 0xdc, 0x80, + 0x58, 0xa3, 0x68, 0x20, 0x17, 0x99, 0x06, 0xea, 0xa6, 0x1a, 0x9c, 0x32, 0x14, 0x8e, 0xd1, 0x94, + 0x27, 0x8c, 0xac, 0x68, 0x12, 0xca, 0x5b, 0x0f, 0xe3, 0x69, 0x31, 0x14, 0x8e, 0xd1, 0xe5, 0xd7, + 0x50, 0x5c, 0xca, 0x17, 0x92, 0x20, 0x3d, 0x24, 0xb3, 0xb8, 0x26, 0xe9, 0x27, 0x7a, 0x02, 0xd9, + 0xa9, 0x35, 0x9a, 0x90, 0xb8, 0x20, 0xf9, 0xe2, 0x4d, 0xea, 0x58, 0x50, 0x74, 0x28, 0x2e, 0x25, + 0x06, 0x6d, 0xc3, 0x53, 0xcd, 0xb8, 0xd4, 0x71, 0xc3, 0xb8, 0xd0, 0x0c, 0xb3, 0xd3, 0x36, 0x5a, + 0x4d, 0xad, 0xa6, 0xbf, 0xd7, 0xb5, 0xba, 0xf4, 0x08, 0x95, 0x00, 0x9a, 0xb8, 0x51, 0x6f, 0xd7, + 0x4c, 0xbd, 0x61, 0x48, 0x02, 0x7a, 0x0c, 0x62, 0x13, 0x6b, 0x2d, 0xb3, 0xfa, 0xee, 0x5c, 0x93, + 0x52, 0xca, 0x77, 0x90, 0xe3, 0xf7, 0x43, 0x08, 0x4a, 0xa7, 0x5a, 0xf5, 0xdc, 0x3c, 0xed, 0xb4, + 0x8d, 0x33, 0xa3, 0xf1, 0xa3, 0x21, 0x3d, 0x42, 0x22, 0x64, 0xab, 0xe7, 0xfa, 0xa5, 0x26, 0x09, + 0xa8, 0x00, 0x99, 0xba, 0x56, 0xad, 0x4b, 0x29, 0xb4, 0x05, 0x85, 0xba, 0x76, 0x82, 0xab, 0x75, + 0xad, 0x2e, 0xa5, 0x95, 0x19, 0xe4, 0xf8, 0xc5, 0x28, 0x41, 0xcb, 0xac, 0x9a, 0xed, 0xd6, 0x12, + 0xc1, 0x16, 0x14, 0x6a, 
0x58, 0xab, 0x9a, 0xba, 0x71, 0x22, 0x09, 0xa8, 0x08, 0x79, 0xdc, 0x36, + 0x0c, 0xba, 0x48, 0x51, 0x6e, 0x0d, 0xe3, 0x06, 0x96, 0xd2, 0xd4, 0xab, 0xdd, 0xac, 0x73, 0xaf, + 0x0c, 0x5d, 0xb5, 0xcc, 0x46, 0xb3, 0x49, 0x57, 0x59, 0x8a, 0x61, 0x2b, 0xad, 0x2e, 0xe5, 0xf8, + 0x56, 0x15, 0x33, 0xc7, 0xbc, 0x72, 0x09, 0x90, 0x54, 0xd4, 0xa2, 0xd7, 0x84, 0x8f, 0xf7, 0x5a, + 0xea, 0x7e, 0xaf, 0x21, 0xc8, 0x8c, 0x1c, 0x77, 0xc8, 0xda, 0x5a, 0xc4, 0xec, 0x5b, 0xf9, 0x33, + 0x0d, 0x8f, 0x57, 0x4a, 0x0c, 0xc9, 0x90, 0x9f, 0x92, 0x20, 0xa4, 0x1c, 0x9c, 0x7e, 0xbe, 0x44, + 0x3f, 0x01, 0x24, 0xd9, 0x66, 0x07, 0x14, 0x0f, 0x8e, 0x1f, 0x54, 0xbf, 0x6a, 0x6d, 0xb1, 0x87, + 0x97, 0xb8, 0x50, 0x1b, 0xc4, 0x5b, 0xcf, 0x1b, 0x12, 0xe2, 0x93, 0x20, 0xfe, 0xeb, 0x7c, 0xf3, + 0x30, 0xe2, 0xab, 0x39, 0x1c, 0x27, 0x4c, 0xe5, 0x5f, 0x05, 0x80, 0xe4, 0x44, 0x84, 0x17, 0xbd, + 0x27, 0xb0, 0x23, 0xde, 0xac, 0x3b, 0xa2, 0x77, 0x37, 0x68, 0x7e, 0x58, 0x8b, 0x44, 0x8b, 0x46, + 0x3c, 0x01, 0x31, 0x20, 0xa1, 0x37, 0x09, 0x7a, 0x24, 0x8c, 0x25, 0xf9, 0x6a, 0x1d, 0x2d, 0x9e, + 0x03, 0x70, 0x82, 0x2d, 0x9b, 0x20, 0x2e, 0xee, 0xb0, 0xca, 0x2a, 0x7c, 0x3e, 0xab, 0xf2, 0x0b, + 0x64, 0x5b, 0x03, 0x2b, 0xb0, 0xff, 0xb5, 0x62, 0x9e, 0xd1, 0x7c, 0x32, 0x11, 0x93, 0x51, 0x20, + 0xc6, 0x16, 0xdd, 0x46, 0xb5, 0x85, 0x5c, 0x3c, 0x23, 0x2f, 0xd7, 0x45, 0xc0, 0x4e, 0x5a, 0xfd, + 0x51, 0x29, 0xbf, 0xa7, 0xa0, 0xb8, 0x64, 0x47, 0x97, 0x2b, 0x35, 0xc4, 0xaf, 0x76, 0xf4, 0x00, + 0xe2, 0x8f, 0x54, 0x50, 0xf9, 0xaf, 0xff, 0x4e, 0xaa, 0xd1, 0x21, 0xe4, 0x6e, 0x88, 0xd3, 0x1f, + 0xcc, 0x07, 0xec, 0xf6, 0xbd, 0x01, 0xab, 0xbb, 0xd1, 0xd1, 0xd7, 0x97, 0xf4, 0x1f, 0x88, 0x63, + 0x57, 0xe5, 0xb7, 0x0c, 0x64, 0x4e, 0xbd, 0x30, 0xfa, 0x9c, 0x4c, 0x3e, 0x85, 0xfc, 0xad, 0xe7, + 0x12, 0xba, 0xc7, 0x7b, 0x3f, 0x47, 0x97, 0xba, 0x8d, 0xbe, 0x85, 0x4c, 0x34, 0xf3, 0xf9, 0xcc, + 0x2e, 0xad, 0xbf, 0x0d, 0x3d, 0x5f, 0x35, 0x67, 0x3e, 0xc1, 0x0c, 0xb6, 0xaa, 0x48, 0xf6, 0x0b, + 0x14, 0xa9, 0x2d, 0xe6, 0x55, 0x8e, 0x45, 0xf2, 0x72, 0xa3, 0x48, 0xee, 0x0c, 0xab, 0x1a, 0x14, + 0x42, 0x12, 0x4c, 0x1d, 0x1a, 0x4c, 0x9e, 0x0d, 0xe9, 0xff, 0xaf, 0x2d, 0x2c, 0xee, 0x8f, 0x17, + 0x40, 0xfa, 0x3a, 0x0a, 0x27, 0x5d, 0x97, 0x44, 0x54, 0xac, 0x02, 0x7f, 0x1d, 0x71, 0x83, 0x6e, + 0xa3, 0x5d, 0x90, 0xac, 0x30, 0x74, 0xfa, 0x6e, 0xc7, 0x9f, 0x74, 0x47, 0x4e, 0xaf, 0xe3, 0xf8, + 0x6c, 0x8c, 0x17, 0x70, 0x89, 0xdb, 0x9b, 0xcc, 0xac, 0xfb, 0x34, 0x21, 0x21, 0x2d, 0xda, 0x0e, + 0x4b, 0x55, 0x3c, 0x9f, 0x99, 0xc5, 0xb0, 0xc6, 0x44, 0x79, 0x0b, 0x19, 0x2a, 0x23, 0x7a, 0x02, + 0x92, 0xf9, 0x73, 0x53, 0xbb, 0x3f, 0xc6, 0x6a, 0xe7, 0x7a, 0xed, 0xec, 0xb4, 0xd1, 0x6e, 0x69, + 0x7c, 0x8c, 0x5d, 0x35, 0x1a, 0x67, 0x9a, 0xd6, 0xd4, 0xb0, 0x94, 0x52, 0x8e, 0x17, 0x63, 0xac, + 0x08, 0xf9, 0x07, 0xcd, 0xaf, 0xbf, 0x05, 0xc8, 0xc7, 0x57, 0x46, 0xdf, 0xc7, 0xa9, 0x17, 0x98, + 0xe0, 0x7b, 0x1b, 0x2a, 0xb5, 0x9c, 0xfd, 0xe4, 0x91, 0x91, 0xda, 0xec, 0x71, 0x30, 0xe7, 0x58, + 0xcd, 0xdb, 0x97, 0x89, 0xf1, 0x62, 0x73, 0x31, 0x94, 0x0f, 0x20, 0x2e, 0xaa, 0x0f, 0xed, 0x01, + 0x9a, 0xd7, 0x5f, 0xc7, 0x0f, 0x48, 0xc8, 0x33, 0xce, 0x9b, 0x4a, 0x9a, 0xef, 0x34, 0xd9, 0x86, + 0xce, 0x1e, 0xcd, 0xb6, 0x13, 0x0e, 0x3b, 0xa1, 0x73, 0xcb, 0x27, 0x5f, 0x1a, 0x17, 0xa8, 0xa1, + 0xe5, 0xdc, 0xd2, 0xc9, 0xbb, 0xc5, 0x36, 0xa9, 0x2a, 0x49, 0x8f, 0x01, 0xb5, 0xd1, 0x8b, 0xe9, + 0xf6, 0xbb, 0xc6, 0xd5, 0x45, 0xdf, 0x89, 0x06, 0x93, 0xae, 0xda, 0xf3, 0xc6, 0x15, 0x2e, 0xd3, + 0x3e, 0x7f, 0xd4, 0xf7, 0xbd, 0xfd, 0x3e, 0x71, 0x59, 0xe7, 0x57, 0x3e, 0xfd, 0xda, 0x7f, 0x9b, + 0xac, 0xba, 0x39, 0x06, 0x38, 0xfc, 0x27, 0x00, 
0x00, 0xff, 0xff, 0x2e, 0x9c, 0xf6, 0xaa, 0xb0, + 0x0c, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster_service.pb.go new file mode 100644 index 000000000..5d59a7b31 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/cluster_service.pb.go @@ -0,0 +1,3437 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/cluster_service.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ListClusterLogsRequest_ServiceType int32 + +const ( + ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED ListClusterLogsRequest_ServiceType = 0 + // Logs of ClickHouse activity. + ListClusterLogsRequest_CLICKHOUSE ListClusterLogsRequest_ServiceType = 1 +) + +var ListClusterLogsRequest_ServiceType_name = map[int32]string{ + 0: "SERVICE_TYPE_UNSPECIFIED", + 1: "CLICKHOUSE", +} +var ListClusterLogsRequest_ServiceType_value = map[string]int32{ + "SERVICE_TYPE_UNSPECIFIED": 0, + "CLICKHOUSE": 1, +} + +func (x ListClusterLogsRequest_ServiceType) String() string { + return proto.EnumName(ListClusterLogsRequest_ServiceType_name, int32(x)) +} +func (ListClusterLogsRequest_ServiceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{18, 0} +} + +type GetClusterRequest struct { + // ID of the ClickHouse Cluster resource to return. + // To get the cluster ID, use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{0} +} +func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b) +} +func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic) +} +func (dst *GetClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterRequest.Merge(dst, src) +} +func (m *GetClusterRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterRequest.Size(m) +} +func (m *GetClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo + +func (m *GetClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type ListClustersRequest struct { + // ID of the folder to list ClickHouse clusters in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can only use filtering with the [Cluster.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Мust be 1-63 characters long and match the regular expression `^[a-zA-Z0-9_-]+$`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{1} +} +func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b) +} +func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic) +} +func (dst *ListClustersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersRequest.Merge(dst, src) +} +func (m *ListClustersRequest) XXX_Size() int { + return xxx_messageInfo_ListClustersRequest.Size(m) +} +func (m *ListClustersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo + +func (m *ListClustersRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListClustersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListClustersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListClustersResponse struct { + // List of ClickHouse Cluster resources. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value + // for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
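The page_size, page_token and next_page_token fields above follow the usual list-paging contract: feed each next_page_token back as page_token until it comes back empty. A minimal sketch of draining all pages, assuming the ClusterServiceClient interface that protoc-gen-go emits further down in this file (the [ClusterService.List] call the comments refer to); the folder ID and filter value are placeholders.

    package example

    import (
        "context"

        clickhouse "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1"
    )

    // listAllClusters collects every ClickHouse cluster in one folder by paging
    // through ListClustersResponse until next_page_token is empty. The client is
    // assumed to be the generated ClusterServiceClient over an authenticated
    // gRPC connection.
    func listAllClusters(ctx context.Context, client clickhouse.ClusterServiceClient, folderID string) ([]*clickhouse.Cluster, error) {
        var clusters []*clickhouse.Cluster
        req := &clickhouse.ListClustersRequest{
            FolderId: folderID,
            PageSize: 100,
            Filter:   `name = "my-cluster"`, // optional; only Cluster.name is filterable
        }
        for {
            resp, err := client.List(ctx, req)
            if err != nil {
                return nil, err
            }
            clusters = append(clusters, resp.GetClusters()...)
            if resp.GetNextPageToken() == "" {
                return clusters, nil // empty token: last page reached
            }
            req.PageToken = resp.GetNextPageToken()
        }
    }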
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{2} +} +func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b) +} +func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic) +} +func (dst *ListClustersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersResponse.Merge(dst, src) +} +func (m *ListClustersResponse) XXX_Size() int { + return xxx_messageInfo_ListClustersResponse.Size(m) +} +func (m *ListClustersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateClusterRequest struct { + // ID of the folder to create the ClickHouse cluster in. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the ClickHouse cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the ClickHouse cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the ClickHouse cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the ClickHouse cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Cluster_Environment" json:"environment,omitempty"` + // Configuration and resources for hosts that should be created for the ClickHouse cluster. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Descriptions of databases to be created in the ClickHouse cluster. + DatabaseSpecs []*DatabaseSpec `protobuf:"bytes,7,rep,name=database_specs,json=databaseSpecs,proto3" json:"database_specs,omitempty"` + // Descriptions of database users to be created in the ClickHouse cluster. + UserSpecs []*UserSpec `protobuf:"bytes,8,rep,name=user_specs,json=userSpecs,proto3" json:"user_specs,omitempty"` + // Individual configurations for hosts that should be created for the ClickHouse cluster. + HostSpecs []*HostSpec `protobuf:"bytes,9,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the cluster in. 
+ NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Name of the first shard in cluster. If not set, it defaults to the value 'shard1'. + ShardName string `protobuf:"bytes,11,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{3} +} +func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b) +} +func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *CreateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterRequest.Merge(dst, src) +} +func (m *CreateClusterRequest) XXX_Size() int { + return xxx_messageInfo_CreateClusterRequest.Size(m) +} +func (m *CreateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo + +func (m *CreateClusterRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *CreateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *CreateClusterRequest) GetDatabaseSpecs() []*DatabaseSpec { + if m != nil { + return m.DatabaseSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetUserSpecs() []*UserSpec { + if m != nil { + return m.UserSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *CreateClusterRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type CreateClusterMetadata struct { + // ID of the ClickHouse cluster that is being created. 
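To show how the CreateClusterRequest above fits together, a hedged sketch that fills in only the fields whose types are visible in this file; the spec messages and the Environment enum are generated in other files of this package, and all IDs and names are placeholders.

    package example

    import (
        clickhouse "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1"
    )

    // newCreateClusterRequest sketches the scalar part of a CreateClusterRequest.
    // ConfigSpec, DatabaseSpecs, UserSpecs and HostSpecs are also expected by the
    // service, but their message types live elsewhere in this package, so they
    // are only noted in comments here.
    func newCreateClusterRequest(folderID, networkID string) *clickhouse.CreateClusterRequest {
        return &clickhouse.CreateClusterRequest{
            FolderId:    folderID,
            Name:        "chtest", // must be unique within the folder
            Description: "example cluster",
            Labels:      map[string]string{"project": "mvp"}, // at most 64 key:value pairs
            // Environment would be set from the Cluster_Environment enum declared in cluster.pb.go.
            NetworkId: networkID,
            ShardName: "shard1", // the documented default name of the first shard
            // ConfigSpec, DatabaseSpecs, UserSpecs and HostSpecs omitted in this sketch.
        }
    }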
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } +func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateClusterMetadata) ProtoMessage() {} +func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{4} +} +func (m *CreateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterMetadata.Unmarshal(m, b) +} +func (m *CreateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterMetadata.Merge(dst, src) +} +func (m *CreateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_CreateClusterMetadata.Size(m) +} +func (m *CreateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterMetadata proto.InternalMessageInfo + +func (m *CreateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type UpdateClusterRequest struct { + // ID of the ClickHouse Cluster resource to update. + // To get the ClickHouse cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Field mask that specifies which fields of the ClickHouse Cluster resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New description of the ClickHouse cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the ClickHouse cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + // + // The new set of labels will completely replace the old ones. To add a label, request the current + // set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // New configuration and resources for hosts in the cluster. 
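Because update_mask selects which of these fields are applied and the label set is replaced wholesale, the get-modify-update flow described in the comment above looks roughly like this; the sketch assumes the generated ClusterServiceClient with the Get and Update calls those comments reference, and reuses the field_mask package imported at the top of this file.

    package example

    import (
        "context"

        clickhouse "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1"
        field_mask "google.golang.org/genproto/protobuf/field_mask"
    )

    // relabelCluster adds one label to a cluster. Since an update fully replaces
    // the label set, the current labels are fetched first and resent with the new
    // entry, and update_mask restricts the change to the labels field only.
    func relabelCluster(ctx context.Context, client clickhouse.ClusterServiceClient, clusterID, key, value string) error {
        cluster, err := client.Get(ctx, &clickhouse.GetClusterRequest{ClusterId: clusterID})
        if err != nil {
            return err
        }
        labels := cluster.GetLabels()
        if labels == nil {
            labels = map[string]string{}
        }
        labels[key] = value

        _, err = client.Update(ctx, &clickhouse.UpdateClusterRequest{
            ClusterId:  clusterID,
            Labels:     labels,
            UpdateMask: &field_mask.FieldMask{Paths: []string{"labels"}},
        })
        return err
    }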
+ ConfigSpec *ConfigSpec `protobuf:"bytes,5,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{5} +} +func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b) +} +func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterRequest.Merge(dst, src) +} +func (m *UpdateClusterRequest) XXX_Size() int { + return xxx_messageInfo_UpdateClusterRequest.Size(m) +} +func (m *UpdateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo + +func (m *UpdateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +type UpdateClusterMetadata struct { + // ID of the ClickHouse Cluster resource that is being updated. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } +func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterMetadata) ProtoMessage() {} +func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{6} +} +func (m *UpdateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterMetadata.Unmarshal(m, b) +} +func (m *UpdateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterMetadata.Merge(dst, src) +} +func (m *UpdateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateClusterMetadata.Size(m) +} +func (m *UpdateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterMetadata proto.InternalMessageInfo + +func (m *UpdateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterRequest struct { + // ID of the ClickHouse cluster to delete. 
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{7} +} +func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b) +} +func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterRequest.Merge(dst, src) +} +func (m *DeleteClusterRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterRequest.Size(m) +} +func (m *DeleteClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo + +func (m *DeleteClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterMetadata struct { + // ID of the ClickHouse cluster that is being deleted. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterMetadata) Reset() { *m = DeleteClusterMetadata{} } +func (m *DeleteClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterMetadata) ProtoMessage() {} +func (*DeleteClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{8} +} +func (m *DeleteClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterMetadata.Merge(dst, src) +} +func (m *DeleteClusterMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterMetadata.Size(m) +} +func (m *DeleteClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterMetadata proto.InternalMessageInfo + +func (m *DeleteClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterRequest struct { + // Required. ID of the ClickHouse cluster to start. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterRequest) Reset() { *m = StartClusterRequest{} } +func (m *StartClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StartClusterRequest) ProtoMessage() {} +func (*StartClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{9} +} +func (m *StartClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterRequest.Unmarshal(m, b) +} +func (m *StartClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StartClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterRequest.Merge(dst, src) +} +func (m *StartClusterRequest) XXX_Size() int { + return xxx_messageInfo_StartClusterRequest.Size(m) +} +func (m *StartClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterRequest proto.InternalMessageInfo + +func (m *StartClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterMetadata struct { + // Required. ID of the ClickHouse cluster. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterMetadata) Reset() { *m = StartClusterMetadata{} } +func (m *StartClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StartClusterMetadata) ProtoMessage() {} +func (*StartClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{10} +} +func (m *StartClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterMetadata.Unmarshal(m, b) +} +func (m *StartClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StartClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterMetadata.Merge(dst, src) +} +func (m *StartClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StartClusterMetadata.Size(m) +} +func (m *StartClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterMetadata proto.InternalMessageInfo + +func (m *StartClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterRequest struct { + // Required. ID of the ClickHouse cluster to stop. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterRequest) Reset() { *m = StopClusterRequest{} } +func (m *StopClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StopClusterRequest) ProtoMessage() {} +func (*StopClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{11} +} +func (m *StopClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterRequest.Unmarshal(m, b) +} +func (m *StopClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StopClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterRequest.Merge(dst, src) +} +func (m *StopClusterRequest) XXX_Size() int { + return xxx_messageInfo_StopClusterRequest.Size(m) +} +func (m *StopClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterRequest proto.InternalMessageInfo + +func (m *StopClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterMetadata struct { + // Required. ID of the ClickHouse cluster. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterMetadata) Reset() { *m = StopClusterMetadata{} } +func (m *StopClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StopClusterMetadata) ProtoMessage() {} +func (*StopClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{12} +} +func (m *StopClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterMetadata.Unmarshal(m, b) +} +func (m *StopClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StopClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterMetadata.Merge(dst, src) +} +func (m *StopClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StopClusterMetadata.Size(m) +} +func (m *StopClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterMetadata proto.InternalMessageInfo + +func (m *StopClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterRequest struct { + // ID of the ClickHouse cluster to back up. + // To get the ClickHouse cluster ID use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterRequest) Reset() { *m = BackupClusterRequest{} } +func (m *BackupClusterRequest) String() string { return proto.CompactTextString(m) } +func (*BackupClusterRequest) ProtoMessage() {} +func (*BackupClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{13} +} +func (m *BackupClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterRequest.Unmarshal(m, b) +} +func (m *BackupClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterRequest.Marshal(b, m, deterministic) +} +func (dst *BackupClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterRequest.Merge(dst, src) +} +func (m *BackupClusterRequest) XXX_Size() int { + return xxx_messageInfo_BackupClusterRequest.Size(m) +} +func (m *BackupClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterRequest proto.InternalMessageInfo + +func (m *BackupClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterMetadata struct { + // ID of the ClickHouse cluster that is being backed up. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterMetadata) Reset() { *m = BackupClusterMetadata{} } +func (m *BackupClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*BackupClusterMetadata) ProtoMessage() {} +func (*BackupClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{14} +} +func (m *BackupClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterMetadata.Unmarshal(m, b) +} +func (m *BackupClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *BackupClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterMetadata.Merge(dst, src) +} +func (m *BackupClusterMetadata) XXX_Size() int { + return xxx_messageInfo_BackupClusterMetadata.Size(m) +} +func (m *BackupClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterMetadata proto.InternalMessageInfo + +func (m *BackupClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type RestoreClusterRequest struct { + // ID of the backup to create a cluster from. + // To get the backup ID, use a [ClusterService.ListBackups] request. + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + // Name of the new ClickHouse cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the new ClickHouse cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the ClickHouse cluster as `` key:value `` pairs. 
Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the new ClickHouse cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Cluster_Environment" json:"environment,omitempty"` + // Configuration for the ClickHouse cluster to be created. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Configurations for ClickHouse hosts that should be created for + // the cluster that is being created from the backup. + HostSpecs []*HostSpec `protobuf:"bytes,7,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the ClickHouse cluster in. + NetworkId string `protobuf:"bytes,8,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterRequest) Reset() { *m = RestoreClusterRequest{} } +func (m *RestoreClusterRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterRequest) ProtoMessage() {} +func (*RestoreClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{15} +} +func (m *RestoreClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterRequest.Unmarshal(m, b) +} +func (m *RestoreClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterRequest.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterRequest.Merge(dst, src) +} +func (m *RestoreClusterRequest) XXX_Size() int { + return xxx_messageInfo_RestoreClusterRequest.Size(m) +} +func (m *RestoreClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterRequest proto.InternalMessageInfo + +func (m *RestoreClusterRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +func (m *RestoreClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RestoreClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *RestoreClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *RestoreClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *RestoreClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *RestoreClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *RestoreClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type RestoreClusterMetadata struct { + // ID of the new ClickHouse cluster that is being created from a backup. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the backup that is being used for creating a cluster. 
+ BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterMetadata) Reset() { *m = RestoreClusterMetadata{} } +func (m *RestoreClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterMetadata) ProtoMessage() {} +func (*RestoreClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{16} +} +func (m *RestoreClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterMetadata.Unmarshal(m, b) +} +func (m *RestoreClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterMetadata.Merge(dst, src) +} +func (m *RestoreClusterMetadata) XXX_Size() int { + return xxx_messageInfo_RestoreClusterMetadata.Size(m) +} +func (m *RestoreClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterMetadata proto.InternalMessageInfo + +func (m *RestoreClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RestoreClusterMetadata) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type LogRecord struct { + // Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contents of the log record. + Message map[string]string `protobuf:"bytes,2,rep,name=message,proto3" json:"message,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{17} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogRecord.Unmarshal(m, b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) +} +func (dst *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(dst, src) +} +func (m *LogRecord) XXX_Size() int { + return xxx_messageInfo_LogRecord.Size(m) +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *LogRecord) GetMessage() map[string]string { + if m != nil { + return m.Message + } + return nil +} + +type ListClusterLogsRequest struct { + // ID of the ClickHouse cluster to request logs for. + // To get the ClickHouse cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Columns from logs table to request. 
+ // If no columns are specified, entire log records are returned. + ColumnFilter []string `protobuf:"bytes,2,rep,name=column_filter,json=columnFilter,proto3" json:"column_filter,omitempty"` + // Type of the service to request logs about. + ServiceType ListClusterLogsRequest_ServiceType `protobuf:"varint,3,opt,name=service_type,json=serviceType,proto3,enum=yandex.cloud.mdb.clickhouse.v1.ListClusterLogsRequest_ServiceType" json:"service_type,omitempty"` + // Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + FromTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=from_time,json=fromTime,proto3" json:"from_time,omitempty"` + // End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + ToTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=to_time,json=toTime,proto3" json:"to_time,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,6,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterLogsRequest) Reset() { *m = ListClusterLogsRequest{} } +func (m *ListClusterLogsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterLogsRequest) ProtoMessage() {} +func (*ListClusterLogsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{18} +} +func (m *ListClusterLogsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterLogsRequest.Unmarshal(m, b) +} +func (m *ListClusterLogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterLogsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterLogsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterLogsRequest.Merge(dst, src) +} +func (m *ListClusterLogsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterLogsRequest.Size(m) +} +func (m *ListClusterLogsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterLogsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterLogsRequest proto.InternalMessageInfo + +func (m *ListClusterLogsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterLogsRequest) GetColumnFilter() []string { + if m != nil { + return m.ColumnFilter + } + return nil +} + +func (m *ListClusterLogsRequest) GetServiceType() ListClusterLogsRequest_ServiceType { + if m != nil { + return m.ServiceType + } + return ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED +} + +func (m *ListClusterLogsRequest) GetFromTime() *timestamp.Timestamp { + if m != nil { + return m.FromTime + } + return nil +} + +func (m *ListClusterLogsRequest) GetToTime() *timestamp.Timestamp { + if m != nil { + return m.ToTime + } + return nil +} + +func (m *ListClusterLogsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func 
(m *ListClusterLogsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterLogsResponse struct { + // Requested log records. + Logs []*LogRecord `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterLogsResponse) Reset() { *m = ListClusterLogsResponse{} } +func (m *ListClusterLogsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterLogsResponse) ProtoMessage() {} +func (*ListClusterLogsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{19} +} +func (m *ListClusterLogsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterLogsResponse.Unmarshal(m, b) +} +func (m *ListClusterLogsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterLogsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterLogsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterLogsResponse.Merge(dst, src) +} +func (m *ListClusterLogsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterLogsResponse.Size(m) +} +func (m *ListClusterLogsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterLogsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterLogsResponse proto.InternalMessageInfo + +func (m *ListClusterLogsResponse) GetLogs() []*LogRecord { + if m != nil { + return m.Logs + } + return nil +} + +func (m *ListClusterLogsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterOperationsRequest struct { + // ID of the ClickHouse Cluster resource to list operations for. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] + // returned by a previous list request. 
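For the log-listing messages above, a sketch of requesting the last hour of ClickHouse logs. It assumes the generated client exposes a ListLogs method paired with these request/response types; the column names are illustrative only, and omitting ColumnFilter returns full records as documented.

    package example

    import (
        "context"
        "time"

        "github.com/golang/protobuf/ptypes"
        clickhouse "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1"
    )

    // lastHourLogs requests one hour of ClickHouse service logs for a cluster,
    // restricted to two (assumed) columns. Timestamps are built with
    // ptypes.TimestampProto, matching the timestamp package imported above.
    func lastHourLogs(ctx context.Context, client clickhouse.ClusterServiceClient, clusterID string) ([]*clickhouse.LogRecord, error) {
        now := time.Now()
        from, err := ptypes.TimestampProto(now.Add(-time.Hour))
        if err != nil {
            return nil, err
        }
        to, err := ptypes.TimestampProto(now)
        if err != nil {
            return nil, err
        }
        resp, err := client.ListLogs(ctx, &clickhouse.ListClusterLogsRequest{
            ClusterId:    clusterID,
            ServiceType:  clickhouse.ListClusterLogsRequest_CLICKHOUSE,
            ColumnFilter: []string{"hostname", "message"}, // assumed column names
            FromTime:     from,
            ToTime:       to,
            PageSize:     1000,
        })
        if err != nil {
            return nil, err
        }
        return resp.GetLogs(), nil
    }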
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsRequest) Reset() { *m = ListClusterOperationsRequest{} } +func (m *ListClusterOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsRequest) ProtoMessage() {} +func (*ListClusterOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{20} +} +func (m *ListClusterOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsRequest.Unmarshal(m, b) +} +func (m *ListClusterOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsRequest.Merge(dst, src) +} +func (m *ListClusterOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsRequest.Size(m) +} +func (m *ListClusterOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsRequest proto.InternalMessageInfo + +func (m *ListClusterOperationsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterOperationsResponse struct { + // List of Operation resources for the specified ClickHouse cluster. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsResponse) Reset() { *m = ListClusterOperationsResponse{} } +func (m *ListClusterOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsResponse) ProtoMessage() {} +func (*ListClusterOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{21} +} +func (m *ListClusterOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsResponse.Unmarshal(m, b) +} +func (m *ListClusterOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsResponse.Merge(dst, src) +} +func (m *ListClusterOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsResponse.Size(m) +} +func (m *ListClusterOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsResponse proto.InternalMessageInfo + +func (m *ListClusterOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListClusterOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterBackupsRequest struct { + // ID of the ClickHouse cluster. + // To get the ClickHouse cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListClusterBackupsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsRequest) Reset() { *m = ListClusterBackupsRequest{} } +func (m *ListClusterBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsRequest) ProtoMessage() {} +func (*ListClusterBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{22} +} +func (m *ListClusterBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsRequest.Unmarshal(m, b) +} +func (m *ListClusterBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsRequest.Merge(dst, src) +} +func (m *ListClusterBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsRequest.Size(m) +} +func (m *ListClusterBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsRequest proto.InternalMessageInfo + +func (m *ListClusterBackupsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterBackupsResponse struct { + // List of ClickHouse Backup resources. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsResponse) Reset() { *m = ListClusterBackupsResponse{} } +func (m *ListClusterBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsResponse) ProtoMessage() {} +func (*ListClusterBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{23} +} +func (m *ListClusterBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsResponse.Unmarshal(m, b) +} +func (m *ListClusterBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsResponse.Merge(dst, src) +} +func (m *ListClusterBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsResponse.Size(m) +} +func (m *ListClusterBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsResponse proto.InternalMessageInfo + +func (m *ListClusterBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListClusterBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterHostsRequest struct { + // ID of the ClickHouse cluster. + // To get the ClickHouse cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsRequest) Reset() { *m = ListClusterHostsRequest{} } +func (m *ListClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsRequest) ProtoMessage() {} +func (*ListClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{24} +} +func (m *ListClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsRequest.Unmarshal(m, b) +} +func (m *ListClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsRequest.Merge(dst, src) +} +func (m *ListClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsRequest.Size(m) +} +func (m *ListClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsRequest proto.InternalMessageInfo + +func (m *ListClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterHostsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterHostsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterHostsResponse struct { + // Requested list of hosts for the cluster. + Hosts []*Host `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsResponse) Reset() { *m = ListClusterHostsResponse{} } +func (m *ListClusterHostsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsResponse) ProtoMessage() {} +func (*ListClusterHostsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{25} +} +func (m *ListClusterHostsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsResponse.Unmarshal(m, b) +} +func (m *ListClusterHostsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsResponse.Merge(dst, src) +} +func (m *ListClusterHostsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsResponse.Size(m) +} +func (m *ListClusterHostsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsResponse proto.InternalMessageInfo + +func (m *ListClusterHostsResponse) GetHosts() []*Host { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *ListClusterHostsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type AddClusterHostsRequest struct { + // ID of the ClickHouse cluster to add hosts to. + // To get the ClickHouse cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Configurations for ClickHouse hosts that should be added to the cluster. 
+ HostSpecs []*HostSpec `protobuf:"bytes,2,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsRequest) Reset() { *m = AddClusterHostsRequest{} } +func (m *AddClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsRequest) ProtoMessage() {} +func (*AddClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{26} +} +func (m *AddClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsRequest.Unmarshal(m, b) +} +func (m *AddClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsRequest.Merge(dst, src) +} +func (m *AddClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsRequest.Size(m) +} +func (m *AddClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsRequest proto.InternalMessageInfo + +func (m *AddClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +type AddClusterHostsMetadata struct { + // ID of the ClickHouse cluster to which the hosts are being added. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being added to the cluster. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsMetadata) Reset() { *m = AddClusterHostsMetadata{} } +func (m *AddClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsMetadata) ProtoMessage() {} +func (*AddClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{27} +} +func (m *AddClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsMetadata.Unmarshal(m, b) +} +func (m *AddClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsMetadata.Merge(dst, src) +} +func (m *AddClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsMetadata.Size(m) +} +func (m *AddClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsMetadata proto.InternalMessageInfo + +func (m *AddClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsRequest struct { + // ID of the ClickHouse cluster to remove hosts from. 
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts to delete. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsRequest) Reset() { *m = DeleteClusterHostsRequest{} } +func (m *DeleteClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsRequest) ProtoMessage() {} +func (*DeleteClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{28} +} +func (m *DeleteClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsRequest.Unmarshal(m, b) +} +func (m *DeleteClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsRequest.Merge(dst, src) +} +func (m *DeleteClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsRequest.Size(m) +} +func (m *DeleteClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsRequest proto.InternalMessageInfo + +func (m *DeleteClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsRequest) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsMetadata struct { + // ID of the ClickHouse cluster to remove hosts from. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being deleted. 
+ HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsMetadata) Reset() { *m = DeleteClusterHostsMetadata{} } +func (m *DeleteClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsMetadata) ProtoMessage() {} +func (*DeleteClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{29} +} +func (m *DeleteClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsMetadata.Merge(dst, src) +} +func (m *DeleteClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsMetadata.Size(m) +} +func (m *DeleteClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsMetadata proto.InternalMessageInfo + +func (m *DeleteClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type GetClusterShardRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterShardRequest) Reset() { *m = GetClusterShardRequest{} } +func (m *GetClusterShardRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterShardRequest) ProtoMessage() {} +func (*GetClusterShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{30} +} +func (m *GetClusterShardRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterShardRequest.Unmarshal(m, b) +} +func (m *GetClusterShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterShardRequest.Marshal(b, m, deterministic) +} +func (dst *GetClusterShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterShardRequest.Merge(dst, src) +} +func (m *GetClusterShardRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterShardRequest.Size(m) +} +func (m *GetClusterShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterShardRequest proto.InternalMessageInfo + +func (m *GetClusterShardRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetClusterShardRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type ListClusterShardsRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" 
json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterShardsRequest) Reset() { *m = ListClusterShardsRequest{} } +func (m *ListClusterShardsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterShardsRequest) ProtoMessage() {} +func (*ListClusterShardsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{31} +} +func (m *ListClusterShardsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterShardsRequest.Unmarshal(m, b) +} +func (m *ListClusterShardsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterShardsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterShardsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterShardsRequest.Merge(dst, src) +} +func (m *ListClusterShardsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterShardsRequest.Size(m) +} +func (m *ListClusterShardsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterShardsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterShardsRequest proto.InternalMessageInfo + +func (m *ListClusterShardsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterShardsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterShardsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterShardsResponse struct { + Shards []*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterShardsResponse) Reset() { *m = ListClusterShardsResponse{} } +func (m *ListClusterShardsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterShardsResponse) ProtoMessage() {} +func (*ListClusterShardsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{32} +} +func (m *ListClusterShardsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterShardsResponse.Unmarshal(m, b) +} +func (m *ListClusterShardsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterShardsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterShardsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterShardsResponse.Merge(dst, src) +} +func (m *ListClusterShardsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterShardsResponse.Size(m) +} +func (m *ListClusterShardsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterShardsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterShardsResponse proto.InternalMessageInfo + +func (m *ListClusterShardsResponse) GetShards() []*Shard { + if m != nil { + return m.Shards + } + return nil +} + +func (m *ListClusterShardsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type AddClusterShardRequest struct { + ClusterId string 
`protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + ConfigSpec *ShardConfigSpec `protobuf:"bytes,3,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + HostSpecs []*HostSpec `protobuf:"bytes,4,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterShardRequest) Reset() { *m = AddClusterShardRequest{} } +func (m *AddClusterShardRequest) String() string { return proto.CompactTextString(m) } +func (*AddClusterShardRequest) ProtoMessage() {} +func (*AddClusterShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{33} +} +func (m *AddClusterShardRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterShardRequest.Unmarshal(m, b) +} +func (m *AddClusterShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterShardRequest.Marshal(b, m, deterministic) +} +func (dst *AddClusterShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterShardRequest.Merge(dst, src) +} +func (m *AddClusterShardRequest) XXX_Size() int { + return xxx_messageInfo_AddClusterShardRequest.Size(m) +} +func (m *AddClusterShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterShardRequest proto.InternalMessageInfo + +func (m *AddClusterShardRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterShardRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +func (m *AddClusterShardRequest) GetConfigSpec() *ShardConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *AddClusterShardRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +type AddClusterShardMetadata struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterShardMetadata) Reset() { *m = AddClusterShardMetadata{} } +func (m *AddClusterShardMetadata) String() string { return proto.CompactTextString(m) } +func (*AddClusterShardMetadata) ProtoMessage() {} +func (*AddClusterShardMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{34} +} +func (m *AddClusterShardMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterShardMetadata.Unmarshal(m, b) +} +func (m *AddClusterShardMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterShardMetadata.Marshal(b, m, deterministic) +} +func (dst *AddClusterShardMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterShardMetadata.Merge(dst, src) +} +func (m *AddClusterShardMetadata) XXX_Size() int { + return xxx_messageInfo_AddClusterShardMetadata.Size(m) +} +func (m *AddClusterShardMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterShardMetadata.DiscardUnknown(m) +} + +var 
xxx_messageInfo_AddClusterShardMetadata proto.InternalMessageInfo + +func (m *AddClusterShardMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterShardMetadata) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type UpdateClusterShardRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + ConfigSpec *ShardConfigSpec `protobuf:"bytes,4,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterShardRequest) Reset() { *m = UpdateClusterShardRequest{} } +func (m *UpdateClusterShardRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterShardRequest) ProtoMessage() {} +func (*UpdateClusterShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{35} +} +func (m *UpdateClusterShardRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterShardRequest.Unmarshal(m, b) +} +func (m *UpdateClusterShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterShardRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterShardRequest.Merge(dst, src) +} +func (m *UpdateClusterShardRequest) XXX_Size() int { + return xxx_messageInfo_UpdateClusterShardRequest.Size(m) +} +func (m *UpdateClusterShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterShardRequest proto.InternalMessageInfo + +func (m *UpdateClusterShardRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterShardRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +func (m *UpdateClusterShardRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateClusterShardRequest) GetConfigSpec() *ShardConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +type UpdateClusterShardMetadata struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterShardMetadata) Reset() { *m = UpdateClusterShardMetadata{} } +func (m *UpdateClusterShardMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterShardMetadata) ProtoMessage() {} +func (*UpdateClusterShardMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{36} +} +func (m *UpdateClusterShardMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterShardMetadata.Unmarshal(m, b) +} +func (m *UpdateClusterShardMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + return xxx_messageInfo_UpdateClusterShardMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterShardMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterShardMetadata.Merge(dst, src) +} +func (m *UpdateClusterShardMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateClusterShardMetadata.Size(m) +} +func (m *UpdateClusterShardMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterShardMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterShardMetadata proto.InternalMessageInfo + +func (m *UpdateClusterShardMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterShardMetadata) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type DeleteClusterShardRequest struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterShardRequest) Reset() { *m = DeleteClusterShardRequest{} } +func (m *DeleteClusterShardRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterShardRequest) ProtoMessage() {} +func (*DeleteClusterShardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{37} +} +func (m *DeleteClusterShardRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterShardRequest.Unmarshal(m, b) +} +func (m *DeleteClusterShardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterShardRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterShardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterShardRequest.Merge(dst, src) +} +func (m *DeleteClusterShardRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterShardRequest.Size(m) +} +func (m *DeleteClusterShardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterShardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterShardRequest proto.InternalMessageInfo + +func (m *DeleteClusterShardRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterShardRequest) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type DeleteClusterShardMetadata struct { + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterShardMetadata) Reset() { *m = DeleteClusterShardMetadata{} } +func (m *DeleteClusterShardMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterShardMetadata) ProtoMessage() {} +func (*DeleteClusterShardMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{38} +} +func (m *DeleteClusterShardMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterShardMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterShardMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_DeleteClusterShardMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterShardMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterShardMetadata.Merge(dst, src) +} +func (m *DeleteClusterShardMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterShardMetadata.Size(m) +} +func (m *DeleteClusterShardMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterShardMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterShardMetadata proto.InternalMessageInfo + +func (m *DeleteClusterShardMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterShardMetadata) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type HostSpec struct { + // ID of the availability zone where the host resides. + // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + ZoneId string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // Type of the host to be deployed. + Type Host_Type `protobuf:"varint,2,opt,name=type,proto3,enum=yandex.cloud.mdb.clickhouse.v1.Host_Type" json:"type,omitempty"` + // ID of the subnet that the host should belong to. This subnet should be a part + // of the network that the cluster belongs to. + // The ID of the network is set in the [Cluster.network_id] field. + SubnetId string `protobuf:"bytes,3,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Whether the host should get a public IP address on creation. + // + // After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign + // a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + // + // Possible values: + // * false — don't assign a public IP to the host. + // * true — the host should have a public IP address. 
+ AssignPublicIp bool `protobuf:"varint,4,opt,name=assign_public_ip,json=assignPublicIp,proto3" json:"assign_public_ip,omitempty"` + ShardName string `protobuf:"bytes,5,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HostSpec) Reset() { *m = HostSpec{} } +func (m *HostSpec) String() string { return proto.CompactTextString(m) } +func (*HostSpec) ProtoMessage() {} +func (*HostSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{39} +} +func (m *HostSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HostSpec.Unmarshal(m, b) +} +func (m *HostSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HostSpec.Marshal(b, m, deterministic) +} +func (dst *HostSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSpec.Merge(dst, src) +} +func (m *HostSpec) XXX_Size() int { + return xxx_messageInfo_HostSpec.Size(m) +} +func (m *HostSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HostSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSpec proto.InternalMessageInfo + +func (m *HostSpec) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *HostSpec) GetType() Host_Type { + if m != nil { + return m.Type + } + return Host_TYPE_UNSPECIFIED +} + +func (m *HostSpec) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *HostSpec) GetAssignPublicIp() bool { + if m != nil { + return m.AssignPublicIp + } + return false +} + +func (m *HostSpec) GetShardName() string { + if m != nil { + return m.ShardName + } + return "" +} + +type ConfigSpec struct { + // Configuration and resources for a ClickHouse server. + Clickhouse *ConfigSpec_Clickhouse `protobuf:"bytes,1,opt,name=clickhouse,proto3" json:"clickhouse,omitempty"` + // Configuration and resources for a ZooKeeper server. + Zookeeper *ConfigSpec_Zookeeper `protobuf:"bytes,2,opt,name=zookeeper,proto3" json:"zookeeper,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (m *ConfigSpec) String() string { return proto.CompactTextString(m) } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{40} +} +func (m *ConfigSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSpec.Unmarshal(m, b) +} +func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSpec.Marshal(b, m, deterministic) +} +func (dst *ConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec.Merge(dst, src) +} +func (m *ConfigSpec) XXX_Size() int { + return xxx_messageInfo_ConfigSpec.Size(m) +} +func (m *ConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo + +func (m *ConfigSpec) GetClickhouse() *ConfigSpec_Clickhouse { + if m != nil { + return m.Clickhouse + } + return nil +} + +func (m *ConfigSpec) GetZookeeper() *ConfigSpec_Zookeeper { + if m != nil { + return m.Zookeeper + } + return nil +} + +type ConfigSpec_Clickhouse struct { + // Configuration for a ClickHouse server. 
+ Config *config.ClickhouseConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // Resources allocated to ClickHouse hosts. + Resources *Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSpec_Clickhouse) Reset() { *m = ConfigSpec_Clickhouse{} } +func (m *ConfigSpec_Clickhouse) String() string { return proto.CompactTextString(m) } +func (*ConfigSpec_Clickhouse) ProtoMessage() {} +func (*ConfigSpec_Clickhouse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{40, 0} +} +func (m *ConfigSpec_Clickhouse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSpec_Clickhouse.Unmarshal(m, b) +} +func (m *ConfigSpec_Clickhouse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSpec_Clickhouse.Marshal(b, m, deterministic) +} +func (dst *ConfigSpec_Clickhouse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec_Clickhouse.Merge(dst, src) +} +func (m *ConfigSpec_Clickhouse) XXX_Size() int { + return xxx_messageInfo_ConfigSpec_Clickhouse.Size(m) +} +func (m *ConfigSpec_Clickhouse) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec_Clickhouse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec_Clickhouse proto.InternalMessageInfo + +func (m *ConfigSpec_Clickhouse) GetConfig() *config.ClickhouseConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *ConfigSpec_Clickhouse) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +type ConfigSpec_Zookeeper struct { + // Resources allocated to ZooKeeper hosts. + Resources *Resources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSpec_Zookeeper) Reset() { *m = ConfigSpec_Zookeeper{} } +func (m *ConfigSpec_Zookeeper) String() string { return proto.CompactTextString(m) } +func (*ConfigSpec_Zookeeper) ProtoMessage() {} +func (*ConfigSpec_Zookeeper) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{40, 1} +} +func (m *ConfigSpec_Zookeeper) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSpec_Zookeeper.Unmarshal(m, b) +} +func (m *ConfigSpec_Zookeeper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSpec_Zookeeper.Marshal(b, m, deterministic) +} +func (dst *ConfigSpec_Zookeeper) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec_Zookeeper.Merge(dst, src) +} +func (m *ConfigSpec_Zookeeper) XXX_Size() int { + return xxx_messageInfo_ConfigSpec_Zookeeper.Size(m) +} +func (m *ConfigSpec_Zookeeper) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec_Zookeeper.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec_Zookeeper proto.InternalMessageInfo + +func (m *ConfigSpec_Zookeeper) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +type ShardConfigSpec struct { + Clickhouse *ShardConfigSpec_Clickhouse `protobuf:"bytes,1,opt,name=clickhouse,proto3" json:"clickhouse,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardConfigSpec) Reset() { *m = ShardConfigSpec{} } +func (m *ShardConfigSpec) String() string { return 
proto.CompactTextString(m) } +func (*ShardConfigSpec) ProtoMessage() {} +func (*ShardConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{41} +} +func (m *ShardConfigSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardConfigSpec.Unmarshal(m, b) +} +func (m *ShardConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardConfigSpec.Marshal(b, m, deterministic) +} +func (dst *ShardConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardConfigSpec.Merge(dst, src) +} +func (m *ShardConfigSpec) XXX_Size() int { + return xxx_messageInfo_ShardConfigSpec.Size(m) +} +func (m *ShardConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ShardConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardConfigSpec proto.InternalMessageInfo + +func (m *ShardConfigSpec) GetClickhouse() *ShardConfigSpec_Clickhouse { + if m != nil { + return m.Clickhouse + } + return nil +} + +type ShardConfigSpec_Clickhouse struct { + Config *config.ClickhouseConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Resources *Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Weight *wrappers.Int64Value `protobuf:"bytes,3,opt,name=weight,proto3" json:"weight,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardConfigSpec_Clickhouse) Reset() { *m = ShardConfigSpec_Clickhouse{} } +func (m *ShardConfigSpec_Clickhouse) String() string { return proto.CompactTextString(m) } +func (*ShardConfigSpec_Clickhouse) ProtoMessage() {} +func (*ShardConfigSpec_Clickhouse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_069e868ca1dde37a, []int{41, 0} +} +func (m *ShardConfigSpec_Clickhouse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardConfigSpec_Clickhouse.Unmarshal(m, b) +} +func (m *ShardConfigSpec_Clickhouse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardConfigSpec_Clickhouse.Marshal(b, m, deterministic) +} +func (dst *ShardConfigSpec_Clickhouse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardConfigSpec_Clickhouse.Merge(dst, src) +} +func (m *ShardConfigSpec_Clickhouse) XXX_Size() int { + return xxx_messageInfo_ShardConfigSpec_Clickhouse.Size(m) +} +func (m *ShardConfigSpec_Clickhouse) XXX_DiscardUnknown() { + xxx_messageInfo_ShardConfigSpec_Clickhouse.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardConfigSpec_Clickhouse proto.InternalMessageInfo + +func (m *ShardConfigSpec_Clickhouse) GetConfig() *config.ClickhouseConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *ShardConfigSpec_Clickhouse) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ShardConfigSpec_Clickhouse) GetWeight() *wrappers.Int64Value { + if m != nil { + return m.Weight + } + return nil +} + +func init() { + proto.RegisterType((*GetClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClustersResponse") + proto.RegisterType((*CreateClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateClusterRequest.LabelsEntry") + 
proto.RegisterType((*CreateClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateClusterMetadata") + proto.RegisterType((*UpdateClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateClusterRequest.LabelsEntry") + proto.RegisterType((*UpdateClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateClusterMetadata") + proto.RegisterType((*DeleteClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteClusterRequest") + proto.RegisterType((*DeleteClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteClusterMetadata") + proto.RegisterType((*StartClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.StartClusterRequest") + proto.RegisterType((*StartClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.StartClusterMetadata") + proto.RegisterType((*StopClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.StopClusterRequest") + proto.RegisterType((*StopClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.StopClusterMetadata") + proto.RegisterType((*BackupClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.BackupClusterRequest") + proto.RegisterType((*BackupClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.BackupClusterMetadata") + proto.RegisterType((*RestoreClusterRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.RestoreClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.clickhouse.v1.RestoreClusterRequest.LabelsEntry") + proto.RegisterType((*RestoreClusterMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.RestoreClusterMetadata") + proto.RegisterType((*LogRecord)(nil), "yandex.cloud.mdb.clickhouse.v1.LogRecord") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.clickhouse.v1.LogRecord.MessageEntry") + proto.RegisterType((*ListClusterLogsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterLogsRequest") + proto.RegisterType((*ListClusterLogsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterLogsResponse") + proto.RegisterType((*ListClusterOperationsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterOperationsRequest") + proto.RegisterType((*ListClusterOperationsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterOperationsResponse") + proto.RegisterType((*ListClusterBackupsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterBackupsRequest") + proto.RegisterType((*ListClusterBackupsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterBackupsResponse") + proto.RegisterType((*ListClusterHostsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterHostsRequest") + proto.RegisterType((*ListClusterHostsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterHostsResponse") + proto.RegisterType((*AddClusterHostsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.AddClusterHostsRequest") + proto.RegisterType((*AddClusterHostsMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.AddClusterHostsMetadata") + proto.RegisterType((*DeleteClusterHostsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteClusterHostsRequest") + proto.RegisterType((*DeleteClusterHostsMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteClusterHostsMetadata") + proto.RegisterType((*GetClusterShardRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GetClusterShardRequest") + proto.RegisterType((*ListClusterShardsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterShardsRequest") + proto.RegisterType((*ListClusterShardsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListClusterShardsResponse") + 
proto.RegisterType((*AddClusterShardRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.AddClusterShardRequest") + proto.RegisterType((*AddClusterShardMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.AddClusterShardMetadata") + proto.RegisterType((*UpdateClusterShardRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateClusterShardRequest") + proto.RegisterType((*UpdateClusterShardMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateClusterShardMetadata") + proto.RegisterType((*DeleteClusterShardRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteClusterShardRequest") + proto.RegisterType((*DeleteClusterShardMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteClusterShardMetadata") + proto.RegisterType((*HostSpec)(nil), "yandex.cloud.mdb.clickhouse.v1.HostSpec") + proto.RegisterType((*ConfigSpec)(nil), "yandex.cloud.mdb.clickhouse.v1.ConfigSpec") + proto.RegisterType((*ConfigSpec_Clickhouse)(nil), "yandex.cloud.mdb.clickhouse.v1.ConfigSpec.Clickhouse") + proto.RegisterType((*ConfigSpec_Zookeeper)(nil), "yandex.cloud.mdb.clickhouse.v1.ConfigSpec.Zookeeper") + proto.RegisterType((*ShardConfigSpec)(nil), "yandex.cloud.mdb.clickhouse.v1.ShardConfigSpec") + proto.RegisterType((*ShardConfigSpec_Clickhouse)(nil), "yandex.cloud.mdb.clickhouse.v1.ShardConfigSpec.Clickhouse") + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.ListClusterLogsRequest_ServiceType", ListClusterLogsRequest_ServiceType_name, ListClusterLogsRequest_ServiceType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ClusterServiceClient is the client API for ClusterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterServiceClient interface { + // Returns the specified ClickHouse Cluster resource. + // + // To get the list of available ClickHouse Cluster resources, make a [List] request. + Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Retrieves a list of ClickHouse Cluster resources that belong + // to the specified folder. + List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Creates a ClickHouse cluster in the specified folder. + Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified ClickHouse cluster. + Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified ClickHouse cluster. + Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Start the specified ClickHouse cluster. + Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Stop the specified ClickHouse cluster. + Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a backup for the specified ClickHouse cluster. + Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a new ClickHouse cluster using the specified backup. 
+ Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Retrieves logs for the specified ClickHouse cluster. + // For more information about logs, see the [Logs](/docs/yandex-mdb-guide/concepts/logs) section in the Developer's Guide. + ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) + // Retrieves the list of Operation resources for the specified cluster. + ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified ClickHouse cluster. + ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. + DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Returns the specified shard. + GetShard(ctx context.Context, in *GetClusterShardRequest, opts ...grpc.CallOption) (*Shard, error) + // Retrieves a list of shards. + ListShards(ctx context.Context, in *ListClusterShardsRequest, opts ...grpc.CallOption) (*ListClusterShardsResponse, error) + // Creates a new shard. + AddShard(ctx context.Context, in *AddClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Modifies the specified shard. + UpdateShard(ctx context.Context, in *UpdateClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified shard. + DeleteShard(ctx context.Context, in *DeleteClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type clusterServiceClient struct { + cc *grpc.ClientConn +} + +func NewClusterServiceClient(cc *grpc.ClientConn) ClusterServiceClient { + return &clusterServiceClient{cc} +} + +func (c *clusterServiceClient) Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Backup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Restore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) { + out := new(ListClusterLogsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListLogs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) { + out := new(ListClusterOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) { + out := new(ListClusterBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListBackups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) { + out := new(ListClusterHostsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddHosts", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) GetShard(ctx context.Context, in *GetClusterShardRequest, opts ...grpc.CallOption) (*Shard, error) { + out := new(Shard) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/GetShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListShards(ctx context.Context, in *ListClusterShardsRequest, opts ...grpc.CallOption) (*ListClusterShardsResponse, error) { + out := new(ListClusterShardsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListShards", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) AddShard(ctx context.Context, in *AddClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) UpdateShard(ctx context.Context, in *UpdateClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) DeleteShard(ctx context.Context, in *DeleteClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteShard", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServiceServer is the server API for ClusterService service. +type ClusterServiceServer interface { + // Returns the specified ClickHouse Cluster resource. + // + // To get the list of available ClickHouse Cluster resources, make a [List] request. + Get(context.Context, *GetClusterRequest) (*Cluster, error) + // Retrieves a list of ClickHouse Cluster resources that belong + // to the specified folder. + List(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Creates a ClickHouse cluster in the specified folder. + Create(context.Context, *CreateClusterRequest) (*operation.Operation, error) + // Updates the specified ClickHouse cluster. + Update(context.Context, *UpdateClusterRequest) (*operation.Operation, error) + // Deletes the specified ClickHouse cluster. + Delete(context.Context, *DeleteClusterRequest) (*operation.Operation, error) + // Start the specified ClickHouse cluster. + Start(context.Context, *StartClusterRequest) (*operation.Operation, error) + // Stop the specified ClickHouse cluster. + Stop(context.Context, *StopClusterRequest) (*operation.Operation, error) + // Creates a backup for the specified ClickHouse cluster. + Backup(context.Context, *BackupClusterRequest) (*operation.Operation, error) + // Creates a new ClickHouse cluster using the specified backup. 
+ Restore(context.Context, *RestoreClusterRequest) (*operation.Operation, error) + // Retrieves logs for the specified ClickHouse cluster. + // For more information about logs, see the [Logs](/docs/yandex-mdb-guide/concepts/logs) section in the Developer's Guide. + ListLogs(context.Context, *ListClusterLogsRequest) (*ListClusterLogsResponse, error) + // Retrieves the list of Operation resources for the specified cluster. + ListOperations(context.Context, *ListClusterOperationsRequest) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified ClickHouse cluster. + ListBackups(context.Context, *ListClusterBackupsRequest) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(context.Context, *ListClusterHostsRequest) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(context.Context, *AddClusterHostsRequest) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. + DeleteHosts(context.Context, *DeleteClusterHostsRequest) (*operation.Operation, error) + // Returns the specified shard. + GetShard(context.Context, *GetClusterShardRequest) (*Shard, error) + // Retrieves a list of shards. + ListShards(context.Context, *ListClusterShardsRequest) (*ListClusterShardsResponse, error) + // Creates a new shard. + AddShard(context.Context, *AddClusterShardRequest) (*operation.Operation, error) + // Modifies the specified shard. + UpdateShard(context.Context, *UpdateClusterShardRequest) (*operation.Operation, error) + // Deletes the specified shard. + DeleteShard(context.Context, *DeleteClusterShardRequest) (*operation.Operation, error) +} + +func RegisterClusterServiceServer(s *grpc.Server, srv ClusterServiceServer) { + s.RegisterService(&_ClusterService_serviceDesc, srv) +} + +func _ClusterService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Get(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).List(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Create(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Update(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Delete(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Start(ctx, req.(*StartClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Stop(ctx, req.(*StopClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Backup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackupClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Backup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Backup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Backup(ctx, 
req.(*BackupClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Restore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Restore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/Restore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Restore(ctx, req.(*RestoreClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterLogsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListLogs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListLogs(ctx, req.(*ListClusterLogsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListOperations(ctx, req.(*ListClusterOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListBackups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListBackups(ctx, req.(*ListClusterBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListHosts(ctx, req.(*ListClusterHostsRequest)) + } + return interceptor(ctx, in, info, 
handler) +} + +func _ClusterService_AddHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).AddHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).AddHosts(ctx, req.(*AddClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_DeleteHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).DeleteHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).DeleteHosts(ctx, req.(*DeleteClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_GetShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).GetShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/GetShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).GetShard(ctx, req.(*GetClusterShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterShardsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListShards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/ListShards", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListShards(ctx, req.(*ListClusterShardsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_AddShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddClusterShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).AddShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/AddShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).AddShard(ctx, req.(*AddClusterShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_UpdateShard_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).UpdateShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).UpdateShard(ctx, req.(*UpdateClusterShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_DeleteShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).DeleteShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).DeleteShard(ctx, req.(*DeleteClusterShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.clickhouse.v1.ClusterService", + HandlerType: (*ClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ClusterService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ClusterService_List_Handler, + }, + { + MethodName: "Create", + Handler: _ClusterService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _ClusterService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _ClusterService_Delete_Handler, + }, + { + MethodName: "Start", + Handler: _ClusterService_Start_Handler, + }, + { + MethodName: "Stop", + Handler: _ClusterService_Stop_Handler, + }, + { + MethodName: "Backup", + Handler: _ClusterService_Backup_Handler, + }, + { + MethodName: "Restore", + Handler: _ClusterService_Restore_Handler, + }, + { + MethodName: "ListLogs", + Handler: _ClusterService_ListLogs_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ClusterService_ListOperations_Handler, + }, + { + MethodName: "ListBackups", + Handler: _ClusterService_ListBackups_Handler, + }, + { + MethodName: "ListHosts", + Handler: _ClusterService_ListHosts_Handler, + }, + { + MethodName: "AddHosts", + Handler: _ClusterService_AddHosts_Handler, + }, + { + MethodName: "DeleteHosts", + Handler: _ClusterService_DeleteHosts_Handler, + }, + { + MethodName: "GetShard", + Handler: _ClusterService_GetShard_Handler, + }, + { + MethodName: "ListShards", + Handler: _ClusterService_ListShards_Handler, + }, + { + MethodName: "AddShard", + Handler: _ClusterService_AddShard_Handler, + }, + { + MethodName: "UpdateShard", + Handler: _ClusterService_UpdateShard_Handler, + }, + { + MethodName: "DeleteShard", + Handler: _ClusterService_DeleteShard_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/clickhouse/v1/cluster_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/cluster_service.proto", fileDescriptor_cluster_service_069e868ca1dde37a) +} + +var fileDescriptor_cluster_service_069e868ca1dde37a = []byte{ + // 2577 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4b, 
0x6c, 0x1b, 0xc7, + 0xf9, 0xff, 0x8f, 0x1e, 0x14, 0xf9, 0xd1, 0x96, 0xfd, 0x9f, 0xc8, 0x8e, 0xbc, 0xf1, 0x43, 0xde, + 0x24, 0xb6, 0x4c, 0x9b, 0xa4, 0x48, 0x3d, 0x6c, 0xc9, 0x8f, 0x58, 0x92, 0x65, 0x87, 0x88, 0x6c, + 0xab, 0x2b, 0x39, 0x41, 0x6c, 0xb8, 0xc4, 0x92, 0x3b, 0xa2, 0x08, 0x91, 0xbb, 0x5b, 0xee, 0x52, + 0xb6, 0x64, 0xa4, 0x08, 0xdc, 0xa2, 0x05, 0x7c, 0x2a, 0x50, 0x20, 0x45, 0x5c, 0xa0, 0x40, 0x2f, + 0xed, 0xb1, 0xa8, 0x0e, 0x2e, 0x10, 0x14, 0x3d, 0x04, 0x28, 0xec, 0xa2, 0x40, 0x8b, 0x2a, 0xc7, + 0x5c, 0x83, 0x22, 0xa7, 0x1e, 0x72, 0xec, 0xa1, 0x28, 0x66, 0x66, 0x97, 0xdc, 0xe5, 0x43, 0xbb, + 0x4b, 0xca, 0x75, 0x0a, 0xf4, 0x26, 0xee, 0xcc, 0xf7, 0xcd, 0xef, 0xf7, 0xcd, 0xf7, 0x98, 0xf9, + 0x46, 0x30, 0xb1, 0x29, 0xab, 0x0a, 0x79, 0x98, 0xcc, 0x97, 0xb4, 0xaa, 0x92, 0x2c, 0x2b, 0xb9, + 0x64, 0xbe, 0x54, 0xcc, 0xaf, 0xaf, 0x69, 0x55, 0x83, 0x24, 0x37, 0x52, 0xc9, 0x7c, 0xa9, 0x6a, + 0x98, 0xa4, 0x92, 0x35, 0x48, 0x65, 0xa3, 0x98, 0x27, 0x09, 0xbd, 0xa2, 0x99, 0x1a, 0x3e, 0xce, + 0xa5, 0x12, 0x4c, 0x2a, 0x51, 0x56, 0x72, 0x89, 0xba, 0x54, 0x62, 0x23, 0x25, 0x1c, 0x2d, 0x68, + 0x5a, 0xa1, 0x44, 0x92, 0xb2, 0x5e, 0x4c, 0xca, 0xaa, 0xaa, 0x99, 0xb2, 0x59, 0xd4, 0x54, 0x83, + 0x4b, 0x0b, 0x23, 0xd6, 0x28, 0xfb, 0x95, 0xab, 0xae, 0x26, 0x57, 0x8b, 0xa4, 0xa4, 0x64, 0xcb, + 0xb2, 0xb1, 0x6e, 0xcd, 0x38, 0xd1, 0x38, 0xc3, 0x2c, 0x96, 0x89, 0x61, 0xca, 0x65, 0xdd, 0x9a, + 0x70, 0xbc, 0x71, 0xc2, 0x83, 0x8a, 0xac, 0xeb, 0xa4, 0x62, 0x2f, 0x21, 0x58, 0xb4, 0x28, 0x00, + 0x4d, 0x27, 0x15, 0xb6, 0xbe, 0x35, 0x76, 0xca, 0x45, 0xb9, 0x36, 0xda, 0x34, 0xef, 0x98, 0x6b, + 0xde, 0x86, 0x5c, 0x2a, 0x2a, 0xce, 0xe1, 0xb3, 0x1e, 0x96, 0xcb, 0xc9, 0xf9, 0xf5, 0xaa, 0x8d, + 0xf7, 0x9c, 0x3f, 0x33, 0x5b, 0xb3, 0xe3, 0x1e, 0xb3, 0x15, 0xd9, 0x94, 0x73, 0xb2, 0x61, 0xed, + 0x86, 0x70, 0xc6, 0x63, 0x7a, 0xd5, 0xa8, 0x69, 0x9e, 0xf2, 0xc2, 0xa1, 0xa9, 0xab, 0xc5, 0x82, + 0xe3, 0x23, 0x97, 0x13, 0xaf, 0xc2, 0xff, 0xdf, 0x20, 0xe6, 0x3c, 0x47, 0x29, 0x91, 0xef, 0x55, + 0x89, 0x61, 0xe2, 0xb3, 0x00, 0xb6, 0x7b, 0x14, 0x95, 0x61, 0x34, 0x82, 0x46, 0x23, 0x73, 0xfb, + 0xbe, 0x7e, 0x9e, 0x42, 0x4f, 0x5e, 0xa4, 0xfa, 0x2e, 0x5d, 0x9e, 0x1c, 0x93, 0x22, 0xd6, 0x78, + 0x46, 0x11, 0x7f, 0x87, 0xe0, 0xb5, 0xc5, 0xa2, 0x61, 0xeb, 0x30, 0x6c, 0x25, 0x67, 0x20, 0xb2, + 0xaa, 0x95, 0x94, 0xf6, 0x3a, 0xc2, 0x7c, 0x38, 0xa3, 0xe0, 0xd3, 0x10, 0xd1, 0xe5, 0x02, 0xc9, + 0x1a, 0xc5, 0x2d, 0x32, 0xdc, 0x33, 0x82, 0x46, 0x7b, 0xe7, 0xe0, 0x9f, 0xcf, 0x53, 0xa1, 0x4b, + 0x97, 0x53, 0x63, 0x63, 0x63, 0x52, 0x98, 0x0e, 0x2e, 0x17, 0xb7, 0x08, 0x1e, 0x05, 0x60, 0x13, + 0x4d, 0x6d, 0x9d, 0xa8, 0xc3, 0xbd, 0x4c, 0x69, 0xe4, 0xc9, 0x8b, 0x54, 0x3f, 0x9b, 0x29, 0x31, + 0x2d, 0x2b, 0x74, 0x0c, 0x8b, 0x10, 0x5a, 0x2d, 0x96, 0x4c, 0x52, 0x19, 0xee, 0x63, 0xb3, 0xe0, + 0xc9, 0x8b, 0x9a, 0x3e, 0x6b, 0x44, 0xfc, 0x01, 0x82, 0x21, 0x37, 0x72, 0x43, 0xd7, 0x54, 0x83, + 0xe0, 0x79, 0x08, 0x5b, 0xfc, 0x8c, 0x61, 0x34, 0xd2, 0x3b, 0x1a, 0x4d, 0x9f, 0x4e, 0xec, 0x1e, + 0x18, 0x09, 0xdb, 0x82, 0x35, 0x41, 0x7c, 0x0a, 0x0e, 0xa8, 0xe4, 0xa1, 0x99, 0x75, 0x00, 0xa6, + 0xd4, 0x22, 0xd2, 0x7e, 0xfa, 0x79, 0xc9, 0x46, 0x2a, 0xfe, 0x64, 0x00, 0x86, 0xe6, 0x2b, 0x44, + 0x36, 0x49, 0xc3, 0x2e, 0x04, 0x30, 0x60, 0x1a, 0xfa, 0x54, 0xb9, 0xcc, 0x6d, 0x17, 0x99, 0x3b, + 0x4e, 0x67, 0x7d, 0xf3, 0x3c, 0x35, 0x78, 0x4f, 0x8e, 0x6f, 0xcd, 0xc6, 0xef, 0x8e, 0xc5, 0xa7, + 0xb3, 0xf1, 0xfb, 0x31, 0x2e, 0x37, 0x35, 0x2e, 0xb1, 0xb9, 0xf8, 0x2c, 0x44, 0x15, 0x62, 0xe4, + 0x2b, 0x45, 0x9d, 0xfa, 0xbe, 0xdb, 0x98, 0xe9, 0xc9, 0x29, 0xc9, 0x39, 0x8a, 0x3f, 0x45, 0x10, + 0x2a, 
0xc9, 0x39, 0x52, 0x32, 0x86, 0xfb, 0x98, 0x41, 0xae, 0x7a, 0x1a, 0xa4, 0x05, 0xa5, 0xc4, + 0x22, 0x53, 0xb1, 0xa0, 0x9a, 0x95, 0xcd, 0xb9, 0x77, 0xbe, 0x79, 0x9e, 0x8a, 0xde, 0x8b, 0x67, + 0xc7, 0xe2, 0xd3, 0x72, 0x7c, 0xeb, 0x7e, 0xec, 0x31, 0x87, 0x37, 0x61, 0xc3, 0xdc, 0x7e, 0x91, + 0x0a, 0x09, 0xf6, 0x5f, 0x18, 0x1f, 0xa4, 0x64, 0xee, 0x3b, 0xe6, 0x4b, 0x16, 0x20, 0x7c, 0x0f, + 0xa2, 0x44, 0xdd, 0x28, 0x56, 0x34, 0xb5, 0x4c, 0x54, 0x73, 0xb8, 0x7f, 0x04, 0x8d, 0x0e, 0xa6, + 0xc7, 0x7d, 0x6e, 0x58, 0x62, 0xa1, 0x2e, 0x3a, 0xd7, 0x47, 0x0d, 0x27, 0x39, 0xb5, 0xe1, 0xef, + 0x40, 0x94, 0x87, 0x4e, 0xd6, 0xd0, 0x49, 0x7e, 0x38, 0x34, 0x82, 0x46, 0xa3, 0xe9, 0x98, 0xa7, + 0x72, 0x26, 0xb2, 0xac, 0x93, 0xbc, 0xa5, 0x13, 0xf2, 0xb5, 0x2f, 0xf8, 0x1e, 0x0c, 0xda, 0x71, + 0xce, 0x94, 0x1a, 0xc3, 0x03, 0xcc, 0xa4, 0xe7, 0xbc, 0xb4, 0x5e, 0xb3, 0xa4, 0x98, 0xde, 0xd0, + 0xe3, 0x17, 0xa9, 0x9e, 0x2b, 0x63, 0xd2, 0x7e, 0xc5, 0xf1, 0xd5, 0xc0, 0xb7, 0x01, 0x68, 0x56, + 0xb0, 0x14, 0x87, 0x99, 0xe2, 0x51, 0x2f, 0xc5, 0x77, 0x0c, 0x52, 0x71, 0x29, 0x8d, 0x54, 0xad, + 0x2f, 0x4c, 0xe1, 0x9a, 0x66, 0x98, 0x96, 0xc2, 0x88, 0x3f, 0x85, 0xef, 0x6a, 0x86, 0xe9, 0x56, + 0xb8, 0x66, 0x7d, 0x31, 0x68, 0x72, 0x51, 0x89, 0xf9, 0x40, 0xab, 0xac, 0x53, 0xbf, 0x86, 0x56, + 0xc9, 0xc5, 0x1a, 0xcf, 0x28, 0x78, 0x1a, 0xc0, 0x58, 0x93, 0x2b, 0x4a, 0x96, 0xb9, 0x77, 0x94, + 0x4d, 0x16, 0x76, 0x71, 0xed, 0x08, 0x9b, 0x7d, 0x4b, 0x2e, 0x13, 0x61, 0x1a, 0xa2, 0x0e, 0x77, + 0xc3, 0x07, 0xa1, 0x77, 0x9d, 0x6c, 0xf2, 0x38, 0x92, 0xe8, 0x9f, 0x78, 0x08, 0xfa, 0x37, 0xe4, + 0x52, 0xd5, 0x8a, 0x1a, 0x89, 0xff, 0x98, 0xe9, 0xb9, 0x80, 0xc4, 0x29, 0x38, 0xe4, 0x72, 0xdf, + 0x9b, 0xc4, 0x94, 0xa9, 0x99, 0xf1, 0xb1, 0xe6, 0xc4, 0xe8, 0x4c, 0x85, 0x7f, 0xeb, 0x85, 0xa1, + 0x3b, 0xba, 0xd2, 0x1c, 0xca, 0x41, 0x12, 0x2a, 0xbe, 0x08, 0xd1, 0x2a, 0x53, 0xc2, 0x0a, 0x27, + 0x43, 0x17, 0x4d, 0x0b, 0x09, 0x5e, 0x18, 0x13, 0x76, 0x61, 0x4c, 0x5c, 0xa7, 0xb5, 0xf5, 0xa6, + 0x6c, 0xac, 0x4b, 0xc0, 0xa7, 0xd3, 0xbf, 0x5f, 0x76, 0x54, 0xb7, 0x62, 0xf7, 0x72, 0xa2, 0xfa, + 0x3d, 0x77, 0xe0, 0xf5, 0x07, 0x0d, 0x3c, 0x67, 0xc8, 0x75, 0xe9, 0x0b, 0x2e, 0xd2, 0x7e, 0x7d, + 0x61, 0x1e, 0x86, 0xae, 0x91, 0x12, 0xe9, 0xca, 0x15, 0xe8, 0xe2, 0x2e, 0x25, 0x7e, 0x17, 0x9f, + 0x83, 0xd7, 0x96, 0x4d, 0xb9, 0xd2, 0x55, 0x5d, 0x9f, 0x84, 0x21, 0xa7, 0x0e, 0xbf, 0x4b, 0xcf, + 0x02, 0x5e, 0x36, 0x35, 0xbd, 0x9b, 0x95, 0x27, 0x28, 0xfa, 0x9a, 0x8a, 0x00, 0x06, 0x9f, 0x63, + 0x27, 0xb3, 0x2e, 0x0d, 0xee, 0x52, 0xe2, 0x77, 0xf1, 0x5f, 0xf6, 0xc3, 0x21, 0x89, 0x18, 0xa6, + 0x56, 0x69, 0xdc, 0xef, 0x93, 0x10, 0xe1, 0x07, 0xc6, 0xfa, 0xea, 0xbc, 0x24, 0x84, 0xf9, 0xe7, + 0xff, 0x44, 0xf5, 0x7e, 0xda, 0x18, 0xe7, 0xb3, 0x5e, 0x71, 0xd4, 0x92, 0xcb, 0xff, 0xca, 0x37, + 0x2d, 0xdf, 0xee, 0x82, 0x38, 0xb0, 0xd7, 0x05, 0x31, 0xbc, 0x6b, 0x41, 0xec, 0x26, 0x93, 0xad, + 0xc0, 0x61, 0xf7, 0xb6, 0xfa, 0x74, 0x6e, 0xfc, 0x86, 0xd3, 0x85, 0xb9, 0xda, 0x9a, 0xf3, 0x8a, + 0x5f, 0x20, 0x88, 0x2c, 0x6a, 0x05, 0x89, 0xe4, 0xb5, 0x8a, 0x82, 0x2f, 0x40, 0xa4, 0x76, 0xa3, + 0x63, 0x8a, 0x5a, 0x55, 0xae, 0x15, 0x7b, 0x86, 0x54, 0x9f, 0x8c, 0x97, 0x60, 0xa0, 0x4c, 0x0c, + 0x43, 0x2e, 0x50, 0xe4, 0xd4, 0xa6, 0x53, 0x5e, 0x36, 0xad, 0xad, 0x9a, 0xb8, 0xc9, 0x05, 0x99, + 0x49, 0x24, 0x5b, 0x8d, 0x30, 0x03, 0xfb, 0x9c, 0x03, 0x81, 0x6c, 0xf5, 0xd7, 0x5e, 0x38, 0xec, + 0xb8, 0x1a, 0x2c, 0x6a, 0x05, 0xa3, 0xa3, 0x5a, 0xfe, 0x26, 0xec, 0xcf, 0x6b, 0xa5, 0x6a, 0x59, + 0xcd, 0x5a, 0xb7, 0x11, 0xca, 0x2d, 0x22, 0xed, 0xe3, 0x1f, 0xaf, 0xb3, 0x6f, 0x98, 0xc0, 0x3e, + 0xeb, 0x16, 0x9e, 0x35, 0x37, 
0x75, 0xc2, 0x82, 0x79, 0x30, 0x3d, 0xe7, 0xc9, 0xbf, 0x25, 0xbe, + 0xc4, 0x32, 0x57, 0xb5, 0xb2, 0xa9, 0x13, 0x29, 0x6a, 0xd4, 0x7f, 0xe0, 0xf3, 0x10, 0x59, 0xad, + 0x68, 0xe5, 0x2c, 0xb5, 0x39, 0xbb, 0x15, 0xed, 0xbe, 0x37, 0x61, 0x3a, 0x99, 0xfe, 0xc4, 0xe3, + 0x30, 0x60, 0x6a, 0x5c, 0xac, 0xdf, 0x53, 0x2c, 0x64, 0x6a, 0x4c, 0xc8, 0x75, 0xa7, 0x0b, 0xf9, + 0xbe, 0xd3, 0x0d, 0xb4, 0xbf, 0xd3, 0x89, 0x17, 0x21, 0xea, 0x20, 0x87, 0x8f, 0xc2, 0xf0, 0xf2, + 0x82, 0xf4, 0x7e, 0x66, 0x7e, 0x21, 0xbb, 0xf2, 0xe1, 0xd2, 0x42, 0xf6, 0xce, 0xad, 0xe5, 0xa5, + 0x85, 0xf9, 0xcc, 0xf5, 0xcc, 0xc2, 0xb5, 0x83, 0xff, 0x87, 0x07, 0x01, 0xe6, 0x17, 0x33, 0xf3, + 0xef, 0xbd, 0x7b, 0xfb, 0xce, 0xf2, 0xc2, 0x41, 0x24, 0x7e, 0x8c, 0xe0, 0xf5, 0x26, 0x8b, 0x59, + 0xf7, 0xbd, 0xcb, 0xd0, 0x57, 0xd2, 0x0a, 0xf6, 0x5d, 0xef, 0x8c, 0x6f, 0xc7, 0x93, 0x98, 0x98, + 0xef, 0x9b, 0xde, 0x2f, 0x10, 0x1c, 0x75, 0x40, 0xb8, 0x6d, 0xb7, 0x25, 0x3a, 0x73, 0xad, 0xbd, + 0xbf, 0x34, 0x8b, 0x4f, 0x10, 0x1c, 0x6b, 0x03, 0xd0, 0xb2, 0xd4, 0x2c, 0x40, 0xad, 0x9b, 0x62, + 0xdb, 0xeb, 0xa4, 0xdb, 0x5e, 0xf5, 0x6e, 0x4b, 0x4d, 0x5e, 0x72, 0x08, 0xf9, 0xb6, 0xd6, 0xcf, + 0x11, 0x1c, 0x71, 0x80, 0xe1, 0x65, 0xf9, 0x5b, 0x63, 0xaa, 0x1f, 0x21, 0x10, 0x5a, 0xa1, 0xb3, + 0xec, 0x74, 0x15, 0x06, 0x78, 0x86, 0xb4, 0x8d, 0x74, 0xca, 0xcb, 0xa9, 0xb8, 0x06, 0xc9, 0x16, + 0xf3, 0x6d, 0xa6, 0x4f, 0xdd, 0x7e, 0x4d, 0x0b, 0xcd, 0xb7, 0xc6, 0x48, 0xdf, 0x87, 0xe1, 0x66, + 0x68, 0x96, 0x85, 0x66, 0xa0, 0x9f, 0x96, 0x40, 0xdb, 0x3e, 0x6f, 0xf9, 0xa9, 0xa0, 0x12, 0x17, + 0xf1, 0x6d, 0x9b, 0x4f, 0x10, 0x1c, 0x9e, 0x55, 0x94, 0xae, 0x4d, 0xe3, 0x2e, 0xf9, 0x3d, 0x5d, + 0x97, 0x7c, 0xf1, 0x03, 0x78, 0xbd, 0x01, 0x97, 0xdf, 0x5a, 0x7c, 0xcc, 0x82, 0x42, 0x0f, 0x81, + 0x86, 0x55, 0x4d, 0x98, 0x62, 0x7a, 0xe7, 0x35, 0xc4, 0x87, 0x70, 0xc4, 0x75, 0x61, 0xe8, 0x9c, + 0x73, 0xa2, 0x79, 0xa1, 0xb9, 0x03, 0x9c, 0x89, 0x7d, 0xca, 0x1c, 0x77, 0xae, 0x7c, 0x17, 0x84, + 0xe6, 0x95, 0xf7, 0x88, 0xd5, 0x0f, 0x11, 0x1c, 0xae, 0x77, 0x29, 0x97, 0xe9, 0x15, 0xbf, 0x23, + 0x4e, 0x97, 0x5d, 0xdd, 0x04, 0x7f, 0xc7, 0xed, 0x7a, 0x47, 0x41, 0x7c, 0x8a, 0x5c, 0xfe, 0xcc, + 0x70, 0xec, 0x6d, 0xac, 0x8d, 0xc5, 0x3b, 0x8e, 0xb5, 0xc7, 0xee, 0x74, 0x69, 0x83, 0xab, 0x55, + 0xb8, 0x10, 0xe3, 0x61, 0x87, 0xdb, 0xdb, 0x5e, 0xde, 0xcb, 0x8d, 0x6c, 0x09, 0xf9, 0x0e, 0xb8, + 0x5f, 0xf7, 0x38, 0x03, 0xee, 0x55, 0x6d, 0x14, 0x5e, 0x72, 0x9f, 0xfa, 0x7b, 0xd9, 0xa1, 0x25, + 0xe9, 0x8b, 0x72, 0xeb, 0x06, 0x42, 0x43, 0x06, 0xe8, 0xdb, 0xe3, 0x0c, 0xc0, 0x56, 0x0e, 0x10, + 0x2b, 0x8d, 0xb6, 0x71, 0x3a, 0xe9, 0x27, 0x3d, 0x70, 0xc4, 0xd5, 0xb0, 0x78, 0x65, 0xbb, 0xd0, + 0xd0, 0xc7, 0xea, 0x0d, 0xd4, 0xc7, 0x6a, 0xd8, 0xc2, 0xbe, 0xae, 0xb7, 0x90, 0x26, 0xa8, 0x66, + 0xbb, 0xec, 0x91, 0xd1, 0x7f, 0x8c, 0x1a, 0xf2, 0xee, 0x2b, 0xcb, 0x51, 0x8d, 0x69, 0x78, 0x2f, + 0x59, 0xfe, 0x0b, 0x41, 0xd8, 0xf6, 0x69, 0x7c, 0x12, 0x06, 0xb6, 0x34, 0x95, 0xd4, 0x19, 0x85, + 0x6b, 0x6c, 0x42, 0x74, 0x20, 0xa3, 0xe0, 0x79, 0xe8, 0x63, 0xf7, 0x99, 0x1e, 0x76, 0x9f, 0x39, + 0xe3, 0x27, 0x5c, 0x12, 0xf4, 0x64, 0x6f, 0x5d, 0xba, 0x99, 0x30, 0x7e, 0x1b, 0x22, 0x46, 0x35, + 0xa7, 0x12, 0x93, 0xae, 0xd4, 0xdb, 0xb0, 0x52, 0x98, 0x0f, 0x65, 0x14, 0x3c, 0x0a, 0x07, 0x65, + 0xc3, 0x28, 0x16, 0xd4, 0xac, 0x5e, 0xcd, 0x95, 0x8a, 0xf9, 0x6c, 0x51, 0x67, 0x4e, 0x13, 0x96, + 0x06, 0xf9, 0xf7, 0x25, 0xf6, 0x39, 0xa3, 0x37, 0xb4, 0x94, 0xfb, 0x03, 0xb4, 0x94, 0xc5, 0x67, + 0xbd, 0x00, 0x75, 0xef, 0xc2, 0x77, 0xa8, 0x35, 0x6d, 0x06, 0xd6, 0x6d, 0x77, 0xd2, 0x7f, 0x6f, + 0x21, 0x31, 0x5f, 0x1b, 0x90, 0x1c, 0x8a, 0xb0, 0x04, 
0x91, 0x2d, 0x4d, 0x5b, 0x27, 0x44, 0x67, + 0xf7, 0x45, 0xaa, 0x75, 0x22, 0x80, 0xd6, 0xbb, 0xb6, 0xac, 0x54, 0x57, 0x23, 0xfc, 0x0a, 0x01, + 0xd4, 0x97, 0xc3, 0xb7, 0x21, 0xc4, 0x23, 0xc3, 0x42, 0x7d, 0xde, 0x4b, 0x3f, 0x9f, 0xed, 0x40, + 0xcc, 0x17, 0x94, 0x2c, 0x35, 0xf8, 0x06, 0x44, 0x2a, 0xc4, 0xd0, 0xaa, 0x95, 0x3c, 0xab, 0xdf, + 0xc8, 0xcf, 0x35, 0x4a, 0xb2, 0x05, 0xa4, 0xba, 0xac, 0xb0, 0x02, 0x91, 0x1a, 0x01, 0xb7, 0x56, + 0xd4, 0xb9, 0x56, 0xf1, 0xcf, 0x3d, 0x70, 0xa0, 0x21, 0x37, 0xe0, 0xbb, 0x2d, 0x76, 0x6f, 0x26, + 0x60, 0x82, 0x69, 0xb3, 0x85, 0xc2, 0x97, 0xff, 0x25, 0xe6, 0xc6, 0xe3, 0x10, 0x7a, 0x40, 0x8a, + 0x85, 0x35, 0xd3, 0x4a, 0xcf, 0x6f, 0x34, 0xa5, 0xe7, 0x8c, 0x6a, 0x4e, 0x4d, 0xbc, 0x2f, 0x97, + 0xaa, 0x44, 0xb2, 0xa6, 0xa6, 0xff, 0x7e, 0x1c, 0x06, 0xed, 0xf4, 0xc2, 0xef, 0xe3, 0xf8, 0x67, + 0x08, 0x7a, 0x6f, 0x10, 0x13, 0xa7, 0xbc, 0x50, 0x34, 0x3d, 0x36, 0x0b, 0x7e, 0x9f, 0x56, 0xc5, + 0x89, 0xc7, 0x5f, 0x7c, 0xf5, 0xd3, 0x9e, 0x04, 0x3e, 0x97, 0x2c, 0xcb, 0xaa, 0x5c, 0x20, 0x4a, + 0xbc, 0xe5, 0x53, 0xbb, 0x91, 0x7c, 0x54, 0xcf, 0x71, 0x1f, 0xe1, 0xa7, 0x08, 0xfa, 0xe8, 0xb9, + 0x08, 0x8f, 0x07, 0xe8, 0xa7, 0xd8, 0xa7, 0x3a, 0x61, 0x22, 0x98, 0x10, 0x3f, 0x6d, 0x89, 0xa7, + 0x19, 0xd2, 0x93, 0xf8, 0x84, 0x07, 0x52, 0xfc, 0x5b, 0x04, 0x21, 0xfe, 0xd2, 0x84, 0x27, 0x3a, + 0x79, 0x50, 0x15, 0xbc, 0xef, 0xde, 0xe2, 0xad, 0xed, 0x9d, 0xd8, 0x48, 0xbb, 0x07, 0xad, 0x01, + 0xeb, 0x03, 0x03, 0xfc, 0x96, 0xe8, 0x05, 0x78, 0x06, 0xc5, 0xf0, 0x1f, 0x10, 0x84, 0x78, 0x21, + 0xf5, 0xc6, 0xdc, 0xea, 0xb9, 0xc8, 0x0f, 0xe6, 0x7b, 0x1c, 0x73, 0xeb, 0x87, 0x17, 0x17, 0xe6, + 0x54, 0x3a, 0x90, 0x3b, 0x50, 0x02, 0x7f, 0x42, 0x10, 0xe2, 0x35, 0xd2, 0x9b, 0x40, 0xab, 0x27, + 0x1c, 0x3f, 0x04, 0x56, 0xb7, 0x77, 0x62, 0x89, 0x76, 0x8f, 0x37, 0x87, 0x1a, 0x63, 0x6c, 0xa1, + 0xac, 0x9b, 0x9b, 0xdc, 0xbb, 0x63, 0xc1, 0xbc, 0xfb, 0x33, 0x04, 0xfd, 0xec, 0x95, 0xc6, 0xdb, + 0xbd, 0x5b, 0x3c, 0x08, 0xf9, 0x61, 0xf2, 0xe1, 0xf6, 0x4e, 0xec, 0x44, 0x9b, 0xa7, 0x20, 0xd7, + 0x4e, 0x8c, 0x89, 0x89, 0xf6, 0xff, 0xff, 0xd2, 0xb0, 0x0b, 0x06, 0x83, 0xfc, 0x0c, 0x41, 0xdf, + 0xb2, 0xa9, 0xe9, 0x38, 0xed, 0x8d, 0xbd, 0xf1, 0x45, 0xc9, 0x0f, 0xf4, 0x0f, 0xb6, 0x77, 0x62, + 0xc7, 0x5b, 0xbf, 0x25, 0xb9, 0x90, 0x27, 0xc5, 0x78, 0x00, 0xe4, 0x9a, 0x8e, 0x3f, 0x47, 0x10, + 0xe2, 0xfd, 0x1a, 0x6f, 0x0f, 0x6a, 0xf5, 0x26, 0xe5, 0x07, 0x7c, 0x96, 0x87, 0x40, 0xeb, 0xd7, + 0x28, 0x17, 0xfc, 0x49, 0x71, 0x3c, 0x50, 0x08, 0xf0, 0x0e, 0x13, 0xfe, 0x3d, 0x82, 0x01, 0xeb, + 0x3d, 0x00, 0x4f, 0x76, 0xf4, 0x1e, 0xe4, 0x77, 0x0f, 0x4e, 0xb6, 0x7d, 0x78, 0x70, 0xf1, 0x38, + 0x27, 0x9e, 0xf6, 0x4a, 0x3f, 0x15, 0xae, 0x86, 0x46, 0xf1, 0x33, 0x04, 0x61, 0x9a, 0x7c, 0x17, + 0xb5, 0x82, 0x81, 0xa7, 0x3a, 0xeb, 0x95, 0x0b, 0xe7, 0x03, 0xcb, 0x59, 0x19, 0x7e, 0x9a, 0x21, + 0x1e, 0xc7, 0xa9, 0x40, 0x96, 0x67, 0xdd, 0xe2, 0xbf, 0x20, 0x18, 0xa4, 0x6a, 0xeb, 0xdd, 0x55, + 0x7c, 0x29, 0x00, 0x8c, 0xa6, 0xae, 0xb1, 0x70, 0xb9, 0x43, 0x69, 0x8b, 0xca, 0x3b, 0x8c, 0xca, + 0x34, 0x3e, 0x1f, 0x84, 0x4a, 0xd2, 0xd1, 0xd0, 0xfd, 0x1c, 0x41, 0x94, 0x2e, 0x61, 0xf5, 0x40, + 0xf1, 0x74, 0x00, 0x3c, 0xee, 0xae, 0xae, 0x30, 0xd3, 0x89, 0xa8, 0xc5, 0xe3, 0x12, 0xe3, 0x31, + 0x85, 0x27, 0x02, 0xf1, 0xb0, 0xdb, 0xad, 0x9f, 0x21, 0x88, 0x50, 0xe5, 0xac, 0x6d, 0x85, 0x83, + 0xf8, 0x85, 0xb3, 0xc5, 0x26, 0x5c, 0x08, 0x2e, 0x68, 0xc1, 0x9f, 0x61, 0xf0, 0x27, 0x70, 0x3a, + 0x10, 0x7c, 0xde, 0x0f, 0xfd, 0x0a, 0x41, 0x78, 0x56, 0x51, 0x38, 0x76, 0xcf, 0x58, 0x68, 0xdd, + 0x11, 0xf5, 0x13, 0xcc, 0x1f, 0x6d, 0xef, 0xc4, 0xc6, 0xda, 0xb7, 0x2e, 0x77, 
0xa9, 0x6b, 0xf3, + 0xe2, 0x95, 0xe0, 0xbc, 0x66, 0x72, 0xb2, 0x99, 0x5f, 0xe3, 0x07, 0x17, 0x1a, 0xf2, 0xff, 0x40, + 0x10, 0xe5, 0x15, 0x95, 0x33, 0x9d, 0x0e, 0x54, 0xbd, 0x83, 0x92, 0xfd, 0x18, 0x6d, 0xef, 0xc4, + 0xc6, 0x77, 0xed, 0x6a, 0xbe, 0x1c, 0xc2, 0x7c, 0x41, 0x4a, 0xf8, 0x37, 0x08, 0xc2, 0x37, 0x88, + 0xc9, 0x2e, 0x1d, 0xde, 0xfb, 0xda, 0xba, 0x43, 0x2a, 0xf8, 0x6b, 0xf5, 0x89, 0xd7, 0x18, 0xec, + 0x2b, 0xf8, 0x92, 0xdf, 0x52, 0x98, 0xe4, 0xbd, 0xc1, 0xe4, 0xa3, 0xfa, 0x6d, 0x9a, 0x9d, 0x47, + 0x80, 0xba, 0x38, 0x6f, 0x3f, 0xe2, 0x20, 0xe1, 0xe0, 0x6a, 0xa7, 0x0a, 0xd3, 0x1d, 0x48, 0x5a, + 0x91, 0x74, 0x9e, 0x1f, 0x0c, 0x71, 0x32, 0x20, 0x13, 0xfc, 0x47, 0x1e, 0x46, 0x3e, 0xcd, 0xdd, + 0xba, 0xcf, 0xe9, 0xc7, 0xb3, 0xbe, 0xcb, 0x4a, 0x7b, 0xdb, 0xfe, 0x5f, 0x3f, 0xfb, 0xc9, 0xd3, + 0x81, 0x18, 0x94, 0x04, 0x75, 0x9b, 0x2f, 0x11, 0x44, 0xf9, 0xd1, 0x99, 0x53, 0x99, 0x0e, 0x74, + 0x4c, 0x0f, 0xca, 0x46, 0xdd, 0xde, 0x89, 0xbd, 0xb9, 0x6b, 0x6f, 0xcd, 0x41, 0x68, 0x36, 0xdd, + 0x95, 0x7f, 0x51, 0x76, 0x5f, 0xd7, 0xb2, 0x80, 0x4f, 0x76, 0x6d, 0x1b, 0x73, 0x7e, 0xd8, 0x3d, + 0x6a, 0x91, 0x04, 0xdc, 0xec, 0x76, 0x49, 0x02, 0x57, 0x62, 0x5d, 0xb1, 0x9d, 0xbb, 0x7d, 0xf7, + 0x66, 0xa1, 0x68, 0xae, 0x55, 0x73, 0x89, 0xbc, 0x56, 0x4e, 0x72, 0xac, 0x71, 0xfe, 0x1f, 0xde, + 0x05, 0x2d, 0x5e, 0x20, 0x2a, 0x5b, 0x33, 0xb9, 0xfb, 0xbf, 0x7e, 0x5f, 0xac, 0xff, 0xca, 0x85, + 0x98, 0xc0, 0xf8, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x2f, 0xbf, 0x54, 0x1d, 0x30, 0x00, + 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.pb.go new file mode 100644 index 000000000..c30c428c3 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.pb.go @@ -0,0 +1,1867 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ClickhouseConfig_LogLevel int32 + +const ( + ClickhouseConfig_LOG_LEVEL_UNSPECIFIED ClickhouseConfig_LogLevel = 0 + ClickhouseConfig_TRACE ClickhouseConfig_LogLevel = 1 + ClickhouseConfig_DEBUG ClickhouseConfig_LogLevel = 2 + ClickhouseConfig_INFORMATION ClickhouseConfig_LogLevel = 3 + ClickhouseConfig_WARNING ClickhouseConfig_LogLevel = 4 + ClickhouseConfig_ERROR ClickhouseConfig_LogLevel = 5 +) + +var ClickhouseConfig_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "TRACE", + 2: "DEBUG", + 3: "INFORMATION", + 4: "WARNING", + 5: "ERROR", +} +var ClickhouseConfig_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "TRACE": 1, + "DEBUG": 2, + "INFORMATION": 3, + "WARNING": 4, + "ERROR": 5, +} + +func (x ClickhouseConfig_LogLevel) String() string { + return proto.EnumName(ClickhouseConfig_LogLevel_name, int32(x)) +} +func (ClickhouseConfig_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 0} +} + +type ClickhouseConfig_Compression_Method int32 + +const ( + ClickhouseConfig_Compression_METHOD_UNSPECIFIED ClickhouseConfig_Compression_Method = 0 + // [LZ4 compression algorithm](https://lz4.github.io/lz4/). + ClickhouseConfig_Compression_LZ4 ClickhouseConfig_Compression_Method = 1 + // [Zstandard compression algorithm](https://facebook.github.io/zstd/). + ClickhouseConfig_Compression_ZSTD ClickhouseConfig_Compression_Method = 2 +) + +var ClickhouseConfig_Compression_Method_name = map[int32]string{ + 0: "METHOD_UNSPECIFIED", + 1: "LZ4", + 2: "ZSTD", +} +var ClickhouseConfig_Compression_Method_value = map[string]int32{ + "METHOD_UNSPECIFIED": 0, + "LZ4": 1, + "ZSTD": 2, +} + +func (x ClickhouseConfig_Compression_Method) String() string { + return proto.EnumName(ClickhouseConfig_Compression_Method_name, int32(x)) +} +func (ClickhouseConfig_Compression_Method) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 1, 0} +} + +type ClickhouseConfig_ExternalDictionary_Layout_Type int32 + +const ( + ClickhouseConfig_ExternalDictionary_Layout_TYPE_UNSPECIFIED ClickhouseConfig_ExternalDictionary_Layout_Type = 0 + // The entire dictionary is stored in memory in the form of flat arrays. + // Available for all dictionary sources. + ClickhouseConfig_ExternalDictionary_Layout_FLAT ClickhouseConfig_ExternalDictionary_Layout_Type = 1 + // The entire dictionary is stored in memory in the form of a hash table. + // Available for all dictionary sources. + ClickhouseConfig_ExternalDictionary_Layout_HASHED ClickhouseConfig_ExternalDictionary_Layout_Type = 2 + // Similar to HASHED, to be used with composite keys. + // Available for all dictionary sources. + ClickhouseConfig_ExternalDictionary_Layout_COMPLEX_KEY_HASHED ClickhouseConfig_ExternalDictionary_Layout_Type = 3 + // The entire dictionary is stored in memory in the form of a hash table, + // with an ordered array of ranges and their corresponding values. + // Available for all dictionary sources. + ClickhouseConfig_ExternalDictionary_Layout_RANGE_HASHED ClickhouseConfig_ExternalDictionary_Layout_Type = 4 + // The dictionary is stored in a cache with a set number of cells. + // Available for MySQL, ClickHouse and HTTP dictionary sources. + ClickhouseConfig_ExternalDictionary_Layout_CACHE ClickhouseConfig_ExternalDictionary_Layout_Type = 5 + // Similar to CACHE, to be used with composite keys. 
+ // Available for MySQL, ClickHouse and HTTP dictionary sources. + ClickhouseConfig_ExternalDictionary_Layout_COMPLEX_KEY_CACHE ClickhouseConfig_ExternalDictionary_Layout_Type = 6 +) + +var ClickhouseConfig_ExternalDictionary_Layout_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "FLAT", + 2: "HASHED", + 3: "COMPLEX_KEY_HASHED", + 4: "RANGE_HASHED", + 5: "CACHE", + 6: "COMPLEX_KEY_CACHE", +} +var ClickhouseConfig_ExternalDictionary_Layout_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "FLAT": 1, + "HASHED": 2, + "COMPLEX_KEY_HASHED": 3, + "RANGE_HASHED": 4, + "CACHE": 5, + "COMPLEX_KEY_CACHE": 6, +} + +func (x ClickhouseConfig_ExternalDictionary_Layout_Type) String() string { + return proto.EnumName(ClickhouseConfig_ExternalDictionary_Layout_Type_name, int32(x)) +} +func (ClickhouseConfig_ExternalDictionary_Layout_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 5, 0} +} + +// ClickHouse configuration options. Detailed description for each set of options +// is available in [ClickHouse documentation](https://clickhouse.yandex/docs/ru/operations/server_settings/settings/). +// +// Any options not listed here are not supported. +type ClickhouseConfig struct { + // Logging level for the ClickHouse cluster. + LogLevel ClickhouseConfig_LogLevel `protobuf:"varint,1,opt,name=log_level,json=logLevel,proto3,enum=yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig_LogLevel" json:"log_level,omitempty"` + // Settings for the MergeTree engine. + // See description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#merge_tree). + MergeTree *ClickhouseConfig_MergeTree `protobuf:"bytes,2,opt,name=merge_tree,json=mergeTree,proto3" json:"merge_tree,omitempty"` + // Compression settings for the ClickHouse cluster. + // See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#compression). + Compression []*ClickhouseConfig_Compression `protobuf:"bytes,3,rep,name=compression,proto3" json:"compression,omitempty"` + // Configuration of external dictionaries to be used by the ClickHouse cluster. + // See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts/). + Dictionaries []*ClickhouseConfig_ExternalDictionary `protobuf:"bytes,4,rep,name=dictionaries,proto3" json:"dictionaries,omitempty"` + // Settings for thinning Graphite data. + // See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#server_settings-graphite_rollup). + GraphiteRollup []*ClickhouseConfig_GraphiteRollup `protobuf:"bytes,5,rep,name=graphite_rollup,json=graphiteRollup,proto3" json:"graphite_rollup,omitempty"` + // Maximum number of inbound connections. + MaxConnections *wrappers.Int64Value `protobuf:"bytes,6,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + // Maximum number of simultaneously processed requests. + MaxConcurrentQueries *wrappers.Int64Value `protobuf:"bytes,7,opt,name=max_concurrent_queries,json=maxConcurrentQueries,proto3" json:"max_concurrent_queries,omitempty"` + // Number of milliseconds that ClickHouse waits for incoming requests before closing the connection. 
+ KeepAliveTimeout *wrappers.Int64Value `protobuf:"bytes,8,opt,name=keep_alive_timeout,json=keepAliveTimeout,proto3" json:"keep_alive_timeout,omitempty"` + // Cache size (in bytes) for uncompressed data used by MergeTree tables. + // See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#uncompressed_cache_size). + UncompressedCacheSize *wrappers.Int64Value `protobuf:"bytes,9,opt,name=uncompressed_cache_size,json=uncompressedCacheSize,proto3" json:"uncompressed_cache_size,omitempty"` + // Approximate size (in bytes) of the cache of "marks" used by MergeTree tables. + // See details in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#mark_cache_size). + MarkCacheSize *wrappers.Int64Value `protobuf:"bytes,10,opt,name=mark_cache_size,json=markCacheSize,proto3" json:"mark_cache_size,omitempty"` + // Maximum size of the table that can be deleted using a DROP query. + // See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#max_table_size_to_drop). + MaxTableSizeToDrop *wrappers.Int64Value `protobuf:"bytes,11,opt,name=max_table_size_to_drop,json=maxTableSizeToDrop,proto3" json:"max_table_size_to_drop,omitempty"` + // Time interval for reloading built-in dictionaries. + // See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#builtin_dictionaries_reload_interval). + BuiltinDictionariesReloadInterval *wrappers.Int64Value `protobuf:"bytes,12,opt,name=builtin_dictionaries_reload_interval,json=builtinDictionariesReloadInterval,proto3" json:"builtin_dictionaries_reload_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig) Reset() { *m = ClickhouseConfig{} } +func (m *ClickhouseConfig) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig) ProtoMessage() {} +func (*ClickhouseConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0} +} +func (m *ClickhouseConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig.Unmarshal(m, b) +} +func (m *ClickhouseConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig.Merge(dst, src) +} +func (m *ClickhouseConfig) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig.Size(m) +} +func (m *ClickhouseConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig proto.InternalMessageInfo + +func (m *ClickhouseConfig) GetLogLevel() ClickhouseConfig_LogLevel { + if m != nil { + return m.LogLevel + } + return ClickhouseConfig_LOG_LEVEL_UNSPECIFIED +} + +func (m *ClickhouseConfig) GetMergeTree() *ClickhouseConfig_MergeTree { + if m != nil { + return m.MergeTree + } + return nil +} + +func (m *ClickhouseConfig) GetCompression() []*ClickhouseConfig_Compression { + if m != nil { + return m.Compression + } + return nil +} + +func (m *ClickhouseConfig) GetDictionaries() []*ClickhouseConfig_ExternalDictionary { + if m != nil { + return m.Dictionaries + } + return nil +} + +func (m *ClickhouseConfig) GetGraphiteRollup() []*ClickhouseConfig_GraphiteRollup { + if m != nil { + 
return m.GraphiteRollup + } + return nil +} + +func (m *ClickhouseConfig) GetMaxConnections() *wrappers.Int64Value { + if m != nil { + return m.MaxConnections + } + return nil +} + +func (m *ClickhouseConfig) GetMaxConcurrentQueries() *wrappers.Int64Value { + if m != nil { + return m.MaxConcurrentQueries + } + return nil +} + +func (m *ClickhouseConfig) GetKeepAliveTimeout() *wrappers.Int64Value { + if m != nil { + return m.KeepAliveTimeout + } + return nil +} + +func (m *ClickhouseConfig) GetUncompressedCacheSize() *wrappers.Int64Value { + if m != nil { + return m.UncompressedCacheSize + } + return nil +} + +func (m *ClickhouseConfig) GetMarkCacheSize() *wrappers.Int64Value { + if m != nil { + return m.MarkCacheSize + } + return nil +} + +func (m *ClickhouseConfig) GetMaxTableSizeToDrop() *wrappers.Int64Value { + if m != nil { + return m.MaxTableSizeToDrop + } + return nil +} + +func (m *ClickhouseConfig) GetBuiltinDictionariesReloadInterval() *wrappers.Int64Value { + if m != nil { + return m.BuiltinDictionariesReloadInterval + } + return nil +} + +// Options specific to the MergeTree table engine. +type ClickhouseConfig_MergeTree struct { + // Number of blocks of hashes to keep in ZooKeeper. + // See detailed description in [ClickHouse sources](https://github.com/yandex/ClickHouse/blob/v18.1.0-stable/dbms/src/Storages/MergeTree/MergeTreeSettings.h#L59). + ReplicatedDeduplicationWindow *wrappers.Int64Value `protobuf:"bytes,1,opt,name=replicated_deduplication_window,json=replicatedDeduplicationWindow,proto3" json:"replicated_deduplication_window,omitempty"` + // Period of time to keep blocks of hashes for. + // See detailed description in [ClickHouse sources](https://github.com/yandex/ClickHouse/blob/v18.1.0-stable/dbms/src/Storages/MergeTree/MergeTreeSettings.h#L64). 
+ ReplicatedDeduplicationWindowSeconds *wrappers.Int64Value `protobuf:"bytes,2,opt,name=replicated_deduplication_window_seconds,json=replicatedDeduplicationWindowSeconds,proto3" json:"replicated_deduplication_window_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_MergeTree) Reset() { *m = ClickhouseConfig_MergeTree{} } +func (m *ClickhouseConfig_MergeTree) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig_MergeTree) ProtoMessage() {} +func (*ClickhouseConfig_MergeTree) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 0} +} +func (m *ClickhouseConfig_MergeTree) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_MergeTree.Unmarshal(m, b) +} +func (m *ClickhouseConfig_MergeTree) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_MergeTree.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_MergeTree) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_MergeTree.Merge(dst, src) +} +func (m *ClickhouseConfig_MergeTree) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_MergeTree.Size(m) +} +func (m *ClickhouseConfig_MergeTree) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_MergeTree.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_MergeTree proto.InternalMessageInfo + +func (m *ClickhouseConfig_MergeTree) GetReplicatedDeduplicationWindow() *wrappers.Int64Value { + if m != nil { + return m.ReplicatedDeduplicationWindow + } + return nil +} + +func (m *ClickhouseConfig_MergeTree) GetReplicatedDeduplicationWindowSeconds() *wrappers.Int64Value { + if m != nil { + return m.ReplicatedDeduplicationWindowSeconds + } + return nil +} + +type ClickhouseConfig_Compression struct { + // Compression method to use for the specified combination of `min_part_size` and `min_part_size_ratio`. + Method ClickhouseConfig_Compression_Method `protobuf:"varint,1,opt,name=method,proto3,enum=yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig_Compression_Method" json:"method,omitempty"` + // Minimum size of a part of a table. + MinPartSize int64 `protobuf:"varint,2,opt,name=min_part_size,json=minPartSize,proto3" json:"min_part_size,omitempty"` + // Minimum ratio of a part relative to the size of all the data in the table. 
+ MinPartSizeRatio float64 `protobuf:"fixed64,3,opt,name=min_part_size_ratio,json=minPartSizeRatio,proto3" json:"min_part_size_ratio,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_Compression) Reset() { *m = ClickhouseConfig_Compression{} } +func (m *ClickhouseConfig_Compression) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig_Compression) ProtoMessage() {} +func (*ClickhouseConfig_Compression) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 1} +} +func (m *ClickhouseConfig_Compression) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_Compression.Unmarshal(m, b) +} +func (m *ClickhouseConfig_Compression) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_Compression.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_Compression) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_Compression.Merge(dst, src) +} +func (m *ClickhouseConfig_Compression) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_Compression.Size(m) +} +func (m *ClickhouseConfig_Compression) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_Compression.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_Compression proto.InternalMessageInfo + +func (m *ClickhouseConfig_Compression) GetMethod() ClickhouseConfig_Compression_Method { + if m != nil { + return m.Method + } + return ClickhouseConfig_Compression_METHOD_UNSPECIFIED +} + +func (m *ClickhouseConfig_Compression) GetMinPartSize() int64 { + if m != nil { + return m.MinPartSize + } + return 0 +} + +func (m *ClickhouseConfig_Compression) GetMinPartSizeRatio() float64 { + if m != nil { + return m.MinPartSizeRatio + } + return 0 +} + +type ClickhouseConfig_ExternalDictionary struct { + // Name of the external dictionary. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Set of attributes for the external dictionary. + // For in-depth description, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_structure/). + Structure *ClickhouseConfig_ExternalDictionary_Structure `protobuf:"bytes,2,opt,name=structure,proto3" json:"structure,omitempty"` + // Layout for storing the dictionary in memory. + // For in-depth description, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_layout/). + Layout *ClickhouseConfig_ExternalDictionary_Layout `protobuf:"bytes,3,opt,name=layout,proto3" json:"layout,omitempty"` + // Required. Setting for the period of time between dictionary updates. + // For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_lifetime/). + // + // Types that are valid to be assigned to Lifetime: + // *ClickhouseConfig_ExternalDictionary_FixedLifetime + // *ClickhouseConfig_ExternalDictionary_LifetimeRange + Lifetime isClickhouseConfig_ExternalDictionary_Lifetime `protobuf_oneof:"lifetime"` + // Required. Description of the source for the external dictionary. 
+ // + // Types that are valid to be assigned to Source: + // *ClickhouseConfig_ExternalDictionary_HttpSource_ + // *ClickhouseConfig_ExternalDictionary_MysqlSource_ + // *ClickhouseConfig_ExternalDictionary_ClickhouseSource_ + // *ClickhouseConfig_ExternalDictionary_MongodbSource_ + Source isClickhouseConfig_ExternalDictionary_Source `protobuf_oneof:"source"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary) Reset() { *m = ClickhouseConfig_ExternalDictionary{} } +func (m *ClickhouseConfig_ExternalDictionary) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig_ExternalDictionary) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2} +} +func (m *ClickhouseConfig_ExternalDictionary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary) GetStructure() *ClickhouseConfig_ExternalDictionary_Structure { + if m != nil { + return m.Structure + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary) GetLayout() *ClickhouseConfig_ExternalDictionary_Layout { + if m != nil { + return m.Layout + } + return nil +} + +type isClickhouseConfig_ExternalDictionary_Lifetime interface { + isClickhouseConfig_ExternalDictionary_Lifetime() +} + +type ClickhouseConfig_ExternalDictionary_FixedLifetime struct { + FixedLifetime int64 `protobuf:"varint,4,opt,name=fixed_lifetime,json=fixedLifetime,proto3,oneof"` +} + +type ClickhouseConfig_ExternalDictionary_LifetimeRange struct { + LifetimeRange *ClickhouseConfig_ExternalDictionary_Range `protobuf:"bytes,5,opt,name=lifetime_range,json=lifetimeRange,proto3,oneof"` +} + +func (*ClickhouseConfig_ExternalDictionary_FixedLifetime) isClickhouseConfig_ExternalDictionary_Lifetime() { +} + +func (*ClickhouseConfig_ExternalDictionary_LifetimeRange) isClickhouseConfig_ExternalDictionary_Lifetime() { +} + +func (m *ClickhouseConfig_ExternalDictionary) GetLifetime() isClickhouseConfig_ExternalDictionary_Lifetime { + if m != nil { + return m.Lifetime + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary) GetFixedLifetime() int64 { + if x, ok := m.GetLifetime().(*ClickhouseConfig_ExternalDictionary_FixedLifetime); ok { + return x.FixedLifetime + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary) GetLifetimeRange() *ClickhouseConfig_ExternalDictionary_Range { + if x, ok := m.GetLifetime().(*ClickhouseConfig_ExternalDictionary_LifetimeRange); ok { + return x.LifetimeRange + } + 
return nil +} + +type isClickhouseConfig_ExternalDictionary_Source interface { + isClickhouseConfig_ExternalDictionary_Source() +} + +type ClickhouseConfig_ExternalDictionary_HttpSource_ struct { + HttpSource *ClickhouseConfig_ExternalDictionary_HttpSource `protobuf:"bytes,6,opt,name=http_source,json=httpSource,proto3,oneof"` +} + +type ClickhouseConfig_ExternalDictionary_MysqlSource_ struct { + MysqlSource *ClickhouseConfig_ExternalDictionary_MysqlSource `protobuf:"bytes,7,opt,name=mysql_source,json=mysqlSource,proto3,oneof"` +} + +type ClickhouseConfig_ExternalDictionary_ClickhouseSource_ struct { + ClickhouseSource *ClickhouseConfig_ExternalDictionary_ClickhouseSource `protobuf:"bytes,8,opt,name=clickhouse_source,json=clickhouseSource,proto3,oneof"` +} + +type ClickhouseConfig_ExternalDictionary_MongodbSource_ struct { + MongodbSource *ClickhouseConfig_ExternalDictionary_MongodbSource `protobuf:"bytes,9,opt,name=mongodb_source,json=mongodbSource,proto3,oneof"` +} + +func (*ClickhouseConfig_ExternalDictionary_HttpSource_) isClickhouseConfig_ExternalDictionary_Source() { +} + +func (*ClickhouseConfig_ExternalDictionary_MysqlSource_) isClickhouseConfig_ExternalDictionary_Source() { +} + +func (*ClickhouseConfig_ExternalDictionary_ClickhouseSource_) isClickhouseConfig_ExternalDictionary_Source() { +} + +func (*ClickhouseConfig_ExternalDictionary_MongodbSource_) isClickhouseConfig_ExternalDictionary_Source() { +} + +func (m *ClickhouseConfig_ExternalDictionary) GetSource() isClickhouseConfig_ExternalDictionary_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary) GetHttpSource() *ClickhouseConfig_ExternalDictionary_HttpSource { + if x, ok := m.GetSource().(*ClickhouseConfig_ExternalDictionary_HttpSource_); ok { + return x.HttpSource + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary) GetMysqlSource() *ClickhouseConfig_ExternalDictionary_MysqlSource { + if x, ok := m.GetSource().(*ClickhouseConfig_ExternalDictionary_MysqlSource_); ok { + return x.MysqlSource + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary) GetClickhouseSource() *ClickhouseConfig_ExternalDictionary_ClickhouseSource { + if x, ok := m.GetSource().(*ClickhouseConfig_ExternalDictionary_ClickhouseSource_); ok { + return x.ClickhouseSource + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary) GetMongodbSource() *ClickhouseConfig_ExternalDictionary_MongodbSource { + if x, ok := m.GetSource().(*ClickhouseConfig_ExternalDictionary_MongodbSource_); ok { + return x.MongodbSource + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ClickhouseConfig_ExternalDictionary) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClickhouseConfig_ExternalDictionary_OneofMarshaler, _ClickhouseConfig_ExternalDictionary_OneofUnmarshaler, _ClickhouseConfig_ExternalDictionary_OneofSizer, []interface{}{ + (*ClickhouseConfig_ExternalDictionary_FixedLifetime)(nil), + (*ClickhouseConfig_ExternalDictionary_LifetimeRange)(nil), + (*ClickhouseConfig_ExternalDictionary_HttpSource_)(nil), + (*ClickhouseConfig_ExternalDictionary_MysqlSource_)(nil), + (*ClickhouseConfig_ExternalDictionary_ClickhouseSource_)(nil), + (*ClickhouseConfig_ExternalDictionary_MongodbSource_)(nil), + } +} + +func _ClickhouseConfig_ExternalDictionary_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClickhouseConfig_ExternalDictionary) + // lifetime + switch x := m.Lifetime.(type) { + case *ClickhouseConfig_ExternalDictionary_FixedLifetime: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.FixedLifetime)) + case *ClickhouseConfig_ExternalDictionary_LifetimeRange: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LifetimeRange); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClickhouseConfig_ExternalDictionary.Lifetime has unexpected type %T", x) + } + // source + switch x := m.Source.(type) { + case *ClickhouseConfig_ExternalDictionary_HttpSource_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HttpSource); err != nil { + return err + } + case *ClickhouseConfig_ExternalDictionary_MysqlSource_: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MysqlSource); err != nil { + return err + } + case *ClickhouseConfig_ExternalDictionary_ClickhouseSource_: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClickhouseSource); err != nil { + return err + } + case *ClickhouseConfig_ExternalDictionary_MongodbSource_: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MongodbSource); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClickhouseConfig_ExternalDictionary.Source has unexpected type %T", x) + } + return nil +} + +func _ClickhouseConfig_ExternalDictionary_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClickhouseConfig_ExternalDictionary) + switch tag { + case 4: // lifetime.fixed_lifetime + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Lifetime = &ClickhouseConfig_ExternalDictionary_FixedLifetime{int64(x)} + return true, err + case 5: // lifetime.lifetime_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClickhouseConfig_ExternalDictionary_Range) + err := b.DecodeMessage(msg) + m.Lifetime = &ClickhouseConfig_ExternalDictionary_LifetimeRange{msg} + return true, err + case 6: // source.http_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClickhouseConfig_ExternalDictionary_HttpSource) + err := b.DecodeMessage(msg) + m.Source = &ClickhouseConfig_ExternalDictionary_HttpSource_{msg} + return true, err + case 7: // source.mysql_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClickhouseConfig_ExternalDictionary_MysqlSource) + err := b.DecodeMessage(msg) + m.Source = 
&ClickhouseConfig_ExternalDictionary_MysqlSource_{msg} + return true, err + case 8: // source.clickhouse_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClickhouseConfig_ExternalDictionary_ClickhouseSource) + err := b.DecodeMessage(msg) + m.Source = &ClickhouseConfig_ExternalDictionary_ClickhouseSource_{msg} + return true, err + case 9: // source.mongodb_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClickhouseConfig_ExternalDictionary_MongodbSource) + err := b.DecodeMessage(msg) + m.Source = &ClickhouseConfig_ExternalDictionary_MongodbSource_{msg} + return true, err + default: + return false, nil + } +} + +func _ClickhouseConfig_ExternalDictionary_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClickhouseConfig_ExternalDictionary) + // lifetime + switch x := m.Lifetime.(type) { + case *ClickhouseConfig_ExternalDictionary_FixedLifetime: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.FixedLifetime)) + case *ClickhouseConfig_ExternalDictionary_LifetimeRange: + s := proto.Size(x.LifetimeRange) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // source + switch x := m.Source.(type) { + case *ClickhouseConfig_ExternalDictionary_HttpSource_: + s := proto.Size(x.HttpSource) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ClickhouseConfig_ExternalDictionary_MysqlSource_: + s := proto.Size(x.MysqlSource) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ClickhouseConfig_ExternalDictionary_ClickhouseSource_: + s := proto.Size(x.ClickhouseSource) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ClickhouseConfig_ExternalDictionary_MongodbSource_: + s := proto.Size(x.MongodbSource) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ClickhouseConfig_ExternalDictionary_HttpSource struct { + // URL of the source dictionary available over HTTP. + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + // The data format. Valid values are all formats supported by ClickHouse SQL dialect. 
+ Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) Reset() { + *m = ClickhouseConfig_ExternalDictionary_HttpSource{} +} +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_HttpSource) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_HttpSource) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 0} +} +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_HttpSource.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_HttpSource.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_HttpSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_HttpSource.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_HttpSource.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_HttpSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_HttpSource proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_HttpSource) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +type ClickhouseConfig_ExternalDictionary_MysqlSource struct { + // Name of the MySQL database to connect to. + Db string `protobuf:"bytes,1,opt,name=db,proto3" json:"db,omitempty"` + // Name of the database table to use as a ClickHouse dictionary. + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` + // Default port to use when connecting to a replica of the dictionary source. + Port int64 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // Name of the default user for replicas of the dictionary source. + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + // Password of the default user for replicas of the dictionary source. + Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"` + // List of MySQL replicas of the database used as dictionary source. + Replicas []*ClickhouseConfig_ExternalDictionary_MysqlSource_Replica `protobuf:"bytes,6,rep,name=replicas,proto3" json:"replicas,omitempty"` + // Selection criteria for the data in the specified MySQL table. + Where string `protobuf:"bytes,7,opt,name=where,proto3" json:"where,omitempty"` + // Query for checking the dictionary status, to pull only updated data. + // For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_lifetime/). 
+ InvalidateQuery string `protobuf:"bytes,8,opt,name=invalidate_query,json=invalidateQuery,proto3" json:"invalidate_query,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) Reset() { + *m = ClickhouseConfig_ExternalDictionary_MysqlSource{} +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_MysqlSource) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_MysqlSource) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 1} +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_MysqlSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetDb() string { + if m != nil { + return m.Db + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetPort() int64 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetReplicas() []*ClickhouseConfig_ExternalDictionary_MysqlSource_Replica { + if m != nil { + return m.Replicas + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetWhere() string { + if m != nil { + return m.Where + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource) GetInvalidateQuery() string { + if m != nil { + return m.InvalidateQuery + } + return "" +} + +type ClickhouseConfig_ExternalDictionary_MysqlSource_Replica struct { + // MySQL host of the replica. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // Required. The priority of the replica that ClickHouse takes into account when connecting. + // Replica with the highest priority should have this field set to the lowest number. + Priority int64 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + // Port to use when connecting to the replica. + // If a port is not specified for a replica, ClickHouse uses the port specified for the source. 
+ Port int64 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // Name of the MySQL database user. + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + // Password of the MySQL database user. + Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) Reset() { + *m = ClickhouseConfig_ExternalDictionary_MysqlSource_Replica{} +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 1, 0} +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource_Replica.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource_Replica.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource_Replica.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource_Replica.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource_Replica.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MysqlSource_Replica proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) GetPriority() int64 { + if m != nil { + return m.Priority + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) GetPort() int64 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MysqlSource_Replica) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type ClickhouseConfig_ExternalDictionary_ClickhouseSource struct { + // Name of the ClickHouse database. + Db string `protobuf:"bytes,1,opt,name=db,proto3" json:"db,omitempty"` + // Name of the table in the specified database to be used as the dictionary source. + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` + // ClickHouse host of the specified database. + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // Port to use when connecting to the host. + Port int64 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` + // Name of the ClickHouse database user. + User string `protobuf:"bytes,5,opt,name=user,proto3" json:"user,omitempty"` + // Password of the ClickHouse database user. 
+ Password string `protobuf:"bytes,6,opt,name=password,proto3" json:"password,omitempty"` + // Selection criteria for the data in the specified ClickHouse table. + Where string `protobuf:"bytes,7,opt,name=where,proto3" json:"where,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) Reset() { + *m = ClickhouseConfig_ExternalDictionary_ClickhouseSource{} +} +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_ClickhouseSource) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_ClickhouseSource) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 2} +} +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_ClickhouseSource.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_ClickhouseSource.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_ClickhouseSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_ClickhouseSource.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_ClickhouseSource.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_ClickhouseSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_ClickhouseSource proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetDb() string { + if m != nil { + return m.Db + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetPort() int64 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_ClickhouseSource) GetWhere() string { + if m != nil { + return m.Where + } + return "" +} + +type ClickhouseConfig_ExternalDictionary_MongodbSource struct { + // Name of the MongoDB database. + Db string `protobuf:"bytes,1,opt,name=db,proto3" json:"db,omitempty"` + // Name of the collection in the specified database to be used as the dictionary source. + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + // MongoDB host of the specified database. + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // Port to use when connecting to the host. + Port int64 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` + // Name of the MongoDB database user. 
+ User string `protobuf:"bytes,5,opt,name=user,proto3" json:"user,omitempty"` + // Password of the MongoDB database user. + Password string `protobuf:"bytes,6,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) Reset() { + *m = ClickhouseConfig_ExternalDictionary_MongodbSource{} +} +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_MongodbSource) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_MongodbSource) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 3} +} +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MongodbSource.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MongodbSource.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_MongodbSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MongodbSource.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MongodbSource.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MongodbSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_MongodbSource proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) GetDb() string { + if m != nil { + return m.Db + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) GetPort() int64 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_MongodbSource) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type ClickhouseConfig_ExternalDictionary_Structure struct { + // Single numeric key column for the dictionary. + Id *ClickhouseConfig_ExternalDictionary_Structure_Id `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Complex key for the dictionary, containing of one or more key column(s). + Key *ClickhouseConfig_ExternalDictionary_Structure_Key `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Description of the fields available for database queries. 
+ Attributes []*ClickhouseConfig_ExternalDictionary_Structure_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure) Reset() { + *m = ClickhouseConfig_ExternalDictionary_Structure{} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_Structure) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_Structure) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 4} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_Structure) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_Structure) GetId() *ClickhouseConfig_ExternalDictionary_Structure_Id { + if m != nil { + return m.Id + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure) GetKey() *ClickhouseConfig_ExternalDictionary_Structure_Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure) GetAttributes() []*ClickhouseConfig_ExternalDictionary_Structure_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +type ClickhouseConfig_ExternalDictionary_Structure_Attribute struct { + // Name of the column. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Type of the column. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Default value for an element without data (for example, an empty string). + NullValue string `protobuf:"bytes,3,opt,name=null_value,json=nullValue,proto3" json:"null_value,omitempty"` + // Expression, describing the attribute, if applicable. + Expression string `protobuf:"bytes,4,opt,name=expression,proto3" json:"expression,omitempty"` + // Indication of hierarchy support. + // Default value: "false". + Hierarchical bool `protobuf:"varint,5,opt,name=hierarchical,proto3" json:"hierarchical,omitempty"` + // Indication of injective mapping "id -> attribute". + // Default value: "false". 
+ Injective bool `protobuf:"varint,6,opt,name=injective,proto3" json:"injective,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) Reset() { + *m = ClickhouseConfig_ExternalDictionary_Structure_Attribute{} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_Structure_Attribute) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_Structure_Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 4, 0} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Attribute.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Attribute.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_Structure_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Attribute.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Attribute.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Attribute proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) GetNullValue() string { + if m != nil { + return m.NullValue + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) GetExpression() string { + if m != nil { + return m.Expression + } + return "" +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) GetHierarchical() bool { + if m != nil { + return m.Hierarchical + } + return false +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Attribute) GetInjective() bool { + if m != nil { + return m.Injective + } + return false +} + +// Numeric key. +type ClickhouseConfig_ExternalDictionary_Structure_Id struct { + // Name of the numeric key. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) Reset() { + *m = ClickhouseConfig_ExternalDictionary_Structure_Id{} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_Structure_Id) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_Structure_Id) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 4, 1} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Id.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Id.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_Structure_Id) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Id.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Id.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Id.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Id proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Id) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Complex key. +type ClickhouseConfig_ExternalDictionary_Structure_Key struct { + // Attributes of a complex key. 
+ Attributes []*ClickhouseConfig_ExternalDictionary_Structure_Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) Reset() { + *m = ClickhouseConfig_ExternalDictionary_Structure_Key{} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_Structure_Key) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_Structure_Key) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 4, 2} +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Key.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Key.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_Structure_Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Key.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Key.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Key.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Structure_Key proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_Structure_Key) GetAttributes() []*ClickhouseConfig_ExternalDictionary_Structure_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +// Layout determining how to store the dictionary in memory. +type ClickhouseConfig_ExternalDictionary_Layout struct { + // Layout type for an external dictionary. + Type ClickhouseConfig_ExternalDictionary_Layout_Type `protobuf:"varint,1,opt,name=type,proto3,enum=yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig_ExternalDictionary_Layout_Type" json:"type,omitempty"` + // Number of cells in the cache. Rounded up to a power of two. + // Applicable only for CACHE and COMPLEX_KEY_CACHE layout types. 
+ SizeInCells int64 `protobuf:"varint,2,opt,name=size_in_cells,json=sizeInCells,proto3" json:"size_in_cells,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_Layout) Reset() { + *m = ClickhouseConfig_ExternalDictionary_Layout{} +} +func (m *ClickhouseConfig_ExternalDictionary_Layout) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_ExternalDictionary_Layout) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_Layout) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 5} +} +func (m *ClickhouseConfig_ExternalDictionary_Layout) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Layout.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_Layout) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Layout.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_Layout) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Layout.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_Layout) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Layout.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_Layout) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Layout.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Layout proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_Layout) GetType() ClickhouseConfig_ExternalDictionary_Layout_Type { + if m != nil { + return m.Type + } + return ClickhouseConfig_ExternalDictionary_Layout_TYPE_UNSPECIFIED +} + +func (m *ClickhouseConfig_ExternalDictionary_Layout) GetSizeInCells() int64 { + if m != nil { + return m.SizeInCells + } + return 0 +} + +type ClickhouseConfig_ExternalDictionary_Range struct { + // Minimum dictionary lifetime. + Min int64 `protobuf:"varint,1,opt,name=min,proto3" json:"min,omitempty"` + // Maximum dictionary lifetime. 
+ Max int64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_ExternalDictionary_Range) Reset() { + *m = ClickhouseConfig_ExternalDictionary_Range{} +} +func (m *ClickhouseConfig_ExternalDictionary_Range) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig_ExternalDictionary_Range) ProtoMessage() {} +func (*ClickhouseConfig_ExternalDictionary_Range) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 2, 6} +} +func (m *ClickhouseConfig_ExternalDictionary_Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Range.Unmarshal(m, b) +} +func (m *ClickhouseConfig_ExternalDictionary_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Range.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_ExternalDictionary_Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Range.Merge(dst, src) +} +func (m *ClickhouseConfig_ExternalDictionary_Range) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Range.Size(m) +} +func (m *ClickhouseConfig_ExternalDictionary_Range) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_ExternalDictionary_Range proto.InternalMessageInfo + +func (m *ClickhouseConfig_ExternalDictionary_Range) GetMin() int64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *ClickhouseConfig_ExternalDictionary_Range) GetMax() int64 { + if m != nil { + return m.Max + } + return 0 +} + +// Rollup settings for the GraphiteMergeTree table engine. +type ClickhouseConfig_GraphiteRollup struct { + // Name for the specified combination of settings for Graphite rollup. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Pattern to use for the rollup. 
+ Patterns []*ClickhouseConfig_GraphiteRollup_Pattern `protobuf:"bytes,2,rep,name=patterns,proto3" json:"patterns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_GraphiteRollup) Reset() { *m = ClickhouseConfig_GraphiteRollup{} } +func (m *ClickhouseConfig_GraphiteRollup) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig_GraphiteRollup) ProtoMessage() {} +func (*ClickhouseConfig_GraphiteRollup) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 3} +} +func (m *ClickhouseConfig_GraphiteRollup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup.Unmarshal(m, b) +} +func (m *ClickhouseConfig_GraphiteRollup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_GraphiteRollup) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_GraphiteRollup.Merge(dst, src) +} +func (m *ClickhouseConfig_GraphiteRollup) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup.Size(m) +} +func (m *ClickhouseConfig_GraphiteRollup) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_GraphiteRollup.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_GraphiteRollup proto.InternalMessageInfo + +func (m *ClickhouseConfig_GraphiteRollup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClickhouseConfig_GraphiteRollup) GetPatterns() []*ClickhouseConfig_GraphiteRollup_Pattern { + if m != nil { + return m.Patterns + } + return nil +} + +type ClickhouseConfig_GraphiteRollup_Pattern struct { + // Pattern for metric names. + Regexp string `protobuf:"bytes,1,opt,name=regexp,proto3" json:"regexp,omitempty"` + // Name of the aggregating function to apply to data of the age specified in [retention]. + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + // Age of data to use for thinning. 
+ Retention []*ClickhouseConfig_GraphiteRollup_Pattern_Retention `protobuf:"bytes,3,rep,name=retention,proto3" json:"retention,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_GraphiteRollup_Pattern) Reset() { + *m = ClickhouseConfig_GraphiteRollup_Pattern{} +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfig_GraphiteRollup_Pattern) ProtoMessage() {} +func (*ClickhouseConfig_GraphiteRollup_Pattern) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 3, 0} +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern.Unmarshal(m, b) +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_GraphiteRollup_Pattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern.Merge(dst, src) +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern.Size(m) +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern proto.InternalMessageInfo + +func (m *ClickhouseConfig_GraphiteRollup_Pattern) GetRegexp() string { + if m != nil { + return m.Regexp + } + return "" +} + +func (m *ClickhouseConfig_GraphiteRollup_Pattern) GetFunction() string { + if m != nil { + return m.Function + } + return "" +} + +func (m *ClickhouseConfig_GraphiteRollup_Pattern) GetRetention() []*ClickhouseConfig_GraphiteRollup_Pattern_Retention { + if m != nil { + return m.Retention + } + return nil +} + +type ClickhouseConfig_GraphiteRollup_Pattern_Retention struct { + // Minimum age of the data in seconds. + Age int64 `protobuf:"varint,1,opt,name=age,proto3" json:"age,omitempty"` + // Precision of determining the age of the data, in seconds. 
+ Precision int64 `protobuf:"varint,2,opt,name=precision,proto3" json:"precision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) Reset() { + *m = ClickhouseConfig_GraphiteRollup_Pattern_Retention{} +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) String() string { + return proto.CompactTextString(m) +} +func (*ClickhouseConfig_GraphiteRollup_Pattern_Retention) ProtoMessage() {} +func (*ClickhouseConfig_GraphiteRollup_Pattern_Retention) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{0, 3, 0, 0} +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern_Retention.Unmarshal(m, b) +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern_Retention.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfig_GraphiteRollup_Pattern_Retention) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern_Retention.Merge(dst, src) +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern_Retention.Size(m) +} +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern_Retention.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfig_GraphiteRollup_Pattern_Retention proto.InternalMessageInfo + +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) GetAge() int64 { + if m != nil { + return m.Age + } + return 0 +} + +func (m *ClickhouseConfig_GraphiteRollup_Pattern_Retention) GetPrecision() int64 { + if m != nil { + return m.Precision + } + return 0 +} + +type ClickhouseConfigSet struct { + // Effective settings for a ClickHouse cluster (a combination of settings defined + // in [user_config] and [default_config]). + EffectiveConfig *ClickhouseConfig `protobuf:"bytes,1,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` + // User-defined settings for a ClickHouse cluster. + UserConfig *ClickhouseConfig `protobuf:"bytes,2,opt,name=user_config,json=userConfig,proto3" json:"user_config,omitempty"` + // Default configuration for a ClickHouse cluster. 
+ DefaultConfig *ClickhouseConfig `protobuf:"bytes,3,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClickhouseConfigSet) Reset() { *m = ClickhouseConfigSet{} } +func (m *ClickhouseConfigSet) String() string { return proto.CompactTextString(m) } +func (*ClickhouseConfigSet) ProtoMessage() {} +func (*ClickhouseConfigSet) Descriptor() ([]byte, []int) { + return fileDescriptor_clickhouse_cf71ea45ca19ec95, []int{1} +} +func (m *ClickhouseConfigSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClickhouseConfigSet.Unmarshal(m, b) +} +func (m *ClickhouseConfigSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClickhouseConfigSet.Marshal(b, m, deterministic) +} +func (dst *ClickhouseConfigSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickhouseConfigSet.Merge(dst, src) +} +func (m *ClickhouseConfigSet) XXX_Size() int { + return xxx_messageInfo_ClickhouseConfigSet.Size(m) +} +func (m *ClickhouseConfigSet) XXX_DiscardUnknown() { + xxx_messageInfo_ClickhouseConfigSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickhouseConfigSet proto.InternalMessageInfo + +func (m *ClickhouseConfigSet) GetEffectiveConfig() *ClickhouseConfig { + if m != nil { + return m.EffectiveConfig + } + return nil +} + +func (m *ClickhouseConfigSet) GetUserConfig() *ClickhouseConfig { + if m != nil { + return m.UserConfig + } + return nil +} + +func (m *ClickhouseConfigSet) GetDefaultConfig() *ClickhouseConfig { + if m != nil { + return m.DefaultConfig + } + return nil +} + +func init() { + proto.RegisterType((*ClickhouseConfig)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig") + proto.RegisterType((*ClickhouseConfig_MergeTree)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.MergeTree") + proto.RegisterType((*ClickhouseConfig_Compression)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.Compression") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_HttpSource)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.HttpSource") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_MysqlSource)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.MysqlSource") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_MysqlSource_Replica)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.MysqlSource.Replica") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_ClickhouseSource)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.ClickhouseSource") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_MongodbSource)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.MongodbSource") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_Structure)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.Structure") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_Structure_Attribute)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.Structure.Attribute") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_Structure_Id)(nil), 
"yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.Structure.Id") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_Structure_Key)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.Structure.Key") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_Layout)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.Layout") + proto.RegisterType((*ClickhouseConfig_ExternalDictionary_Range)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.ExternalDictionary.Range") + proto.RegisterType((*ClickhouseConfig_GraphiteRollup)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.GraphiteRollup") + proto.RegisterType((*ClickhouseConfig_GraphiteRollup_Pattern)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.GraphiteRollup.Pattern") + proto.RegisterType((*ClickhouseConfig_GraphiteRollup_Pattern_Retention)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig.GraphiteRollup.Pattern.Retention") + proto.RegisterType((*ClickhouseConfigSet)(nil), "yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfigSet") + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig_LogLevel", ClickhouseConfig_LogLevel_name, ClickhouseConfig_LogLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig_Compression_Method", ClickhouseConfig_Compression_Method_name, ClickhouseConfig_Compression_Method_value) + proto.RegisterEnum("yandex.cloud.mdb.clickhouse.v1.config.ClickhouseConfig_ExternalDictionary_Layout_Type", ClickhouseConfig_ExternalDictionary_Layout_Type_name, ClickhouseConfig_ExternalDictionary_Layout_Type_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto", fileDescriptor_clickhouse_cf71ea45ca19ec95) +} + +var fileDescriptor_clickhouse_cf71ea45ca19ec95 = []byte{ + // 1882 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x4f, 0x73, 0xdb, 0xc6, + 0x15, 0x0f, 0x08, 0x92, 0x22, 0x1e, 0x2d, 0x09, 0xd9, 0xd8, 0x0e, 0x8b, 0xc6, 0x8e, 0xa3, 0x71, + 0x27, 0xea, 0x64, 0x0c, 0x89, 0x52, 0xe4, 0xb4, 0xd3, 0xda, 0x53, 0x8a, 0xa2, 0x25, 0xc6, 0xd4, + 0x9f, 0xac, 0x68, 0x59, 0x71, 0xa6, 0x41, 0x41, 0x60, 0x45, 0x22, 0xc6, 0xbf, 0x2c, 0x01, 0x89, + 0x4c, 0x67, 0x32, 0x53, 0x1f, 0x7a, 0xc8, 0xa1, 0xd3, 0x4f, 0xd0, 0x99, 0x7e, 0x84, 0x1e, 0x7b, + 0x4b, 0x0f, 0x1d, 0xe5, 0x03, 0xf4, 0xda, 0x5b, 0x0f, 0xfd, 0x02, 0xbd, 0xa8, 0x97, 0xce, 0xee, + 0x02, 0x04, 0x29, 0xd7, 0x56, 0xa2, 0xd0, 0x93, 0x1b, 0xf6, 0xb7, 0x78, 0xbf, 0xdf, 0xdb, 0x87, + 0xb7, 0xfb, 0x1e, 0x16, 0xee, 0x0e, 0x4d, 0xdf, 0x26, 0x83, 0x25, 0xcb, 0x0d, 0x62, 0x7b, 0xc9, + 0xb3, 0x3b, 0x4b, 0x96, 0xeb, 0x58, 0x4f, 0x7b, 0x41, 0xdc, 0x27, 0x4b, 0xc7, 0xd5, 0x25, 0x2b, + 0xf0, 0x8f, 0x9c, 0xee, 0x18, 0xa8, 0x87, 0x34, 0x88, 0x02, 0xf4, 0x13, 0x61, 0xa7, 0x73, 0x3b, + 0xdd, 0xb3, 0x3b, 0xfa, 0xd8, 0x2b, 0xc7, 0x55, 0x5d, 0xd8, 0x69, 0x37, 0xbb, 0x41, 0xd0, 0x75, + 0xc9, 0x12, 0x37, 0xea, 0xc4, 0x47, 0x4b, 0x27, 0xd4, 0x0c, 0x43, 0x42, 0xfb, 0x82, 0x46, 0xbb, + 0x31, 0x21, 0x7f, 0x6c, 0xba, 0x8e, 0x6d, 0x46, 0x4e, 0xe0, 0x8b, 0xe9, 0x85, 0xff, 0x2e, 0x82, + 0x5a, 0x1f, 0xf1, 0xd6, 0x39, 0x27, 0xfa, 0x35, 0x28, 0x6e, 0xd0, 0x35, 0x5c, 0x72, 0x4c, 0xdc, + 0x8a, 0x74, 0x4b, 0x5a, 0x9c, 0x5b, 0xf9, 0x95, 0xfe, 0xad, 0xdc, 0xd1, 0xcf, 0x73, 0xe9, 0xad, + 0xa0, 0xdb, 0x62, 0x3c, 0xb8, 0xe4, 0x26, 0x4f, 0xe8, 0x37, 0x00, 0x1e, 0xa1, 0x5d, 0x62, 0x44, + 0x94, 0x90, 0x4a, 0xee, 0x96, 
0xb4, 0x58, 0x5e, 0xa9, 0x5d, 0x96, 0x7f, 0x9b, 0x31, 0xb5, 0x29, + 0x21, 0x58, 0xf1, 0xd2, 0x47, 0x44, 0xa0, 0x6c, 0x05, 0x5e, 0x48, 0x49, 0xbf, 0xef, 0x04, 0x7e, + 0x45, 0xbe, 0x25, 0x2f, 0x96, 0x57, 0xea, 0x97, 0x95, 0xa8, 0x67, 0x54, 0x78, 0x9c, 0x17, 0xf9, + 0x70, 0xc5, 0x76, 0x2c, 0x16, 0x4d, 0x93, 0x3a, 0xa4, 0x5f, 0xc9, 0x73, 0x9d, 0x0f, 0x2f, 0xab, + 0xd3, 0x18, 0x44, 0x84, 0xfa, 0xa6, 0xbb, 0x91, 0x72, 0x0e, 0xf1, 0x04, 0x3f, 0x0a, 0x60, 0xbe, + 0x4b, 0xcd, 0xb0, 0xe7, 0x44, 0xc4, 0xa0, 0x81, 0xeb, 0xc6, 0x61, 0xa5, 0xc0, 0x25, 0x1f, 0x5c, + 0x56, 0x72, 0x33, 0xa1, 0xc3, 0x9c, 0x0d, 0xcf, 0x75, 0x27, 0xc6, 0x68, 0x07, 0xe6, 0x3d, 0x73, + 0x60, 0x58, 0x81, 0xef, 0x13, 0xee, 0x47, 0xbf, 0x52, 0xe4, 0x9f, 0xeb, 0xc7, 0xba, 0x48, 0x3b, + 0x3d, 0x4d, 0x3b, 0xbd, 0xe9, 0x47, 0x77, 0xdf, 0x3f, 0x30, 0xdd, 0x98, 0xac, 0x97, 0xce, 0x4e, + 0xab, 0xf9, 0xfb, 0xf7, 0xaa, 0xcb, 0x78, 0xce, 0x33, 0x07, 0xf5, 0xcc, 0x18, 0x7d, 0x02, 0xd7, + 0x13, 0x3e, 0x2b, 0xa6, 0x94, 0xf8, 0x91, 0xf1, 0x79, 0x4c, 0x78, 0xe8, 0x66, 0xbe, 0x0b, 0xed, + 0x55, 0x41, 0x9b, 0x70, 0x7c, 0x24, 0x28, 0x50, 0x13, 0xd0, 0x53, 0x42, 0x42, 0xc3, 0x74, 0x9d, + 0x63, 0x62, 0x44, 0x8e, 0x47, 0x82, 0x38, 0xaa, 0x94, 0x2e, 0x24, 0xc6, 0x2a, 0x33, 0xab, 0x31, + 0xab, 0xb6, 0x30, 0x42, 0xfb, 0xf0, 0x66, 0xec, 0xa7, 0x5f, 0x9a, 0xd8, 0x86, 0x65, 0x5a, 0x3d, + 0x62, 0xf4, 0x9d, 0x2f, 0x48, 0x45, 0xb9, 0x98, 0xef, 0xda, 0xb8, 0x6d, 0x9d, 0x99, 0xee, 0x3b, + 0x5f, 0x10, 0xd4, 0x66, 0xc1, 0xa4, 0x4f, 0xc7, 0xc9, 0xe0, 0xe2, 0x55, 0xcf, 0x9f, 0x9d, 0x56, + 0xcb, 0xf7, 0xd7, 0x56, 0xef, 0xfe, 0xec, 0x83, 0xe5, 0x9f, 0x57, 0x57, 0x96, 0xf1, 0x2c, 0x23, + 0xc9, 0x58, 0x77, 0x45, 0x48, 0x23, 0xb3, 0xe3, 0x0a, 0x52, 0x23, 0x0a, 0x0c, 0x9b, 0x06, 0x61, + 0xa5, 0x7c, 0xb1, 0xa7, 0xc8, 0x33, 0x07, 0x6d, 0x66, 0xc9, 0xa8, 0xda, 0xc1, 0x06, 0x0d, 0x42, + 0xe4, 0xc2, 0xed, 0x4e, 0xec, 0xb8, 0x91, 0xe3, 0x1b, 0xe3, 0xc9, 0x67, 0x50, 0xe2, 0x06, 0xa6, + 0x6d, 0x38, 0x7e, 0x44, 0xe8, 0xb1, 0xe9, 0x56, 0xae, 0x5c, 0x4c, 0xff, 0x4e, 0x42, 0xb4, 0x31, + 0xc6, 0x83, 0x39, 0x4d, 0x33, 0x61, 0xd1, 0xfe, 0x25, 0x81, 0x32, 0xda, 0xc2, 0xc8, 0x82, 0xb7, + 0x29, 0x09, 0x5d, 0xc7, 0x32, 0x23, 0x62, 0x1b, 0x36, 0xb1, 0x63, 0x31, 0x70, 0x02, 0xdf, 0x38, + 0x71, 0x7c, 0x3b, 0x38, 0xe1, 0xc7, 0xd1, 0x05, 0xb2, 0x37, 0x32, 0x8e, 0x8d, 0x71, 0x8a, 0xc7, + 0x9c, 0x01, 0x51, 0x78, 0xf7, 0x02, 0x11, 0xa3, 0x4f, 0xac, 0xc0, 0xb7, 0xfb, 0xc9, 0xd9, 0xf4, + 0x52, 0xb1, 0xdb, 0x2f, 0x15, 0xdb, 0x17, 0x44, 0xda, 0xb3, 0x1c, 0x94, 0xc7, 0x8e, 0x11, 0xd4, + 0x81, 0xa2, 0x47, 0xa2, 0x5e, 0x60, 0x27, 0xc7, 0xeb, 0x87, 0x53, 0x38, 0x9b, 0xf4, 0x6d, 0xce, + 0x88, 0x13, 0x66, 0xf4, 0x1e, 0xcc, 0x7a, 0x8e, 0x6f, 0x84, 0x26, 0x8d, 0x44, 0xb6, 0xb1, 0xd5, + 0xc8, 0xeb, 0x33, 0x67, 0xa7, 0x55, 0xf9, 0xfe, 0xbd, 0x2a, 0x2e, 0x7b, 0x8e, 0xbf, 0x67, 0xd2, + 0x88, 0xa7, 0xd1, 0x1d, 0x78, 0x63, 0xe2, 0x65, 0x83, 0xb2, 0x55, 0x54, 0xe4, 0x5b, 0xd2, 0xa2, + 0x84, 0xd5, 0xb1, 0x37, 0x31, 0xc3, 0x17, 0x56, 0xa1, 0x28, 0xd4, 0xd0, 0x75, 0x40, 0xdb, 0x8d, + 0xf6, 0xd6, 0xee, 0x86, 0xf1, 0x68, 0x67, 0x7f, 0xaf, 0x51, 0x6f, 0x3e, 0x68, 0x36, 0x36, 0xd4, + 0xd7, 0xd0, 0x0c, 0xc8, 0xad, 0x27, 0xef, 0xab, 0x12, 0x2a, 0x41, 0xfe, 0xc9, 0x7e, 0x7b, 0x43, + 0xcd, 0x69, 0xff, 0x79, 0x13, 0xd0, 0xf3, 0x67, 0x1c, 0xaa, 0x40, 0xde, 0x37, 0x3d, 0xc2, 0x23, + 0xa1, 0xac, 0xe7, 0xff, 0x7d, 0x5a, 0x95, 0x30, 0x47, 0xd0, 0x00, 0x94, 0x7e, 0x44, 0x63, 0x2b, + 0x8a, 0x69, 0x5a, 0x27, 0xda, 0xd3, 0x3b, 0x5c, 0xf5, 0xfd, 0x94, 0x3b, 0x11, 0xcd, 0xc4, 0x50, + 0x00, 0x45, 0xd7, 0x1c, 0xb2, 0xf3, 0x43, 0xe6, 0xb2, 
0x1f, 0x4d, 0x51, 0xb6, 0xc5, 0x89, 0x13, + 0xcd, 0x44, 0x06, 0xbd, 0x0b, 0x73, 0x47, 0xce, 0x80, 0xd8, 0x86, 0xeb, 0x1c, 0x11, 0x76, 0x76, + 0x55, 0xf2, 0xec, 0x6b, 0x6d, 0xbd, 0x86, 0x67, 0x39, 0xde, 0x4a, 0x60, 0x34, 0x84, 0xb9, 0xf4, + 0x15, 0x83, 0x9a, 0x7e, 0x97, 0x54, 0x0a, 0xdc, 0xc3, 0xbd, 0x29, 0x7a, 0x88, 0x19, 0x2f, 0x93, + 0x4e, 0x95, 0x38, 0x80, 0x06, 0x50, 0xee, 0x45, 0x51, 0x68, 0xf4, 0x83, 0x98, 0x5a, 0x24, 0xa9, + 0x04, 0x8f, 0xa6, 0xa8, 0xbb, 0x15, 0x45, 0xe1, 0x3e, 0x27, 0xdf, 0x92, 0x30, 0xf4, 0x46, 0x23, + 0xf4, 0x5b, 0xb8, 0xe2, 0x0d, 0xfb, 0x9f, 0xbb, 0xa9, 0xb4, 0xa8, 0x16, 0x07, 0x53, 0x94, 0xde, + 0x66, 0xf4, 0x23, 0xed, 0xb2, 0x97, 0x0d, 0xd1, 0x57, 0x12, 0xbc, 0x9e, 0xf1, 0xa6, 0x2e, 0x88, + 0xba, 0xf2, 0xc9, 0x14, 0x5d, 0xc8, 0xde, 0x19, 0xf9, 0xa1, 0x5a, 0xe7, 0x30, 0xf4, 0x3b, 0x09, + 0xe6, 0xbc, 0xc0, 0xef, 0x06, 0x76, 0x27, 0xf5, 0x44, 0x54, 0xa4, 0xc3, 0x69, 0x06, 0x43, 0x08, + 0x8c, 0xdc, 0x98, 0xf5, 0xc6, 0x01, 0x6d, 0x1d, 0x20, 0xfb, 0x52, 0xe8, 0x3a, 0xc8, 0x31, 0x75, + 0x27, 0x76, 0x2f, 0x03, 0xd0, 0x5b, 0x50, 0x3c, 0x0a, 0xa8, 0x67, 0x46, 0x7c, 0xe7, 0xa6, 0x53, + 0x09, 0xa6, 0x9d, 0xc9, 0x50, 0x1e, 0x8b, 0x39, 0xba, 0x0a, 0x39, 0xbb, 0x33, 0x41, 0x92, 0xb3, + 0x3b, 0x48, 0x83, 0x02, 0x2f, 0x6c, 0x13, 0x14, 0x02, 0x42, 0x6f, 0x43, 0x3e, 0x0c, 0xa8, 0xd8, + 0xa0, 0xf2, 0x7a, 0xf9, 0xec, 0xb4, 0x3a, 0xb3, 0x7c, 0xe7, 0xee, 0xda, 0xda, 0xea, 0x1a, 0xe6, + 0x13, 0x08, 0x41, 0x3e, 0xee, 0x13, 0xca, 0x37, 0x92, 0x82, 0xf9, 0x33, 0xd2, 0xa0, 0x14, 0x9a, + 0xfd, 0xfe, 0x49, 0x40, 0x6d, 0xbe, 0x6f, 0x14, 0x3c, 0x1a, 0xa3, 0x67, 0x12, 0x94, 0x92, 0xc3, + 0x9c, 0xb5, 0x39, 0xac, 0xaf, 0xfa, 0xf4, 0xd5, 0x64, 0x98, 0x8e, 0x85, 0xcc, 0x7a, 0xf1, 0xd9, + 0x37, 0xd5, 0xdc, 0xfd, 0x65, 0x3c, 0xd2, 0x45, 0x57, 0xa1, 0x70, 0xd2, 0x23, 0x54, 0xa4, 0xb8, + 0x82, 0xc5, 0x00, 0xfd, 0x14, 0x54, 0xc7, 0x4f, 0x7a, 0x77, 0xc2, 0x7b, 0xa6, 0x21, 0x4f, 0x40, + 0x05, 0xcf, 0x67, 0x38, 0xeb, 0x83, 0x86, 0xda, 0x9f, 0x25, 0x98, 0x49, 0xe8, 0xd1, 0x3b, 0x90, + 0xef, 0x05, 0xfd, 0x28, 0x09, 0xeb, 0x2c, 0x8b, 0xde, 0x57, 0xdf, 0x54, 0x0b, 0xbf, 0xbc, 0xb7, + 0xb2, 0xb6, 0x8a, 0xf9, 0x14, 0x5a, 0x80, 0x52, 0x48, 0x9d, 0x80, 0x3a, 0xd1, 0x30, 0xa9, 0x0f, + 0xc5, 0xb3, 0x53, 0xe1, 0x53, 0x8a, 0x4f, 0x3d, 0xd2, 0xda, 0x3f, 0xa4, 0xf1, 0x9f, 0x8e, 0x4b, + 0x67, 0x40, 0xba, 0x3c, 0xf9, 0xc5, 0xcb, 0x4b, 0x5d, 0xcf, 0xbf, 0xc8, 0xf5, 0x4a, 0xe2, 0x7a, + 0x61, 0xbc, 0xf8, 0x3c, 0xb7, 0x80, 0xe2, 0xb9, 0x54, 0xf9, 0xbf, 0x5f, 0x49, 0xfb, 0xbb, 0x04, + 0xb3, 0x13, 0x5b, 0xe7, 0x05, 0x6b, 0xba, 0x0d, 0x60, 0x05, 0xae, 0x2b, 0x9a, 0xe2, 0x89, 0x85, + 0x8d, 0xe1, 0x3f, 0xdc, 0xea, 0xb4, 0x7f, 0x16, 0x40, 0x19, 0xd5, 0x46, 0xd4, 0x85, 0x9c, 0x63, + 0x27, 0x6d, 0xd7, 0xe3, 0x57, 0x51, 0x7d, 0xf5, 0xa6, 0x8d, 0x73, 0x8e, 0x8d, 0x3e, 0x03, 0xf9, + 0x29, 0x19, 0x26, 0x05, 0xf7, 0xf0, 0x95, 0x28, 0x3d, 0x24, 0x43, 0xcc, 0x44, 0xd0, 0xef, 0x25, + 0x00, 0x33, 0x8a, 0xa8, 0xd3, 0x89, 0x23, 0xc2, 0xfa, 0xbc, 0x69, 0xef, 0xf6, 0x4c, 0xb3, 0x96, + 0xca, 0x8c, 0x76, 0xfb, 0x98, 0xb2, 0xf6, 0xb5, 0x04, 0xca, 0xe8, 0x8d, 0x97, 0xb4, 0x42, 0x15, + 0xc8, 0x47, 0xc3, 0x70, 0x72, 0x1b, 0x70, 0x04, 0xdd, 0x00, 0xf0, 0x63, 0xd7, 0x35, 0x8e, 0x59, + 0x3b, 0x2a, 0xb2, 0x05, 0x2b, 0x0c, 0xe1, 0xfd, 0x29, 0xba, 0x09, 0x40, 0x06, 0xa3, 0x3f, 0x61, + 0xb1, 0x43, 0xc7, 0x10, 0xb4, 0x00, 0x57, 0x7a, 0x0e, 0xa1, 0x26, 0xb5, 0x7a, 0x8e, 0x65, 0xba, + 0x3c, 0x55, 0x4a, 0x78, 0x02, 0x43, 0x6f, 0x81, 0xe2, 0xf8, 0x9f, 0xb1, 0xbc, 0x3c, 0x16, 0x65, + 0xbf, 0x84, 0x33, 0x40, 0xbb, 0x09, 0xb9, 0xa6, 0xfd, 0x62, 0xd7, 0xb5, 0x3f, 
0x48, 0x20, 0x3f, + 0x7c, 0x3e, 0xe6, 0xd2, 0x0f, 0x16, 0xf3, 0x3f, 0xe5, 0xa0, 0x28, 0x9a, 0x30, 0x14, 0x26, 0x61, + 0x15, 0x5d, 0xf8, 0xc1, 0xd4, 0xbb, 0x3c, 0xbd, 0x3d, 0x0c, 0xc9, 0xc4, 0xe7, 0x5a, 0x80, 0x59, + 0xde, 0x5f, 0x3b, 0xbe, 0x61, 0x11, 0xd7, 0x15, 0xff, 0x18, 0x32, 0x2e, 0x33, 0xb0, 0xe9, 0xd7, + 0x19, 0xb4, 0xf0, 0x25, 0xe4, 0x99, 0x1d, 0xba, 0x0a, 0x6a, 0xfb, 0xe3, 0xbd, 0xc6, 0xb9, 0xce, + 0xba, 0x04, 0xf9, 0x07, 0xad, 0x5a, 0x5b, 0x95, 0x10, 0x40, 0x71, 0xab, 0xb6, 0xbf, 0xd5, 0xd8, + 0x50, 0x73, 0xac, 0x0f, 0xaf, 0xef, 0x6e, 0xef, 0xb5, 0x1a, 0x87, 0xc6, 0xc3, 0xc6, 0xc7, 0x46, + 0x82, 0xcb, 0x48, 0x85, 0x2b, 0xb8, 0xb6, 0xb3, 0xd9, 0x48, 0x91, 0x3c, 0x52, 0xa0, 0x50, 0xaf, + 0xd5, 0xb7, 0x1a, 0x6a, 0x01, 0x5d, 0x83, 0xd7, 0xc7, 0x8d, 0x04, 0x5c, 0xd4, 0xde, 0x83, 0x82, + 0xe8, 0xf8, 0x54, 0x90, 0x3d, 0xc7, 0xe7, 0xd1, 0x91, 0x31, 0x7b, 0xe4, 0x88, 0x39, 0x48, 0x9c, + 0x66, 0x8f, 0xeb, 0x2a, 0x94, 0xd2, 0x36, 0x11, 0xe5, 0xbf, 0xfe, 0x5b, 0x55, 0x5a, 0x9f, 0x83, + 0xa2, 0x68, 0x4d, 0xc4, 0x58, 0xfb, 0x8b, 0x0c, 0x73, 0x93, 0x17, 0x0d, 0x2f, 0x49, 0x74, 0xca, + 0x0e, 0xa6, 0x88, 0x85, 0x33, 0xdd, 0x96, 0x3b, 0xd3, 0xb9, 0xdc, 0xd0, 0xf7, 0x04, 0x6d, 0x56, + 0x74, 0x53, 0x1d, 0xed, 0x8f, 0x39, 0x98, 0x49, 0x66, 0xd1, 0x75, 0x28, 0x52, 0xd2, 0x25, 0x83, + 0x50, 0xf8, 0x86, 0x93, 0x11, 0xba, 0x05, 0xa5, 0xa3, 0xd8, 0x7f, 0xfe, 0xc8, 0x1e, 0xa1, 0xe8, + 0x4b, 0x50, 0x28, 0x89, 0x88, 0x1f, 0x65, 0x57, 0x4e, 0x87, 0xd3, 0x75, 0x5d, 0xc7, 0x29, 0xff, + 0x68, 0x11, 0x99, 0xa4, 0xf6, 0x10, 0x94, 0xd1, 0x3c, 0xaa, 0x80, 0x6c, 0x76, 0x45, 0x7c, 0xb3, + 0x92, 0xce, 0x20, 0x74, 0x1b, 0x94, 0x90, 0x12, 0xcb, 0xe9, 0xa7, 0x2b, 0xc9, 0xe6, 0xb3, 0x89, + 0x05, 0x1b, 0x4a, 0xe9, 0xcd, 0x1d, 0xfa, 0x11, 0x5c, 0x6b, 0xed, 0x6e, 0x1a, 0xad, 0xc6, 0x41, + 0xa3, 0x75, 0x2e, 0x17, 0x15, 0x28, 0xb4, 0x71, 0xad, 0xde, 0x50, 0x25, 0xf6, 0xb8, 0xd1, 0x58, + 0x7f, 0xb4, 0xa9, 0xe6, 0xd0, 0x3c, 0x94, 0x9b, 0x3b, 0x0f, 0x76, 0xf1, 0x76, 0xad, 0xdd, 0xdc, + 0xdd, 0x51, 0x65, 0x54, 0x86, 0x99, 0xc7, 0x35, 0xbc, 0xd3, 0xdc, 0xd9, 0x14, 0xf9, 0xd7, 0xc0, + 0x78, 0x17, 0xab, 0x85, 0x85, 0xbf, 0xe6, 0xe0, 0x8d, 0xf3, 0x6b, 0xdf, 0x27, 0x11, 0xea, 0x81, + 0x4a, 0x8e, 0x8e, 0xc4, 0xf9, 0x62, 0x88, 0x18, 0x25, 0x15, 0xe8, 0x83, 0x4b, 0x46, 0x34, 0xf9, + 0x5a, 0xf3, 0x23, 0xda, 0xe4, 0xaa, 0xf3, 0x10, 0xca, 0xac, 0x1e, 0xa6, 0x22, 0xb9, 0xef, 0x25, + 0x82, 0x81, 0x71, 0x25, 0xcc, 0x9f, 0xc2, 0x9c, 0x4d, 0x8e, 0xcc, 0xd8, 0x8d, 0x52, 0x72, 0xf9, + 0xfb, 0x91, 0xcf, 0x26, 0x74, 0xc9, 0x82, 0x0e, 0x9e, 0xb4, 0xbb, 0x4e, 0xd4, 0x8b, 0x3b, 0xba, + 0x15, 0x78, 0x4b, 0x82, 0xf3, 0x8e, 0xb8, 0xe5, 0xed, 0x06, 0x77, 0xba, 0xc4, 0xe7, 0xb7, 0x15, + 0x4b, 0xdf, 0xea, 0xf6, 0xf9, 0x17, 0x19, 0xd8, 0x29, 0x72, 0xbb, 0xd5, 0xff, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x20, 0xb8, 0x9b, 0xe3, 0xb8, 0x16, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database.pb.go new file mode 100644 index 000000000..158a838f3 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/clickhouse/v1/database.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ClickHouse Database resource. For more information, see the +// [Developer's Guide](/docs/managed-clickhouse/concepts). +type Database struct { + // Name of the database. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the ClickHouse cluster that the database belongs to. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Database) Reset() { *m = Database{} } +func (m *Database) String() string { return proto.CompactTextString(m) } +func (*Database) ProtoMessage() {} +func (*Database) Descriptor() ([]byte, []int) { + return fileDescriptor_database_d83bcea5c6482814, []int{0} +} +func (m *Database) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Database.Unmarshal(m, b) +} +func (m *Database) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Database.Marshal(b, m, deterministic) +} +func (dst *Database) XXX_Merge(src proto.Message) { + xxx_messageInfo_Database.Merge(dst, src) +} +func (m *Database) XXX_Size() int { + return xxx_messageInfo_Database.Size(m) +} +func (m *Database) XXX_DiscardUnknown() { + xxx_messageInfo_Database.DiscardUnknown(m) +} + +var xxx_messageInfo_Database proto.InternalMessageInfo + +func (m *Database) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Database) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DatabaseSpec struct { + // Name of the ClickHouse database. 1-63 characters long. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DatabaseSpec) Reset() { *m = DatabaseSpec{} } +func (m *DatabaseSpec) String() string { return proto.CompactTextString(m) } +func (*DatabaseSpec) ProtoMessage() {} +func (*DatabaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_database_d83bcea5c6482814, []int{1} +} +func (m *DatabaseSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DatabaseSpec.Unmarshal(m, b) +} +func (m *DatabaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DatabaseSpec.Marshal(b, m, deterministic) +} +func (dst *DatabaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DatabaseSpec.Merge(dst, src) +} +func (m *DatabaseSpec) XXX_Size() int { + return xxx_messageInfo_DatabaseSpec.Size(m) +} +func (m *DatabaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DatabaseSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DatabaseSpec proto.InternalMessageInfo + +func (m *DatabaseSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*Database)(nil), "yandex.cloud.mdb.clickhouse.v1.Database") + proto.RegisterType((*DatabaseSpec)(nil), "yandex.cloud.mdb.clickhouse.v1.DatabaseSpec") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/database.proto", fileDescriptor_database_d83bcea5c6482814) +} + +var fileDescriptor_database_d83bcea5c6482814 = []byte{ + // 240 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xad, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x4d, 0x49, 0xd2, 0x4f, 0xce, 0xc9, + 0x4c, 0xce, 0xce, 0xc8, 0x2f, 0x2d, 0x4e, 0xd5, 0x2f, 0x33, 0xd4, 0x4f, 0x49, 0x2c, 0x49, 0x4c, + 0x4a, 0x2c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x83, 0x28, 0xd7, 0x03, 0x2b, + 0xd7, 0xcb, 0x4d, 0x49, 0xd2, 0x43, 0x28, 0xd7, 0x2b, 0x33, 0x94, 0x92, 0x45, 0x31, 0xae, 0x2c, + 0x31, 0x27, 0x33, 0x25, 0xb1, 0x24, 0x33, 0x3f, 0x0f, 0xa2, 0x5d, 0xc9, 0x96, 0x8b, 0xc3, 0x05, + 0x6a, 0xa0, 0x90, 0x10, 0x17, 0x4b, 0x5e, 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, + 0x10, 0x98, 0x2d, 0x24, 0xcb, 0xc5, 0x95, 0x9c, 0x53, 0x5a, 0x5c, 0x92, 0x5a, 0x14, 0x9f, 0x99, + 0x22, 0xc1, 0x04, 0x96, 0xe1, 0x84, 0x8a, 0x78, 0xa6, 0x28, 0x39, 0x71, 0xf1, 0xc0, 0xb4, 0x07, + 0x17, 0xa4, 0x26, 0x0b, 0x19, 0x21, 0x1b, 0xe1, 0x24, 0xf7, 0xe2, 0xb8, 0x21, 0xe3, 0xa7, 0xe3, + 0x86, 0x7c, 0xd1, 0x89, 0xba, 0x55, 0x8e, 0xba, 0x51, 0x06, 0xba, 0x96, 0xf1, 0xba, 0xb1, 0x5a, + 0x5d, 0x27, 0x0c, 0x59, 0x6c, 0x6c, 0xcd, 0x8c, 0x21, 0x56, 0x38, 0xf9, 0x47, 0xf9, 0xa6, 0x67, + 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x43, 0x9c, 0xab, 0x0b, 0x71, 0x6e, 0x7a, + 0xbe, 0x6e, 0x7a, 0x6a, 0x1e, 0xd8, 0xa5, 0xfa, 0xf8, 0x83, 0xc5, 0x1a, 0xc1, 0x4b, 0x62, 0x03, + 0x6b, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x76, 0xaf, 0x2f, 0xc6, 0x4a, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database_service.pb.go new file mode 100644 index 000000000..d43d02100 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/database_service.pb.go @@ -0,0 +1,630 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
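+//
+// A minimal usage sketch for the DatabaseService API defined in this file,
+// assuming an authenticated *grpc.ClientConn obtained elsewhere; listAllDatabases
+// is a hypothetical helper, not part of the generated API. It pages through the
+// results with page_token / next_page_token as documented on the request and
+// response messages below, relying on the nil-safe getters emitted by
+// protoc-gen-go:
+//
+//   func listAllDatabases(ctx context.Context, conn *grpc.ClientConn, clusterID string) ([]*Database, error) {
+//       client := NewDatabaseServiceClient(conn)
+//       var all []*Database
+//       req := &ListDatabasesRequest{ClusterId: clusterID, PageSize: 100}
+//       for {
+//           resp, err := client.List(ctx, req)
+//           if err != nil {
+//               return nil, err
+//           }
+//           all = append(all, resp.GetDatabases()...)
+//           if resp.GetNextPageToken() == "" {
+//               return all, nil
+//           }
+//           req.PageToken = resp.GetNextPageToken()
+//       }
+//   }
+//
+// Create and Delete return *operation.Operation rather than the final resource,
+// so callers typically poll or wait on that operation before treating the change
+// as applied.
+//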
+// source: yandex/cloud/mdb/clickhouse/v1/database_service.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetDatabaseRequest struct { + // ID of the ClickHouse cluster that the database belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the ClickHouse Database resource to return. + // To get the name of the database, use a [DatabaseService.List] request. + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDatabaseRequest) Reset() { *m = GetDatabaseRequest{} } +func (m *GetDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseRequest) ProtoMessage() {} +func (*GetDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{0} +} +func (m *GetDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDatabaseRequest.Unmarshal(m, b) +} +func (m *GetDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *GetDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDatabaseRequest.Merge(dst, src) +} +func (m *GetDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_GetDatabaseRequest.Size(m) +} +func (m *GetDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDatabaseRequest proto.InternalMessageInfo + +func (m *GetDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type ListDatabasesRequest struct { + // ID of the ClickHouse cluster to list databases in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListDatabasesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. 
+ PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. to get the next page of results, set [page_token] to the [ListDatabasesResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDatabasesRequest) Reset() { *m = ListDatabasesRequest{} } +func (m *ListDatabasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesRequest) ProtoMessage() {} +func (*ListDatabasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{1} +} +func (m *ListDatabasesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDatabasesRequest.Unmarshal(m, b) +} +func (m *ListDatabasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDatabasesRequest.Marshal(b, m, deterministic) +} +func (dst *ListDatabasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDatabasesRequest.Merge(dst, src) +} +func (m *ListDatabasesRequest) XXX_Size() int { + return xxx_messageInfo_ListDatabasesRequest.Size(m) +} +func (m *ListDatabasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDatabasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDatabasesRequest proto.InternalMessageInfo + +func (m *ListDatabasesRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListDatabasesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDatabasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListDatabasesResponse struct { + // List of ClickHouse Database resources. + Databases []*Database `protobuf:"bytes,1,rep,name=databases,proto3" json:"databases,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value + // for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDatabasesResponse) Reset() { *m = ListDatabasesResponse{} } +func (m *ListDatabasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesResponse) ProtoMessage() {} +func (*ListDatabasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{2} +} +func (m *ListDatabasesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDatabasesResponse.Unmarshal(m, b) +} +func (m *ListDatabasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDatabasesResponse.Marshal(b, m, deterministic) +} +func (dst *ListDatabasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDatabasesResponse.Merge(dst, src) +} +func (m *ListDatabasesResponse) XXX_Size() int { + return xxx_messageInfo_ListDatabasesResponse.Size(m) +} +func (m *ListDatabasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDatabasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDatabasesResponse proto.InternalMessageInfo + +func (m *ListDatabasesResponse) GetDatabases() []*Database { + if m != nil { + return m.Databases + } + return nil +} + +func (m *ListDatabasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateDatabaseRequest struct { + // ID of the ClickHouse cluster to create a database in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Configuration of the database to create. + DatabaseSpec *DatabaseSpec `protobuf:"bytes,2,opt,name=database_spec,json=databaseSpec,proto3" json:"database_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDatabaseRequest) Reset() { *m = CreateDatabaseRequest{} } +func (m *CreateDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseRequest) ProtoMessage() {} +func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{3} +} +func (m *CreateDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDatabaseRequest.Unmarshal(m, b) +} +func (m *CreateDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *CreateDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDatabaseRequest.Merge(dst, src) +} +func (m *CreateDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_CreateDatabaseRequest.Size(m) +} +func (m *CreateDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDatabaseRequest proto.InternalMessageInfo + +func (m *CreateDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateDatabaseRequest) GetDatabaseSpec() *DatabaseSpec { + if m != nil { + return m.DatabaseSpec + } + return nil +} + +type CreateDatabaseMetadata struct { + // ID of the ClickHouse cluster where a database is being created. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the ClickHouse database that is being created. + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDatabaseMetadata) Reset() { *m = CreateDatabaseMetadata{} } +func (m *CreateDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseMetadata) ProtoMessage() {} +func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{4} +} +func (m *CreateDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDatabaseMetadata.Unmarshal(m, b) +} +func (m *CreateDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDatabaseMetadata.Merge(dst, src) +} +func (m *CreateDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_CreateDatabaseMetadata.Size(m) +} +func (m *CreateDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDatabaseMetadata proto.InternalMessageInfo + +func (m *CreateDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type DeleteDatabaseRequest struct { + // ID of the ClickHouse cluster to delete a database in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the database to delete. + // To get the name of the database, use a [DatabaseService.List] request. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDatabaseRequest) Reset() { *m = DeleteDatabaseRequest{} } +func (m *DeleteDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDatabaseRequest) ProtoMessage() {} +func (*DeleteDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{5} +} +func (m *DeleteDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDatabaseRequest.Unmarshal(m, b) +} +func (m *DeleteDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDatabaseRequest.Merge(dst, src) +} +func (m *DeleteDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_DeleteDatabaseRequest.Size(m) +} +func (m *DeleteDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDatabaseRequest proto.InternalMessageInfo + +func (m *DeleteDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type DeleteDatabaseMetadata struct { + // ID of the ClickHouse cluster where a database is being deleted. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the ClickHouse database that is being deleted. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDatabaseMetadata) Reset() { *m = DeleteDatabaseMetadata{} } +func (m *DeleteDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteDatabaseMetadata) ProtoMessage() {} +func (*DeleteDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_08aeb62acbe585c3, []int{6} +} +func (m *DeleteDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDatabaseMetadata.Unmarshal(m, b) +} +func (m *DeleteDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDatabaseMetadata.Merge(dst, src) +} +func (m *DeleteDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteDatabaseMetadata.Size(m) +} +func (m *DeleteDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDatabaseMetadata proto.InternalMessageInfo + +func (m *DeleteDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +func init() { + proto.RegisterType((*GetDatabaseRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GetDatabaseRequest") + proto.RegisterType((*ListDatabasesRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListDatabasesRequest") + proto.RegisterType((*ListDatabasesResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListDatabasesResponse") + proto.RegisterType((*CreateDatabaseRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateDatabaseRequest") + proto.RegisterType((*CreateDatabaseMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateDatabaseMetadata") + proto.RegisterType((*DeleteDatabaseRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteDatabaseRequest") + proto.RegisterType((*DeleteDatabaseMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteDatabaseMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DatabaseServiceClient is the client API for DatabaseService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DatabaseServiceClient interface { + // Returns the specified ClickHouse Database resource. + // + // To get the list of available ClickHouse Database resources, make a [List] request. + Get(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) + // Retrieves the list of ClickHouse Database resources in the specified cluster. + List(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) + // Creates a new ClickHouse database in the specified cluster. 
+ Create(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified ClickHouse database. + Delete(ctx context.Context, in *DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type databaseServiceClient struct { + cc *grpc.ClientConn +} + +func NewDatabaseServiceClient(cc *grpc.ClientConn) DatabaseServiceClient { + return &databaseServiceClient{cc} +} + +func (c *databaseServiceClient) Get(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) { + out := new(Database) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) List(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) { + out := new(ListDatabasesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Create(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Delete(ctx context.Context, in *DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServiceServer is the server API for DatabaseService service. +type DatabaseServiceServer interface { + // Returns the specified ClickHouse Database resource. + // + // To get the list of available ClickHouse Database resources, make a [List] request. + Get(context.Context, *GetDatabaseRequest) (*Database, error) + // Retrieves the list of ClickHouse Database resources in the specified cluster. + List(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) + // Creates a new ClickHouse database in the specified cluster. + Create(context.Context, *CreateDatabaseRequest) (*operation.Operation, error) + // Deletes the specified ClickHouse database. 
+ Delete(context.Context, *DeleteDatabaseRequest) (*operation.Operation, error) +} + +func RegisterDatabaseServiceServer(s *grpc.Server, srv DatabaseServiceServer) { + s.RegisterService(&_DatabaseService_serviceDesc, srv) +} + +func _DatabaseService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Get(ctx, req.(*GetDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatabasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).List(ctx, req.(*ListDatabasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Create(ctx, req.(*CreateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.DatabaseService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Delete(ctx, req.(*DeleteDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DatabaseService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.clickhouse.v1.DatabaseService", + HandlerType: (*DatabaseServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _DatabaseService_Get_Handler, + }, + { + MethodName: "List", + Handler: _DatabaseService_List_Handler, + }, + { + MethodName: "Create", + Handler: _DatabaseService_Create_Handler, + }, + { + MethodName: "Delete", + Handler: _DatabaseService_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/clickhouse/v1/database_service.proto", +} + +func init() { + 
proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/database_service.proto", fileDescriptor_database_service_08aeb62acbe585c3) +} + +var fileDescriptor_database_service_08aeb62acbe585c3 = []byte{ + // 702 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5f, 0x4f, 0x13, 0x4b, + 0x1c, 0xcd, 0x50, 0x6e, 0x43, 0x07, 0xb8, 0x24, 0x93, 0x5b, 0xd2, 0x34, 0x17, 0xc2, 0xdd, 0x9b, + 0x60, 0x53, 0xdd, 0xdd, 0x6e, 0x11, 0xa2, 0x02, 0x26, 0x16, 0x04, 0x4d, 0x04, 0x4c, 0x31, 0x31, + 0x41, 0x4c, 0x33, 0xdd, 0xfd, 0xb9, 0x6c, 0x68, 0x77, 0xd7, 0xce, 0xb4, 0xe1, 0x4f, 0x78, 0xd0, + 0x07, 0x8d, 0xbc, 0x9a, 0xf8, 0xe6, 0x97, 0x40, 0xbf, 0x03, 0x24, 0xbe, 0xe1, 0x57, 0x30, 0xc6, + 0x67, 0x1f, 0x7d, 0x32, 0xbb, 0xd3, 0x6e, 0xbb, 0x50, 0x68, 0x05, 0xde, 0x76, 0xe7, 0xf7, 0x3b, + 0x33, 0xe7, 0xcc, 0x9c, 0x33, 0x83, 0x27, 0xb7, 0xa9, 0x6d, 0xc0, 0x96, 0xaa, 0x97, 0x9c, 0xaa, + 0xa1, 0x96, 0x8d, 0xa2, 0xaa, 0x97, 0x2c, 0x7d, 0x73, 0xc3, 0xa9, 0x32, 0x50, 0x6b, 0x9a, 0x6a, + 0x50, 0x4e, 0x8b, 0x94, 0x41, 0x81, 0x41, 0xa5, 0x66, 0xe9, 0xa0, 0xb8, 0x15, 0x87, 0x3b, 0x64, + 0x54, 0xc0, 0x14, 0x1f, 0xa6, 0x94, 0x8d, 0xa2, 0xd2, 0x84, 0x29, 0x35, 0x2d, 0xf9, 0xaf, 0xe9, + 0x38, 0x66, 0x09, 0x54, 0xea, 0x5a, 0x2a, 0xb5, 0x6d, 0x87, 0x53, 0x6e, 0x39, 0x36, 0x13, 0xe8, + 0x64, 0xb2, 0xbe, 0xa8, 0x57, 0x75, 0x5c, 0xa8, 0xf8, 0xc5, 0x7a, 0x6d, 0x3c, 0x44, 0x28, 0xa8, + 0x9e, 0xea, 0x1b, 0x09, 0xf5, 0xd5, 0x68, 0xc9, 0x32, 0x5a, 0xcb, 0x72, 0x97, 0xba, 0x44, 0xbb, + 0xf4, 0x06, 0x61, 0xb2, 0x08, 0x7c, 0xbe, 0x3e, 0x9a, 0x87, 0x97, 0x55, 0x60, 0x9c, 0x5c, 0xc7, + 0x58, 0x2f, 0x55, 0x19, 0x87, 0x4a, 0xc1, 0x32, 0x12, 0x68, 0x0c, 0xa5, 0x62, 0xb9, 0x81, 0x1f, + 0x87, 0x1a, 0xda, 0x3f, 0xd2, 0x7a, 0x67, 0x66, 0x27, 0x33, 0xf9, 0x58, 0xbd, 0xfe, 0xd0, 0x20, + 0x73, 0x78, 0x30, 0xd8, 0x2d, 0x9b, 0x96, 0x21, 0xd1, 0xe3, 0xf7, 0x8f, 0x7a, 0xfd, 0x3f, 0x0f, + 0xb5, 0xbf, 0x9f, 0x51, 0x79, 0xe7, 0x9e, 0xbc, 0x96, 0x91, 0x6f, 0x17, 0xe4, 0xe7, 0x69, 0x31, + 0xc3, 0xd4, 0x44, 0x7e, 0xa0, 0x01, 0x5a, 0xa6, 0x65, 0x90, 0x3e, 0x20, 0xfc, 0xcf, 0x23, 0x8b, + 0x05, 0x4c, 0xd8, 0x85, 0xa8, 0x5c, 0xc3, 0x31, 0x97, 0x9a, 0x50, 0x60, 0xd6, 0x8e, 0xa0, 0x11, + 0xc9, 0xe1, 0x5f, 0x87, 0x5a, 0x74, 0x66, 0x56, 0xcb, 0x64, 0x32, 0xf9, 0x3e, 0xaf, 0xb8, 0x6a, + 0xed, 0x00, 0x49, 0x61, 0xec, 0x37, 0x72, 0x67, 0x13, 0xec, 0x44, 0xc4, 0x9f, 0x35, 0xb6, 0x7f, + 0xa4, 0xfd, 0xe5, 0x77, 0xe6, 0xfd, 0x59, 0x9e, 0x78, 0x35, 0xe9, 0x2d, 0xc2, 0xf1, 0x13, 0xc4, + 0x98, 0xeb, 0xd8, 0x0c, 0xc8, 0x02, 0x8e, 0x35, 0x24, 0xb0, 0x04, 0x1a, 0x8b, 0xa4, 0xfa, 0xb3, + 0x29, 0xe5, 0x7c, 0x7f, 0x28, 0xc1, 0x46, 0x37, 0xa1, 0x64, 0x1c, 0x0f, 0xd9, 0xb0, 0xc5, 0x0b, + 0x2d, 0x84, 0xfc, 0x1d, 0xcc, 0x0f, 0x7a, 0xc3, 0x8f, 0x03, 0x26, 0x1f, 0x11, 0x8e, 0xcf, 0x55, + 0x80, 0x72, 0xb8, 0xd4, 0x71, 0x3d, 0x6d, 0x39, 0x2e, 0xe6, 0x82, 0xee, 0x2f, 0xd6, 0x9f, 0xbd, + 0xd1, 0x2d, 0xf5, 0x55, 0x17, 0xf4, 0x5c, 0xaf, 0x37, 0x7b, 0xf3, 0x08, 0xbd, 0x31, 0x69, 0x1d, + 0x0f, 0x87, 0xe9, 0x2d, 0x01, 0xa7, 0x5e, 0x07, 0x19, 0x39, 0xcd, 0xaf, 0x95, 0xd1, 0xff, 0x6d, + 0x0d, 0x74, 0xc2, 0x20, 0xef, 0x10, 0x8e, 0xcf, 0x43, 0x09, 0x2e, 0xa9, 0xfe, 0x4a, 0xcc, 0xba, + 0x8e, 0x87, 0xc3, 0x54, 0xae, 0x52, 0x69, 0xf6, 0x73, 0x14, 0x0f, 0x05, 0x9b, 0x2d, 0x6e, 0x1f, + 0xf2, 0x09, 0xe1, 0xc8, 0x22, 0x70, 0x92, 0xed, 0x74, 0x4a, 0xa7, 0xc3, 0x9c, 0xec, 0xda, 0x94, + 0xd2, 0xf2, 0xeb, 0xaf, 0xdf, 0xde, 0xf7, 0x3c, 0x20, 0x0b, 0x6a, 0x99, 0xda, 0xd4, 0x04, 0x43, + 0x0e, 0x5f, 0x1e, 0x75, 0x21, 0x4c, 0xdd, 0x6d, 0x8a, 0xdc, 0x0b, 0xae, 
0x14, 0xa6, 0xee, 0x86, + 0xc4, 0xed, 0x79, 0xac, 0x7b, 0xbd, 0xec, 0x90, 0x9b, 0x9d, 0x28, 0xb4, 0x8b, 0x7e, 0x72, 0xf2, + 0x0f, 0x51, 0x22, 0x97, 0xd2, 0x5d, 0x5f, 0xc5, 0x2d, 0x32, 0x75, 0x31, 0x15, 0xe4, 0x0b, 0xc2, + 0x51, 0x61, 0x64, 0xd2, 0x91, 0x41, 0xdb, 0x3c, 0x26, 0xff, 0x0b, 0xc3, 0x9a, 0x57, 0xf8, 0x4a, + 0xe3, 0x4b, 0x32, 0x0f, 0x8e, 0xd3, 0xd2, 0x99, 0x81, 0xe9, 0x6b, 0x8c, 0xf8, 0x52, 0xa6, 0xa5, + 0x0b, 0x4a, 0xb9, 0x83, 0xd2, 0xe4, 0x3b, 0xc2, 0x51, 0x61, 0xd6, 0xce, 0x6a, 0xda, 0xe6, 0xab, + 0x1b, 0x35, 0xaf, 0xd0, 0xc1, 0x71, 0x5a, 0x3d, 0x33, 0x15, 0x71, 0xf1, 0x2a, 0x8a, 0x37, 0xa7, + 0x58, 0x7d, 0xa1, 0xdc, 0x2f, 0xbb, 0x7c, 0x5b, 0x98, 0x2d, 0x7d, 0x45, 0x66, 0xcb, 0xad, 0xac, + 0x2d, 0x99, 0x16, 0xdf, 0xa8, 0x16, 0x15, 0xdd, 0x29, 0xab, 0x82, 0xb2, 0x2c, 0x9e, 0x41, 0xd3, + 0x91, 0x4d, 0xb0, 0xfd, 0xd5, 0xd5, 0xf3, 0xdf, 0xc7, 0xe9, 0xe6, 0x5f, 0x31, 0xea, 0x03, 0x26, + 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x81, 0xf7, 0x00, 0x2b, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset.pb.go new file mode 100644 index 000000000..c572bb96e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset.pb.go @@ -0,0 +1,112 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/resource_preset.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ResourcePreset resource for describing hardware configuration presets. +type ResourcePreset struct { + // ID of the ResourcePreset resource. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // IDs of availability zones where the resource preset is available. + ZoneIds []string `protobuf:"bytes,2,rep,name=zone_ids,json=zoneIds,proto3" json:"zone_ids,omitempty"` + // Number of CPU cores for a ClickHouse host created with the preset. + Cores int64 `protobuf:"varint,3,opt,name=cores,proto3" json:"cores,omitempty"` + // RAM volume for a ClickHouse host created with the preset, in bytes. 
+ Memory int64 `protobuf:"varint,4,opt,name=memory,proto3" json:"memory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourcePreset) Reset() { *m = ResourcePreset{} } +func (m *ResourcePreset) String() string { return proto.CompactTextString(m) } +func (*ResourcePreset) ProtoMessage() {} +func (*ResourcePreset) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_8b47fe1881d4dab9, []int{0} +} +func (m *ResourcePreset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourcePreset.Unmarshal(m, b) +} +func (m *ResourcePreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourcePreset.Marshal(b, m, deterministic) +} +func (dst *ResourcePreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePreset.Merge(dst, src) +} +func (m *ResourcePreset) XXX_Size() int { + return xxx_messageInfo_ResourcePreset.Size(m) +} +func (m *ResourcePreset) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePreset.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePreset proto.InternalMessageInfo + +func (m *ResourcePreset) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ResourcePreset) GetZoneIds() []string { + if m != nil { + return m.ZoneIds + } + return nil +} + +func (m *ResourcePreset) GetCores() int64 { + if m != nil { + return m.Cores + } + return 0 +} + +func (m *ResourcePreset) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func init() { + proto.RegisterType((*ResourcePreset)(nil), "yandex.cloud.mdb.clickhouse.v1.ResourcePreset") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/resource_preset.proto", fileDescriptor_resource_preset_8b47fe1881d4dab9) +} + +var fileDescriptor_resource_preset_8b47fe1881d4dab9 = []byte{ + // 215 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0xcf, 0xb1, 0x4b, 0x03, 0x31, + 0x14, 0xc7, 0x71, 0xee, 0x4e, 0xab, 0xcd, 0xd0, 0x21, 0x88, 0xc4, 0x45, 0x0e, 0xa7, 0x5b, 0x9a, + 0x50, 0x74, 0x73, 0x73, 0x73, 0x10, 0x25, 0xa3, 0x4b, 0x31, 0x79, 0x8f, 0x6b, 0xb0, 0xb9, 0x57, + 0x92, 0x4b, 0xb1, 0xfe, 0xf5, 0x62, 0x72, 0x70, 0x5b, 0xc7, 0xef, 0x83, 0x0f, 0xbc, 0x1f, 0x7b, + 0x3a, 0x7d, 0x0d, 0x80, 0x3f, 0xca, 0xee, 0x29, 0x81, 0xf2, 0x60, 0x94, 0xdd, 0x3b, 0xfb, 0xbd, + 0xa3, 0x14, 0x51, 0x1d, 0x37, 0x2a, 0x60, 0xa4, 0x14, 0x2c, 0x6e, 0x0f, 0x01, 0x23, 0x8e, 0xf2, + 0x10, 0x68, 0x24, 0x7e, 0x5f, 0x94, 0xcc, 0x4a, 0x7a, 0x30, 0x72, 0x56, 0xf2, 0xb8, 0x79, 0x70, + 0x6c, 0xa5, 0x27, 0xf8, 0x91, 0x1d, 0x5f, 0xb1, 0xda, 0x81, 0xa8, 0xda, 0xaa, 0x5b, 0xea, 0xda, + 0x01, 0xbf, 0x63, 0xd7, 0xbf, 0x34, 0xe0, 0xd6, 0x41, 0x14, 0x75, 0xdb, 0x74, 0x4b, 0x7d, 0xf5, + 0xdf, 0xaf, 0x10, 0xf9, 0x0d, 0xbb, 0xb4, 0x14, 0x30, 0x8a, 0xa6, 0xad, 0xba, 0x46, 0x97, 0xe0, + 0xb7, 0x6c, 0xe1, 0xd1, 0x53, 0x38, 0x89, 0x8b, 0x7c, 0x9e, 0xea, 0xe5, 0xfd, 0xf3, 0xad, 0x77, + 0xe3, 0x2e, 0x19, 0x69, 0xc9, 0xab, 0xf2, 0xd7, 0xba, 0xac, 0xe9, 0x69, 0xdd, 0xe3, 0x90, 0x3f, + 0x56, 0xe7, 0x67, 0x3e, 0xcf, 0x65, 0x16, 0x19, 0x3c, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0x10, + 0xb1, 0xd0, 0xe7, 0x1a, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.pb.go new file mode 100644 index 000000000..87d56a854 --- /dev/null +++ 
b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.pb.go @@ -0,0 +1,325 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetResourcePresetRequest struct { + // ID of the resource preset to return. + // To get the resource preset ID, use a [ResourcePresetService.List] request. + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResourcePresetRequest) Reset() { *m = GetResourcePresetRequest{} } +func (m *GetResourcePresetRequest) String() string { return proto.CompactTextString(m) } +func (*GetResourcePresetRequest) ProtoMessage() {} +func (*GetResourcePresetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_3c94510b9cb0b96a, []int{0} +} +func (m *GetResourcePresetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResourcePresetRequest.Unmarshal(m, b) +} +func (m *GetResourcePresetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResourcePresetRequest.Marshal(b, m, deterministic) +} +func (dst *GetResourcePresetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResourcePresetRequest.Merge(dst, src) +} +func (m *GetResourcePresetRequest) XXX_Size() int { + return xxx_messageInfo_GetResourcePresetRequest.Size(m) +} +func (m *GetResourcePresetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetResourcePresetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResourcePresetRequest proto.InternalMessageInfo + +func (m *GetResourcePresetRequest) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +type ListResourcePresetsRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, Set [page_token] to the [ListResourcePresetsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsRequest) Reset() { *m = ListResourcePresetsRequest{} } +func (m *ListResourcePresetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsRequest) ProtoMessage() {} +func (*ListResourcePresetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_3c94510b9cb0b96a, []int{1} +} +func (m *ListResourcePresetsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsRequest.Unmarshal(m, b) +} +func (m *ListResourcePresetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsRequest.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsRequest.Merge(dst, src) +} +func (m *ListResourcePresetsRequest) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsRequest.Size(m) +} +func (m *ListResourcePresetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsRequest proto.InternalMessageInfo + +func (m *ListResourcePresetsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListResourcePresetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListResourcePresetsResponse struct { + // List of ResourcePreset resources. + ResourcePresets []*ResourcePreset `protobuf:"bytes,1,rep,name=resource_presets,json=resourcePresets,proto3" json:"resource_presets,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + // for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsResponse) Reset() { *m = ListResourcePresetsResponse{} } +func (m *ListResourcePresetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsResponse) ProtoMessage() {} +func (*ListResourcePresetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_3c94510b9cb0b96a, []int{2} +} +func (m *ListResourcePresetsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsResponse.Unmarshal(m, b) +} +func (m *ListResourcePresetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsResponse.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsResponse.Merge(dst, src) +} +func (m *ListResourcePresetsResponse) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsResponse.Size(m) +} +func (m *ListResourcePresetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsResponse proto.InternalMessageInfo + +func (m *ListResourcePresetsResponse) GetResourcePresets() []*ResourcePreset { + if m != nil { + return m.ResourcePresets + } + return nil +} + +func (m *ListResourcePresetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetResourcePresetRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GetResourcePresetRequest") + proto.RegisterType((*ListResourcePresetsRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListResourcePresetsRequest") + proto.RegisterType((*ListResourcePresetsResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListResourcePresetsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ResourcePresetServiceClient is the client API for ResourcePresetService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ResourcePresetServiceClient interface { + // Returns the specified ResourcePreset resource. + // + // To get the list of available ResourcePreset resources, make a [List] request. + Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) + // Retrieves the list of available ResourcePreset resources. 
+ List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) +} + +type resourcePresetServiceClient struct { + cc *grpc.ClientConn +} + +func NewResourcePresetServiceClient(cc *grpc.ClientConn) ResourcePresetServiceClient { + return &resourcePresetServiceClient{cc} +} + +func (c *resourcePresetServiceClient) Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) { + out := new(ResourcePreset) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ResourcePresetService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePresetServiceClient) List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) { + out := new(ListResourcePresetsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.ResourcePresetService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ResourcePresetServiceServer is the server API for ResourcePresetService service. +type ResourcePresetServiceServer interface { + // Returns the specified ResourcePreset resource. + // + // To get the list of available ResourcePreset resources, make a [List] request. + Get(context.Context, *GetResourcePresetRequest) (*ResourcePreset, error) + // Retrieves the list of available ResourcePreset resources. + List(context.Context, *ListResourcePresetsRequest) (*ListResourcePresetsResponse, error) +} + +func RegisterResourcePresetServiceServer(s *grpc.Server, srv ResourcePresetServiceServer) { + s.RegisterService(&_ResourcePresetService_serviceDesc, srv) +} + +func _ResourcePresetService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourcePresetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ResourcePresetService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).Get(ctx, req.(*GetResourcePresetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePresetService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcePresetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.ResourcePresetService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).List(ctx, req.(*ListResourcePresetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourcePresetService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.clickhouse.v1.ResourcePresetService", + HandlerType: (*ResourcePresetServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ResourcePresetService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ResourcePresetService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto", fileDescriptor_resource_preset_service_3c94510b9cb0b96a) +} + +var fileDescriptor_resource_preset_service_3c94510b9cb0b96a = []byte{ + // 470 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6b, 0x13, 0x41, + 0x18, 0x66, 0xba, 0xb5, 0x98, 0x51, 0x69, 0x19, 0x10, 0x96, 0xf5, 0x83, 0xb0, 0x87, 0xba, 0x97, + 0xcc, 0x64, 0xab, 0x82, 0x34, 0xc9, 0x25, 0x1e, 0x8a, 0xa0, 0x58, 0xb6, 0x22, 0xe8, 0x25, 0x4c, + 0x76, 0x5e, 0xb6, 0x43, 0x93, 0x99, 0x75, 0x67, 0x36, 0xd4, 0x8a, 0x20, 0x1e, 0x7b, 0xf5, 0x0f, + 0xf8, 0x0f, 0xbc, 0xf8, 0x1f, 0xda, 0xbb, 0x7f, 0xc1, 0x83, 0xbf, 0xc1, 0x93, 0xec, 0x6c, 0x4a, + 0x4d, 0xec, 0x87, 0xe9, 0x71, 0xf7, 0x99, 0xe7, 0x79, 0xde, 0xe7, 0xfd, 0xc0, 0xdd, 0xf7, 0x5c, + 0x09, 0xd8, 0x67, 0xe9, 0x48, 0x97, 0x82, 0x8d, 0xc5, 0x90, 0xa5, 0x23, 0x99, 0xee, 0xed, 0xea, + 0xd2, 0x00, 0x9b, 0xc4, 0xac, 0x00, 0xa3, 0xcb, 0x22, 0x85, 0x41, 0x5e, 0x80, 0x01, 0x3b, 0x30, + 0x50, 0x4c, 0x64, 0x0a, 0x34, 0x2f, 0xb4, 0xd5, 0xe4, 0x7e, 0xcd, 0xa6, 0x8e, 0x4d, 0xc7, 0x62, + 0x48, 0x4f, 0xd9, 0x74, 0x12, 0x07, 0x77, 0x33, 0xad, 0xb3, 0x11, 0x30, 0x9e, 0x4b, 0xc6, 0x95, + 0xd2, 0x96, 0x5b, 0xa9, 0x95, 0xa9, 0xd9, 0xc1, 0xbd, 0x19, 0xef, 0x09, 0x1f, 0x49, 0xe1, 0xf0, + 0x29, 0xfc, 0x68, 0xb1, 0xd2, 0x6a, 0x56, 0xf8, 0x1a, 0xfb, 0x5b, 0x60, 0x93, 0x29, 0xb6, 0xed, + 0xa0, 0x04, 0xde, 0x95, 0x60, 0x2c, 0xd9, 0xc4, 0x64, 0x3e, 0x8f, 0x14, 0x3e, 0x6a, 0xa2, 0xa8, + 0xd1, 0xbf, 0xf9, 0xeb, 0x28, 0x46, 0x87, 0xc7, 0xf1, 0x72, 0xb7, 0xf7, 0xb8, 0x9d, 0xac, 0x15, + 0x33, 0x02, 0xcf, 0x44, 0xa8, 0x71, 0xf0, 0x5c, 0x9a, 0x39, 0x61, 0x73, 0xa2, 0xfc, 0x00, 0x37, + 0x72, 0x9e, 0xc1, 0xc0, 0xc8, 0x03, 0xf0, 0x97, 0x9a, 0x28, 0xf2, 0xfa, 0xf8, 0xf7, 0x51, 0xbc, + 0xd2, 0xed, 0xc5, 0xed, 0x76, 0x3b, 0xb9, 0x5e, 0x81, 0x3b, 0xf2, 0x00, 0x48, 0x84, 0xb1, 0x7b, + 0x68, 0xf5, 0x1e, 0x28, 0xdf, 0x73, 0xd6, 0x8d, 0xc3, 0xe3, 0xf8, 0x9a, 0x7b, 0x99, 0x38, 0x95, + 0x57, 0x15, 0x16, 0x7e, 0x45, 0xf8, 0xce, 0x99, 0x8e, 0x26, 0xd7, 0xca, 0x00, 0x79, 0x83, 0xd7, + 0xe6, 0xc2, 0x18, 0x1f, 0x35, 0xbd, 0xe8, 0xc6, 0x06, 0xa5, 0x17, 0x8f, 0x85, 0xce, 0x75, 0x67, + 0x75, 0x36, 0xac, 0x21, 0xeb, 0x78, 0x55, 0xc1, 0xbe, 0x1d, 0xfc, 0x55, 0x69, 0x95, 0xa9, 0x91, + 0xdc, 0xaa, 0x7e, 0x6f, 0x9f, 0x94, 0xb8, 0xf1, 0xc9, 0xc3, 0xb7, 0x67, 0xb5, 0x76, 0xea, 0xf5, + 0x20, 0xdf, 0x11, 0xf6, 0xb6, 0xc0, 0x92, 0x27, 0x97, 0x95, 0x72, 0xde, 0xac, 0x82, 0x05, 0x43, + 0x84, 0x4f, 0x3f, 0xff, 0xf8, 0xf9, 0x65, 0xa9, 0x47, 0x3a, 0x6c, 0xcc, 0x15, 0xcf, 0x40, 0xb4, + 0xce, 0xde, 0x96, 0x69, 0x46, 0xf6, 0xe1, 0xdf, 0x4d, 0xf8, 0x48, 0xbe, 0x21, 0xbc, 0x5c, 0xf5, + 0x9c, 0x6c, 0x5e, 0xe6, 0x7e, 0xfe, 0x2e, 0x04, 0x9d, 0x2b, 0x71, 0xeb, 0xa9, 0x86, 0xd4, 0xc5, + 0x88, 0xc8, 0xfa, 0xff, 0xc5, 0xe8, 0xbf, 0x7c, 0xfb, 0x22, 0x93, 0x76, 0xb7, 0x1c, 0xd2, 0x54, + 0x8f, 0x59, 0x6d, 0xdc, 0xaa, 0x2f, 0x26, 0xd3, 0xad, 0x0c, 0x94, 0xbb, 0x0a, 0x76, 0xf1, 0x29, + 0x75, 0x4e, 0xbf, 0x86, 0x2b, 0x8e, 0xf0, 0xf0, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xea, + 0x85, 0x77, 0x19, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user.pb.go new file mode 100644 index 000000000..e30042084 --- /dev/null +++ 
b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/user.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ClickHouse User resource. For more information, see +// the [Developer's guide](/docs/managed-clickhouse/concepts). +type User struct { + // Name of the ClickHouse user. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the ClickHouse cluster the user belongs to. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Set of permissions granted to the user. + Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_user_002833f6340c62e6, []int{0} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_User.Unmarshal(m, b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_User.Marshal(b, m, deterministic) +} +func (dst *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(dst, src) +} +func (m *User) XXX_Size() int { + return xxx_messageInfo_User.Size(m) +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *User) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *User) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *User) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +type Permission struct { + // Name of the database that the permission grants access to. 
+ DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { + return fileDescriptor_user_002833f6340c62e6, []int{1} +} +func (m *Permission) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Permission.Unmarshal(m, b) +} +func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Permission.Marshal(b, m, deterministic) +} +func (dst *Permission) XXX_Merge(src proto.Message) { + xxx_messageInfo_Permission.Merge(dst, src) +} +func (m *Permission) XXX_Size() int { + return xxx_messageInfo_Permission.Size(m) +} +func (m *Permission) XXX_DiscardUnknown() { + xxx_messageInfo_Permission.DiscardUnknown(m) +} + +var xxx_messageInfo_Permission proto.InternalMessageInfo + +func (m *Permission) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type UserSpec struct { + // Name of the ClickHouse user. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Password of the ClickHouse user. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // Set of permissions to grant to the user. + Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserSpec) Reset() { *m = UserSpec{} } +func (m *UserSpec) String() string { return proto.CompactTextString(m) } +func (*UserSpec) ProtoMessage() {} +func (*UserSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_user_002833f6340c62e6, []int{2} +} +func (m *UserSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserSpec.Unmarshal(m, b) +} +func (m *UserSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserSpec.Marshal(b, m, deterministic) +} +func (dst *UserSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSpec.Merge(dst, src) +} +func (m *UserSpec) XXX_Size() int { + return xxx_messageInfo_UserSpec.Size(m) +} +func (m *UserSpec) XXX_DiscardUnknown() { + xxx_messageInfo_UserSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSpec proto.InternalMessageInfo + +func (m *UserSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UserSpec) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *UserSpec) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +func init() { + proto.RegisterType((*User)(nil), "yandex.cloud.mdb.clickhouse.v1.User") + proto.RegisterType((*Permission)(nil), "yandex.cloud.mdb.clickhouse.v1.Permission") + proto.RegisterType((*UserSpec)(nil), "yandex.cloud.mdb.clickhouse.v1.UserSpec") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/user.proto", fileDescriptor_user_002833f6340c62e6) +} + +var fileDescriptor_user_002833f6340c62e6 = []byte{ + // 332 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xac, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 
0xd1, 0xcf, 0x4d, 0x49, 0xd2, 0x4f, 0xce, 0xc9, + 0x4c, 0xce, 0xce, 0xc8, 0x2f, 0x2d, 0x4e, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x2d, 0x4e, 0x2d, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x83, 0x28, 0xd5, 0x03, 0x2b, 0xd5, 0xcb, 0x4d, 0x49, + 0xd2, 0x43, 0x28, 0xd5, 0x2b, 0x33, 0x94, 0x92, 0x45, 0x31, 0xaa, 0x2c, 0x31, 0x27, 0x33, 0x25, + 0xb1, 0x24, 0x33, 0x3f, 0x0f, 0xa2, 0x5d, 0xa9, 0x9d, 0x91, 0x8b, 0x25, 0xb4, 0x38, 0xb5, 0x48, + 0x48, 0x88, 0x8b, 0x25, 0x2f, 0x31, 0x37, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc, + 0x16, 0x92, 0xe5, 0xe2, 0x4a, 0xce, 0x29, 0x2d, 0x2e, 0x49, 0x2d, 0x8a, 0xcf, 0x4c, 0x91, 0x60, + 0x02, 0xcb, 0x70, 0x42, 0x45, 0x3c, 0x53, 0x84, 0x7c, 0xb8, 0xb8, 0x0b, 0x52, 0x8b, 0x72, 0x33, + 0x8b, 0x8b, 0x33, 0xf3, 0xf3, 0x8a, 0x25, 0x98, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xb4, 0xf4, 0xf0, + 0x3b, 0x48, 0x2f, 0x00, 0xae, 0x25, 0x08, 0x59, 0xbb, 0x92, 0x21, 0x17, 0x17, 0x42, 0x4a, 0x48, + 0x99, 0x8b, 0x37, 0x25, 0xb1, 0x24, 0x31, 0x29, 0xb1, 0x38, 0x35, 0x1e, 0xc9, 0x5d, 0x3c, 0x30, + 0x41, 0xbf, 0xc4, 0xdc, 0x54, 0xa5, 0x6d, 0x8c, 0x5c, 0x1c, 0x20, 0xc7, 0x07, 0x17, 0xa4, 0x26, + 0x0b, 0x19, 0x22, 0x7b, 0xc0, 0x49, 0xf6, 0xc5, 0x71, 0x43, 0xc6, 0x4f, 0xc7, 0x0d, 0x79, 0xa3, + 0x13, 0x75, 0xab, 0x1c, 0x75, 0xa3, 0x0c, 0x74, 0x2d, 0xe3, 0x63, 0xb5, 0xba, 0x4e, 0x18, 0xb2, + 0xd8, 0xd8, 0x9a, 0x19, 0x43, 0xfd, 0xa7, 0xc9, 0xc5, 0x51, 0x90, 0x58, 0x5c, 0x5c, 0x9e, 0x5f, + 0x04, 0xf5, 0x9d, 0x13, 0x2f, 0x48, 0x5b, 0xd7, 0x09, 0x43, 0x56, 0x0b, 0x5d, 0x43, 0x23, 0x8b, + 0x20, 0xb8, 0x34, 0x75, 0xfd, 0xea, 0xe4, 0x1f, 0xe5, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, + 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x31, 0x44, 0x17, 0x12, 0x43, 0xe9, 0xf9, 0xba, 0xe9, 0xa9, 0x79, + 0xe0, 0xc8, 0xd1, 0xc7, 0x9f, 0x0a, 0xac, 0x11, 0xbc, 0x24, 0x36, 0xb0, 0x06, 0x63, 0x40, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x8b, 0x24, 0x59, 0x97, 0x39, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user_service.pb.go new file mode 100644 index 000000000..7146245ab --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/user_service.pb.go @@ -0,0 +1,1099 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/clickhouse/v1/user_service.proto + +package clickhouse // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetUserRequest struct { + // ID of the ClickHouse cluster the user belongs to. 
+ // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the ClickHouse User resource to return. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUserRequest) Reset() { *m = GetUserRequest{} } +func (m *GetUserRequest) String() string { return proto.CompactTextString(m) } +func (*GetUserRequest) ProtoMessage() {} +func (*GetUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{0} +} +func (m *GetUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUserRequest.Unmarshal(m, b) +} +func (m *GetUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUserRequest.Marshal(b, m, deterministic) +} +func (dst *GetUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUserRequest.Merge(dst, src) +} +func (m *GetUserRequest) XXX_Size() int { + return xxx_messageInfo_GetUserRequest.Size(m) +} +func (m *GetUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUserRequest proto.InternalMessageInfo + +func (m *GetUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type ListUsersRequest struct { + // ID of the cluster to list ClickHouse users in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsersRequest) Reset() { *m = ListUsersRequest{} } +func (m *ListUsersRequest) String() string { return proto.CompactTextString(m) } +func (*ListUsersRequest) ProtoMessage() {} +func (*ListUsersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{1} +} +func (m *ListUsersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsersRequest.Unmarshal(m, b) +} +func (m *ListUsersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsersRequest.Marshal(b, m, deterministic) +} +func (dst *ListUsersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsersRequest.Merge(dst, src) +} +func (m *ListUsersRequest) XXX_Size() int { + return xxx_messageInfo_ListUsersRequest.Size(m) +} +func (m *ListUsersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsersRequest proto.InternalMessageInfo + +func (m *ListUsersRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListUsersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUsersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListUsersResponse struct { + // List of ClickHouse User resources. + Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value + // for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsersResponse) Reset() { *m = ListUsersResponse{} } +func (m *ListUsersResponse) String() string { return proto.CompactTextString(m) } +func (*ListUsersResponse) ProtoMessage() {} +func (*ListUsersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{2} +} +func (m *ListUsersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsersResponse.Unmarshal(m, b) +} +func (m *ListUsersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsersResponse.Marshal(b, m, deterministic) +} +func (dst *ListUsersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsersResponse.Merge(dst, src) +} +func (m *ListUsersResponse) XXX_Size() int { + return xxx_messageInfo_ListUsersResponse.Size(m) +} +func (m *ListUsersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsersResponse proto.InternalMessageInfo + +func (m *ListUsersResponse) GetUsers() []*User { + if m != nil { + return m.Users + } + return nil +} + +func (m *ListUsersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateUserRequest struct { + // ID of the ClickHouse cluster to create a user in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Properties of the user to be created. + UserSpec *UserSpec `protobuf:"bytes,2,opt,name=user_spec,json=userSpec,proto3" json:"user_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserRequest) Reset() { *m = CreateUserRequest{} } +func (m *CreateUserRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUserRequest) ProtoMessage() {} +func (*CreateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{3} +} +func (m *CreateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserRequest.Unmarshal(m, b) +} +func (m *CreateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserRequest.Marshal(b, m, deterministic) +} +func (dst *CreateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserRequest.Merge(dst, src) +} +func (m *CreateUserRequest) XXX_Size() int { + return xxx_messageInfo_CreateUserRequest.Size(m) +} +func (m *CreateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserRequest proto.InternalMessageInfo + +func (m *CreateUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateUserRequest) GetUserSpec() *UserSpec { + if m != nil { + return m.UserSpec + } + return nil +} + +type CreateUserMetadata struct { + // ID of the ClickHouse cluster the user is being created in. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being created. 
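+	//
+	// A usage sketch for the corresponding CreateUserRequest (illustrative only;
+	// client, ctx and all identifiers below are hypothetical):
+	//
+	//   op, err := client.Create(ctx, &clickhouse.CreateUserRequest{
+	//       ClusterId: "some-cluster-id",
+	//       UserSpec: &clickhouse.UserSpec{
+	//           Name:        "alice",
+	//           Password:    "strong-password",
+	//           Permissions: []*clickhouse.Permission{{DatabaseName: "sales"}},
+	//       },
+	//   })
+	//   // op is a long-running *operation.Operation; err reports call failure.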
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserMetadata) Reset() { *m = CreateUserMetadata{} } +func (m *CreateUserMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateUserMetadata) ProtoMessage() {} +func (*CreateUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{4} +} +func (m *CreateUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserMetadata.Unmarshal(m, b) +} +func (m *CreateUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserMetadata.Merge(dst, src) +} +func (m *CreateUserMetadata) XXX_Size() int { + return xxx_messageInfo_CreateUserMetadata.Size(m) +} +func (m *CreateUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserMetadata proto.InternalMessageInfo + +func (m *CreateUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type UpdateUserRequest struct { + // ID of the ClickHouse cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to be updated. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Field mask that specifies which fields of the ClickHouse User resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New password for the user. + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + // New set of permissions for the user. 
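+	//
+	// A usage sketch (illustrative only): replacing the permission set, with the
+	// field mask naming the field being changed (the exact mask path accepted by
+	// the service is an assumption here; field_mask is
+	// google.golang.org/genproto/protobuf/field_mask):
+	//
+	//   req := &clickhouse.UpdateUserRequest{
+	//       ClusterId:   "some-cluster-id", // hypothetical ID
+	//       UserName:    "alice",           // hypothetical user name
+	//       UpdateMask:  &field_mask.FieldMask{Paths: []string{"permissions"}},
+	//       Permissions: []*clickhouse.Permission{{DatabaseName: "sales"}},
+	//   }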
+ Permissions []*Permission `protobuf:"bytes,5,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserRequest) Reset() { *m = UpdateUserRequest{} } +func (m *UpdateUserRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUserRequest) ProtoMessage() {} +func (*UpdateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{5} +} +func (m *UpdateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserRequest.Unmarshal(m, b) +} +func (m *UpdateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserRequest.Merge(dst, src) +} +func (m *UpdateUserRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUserRequest.Size(m) +} +func (m *UpdateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserRequest proto.InternalMessageInfo + +func (m *UpdateUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *UpdateUserRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUserRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *UpdateUserRequest) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +type UpdateUserMetadata struct { + // ID of the ClickHouse cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being updated. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserMetadata) Reset() { *m = UpdateUserMetadata{} } +func (m *UpdateUserMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateUserMetadata) ProtoMessage() {} +func (*UpdateUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{6} +} +func (m *UpdateUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserMetadata.Unmarshal(m, b) +} +func (m *UpdateUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserMetadata.Merge(dst, src) +} +func (m *UpdateUserMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateUserMetadata.Size(m) +} +func (m *UpdateUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserMetadata proto.InternalMessageInfo + +func (m *UpdateUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type DeleteUserRequest struct { + // ID of the ClickHouse cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to delete. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserRequest) Reset() { *m = DeleteUserRequest{} } +func (m *DeleteUserRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUserRequest) ProtoMessage() {} +func (*DeleteUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{7} +} +func (m *DeleteUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserRequest.Unmarshal(m, b) +} +func (m *DeleteUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUserRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserRequest.Merge(dst, src) +} +func (m *DeleteUserRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUserRequest.Size(m) +} +func (m *DeleteUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserRequest proto.InternalMessageInfo + +func (m *DeleteUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type DeleteUserMetadata struct { + // ID of the ClickHouse cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being deleted. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserMetadata) Reset() { *m = DeleteUserMetadata{} } +func (m *DeleteUserMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteUserMetadata) ProtoMessage() {} +func (*DeleteUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{8} +} +func (m *DeleteUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserMetadata.Unmarshal(m, b) +} +func (m *DeleteUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUserMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserMetadata.Merge(dst, src) +} +func (m *DeleteUserMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteUserMetadata.Size(m) +} +func (m *DeleteUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserMetadata proto.InternalMessageInfo + +func (m *DeleteUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type GrantUserPermissionRequest struct { + // ID of the ClickHouse cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to grant the permission to. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Permission that should be granted to the specified user. 
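+	//
+	// A usage sketch (illustrative only; client, ctx and identifiers are hypothetical):
+	//
+	//   op, err := client.GrantPermission(ctx, &clickhouse.GrantUserPermissionRequest{
+	//       ClusterId:  "some-cluster-id",
+	//       UserName:   "alice",
+	//       Permission: &clickhouse.Permission{DatabaseName: "sales"},
+	//   })
+	//   // op is a long-running *operation.Operation; err reports call failure.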
+ Permission *Permission `protobuf:"bytes,3,opt,name=permission,proto3" json:"permission,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrantUserPermissionRequest) Reset() { *m = GrantUserPermissionRequest{} } +func (m *GrantUserPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*GrantUserPermissionRequest) ProtoMessage() {} +func (*GrantUserPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{9} +} +func (m *GrantUserPermissionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrantUserPermissionRequest.Unmarshal(m, b) +} +func (m *GrantUserPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrantUserPermissionRequest.Marshal(b, m, deterministic) +} +func (dst *GrantUserPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrantUserPermissionRequest.Merge(dst, src) +} +func (m *GrantUserPermissionRequest) XXX_Size() int { + return xxx_messageInfo_GrantUserPermissionRequest.Size(m) +} +func (m *GrantUserPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GrantUserPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GrantUserPermissionRequest proto.InternalMessageInfo + +func (m *GrantUserPermissionRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GrantUserPermissionRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *GrantUserPermissionRequest) GetPermission() *Permission { + if m != nil { + return m.Permission + } + return nil +} + +type GrantUserPermissionMetadata struct { + // ID of the ClickHouse cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being granted a permission. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrantUserPermissionMetadata) Reset() { *m = GrantUserPermissionMetadata{} } +func (m *GrantUserPermissionMetadata) String() string { return proto.CompactTextString(m) } +func (*GrantUserPermissionMetadata) ProtoMessage() {} +func (*GrantUserPermissionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{10} +} +func (m *GrantUserPermissionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrantUserPermissionMetadata.Unmarshal(m, b) +} +func (m *GrantUserPermissionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrantUserPermissionMetadata.Marshal(b, m, deterministic) +} +func (dst *GrantUserPermissionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrantUserPermissionMetadata.Merge(dst, src) +} +func (m *GrantUserPermissionMetadata) XXX_Size() int { + return xxx_messageInfo_GrantUserPermissionMetadata.Size(m) +} +func (m *GrantUserPermissionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_GrantUserPermissionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_GrantUserPermissionMetadata proto.InternalMessageInfo + +func (m *GrantUserPermissionMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GrantUserPermissionMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type RevokeUserPermissionRequest struct { + // ID of the ClickHouse cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to revoke a permission from. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Name of the database that the user should lose access to. 
+ DatabaseName string `protobuf:"bytes,3,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserPermissionRequest) Reset() { *m = RevokeUserPermissionRequest{} } +func (m *RevokeUserPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*RevokeUserPermissionRequest) ProtoMessage() {} +func (*RevokeUserPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{11} +} +func (m *RevokeUserPermissionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserPermissionRequest.Unmarshal(m, b) +} +func (m *RevokeUserPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserPermissionRequest.Marshal(b, m, deterministic) +} +func (dst *RevokeUserPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserPermissionRequest.Merge(dst, src) +} +func (m *RevokeUserPermissionRequest) XXX_Size() int { + return xxx_messageInfo_RevokeUserPermissionRequest.Size(m) +} +func (m *RevokeUserPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserPermissionRequest proto.InternalMessageInfo + +func (m *RevokeUserPermissionRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RevokeUserPermissionRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *RevokeUserPermissionRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type RevokeUserPermissionMetadata struct { + // ID of the ClickHouse cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user whose permission is being revoked. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserPermissionMetadata) Reset() { *m = RevokeUserPermissionMetadata{} } +func (m *RevokeUserPermissionMetadata) String() string { return proto.CompactTextString(m) } +func (*RevokeUserPermissionMetadata) ProtoMessage() {} +func (*RevokeUserPermissionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_c4b5b8d2a1dd03ab, []int{12} +} +func (m *RevokeUserPermissionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserPermissionMetadata.Unmarshal(m, b) +} +func (m *RevokeUserPermissionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserPermissionMetadata.Marshal(b, m, deterministic) +} +func (dst *RevokeUserPermissionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserPermissionMetadata.Merge(dst, src) +} +func (m *RevokeUserPermissionMetadata) XXX_Size() int { + return xxx_messageInfo_RevokeUserPermissionMetadata.Size(m) +} +func (m *RevokeUserPermissionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserPermissionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserPermissionMetadata proto.InternalMessageInfo + +func (m *RevokeUserPermissionMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RevokeUserPermissionMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func init() { + proto.RegisterType((*GetUserRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GetUserRequest") + proto.RegisterType((*ListUsersRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.ListUsersRequest") + proto.RegisterType((*ListUsersResponse)(nil), "yandex.cloud.mdb.clickhouse.v1.ListUsersResponse") + proto.RegisterType((*CreateUserRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateUserRequest") + proto.RegisterType((*CreateUserMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.CreateUserMetadata") + proto.RegisterType((*UpdateUserRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateUserRequest") + proto.RegisterType((*UpdateUserMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.UpdateUserMetadata") + proto.RegisterType((*DeleteUserRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteUserRequest") + proto.RegisterType((*DeleteUserMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.DeleteUserMetadata") + proto.RegisterType((*GrantUserPermissionRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.GrantUserPermissionRequest") + proto.RegisterType((*GrantUserPermissionMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.GrantUserPermissionMetadata") + proto.RegisterType((*RevokeUserPermissionRequest)(nil), "yandex.cloud.mdb.clickhouse.v1.RevokeUserPermissionRequest") + proto.RegisterType((*RevokeUserPermissionMetadata)(nil), "yandex.cloud.mdb.clickhouse.v1.RevokeUserPermissionMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UserServiceClient is the client API for UserService service. 
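+//
+// A usage sketch (illustrative only), assuming an importing package with an
+// authenticated *grpc.ClientConn named conn and a context.Context named ctx:
+//
+//   client := clickhouse.NewUserServiceClient(conn)
+//   user, err := client.Get(ctx, &clickhouse.GetUserRequest{
+//       ClusterId: "some-cluster-id", // hypothetical ID
+//       UserName:  "alice",           // hypothetical user name
+//   })
+//   if err != nil {
+//       // handle the error
+//   }
+//   _ = user
+//
+// The mutating methods (Create, Update, Delete, GrantPermission and
+// RevokePermission) each return a long-running *operation.Operation.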
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UserServiceClient interface { + // Returns the specified ClickHouse User resource. + // + // To get the list of available ClickHouse User resources, make a [List] request. + Get(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*User, error) + // Retrieves the list of ClickHouse User resources in the specified cluster. + List(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) + // Creates a ClickHouse user in the specified cluster. + Create(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified ClickHouse user. + Update(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified ClickHouse user. + Delete(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Grants a permission to the specified ClickHouse user. + GrantPermission(ctx context.Context, in *GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Revokes a permission from the specified ClickHouse user. + RevokePermission(ctx context.Context, in *RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type userServiceClient struct { + cc *grpc.ClientConn +} + +func NewUserServiceClient(cc *grpc.ClientConn) UserServiceClient { + return &userServiceClient{cc} +} + +func (c *userServiceClient) Get(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*User, error) { + out := new(User) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) List(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) { + out := new(ListUsersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Create(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Update(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Delete(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) GrantPermission(ctx context.Context, in *GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/GrantPermission", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) RevokePermission(ctx context.Context, in *RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.clickhouse.v1.UserService/RevokePermission", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserServiceServer is the server API for UserService service. +type UserServiceServer interface { + // Returns the specified ClickHouse User resource. + // + // To get the list of available ClickHouse User resources, make a [List] request. + Get(context.Context, *GetUserRequest) (*User, error) + // Retrieves the list of ClickHouse User resources in the specified cluster. + List(context.Context, *ListUsersRequest) (*ListUsersResponse, error) + // Creates a ClickHouse user in the specified cluster. + Create(context.Context, *CreateUserRequest) (*operation.Operation, error) + // Updates the specified ClickHouse user. + Update(context.Context, *UpdateUserRequest) (*operation.Operation, error) + // Deletes the specified ClickHouse user. + Delete(context.Context, *DeleteUserRequest) (*operation.Operation, error) + // Grants a permission to the specified ClickHouse user. + GrantPermission(context.Context, *GrantUserPermissionRequest) (*operation.Operation, error) + // Revokes a permission from the specified ClickHouse user. + RevokePermission(context.Context, *RevokeUserPermissionRequest) (*operation.Operation, error) +} + +func RegisterUserServiceServer(s *grpc.Server, srv UserServiceServer) { + s.RegisterService(&_UserService_serviceDesc, srv) +} + +func _UserService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Get(ctx, req.(*GetUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).List(ctx, req.(*ListUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Create(ctx, 
req.(*CreateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Update(ctx, req.(*UpdateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Delete(ctx, req.(*DeleteUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_GrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GrantUserPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).GrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/GrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).GrantPermission(ctx, req.(*GrantUserPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_RevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeUserPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).RevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.clickhouse.v1.UserService/RevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).RevokePermission(ctx, req.(*RevokeUserPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UserService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.clickhouse.v1.UserService", + HandlerType: (*UserServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _UserService_Get_Handler, + }, + { + MethodName: "List", + Handler: _UserService_List_Handler, + }, + { + MethodName: "Create", + Handler: _UserService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _UserService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _UserService_Delete_Handler, + }, + { + MethodName: "GrantPermission", + Handler: _UserService_GrantPermission_Handler, + }, + { + MethodName: "RevokePermission", + Handler: _UserService_RevokePermission_Handler, + }, + }, + Streams: 
[]grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/clickhouse/v1/user_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/clickhouse/v1/user_service.proto", fileDescriptor_user_service_c4b5b8d2a1dd03ab) +} + +var fileDescriptor_user_service_c4b5b8d2a1dd03ab = []byte{ + // 990 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xd6, 0x24, 0x8e, 0x15, 0xbf, 0x6e, 0xda, 0x66, 0x24, 0x24, 0x6b, 0xd3, 0xa0, 0xb0, 0x94, + 0x12, 0x5c, 0x79, 0xd7, 0xeb, 0xf2, 0x51, 0x9c, 0x96, 0x8f, 0x14, 0x1a, 0x01, 0x4d, 0x13, 0x6d, + 0xda, 0x03, 0xa9, 0x90, 0x35, 0xf6, 0x4e, 0xdd, 0x95, 0xed, 0xdd, 0x65, 0x67, 0xd7, 0x6d, 0x52, + 0x2a, 0xa1, 0x1e, 0x2b, 0x71, 0x81, 0x9f, 0xc0, 0x81, 0x13, 0x1c, 0x72, 0xe5, 0x07, 0xb8, 0x57, + 0x82, 0xf8, 0x07, 0x1c, 0x38, 0x97, 0x03, 0x12, 0x27, 0x34, 0x33, 0x9b, 0xec, 0x3a, 0x36, 0x59, + 0x27, 0x8e, 0x9a, 0xdb, 0xae, 0xe7, 0x7d, 0xe6, 0x7d, 0x9e, 0xf7, 0xd3, 0x0b, 0xc6, 0x16, 0x71, + 0x2c, 0xfa, 0x48, 0x6f, 0xb4, 0xdd, 0xd0, 0xd2, 0x3b, 0x56, 0x5d, 0x6f, 0xb4, 0xed, 0x46, 0xeb, + 0x81, 0x1b, 0x32, 0xaa, 0x77, 0x0d, 0x3d, 0x64, 0xd4, 0xaf, 0x31, 0xea, 0x77, 0xed, 0x06, 0xd5, + 0x3c, 0xdf, 0x0d, 0x5c, 0xfc, 0xaa, 0x84, 0x68, 0x02, 0xa2, 0x75, 0xac, 0xba, 0x16, 0x43, 0xb4, + 0xae, 0xa1, 0x5c, 0x68, 0xba, 0x6e, 0xb3, 0x4d, 0x75, 0xe2, 0xd9, 0x3a, 0x71, 0x1c, 0x37, 0x20, + 0x81, 0xed, 0x3a, 0x4c, 0xa2, 0x95, 0x85, 0xe8, 0x54, 0xbc, 0xd5, 0xc3, 0xfb, 0xfa, 0x7d, 0x9b, + 0xb6, 0xad, 0x5a, 0x87, 0xb0, 0x56, 0x64, 0x71, 0xa9, 0x8f, 0x92, 0xeb, 0x51, 0x5f, 0x5c, 0x10, + 0x3f, 0x45, 0x76, 0x6f, 0x8d, 0x40, 0x3d, 0x32, 0x55, 0x22, 0x53, 0x4e, 0xe9, 0xe0, 0x35, 0xf3, + 0x7d, 0xd7, 0x74, 0x49, 0xdb, 0xb6, 0x12, 0xc7, 0xea, 0x16, 0x9c, 0x5d, 0xa1, 0xc1, 0x5d, 0x46, + 0x7d, 0x93, 0x7e, 0x1d, 0x52, 0x16, 0xe0, 0xcb, 0x00, 0x8d, 0x76, 0xc8, 0x02, 0xea, 0xd7, 0x6c, + 0xab, 0x80, 0x16, 0xd0, 0x62, 0x6e, 0xf9, 0xcc, 0x5f, 0x3d, 0x03, 0x3d, 0x7b, 0x6e, 0x64, 0xae, + 0x5d, 0x7f, 0xa7, 0x6c, 0xe6, 0xa2, 0xf3, 0xcf, 0x2c, 0x5c, 0x85, 0x9c, 0x08, 0xa1, 0x43, 0x3a, + 0xb4, 0x30, 0x21, 0x6c, 0xe7, 0xb9, 0xed, 0x8b, 0x9e, 0x31, 0x73, 0x8f, 0x94, 0xb6, 0x3f, 0x2e, + 0x6d, 0x96, 0x4b, 0xef, 0xd7, 0xbe, 0x2a, 0x4a, 0xf0, 0xbb, 0x57, 0xcc, 0x69, 0x6e, 0x7f, 0x9b, + 0x74, 0xa8, 0xfa, 0x3d, 0x82, 0xf3, 0xb7, 0x6c, 0x26, 0x9c, 0xb3, 0x63, 0x79, 0x7f, 0x13, 0x72, + 0x1e, 0x69, 0xd2, 0x1a, 0xb3, 0xb7, 0xa5, 0xf7, 0xc9, 0x65, 0xf8, 0xb7, 0x67, 0x64, 0xaf, 0x5d, + 0x37, 0xca, 0xe5, 0xb2, 0x39, 0xcd, 0x0f, 0x37, 0xec, 0x6d, 0x8a, 0x17, 0x01, 0x84, 0x61, 0xe0, + 0xb6, 0xa8, 0x53, 0x98, 0x14, 0xb7, 0xe6, 0x9e, 0x3d, 0x37, 0xa6, 0x84, 0xa5, 0x29, 0x6e, 0xb9, + 0xc3, 0xcf, 0xd4, 0x87, 0x30, 0x9b, 0xe0, 0xc4, 0x3c, 0xd7, 0x61, 0x14, 0x57, 0x61, 0x8a, 0xb3, + 0x66, 0x05, 0xb4, 0x30, 0xb9, 0x98, 0xaf, 0x5c, 0xd4, 0x0e, 0x2f, 0x11, 0x4d, 0x84, 0x53, 0x42, + 0xf0, 0x25, 0x38, 0xe7, 0xd0, 0x47, 0x41, 0x2d, 0xe1, 0x5f, 0xc4, 0xc9, 0x9c, 0xe1, 0x3f, 0xaf, + 0xef, 0x3b, 0xfe, 0x0e, 0xc1, 0xec, 0x0d, 0x9f, 0x92, 0x80, 0x1e, 0x3b, 0x19, 0x5f, 0x44, 0xc9, + 0x60, 0x1e, 0x6d, 0x08, 0x27, 0xf9, 0xca, 0xe2, 0x28, 0x54, 0x37, 0x3c, 0xda, 0x58, 0xce, 0xf0, + 0x5b, 0x65, 0x76, 0xf8, 0xbb, 0xba, 0x0e, 0x38, 0xa6, 0xb3, 0x4a, 0x03, 0x62, 0x91, 0x80, 0xe0, + 0xf9, 0x41, 0x3e, 0x49, 0x06, 0x73, 0x03, 0xe5, 0x90, 0xc8, 0xf7, 0x2f, 0x13, 0x30, 0x7b, 0xd7, + 0xb3, 0xc6, 0x51, 0x38, 0x46, 0xb9, 0xe1, 0x25, 0xc8, 0x87, 0xc2, 0xbb, 0x68, 0x46, 0x51, 0x04, + 0xf9, 0x8a, 0xa2, 0xc9, 0x7e, 0xd5, 0xf6, 0xfa, 0x55, 0xbb, 0xc9, 0xfb, 0x75, 
0x95, 0xb0, 0x96, + 0x09, 0xd2, 0x9c, 0x3f, 0xe3, 0x37, 0x60, 0xda, 0x23, 0x8c, 0x3d, 0x74, 0x7d, 0xab, 0x90, 0x89, + 0xcb, 0xe7, 0x6a, 0xc9, 0xa8, 0x5c, 0x35, 0xf7, 0x8f, 0xf0, 0x2d, 0xc8, 0x7b, 0xd4, 0xef, 0xd8, + 0x8c, 0xf1, 0x91, 0x50, 0x98, 0x12, 0xe5, 0x52, 0x4c, 0xcb, 0xc1, 0xfa, 0x3e, 0xc4, 0x4c, 0xc2, + 0x79, 0x0a, 0xe2, 0x78, 0x9d, 0x48, 0x0a, 0xbe, 0x81, 0xd9, 0x4f, 0x68, 0x9b, 0x9e, 0x4e, 0x06, + 0xb8, 0x9e, 0xd8, 0xfb, 0x89, 0xe8, 0xf9, 0x0d, 0x81, 0xb2, 0xe2, 0x13, 0x47, 0xf4, 0x6b, 0x22, + 0x8c, 0x2f, 0xbb, 0xb6, 0x3e, 0x07, 0x88, 0x13, 0x17, 0x95, 0xd6, 0x51, 0xd2, 0x9e, 0x40, 0xab, + 0x5f, 0xc2, 0xdc, 0x10, 0x49, 0x27, 0x12, 0xae, 0x1e, 0x82, 0x39, 0x93, 0x76, 0xdd, 0x16, 0x3d, + 0xe5, 0x78, 0x7d, 0x08, 0x33, 0x5c, 0x4c, 0x9d, 0x30, 0x2a, 0xf1, 0x72, 0x24, 0x2b, 0x2f, 0x7a, + 0xc6, 0xd9, 0x04, 0xb6, 0x94, 0x00, 0x9f, 0xd9, 0x03, 0x08, 0x25, 0x9b, 0x70, 0x61, 0x98, 0x90, + 0x93, 0x88, 0x52, 0xe5, 0xe7, 0x3c, 0xe4, 0xc5, 0x58, 0x94, 0x7f, 0x0b, 0xf0, 0x8f, 0x08, 0x26, + 0x57, 0x68, 0x80, 0xb5, 0xb4, 0x84, 0xf6, 0x2f, 0x52, 0x65, 0xa4, 0x35, 0xa1, 0xde, 0x7c, 0xfa, + 0xfb, 0x9f, 0x3f, 0x4c, 0x7c, 0x84, 0x3f, 0xd0, 0x3b, 0xc4, 0x21, 0x4d, 0x6a, 0x95, 0xfa, 0xd7, + 0x7c, 0x44, 0x9a, 0xe9, 0x8f, 0x63, 0x41, 0x4f, 0xc4, 0xf2, 0x67, 0xfa, 0xe3, 0x7d, 0x11, 0x4f, + 0xf0, 0x4f, 0x08, 0x32, 0x7c, 0x73, 0xe1, 0x72, 0x9a, 0xdb, 0x83, 0x3b, 0x57, 0x31, 0x8e, 0x80, + 0x90, 0x1b, 0x51, 0xad, 0x0a, 0xd6, 0x6f, 0xe3, 0xca, 0xd1, 0x59, 0xe3, 0x5f, 0x11, 0x64, 0xe5, + 0x6a, 0xc1, 0xa9, 0x9e, 0x07, 0x36, 0xa2, 0xf2, 0x5a, 0x3f, 0x24, 0xfe, 0xbb, 0xb3, 0xb6, 0xf7, + 0xa4, 0xde, 0xdb, 0xd9, 0x2d, 0x2a, 0x43, 0xd7, 0x57, 0x86, 0xbf, 0x09, 0xea, 0xef, 0xa9, 0xc7, + 0xa0, 0x5e, 0x45, 0x45, 0xdc, 0x43, 0x90, 0x95, 0x53, 0x39, 0x9d, 0xfd, 0xc0, 0xb6, 0x1b, 0x85, + 0x7d, 0x53, 0xb2, 0x1f, 0x32, 0xf9, 0x63, 0xf6, 0x37, 0x2a, 0x63, 0x96, 0x0b, 0x57, 0xf2, 0x07, + 0x82, 0xac, 0x9c, 0xc7, 0xe9, 0x4a, 0x06, 0xb6, 0xc6, 0x28, 0x4a, 0xc2, 0x9d, 0xdd, 0xe2, 0xe5, + 0xa1, 0x33, 0xff, 0x95, 0x83, 0x5b, 0xf7, 0xd3, 0x8e, 0x17, 0x6c, 0xc9, 0x4e, 0x28, 0x8e, 0xdb, + 0x09, 0x7f, 0x23, 0x38, 0x27, 0x26, 0x68, 0x3c, 0x17, 0x70, 0x35, 0xb5, 0x77, 0xff, 0x77, 0x8b, + 0x8c, 0xa2, 0xf4, 0x5b, 0xb4, 0xb3, 0x5b, 0x7c, 0xfd, 0xf0, 0xc1, 0x1d, 0x67, 0x6f, 0x43, 0xbd, + 0x3d, 0x66, 0xf6, 0x9a, 0xfd, 0xf2, 0x78, 0x36, 0xff, 0x41, 0x70, 0x5e, 0x8e, 0xc4, 0x84, 0xec, + 0xa5, 0x34, 0xd9, 0x87, 0x6c, 0x83, 0x51, 0x74, 0x3f, 0xe5, 0xba, 0x2f, 0xa6, 0xcc, 0xe2, 0x58, + 0xf8, 0x1d, 0x75, 0x6d, 0x4c, 0xe1, 0xfe, 0x01, 0x85, 0x55, 0x54, 0x5c, 0x5e, 0xdb, 0x5c, 0x6d, + 0xda, 0xc1, 0x83, 0xb0, 0xae, 0x35, 0xdc, 0x8e, 0x2e, 0x39, 0x97, 0xe4, 0xe7, 0x4e, 0xd3, 0x2d, + 0x35, 0xa9, 0x23, 0xaa, 0x4c, 0x3f, 0xfc, 0x73, 0x6a, 0x29, 0x7e, 0xab, 0x67, 0x05, 0xe0, 0xca, + 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x78, 0xfd, 0xcf, 0x3d, 0x0e, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup.pb.go new file mode 100644 index 000000000..5cc752033 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/mongodb/v1/backup.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A MongoDB Backup resource. For more information, see the +// [Developer's Guide](/docs/managed-mongodb/concepts). +type Backup struct { + // ID of the backup. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the backup belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + // (i.e. when the backup operation was completed). + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // ID of the MongoDB cluster that the backup was created for. + SourceClusterId string `protobuf:"bytes,4,opt,name=source_cluster_id,json=sourceClusterId,proto3" json:"source_cluster_id,omitempty"` + // Time when the backup operation was started. + StartedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup) Reset() { *m = Backup{} } +func (m *Backup) String() string { return proto.CompactTextString(m) } +func (*Backup) ProtoMessage() {} +func (*Backup) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_7bed62cf0a79db64, []int{0} +} +func (m *Backup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Backup.Unmarshal(m, b) +} +func (m *Backup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Backup.Marshal(b, m, deterministic) +} +func (dst *Backup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup.Merge(dst, src) +} +func (m *Backup) XXX_Size() int { + return xxx_messageInfo_Backup.Size(m) +} +func (m *Backup) XXX_DiscardUnknown() { + xxx_messageInfo_Backup.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup proto.InternalMessageInfo + +func (m *Backup) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Backup) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Backup) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Backup) GetSourceClusterId() string { + if m != nil { + return m.SourceClusterId + } + return "" +} + +func (m *Backup) GetStartedAt() *timestamp.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func init() { + proto.RegisterType((*Backup)(nil), "yandex.cloud.mdb.mongodb.v1.Backup") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/backup.proto", fileDescriptor_backup_7bed62cf0a79db64) +} + +var fileDescriptor_backup_7bed62cf0a79db64 = []byte{ + // 261 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xc1, 0x4b, 0xc3, 0x30, + 0x14, 0xc6, 0x69, 0xd5, 0x61, 0x23, 0x28, 0xf6, 0x54, 0xb6, 0x83, 0xc3, 0x53, 0x11, 0x96, 0x30, + 0x3d, 0x89, 0xa7, 0xcd, 0x83, 0xec, 0x3a, 0x3c, 0x79, 0x29, 0x49, 0x5e, 0x16, 0x8b, 0x4d, 0xdf, + 0x68, 0x5f, 0x86, 0xfe, 0xa5, 0xfe, 0x3b, 0x42, 0x92, 0x5d, 0xdd, 0x2d, 0x7c, 0xf9, 0xbd, 0xef, + 0x07, 0x1f, 0xab, 0x7f, 0x64, 0x0f, 0xe6, 0x5b, 0xe8, 0x0e, 0x3d, 0x08, 0x07, 0x4a, 0x38, 0xec, + 0x2d, 0x82, 0x12, 0x87, 0xa5, 0x50, 0x52, 0x7f, 0xf9, 0x3d, 0xdf, 0x0f, 0x48, 0x58, 0xce, 0x22, + 0xc9, 0x03, 0xc9, 0x1d, 0x28, 0x9e, 0x48, 0x7e, 0x58, 0x4e, 0xef, 0x2c, 0xa2, 0xed, 0x8c, 0x08, + 0xa8, 0xf2, 0x3b, 0x41, 0xad, 0x33, 0x23, 0x49, 0x97, 0xae, 0xef, 0x7f, 0x33, 0x36, 0x59, 0x87, + 0xba, 0xf2, 0x9a, 0xe5, 0x2d, 0x54, 0xd9, 0x3c, 0xab, 0x8b, 0x6d, 0xde, 0x42, 0x39, 0x63, 0xc5, + 0x0e, 0x3b, 0x30, 0x43, 0xd3, 0x42, 0x95, 0x87, 0xf8, 0x32, 0x06, 0x1b, 0x28, 0x9f, 0x19, 0xd3, + 0x83, 0x91, 0x64, 0xa0, 0x91, 0x54, 0x9d, 0xcd, 0xb3, 0xfa, 0xea, 0x71, 0xca, 0xa3, 0x8d, 0x1f, + 0x6d, 0xfc, 0xfd, 0x68, 0xdb, 0x16, 0x89, 0x5e, 0x51, 0xf9, 0xc0, 0x6e, 0x47, 0xf4, 0x83, 0x36, + 0x8d, 0xee, 0xfc, 0x48, 0xb1, 0xff, 0x3c, 0xf4, 0xdf, 0xc4, 0x8f, 0xd7, 0x98, 0x47, 0xcd, 0x48, + 0x72, 0x48, 0x9a, 0x8b, 0xd3, 0x9a, 0x44, 0xaf, 0x68, 0xbd, 0xf9, 0x78, 0xb3, 0x2d, 0x7d, 0x7a, + 0xc5, 0x35, 0x3a, 0x11, 0x47, 0x5a, 0xc4, 0x39, 0x2d, 0x2e, 0xac, 0xe9, 0xc3, 0xb9, 0xf8, 0x67, + 0xe7, 0x97, 0xf4, 0x54, 0x93, 0x80, 0x3e, 0xfd, 0x05, 0x00, 0x00, 0xff, 0xff, 0xae, 0x68, 0xff, + 0xeb, 0x95, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup_service.pb.go new file mode 100644 index 000000000..a893a8987 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/backup_service.pb.go @@ -0,0 +1,331 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/backup_service.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetBackupRequest struct { + // ID of the backup to return information about. + // To get the backup ID, use a [ClusterService.ListBackups] request. 
+ BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBackupRequest) Reset() { *m = GetBackupRequest{} } +func (m *GetBackupRequest) String() string { return proto.CompactTextString(m) } +func (*GetBackupRequest) ProtoMessage() {} +func (*GetBackupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_8136fe766e84b00c, []int{0} +} +func (m *GetBackupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetBackupRequest.Unmarshal(m, b) +} +func (m *GetBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetBackupRequest.Marshal(b, m, deterministic) +} +func (dst *GetBackupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBackupRequest.Merge(dst, src) +} +func (m *GetBackupRequest) XXX_Size() int { + return xxx_messageInfo_GetBackupRequest.Size(m) +} +func (m *GetBackupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetBackupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBackupRequest proto.InternalMessageInfo + +func (m *GetBackupRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type ListBackupsRequest struct { + // ID of the folder to list backups in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListBackupsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsRequest) Reset() { *m = ListBackupsRequest{} } +func (m *ListBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBackupsRequest) ProtoMessage() {} +func (*ListBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_8136fe766e84b00c, []int{1} +} +func (m *ListBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsRequest.Unmarshal(m, b) +} +func (m *ListBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsRequest.Merge(dst, src) +} +func (m *ListBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListBackupsRequest.Size(m) +} +func (m *ListBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsRequest proto.InternalMessageInfo + +func (m *ListBackupsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListBackupsResponse struct { + // List of Backup resources. 
+ Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsResponse) Reset() { *m = ListBackupsResponse{} } +func (m *ListBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBackupsResponse) ProtoMessage() {} +func (*ListBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_8136fe766e84b00c, []int{2} +} +func (m *ListBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsResponse.Unmarshal(m, b) +} +func (m *ListBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsResponse.Merge(dst, src) +} +func (m *ListBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListBackupsResponse.Size(m) +} +func (m *ListBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsResponse proto.InternalMessageInfo + +func (m *ListBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetBackupRequest)(nil), "yandex.cloud.mdb.mongodb.v1.GetBackupRequest") + proto.RegisterType((*ListBackupsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListBackupsRequest") + proto.RegisterType((*ListBackupsResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListBackupsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BackupServiceClient interface { + // Returns the specified MongoDB Backup resource. + // + // To get the list of available MongoDB Backup resources, make a [List] request. + Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) + // Retrieves the list of Backup resources available for the specified folder. 
+ List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) +} + +type backupServiceClient struct { + cc *grpc.ClientConn +} + +func NewBackupServiceClient(cc *grpc.ClientConn) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) { + out := new(Backup) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.BackupService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) { + out := new(ListBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.BackupService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +type BackupServiceServer interface { + // Returns the specified MongoDB Backup resource. + // + // To get the list of available MongoDB Backup resources, make a [List] request. + Get(context.Context, *GetBackupRequest) (*Backup, error) + // Retrieves the list of Backup resources available for the specified folder. + List(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) +} + +func RegisterBackupServiceServer(s *grpc.Server, srv BackupServiceServer) { + s.RegisterService(&_BackupService_serviceDesc, srv) +} + +func _BackupService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.BackupService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).Get(ctx, req.(*GetBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.BackupService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).List(ctx, req.(*ListBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BackupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.mongodb.v1.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _BackupService_Get_Handler, + }, + { + MethodName: "List", + Handler: _BackupService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/mongodb/v1/backup_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/backup_service.proto", fileDescriptor_backup_service_8136fe766e84b00c) +} + +var fileDescriptor_backup_service_8136fe766e84b00c = []byte{ + // 459 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, 0xff, 0x8c, 0x92, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0xe5, 0x24, 0x94, 0xf8, 0xa0, 0x02, 0x1d, 0x4b, 0x94, 0x52, 0x29, 0xb8, 0x12, 0x75, + 0x87, 0xf8, 0xec, 0xa2, 0x4e, 0x34, 0x4b, 0x96, 0x28, 0x12, 0x03, 0x72, 0x99, 0x58, 0xa2, 0x73, + 0xee, 0x71, 0x9c, 0x1a, 0xdf, 0x99, 0xdc, 0xc5, 0x2a, 0x05, 0x16, 0xc6, 0x0c, 0x0c, 0xf0, 0x39, + 0xf8, 0x1c, 0xed, 0xce, 0x57, 0x60, 0xe0, 0x33, 0x30, 0x21, 0xdf, 0x39, 0x40, 0xa9, 0x64, 0xba, + 0x9d, 0xee, 0xff, 0x7e, 0xef, 0xfd, 0xf5, 0x7f, 0x0f, 0xc5, 0x6f, 0xa9, 0x64, 0x70, 0x46, 0xe6, + 0x0b, 0xb5, 0x62, 0x24, 0x67, 0x19, 0xc9, 0x95, 0xe4, 0x8a, 0x65, 0xa4, 0x4c, 0x48, 0x46, 0xe7, + 0xa7, 0xab, 0x62, 0xa6, 0x61, 0x59, 0x8a, 0x39, 0x44, 0xc5, 0x52, 0x19, 0x85, 0x77, 0x1c, 0x11, + 0x59, 0x22, 0xca, 0x59, 0x16, 0xd5, 0x44, 0x54, 0x26, 0xfd, 0x87, 0x5c, 0x29, 0xbe, 0x00, 0x42, + 0x0b, 0x41, 0xa8, 0x94, 0xca, 0x50, 0x23, 0x94, 0xd4, 0x0e, 0xed, 0xef, 0x5e, 0x19, 0x56, 0xd2, + 0x85, 0x60, 0x56, 0xaf, 0xe5, 0xf0, 0xff, 0x5e, 0x5c, 0x65, 0x70, 0x84, 0xee, 0x4f, 0xc0, 0x8c, + 0xed, 0x57, 0x0a, 0x6f, 0x56, 0xa0, 0x0d, 0x7e, 0x84, 0xfc, 0xda, 0xaf, 0x60, 0x3d, 0x6f, 0xe0, + 0x85, 0xfe, 0xb8, 0xf3, 0xe3, 0x22, 0xf1, 0xd2, 0xae, 0xfb, 0x9e, 0xb2, 0xe0, 0xb3, 0x87, 0xf0, + 0x33, 0xa1, 0x6b, 0x50, 0x6f, 0xc8, 0x03, 0xe4, 0xbf, 0x52, 0x0b, 0x06, 0xcb, 0x3f, 0xe4, 0xdd, + 0x8a, 0x5c, 0x5f, 0x26, 0x9d, 0xe3, 0xd1, 0x51, 0x9c, 0x76, 0x9d, 0x3c, 0x65, 0x78, 0x1f, 0xf9, + 0x05, 0xe5, 0x30, 0xd3, 0xe2, 0x1c, 0x7a, 0xad, 0x81, 0x17, 0xb6, 0xc7, 0xe8, 0xe7, 0x45, 0xb2, + 0x75, 0x3c, 0x4a, 0xe2, 0x38, 0x4e, 0xbb, 0x95, 0x78, 0x22, 0xce, 0x01, 0x87, 0x08, 0xd9, 0x42, + 0xa3, 0x4e, 0x41, 0xf6, 0xda, 0xb6, 0xa9, 0xbf, 0xbe, 0x4c, 0x6e, 0xd9, 0xca, 0xd4, 0x76, 0x79, + 0x51, 0x69, 0xc1, 0x7b, 0xf4, 0xe0, 0x8a, 0x27, 0x5d, 0x28, 0xa9, 0x01, 0x8f, 0xd0, 0x6d, 0xe7, + 0x5b, 0xf7, 0xbc, 0x41, 0x3b, 0xbc, 0x73, 0xb8, 0x17, 0x35, 0x04, 0x1f, 0xd5, 0x59, 0x6c, 0x18, + 0xfc, 0x18, 0xdd, 0x93, 0x70, 0x66, 0x66, 0x7f, 0x99, 0xa8, 0xec, 0xfa, 0xe9, 0x76, 0xf5, 0xfd, + 0x7c, 0x33, 0xfd, 0xf0, 0x6b, 0x0b, 0x6d, 0x3b, 0xf6, 0xc4, 0x6d, 0x19, 0xaf, 0x3d, 0xd4, 0x9e, + 0x80, 0xc1, 0xc3, 0xc6, 0x79, 0xff, 0xc6, 0xdf, 0xbf, 0x89, 0xbd, 0x80, 0x7c, 0xfc, 0xf6, 0xfd, + 0x4b, 0xeb, 0x00, 0xef, 0x93, 0x9c, 0x4a, 0xca, 0x81, 0x0d, 0xaf, 0x6d, 0x58, 0x93, 0x77, 0xbf, + 0xd7, 0xf8, 0x01, 0x7f, 0xf2, 0x50, 0xa7, 0x4a, 0x07, 0x93, 0xc6, 0xf6, 0xd7, 0x97, 0xda, 0x8f, + 0x6f, 0x0e, 0xb8, 0xc4, 0x83, 0x3d, 0x6b, 0x6e, 0x17, 0xef, 0x34, 0x98, 0x1b, 0x4f, 0x5f, 0x4e, + 0xb8, 0x30, 0xaf, 0x57, 0x59, 0x34, 0x57, 0x39, 0x71, 0x23, 0x86, 0xee, 0x60, 0xb9, 0x1a, 0x72, + 0x90, 0xf6, 0x40, 0x49, 0xc3, 0x25, 0x3f, 0xad, 0x9f, 0xd9, 0x96, 0x2d, 0x7d, 0xf2, 0x2b, 0x00, + 0x00, 0xff, 0xff, 0x1f, 0x32, 0xcb, 0x4a, 0x83, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster.pb.go new file mode 100644 index 000000000..876abe324 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster.pb.go @@ -0,0 +1,987 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/mongodb/v1/cluster.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Deployment environment. +type Cluster_Environment int32 + +const ( + Cluster_ENVIRONMENT_UNSPECIFIED Cluster_Environment = 0 + // Stable environment with a conservative update policy: + // only hotfixes are applied during regular maintenance. + Cluster_PRODUCTION Cluster_Environment = 1 + // Environment with more aggressive update policy: new versions + // are rolled out irrespective of backward compatibility. + Cluster_PRESTABLE Cluster_Environment = 2 +) + +var Cluster_Environment_name = map[int32]string{ + 0: "ENVIRONMENT_UNSPECIFIED", + 1: "PRODUCTION", + 2: "PRESTABLE", +} +var Cluster_Environment_value = map[string]int32{ + "ENVIRONMENT_UNSPECIFIED": 0, + "PRODUCTION": 1, + "PRESTABLE": 2, +} + +func (x Cluster_Environment) String() string { + return proto.EnumName(Cluster_Environment_name, int32(x)) +} +func (Cluster_Environment) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{0, 0} +} + +type Cluster_Health int32 + +const ( + // State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). + Cluster_HEALTH_UNKNOWN Cluster_Health = 0 + // Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). + Cluster_ALIVE Cluster_Health = 1 + // Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). + Cluster_DEAD Cluster_Health = 2 + // Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). + Cluster_DEGRADED Cluster_Health = 3 +) + +var Cluster_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Cluster_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Cluster_Health) String() string { + return proto.EnumName(Cluster_Health_name, int32(x)) +} +func (Cluster_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{0, 1} +} + +type Cluster_Status int32 + +const ( + // Cluster state is unknown. + Cluster_STATUS_UNKNOWN Cluster_Status = 0 + // Cluster is being created. + Cluster_CREATING Cluster_Status = 1 + // Cluster is running normally. + Cluster_RUNNING Cluster_Status = 2 + // Cluster encountered a problem and cannot operate. + Cluster_ERROR Cluster_Status = 3 + // Cluster is being updated. + Cluster_UPDATING Cluster_Status = 4 + // Cluster is stopping. + Cluster_STOPPING Cluster_Status = 5 + // Cluster stopped. + Cluster_STOPPED Cluster_Status = 6 + // Cluster is starting. 
+ Cluster_STARTING Cluster_Status = 7 +) + +var Cluster_Status_name = map[int32]string{ + 0: "STATUS_UNKNOWN", + 1: "CREATING", + 2: "RUNNING", + 3: "ERROR", + 4: "UPDATING", + 5: "STOPPING", + 6: "STOPPED", + 7: "STARTING", +} +var Cluster_Status_value = map[string]int32{ + "STATUS_UNKNOWN": 0, + "CREATING": 1, + "RUNNING": 2, + "ERROR": 3, + "UPDATING": 4, + "STOPPING": 5, + "STOPPED": 6, + "STARTING": 7, +} + +func (x Cluster_Status) String() string { + return proto.EnumName(Cluster_Status_name, int32(x)) +} +func (Cluster_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{0, 2} +} + +type Host_Role int32 + +const ( + // Role of the host in the cluster is unknown. + Host_ROLE_UNKNOWN Host_Role = 0 + // Host is the primary MongoDB server in the cluster. + Host_PRIMARY Host_Role = 1 + // Host is a secondary MongoDB server in the cluster. + Host_SECONDARY Host_Role = 2 +) + +var Host_Role_name = map[int32]string{ + 0: "ROLE_UNKNOWN", + 1: "PRIMARY", + 2: "SECONDARY", +} +var Host_Role_value = map[string]int32{ + "ROLE_UNKNOWN": 0, + "PRIMARY": 1, + "SECONDARY": 2, +} + +func (x Host_Role) String() string { + return proto.EnumName(Host_Role_name, int32(x)) +} +func (Host_Role) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{4, 0} +} + +type Host_Health int32 + +const ( + // Health of the host is unknown. + Host_HEALTH_UNKNOWN Host_Health = 0 + // The host is performing all its functions normally. + Host_ALIVE Host_Health = 1 + // The host is inoperable, and cannot perform any of its essential functions. + Host_DEAD Host_Health = 2 + // The host is degraded, and can perform only some of its essential functions. + Host_DEGRADED Host_Health = 3 +) + +var Host_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Host_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Host_Health) String() string { + return proto.EnumName(Host_Health_name, int32(x)) +} +func (Host_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{4, 1} +} + +type Service_Type int32 + +const ( + Service_TYPE_UNSPECIFIED Service_Type = 0 + // The host is running a mongod daemon. + Service_MONGOD Service_Type = 1 + // The host is running a mongos daemon. + Service_MONGOS Service_Type = 2 + // The host is running a MongoDB config server. + Service_MONGOCFG Service_Type = 3 +) + +var Service_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "MONGOD", + 2: "MONGOS", + 3: "MONGOCFG", +} +var Service_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "MONGOD": 1, + "MONGOS": 2, + "MONGOCFG": 3, +} + +func (x Service_Type) String() string { + return proto.EnumName(Service_Type_name, int32(x)) +} +func (Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{5, 0} +} + +type Service_Health int32 + +const ( + // Health of the server is unknown. + Service_HEALTH_UNKNOWN Service_Health = 0 + // The server is working normally. + Service_ALIVE Service_Health = 1 + // The server is dead or unresponsive. 
+ Service_DEAD Service_Health = 2 +) + +var Service_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", +} +var Service_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, +} + +func (x Service_Health) String() string { + return proto.EnumName(Service_Health_name, int32(x)) +} +func (Service_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{5, 1} +} + +// A MongoDB Cluster resource. For more information, see the +// [Cluster](/docs/managed-mongodb/concepts) section in the Developer's Guide. +type Cluster struct { + // ID of the MongoDB cluster. + // This ID is assigned by MDB at creation time. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the MongoDB cluster belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the MongoDB cluster. + // The name is unique within the folder. 1-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the MongoDB cluster. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the MongoDB cluster as `` key:value `` pairs. Maximum 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the MongoDB cluster. + Environment Cluster_Environment `protobuf:"varint,7,opt,name=environment,proto3,enum=yandex.cloud.mdb.mongodb.v1.Cluster_Environment" json:"environment,omitempty"` + // Description of monitoring systems relevant to the MongoDB cluster. + Monitoring []*Monitoring `protobuf:"bytes,8,rep,name=monitoring,proto3" json:"monitoring,omitempty"` + // Configuration of the MongoDB cluster. + Config *ClusterConfig `protobuf:"bytes,9,opt,name=config,proto3" json:"config,omitempty"` + // ID of the network that the cluster belongs to. + NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Aggregated cluster health. + Health Cluster_Health `protobuf:"varint,11,opt,name=health,proto3,enum=yandex.cloud.mdb.mongodb.v1.Cluster_Health" json:"health,omitempty"` + // Current state of the cluster. 
+ Status Cluster_Status `protobuf:"varint,12,opt,name=status,proto3,enum=yandex.cloud.mdb.mongodb.v1.Cluster_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{0} +} +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (dst *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(dst, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Cluster) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Cluster) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Cluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Cluster) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *Cluster) GetMonitoring() []*Monitoring { + if m != nil { + return m.Monitoring + } + return nil +} + +func (m *Cluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Cluster) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *Cluster) GetHealth() Cluster_Health { + if m != nil { + return m.Health + } + return Cluster_HEALTH_UNKNOWN +} + +func (m *Cluster) GetStatus() Cluster_Status { + if m != nil { + return m.Status + } + return Cluster_STATUS_UNKNOWN +} + +// Monitoring system. +type Monitoring struct { + // Name of the monitoring system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the monitoring system. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Link to the monitoring system charts for the MongoDB cluster. 
+ Link string `protobuf:"bytes,3,opt,name=link,proto3" json:"link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Monitoring) Reset() { *m = Monitoring{} } +func (m *Monitoring) String() string { return proto.CompactTextString(m) } +func (*Monitoring) ProtoMessage() {} +func (*Monitoring) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{1} +} +func (m *Monitoring) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Monitoring.Unmarshal(m, b) +} +func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Monitoring.Marshal(b, m, deterministic) +} +func (dst *Monitoring) XXX_Merge(src proto.Message) { + xxx_messageInfo_Monitoring.Merge(dst, src) +} +func (m *Monitoring) XXX_Size() int { + return xxx_messageInfo_Monitoring.Size(m) +} +func (m *Monitoring) XXX_DiscardUnknown() { + xxx_messageInfo_Monitoring.DiscardUnknown(m) +} + +var xxx_messageInfo_Monitoring proto.InternalMessageInfo + +func (m *Monitoring) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Monitoring) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Monitoring) GetLink() string { + if m != nil { + return m.Link + } + return "" +} + +type ClusterConfig struct { + // Version of MongoDB server software. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Configuration for MongoDB servers in the cluster. + // + // Types that are valid to be assigned to Mongodb: + // *ClusterConfig_Mongodb_3_6 + Mongodb isClusterConfig_Mongodb `protobuf_oneof:"mongodb"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } +func (m *ClusterConfig) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig) ProtoMessage() {} +func (*ClusterConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{2} +} +func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterConfig.Unmarshal(m, b) +} +func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterConfig.Marshal(b, m, deterministic) +} +func (dst *ClusterConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterConfig.Merge(dst, src) +} +func (m *ClusterConfig) XXX_Size() int { + return xxx_messageInfo_ClusterConfig.Size(m) +} +func (m *ClusterConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo + +func (m *ClusterConfig) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type isClusterConfig_Mongodb interface { + isClusterConfig_Mongodb() +} + +type ClusterConfig_Mongodb_3_6 struct { + Mongodb_3_6 *Mongodb3_6 `protobuf:"bytes,2,opt,name=mongodb_3_6,json=mongodb36,proto3,oneof"` +} + +func (*ClusterConfig_Mongodb_3_6) isClusterConfig_Mongodb() {} + +func (m *ClusterConfig) GetMongodb() isClusterConfig_Mongodb { + if m != nil { + return m.Mongodb + } + return nil +} + +func (m *ClusterConfig) GetMongodb_3_6() *Mongodb3_6 { + if x, ok := m.GetMongodb().(*ClusterConfig_Mongodb_3_6); ok { + return x.Mongodb_3_6 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ClusterConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClusterConfig_OneofMarshaler, _ClusterConfig_OneofUnmarshaler, _ClusterConfig_OneofSizer, []interface{}{ + (*ClusterConfig_Mongodb_3_6)(nil), + } +} + +func _ClusterConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClusterConfig) + // mongodb + switch x := m.Mongodb.(type) { + case *ClusterConfig_Mongodb_3_6: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mongodb_3_6); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClusterConfig.Mongodb has unexpected type %T", x) + } + return nil +} + +func _ClusterConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClusterConfig) + switch tag { + case 2: // mongodb.mongodb_3_6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mongodb3_6) + err := b.DecodeMessage(msg) + m.Mongodb = &ClusterConfig_Mongodb_3_6{msg} + return true, err + default: + return false, nil + } +} + +func _ClusterConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClusterConfig) + // mongodb + switch x := m.Mongodb.(type) { + case *ClusterConfig_Mongodb_3_6: + s := proto.Size(x.Mongodb_3_6) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Mongodb3_6 struct { + // Configuration and resource allocation for a MongoDB 3.6 cluster. + Mongod *Mongodb3_6_Mongod `protobuf:"bytes,1,opt,name=mongod,proto3" json:"mongod,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mongodb3_6) Reset() { *m = Mongodb3_6{} } +func (m *Mongodb3_6) String() string { return proto.CompactTextString(m) } +func (*Mongodb3_6) ProtoMessage() {} +func (*Mongodb3_6) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{3} +} +func (m *Mongodb3_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mongodb3_6.Unmarshal(m, b) +} +func (m *Mongodb3_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mongodb3_6.Marshal(b, m, deterministic) +} +func (dst *Mongodb3_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mongodb3_6.Merge(dst, src) +} +func (m *Mongodb3_6) XXX_Size() int { + return xxx_messageInfo_Mongodb3_6.Size(m) +} +func (m *Mongodb3_6) XXX_DiscardUnknown() { + xxx_messageInfo_Mongodb3_6.DiscardUnknown(m) +} + +var xxx_messageInfo_Mongodb3_6 proto.InternalMessageInfo + +func (m *Mongodb3_6) GetMongod() *Mongodb3_6_Mongod { + if m != nil { + return m.Mongod + } + return nil +} + +type Mongodb3_6_Mongod struct { + // Configuration of a MongoDB 3.6 server. + Config *config.MongodConfigSet3_6 `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // Resources allocated to MongoDB hosts. 
+ Resources *Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mongodb3_6_Mongod) Reset() { *m = Mongodb3_6_Mongod{} } +func (m *Mongodb3_6_Mongod) String() string { return proto.CompactTextString(m) } +func (*Mongodb3_6_Mongod) ProtoMessage() {} +func (*Mongodb3_6_Mongod) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{3, 0} +} +func (m *Mongodb3_6_Mongod) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mongodb3_6_Mongod.Unmarshal(m, b) +} +func (m *Mongodb3_6_Mongod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mongodb3_6_Mongod.Marshal(b, m, deterministic) +} +func (dst *Mongodb3_6_Mongod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mongodb3_6_Mongod.Merge(dst, src) +} +func (m *Mongodb3_6_Mongod) XXX_Size() int { + return xxx_messageInfo_Mongodb3_6_Mongod.Size(m) +} +func (m *Mongodb3_6_Mongod) XXX_DiscardUnknown() { + xxx_messageInfo_Mongodb3_6_Mongod.DiscardUnknown(m) +} + +var xxx_messageInfo_Mongodb3_6_Mongod proto.InternalMessageInfo + +func (m *Mongodb3_6_Mongod) GetConfig() *config.MongodConfigSet3_6 { + if m != nil { + return m.Config + } + return nil +} + +func (m *Mongodb3_6_Mongod) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +type Host struct { + // Name of the MongoDB host. The host name is assigned by MDB at creation time, and cannot be changed. + // 1-63 characters long. + // + // The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the MongoDB host. The ID is assigned by MDB at creation time. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the availability zone where the MongoDB host resides. + ZoneId string `protobuf:"bytes,3,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // Resources allocated to the MongoDB host. + Resources *Resources `protobuf:"bytes,4,opt,name=resources,proto3" json:"resources,omitempty"` + // Role of the host in the cluster. + Role Host_Role `protobuf:"varint,5,opt,name=role,proto3,enum=yandex.cloud.mdb.mongodb.v1.Host_Role" json:"role,omitempty"` + // Status code of the aggregated health of the host. + Health Host_Health `protobuf:"varint,6,opt,name=health,proto3,enum=yandex.cloud.mdb.mongodb.v1.Host_Health" json:"health,omitempty"` + // Services provided by the host. + Services []*Service `protobuf:"bytes,7,rep,name=services,proto3" json:"services,omitempty"` + // ID of the subnet that the host belongs to. + SubnetId string `protobuf:"bytes,8,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Flag showing public IP assignment status to this host. 
+ AssignPublicIp bool `protobuf:"varint,9,opt,name=assign_public_ip,json=assignPublicIp,proto3" json:"assign_public_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Host) Reset() { *m = Host{} } +func (m *Host) String() string { return proto.CompactTextString(m) } +func (*Host) ProtoMessage() {} +func (*Host) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{4} +} +func (m *Host) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Host.Unmarshal(m, b) +} +func (m *Host) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Host.Marshal(b, m, deterministic) +} +func (dst *Host) XXX_Merge(src proto.Message) { + xxx_messageInfo_Host.Merge(dst, src) +} +func (m *Host) XXX_Size() int { + return xxx_messageInfo_Host.Size(m) +} +func (m *Host) XXX_DiscardUnknown() { + xxx_messageInfo_Host.DiscardUnknown(m) +} + +var xxx_messageInfo_Host proto.InternalMessageInfo + +func (m *Host) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Host) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *Host) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Host) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Host) GetRole() Host_Role { + if m != nil { + return m.Role + } + return Host_ROLE_UNKNOWN +} + +func (m *Host) GetHealth() Host_Health { + if m != nil { + return m.Health + } + return Host_HEALTH_UNKNOWN +} + +func (m *Host) GetServices() []*Service { + if m != nil { + return m.Services + } + return nil +} + +func (m *Host) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *Host) GetAssignPublicIp() bool { + if m != nil { + return m.AssignPublicIp + } + return false +} + +type Service struct { + // Type of the service provided by the host. + Type Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=yandex.cloud.mdb.mongodb.v1.Service_Type" json:"type,omitempty"` + // Status code of server availability. 
+ Health Service_Health `protobuf:"varint,2,opt,name=health,proto3,enum=yandex.cloud.mdb.mongodb.v1.Service_Health" json:"health,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{5} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service.Unmarshal(m, b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) +} +func (dst *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(dst, src) +} +func (m *Service) XXX_Size() int { + return xxx_messageInfo_Service.Size(m) +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetType() Service_Type { + if m != nil { + return m.Type + } + return Service_TYPE_UNSPECIFIED +} + +func (m *Service) GetHealth() Service_Health { + if m != nil { + return m.Health + } + return Service_HEALTH_UNKNOWN +} + +type Resources struct { + // ID of the preset for computational resources available to a host (CPU, memory etc.). + // All available presets are listed in the [documentation](/docs/managed-mongodb/concepts/instance-types). + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + // Volume of the storage available to a host, in bytes. + DiskSize int64 `protobuf:"varint,2,opt,name=disk_size,json=diskSize,proto3" json:"disk_size,omitempty"` + // Type of the storage environment for the host. + // Possible values: + // * network-hdd — network HDD drive, + // * network-nvme — network SSD drive, + // * local-nvme — local SSD storage. 
+ DiskTypeId string `protobuf:"bytes,3,opt,name=disk_type_id,json=diskTypeId,proto3" json:"disk_type_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_939f865571591ca8, []int{6} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (dst *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(dst, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +func (m *Resources) GetDiskSize() int64 { + if m != nil { + return m.DiskSize + } + return 0 +} + +func (m *Resources) GetDiskTypeId() string { + if m != nil { + return m.DiskTypeId + } + return "" +} + +func init() { + proto.RegisterType((*Cluster)(nil), "yandex.cloud.mdb.mongodb.v1.Cluster") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.mongodb.v1.Cluster.LabelsEntry") + proto.RegisterType((*Monitoring)(nil), "yandex.cloud.mdb.mongodb.v1.Monitoring") + proto.RegisterType((*ClusterConfig)(nil), "yandex.cloud.mdb.mongodb.v1.ClusterConfig") + proto.RegisterType((*Mongodb3_6)(nil), "yandex.cloud.mdb.mongodb.v1.Mongodb3_6") + proto.RegisterType((*Mongodb3_6_Mongod)(nil), "yandex.cloud.mdb.mongodb.v1.Mongodb3_6.Mongod") + proto.RegisterType((*Host)(nil), "yandex.cloud.mdb.mongodb.v1.Host") + proto.RegisterType((*Service)(nil), "yandex.cloud.mdb.mongodb.v1.Service") + proto.RegisterType((*Resources)(nil), "yandex.cloud.mdb.mongodb.v1.Resources") + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Cluster_Environment", Cluster_Environment_name, Cluster_Environment_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Cluster_Health", Cluster_Health_name, Cluster_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Cluster_Status", Cluster_Status_name, Cluster_Status_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Host_Role", Host_Role_name, Host_Role_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Host_Health", Host_Health_name, Host_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Service_Type", Service_Type_name, Service_Type_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.Service_Health", Service_Health_name, Service_Health_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/cluster.proto", fileDescriptor_cluster_939f865571591ca8) +} + +var fileDescriptor_cluster_939f865571591ca8 = []byte{ + // 1093 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdf, 0x6f, 0xda, 0xd6, + 0x17, 0xaf, 0xc1, 0x31, 0xf8, 0x90, 0x22, 0xeb, 0xaa, 0x52, 0xad, 0x54, 0xd5, 0x37, 0xb2, 0xbe, + 0xda, 0xe8, 0xb6, 0x9a, 0x25, 0x99, 0xa2, 0xb5, 0xd3, 0xb4, 0x12, 0xec, 0x80, 0xb5, 0xc4, 0xa0, + 0x6b, 0x93, 0xa9, 0x7b, 0xb1, 0x00, 0xdf, 0x10, 0x2b, 0xc6, 0x46, 
0xb6, 0x61, 0x25, 0x2f, 0x7b, + 0xd9, 0xdf, 0xb0, 0xc7, 0xfd, 0x27, 0xfb, 0xb3, 0xf6, 0x3e, 0xdd, 0x1f, 0x10, 0xd2, 0x6d, 0x94, + 0x4e, 0x7b, 0xbb, 0xe7, 0xdc, 0xf3, 0xf9, 0xf8, 0xfc, 0xbe, 0x86, 0x17, 0xcb, 0x61, 0x12, 0x92, + 0x77, 0xcd, 0x71, 0x9c, 0xce, 0xc3, 0xe6, 0x34, 0x1c, 0x35, 0xa7, 0x69, 0x32, 0x49, 0xc3, 0x51, + 0x73, 0x71, 0xd4, 0x1c, 0xc7, 0xf3, 0xbc, 0x20, 0x99, 0x39, 0xcb, 0xd2, 0x22, 0x45, 0xcf, 0xb8, + 0xa9, 0xc9, 0x4c, 0xcd, 0x69, 0x38, 0x32, 0x85, 0xa9, 0xb9, 0x38, 0x3a, 0xf8, 0xdf, 0x24, 0x4d, + 0x27, 0x31, 0x69, 0x32, 0xd3, 0xd1, 0xfc, 0xba, 0x59, 0x44, 0x53, 0x92, 0x17, 0xc3, 0xe9, 0x8c, + 0xa3, 0x0f, 0x4e, 0xb6, 0x7e, 0x28, 0x4d, 0xae, 0xa3, 0xc9, 0x4a, 0x73, 0x12, 0x9c, 0x72, 0x90, + 0xf1, 0x7b, 0x05, 0x2a, 0x6d, 0xee, 0x04, 0xaa, 0x43, 0x29, 0x0a, 0x75, 0xe9, 0x50, 0x6a, 0xa8, + 0xb8, 0x14, 0x85, 0xe8, 0x19, 0xa8, 0xd7, 0x69, 0x1c, 0x92, 0x2c, 0x88, 0x42, 0xbd, 0xc4, 0xd4, + 0x55, 0xae, 0x70, 0x42, 0xf4, 0x0a, 0x60, 0x9c, 0x91, 0x61, 0x41, 0xc2, 0x60, 0x58, 0xe8, 0xe5, + 0x43, 0xa9, 0x51, 0x3b, 0x3e, 0x30, 0xb9, 0x8f, 0xe6, 0xca, 0x47, 0xd3, 0x5f, 0xf9, 0x88, 0x55, + 0x61, 0xdd, 0x2a, 0x10, 0x02, 0x39, 0x19, 0x4e, 0x89, 0x2e, 0x33, 0x4a, 0x76, 0x46, 0x87, 0x50, + 0x0b, 0x49, 0x3e, 0xce, 0xa2, 0x59, 0x11, 0xa5, 0x89, 0xbe, 0xc7, 0xae, 0x36, 0x55, 0xa8, 0x0b, + 0x4a, 0x3c, 0x1c, 0x91, 0x38, 0xd7, 0x95, 0xc3, 0x72, 0xa3, 0x76, 0xfc, 0xa5, 0xb9, 0x25, 0x5b, + 0xa6, 0x88, 0xc9, 0xbc, 0x60, 0x10, 0x3b, 0x29, 0xb2, 0x25, 0x16, 0x78, 0x84, 0xa1, 0x46, 0x92, + 0x45, 0x94, 0xa5, 0xc9, 0x94, 0x24, 0x85, 0x5e, 0x39, 0x94, 0x1a, 0xf5, 0x1d, 0xe9, 0xec, 0x7b, + 0x1c, 0xde, 0x24, 0x41, 0x1d, 0x80, 0x69, 0x9a, 0x44, 0x45, 0x9a, 0x45, 0xc9, 0x44, 0xaf, 0x32, + 0x0f, 0x3f, 0xdd, 0x4a, 0x79, 0xb9, 0x36, 0xc7, 0x1b, 0x50, 0x74, 0x06, 0x0a, 0xaf, 0x95, 0xae, + 0xb2, 0x9c, 0x7e, 0xb6, 0x8b, 0x5f, 0x6d, 0x86, 0xc0, 0x02, 0x89, 0x9e, 0x03, 0x24, 0xa4, 0xf8, + 0x29, 0xcd, 0x6e, 0x69, 0xe5, 0x80, 0xe5, 0x52, 0x15, 0x1a, 0x27, 0x44, 0x6d, 0x50, 0x6e, 0xc8, + 0x30, 0x2e, 0x6e, 0xf4, 0x1a, 0x0b, 0xfd, 0xf3, 0x9d, 0x42, 0xef, 0x32, 0x08, 0x16, 0x50, 0x4a, + 0x92, 0x17, 0xc3, 0x62, 0x9e, 0xeb, 0xfb, 0x1f, 0x41, 0xe2, 0x31, 0x08, 0x16, 0xd0, 0x83, 0x57, + 0x50, 0xdb, 0x28, 0x10, 0xd2, 0xa0, 0x7c, 0x4b, 0x96, 0xa2, 0x03, 0xe9, 0x11, 0x3d, 0x81, 0xbd, + 0xc5, 0x30, 0x9e, 0x13, 0xd1, 0x7e, 0x5c, 0x78, 0x5d, 0xfa, 0x5a, 0x32, 0x1c, 0xa8, 0x6d, 0x14, + 0x03, 0x3d, 0x83, 0xa7, 0xb6, 0x7b, 0xe5, 0xe0, 0x9e, 0x7b, 0x69, 0xbb, 0x7e, 0x30, 0x70, 0xbd, + 0xbe, 0xdd, 0x76, 0xce, 0x1d, 0xdb, 0xd2, 0x1e, 0xa1, 0x3a, 0x40, 0x1f, 0xf7, 0xac, 0x41, 0xdb, + 0x77, 0x7a, 0xae, 0x26, 0xa1, 0xc7, 0xa0, 0xf6, 0xb1, 0xed, 0xf9, 0xad, 0xb3, 0x0b, 0x5b, 0x2b, + 0x19, 0xdf, 0x81, 0xc2, 0x83, 0x43, 0x08, 0xea, 0x5d, 0xbb, 0x75, 0xe1, 0x77, 0x83, 0x81, 0xfb, + 0xbd, 0xdb, 0xfb, 0xc1, 0xd5, 0x1e, 0x21, 0x15, 0xf6, 0x5a, 0x17, 0xce, 0x95, 0xad, 0x49, 0xa8, + 0x0a, 0xb2, 0x65, 0xb7, 0x2c, 0xad, 0x84, 0xf6, 0xa1, 0x6a, 0xd9, 0x1d, 0xdc, 0xb2, 0x6c, 0x4b, + 0x2b, 0x1b, 0x4b, 0x50, 0x78, 0x60, 0x94, 0xc0, 0xf3, 0x5b, 0xfe, 0xc0, 0xdb, 0x20, 0xd8, 0x87, + 0x6a, 0x1b, 0xdb, 0x2d, 0xdf, 0x71, 0x3b, 0x9a, 0x84, 0x6a, 0x50, 0xc1, 0x03, 0xd7, 0xa5, 0x42, + 0x89, 0x72, 0xdb, 0x18, 0xf7, 0xb0, 0x56, 0xa6, 0x56, 0x83, 0xbe, 0xc5, 0xad, 0x64, 0x2a, 0x79, + 0x7e, 0xaf, 0xdf, 0xa7, 0xd2, 0x1e, 0xc5, 0x30, 0xc9, 0xb6, 0x34, 0x85, 0x5f, 0xb5, 0x30, 0x33, + 0xac, 0x18, 0x57, 0x00, 0xf7, 0x8d, 0xb4, 0x9e, 0x2c, 0xe9, 0x9f, 0x27, 0xab, 0xf4, 0xd7, 0xc9, + 0x42, 0x20, 0xc7, 0x51, 0x72, 0xcb, 0x86, 0x58, 0xc5, 0xec, 0x6c, 0xfc, 0x0c, 0x8f, 0x1f, 
0xf4, + 0x16, 0xd2, 0xa1, 0xb2, 0x20, 0x59, 0x4e, 0x29, 0x38, 0xfb, 0x4a, 0x44, 0x0e, 0xd4, 0x44, 0xa5, + 0x83, 0x93, 0xe0, 0x94, 0x7d, 0x60, 0x87, 0xde, 0x17, 0x6b, 0xa8, 0xfb, 0x08, 0xab, 0xab, 0xa5, + 0x74, 0x7a, 0xa6, 0x42, 0x45, 0x08, 0xc6, 0x1f, 0x12, 0x8b, 0x4c, 0x98, 0xa1, 0x73, 0x50, 0xf8, + 0x0d, 0xfb, 0x7a, 0xed, 0xd8, 0xdc, 0x91, 0x5f, 0x1c, 0xb1, 0x40, 0x1f, 0xfc, 0x26, 0x81, 0xc2, + 0x55, 0xc8, 0x5d, 0x4f, 0x1a, 0xa7, 0x3c, 0xdd, 0x4a, 0xc9, 0x4d, 0x05, 0x1d, 0xcf, 0x89, 0x47, + 0x8a, 0x93, 0xe0, 0x74, 0x3d, 0x75, 0x16, 0xa8, 0x19, 0xc9, 0xd3, 0x79, 0x36, 0x26, 0xb9, 0xc8, + 0xc2, 0x27, 0x5b, 0x29, 0xf1, 0xca, 0x1a, 0xdf, 0x03, 0x8d, 0x5f, 0x65, 0x90, 0xbb, 0x69, 0x5e, + 0xfc, 0x6d, 0x2d, 0x9f, 0x03, 0x88, 0x17, 0xe3, 0x7e, 0x25, 0xab, 0x42, 0xe3, 0x84, 0xe8, 0x29, + 0x54, 0xee, 0xd2, 0x84, 0xd0, 0x3b, 0x5e, 0x4b, 0x85, 0x8a, 0x4e, 0xf8, 0xd0, 0x35, 0xf9, 0x5f, + 0xba, 0x86, 0x5e, 0x83, 0x9c, 0xa5, 0x31, 0x61, 0xcb, 0xb9, 0xfe, 0x01, 0x02, 0x1a, 0x82, 0x89, + 0xd3, 0x98, 0x60, 0x86, 0x41, 0x6f, 0xd6, 0x3b, 0x47, 0x61, 0xe8, 0xc6, 0x87, 0xd1, 0xef, 0x2d, + 0x9c, 0x37, 0x50, 0xcd, 0x49, 0xb6, 0x88, 0x68, 0x08, 0x15, 0xb6, 0x5f, 0xff, 0xbf, 0x95, 0xc3, + 0xe3, 0xc6, 0x78, 0x8d, 0xa2, 0xef, 0x59, 0x3e, 0x1f, 0x25, 0xa4, 0xa0, 0x09, 0xaa, 0xf2, 0xf7, + 0x8c, 0x2b, 0x9c, 0x10, 0x35, 0x40, 0x1b, 0xe6, 0x79, 0x34, 0x49, 0x82, 0xd9, 0x7c, 0x14, 0x47, + 0xe3, 0x20, 0x9a, 0xb1, 0x0d, 0x5c, 0xc5, 0x75, 0xae, 0xef, 0x33, 0xb5, 0x33, 0x33, 0xbe, 0x02, + 0x99, 0x06, 0x86, 0x34, 0xd8, 0xc7, 0xbd, 0x0b, 0x7b, 0x63, 0xd2, 0x6b, 0x50, 0xe9, 0x63, 0xe7, + 0xb2, 0x85, 0xdf, 0xf2, 0x25, 0xe3, 0xd9, 0xed, 0x9e, 0x6b, 0x51, 0xf1, 0x3f, 0x58, 0x32, 0xbf, + 0x94, 0xa0, 0x22, 0x62, 0x42, 0xdf, 0x82, 0x5c, 0x2c, 0x67, 0xbc, 0x37, 0xea, 0xc7, 0x2f, 0x76, + 0xc9, 0x83, 0xe9, 0x2f, 0x67, 0x04, 0x33, 0xd8, 0xc6, 0x03, 0x50, 0xda, 0x61, 0x77, 0xaf, 0x08, + 0x1e, 0xd6, 0xc3, 0x38, 0x03, 0x99, 0x52, 0xa2, 0x27, 0xa0, 0xf9, 0x6f, 0xfb, 0xf6, 0x7b, 0x2b, + 0x17, 0x40, 0xb9, 0xec, 0xb9, 0x9d, 0x9e, 0xa5, 0x49, 0xeb, 0xb3, 0xc7, 0x63, 0x62, 0xe7, 0xf6, + 0x79, 0x47, 0x2b, 0x1b, 0x47, 0x1f, 0x9d, 0x14, 0xe3, 0x1d, 0xa8, 0xeb, 0xe6, 0x44, 0x5f, 0x00, + 0x5a, 0xb5, 0x67, 0x30, 0xcb, 0x48, 0xce, 0x4b, 0xcb, 0x27, 0x46, 0x5b, 0xdd, 0xf4, 0xd9, 0x85, + 0xc3, 0xfe, 0x67, 0xc2, 0x28, 0xbf, 0x0d, 0xf2, 0xe8, 0x8e, 0x3f, 0x28, 0x65, 0x5c, 0xa5, 0x0a, + 0x2f, 0xba, 0xa3, 0x6b, 0x72, 0x9f, 0x5d, 0xd2, 0x04, 0xdd, 0x0f, 0x10, 0x50, 0x1d, 0x0d, 0xd3, + 0x09, 0xcf, 0x9c, 0x1f, 0x3b, 0x93, 0xa8, 0xb8, 0x99, 0x8f, 0xcc, 0x71, 0x3a, 0x6d, 0xf2, 0x8c, + 0xbd, 0xe4, 0x3f, 0x5b, 0x93, 0xf4, 0xe5, 0x84, 0x24, 0xec, 0xaf, 0xa7, 0xb9, 0xe5, 0x2f, 0xec, + 0x1b, 0x71, 0x1c, 0x29, 0xcc, 0xf4, 0xe4, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0x37, 0x33, + 0x3d, 0x1c, 0x0a, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster_service.pb.go new file mode 100644 index 000000000..821c05e31 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/cluster_service.pb.go @@ -0,0 +1,2712 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/mongodb/v1/cluster_service.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ListClusterLogsRequest_ServiceType int32 + +const ( + ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED ListClusterLogsRequest_ServiceType = 0 + // Logs of MongoDB activity. + ListClusterLogsRequest_MONGOD ListClusterLogsRequest_ServiceType = 1 +) + +var ListClusterLogsRequest_ServiceType_name = map[int32]string{ + 0: "SERVICE_TYPE_UNSPECIFIED", + 1: "MONGOD", +} +var ListClusterLogsRequest_ServiceType_value = map[string]int32{ + "SERVICE_TYPE_UNSPECIFIED": 0, + "MONGOD": 1, +} + +func (x ListClusterLogsRequest_ServiceType) String() string { + return proto.EnumName(ListClusterLogsRequest_ServiceType_name, int32(x)) +} +func (ListClusterLogsRequest_ServiceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{18, 0} +} + +type GetClusterRequest struct { + // ID of the MongoDB Cluster resource to return. + // To get the cluster ID, use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{0} +} +func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b) +} +func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic) +} +func (dst *GetClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterRequest.Merge(dst, src) +} +func (m *GetClusterRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterRequest.Size(m) +} +func (m *GetClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo + +func (m *GetClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type ListClustersRequest struct { + // ID of the folder to list MongoDB clusters in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] + // to the [ListClustersResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can only use filtering with the [Cluster.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Мust be 1-63 characters long and match the regular expression `^[a-zA-Z0-9_-]+$`. 
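+	// Filter sketch (illustrative only, not part of the generated upstream file; the
+	// exact quoting of values is an assumption based on the comment above):
+	//
+	//   filter := `name = "my-cluster"`           // single value, `=` operator
+	//   filter := `name IN ("prod", "staging")`   // list of values, `IN` operator
+	//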
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{1} +} +func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b) +} +func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic) +} +func (dst *ListClustersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersRequest.Merge(dst, src) +} +func (m *ListClustersRequest) XXX_Size() int { + return xxx_messageInfo_ListClustersRequest.Size(m) +} +func (m *ListClustersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo + +func (m *ListClustersRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListClustersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListClustersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListClustersResponse struct { + // List of MongoDB Cluster resources. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value + // for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
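+	// Paging sketch (illustrative only, not part of the generated upstream file): drains
+	// every page with the ClusterServiceClient generated further down in this file; the
+	// client variable and error handling are assumed.
+	//
+	//   req := &ListClustersRequest{FolderId: folderID, PageSize: 100}
+	//   for {
+	//       resp, err := client.List(ctx, req)
+	//       if err != nil {
+	//           return err
+	//       }
+	//       clusters = append(clusters, resp.Clusters...)
+	//       if resp.NextPageToken == "" {
+	//           break
+	//       }
+	//       req.PageToken = resp.NextPageToken
+	//   }
+	//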
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{2} +} +func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b) +} +func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic) +} +func (dst *ListClustersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersResponse.Merge(dst, src) +} +func (m *ListClustersResponse) XXX_Size() int { + return xxx_messageInfo_ListClustersResponse.Size(m) +} +func (m *ListClustersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateClusterRequest struct { + // ID of the folder to create MongoDB cluster in. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the MongoDB cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the MongoDB cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the MongoDB cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the MongoDB cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.mongodb.v1.Cluster_Environment" json:"environment,omitempty"` + // Configuration and resources for hosts that should be created for the MongoDB cluster. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Descriptions of databases to be created in the MongoDB cluster. + DatabaseSpecs []*DatabaseSpec `protobuf:"bytes,7,rep,name=database_specs,json=databaseSpecs,proto3" json:"database_specs,omitempty"` + // Descriptions of database users to be created in the MongoDB cluster. + UserSpecs []*UserSpec `protobuf:"bytes,8,rep,name=user_specs,json=userSpecs,proto3" json:"user_specs,omitempty"` + // Individual configurations for hosts that should be created for the MongoDB cluster. + HostSpecs []*HostSpec `protobuf:"bytes,9,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the cluster in. 
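+	// Construction sketch (illustrative only, not part of the generated upstream file);
+	// the nested spec types live elsewhere in this package and their contents are elided:
+	//
+	//   req := &CreateClusterRequest{
+	//       FolderId:      folderID,
+	//       Name:          "my-cluster",
+	//       Description:   "example cluster",
+	//       Labels:        map[string]string{"project": "mvp"},
+	//       ConfigSpec:    &ConfigSpec{ /* ... */ },
+	//       DatabaseSpecs: []*DatabaseSpec{ /* ... */ },
+	//       UserSpecs:     []*UserSpec{ /* ... */ },
+	//       HostSpecs:     []*HostSpec{ /* ... */ },
+	//       NetworkId:     networkID,
+	//   }
+	//   op, err := client.Create(ctx, req) // ClusterServiceClient.Create is assumed from the service definition below
+	//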
+ NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{3} +} +func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b) +} +func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *CreateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterRequest.Merge(dst, src) +} +func (m *CreateClusterRequest) XXX_Size() int { + return xxx_messageInfo_CreateClusterRequest.Size(m) +} +func (m *CreateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo + +func (m *CreateClusterRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *CreateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *CreateClusterRequest) GetDatabaseSpecs() []*DatabaseSpec { + if m != nil { + return m.DatabaseSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetUserSpecs() []*UserSpec { + if m != nil { + return m.UserSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type CreateClusterMetadata struct { + // ID of the MongoDB cluster that is being created. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } +func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateClusterMetadata) ProtoMessage() {} +func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{4} +} +func (m *CreateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterMetadata.Unmarshal(m, b) +} +func (m *CreateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterMetadata.Merge(dst, src) +} +func (m *CreateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_CreateClusterMetadata.Size(m) +} +func (m *CreateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterMetadata proto.InternalMessageInfo + +func (m *CreateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type UpdateClusterRequest struct { + // ID of the MongoDB Cluster resource to update. + // To get the MongoDB cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Field mask that specifies which fields of the MongoDB Cluster resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New description of the MongoDB cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the MongoDB cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + // + // The new set of labels will completely replace the old ones. To add a label, request the current + // set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // New configuration and resources for hosts in the cluster. 
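+	// Update sketch (illustrative only, not part of the generated upstream file): only the
+	// paths listed in update_mask are applied, and, per the comment above, labels are
+	// replaced as a whole set rather than merged:
+	//
+	//   req := &UpdateClusterRequest{
+	//       ClusterId:   clusterID,
+	//       UpdateMask:  &field_mask.FieldMask{Paths: []string{"description", "labels"}},
+	//       Description: "updated description",
+	//       Labels:      map[string]string{"project": "mvp", "source": "dictionary"},
+	//   }
+	//   op, err := client.Update(ctx, req) // ClusterServiceClient.Update is assumed from the service definition below
+	//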
+ ConfigSpec *ConfigSpec `protobuf:"bytes,5,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{5} +} +func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b) +} +func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterRequest.Merge(dst, src) +} +func (m *UpdateClusterRequest) XXX_Size() int { + return xxx_messageInfo_UpdateClusterRequest.Size(m) +} +func (m *UpdateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo + +func (m *UpdateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +type UpdateClusterMetadata struct { + // ID of the MongoDB Cluster resource that is being updated. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } +func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterMetadata) ProtoMessage() {} +func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{6} +} +func (m *UpdateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterMetadata.Unmarshal(m, b) +} +func (m *UpdateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterMetadata.Merge(dst, src) +} +func (m *UpdateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateClusterMetadata.Size(m) +} +func (m *UpdateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterMetadata proto.InternalMessageInfo + +func (m *UpdateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterRequest struct { + // ID of the MongoDB cluster to delete. 
+ // To get the MongoDB cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{7} +} +func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b) +} +func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterRequest.Merge(dst, src) +} +func (m *DeleteClusterRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterRequest.Size(m) +} +func (m *DeleteClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo + +func (m *DeleteClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterMetadata struct { + // ID of the MongoDB cluster that is being deleted. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterMetadata) Reset() { *m = DeleteClusterMetadata{} } +func (m *DeleteClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterMetadata) ProtoMessage() {} +func (*DeleteClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{8} +} +func (m *DeleteClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterMetadata.Merge(dst, src) +} +func (m *DeleteClusterMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterMetadata.Size(m) +} +func (m *DeleteClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterMetadata proto.InternalMessageInfo + +func (m *DeleteClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterRequest struct { + // Required. ID of the MongoDB cluster to start. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterRequest) Reset() { *m = StartClusterRequest{} } +func (m *StartClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StartClusterRequest) ProtoMessage() {} +func (*StartClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{9} +} +func (m *StartClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterRequest.Unmarshal(m, b) +} +func (m *StartClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StartClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterRequest.Merge(dst, src) +} +func (m *StartClusterRequest) XXX_Size() int { + return xxx_messageInfo_StartClusterRequest.Size(m) +} +func (m *StartClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterRequest proto.InternalMessageInfo + +func (m *StartClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterMetadata struct { + // Required. ID of the MongoDB cluster. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterMetadata) Reset() { *m = StartClusterMetadata{} } +func (m *StartClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StartClusterMetadata) ProtoMessage() {} +func (*StartClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{10} +} +func (m *StartClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterMetadata.Unmarshal(m, b) +} +func (m *StartClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StartClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterMetadata.Merge(dst, src) +} +func (m *StartClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StartClusterMetadata.Size(m) +} +func (m *StartClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterMetadata proto.InternalMessageInfo + +func (m *StartClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterRequest struct { + // Required. ID of the MongoDB cluster to stop. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterRequest) Reset() { *m = StopClusterRequest{} } +func (m *StopClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StopClusterRequest) ProtoMessage() {} +func (*StopClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{11} +} +func (m *StopClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterRequest.Unmarshal(m, b) +} +func (m *StopClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StopClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterRequest.Merge(dst, src) +} +func (m *StopClusterRequest) XXX_Size() int { + return xxx_messageInfo_StopClusterRequest.Size(m) +} +func (m *StopClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterRequest proto.InternalMessageInfo + +func (m *StopClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterMetadata struct { + // Required. ID of the MongoDB cluster. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterMetadata) Reset() { *m = StopClusterMetadata{} } +func (m *StopClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StopClusterMetadata) ProtoMessage() {} +func (*StopClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{12} +} +func (m *StopClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterMetadata.Unmarshal(m, b) +} +func (m *StopClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StopClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterMetadata.Merge(dst, src) +} +func (m *StopClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StopClusterMetadata.Size(m) +} +func (m *StopClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterMetadata proto.InternalMessageInfo + +func (m *StopClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterRequest struct { + // ID of the MongoDB cluster to back up. + // To get the MongoDB cluster ID, use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterRequest) Reset() { *m = BackupClusterRequest{} } +func (m *BackupClusterRequest) String() string { return proto.CompactTextString(m) } +func (*BackupClusterRequest) ProtoMessage() {} +func (*BackupClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{13} +} +func (m *BackupClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterRequest.Unmarshal(m, b) +} +func (m *BackupClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterRequest.Marshal(b, m, deterministic) +} +func (dst *BackupClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterRequest.Merge(dst, src) +} +func (m *BackupClusterRequest) XXX_Size() int { + return xxx_messageInfo_BackupClusterRequest.Size(m) +} +func (m *BackupClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterRequest proto.InternalMessageInfo + +func (m *BackupClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterMetadata struct { + // ID of the MongoDB cluster that is being backed up. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterMetadata) Reset() { *m = BackupClusterMetadata{} } +func (m *BackupClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*BackupClusterMetadata) ProtoMessage() {} +func (*BackupClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{14} +} +func (m *BackupClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterMetadata.Unmarshal(m, b) +} +func (m *BackupClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *BackupClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterMetadata.Merge(dst, src) +} +func (m *BackupClusterMetadata) XXX_Size() int { + return xxx_messageInfo_BackupClusterMetadata.Size(m) +} +func (m *BackupClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterMetadata proto.InternalMessageInfo + +func (m *BackupClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type RestoreClusterRequest struct { + // ID of the backup to create a cluster from. + // To get the backup ID, use a [ClusterService.ListBackups] request. + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + // Name of the new MongoDB cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the new MongoDB cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the MongoDB cluster as `` key:value `` pairs. 
Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the new MongoDB cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.mongodb.v1.Cluster_Environment" json:"environment,omitempty"` + // Configuration for the MongoDB cluster to be created. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Configurations for MongoDB hosts that should be created for + // the cluster that is being created from the backup. + HostSpecs []*HostSpec `protobuf:"bytes,7,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // Required. ID of the network to create the MongoDB cluster in. + NetworkId string `protobuf:"bytes,8,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterRequest) Reset() { *m = RestoreClusterRequest{} } +func (m *RestoreClusterRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterRequest) ProtoMessage() {} +func (*RestoreClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{15} +} +func (m *RestoreClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterRequest.Unmarshal(m, b) +} +func (m *RestoreClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterRequest.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterRequest.Merge(dst, src) +} +func (m *RestoreClusterRequest) XXX_Size() int { + return xxx_messageInfo_RestoreClusterRequest.Size(m) +} +func (m *RestoreClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterRequest proto.InternalMessageInfo + +func (m *RestoreClusterRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +func (m *RestoreClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RestoreClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *RestoreClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *RestoreClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *RestoreClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *RestoreClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *RestoreClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type RestoreClusterMetadata struct { + // ID of the new MongoDB cluster that is being created from a backup. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the backup that is being used for creating a cluster. 
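+	// Restore sketch (illustrative only, not part of the generated upstream file); spec
+	// contents are elided and the Restore method name on the generated client is assumed:
+	//
+	//   op, err := client.Restore(ctx, &RestoreClusterRequest{
+	//       BackupId:   backupID,
+	//       Name:       "restored-cluster",
+	//       ConfigSpec: &ConfigSpec{ /* ... */ },
+	//       HostSpecs:  []*HostSpec{ /* ... */ },
+	//       NetworkId:  networkID,
+	//   })
+	//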
+ BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterMetadata) Reset() { *m = RestoreClusterMetadata{} } +func (m *RestoreClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterMetadata) ProtoMessage() {} +func (*RestoreClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{16} +} +func (m *RestoreClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterMetadata.Unmarshal(m, b) +} +func (m *RestoreClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterMetadata.Merge(dst, src) +} +func (m *RestoreClusterMetadata) XXX_Size() int { + return xxx_messageInfo_RestoreClusterMetadata.Size(m) +} +func (m *RestoreClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterMetadata proto.InternalMessageInfo + +func (m *RestoreClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RestoreClusterMetadata) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type LogRecord struct { + // Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contents of the log record. + Message map[string]string `protobuf:"bytes,2,rep,name=message,proto3" json:"message,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{17} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogRecord.Unmarshal(m, b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) +} +func (dst *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(dst, src) +} +func (m *LogRecord) XXX_Size() int { + return xxx_messageInfo_LogRecord.Size(m) +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *LogRecord) GetMessage() map[string]string { + if m != nil { + return m.Message + } + return nil +} + +type ListClusterLogsRequest struct { + // Required. ID of the MongoDB cluster to request logs for. + // To get the MongoDB cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Columns from the logs table to request. 
+ // If no columns are specified, entire log records are returned. + ColumnFilter []string `protobuf:"bytes,2,rep,name=column_filter,json=columnFilter,proto3" json:"column_filter,omitempty"` + // Type of the service to request logs about. + ServiceType ListClusterLogsRequest_ServiceType `protobuf:"varint,3,opt,name=service_type,json=serviceType,proto3,enum=yandex.cloud.mdb.mongodb.v1.ListClusterLogsRequest_ServiceType" json:"service_type,omitempty"` + // Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + FromTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=from_time,json=fromTime,proto3" json:"from_time,omitempty"` + // End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + ToTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=to_time,json=toTime,proto3" json:"to_time,omitempty"` + PageSize int64 `protobuf:"varint,6,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListClusterLogsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterLogsRequest) Reset() { *m = ListClusterLogsRequest{} } +func (m *ListClusterLogsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterLogsRequest) ProtoMessage() {} +func (*ListClusterLogsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{18} +} +func (m *ListClusterLogsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterLogsRequest.Unmarshal(m, b) +} +func (m *ListClusterLogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterLogsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterLogsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterLogsRequest.Merge(dst, src) +} +func (m *ListClusterLogsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterLogsRequest.Size(m) +} +func (m *ListClusterLogsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterLogsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterLogsRequest proto.InternalMessageInfo + +func (m *ListClusterLogsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterLogsRequest) GetColumnFilter() []string { + if m != nil { + return m.ColumnFilter + } + return nil +} + +func (m *ListClusterLogsRequest) GetServiceType() ListClusterLogsRequest_ServiceType { + if m != nil { + return m.ServiceType + } + return ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED +} + +func (m *ListClusterLogsRequest) GetFromTime() *timestamp.Timestamp { + if m != nil { + return m.FromTime + } + return nil +} + +func (m *ListClusterLogsRequest) GetToTime() *timestamp.Timestamp { + if m != nil { + return m.ToTime + } + return nil +} + +func (m *ListClusterLogsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterLogsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterLogsResponse struct { + // Requested log records. 
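+	// Log listing sketch (illustrative only, not part of the generated upstream file); the
+	// column names and the ListLogs method name are assumptions:
+	//
+	//   resp, err := client.ListLogs(ctx, &ListClusterLogsRequest{
+	//       ClusterId:    clusterID,
+	//       ColumnFilter: []string{"hostname", "message"}, // placeholder column names
+	//       ServiceType:  ListClusterLogsRequest_MONGOD,
+	//       FromTime:     fromTs, // *timestamp.Timestamp
+	//       ToTime:       toTs,   // *timestamp.Timestamp
+	//       PageSize:     100,
+	//   })
+	//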
+ Logs []*LogRecord `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterLogsResponse) Reset() { *m = ListClusterLogsResponse{} } +func (m *ListClusterLogsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterLogsResponse) ProtoMessage() {} +func (*ListClusterLogsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{19} +} +func (m *ListClusterLogsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterLogsResponse.Unmarshal(m, b) +} +func (m *ListClusterLogsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterLogsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterLogsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterLogsResponse.Merge(dst, src) +} +func (m *ListClusterLogsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterLogsResponse.Size(m) +} +func (m *ListClusterLogsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterLogsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterLogsResponse proto.InternalMessageInfo + +func (m *ListClusterLogsResponse) GetLogs() []*LogRecord { + if m != nil { + return m.Logs + } + return nil +} + +func (m *ListClusterLogsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterOperationsRequest struct { + // Required. ID of the MongoDB Cluster resource to list operations for. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListClusterOperationsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsRequest) Reset() { *m = ListClusterOperationsRequest{} } +func (m *ListClusterOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsRequest) ProtoMessage() {} +func (*ListClusterOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{20} +} +func (m *ListClusterOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsRequest.Unmarshal(m, b) +} +func (m *ListClusterOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsRequest.Merge(dst, src) +} +func (m *ListClusterOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsRequest.Size(m) +} +func (m *ListClusterOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsRequest proto.InternalMessageInfo + +func (m *ListClusterOperationsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterOperationsResponse struct { + // List of Operation resources for the specified MongoDB cluster. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsResponse) Reset() { *m = ListClusterOperationsResponse{} } +func (m *ListClusterOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsResponse) ProtoMessage() {} +func (*ListClusterOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{21} +} +func (m *ListClusterOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsResponse.Unmarshal(m, b) +} +func (m *ListClusterOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsResponse.Merge(dst, src) +} +func (m *ListClusterOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsResponse.Size(m) +} +func (m *ListClusterOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsResponse proto.InternalMessageInfo + +func (m *ListClusterOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListClusterOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterBackupsRequest struct { + // Required. ID of the MongoDB cluster. + // To get the MongoDB cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListClusterBackupsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsRequest) Reset() { *m = ListClusterBackupsRequest{} } +func (m *ListClusterBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsRequest) ProtoMessage() {} +func (*ListClusterBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{22} +} +func (m *ListClusterBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsRequest.Unmarshal(m, b) +} +func (m *ListClusterBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsRequest.Merge(dst, src) +} +func (m *ListClusterBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsRequest.Size(m) +} +func (m *ListClusterBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsRequest proto.InternalMessageInfo + +func (m *ListClusterBackupsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterBackupsResponse struct { + // List of MongoDB Backup resources. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsResponse) Reset() { *m = ListClusterBackupsResponse{} } +func (m *ListClusterBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsResponse) ProtoMessage() {} +func (*ListClusterBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{23} +} +func (m *ListClusterBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsResponse.Unmarshal(m, b) +} +func (m *ListClusterBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsResponse.Merge(dst, src) +} +func (m *ListClusterBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsResponse.Size(m) +} +func (m *ListClusterBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsResponse proto.InternalMessageInfo + +func (m *ListClusterBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListClusterBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterHostsRequest struct { + // Required. ID of the MongoDB cluster. + // To get the MongoDB cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListClusterHostsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsRequest) Reset() { *m = ListClusterHostsRequest{} } +func (m *ListClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsRequest) ProtoMessage() {} +func (*ListClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{24} +} +func (m *ListClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsRequest.Unmarshal(m, b) +} +func (m *ListClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsRequest.Merge(dst, src) +} +func (m *ListClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsRequest.Size(m) +} +func (m *ListClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsRequest proto.InternalMessageInfo + +func (m *ListClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterHostsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterHostsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterHostsResponse struct { + // List of Host resources. + Hosts []*Host `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsResponse) Reset() { *m = ListClusterHostsResponse{} } +func (m *ListClusterHostsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsResponse) ProtoMessage() {} +func (*ListClusterHostsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{25} +} +func (m *ListClusterHostsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsResponse.Unmarshal(m, b) +} +func (m *ListClusterHostsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsResponse.Merge(dst, src) +} +func (m *ListClusterHostsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsResponse.Size(m) +} +func (m *ListClusterHostsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsResponse proto.InternalMessageInfo + +func (m *ListClusterHostsResponse) GetHosts() []*Host { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *ListClusterHostsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type AddClusterHostsRequest struct { + // Required. ID of the MongoDB cluster to add hosts to. + // To get the MongoDB cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Configurations for MongoDB hosts that should be added to the cluster. 
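+	// Host management sketch (illustrative only, not part of the generated upstream file);
+	// the AddHosts and DeleteHosts method names on the generated client are assumptions:
+	//
+	//   addOp, err := client.AddHosts(ctx, &AddClusterHostsRequest{
+	//       ClusterId: clusterID,
+	//       HostSpecs: []*HostSpec{ /* ... */ },
+	//   })
+	//   delOp, err := client.DeleteHosts(ctx, &DeleteClusterHostsRequest{
+	//       ClusterId: clusterID,
+	//       HostNames: []string{"host-name-1"},
+	//   })
+	//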
+ HostSpecs []*HostSpec `protobuf:"bytes,2,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsRequest) Reset() { *m = AddClusterHostsRequest{} } +func (m *AddClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsRequest) ProtoMessage() {} +func (*AddClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{26} +} +func (m *AddClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsRequest.Unmarshal(m, b) +} +func (m *AddClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsRequest.Merge(dst, src) +} +func (m *AddClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsRequest.Size(m) +} +func (m *AddClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsRequest proto.InternalMessageInfo + +func (m *AddClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +type AddClusterHostsMetadata struct { + // ID of the MongoDB cluster to which the hosts are being added. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being added to the cluster. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsMetadata) Reset() { *m = AddClusterHostsMetadata{} } +func (m *AddClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsMetadata) ProtoMessage() {} +func (*AddClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{27} +} +func (m *AddClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsMetadata.Unmarshal(m, b) +} +func (m *AddClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsMetadata.Merge(dst, src) +} +func (m *AddClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsMetadata.Size(m) +} +func (m *AddClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsMetadata proto.InternalMessageInfo + +func (m *AddClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsRequest struct { + // Required. ID of the MongoDB cluster to remove hosts from. 
+ // To get the MongoDB cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts to delete. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsRequest) Reset() { *m = DeleteClusterHostsRequest{} } +func (m *DeleteClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsRequest) ProtoMessage() {} +func (*DeleteClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{28} +} +func (m *DeleteClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsRequest.Unmarshal(m, b) +} +func (m *DeleteClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsRequest.Merge(dst, src) +} +func (m *DeleteClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsRequest.Size(m) +} +func (m *DeleteClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsRequest proto.InternalMessageInfo + +func (m *DeleteClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsRequest) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsMetadata struct { + // ID of the MongoDB cluster to remove hosts from. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being deleted. 
+ HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsMetadata) Reset() { *m = DeleteClusterHostsMetadata{} } +func (m *DeleteClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsMetadata) ProtoMessage() {} +func (*DeleteClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{29} +} +func (m *DeleteClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsMetadata.Merge(dst, src) +} +func (m *DeleteClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsMetadata.Size(m) +} +func (m *DeleteClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsMetadata proto.InternalMessageInfo + +func (m *DeleteClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type HostSpec struct { + // ID of the availability zone where the host resides. + // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + ZoneId string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // ID of the subnet that the host should belong to. This subnet should be a part + // of the network that the cluster belongs to. + // The network ID is set in the [Cluster.network_id] field. + SubnetId string `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Whether the host should get a public IP address on creation. + // + // After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign + // a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + // + // Possible values: + // * false — don't assign a public IP to the host. + // * true — the host should have a public IP address. 
+ AssignPublicIp bool `protobuf:"varint,3,opt,name=assign_public_ip,json=assignPublicIp,proto3" json:"assign_public_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HostSpec) Reset() { *m = HostSpec{} } +func (m *HostSpec) String() string { return proto.CompactTextString(m) } +func (*HostSpec) ProtoMessage() {} +func (*HostSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{30} +} +func (m *HostSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HostSpec.Unmarshal(m, b) +} +func (m *HostSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HostSpec.Marshal(b, m, deterministic) +} +func (dst *HostSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSpec.Merge(dst, src) +} +func (m *HostSpec) XXX_Size() int { + return xxx_messageInfo_HostSpec.Size(m) +} +func (m *HostSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HostSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSpec proto.InternalMessageInfo + +func (m *HostSpec) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *HostSpec) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *HostSpec) GetAssignPublicIp() bool { + if m != nil { + return m.AssignPublicIp + } + return false +} + +type MongodbSpec3_6 struct { + // Configuration and resource allocation for a mongod host. + Mongod *MongodbSpec3_6_Mongod `protobuf:"bytes,1,opt,name=mongod,proto3" json:"mongod,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodbSpec3_6) Reset() { *m = MongodbSpec3_6{} } +func (m *MongodbSpec3_6) String() string { return proto.CompactTextString(m) } +func (*MongodbSpec3_6) ProtoMessage() {} +func (*MongodbSpec3_6) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{31} +} +func (m *MongodbSpec3_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodbSpec3_6.Unmarshal(m, b) +} +func (m *MongodbSpec3_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodbSpec3_6.Marshal(b, m, deterministic) +} +func (dst *MongodbSpec3_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodbSpec3_6.Merge(dst, src) +} +func (m *MongodbSpec3_6) XXX_Size() int { + return xxx_messageInfo_MongodbSpec3_6.Size(m) +} +func (m *MongodbSpec3_6) XXX_DiscardUnknown() { + xxx_messageInfo_MongodbSpec3_6.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodbSpec3_6 proto.InternalMessageInfo + +func (m *MongodbSpec3_6) GetMongod() *MongodbSpec3_6_Mongod { + if m != nil { + return m.Mongod + } + return nil +} + +type MongodbSpec3_6_Mongod struct { + // Configuration for a mongod 3.6 host. + Config *config.MongodConfig3_6 `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // Resources allocated to mongod hosts. 
+ Resources *Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodbSpec3_6_Mongod) Reset() { *m = MongodbSpec3_6_Mongod{} } +func (m *MongodbSpec3_6_Mongod) String() string { return proto.CompactTextString(m) } +func (*MongodbSpec3_6_Mongod) ProtoMessage() {} +func (*MongodbSpec3_6_Mongod) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{31, 0} +} +func (m *MongodbSpec3_6_Mongod) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodbSpec3_6_Mongod.Unmarshal(m, b) +} +func (m *MongodbSpec3_6_Mongod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodbSpec3_6_Mongod.Marshal(b, m, deterministic) +} +func (dst *MongodbSpec3_6_Mongod) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodbSpec3_6_Mongod.Merge(dst, src) +} +func (m *MongodbSpec3_6_Mongod) XXX_Size() int { + return xxx_messageInfo_MongodbSpec3_6_Mongod.Size(m) +} +func (m *MongodbSpec3_6_Mongod) XXX_DiscardUnknown() { + xxx_messageInfo_MongodbSpec3_6_Mongod.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodbSpec3_6_Mongod proto.InternalMessageInfo + +func (m *MongodbSpec3_6_Mongod) GetConfig() *config.MongodConfig3_6 { + if m != nil { + return m.Config + } + return nil +} + +func (m *MongodbSpec3_6_Mongod) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +type ConfigSpec struct { + // Version of MongoDB used in the cluster. + // The only valid value: 3.6 + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Types that are valid to be assigned to MongodbSpec: + // *ConfigSpec_MongodbSpec_3_6 + MongodbSpec isConfigSpec_MongodbSpec `protobuf_oneof:"mongodb_spec"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (m *ConfigSpec) String() string { return proto.CompactTextString(m) } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_102bf0875d7e349f, []int{32} +} +func (m *ConfigSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSpec.Unmarshal(m, b) +} +func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSpec.Marshal(b, m, deterministic) +} +func (dst *ConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec.Merge(dst, src) +} +func (m *ConfigSpec) XXX_Size() int { + return xxx_messageInfo_ConfigSpec.Size(m) +} +func (m *ConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo + +func (m *ConfigSpec) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type isConfigSpec_MongodbSpec interface { + isConfigSpec_MongodbSpec() +} + +type ConfigSpec_MongodbSpec_3_6 struct { + MongodbSpec_3_6 *MongodbSpec3_6 `protobuf:"bytes,2,opt,name=mongodb_spec_3_6,json=mongodbSpec36,proto3,oneof"` +} + +func (*ConfigSpec_MongodbSpec_3_6) isConfigSpec_MongodbSpec() {} + +func (m *ConfigSpec) GetMongodbSpec() isConfigSpec_MongodbSpec { + if m != nil { + return m.MongodbSpec + } + return nil +} + +func (m *ConfigSpec) GetMongodbSpec_3_6() *MongodbSpec3_6 { + if x, ok := m.GetMongodbSpec().(*ConfigSpec_MongodbSpec_3_6); ok 
{ + return x.MongodbSpec_3_6 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ConfigSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigSpec_OneofMarshaler, _ConfigSpec_OneofUnmarshaler, _ConfigSpec_OneofSizer, []interface{}{ + (*ConfigSpec_MongodbSpec_3_6)(nil), + } +} + +func _ConfigSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigSpec) + // mongodb_spec + switch x := m.MongodbSpec.(type) { + case *ConfigSpec_MongodbSpec_3_6: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MongodbSpec_3_6); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigSpec.MongodbSpec has unexpected type %T", x) + } + return nil +} + +func _ConfigSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigSpec) + switch tag { + case 2: // mongodb_spec.mongodb_spec_3_6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MongodbSpec3_6) + err := b.DecodeMessage(msg) + m.MongodbSpec = &ConfigSpec_MongodbSpec_3_6{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigSpec) + // mongodb_spec + switch x := m.MongodbSpec.(type) { + case *ConfigSpec_MongodbSpec_3_6: + s := proto.Size(x.MongodbSpec_3_6) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*GetClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListClustersResponse") + proto.RegisterType((*CreateClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.CreateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.mongodb.v1.CreateClusterRequest.LabelsEntry") + proto.RegisterType((*CreateClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.CreateClusterMetadata") + proto.RegisterType((*UpdateClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.UpdateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.mongodb.v1.UpdateClusterRequest.LabelsEntry") + proto.RegisterType((*UpdateClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.UpdateClusterMetadata") + proto.RegisterType((*DeleteClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteClusterRequest") + proto.RegisterType((*DeleteClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteClusterMetadata") + proto.RegisterType((*StartClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.StartClusterRequest") + proto.RegisterType((*StartClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.StartClusterMetadata") + proto.RegisterType((*StopClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.StopClusterRequest") + proto.RegisterType((*StopClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.StopClusterMetadata") + proto.RegisterType((*BackupClusterRequest)(nil), "yandex.cloud.mdb.mongodb.v1.BackupClusterRequest") + proto.RegisterType((*BackupClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.BackupClusterMetadata") + proto.RegisterType((*RestoreClusterRequest)(nil), 
"yandex.cloud.mdb.mongodb.v1.RestoreClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.mongodb.v1.RestoreClusterRequest.LabelsEntry") + proto.RegisterType((*RestoreClusterMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.RestoreClusterMetadata") + proto.RegisterType((*LogRecord)(nil), "yandex.cloud.mdb.mongodb.v1.LogRecord") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.mongodb.v1.LogRecord.MessageEntry") + proto.RegisterType((*ListClusterLogsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterLogsRequest") + proto.RegisterType((*ListClusterLogsResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterLogsResponse") + proto.RegisterType((*ListClusterOperationsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterOperationsRequest") + proto.RegisterType((*ListClusterOperationsResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterOperationsResponse") + proto.RegisterType((*ListClusterBackupsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterBackupsRequest") + proto.RegisterType((*ListClusterBackupsResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterBackupsResponse") + proto.RegisterType((*ListClusterHostsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterHostsRequest") + proto.RegisterType((*ListClusterHostsResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListClusterHostsResponse") + proto.RegisterType((*AddClusterHostsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.AddClusterHostsRequest") + proto.RegisterType((*AddClusterHostsMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.AddClusterHostsMetadata") + proto.RegisterType((*DeleteClusterHostsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteClusterHostsRequest") + proto.RegisterType((*DeleteClusterHostsMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteClusterHostsMetadata") + proto.RegisterType((*HostSpec)(nil), "yandex.cloud.mdb.mongodb.v1.HostSpec") + proto.RegisterType((*MongodbSpec3_6)(nil), "yandex.cloud.mdb.mongodb.v1.MongodbSpec3_6") + proto.RegisterType((*MongodbSpec3_6_Mongod)(nil), "yandex.cloud.mdb.mongodb.v1.MongodbSpec3_6.Mongod") + proto.RegisterType((*ConfigSpec)(nil), "yandex.cloud.mdb.mongodb.v1.ConfigSpec") + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.ListClusterLogsRequest_ServiceType", ListClusterLogsRequest_ServiceType_name, ListClusterLogsRequest_ServiceType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ClusterServiceClient is the client API for ClusterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterServiceClient interface { + // Returns the specified MongoDB Cluster resource. + // + // To get the list of available MongoDB Cluster resources, make a [List] request. + Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Retrieves the list of MongoDB Cluster resources that belong + // to the specified folder. + List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Creates a MongoDB cluster in the specified folder. 
+ Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified MongoDB cluster. + Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified MongoDB cluster. + Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Start the specified MongoDB cluster. + Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Stop the specified MongoDB cluster. + Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a backup for the specified MongoDB cluster. + Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a new MongoDB cluster using the specified backup. + Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Retrieves logs for the specified MongoDB cluster. + // See the [Logs](/docs/yandex-mdb-guide/concepts/logs) section in the developers guide for detailed logs description. + ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) + // Retrieves the list of Operation resources for the specified cluster. + ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified MongoDB cluster. + ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. + DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type clusterServiceClient struct { + cc *grpc.ClientConn +} + +func NewClusterServiceClient(cc *grpc.ClientConn) ClusterServiceClient { + return &clusterServiceClient{cc} +} + +func (c *clusterServiceClient) Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Create", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Backup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/Restore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) { + out := new(ListClusterLogsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListLogs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) { + out := new(ListClusterOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) { + out := new(ListClusterBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListBackups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) { + out := new(ListClusterHostsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListHosts", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/AddHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ClusterService/DeleteHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServiceServer is the server API for ClusterService service. +type ClusterServiceServer interface { + // Returns the specified MongoDB Cluster resource. + // + // To get the list of available MongoDB Cluster resources, make a [List] request. + Get(context.Context, *GetClusterRequest) (*Cluster, error) + // Retrieves the list of MongoDB Cluster resources that belong + // to the specified folder. + List(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Creates a MongoDB cluster in the specified folder. + Create(context.Context, *CreateClusterRequest) (*operation.Operation, error) + // Updates the specified MongoDB cluster. + Update(context.Context, *UpdateClusterRequest) (*operation.Operation, error) + // Deletes the specified MongoDB cluster. + Delete(context.Context, *DeleteClusterRequest) (*operation.Operation, error) + // Start the specified MongoDB cluster. + Start(context.Context, *StartClusterRequest) (*operation.Operation, error) + // Stop the specified MongoDB cluster. + Stop(context.Context, *StopClusterRequest) (*operation.Operation, error) + // Creates a backup for the specified MongoDB cluster. + Backup(context.Context, *BackupClusterRequest) (*operation.Operation, error) + // Creates a new MongoDB cluster using the specified backup. + Restore(context.Context, *RestoreClusterRequest) (*operation.Operation, error) + // Retrieves logs for the specified MongoDB cluster. + // See the [Logs](/docs/yandex-mdb-guide/concepts/logs) section in the developers guide for detailed logs description. + ListLogs(context.Context, *ListClusterLogsRequest) (*ListClusterLogsResponse, error) + // Retrieves the list of Operation resources for the specified cluster. + ListOperations(context.Context, *ListClusterOperationsRequest) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified MongoDB cluster. + ListBackups(context.Context, *ListClusterBackupsRequest) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(context.Context, *ListClusterHostsRequest) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(context.Context, *AddClusterHostsRequest) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. 
+ DeleteHosts(context.Context, *DeleteClusterHostsRequest) (*operation.Operation, error) +} + +func RegisterClusterServiceServer(s *grpc.Server, srv ClusterServiceServer) { + s.RegisterService(&_ClusterService_serviceDesc, srv) +} + +func _ClusterService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Get(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).List(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Create(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Update(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Delete(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + 
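// Illustrative usage sketch (not part of the vendored file or of this patch):
// the request/response messages above describe token-based pagination
// (page_size, page_token, next_page_token) and host management for a MongoDB
// cluster, and the generated ClusterServiceClient exposes them as plain unary
// gRPC calls. The sketch below shows how those pieces could fit together. It
// assumes an already established, authenticated *grpc.ClientConn to the MDB
// API endpoint (TLS and IAM credentials are omitted); the cluster ID, zone ID
// and subnet ID are placeholders, and Host.GetName() is assumed from
// cluster.pb.go, which is outside this hunk.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		mongodb "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1"
//		"google.golang.org/grpc"
//	)
//
//	func listAndAddHosts(ctx context.Context, conn *grpc.ClientConn) error {
//		client := mongodb.NewClusterServiceClient(conn)
//
//		// Page through the cluster's hosts, 100 per request, following
//		// next_page_token until the service returns an empty token.
//		pageToken := ""
//		for {
//			resp, err := client.ListHosts(ctx, &mongodb.ListClusterHostsRequest{
//				ClusterId: "<cluster-id>",
//				PageSize:  100,
//				PageToken: pageToken,
//			})
//			if err != nil {
//				return err
//			}
//			for _, host := range resp.GetHosts() {
//				fmt.Println(host.GetName())
//			}
//			pageToken = resp.GetNextPageToken()
//			if pageToken == "" {
//				break
//			}
//		}
//
//		// AddHosts returns a long-running operation.Operation; waiting for it
//		// to complete is out of scope for this sketch.
//		op, err := client.AddHosts(ctx, &mongodb.AddClusterHostsRequest{
//			ClusterId: "<cluster-id>",
//			HostSpecs: []*mongodb.HostSpec{{
//				ZoneId:         "ru-central1-a",
//				SubnetId:       "<subnet-id>",
//				AssignPublicIp: false,
//			}},
//		})
//		if err != nil {
//			return err
//		}
//		log.Printf("AddHosts operation started: %s", op.GetId())
//		return nil
//	}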
+func _ClusterService_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Start(ctx, req.(*StartClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Stop(ctx, req.(*StopClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Backup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackupClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Backup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Backup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Backup(ctx, req.(*BackupClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Restore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Restore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/Restore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Restore(ctx, req.(*RestoreClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterLogsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListLogs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListLogs(ctx, req.(*ListClusterLogsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(ListClusterOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListOperations(ctx, req.(*ListClusterOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListBackups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListBackups(ctx, req.(*ListClusterBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListHosts(ctx, req.(*ListClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_AddHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).AddHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/AddHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).AddHosts(ctx, req.(*AddClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_DeleteHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).DeleteHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ClusterService/DeleteHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).DeleteHosts(ctx, req.(*DeleteClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.mongodb.v1.ClusterService", + HandlerType: (*ClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: 
_ClusterService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ClusterService_List_Handler, + }, + { + MethodName: "Create", + Handler: _ClusterService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _ClusterService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _ClusterService_Delete_Handler, + }, + { + MethodName: "Start", + Handler: _ClusterService_Start_Handler, + }, + { + MethodName: "Stop", + Handler: _ClusterService_Stop_Handler, + }, + { + MethodName: "Backup", + Handler: _ClusterService_Backup_Handler, + }, + { + MethodName: "Restore", + Handler: _ClusterService_Restore_Handler, + }, + { + MethodName: "ListLogs", + Handler: _ClusterService_ListLogs_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ClusterService_ListOperations_Handler, + }, + { + MethodName: "ListBackups", + Handler: _ClusterService_ListBackups_Handler, + }, + { + MethodName: "ListHosts", + Handler: _ClusterService_ListHosts_Handler, + }, + { + MethodName: "AddHosts", + Handler: _ClusterService_AddHosts_Handler, + }, + { + MethodName: "DeleteHosts", + Handler: _ClusterService_DeleteHosts_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/mongodb/v1/cluster_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/cluster_service.proto", fileDescriptor_cluster_service_102bf0875d7e349f) +} + +var fileDescriptor_cluster_service_102bf0875d7e349f = []byte{ + // 2185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xcf, 0xe8, 0x83, 0x1f, 0x8f, 0xb6, 0xa2, 0x4e, 0x64, 0x87, 0x61, 0x6c, 0x47, 0xde, 0x38, + 0x8e, 0x4c, 0x95, 0xcb, 0x2f, 0x9b, 0x8a, 0x64, 0x3b, 0xb1, 0x28, 0xc9, 0x36, 0x11, 0xc9, 0x16, + 0x56, 0xb2, 0xeb, 0x3a, 0x35, 0x88, 0x25, 0x77, 0x44, 0x13, 0x22, 0x77, 0x59, 0xee, 0x52, 0xb5, + 0x14, 0xb8, 0x08, 0xdc, 0x9b, 0x0f, 0x01, 0x8a, 0xa0, 0x2d, 0x8a, 0x00, 0xfd, 0x07, 0x8a, 0x02, + 0xad, 0x50, 0xf4, 0x03, 0xe8, 0xa9, 0x45, 0x61, 0x01, 0xbd, 0x14, 0xea, 0xbd, 0xa7, 0x02, 0xed, + 0x39, 0xb7, 0xf6, 0x54, 0xcc, 0xc7, 0x92, 0xbb, 0x22, 0xb9, 0x5a, 0x4a, 0x36, 0x10, 0x20, 0x37, + 0xee, 0xcc, 0x7b, 0xbf, 0xf9, 0xbd, 0x37, 0x6f, 0xe6, 0xcd, 0x7b, 0x12, 0xa4, 0xb7, 0x55, 0x5d, + 0x23, 0x4f, 0x92, 0xe5, 0x9a, 0xd1, 0xd2, 0x92, 0x75, 0xad, 0x94, 0xac, 0x1b, 0x7a, 0xc5, 0xd0, + 0x4a, 0xc9, 0xad, 0x74, 0xb2, 0x5c, 0x6b, 0x99, 0x16, 0x69, 0x16, 0x4d, 0xd2, 0xdc, 0xaa, 0x96, + 0x89, 0xdc, 0x68, 0x1a, 0x96, 0x81, 0xdf, 0xe6, 0x2a, 0x32, 0x53, 0x91, 0xeb, 0x5a, 0x49, 0x16, + 0x2a, 0xf2, 0x56, 0x3a, 0x76, 0xa6, 0x62, 0x18, 0x95, 0x1a, 0x49, 0xaa, 0x8d, 0x6a, 0x52, 0xd5, + 0x75, 0xc3, 0x52, 0xad, 0xaa, 0xa1, 0x9b, 0x5c, 0x35, 0x36, 0x29, 0x66, 0xd9, 0x57, 0xa9, 0xb5, + 0x91, 0xdc, 0xa8, 0x92, 0x9a, 0x56, 0xac, 0xab, 0xe6, 0xa6, 0x90, 0x78, 0xe7, 0xa0, 0x84, 0x55, + 0xad, 0x13, 0xd3, 0x52, 0xeb, 0x0d, 0x21, 0x10, 0x13, 0x84, 0xe9, 0x02, 0x46, 0x83, 0x34, 0x19, + 0xbe, 0x98, 0xbb, 0xe8, 0x32, 0xa6, 0x3d, 0xdb, 0x25, 0x77, 0xd6, 0x25, 0xb7, 0xa5, 0xd6, 0xaa, + 0x9a, 0x73, 0x7a, 0xca, 0xcb, 0x27, 0x25, 0xb5, 0xbc, 0xd9, 0xb2, 0xc9, 0x5c, 0xf2, 0xe1, 0x3d, + 0x21, 0x1a, 0xf7, 0x12, 0xd5, 0x54, 0x4b, 0x2d, 0xa9, 0x26, 0xe9, 0x69, 0xc7, 0x01, 0xd9, 0x96, + 0xd9, 0xc6, 0xcc, 0x7a, 0x2e, 0x6f, 0xe8, 0x1b, 0xd5, 0x8a, 0x3d, 0x92, 0x2d, 0xe6, 0xb8, 0x92, + 0x74, 0x03, 0xbe, 0x75, 0x8b, 0x58, 0x0b, 0x9c, 0x9c, 0x42, 0xbe, 0xdf, 0x22, 0xa6, 0x85, 0xa7, + 0x01, 0xec, 0xcd, 0xae, 0x6a, 0x51, 0x34, 0x89, 0xa6, 0xc2, 0xf9, 0x13, 0xff, 0x79, 0x91, 
0x46, + 0xcf, 0xf7, 0xd2, 0x23, 0xd7, 0xae, 0x5f, 0x49, 0x29, 0x61, 0x31, 0x5f, 0xd0, 0xa4, 0xdf, 0x23, + 0x78, 0x63, 0xb9, 0x6a, 0xda, 0x18, 0xa6, 0x0d, 0x72, 0x09, 0xc2, 0x1b, 0x46, 0x4d, 0xeb, 0x8f, + 0x11, 0xe2, 0xd3, 0x05, 0x0d, 0xbf, 0x0f, 0xe1, 0x86, 0x5a, 0x21, 0x45, 0xb3, 0xba, 0x43, 0xa2, + 0x43, 0x93, 0x68, 0x6a, 0x38, 0x0f, 0xff, 0x7b, 0x91, 0x0e, 0x5c, 0xbb, 0x9e, 0x4e, 0xa5, 0x52, + 0x4a, 0x88, 0x4e, 0xae, 0x55, 0x77, 0x08, 0x9e, 0x02, 0x60, 0x82, 0x96, 0xb1, 0x49, 0xf4, 0xe8, + 0x30, 0x03, 0x0d, 0x3f, 0xdf, 0x4b, 0x8f, 0x32, 0x49, 0x85, 0xa1, 0xac, 0xd3, 0x39, 0x2c, 0x41, + 0x60, 0xa3, 0x5a, 0xb3, 0x48, 0x33, 0x3a, 0xc2, 0xa4, 0xe0, 0xf9, 0x5e, 0x1b, 0x4f, 0xcc, 0x48, + 0x9f, 0x21, 0x98, 0x70, 0x33, 0x37, 0x1b, 0x86, 0x6e, 0x12, 0x7c, 0x03, 0x42, 0xc2, 0x3e, 0x33, + 0x8a, 0x26, 0x87, 0xa7, 0x22, 0x99, 0x0b, 0xb2, 0x47, 0x98, 0xcb, 0xb6, 0xfb, 0xda, 0x5a, 0xf8, + 0x22, 0xbc, 0xae, 0x93, 0x27, 0x56, 0xd1, 0xc1, 0x96, 0xda, 0x15, 0x56, 0x4e, 0xd2, 0xe1, 0x55, + 0x9b, 0xa6, 0xf4, 0xe7, 0x00, 0x4c, 0x2c, 0x34, 0x89, 0x6a, 0x91, 0x03, 0x5b, 0x30, 0x80, 0xf7, + 0xd2, 0x30, 0xa2, 0xab, 0x75, 0xee, 0xb8, 0x70, 0xfe, 0x2c, 0x95, 0xfa, 0xea, 0x45, 0xfa, 0xe4, + 0x27, 0x6a, 0x62, 0x67, 0x3e, 0xf1, 0x30, 0x95, 0x98, 0x2d, 0x3e, 0x8a, 0x73, 0xb5, 0x5c, 0x56, + 0x61, 0xa2, 0x78, 0x1a, 0x22, 0x1a, 0x31, 0xcb, 0xcd, 0x6a, 0x83, 0x06, 0xba, 0xdb, 0x91, 0x99, + 0x2b, 0x39, 0xc5, 0x39, 0x8b, 0x7f, 0x8a, 0x20, 0x50, 0x53, 0x4b, 0xa4, 0x66, 0x46, 0x47, 0x98, + 0x33, 0xae, 0x7b, 0x3b, 0xa3, 0x87, 0x39, 0xf2, 0x32, 0xd3, 0x5f, 0xd2, 0xad, 0xe6, 0x76, 0xfe, + 0xa3, 0xaf, 0x5e, 0xa4, 0x23, 0x9f, 0x24, 0x8a, 0xa9, 0xc4, 0xac, 0x9a, 0xd8, 0x79, 0x14, 0x7f, + 0xc6, 0xb9, 0x5d, 0xb6, 0x39, 0xee, 0xee, 0xa5, 0x03, 0x31, 0xfb, 0x17, 0xc6, 0xe3, 0xd4, 0x90, + 0x47, 0x0e, 0x79, 0x45, 0xb0, 0xc1, 0x0f, 0x20, 0x42, 0xf4, 0xad, 0x6a, 0xd3, 0xd0, 0xeb, 0x44, + 0xb7, 0xa2, 0xa3, 0x93, 0x68, 0x6a, 0x2c, 0x93, 0xf2, 0xb3, 0x53, 0xf2, 0x52, 0x47, 0x2f, 0x3f, + 0x42, 0x3d, 0xa6, 0x38, 0xa1, 0xf0, 0x1d, 0x88, 0xf0, 0x03, 0x53, 0x34, 0x1b, 0xa4, 0x1c, 0x0d, + 0x4c, 0xa2, 0xa9, 0x48, 0xe6, 0x7d, 0x6f, 0x64, 0x26, 0xbf, 0xd6, 0x20, 0x65, 0x01, 0x08, 0xe5, + 0xf6, 0x08, 0x7e, 0x00, 0x63, 0xf6, 0xa1, 0x66, 0x88, 0x66, 0x34, 0xc8, 0x3c, 0x79, 0xc9, 0x13, + 0x72, 0x51, 0xa8, 0x30, 0xd0, 0xc0, 0xb3, 0xbd, 0xf4, 0xd0, 0x87, 0x29, 0xe5, 0xa4, 0xe6, 0x18, + 0x35, 0xf1, 0x32, 0x00, 0xbd, 0x02, 0x04, 0x6a, 0x88, 0xa1, 0xbe, 0xe7, 0x89, 0x7a, 0xcf, 0x24, + 0x4d, 0x17, 0x62, 0xb8, 0x25, 0x46, 0x18, 0xda, 0x63, 0xc3, 0xb4, 0x04, 0x5a, 0xd8, 0x07, 0xda, + 0x6d, 0xc3, 0xb4, 0xdc, 0x68, 0x8f, 0xc5, 0x88, 0x49, 0xaf, 0x11, 0x9d, 0x58, 0x3f, 0x30, 0x9a, + 0x9b, 0x34, 0x88, 0xa1, 0xd7, 0x35, 0x22, 0xe6, 0x0b, 0x5a, 0x6c, 0x16, 0x22, 0x8e, 0x20, 0xc1, + 0xe3, 0x30, 0xbc, 0x49, 0xb6, 0x79, 0xe4, 0x2b, 0xf4, 0x27, 0x9e, 0x80, 0xd1, 0x2d, 0xb5, 0xd6, + 0x12, 0x71, 0xae, 0xf0, 0x8f, 0xb9, 0xa1, 0x0f, 0x90, 0x94, 0x83, 0x53, 0xae, 0xa0, 0x5b, 0x21, + 0x96, 0x4a, 0xbd, 0x84, 0xcf, 0x76, 0xdf, 0x63, 0xce, 0x9b, 0xeb, 0x6f, 0xc3, 0x30, 0x71, 0xaf, + 0xa1, 0x75, 0x1f, 0xbe, 0x41, 0xee, 0x3f, 0x7c, 0x15, 0x22, 0x2d, 0x06, 0xc2, 0x12, 0x17, 0x63, + 0x17, 0xc9, 0xc4, 0x64, 0x9e, 0xb9, 0x64, 0x3b, 0x73, 0xc9, 0x37, 0x69, 0x6e, 0x5b, 0x51, 0xcd, + 0x4d, 0x05, 0xb8, 0x38, 0xfd, 0xfd, 0x4a, 0x0f, 0x62, 0x2f, 0xd3, 0x5e, 0xcd, 0x41, 0xbc, 0xed, + 0x3e, 0x2e, 0xa3, 0x03, 0x1d, 0x17, 0xe7, 0x41, 0x39, 0x66, 0x14, 0xb8, 0x2c, 0xf6, 0x1b, 0x05, + 0x0b, 0x30, 0xb1, 0x48, 0x6a, 0xe4, 0x58, 0x41, 0x40, 0x17, 0x77, 0x81, 0xf8, 0x5d, 0x3c, 0x0f, + 0x6f, 0xac, 0x59, 
0x6a, 0xf3, 0x58, 0x09, 0xf8, 0x0a, 0x4c, 0x38, 0x31, 0xfc, 0x2e, 0x3d, 0x0f, + 0x78, 0xcd, 0x32, 0x1a, 0xc7, 0x59, 0xf9, 0x32, 0x65, 0xdf, 0x86, 0x18, 0xc0, 0xe1, 0x79, 0xf6, + 0x6c, 0x3a, 0xa6, 0xc3, 0x5d, 0x20, 0x7e, 0x17, 0xff, 0xc9, 0x28, 0x9c, 0x52, 0x88, 0x69, 0x19, + 0xcd, 0x83, 0xfb, 0x7d, 0x1e, 0xc2, 0xfc, 0x35, 0xd7, 0x59, 0x9d, 0x5f, 0xe4, 0x21, 0x3e, 0x5c, + 0xd0, 0x70, 0xc6, 0x95, 0x69, 0xcf, 0x89, 0x4c, 0x3b, 0xe6, 0xc8, 0xb4, 0x89, 0xe3, 0xa5, 0xda, + 0x9f, 0x1d, 0x3c, 0xe1, 0x1f, 0x7a, 0x1e, 0xa2, 0x9e, 0x86, 0x7c, 0xd3, 0x73, 0xad, 0x3b, 0x87, + 0x05, 0x5f, 0x6a, 0x0e, 0x0b, 0xbd, 0xb2, 0x1c, 0xb6, 0x0e, 0xa7, 0xdd, 0xbb, 0xe9, 0x33, 0xa0, + 0xf1, 0xdb, 0xce, 0xb0, 0xe5, 0xb0, 0xed, 0x80, 0x95, 0xfe, 0x8e, 0x20, 0xbc, 0x6c, 0x54, 0x14, + 0x52, 0x36, 0x9a, 0x1a, 0xfe, 0x00, 0xc2, 0xed, 0xfa, 0x89, 0x01, 0xf5, 0xca, 0x53, 0xeb, 0xb6, + 0x84, 0xd2, 0x11, 0xc6, 0x2b, 0x10, 0xac, 0x13, 0xd3, 0x54, 0x2b, 0x94, 0x39, 0x75, 0x68, 0xd6, + 0xd3, 0xa1, 0xed, 0x25, 0xe5, 0x15, 0xae, 0xc5, 0xfc, 0xa1, 0xd8, 0x18, 0xb1, 0x39, 0x38, 0xe1, + 0x9c, 0x18, 0xc8, 0x51, 0x7b, 0xc3, 0x70, 0xda, 0xf1, 0x68, 0x5f, 0x36, 0x2a, 0xe6, 0x91, 0xd2, + 0xf6, 0xbb, 0x70, 0xb2, 0x6c, 0xd4, 0x5a, 0x75, 0xbd, 0x28, 0xea, 0x04, 0x6a, 0x58, 0x58, 0x39, + 0xc1, 0x07, 0x6f, 0xb2, 0x31, 0x5c, 0x82, 0x13, 0xa2, 0xda, 0x2d, 0x5a, 0xdb, 0x0d, 0xc2, 0x4e, + 0xef, 0x58, 0xe6, 0x23, 0x6f, 0xe3, 0x7b, 0x92, 0x93, 0xd7, 0x38, 0xce, 0xfa, 0x76, 0x83, 0x28, + 0x11, 0xb3, 0xf3, 0x81, 0x67, 0x20, 0xbc, 0xd1, 0x34, 0xea, 0x45, 0xea, 0x6d, 0x56, 0xac, 0x78, + 0xef, 0x4a, 0x88, 0x0a, 0xd3, 0x4f, 0x9c, 0x85, 0xa0, 0x65, 0x70, 0xb5, 0xd1, 0x43, 0xd5, 0x02, + 0x96, 0xc1, 0x94, 0x5c, 0xa5, 0x56, 0xc0, 0x77, 0xa9, 0x15, 0xec, 0x5f, 0x6a, 0x49, 0x33, 0x10, + 0x71, 0x18, 0x87, 0xcf, 0x40, 0x74, 0x6d, 0x49, 0xb9, 0x5f, 0x58, 0x58, 0x2a, 0xae, 0x7f, 0x77, + 0x75, 0xa9, 0x78, 0xef, 0xce, 0xda, 0xea, 0xd2, 0x42, 0xe1, 0x66, 0x61, 0x69, 0x71, 0xfc, 0x35, + 0x0c, 0x10, 0x58, 0xb9, 0x7b, 0xe7, 0xd6, 0xdd, 0xc5, 0x71, 0x24, 0x3d, 0x85, 0x37, 0xbb, 0x9c, + 0x25, 0x2a, 0xb0, 0x39, 0x18, 0xa9, 0x19, 0x15, 0xbb, 0xfa, 0xba, 0xe8, 0x2f, 0xda, 0x14, 0xa6, + 0xe3, 0xbb, 0xf6, 0xfa, 0x05, 0x82, 0x33, 0x8e, 0xf5, 0xef, 0xda, 0x6d, 0x81, 0xa3, 0xc5, 0xd3, + 0xcb, 0xaf, 0x61, 0xa5, 0xe7, 0x08, 0xce, 0xf6, 0x21, 0x28, 0xdc, 0x34, 0x0f, 0xd0, 0xee, 0x66, + 0xd8, 0xce, 0x3a, 0xef, 0x76, 0x56, 0xa7, 0xdb, 0xd1, 0xd6, 0x57, 0x1c, 0x4a, 0xbe, 0xbd, 0xf5, + 0x25, 0x82, 0xb7, 0x1c, 0x64, 0x78, 0xf2, 0xfd, 0xda, 0xb8, 0xea, 0x47, 0x08, 0x62, 0xbd, 0xd8, + 0x09, 0x3f, 0x5d, 0x87, 0x20, 0xbf, 0x13, 0x6d, 0x27, 0xbd, 0xeb, 0x19, 0x51, 0x5c, 0x5d, 0xb1, + 0x75, 0x7c, 0xfb, 0xe8, 0xe7, 0xc8, 0x15, 0xd1, 0x34, 0xaf, 0x7c, 0x6d, 0x3c, 0xf4, 0x29, 0x44, + 0xbb, 0xa9, 0x09, 0xf7, 0xcc, 0xc0, 0x28, 0xcd, 0x78, 0x7d, 0x22, 0xa8, 0x47, 0xb6, 0x54, 0xb8, + 0xbc, 0x6f, 0xc7, 0x7c, 0x81, 0xe0, 0xf4, 0xbc, 0xa6, 0x1d, 0xdb, 0x2f, 0xee, 0xdc, 0x3e, 0x74, + 0xbc, 0xdc, 0x2e, 0x7d, 0x07, 0xde, 0x3c, 0x40, 0xca, 0x6f, 0xd2, 0x3d, 0x2b, 0x78, 0xd0, 0x17, + 0x9e, 0x29, 0x32, 0x07, 0x03, 0xbe, 0x43, 0x07, 0xa4, 0x27, 0xf0, 0x96, 0xab, 0x1a, 0x38, 0xba, + 0xc1, 0x72, 0xf7, 0x42, 0xf9, 0xd7, 0xb9, 0x25, 0xf6, 0x13, 0x32, 0xeb, 0x5c, 0xf9, 0x21, 0xc4, + 0xba, 0x57, 0x7e, 0x49, 0x56, 0xfd, 0x10, 0x42, 0xb6, 0x37, 0xf1, 0x79, 0x08, 0xee, 0x18, 0x3a, + 0xe9, 0x58, 0x10, 0x6a, 0xb3, 0x0f, 0xd0, 0x89, 0x82, 0x86, 0xdf, 0x83, 0xb0, 0xd9, 0x2a, 0xe9, + 0xc4, 0x6a, 0x3f, 0x4c, 0x1c, 0x42, 0x21, 0x3e, 0x55, 0xd0, 0xf0, 0x14, 0x8c, 0xab, 0xa6, 0x59, + 0xad, 0xe8, 0xc5, 0x46, 0xab, 0x54, 0xab, 
0x96, 0x8b, 0xd5, 0x06, 0x8b, 0xe3, 0x90, 0x32, 0xc6, + 0xc7, 0x57, 0xd9, 0x70, 0xa1, 0x21, 0x7d, 0x36, 0x04, 0x63, 0x2b, 0x7c, 0x67, 0x29, 0x87, 0x6c, + 0x31, 0x87, 0x57, 0x21, 0xc0, 0xf7, 0x5a, 0x3c, 0x67, 0x32, 0x9e, 0xb1, 0xe0, 0x56, 0x16, 0x9f, + 0xe2, 0x05, 0x29, 0x70, 0x62, 0x5f, 0x22, 0x08, 0xf0, 0x09, 0xfc, 0x31, 0x04, 0xf8, 0xb3, 0x52, + 0x80, 0x7b, 0xbf, 0x79, 0xb8, 0xa8, 0x00, 0xe5, 0x0f, 0xd4, 0x6c, 0x31, 0xa7, 0x08, 0x08, 0xbc, + 0x08, 0xe1, 0x26, 0x31, 0x8d, 0x56, 0xb3, 0xcc, 0x5c, 0x8b, 0x0e, 0xcd, 0x6a, 0x8a, 0x2d, 0xad, + 0x74, 0x14, 0xa5, 0xcf, 0x11, 0x40, 0xe7, 0xf1, 0x8b, 0xa3, 0x10, 0xdc, 0x22, 0x4d, 0x93, 0xd6, + 0x15, 0x7c, 0x33, 0xed, 0x4f, 0x7c, 0x1f, 0xc6, 0x05, 0x16, 0x3b, 0x2b, 0xc5, 0x6c, 0x31, 0x27, + 0x56, 0x9d, 0x1e, 0xc0, 0x45, 0xb7, 0x5f, 0x53, 0x4e, 0xd6, 0x1d, 0x23, 0xb9, 0xfc, 0x18, 0x9c, + 0x70, 0xe2, 0x66, 0xfe, 0x7b, 0x0a, 0xc6, 0x44, 0xa8, 0x89, 0x37, 0x00, 0xfe, 0x1c, 0xc1, 0xf0, + 0x2d, 0x62, 0x61, 0xd9, 0x73, 0xa1, 0xae, 0xa6, 0x73, 0xcc, 0x57, 0x8b, 0x55, 0x4a, 0x3f, 0xfb, + 0xc7, 0xbf, 0xbe, 0x18, 0x9a, 0xc6, 0x97, 0x92, 0x75, 0x55, 0x57, 0x2b, 0x44, 0x4b, 0x74, 0xf7, + 0xd8, 0xcd, 0xe4, 0xa7, 0x9d, 0x30, 0x7f, 0x8a, 0x7f, 0x8c, 0x60, 0x84, 0x5e, 0x7d, 0x38, 0xe5, + 0xf7, 0xdd, 0x66, 0x9f, 0xd5, 0x58, 0x7a, 0x00, 0x0d, 0x7e, 0x97, 0x4a, 0x17, 0x18, 0xc1, 0x73, + 0xf8, 0x8c, 0x17, 0x41, 0xfc, 0x4b, 0x04, 0x01, 0xde, 0xb3, 0xc2, 0xe9, 0x81, 0xbb, 0xa9, 0xb1, + 0xc3, 0x53, 0xbc, 0xf4, 0xf1, 0xee, 0x7e, 0x7c, 0xb2, 0x5f, 0x5f, 0x2c, 0x28, 0x06, 0x18, 0xd5, + 0xf3, 0x92, 0x27, 0xd5, 0x39, 0x14, 0xc7, 0x7f, 0x40, 0x10, 0xe0, 0xbd, 0x95, 0x43, 0xd8, 0xf6, + 0x6a, 0x39, 0xf9, 0x61, 0xfb, 0x80, 0xb3, 0xed, 0xdd, 0xbf, 0x71, 0xb1, 0x95, 0x33, 0xfe, 0x77, + 0x9e, 0x52, 0xff, 0x0b, 0x82, 0x00, 0xbf, 0x11, 0x0f, 0xa1, 0xde, 0xab, 0x07, 0xe4, 0x87, 0x7a, + 0x79, 0x77, 0x3f, 0x2e, 0xf7, 0xeb, 0xfe, 0x9c, 0x3a, 0xf8, 0x20, 0x5f, 0xaa, 0x37, 0xac, 0x6d, + 0x1e, 0xc2, 0xf1, 0x01, 0x42, 0xf8, 0xb7, 0x08, 0x46, 0x59, 0x8f, 0xe7, 0x90, 0x18, 0xee, 0xd1, + 0x4b, 0xf2, 0x63, 0xc3, 0xfd, 0xdd, 0xfd, 0xf8, 0x3b, 0x7d, 0xba, 0x48, 0x2e, 0xef, 0x27, 0xa4, + 0xe9, 0x3e, 0x7f, 0xd7, 0x3a, 0xe0, 0x79, 0x93, 0x91, 0xfd, 0x35, 0x82, 0x91, 0x35, 0xcb, 0x68, + 0xe0, 0xe4, 0x21, 0xac, 0x0f, 0xb6, 0xa1, 0xfc, 0x90, 0x5e, 0xdf, 0xdd, 0x8f, 0x9f, 0xeb, 0xdd, + 0x80, 0x72, 0x71, 0xfe, 0xb6, 0x14, 0xf7, 0xcb, 0xd9, 0x68, 0xe0, 0x3f, 0x21, 0x08, 0xf0, 0xe7, + 0xdf, 0x21, 0xf1, 0xd2, 0xab, 0x85, 0xe5, 0x87, 0xf6, 0xf7, 0x78, 0xa8, 0xf7, 0x6e, 0x5e, 0xb9, + 0x88, 0x67, 0xa4, 0x94, 0xff, 0x50, 0xe7, 0x4f, 0x55, 0xfc, 0x3b, 0x04, 0x41, 0xd1, 0x47, 0xc0, + 0x99, 0xc1, 0x7b, 0x47, 0x7e, 0xfd, 0x7e, 0xbe, 0x6f, 0xb7, 0xc2, 0x65, 0xc1, 0x25, 0xe9, 0x82, + 0xe7, 0xd5, 0xd2, 0xe4, 0x18, 0xf4, 0x9c, 0xfe, 0x0a, 0x41, 0x88, 0xde, 0xa7, 0xb4, 0x0a, 0xc4, + 0xd9, 0x23, 0x14, 0xd8, 0xb1, 0xcb, 0x83, 0x29, 0x89, 0xeb, 0x3a, 0xc7, 0x88, 0xa6, 0xb0, 0xec, + 0xdf, 0xd5, 0xac, 0xc8, 0xfc, 0x2b, 0x82, 0x31, 0x8a, 0xd9, 0x29, 0xca, 0xf0, 0xac, 0x5f, 0x02, + 0x5d, 0x95, 0x66, 0x6c, 0xee, 0x28, 0xaa, 0xc2, 0x82, 0x6b, 0xcc, 0x82, 0x1c, 0xbe, 0xec, 0xdb, + 0x82, 0xa4, 0xa3, 0xfc, 0xfb, 0x23, 0x82, 0x08, 0xc5, 0x17, 0x15, 0x13, 0xce, 0xf9, 0x65, 0xe2, + 0x2e, 0x00, 0x63, 0x33, 0x03, 0xeb, 0x09, 0xfa, 0xb3, 0x8c, 0x7e, 0x16, 0xa7, 0xfd, 0xd3, 0xb7, + 0xcb, 0xb2, 0xdf, 0x20, 0x08, 0x53, 0x64, 0xf6, 0xc8, 0xc5, 0xbe, 0xf7, 0xdf, 0xf9, 0x1a, 0x8f, + 0x5d, 0x19, 0x50, 0x4b, 0xb0, 0x9e, 0x61, 0xac, 0xd3, 0x38, 0xe9, 0x9f, 0x35, 0xaf, 0x98, 0xfe, + 0x89, 0x20, 0x34, 0xaf, 0x69, 0x9c, 0xb2, 0x77, 0x9c, 0xf7, 0x2e, 
0x98, 0xfc, 0x1c, 0xd1, 0x9d, + 0xdd, 0xfd, 0x78, 0xaa, 0x7f, 0x71, 0xe3, 0x91, 0x95, 0x6e, 0x48, 0x57, 0x07, 0xb4, 0x68, 0xae, + 0xa4, 0x5a, 0xe5, 0xc7, 0xfc, 0x9d, 0x41, 0x0f, 0xf2, 0xbf, 0x11, 0x44, 0x78, 0x32, 0xe4, 0x36, + 0xe6, 0xfc, 0x67, 0xdd, 0x41, 0xcd, 0x7c, 0xba, 0xbb, 0x1f, 0xcf, 0x7a, 0x16, 0x3c, 0xaf, 0xc0, + 0x52, 0xbe, 0xda, 0x1c, 0x8a, 0xe7, 0x0b, 0x0f, 0x6f, 0x55, 0xaa, 0xd6, 0xe3, 0x56, 0x49, 0x2e, + 0x1b, 0xf5, 0x24, 0x67, 0x9b, 0xe0, 0xff, 0x7c, 0x51, 0x31, 0x12, 0x15, 0xa2, 0xb3, 0x55, 0x93, + 0x1e, 0xff, 0x95, 0x71, 0x55, 0xfc, 0x2c, 0x05, 0x98, 0x68, 0xf6, 0xff, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x77, 0xed, 0x33, 0x00, 0x80, 0x23, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config/mongodb3_6.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config/mongodb3_6.pb.go new file mode 100644 index 000000000..594f2dc32 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config/mongodb3_6.pb.go @@ -0,0 +1,600 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/config/mongodb3_6.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor int32 + +const ( + MongodConfig3_6_Storage_WiredTiger_CollectionConfig_COMPRESSOR_UNSPECIFIED MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor = 0 + // No compression. + MongodConfig3_6_Storage_WiredTiger_CollectionConfig_NONE MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor = 1 + // The [Snappy](https://docs.mongodb.com/v3.6/reference/glossary/#term-snappy) compression. + MongodConfig3_6_Storage_WiredTiger_CollectionConfig_SNAPPY MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor = 2 + // The [zlib](https://docs.mongodb.com/v3.6/reference/glossary/#term-zlib) compression. 
+ MongodConfig3_6_Storage_WiredTiger_CollectionConfig_ZLIB MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor = 3 +) + +var MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor_name = map[int32]string{ + 0: "COMPRESSOR_UNSPECIFIED", + 1: "NONE", + 2: "SNAPPY", + 3: "ZLIB", +} +var MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor_value = map[string]int32{ + "COMPRESSOR_UNSPECIFIED": 0, + "NONE": 1, + "SNAPPY": 2, + "ZLIB": 3, +} + +func (x MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor) String() string { + return proto.EnumName(MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor_name, int32(x)) +} +func (MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 0, 0, 1, 0} +} + +type MongodConfig3_6_OperationProfiling_Mode int32 + +const ( + MongodConfig3_6_OperationProfiling_MODE_UNSPECIFIED MongodConfig3_6_OperationProfiling_Mode = 0 + // The profiler is off and does not collect any data. + MongodConfig3_6_OperationProfiling_OFF MongodConfig3_6_OperationProfiling_Mode = 1 + // The profiler collects data for operations that take longer than the value of [slow_op_threshold]. + MongodConfig3_6_OperationProfiling_SLOW_OP MongodConfig3_6_OperationProfiling_Mode = 2 + // The profiler collects data for all operations. + MongodConfig3_6_OperationProfiling_ALL MongodConfig3_6_OperationProfiling_Mode = 3 +) + +var MongodConfig3_6_OperationProfiling_Mode_name = map[int32]string{ + 0: "MODE_UNSPECIFIED", + 1: "OFF", + 2: "SLOW_OP", + 3: "ALL", +} +var MongodConfig3_6_OperationProfiling_Mode_value = map[string]int32{ + "MODE_UNSPECIFIED": 0, + "OFF": 1, + "SLOW_OP": 2, + "ALL": 3, +} + +func (x MongodConfig3_6_OperationProfiling_Mode) String() string { + return proto.EnumName(MongodConfig3_6_OperationProfiling_Mode_name, int32(x)) +} +func (MongodConfig3_6_OperationProfiling_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 1, 0} +} + +// Configuration of a mongod daemon. Supported options are a limited subset of all +// options described in [MongoDB documentation](https://docs.mongodb.com/v3.6/reference/configuration-options/). +type MongodConfig3_6 struct { + // `storage` section of mongod configuration. + Storage *MongodConfig3_6_Storage `protobuf:"bytes,1,opt,name=storage,proto3" json:"storage,omitempty"` + // `operationProfiling` section of mongod configuration. + OperationProfiling *MongodConfig3_6_OperationProfiling `protobuf:"bytes,2,opt,name=operation_profiling,json=operationProfiling,proto3" json:"operation_profiling,omitempty"` + // `net` section of mongod configuration. 
+ Net *MongodConfig3_6_Network `protobuf:"bytes,3,opt,name=net,proto3" json:"net,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6) Reset() { *m = MongodConfig3_6{} } +func (m *MongodConfig3_6) String() string { return proto.CompactTextString(m) } +func (*MongodConfig3_6) ProtoMessage() {} +func (*MongodConfig3_6) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0} +} +func (m *MongodConfig3_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6.Unmarshal(m, b) +} +func (m *MongodConfig3_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6.Merge(dst, src) +} +func (m *MongodConfig3_6) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6.Size(m) +} +func (m *MongodConfig3_6) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6 proto.InternalMessageInfo + +func (m *MongodConfig3_6) GetStorage() *MongodConfig3_6_Storage { + if m != nil { + return m.Storage + } + return nil +} + +func (m *MongodConfig3_6) GetOperationProfiling() *MongodConfig3_6_OperationProfiling { + if m != nil { + return m.OperationProfiling + } + return nil +} + +func (m *MongodConfig3_6) GetNet() *MongodConfig3_6_Network { + if m != nil { + return m.Net + } + return nil +} + +type MongodConfig3_6_Storage struct { + // Configuration of the WiredTiger storage engine. + WiredTiger *MongodConfig3_6_Storage_WiredTiger `protobuf:"bytes,1,opt,name=wired_tiger,json=wiredTiger,proto3" json:"wired_tiger,omitempty"` + // Configuration of the MongoDB [journal](https://docs.mongodb.com/v3.6/reference/glossary/#term-journal). 
+ Journal *MongodConfig3_6_Storage_Journal `protobuf:"bytes,2,opt,name=journal,proto3" json:"journal,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_Storage) Reset() { *m = MongodConfig3_6_Storage{} } +func (m *MongodConfig3_6_Storage) String() string { return proto.CompactTextString(m) } +func (*MongodConfig3_6_Storage) ProtoMessage() {} +func (*MongodConfig3_6_Storage) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 0} +} +func (m *MongodConfig3_6_Storage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_Storage.Unmarshal(m, b) +} +func (m *MongodConfig3_6_Storage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_Storage.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_Storage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_Storage.Merge(dst, src) +} +func (m *MongodConfig3_6_Storage) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_Storage.Size(m) +} +func (m *MongodConfig3_6_Storage) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_Storage.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_Storage proto.InternalMessageInfo + +func (m *MongodConfig3_6_Storage) GetWiredTiger() *MongodConfig3_6_Storage_WiredTiger { + if m != nil { + return m.WiredTiger + } + return nil +} + +func (m *MongodConfig3_6_Storage) GetJournal() *MongodConfig3_6_Storage_Journal { + if m != nil { + return m.Journal + } + return nil +} + +// Configuration of WiredTiger storage engine. +type MongodConfig3_6_Storage_WiredTiger struct { + // Engine configuration for WiredTiger. + EngineConfig *MongodConfig3_6_Storage_WiredTiger_EngineConfig `protobuf:"bytes,1,opt,name=engine_config,json=engineConfig,proto3" json:"engine_config,omitempty"` + // Collection configuration for WiredTiger. 
+ CollectionConfig *MongodConfig3_6_Storage_WiredTiger_CollectionConfig `protobuf:"bytes,2,opt,name=collection_config,json=collectionConfig,proto3" json:"collection_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_Storage_WiredTiger) Reset() { *m = MongodConfig3_6_Storage_WiredTiger{} } +func (m *MongodConfig3_6_Storage_WiredTiger) String() string { return proto.CompactTextString(m) } +func (*MongodConfig3_6_Storage_WiredTiger) ProtoMessage() {} +func (*MongodConfig3_6_Storage_WiredTiger) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 0, 0} +} +func (m *MongodConfig3_6_Storage_WiredTiger) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger.Unmarshal(m, b) +} +func (m *MongodConfig3_6_Storage_WiredTiger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_Storage_WiredTiger) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger.Merge(dst, src) +} +func (m *MongodConfig3_6_Storage_WiredTiger) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger.Size(m) +} +func (m *MongodConfig3_6_Storage_WiredTiger) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger proto.InternalMessageInfo + +func (m *MongodConfig3_6_Storage_WiredTiger) GetEngineConfig() *MongodConfig3_6_Storage_WiredTiger_EngineConfig { + if m != nil { + return m.EngineConfig + } + return nil +} + +func (m *MongodConfig3_6_Storage_WiredTiger) GetCollectionConfig() *MongodConfig3_6_Storage_WiredTiger_CollectionConfig { + if m != nil { + return m.CollectionConfig + } + return nil +} + +type MongodConfig3_6_Storage_WiredTiger_EngineConfig struct { + // The maximum size of the internal cache that WiredTiger will use for all data. 
+ CacheSizeGb *wrappers.DoubleValue `protobuf:"bytes,1,opt,name=cache_size_gb,json=cacheSizeGb,proto3" json:"cache_size_gb,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) Reset() { + *m = MongodConfig3_6_Storage_WiredTiger_EngineConfig{} +} +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) String() string { + return proto.CompactTextString(m) +} +func (*MongodConfig3_6_Storage_WiredTiger_EngineConfig) ProtoMessage() {} +func (*MongodConfig3_6_Storage_WiredTiger_EngineConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 0, 0, 0} +} +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_EngineConfig.Unmarshal(m, b) +} +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_EngineConfig.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_Storage_WiredTiger_EngineConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_EngineConfig.Merge(dst, src) +} +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_EngineConfig.Size(m) +} +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_EngineConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_EngineConfig proto.InternalMessageInfo + +func (m *MongodConfig3_6_Storage_WiredTiger_EngineConfig) GetCacheSizeGb() *wrappers.DoubleValue { + if m != nil { + return m.CacheSizeGb + } + return nil +} + +type MongodConfig3_6_Storage_WiredTiger_CollectionConfig struct { + // Default type of compression to use for collection data. 
+ BlockCompressor MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor `protobuf:"varint,1,opt,name=block_compressor,json=blockCompressor,proto3,enum=yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor" json:"block_compressor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) Reset() { + *m = MongodConfig3_6_Storage_WiredTiger_CollectionConfig{} +} +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) String() string { + return proto.CompactTextString(m) +} +func (*MongodConfig3_6_Storage_WiredTiger_CollectionConfig) ProtoMessage() {} +func (*MongodConfig3_6_Storage_WiredTiger_CollectionConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 0, 0, 1} +} +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_CollectionConfig.Unmarshal(m, b) +} +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_CollectionConfig.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_CollectionConfig.Merge(dst, src) +} +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_CollectionConfig.Size(m) +} +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_CollectionConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_Storage_WiredTiger_CollectionConfig proto.InternalMessageInfo + +func (m *MongodConfig3_6_Storage_WiredTiger_CollectionConfig) GetBlockCompressor() MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor { + if m != nil { + return m.BlockCompressor + } + return MongodConfig3_6_Storage_WiredTiger_CollectionConfig_COMPRESSOR_UNSPECIFIED +} + +type MongodConfig3_6_Storage_Journal struct { + // Whether the journal is enabled or disabled. + // Possible values: + // * true (default) — the journal is enabled. + // * false — the journal is disabled. + Enabled *wrappers.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Commit interval between journal operations, in milliseconds. + // Default: 100. 
+ CommitInterval *wrappers.Int64Value `protobuf:"bytes,2,opt,name=commit_interval,json=commitInterval,proto3" json:"commit_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_Storage_Journal) Reset() { *m = MongodConfig3_6_Storage_Journal{} } +func (m *MongodConfig3_6_Storage_Journal) String() string { return proto.CompactTextString(m) } +func (*MongodConfig3_6_Storage_Journal) ProtoMessage() {} +func (*MongodConfig3_6_Storage_Journal) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 0, 1} +} +func (m *MongodConfig3_6_Storage_Journal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_Storage_Journal.Unmarshal(m, b) +} +func (m *MongodConfig3_6_Storage_Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_Storage_Journal.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_Storage_Journal) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_Storage_Journal.Merge(dst, src) +} +func (m *MongodConfig3_6_Storage_Journal) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_Storage_Journal.Size(m) +} +func (m *MongodConfig3_6_Storage_Journal) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_Storage_Journal.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_Storage_Journal proto.InternalMessageInfo + +func (m *MongodConfig3_6_Storage_Journal) GetEnabled() *wrappers.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func (m *MongodConfig3_6_Storage_Journal) GetCommitInterval() *wrappers.Int64Value { + if m != nil { + return m.CommitInterval + } + return nil +} + +type MongodConfig3_6_OperationProfiling struct { + // Mode which specifies operations that should be profiled. + Mode MongodConfig3_6_OperationProfiling_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6_OperationProfiling_Mode" json:"mode,omitempty"` + // The slow operation time threshold, in milliseconds. Operations that run + // for longer than this threshold are considered slow, and are processed by the profiler + // running in the SLOW_OP mode. 
+ SlowOpThreshold *wrappers.Int64Value `protobuf:"bytes,2,opt,name=slow_op_threshold,json=slowOpThreshold,proto3" json:"slow_op_threshold,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_OperationProfiling) Reset() { *m = MongodConfig3_6_OperationProfiling{} } +func (m *MongodConfig3_6_OperationProfiling) String() string { return proto.CompactTextString(m) } +func (*MongodConfig3_6_OperationProfiling) ProtoMessage() {} +func (*MongodConfig3_6_OperationProfiling) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 1} +} +func (m *MongodConfig3_6_OperationProfiling) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_OperationProfiling.Unmarshal(m, b) +} +func (m *MongodConfig3_6_OperationProfiling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_OperationProfiling.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_OperationProfiling) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_OperationProfiling.Merge(dst, src) +} +func (m *MongodConfig3_6_OperationProfiling) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_OperationProfiling.Size(m) +} +func (m *MongodConfig3_6_OperationProfiling) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_OperationProfiling.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_OperationProfiling proto.InternalMessageInfo + +func (m *MongodConfig3_6_OperationProfiling) GetMode() MongodConfig3_6_OperationProfiling_Mode { + if m != nil { + return m.Mode + } + return MongodConfig3_6_OperationProfiling_MODE_UNSPECIFIED +} + +func (m *MongodConfig3_6_OperationProfiling) GetSlowOpThreshold() *wrappers.Int64Value { + if m != nil { + return m.SlowOpThreshold + } + return nil +} + +type MongodConfig3_6_Network struct { + // The maximum number of simultaneous connections that mongod will accept. 
+ MaxIncomingConnections *wrappers.Int64Value `protobuf:"bytes,1,opt,name=max_incoming_connections,json=maxIncomingConnections,proto3" json:"max_incoming_connections,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfig3_6_Network) Reset() { *m = MongodConfig3_6_Network{} } +func (m *MongodConfig3_6_Network) String() string { return proto.CompactTextString(m) } +func (*MongodConfig3_6_Network) ProtoMessage() {} +func (*MongodConfig3_6_Network) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{0, 2} +} +func (m *MongodConfig3_6_Network) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfig3_6_Network.Unmarshal(m, b) +} +func (m *MongodConfig3_6_Network) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfig3_6_Network.Marshal(b, m, deterministic) +} +func (dst *MongodConfig3_6_Network) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfig3_6_Network.Merge(dst, src) +} +func (m *MongodConfig3_6_Network) XXX_Size() int { + return xxx_messageInfo_MongodConfig3_6_Network.Size(m) +} +func (m *MongodConfig3_6_Network) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfig3_6_Network.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfig3_6_Network proto.InternalMessageInfo + +func (m *MongodConfig3_6_Network) GetMaxIncomingConnections() *wrappers.Int64Value { + if m != nil { + return m.MaxIncomingConnections + } + return nil +} + +type MongodConfigSet3_6 struct { + // Effective settings for a MongoDB 3.6 cluster (a combination of settings defined + // in [user_config] and [default_config]). + EffectiveConfig *MongodConfig3_6 `protobuf:"bytes,1,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` + // User-defined settings for a MongoDB 3.6 cluster. + UserConfig *MongodConfig3_6 `protobuf:"bytes,2,opt,name=user_config,json=userConfig,proto3" json:"user_config,omitempty"` + // Default configuration for a MongoDB 3.6 cluster. 
+ DefaultConfig *MongodConfig3_6 `protobuf:"bytes,3,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MongodConfigSet3_6) Reset() { *m = MongodConfigSet3_6{} } +func (m *MongodConfigSet3_6) String() string { return proto.CompactTextString(m) } +func (*MongodConfigSet3_6) ProtoMessage() {} +func (*MongodConfigSet3_6) Descriptor() ([]byte, []int) { + return fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2, []int{1} +} +func (m *MongodConfigSet3_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MongodConfigSet3_6.Unmarshal(m, b) +} +func (m *MongodConfigSet3_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MongodConfigSet3_6.Marshal(b, m, deterministic) +} +func (dst *MongodConfigSet3_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_MongodConfigSet3_6.Merge(dst, src) +} +func (m *MongodConfigSet3_6) XXX_Size() int { + return xxx_messageInfo_MongodConfigSet3_6.Size(m) +} +func (m *MongodConfigSet3_6) XXX_DiscardUnknown() { + xxx_messageInfo_MongodConfigSet3_6.DiscardUnknown(m) +} + +var xxx_messageInfo_MongodConfigSet3_6 proto.InternalMessageInfo + +func (m *MongodConfigSet3_6) GetEffectiveConfig() *MongodConfig3_6 { + if m != nil { + return m.EffectiveConfig + } + return nil +} + +func (m *MongodConfigSet3_6) GetUserConfig() *MongodConfig3_6 { + if m != nil { + return m.UserConfig + } + return nil +} + +func (m *MongodConfigSet3_6) GetDefaultConfig() *MongodConfig3_6 { + if m != nil { + return m.DefaultConfig + } + return nil +} + +func init() { + proto.RegisterType((*MongodConfig3_6)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6") + proto.RegisterType((*MongodConfig3_6_Storage)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.Storage") + proto.RegisterType((*MongodConfig3_6_Storage_WiredTiger)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.Storage.WiredTiger") + proto.RegisterType((*MongodConfig3_6_Storage_WiredTiger_EngineConfig)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.Storage.WiredTiger.EngineConfig") + proto.RegisterType((*MongodConfig3_6_Storage_WiredTiger_CollectionConfig)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.Storage.WiredTiger.CollectionConfig") + proto.RegisterType((*MongodConfig3_6_Storage_Journal)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.Storage.Journal") + proto.RegisterType((*MongodConfig3_6_OperationProfiling)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.OperationProfiling") + proto.RegisterType((*MongodConfig3_6_Network)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6.Network") + proto.RegisterType((*MongodConfigSet3_6)(nil), "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet3_6") + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor", MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor_name, MongodConfig3_6_Storage_WiredTiger_CollectionConfig_Compressor_value) + proto.RegisterEnum("yandex.cloud.mdb.mongodb.v1.config.MongodConfig3_6_OperationProfiling_Mode", MongodConfig3_6_OperationProfiling_Mode_name, MongodConfig3_6_OperationProfiling_Mode_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/config/mongodb3_6.proto", fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2) +} + +var fileDescriptor_mongodb3_6_b6b4a9da8ede6ac2 = []byte{ + // 832 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x96, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xc7, 0x71, 0x52, 0xea, 0xe5, 0xa4, 0x6d, 0xbc, 0x03, 0x5a, 0x55, 0xe6, 0x43, 0x28, 0x57, + 0xdc, 0x74, 0x5c, 0x6f, 0x4a, 0x85, 0x54, 0x09, 0xb1, 0x4d, 0x13, 0x08, 0x34, 0xb1, 0x65, 0x77, + 0xa9, 0xa8, 0x04, 0x96, 0x3f, 0x26, 0xae, 0x59, 0x7b, 0xc6, 0xf2, 0x47, 0x52, 0xf6, 0x16, 0x6e, + 0x91, 0x78, 0x0a, 0xde, 0x80, 0x67, 0xe8, 0x1d, 0x8f, 0xc0, 0x13, 0xf0, 0x04, 0xbd, 0x42, 0xf6, + 0x8c, 0xd3, 0x6e, 0x2a, 0xb1, 0x28, 0x5b, 0xee, 0xe2, 0x93, 0xf9, 0xff, 0xfe, 0x73, 0xce, 0x9c, + 0x39, 0x36, 0xf4, 0x7f, 0x72, 0x69, 0x40, 0xae, 0x34, 0x3f, 0x66, 0x65, 0xa0, 0x25, 0x81, 0xa7, + 0x25, 0x8c, 0x86, 0x2c, 0xf0, 0xb4, 0xb9, 0xae, 0xf9, 0x8c, 0xce, 0xa2, 0xb0, 0x89, 0xf4, 0x9d, + 0x43, 0x9c, 0x66, 0xac, 0x60, 0xa8, 0xc7, 0x45, 0xb8, 0x16, 0xe1, 0x24, 0xf0, 0xb0, 0x58, 0x82, + 0xe7, 0x3a, 0xe6, 0x22, 0xf5, 0xa3, 0x90, 0xb1, 0x30, 0x26, 0x5a, 0xad, 0xf0, 0xca, 0x99, 0xb6, + 0xc8, 0xdc, 0x34, 0x25, 0x59, 0xce, 0x19, 0xea, 0x87, 0xaf, 0x18, 0xcf, 0xdd, 0x38, 0x0a, 0xdc, + 0x22, 0x62, 0x94, 0xff, 0xdd, 0xfb, 0x6b, 0x0b, 0xba, 0x93, 0x1a, 0x3a, 0xa8, 0x79, 0x7d, 0xe7, + 0x10, 0x3d, 0x07, 0x39, 0x2f, 0x58, 0xe6, 0x86, 0x64, 0x57, 0xfa, 0x58, 0xfa, 0xa4, 0xf3, 0xf4, + 0x08, 0xbf, 0x7e, 0x23, 0x78, 0x85, 0x82, 0x6d, 0x8e, 0xb0, 0x1a, 0x16, 0x5a, 0xc0, 0xbb, 0x2c, + 0x25, 0x59, 0xed, 0xee, 0xa4, 0x19, 0x9b, 0x45, 0x71, 0x44, 0xc3, 0xdd, 0x56, 0x6d, 0x31, 0x5a, + 0xc7, 0xc2, 0x68, 0x70, 0x66, 0x43, 0xb3, 0x10, 0xbb, 0x17, 0x43, 0x13, 0x68, 0x53, 0x52, 0xec, + 0xb6, 0xd7, 0xcf, 0x65, 0x4a, 0x8a, 0x05, 0xcb, 0x5e, 0x58, 0x15, 0x47, 0xfd, 0x43, 0x06, 0x59, + 0x24, 0x87, 0x42, 0xe8, 0x2c, 0xa2, 0x8c, 0x04, 0x4e, 0x11, 0x85, 0x24, 0x13, 0xe5, 0x1a, 0xbd, + 0x41, 0xb9, 0xf0, 0x79, 0x85, 0x3b, 0xab, 0x68, 0x16, 0x2c, 0x96, 0xbf, 0xd1, 0xf7, 0x20, 0xff, + 0xc8, 0xca, 0x8c, 0xba, 0xb1, 0x28, 0xd8, 0xe0, 0x4d, 0x4c, 0xbe, 0xe6, 0x28, 0xab, 0x61, 0xaa, + 0x7f, 0x6e, 0x00, 0xdc, 0x3a, 0xa3, 0x2b, 0xd8, 0x26, 0x34, 0x8c, 0x28, 0x71, 0x38, 0x48, 0x24, + 0x66, 0x3f, 0x4c, 0x62, 0x78, 0x58, 0xb3, 0xf9, 0x12, 0x6b, 0x8b, 0xdc, 0x79, 0x42, 0xbf, 0x48, + 0xf0, 0xd8, 0x67, 0x71, 0x4c, 0xfc, 0xba, 0x4d, 0x84, 0x3d, 0x4f, 0xf9, 0xfc, 0x81, 0xec, 0x07, + 0x4b, 0xbe, 0xd8, 0x82, 0xe2, 0xaf, 0x44, 0x54, 0x13, 0xb6, 0xee, 0x6e, 0x12, 0x7d, 0x01, 0xdb, + 0xbe, 0xeb, 0x5f, 0x12, 0x27, 0x8f, 0x5e, 0x12, 0x27, 0xf4, 0x44, 0x41, 0x3e, 0xc0, 0xfc, 0xf6, + 0xe1, 0xe6, 0xf6, 0xe1, 0x13, 0x56, 0x7a, 0x31, 0xf9, 0xd6, 0x8d, 0x4b, 0x62, 0x75, 0x6a, 0x89, + 0x1d, 0xbd, 0x24, 0x5f, 0x7a, 0xea, 0xdf, 0x12, 0x28, 0xab, 0xc6, 0xe8, 0x57, 0x09, 0x14, 0x2f, + 0x66, 0xfe, 0x0b, 0xc7, 0x67, 0x49, 0x9a, 0x91, 0x3c, 0x67, 0xbc, 0x89, 0x76, 0x9e, 0x7a, 0xff, + 0x53, 0xb2, 0x78, 0xb0, 0x74, 0xb2, 0xba, 0xb5, 0xf7, 0x6d, 0xa0, 0xf7, 0x15, 0xc0, 0xed, 0x13, + 0x52, 0xe1, 0xc9, 0xc0, 0x98, 0x98, 0xd6, 0xd0, 0xb6, 0x0d, 0xcb, 0x79, 0x3e, 0xb5, 0xcd, 0xe1, + 0x60, 0x3c, 0x1a, 0x0f, 0x4f, 0x94, 0xb7, 0xd0, 0x23, 0xd8, 0x98, 0x1a, 0xd3, 0xa1, 0x22, 0x21, + 0x80, 0x4d, 0x7b, 0xfa, 0xcc, 0x34, 0xbf, 0x53, 0x5a, 0x55, 0xf4, 0xe2, 0x74, 0x7c, 0xac, 0xb4, + 0xd5, 0xdf, 0x24, 0x90, 0x45, 0x97, 0xa1, 0x03, 0x90, 0x09, 0x75, 0xbd, 0x98, 0x04, 0xa2, 0x6c, + 0xea, 0xbd, 0xb2, 0x1d, 0x33, 0x16, 0xf3, 0xa2, 0x35, 0x4b, 0x91, 0x01, 0x5d, 0x9f, 0x25, 0x49, + 0x54, 0x38, 0x11, 0x2d, 0x48, 0x36, 0x5f, 0x76, 0xfe, 0xfb, 0xf7, 0xd4, 0x63, 0x5a, 0x1c, 0x1e, + 0xd4, 0xf2, 0xe3, 0x77, 0x6e, 0xae, 0xf5, 0xb7, 0xf5, 0xbd, 0x4f, 0xf7, 0xf7, 0xad, 0x1d, 0x2e, + 
0x1f, 0x0b, 0xb5, 0xfa, 0x73, 0x0b, 0xd0, 0xfd, 0x89, 0x81, 0x1c, 0xd8, 0x48, 0x58, 0x40, 0x44, + 0xd9, 0xbf, 0x79, 0x98, 0x39, 0x84, 0x27, 0x2c, 0x20, 0x56, 0x0d, 0x46, 0x06, 0x3c, 0xce, 0x63, + 0xb6, 0x70, 0x58, 0xea, 0x14, 0x97, 0x19, 0xc9, 0x2f, 0x59, 0x1c, 0xfc, 0x97, 0x54, 0x36, 0x6f, + 0xae, 0xf5, 0xd6, 0xe7, 0xfb, 0x56, 0xb7, 0x52, 0x1b, 0xe9, 0x59, 0xa3, 0xed, 0x1d, 0xc1, 0x46, + 0x85, 0x47, 0xef, 0x81, 0x32, 0x31, 0x4e, 0x86, 0x2b, 0x27, 0x23, 0x43, 0xdb, 0x18, 0x8d, 0x14, + 0x09, 0x75, 0x40, 0xb6, 0x4f, 0x8d, 0x73, 0xc7, 0x30, 0x95, 0x56, 0x15, 0x7d, 0x76, 0x7a, 0xaa, + 0xb4, 0x55, 0x0a, 0xb2, 0x98, 0x66, 0xc8, 0x87, 0xdd, 0xc4, 0xbd, 0x72, 0x22, 0xea, 0xb3, 0x24, + 0xa2, 0x61, 0x75, 0xd9, 0x28, 0x6f, 0x95, 0x5c, 0x1c, 0xd4, 0xbf, 0xee, 0x6f, 0xeb, 0xe6, 0x5a, + 0x7f, 0xa4, 0xef, 0xef, 0xe9, 0x87, 0xfd, 0xcf, 0x0e, 0xac, 0x27, 0x89, 0x7b, 0x35, 0x16, 0xa4, + 0xc1, 0x2d, 0xa8, 0xf7, 0x7b, 0x0b, 0xd0, 0xdd, 0x7a, 0xd9, 0xa4, 0xa8, 0xde, 0x31, 0x3f, 0x80, + 0x42, 0x66, 0xb3, 0x6a, 0xd1, 0x7c, 0x65, 0xc8, 0xf4, 0xd7, 0x38, 0x01, 0xab, 0xbb, 0x84, 0x89, + 0x9b, 0x75, 0x06, 0x9d, 0x32, 0x27, 0xd9, 0xab, 0x03, 0x64, 0x2d, 0x34, 0x54, 0x1c, 0x41, 0xbd, + 0x80, 0x9d, 0x80, 0xcc, 0xdc, 0x32, 0x2e, 0x1a, 0x70, 0x7b, 0x7d, 0xf0, 0xb6, 0x40, 0xf1, 0xc8, + 0xb1, 0x79, 0x31, 0x0d, 0xa3, 0xe2, 0xb2, 0xf4, 0xb0, 0xcf, 0x12, 0x8d, 0xf3, 0xf6, 0xf8, 0x5b, + 0x3b, 0x64, 0x7b, 0x21, 0xa1, 0xf5, 0x19, 0x68, 0xaf, 0xff, 0x8e, 0x38, 0x12, 0x11, 0x6f, 0xb3, + 0x56, 0xf4, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xb4, 0xd3, 0xe3, 0x7c, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database.pb.go new file mode 100644 index 000000000..174454c41 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/database.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A MongoDB Database resource. For more information, see the +// [Developer's Guide](/docs/managed-mongodb/concepts). +type Database struct { + // Name of the database. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the MongoDB cluster that the database belongs to. 
+ ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Database) Reset() { *m = Database{} } +func (m *Database) String() string { return proto.CompactTextString(m) } +func (*Database) ProtoMessage() {} +func (*Database) Descriptor() ([]byte, []int) { + return fileDescriptor_database_4ea16d4da2c264aa, []int{0} +} +func (m *Database) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Database.Unmarshal(m, b) +} +func (m *Database) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Database.Marshal(b, m, deterministic) +} +func (dst *Database) XXX_Merge(src proto.Message) { + xxx_messageInfo_Database.Merge(dst, src) +} +func (m *Database) XXX_Size() int { + return xxx_messageInfo_Database.Size(m) +} +func (m *Database) XXX_DiscardUnknown() { + xxx_messageInfo_Database.DiscardUnknown(m) +} + +var xxx_messageInfo_Database proto.InternalMessageInfo + +func (m *Database) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Database) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DatabaseSpec struct { + // Name of the MongoDB database. 1-63 characters long. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DatabaseSpec) Reset() { *m = DatabaseSpec{} } +func (m *DatabaseSpec) String() string { return proto.CompactTextString(m) } +func (*DatabaseSpec) ProtoMessage() {} +func (*DatabaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_database_4ea16d4da2c264aa, []int{1} +} +func (m *DatabaseSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DatabaseSpec.Unmarshal(m, b) +} +func (m *DatabaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DatabaseSpec.Marshal(b, m, deterministic) +} +func (dst *DatabaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DatabaseSpec.Merge(dst, src) +} +func (m *DatabaseSpec) XXX_Size() int { + return xxx_messageInfo_DatabaseSpec.Size(m) +} +func (m *DatabaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DatabaseSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DatabaseSpec proto.InternalMessageInfo + +func (m *DatabaseSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*Database)(nil), "yandex.cloud.mdb.mongodb.v1.Database") + proto.RegisterType((*DatabaseSpec)(nil), "yandex.cloud.mdb.mongodb.v1.DatabaseSpec") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/database.proto", fileDescriptor_database_4ea16d4da2c264aa) +} + +var fileDescriptor_database_4ea16d4da2c264aa = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xaa, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x4d, 0x49, 0xd2, 0xcf, 0xcd, 0xcf, + 0x4b, 0xcf, 0x4f, 0x49, 0xd2, 0x2f, 0x33, 0xd4, 0x4f, 0x49, 0x2c, 0x49, 0x4c, 0x4a, 0x2c, 0x4e, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x86, 0xa8, 0xd5, 0x03, 0xab, 0xd5, 0xcb, 0x4d, + 0x49, 0xd2, 0x83, 0xaa, 0xd5, 0x2b, 0x33, 0x94, 0x92, 0x45, 0x31, 0xa8, 0x2c, 0x31, 0x27, 0x33, + 0x25, 0xb1, 0x24, 0x33, 0x3f, 0x0f, 0xa2, 0x57, 0xc9, 0x96, 0x8b, 0xc3, 
0x05, 0x6a, 0x9a, 0x90, + 0x10, 0x17, 0x4b, 0x5e, 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, 0x2d, + 0x24, 0xcb, 0xc5, 0x95, 0x9c, 0x53, 0x5a, 0x5c, 0x92, 0x5a, 0x14, 0x9f, 0x99, 0x22, 0xc1, 0x04, + 0x96, 0xe1, 0x84, 0x8a, 0x78, 0xa6, 0x28, 0x39, 0x71, 0xf1, 0xc0, 0xb4, 0x07, 0x17, 0xa4, 0x26, + 0x0b, 0x19, 0x21, 0x1b, 0xe1, 0x24, 0xf7, 0xe2, 0xb8, 0x21, 0xe3, 0xa7, 0xe3, 0x86, 0x7c, 0xd1, + 0x89, 0xba, 0x55, 0x8e, 0xba, 0x51, 0x06, 0xba, 0x96, 0xf1, 0xba, 0xb1, 0x5a, 0x5d, 0x27, 0x0c, + 0x59, 0x6c, 0x6c, 0xcd, 0x8c, 0x21, 0x56, 0x38, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x43, 0x9c, 0xab, 0x0b, 0x71, 0x6e, 0x7a, 0xbe, 0x6e, 0x7a, + 0x6a, 0x1e, 0xd8, 0xa5, 0xfa, 0x78, 0x02, 0xc4, 0x1a, 0xca, 0x4c, 0x62, 0x03, 0x2b, 0x35, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0x89, 0x35, 0xe4, 0x5f, 0x3e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database_service.pb.go new file mode 100644 index 000000000..069af5cff --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/database_service.pb.go @@ -0,0 +1,627 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/database_service.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetDatabaseRequest struct { + // ID of the MongoDB cluster that the database belongs to. + // To get the cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the MongoDB Database resource to return. + // To get the name of the database use a [DatabaseService.List] request. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDatabaseRequest) Reset() { *m = GetDatabaseRequest{} } +func (m *GetDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseRequest) ProtoMessage() {} +func (*GetDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{0} +} +func (m *GetDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDatabaseRequest.Unmarshal(m, b) +} +func (m *GetDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *GetDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDatabaseRequest.Merge(dst, src) +} +func (m *GetDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_GetDatabaseRequest.Size(m) +} +func (m *GetDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDatabaseRequest proto.InternalMessageInfo + +func (m *GetDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type ListDatabasesRequest struct { + // ID of the MongoDB cluster to list databases in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListDatabasesResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDatabasesRequest) Reset() { *m = ListDatabasesRequest{} } +func (m *ListDatabasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesRequest) ProtoMessage() {} +func (*ListDatabasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{1} +} +func (m *ListDatabasesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDatabasesRequest.Unmarshal(m, b) +} +func (m *ListDatabasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDatabasesRequest.Marshal(b, m, deterministic) +} +func (dst *ListDatabasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDatabasesRequest.Merge(dst, src) +} +func (m *ListDatabasesRequest) XXX_Size() int { + return xxx_messageInfo_ListDatabasesRequest.Size(m) +} +func (m *ListDatabasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDatabasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDatabasesRequest proto.InternalMessageInfo + +func (m *ListDatabasesRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListDatabasesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDatabasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListDatabasesResponse struct { + // List of MongoDB Database resources. + Databases []*Database `protobuf:"bytes,1,rep,name=databases,proto3" json:"databases,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value + // for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDatabasesResponse) Reset() { *m = ListDatabasesResponse{} } +func (m *ListDatabasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesResponse) ProtoMessage() {} +func (*ListDatabasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{2} +} +func (m *ListDatabasesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDatabasesResponse.Unmarshal(m, b) +} +func (m *ListDatabasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDatabasesResponse.Marshal(b, m, deterministic) +} +func (dst *ListDatabasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDatabasesResponse.Merge(dst, src) +} +func (m *ListDatabasesResponse) XXX_Size() int { + return xxx_messageInfo_ListDatabasesResponse.Size(m) +} +func (m *ListDatabasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDatabasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDatabasesResponse proto.InternalMessageInfo + +func (m *ListDatabasesResponse) GetDatabases() []*Database { + if m != nil { + return m.Databases + } + return nil +} + +func (m *ListDatabasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateDatabaseRequest struct { + // ID of the MongoDB cluster to create a database in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Configuration of the database to create. + DatabaseSpec *DatabaseSpec `protobuf:"bytes,2,opt,name=database_spec,json=databaseSpec,proto3" json:"database_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDatabaseRequest) Reset() { *m = CreateDatabaseRequest{} } +func (m *CreateDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseRequest) ProtoMessage() {} +func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{3} +} +func (m *CreateDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDatabaseRequest.Unmarshal(m, b) +} +func (m *CreateDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *CreateDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDatabaseRequest.Merge(dst, src) +} +func (m *CreateDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_CreateDatabaseRequest.Size(m) +} +func (m *CreateDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDatabaseRequest proto.InternalMessageInfo + +func (m *CreateDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateDatabaseRequest) GetDatabaseSpec() *DatabaseSpec { + if m != nil { + return m.DatabaseSpec + } + return nil +} + +type CreateDatabaseMetadata struct { + // ID of the MongoDB cluster where a database is being created. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the MongoDB database that is being created. + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDatabaseMetadata) Reset() { *m = CreateDatabaseMetadata{} } +func (m *CreateDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseMetadata) ProtoMessage() {} +func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{4} +} +func (m *CreateDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDatabaseMetadata.Unmarshal(m, b) +} +func (m *CreateDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDatabaseMetadata.Merge(dst, src) +} +func (m *CreateDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_CreateDatabaseMetadata.Size(m) +} +func (m *CreateDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDatabaseMetadata proto.InternalMessageInfo + +func (m *CreateDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type DeleteDatabaseRequest struct { + // ID of the MongoDB cluster to delete a database in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the database to delete. + // To get the name of the database, use a [DatabaseService.List] request. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDatabaseRequest) Reset() { *m = DeleteDatabaseRequest{} } +func (m *DeleteDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDatabaseRequest) ProtoMessage() {} +func (*DeleteDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{5} +} +func (m *DeleteDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDatabaseRequest.Unmarshal(m, b) +} +func (m *DeleteDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDatabaseRequest.Merge(dst, src) +} +func (m *DeleteDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_DeleteDatabaseRequest.Size(m) +} +func (m *DeleteDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDatabaseRequest proto.InternalMessageInfo + +func (m *DeleteDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type DeleteDatabaseMetadata struct { + // ID of the MongoDB cluster where a database is being deleted. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the MongoDB database that is being deleted. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDatabaseMetadata) Reset() { *m = DeleteDatabaseMetadata{} } +func (m *DeleteDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteDatabaseMetadata) ProtoMessage() {} +func (*DeleteDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_9ac08729ed097c0a, []int{6} +} +func (m *DeleteDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDatabaseMetadata.Unmarshal(m, b) +} +func (m *DeleteDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDatabaseMetadata.Merge(dst, src) +} +func (m *DeleteDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteDatabaseMetadata.Size(m) +} +func (m *DeleteDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDatabaseMetadata proto.InternalMessageInfo + +func (m *DeleteDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +func init() { + proto.RegisterType((*GetDatabaseRequest)(nil), "yandex.cloud.mdb.mongodb.v1.GetDatabaseRequest") + proto.RegisterType((*ListDatabasesRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListDatabasesRequest") + proto.RegisterType((*ListDatabasesResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListDatabasesResponse") + proto.RegisterType((*CreateDatabaseRequest)(nil), "yandex.cloud.mdb.mongodb.v1.CreateDatabaseRequest") + proto.RegisterType((*CreateDatabaseMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.CreateDatabaseMetadata") + proto.RegisterType((*DeleteDatabaseRequest)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteDatabaseRequest") + proto.RegisterType((*DeleteDatabaseMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteDatabaseMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DatabaseServiceClient is the client API for DatabaseService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DatabaseServiceClient interface { + // Returns the specified MongoDB Database resource. + // + // To get the list of available MongoDB Database resources, make a [List] request. + Get(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) + // Retrieves the list of MongoDB Database resources in the specified cluster. + List(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) + // Creates a new MongoDB database in the specified cluster. + Create(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified MongoDB database. 
+ Delete(ctx context.Context, in *DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type databaseServiceClient struct { + cc *grpc.ClientConn +} + +func NewDatabaseServiceClient(cc *grpc.ClientConn) DatabaseServiceClient { + return &databaseServiceClient{cc} +} + +func (c *databaseServiceClient) Get(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) { + out := new(Database) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.DatabaseService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) List(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) { + out := new(ListDatabasesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.DatabaseService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Create(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.DatabaseService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Delete(ctx context.Context, in *DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.DatabaseService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServiceServer is the server API for DatabaseService service. +type DatabaseServiceServer interface { + // Returns the specified MongoDB Database resource. + // + // To get the list of available MongoDB Database resources, make a [List] request. + Get(context.Context, *GetDatabaseRequest) (*Database, error) + // Retrieves the list of MongoDB Database resources in the specified cluster. + List(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) + // Creates a new MongoDB database in the specified cluster. + Create(context.Context, *CreateDatabaseRequest) (*operation.Operation, error) + // Deletes the specified MongoDB database. 
+ Delete(context.Context, *DeleteDatabaseRequest) (*operation.Operation, error) +} + +func RegisterDatabaseServiceServer(s *grpc.Server, srv DatabaseServiceServer) { + s.RegisterService(&_DatabaseService_serviceDesc, srv) +} + +func _DatabaseService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.DatabaseService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Get(ctx, req.(*GetDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatabasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.DatabaseService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).List(ctx, req.(*ListDatabasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.DatabaseService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Create(ctx, req.(*CreateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.DatabaseService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Delete(ctx, req.(*DeleteDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DatabaseService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.mongodb.v1.DatabaseService", + HandlerType: (*DatabaseServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _DatabaseService_Get_Handler, + }, + { + MethodName: "List", + Handler: _DatabaseService_List_Handler, + }, + { + MethodName: "Create", + Handler: _DatabaseService_Create_Handler, + }, + { + MethodName: "Delete", + Handler: _DatabaseService_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/mongodb/v1/database_service.proto", +} + +func init() { + 
proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/database_service.proto", fileDescriptor_database_service_9ac08729ed097c0a) +} + +var fileDescriptor_database_service_9ac08729ed097c0a = []byte{ + // 697 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcf, 0x4f, 0x13, 0x4d, + 0x18, 0xce, 0x52, 0xbe, 0x86, 0x0e, 0xf0, 0x91, 0x4c, 0xbe, 0x92, 0xa6, 0xdf, 0xc7, 0x17, 0x5c, + 0x23, 0x62, 0x4d, 0x77, 0xba, 0x25, 0x18, 0x15, 0x38, 0xd8, 0x42, 0x08, 0xf1, 0x67, 0x0a, 0x27, + 0xc4, 0x34, 0xb3, 0xdd, 0xd7, 0x75, 0x63, 0x77, 0x67, 0xed, 0x4c, 0x1b, 0x7e, 0x04, 0x0f, 0xc6, + 0x98, 0xc8, 0xd5, 0xe8, 0xc1, 0x3f, 0xc1, 0x23, 0xff, 0x83, 0x81, 0x33, 0xfe, 0x01, 0x5e, 0x3c, + 0x78, 0xf6, 0xe8, 0xc9, 0xec, 0xce, 0x76, 0xdb, 0x42, 0xa9, 0x15, 0xb8, 0x4d, 0xe6, 0x7d, 0xde, + 0x79, 0x9f, 0x67, 0xe6, 0x79, 0xe7, 0x45, 0xf9, 0x2d, 0xea, 0x9a, 0xb0, 0x49, 0x2a, 0x55, 0x56, + 0x37, 0x89, 0x63, 0x1a, 0xc4, 0x61, 0xae, 0xc5, 0x4c, 0x83, 0x34, 0x74, 0x62, 0x52, 0x41, 0x0d, + 0xca, 0xa1, 0xcc, 0xa1, 0xd6, 0xb0, 0x2b, 0xa0, 0x79, 0x35, 0x26, 0x18, 0xfe, 0x57, 0xe6, 0x68, + 0x41, 0x8e, 0xe6, 0x98, 0x86, 0x16, 0xe6, 0x68, 0x0d, 0x3d, 0xfd, 0x9f, 0xc5, 0x98, 0x55, 0x05, + 0x42, 0x3d, 0x9b, 0x50, 0xd7, 0x65, 0x82, 0x0a, 0x9b, 0xb9, 0x5c, 0xa6, 0xa6, 0xd3, 0x61, 0x39, + 0x3f, 0xca, 0x3c, 0xa8, 0x05, 0xc1, 0x30, 0x36, 0xd5, 0x41, 0x25, 0x8a, 0x9e, 0xc0, 0x4d, 0x74, + 0xe0, 0x1a, 0xb4, 0x6a, 0x9b, 0xed, 0xe1, 0x4c, 0x3f, 0x8a, 0x24, 0x56, 0x7d, 0xa3, 0x20, 0xbc, + 0x0c, 0x62, 0x31, 0xdc, 0x2d, 0xc1, 0x8b, 0x3a, 0x70, 0x81, 0xaf, 0x23, 0x54, 0xa9, 0xd6, 0xb9, + 0x80, 0x5a, 0xd9, 0x36, 0x53, 0xca, 0xa4, 0x32, 0x9d, 0x28, 0x8c, 0x7c, 0x3f, 0xd0, 0x95, 0xbd, + 0x43, 0x7d, 0x70, 0x7e, 0x61, 0x36, 0x57, 0x4a, 0x84, 0xf1, 0x15, 0x13, 0x17, 0xd1, 0x68, 0x74, + 0x4f, 0x2e, 0x75, 0x20, 0x35, 0x10, 0xe0, 0xff, 0xf7, 0xf1, 0x3f, 0x0e, 0xf4, 0xbf, 0x1f, 0xd3, + 0xec, 0xf6, 0x9d, 0xec, 0x7a, 0x2e, 0x7b, 0xab, 0x9c, 0x7d, 0x92, 0x91, 0x27, 0xdc, 0x98, 0x29, + 0x8d, 0x34, 0x93, 0x1e, 0x50, 0x07, 0xd4, 0x0f, 0x0a, 0xfa, 0xe7, 0x9e, 0xcd, 0x23, 0x26, 0xfc, + 0x4c, 0x54, 0xae, 0xa2, 0x84, 0x47, 0x2d, 0x28, 0x73, 0x7b, 0x5b, 0xd2, 0x88, 0x15, 0xd0, 0xcf, + 0x03, 0x3d, 0x3e, 0xbf, 0xa0, 0xe7, 0x72, 0xb9, 0xd2, 0x90, 0x1f, 0x5c, 0xb5, 0xb7, 0x01, 0x4f, + 0x23, 0x14, 0x00, 0x05, 0x7b, 0x0e, 0x6e, 0x2a, 0x16, 0x9c, 0x9a, 0xd8, 0x3b, 0xd4, 0xff, 0x0a, + 0x90, 0xa5, 0xe0, 0x94, 0x35, 0x3f, 0xa6, 0xbe, 0x56, 0x50, 0xf2, 0x18, 0x31, 0xee, 0x31, 0x97, + 0x03, 0x2e, 0xa2, 0x44, 0x53, 0x02, 0x4f, 0x29, 0x93, 0xb1, 0xe9, 0xe1, 0xfc, 0x15, 0xad, 0x87, + 0x33, 0xb4, 0xe8, 0x96, 0x5b, 0x79, 0x78, 0x0a, 0x8d, 0xb9, 0xb0, 0x29, 0xca, 0x6d, 0x6c, 0x82, + 0xeb, 0x2b, 0x8d, 0xfa, 0xdb, 0x8f, 0x22, 0x1a, 0x1f, 0x15, 0x94, 0x2c, 0xd6, 0x80, 0x0a, 0x38, + 0xd7, 0x5b, 0xad, 0xb5, 0xbd, 0x15, 0xf7, 0xa0, 0x12, 0x14, 0x1b, 0xce, 0x5f, 0xeb, 0x8b, 0xf7, + 0xaa, 0x07, 0x95, 0xc2, 0xa0, 0x7f, 0x74, 0xeb, 0xf1, 0xfc, 0x3d, 0x75, 0x03, 0x8d, 0x77, 0x72, + 0xbb, 0x0f, 0x82, 0xfa, 0x08, 0x3c, 0x71, 0x92, 0x5c, 0x3b, 0x9d, 0xcb, 0x5d, 0xad, 0x73, 0xcc, + 0x1a, 0x6f, 0x15, 0x94, 0x5c, 0x84, 0x2a, 0x9c, 0x53, 0xfa, 0x85, 0xd8, 0x74, 0x03, 0x8d, 0x77, + 0x52, 0xb9, 0x48, 0xa5, 0xf9, 0xf7, 0x71, 0x34, 0x16, 0x5d, 0xb6, 0xfc, 0x71, 0xf0, 0x27, 0x05, + 0xc5, 0x96, 0x41, 0x60, 0xd2, 0xf3, 0x89, 0x4e, 0xf6, 0x70, 0xba, 0x3f, 0x2f, 0xaa, 0x77, 0x5f, + 0x7d, 0xf9, 0xf6, 0x6e, 0x60, 0x09, 0x17, 0x89, 0x43, 0x5d, 0x6a, 0x81, 0x99, 0x6d, 0xfb, 0x2d, + 0x42, 0xfe, 0x9c, 0xec, 0xb4, 0xb4, 0xed, 0x46, 0x7f, 0x08, 0x27, 0x3b, 0x1d, 
0x9a, 0x76, 0x7d, + 0xb2, 0x83, 0x7e, 0xb3, 0x60, 0xbd, 0x67, 0xf1, 0x6e, 0x8d, 0x9e, 0xce, 0xff, 0x49, 0x8a, 0x6c, + 0x41, 0x75, 0x2e, 0x20, 0x3f, 0x8b, 0x67, 0xce, 0x40, 0x1e, 0x7f, 0x56, 0x50, 0x5c, 0xda, 0x16, + 0xf7, 0xae, 0xdd, 0xb5, 0xef, 0xd2, 0x97, 0x3a, 0x73, 0x5a, 0x9f, 0xf4, 0xc3, 0xe6, 0x4a, 0x35, + 0xf7, 0x8f, 0x32, 0xea, 0xa9, 0xbd, 0x31, 0xd4, 0xdc, 0x09, 0x44, 0xdc, 0x54, 0xcf, 0x22, 0xe2, + 0xb6, 0x92, 0xc1, 0x5f, 0x15, 0x14, 0x97, 0xa6, 0xfc, 0x8d, 0x8e, 0xae, 0x4d, 0xd4, 0x8f, 0x8e, + 0x97, 0xfb, 0x47, 0x19, 0x72, 0xaa, 0xf3, 0x93, 0x72, 0xe0, 0xc9, 0x89, 0x62, 0xd4, 0x9f, 0x6a, + 0x4b, 0x8e, 0x27, 0xb6, 0xa4, 0xad, 0x32, 0x17, 0x61, 0xab, 0xc2, 0xca, 0xfa, 0xb2, 0x65, 0x8b, + 0x67, 0x75, 0x43, 0xab, 0x30, 0x87, 0x48, 0xba, 0x59, 0x39, 0xde, 0x2c, 0x96, 0xb5, 0xc0, 0x0d, + 0x4a, 0x93, 0x1e, 0x73, 0x6f, 0x2e, 0x5c, 0x1a, 0xf1, 0x00, 0x3a, 0xf3, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0x96, 0x76, 0x94, 0xd2, 0xf7, 0x07, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset.pb.go new file mode 100644 index 000000000..35ad5245d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset.pb.go @@ -0,0 +1,112 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/resource_preset.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ResourcePreset resource for describing hardware configuration presets. +type ResourcePreset struct { + // ID of the ResourcePreset resource. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // IDs of availability zones where the resource preset is available. + ZoneIds []string `protobuf:"bytes,2,rep,name=zone_ids,json=zoneIds,proto3" json:"zone_ids,omitempty"` + // Number of CPU cores for a MongoDB host created with the preset. + Cores int64 `protobuf:"varint,3,opt,name=cores,proto3" json:"cores,omitempty"` + // RAM volume for a MongoDB host created with the preset, in bytes. 
+ Memory int64 `protobuf:"varint,4,opt,name=memory,proto3" json:"memory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourcePreset) Reset() { *m = ResourcePreset{} } +func (m *ResourcePreset) String() string { return proto.CompactTextString(m) } +func (*ResourcePreset) ProtoMessage() {} +func (*ResourcePreset) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_e8f81c1a07027864, []int{0} +} +func (m *ResourcePreset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourcePreset.Unmarshal(m, b) +} +func (m *ResourcePreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourcePreset.Marshal(b, m, deterministic) +} +func (dst *ResourcePreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePreset.Merge(dst, src) +} +func (m *ResourcePreset) XXX_Size() int { + return xxx_messageInfo_ResourcePreset.Size(m) +} +func (m *ResourcePreset) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePreset.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePreset proto.InternalMessageInfo + +func (m *ResourcePreset) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ResourcePreset) GetZoneIds() []string { + if m != nil { + return m.ZoneIds + } + return nil +} + +func (m *ResourcePreset) GetCores() int64 { + if m != nil { + return m.Cores + } + return 0 +} + +func (m *ResourcePreset) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func init() { + proto.RegisterType((*ResourcePreset)(nil), "yandex.cloud.mdb.mongodb.v1.ResourcePreset") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/resource_preset.proto", fileDescriptor_resource_preset_e8f81c1a07027864) +} + +var fileDescriptor_resource_preset_e8f81c1a07027864 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x8f, 0x31, 0x4b, 0x04, 0x31, + 0x10, 0x85, 0xd9, 0x5d, 0x3d, 0xbd, 0x14, 0x57, 0x04, 0x91, 0x15, 0x9b, 0xc5, 0x6a, 0x9b, 0x4b, + 0x58, 0x2c, 0xed, 0x6c, 0xe4, 0x3a, 0x49, 0x69, 0x73, 0x98, 0xcc, 0x10, 0x03, 0x26, 0x73, 0x24, + 0xbb, 0x87, 0xeb, 0xaf, 0x17, 0x93, 0xd4, 0xd7, 0xcd, 0x37, 0xbc, 0x0f, 0xde, 0x63, 0xd3, 0xfa, + 0x19, 0x00, 0x7f, 0xa4, 0xf9, 0xa6, 0x05, 0xa4, 0x07, 0x2d, 0x3d, 0x05, 0x4b, 0xa0, 0xe5, 0x79, + 0x92, 0x11, 0x13, 0x2d, 0xd1, 0xe0, 0xf1, 0x14, 0x31, 0xe1, 0x2c, 0x4e, 0x91, 0x66, 0xe2, 0x8f, + 0x45, 0x11, 0x59, 0x11, 0x1e, 0xb4, 0xa8, 0x8a, 0x38, 0x4f, 0x4f, 0x8e, 0xed, 0x54, 0xb5, 0xde, + 0xb3, 0xc4, 0x77, 0xac, 0x75, 0xd0, 0x37, 0x43, 0x33, 0x6e, 0x55, 0xeb, 0x80, 0x3f, 0xb0, 0xdb, + 0x5f, 0x0a, 0x78, 0x74, 0x90, 0xfa, 0x76, 0xe8, 0xc6, 0xad, 0xba, 0xf9, 0xe7, 0x03, 0x24, 0x7e, + 0xc7, 0xae, 0x0d, 0x45, 0x4c, 0x7d, 0x37, 0x34, 0x63, 0xa7, 0x0a, 0xf0, 0x7b, 0xb6, 0xf1, 0xe8, + 0x29, 0xae, 0xfd, 0x55, 0x7e, 0x57, 0x7a, 0x3d, 0x7c, 0xbc, 0x59, 0x37, 0x7f, 0x2d, 0x5a, 0x18, + 0xf2, 0xb2, 0x94, 0xda, 0x97, 0x1d, 0x96, 0xf6, 0x16, 0x43, 0xae, 0x2b, 0x2f, 0x0c, 0x7c, 0xa9, + 0xa7, 0xde, 0xe4, 0xe8, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0xbd, 0x87, 0x16, 0x0e, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset_service.pb.go new file mode 100644 index 000000000..2b2fd60b1 --- /dev/null +++ 
b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/resource_preset_service.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/resource_preset_service.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetResourcePresetRequest struct { + // ID of the resource preset to return. + // To get the resource preset ID, use a [ResourcePresetService.List] request. + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResourcePresetRequest) Reset() { *m = GetResourcePresetRequest{} } +func (m *GetResourcePresetRequest) String() string { return proto.CompactTextString(m) } +func (*GetResourcePresetRequest) ProtoMessage() {} +func (*GetResourcePresetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_0453cd5d31029933, []int{0} +} +func (m *GetResourcePresetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResourcePresetRequest.Unmarshal(m, b) +} +func (m *GetResourcePresetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResourcePresetRequest.Marshal(b, m, deterministic) +} +func (dst *GetResourcePresetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResourcePresetRequest.Merge(dst, src) +} +func (m *GetResourcePresetRequest) XXX_Size() int { + return xxx_messageInfo_GetResourcePresetRequest.Size(m) +} +func (m *GetResourcePresetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetResourcePresetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResourcePresetRequest proto.InternalMessageInfo + +func (m *GetResourcePresetRequest) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +type ListResourcePresetsRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListResourcePresetsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsRequest) Reset() { *m = ListResourcePresetsRequest{} } +func (m *ListResourcePresetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsRequest) ProtoMessage() {} +func (*ListResourcePresetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_0453cd5d31029933, []int{1} +} +func (m *ListResourcePresetsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsRequest.Unmarshal(m, b) +} +func (m *ListResourcePresetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsRequest.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsRequest.Merge(dst, src) +} +func (m *ListResourcePresetsRequest) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsRequest.Size(m) +} +func (m *ListResourcePresetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsRequest proto.InternalMessageInfo + +func (m *ListResourcePresetsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListResourcePresetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListResourcePresetsResponse struct { + // List of ResourcePreset resources. + ResourcePresets []*ResourcePreset `protobuf:"bytes,1,rep,name=resource_presets,json=resourcePresets,proto3" json:"resource_presets,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + // for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsResponse) Reset() { *m = ListResourcePresetsResponse{} } +func (m *ListResourcePresetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsResponse) ProtoMessage() {} +func (*ListResourcePresetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_0453cd5d31029933, []int{2} +} +func (m *ListResourcePresetsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsResponse.Unmarshal(m, b) +} +func (m *ListResourcePresetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsResponse.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsResponse.Merge(dst, src) +} +func (m *ListResourcePresetsResponse) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsResponse.Size(m) +} +func (m *ListResourcePresetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsResponse proto.InternalMessageInfo + +func (m *ListResourcePresetsResponse) GetResourcePresets() []*ResourcePreset { + if m != nil { + return m.ResourcePresets + } + return nil +} + +func (m *ListResourcePresetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetResourcePresetRequest)(nil), "yandex.cloud.mdb.mongodb.v1.GetResourcePresetRequest") + proto.RegisterType((*ListResourcePresetsRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListResourcePresetsRequest") + proto.RegisterType((*ListResourcePresetsResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListResourcePresetsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ResourcePresetServiceClient is the client API for ResourcePresetService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ResourcePresetServiceClient interface { + // Returns the specified ResourcePreset resource. + // + // To get the list of available ResourcePreset resources, make a [List] request. + Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) + // Retrieves the list of available ResourcePreset resources. 
+ List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) +} + +type resourcePresetServiceClient struct { + cc *grpc.ClientConn +} + +func NewResourcePresetServiceClient(cc *grpc.ClientConn) ResourcePresetServiceClient { + return &resourcePresetServiceClient{cc} +} + +func (c *resourcePresetServiceClient) Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) { + out := new(ResourcePreset) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ResourcePresetService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePresetServiceClient) List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) { + out := new(ListResourcePresetsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.ResourcePresetService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ResourcePresetServiceServer is the server API for ResourcePresetService service. +type ResourcePresetServiceServer interface { + // Returns the specified ResourcePreset resource. + // + // To get the list of available ResourcePreset resources, make a [List] request. + Get(context.Context, *GetResourcePresetRequest) (*ResourcePreset, error) + // Retrieves the list of available ResourcePreset resources. + List(context.Context, *ListResourcePresetsRequest) (*ListResourcePresetsResponse, error) +} + +func RegisterResourcePresetServiceServer(s *grpc.Server, srv ResourcePresetServiceServer) { + s.RegisterService(&_ResourcePresetService_serviceDesc, srv) +} + +func _ResourcePresetService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourcePresetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ResourcePresetService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).Get(ctx, req.(*GetResourcePresetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePresetService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcePresetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.ResourcePresetService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).List(ctx, req.(*ListResourcePresetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourcePresetService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.mongodb.v1.ResourcePresetService", + HandlerType: (*ResourcePresetServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ResourcePresetService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ResourcePresetService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"yandex/cloud/mdb/mongodb/v1/resource_preset_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/resource_preset_service.proto", fileDescriptor_resource_preset_service_0453cd5d31029933) +} + +var fileDescriptor_resource_preset_service_0453cd5d31029933 = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4f, 0x6b, 0x13, 0x41, + 0x1c, 0x65, 0x92, 0x5a, 0xcc, 0x88, 0xb4, 0x0c, 0x08, 0xcb, 0x56, 0x21, 0xac, 0xa8, 0x81, 0x92, + 0x99, 0x6c, 0x44, 0xac, 0xff, 0x40, 0x72, 0x09, 0x05, 0x91, 0xb2, 0x15, 0x0f, 0x5e, 0xc2, 0x6c, + 0xe6, 0xc7, 0x38, 0x98, 0x9d, 0x59, 0x77, 0x26, 0xa1, 0x56, 0xbc, 0x78, 0xf4, 0xea, 0xd9, 0xab, + 0x17, 0x3f, 0x48, 0xbd, 0xfb, 0x15, 0x3c, 0x78, 0xf2, 0x03, 0x78, 0x92, 0x9d, 0xdd, 0x82, 0x89, + 0xed, 0xd2, 0xdc, 0x96, 0x7d, 0xf3, 0x7e, 0xef, 0xbd, 0x79, 0xbf, 0xc1, 0x0f, 0xde, 0x71, 0x2d, + 0xe0, 0x88, 0x4d, 0x67, 0x66, 0x2e, 0x58, 0x26, 0x52, 0x96, 0x19, 0x2d, 0x8d, 0x48, 0xd9, 0x22, + 0x66, 0x05, 0x58, 0x33, 0x2f, 0xa6, 0x30, 0xc9, 0x0b, 0xb0, 0xe0, 0x26, 0x16, 0x8a, 0x85, 0x9a, + 0x02, 0xcd, 0x0b, 0xe3, 0x0c, 0xd9, 0xa9, 0xa8, 0xd4, 0x53, 0x69, 0x26, 0x52, 0x5a, 0x53, 0xe9, + 0x22, 0x0e, 0xaf, 0x4b, 0x63, 0xe4, 0x0c, 0x18, 0xcf, 0x15, 0xe3, 0x5a, 0x1b, 0xc7, 0x9d, 0x32, + 0xda, 0x56, 0xd4, 0xf0, 0xc6, 0x92, 0xea, 0x82, 0xcf, 0x94, 0xf0, 0x78, 0x0d, 0xc7, 0x6b, 0x98, + 0xaa, 0x28, 0xd1, 0x73, 0x1c, 0x8c, 0xc1, 0x25, 0x35, 0x76, 0xe0, 0xa1, 0x04, 0xde, 0xce, 0xc1, + 0x3a, 0x32, 0xc4, 0x64, 0x35, 0x89, 0x12, 0x01, 0xea, 0xa2, 0x5e, 0x67, 0xb4, 0xf1, 0xeb, 0x24, + 0x46, 0xc9, 0x76, 0xb1, 0x44, 0xdc, 0x17, 0x91, 0xc1, 0xe1, 0x33, 0x65, 0x57, 0x06, 0xda, 0xd3, + 0x89, 0x77, 0x70, 0x27, 0xe7, 0x12, 0x26, 0x56, 0x1d, 0x43, 0xd0, 0xea, 0xa2, 0x5e, 0x7b, 0x84, + 0xff, 0x9c, 0xc4, 0x9b, 0x8f, 0x9f, 0xc4, 0x83, 0xc1, 0x20, 0xb9, 0x5c, 0x82, 0x87, 0xea, 0x18, + 0x48, 0x0f, 0x63, 0x7f, 0xd0, 0x99, 0x37, 0xa0, 0x83, 0xb6, 0x97, 0xec, 0x7c, 0xfa, 0x1e, 0x5f, + 0xf2, 0x27, 0x13, 0x3f, 0xe5, 0x45, 0x89, 0x45, 0x5f, 0x10, 0xde, 0x39, 0x53, 0xd1, 0xe6, 0x46, + 0x5b, 0x20, 0x2f, 0xf1, 0xf6, 0x4a, 0x08, 0x1b, 0xa0, 0x6e, 0xbb, 0x77, 0x65, 0xb8, 0x4b, 0x1b, + 0x8a, 0xa0, 0x2b, 0x57, 0xb2, 0xb5, 0x9c, 0xd4, 0x92, 0xdb, 0x78, 0x4b, 0xc3, 0x91, 0x9b, 0xfc, + 0x63, 0xb3, 0x0c, 0xd4, 0x49, 0xae, 0x96, 0xbf, 0x0f, 0x4e, 0xfd, 0x0d, 0x7f, 0xb7, 0xf0, 0xb5, + 0xe5, 0x59, 0x87, 0xd5, 0x36, 0x90, 0x6f, 0x08, 0xb7, 0xc7, 0xe0, 0xc8, 0xbd, 0x46, 0x1f, 0xe7, + 0xb5, 0x13, 0xae, 0x63, 0x3f, 0x7a, 0xfa, 0xf1, 0xc7, 0xcf, 0xcf, 0xad, 0x87, 0x64, 0x8f, 0x65, + 0x5c, 0x73, 0x09, 0xa2, 0x7f, 0xc6, 0x66, 0xd4, 0xd1, 0xd8, 0xfb, 0xff, 0x5b, 0xff, 0x40, 0xbe, + 0x22, 0xbc, 0x51, 0xde, 0x33, 0xb9, 0xdf, 0xa8, 0x7b, 0x7e, 0xf9, 0xe1, 0xde, 0xfa, 0xc4, 0xaa, + 0xc3, 0x68, 0xd7, 0xbb, 0xbf, 0x45, 0x6e, 0x5e, 0xc0, 0xfd, 0x68, 0xff, 0xd5, 0x58, 0x2a, 0xf7, + 0x7a, 0x9e, 0xd2, 0xa9, 0xc9, 0x58, 0x25, 0xd9, 0xaf, 0x5e, 0x84, 0x34, 0x7d, 0x09, 0xda, 0x2f, + 0x3e, 0x6b, 0x78, 0x2a, 0x8f, 0xea, 0xcf, 0x74, 0xd3, 0x1f, 0xbd, 0xfb, 0x37, 0x00, 0x00, 0xff, + 0xff, 0xcf, 0x5c, 0x51, 0x57, 0xed, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user.pb.go new file mode 100644 index 000000000..8fb4f42e2 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user.pb.go @@ -0,0 +1,220 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/mongodb/v1/user.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A MongoDB User resource. For more information, see the +// [Developer's Guide](/docs/managed-mongodb/concepts). +type User struct { + // Name of the MongoDB user. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the MongoDB cluster the user belongs to. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Set of permissions granted to the user. + Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_user_ce6db2103014c466, []int{0} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_User.Unmarshal(m, b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_User.Marshal(b, m, deterministic) +} +func (dst *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(dst, src) +} +func (m *User) XXX_Size() int { + return xxx_messageInfo_User.Size(m) +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *User) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *User) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *User) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +type Permission struct { + // Name of the database that the permission grants access to. + DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + // MongoDB roles for the [database_name] database that the permission grants. 
+ Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { + return fileDescriptor_user_ce6db2103014c466, []int{1} +} +func (m *Permission) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Permission.Unmarshal(m, b) +} +func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Permission.Marshal(b, m, deterministic) +} +func (dst *Permission) XXX_Merge(src proto.Message) { + xxx_messageInfo_Permission.Merge(dst, src) +} +func (m *Permission) XXX_Size() int { + return xxx_messageInfo_Permission.Size(m) +} +func (m *Permission) XXX_DiscardUnknown() { + xxx_messageInfo_Permission.DiscardUnknown(m) +} + +var xxx_messageInfo_Permission proto.InternalMessageInfo + +func (m *Permission) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +func (m *Permission) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type UserSpec struct { + // Name of the MongoDB user. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Password of the MongoDB user. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // Set of permissions to grant to the user. + Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserSpec) Reset() { *m = UserSpec{} } +func (m *UserSpec) String() string { return proto.CompactTextString(m) } +func (*UserSpec) ProtoMessage() {} +func (*UserSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_user_ce6db2103014c466, []int{2} +} +func (m *UserSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserSpec.Unmarshal(m, b) +} +func (m *UserSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserSpec.Marshal(b, m, deterministic) +} +func (dst *UserSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSpec.Merge(dst, src) +} +func (m *UserSpec) XXX_Size() int { + return xxx_messageInfo_UserSpec.Size(m) +} +func (m *UserSpec) XXX_DiscardUnknown() { + xxx_messageInfo_UserSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSpec proto.InternalMessageInfo + +func (m *UserSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UserSpec) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *UserSpec) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +func init() { + proto.RegisterType((*User)(nil), "yandex.cloud.mdb.mongodb.v1.User") + proto.RegisterType((*Permission)(nil), "yandex.cloud.mdb.mongodb.v1.Permission") + proto.RegisterType((*UserSpec)(nil), "yandex.cloud.mdb.mongodb.v1.UserSpec") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/user.proto", fileDescriptor_user_ce6db2103014c466) +} + +var fileDescriptor_user_ce6db2103014c466 = []byte{ + // 345 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xab, 0x4c, 0xcc, 0x4b, + 0x49, 
0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x4d, 0x49, 0xd2, 0xcf, 0xcd, 0xcf, + 0x4b, 0xcf, 0x4f, 0x49, 0xd2, 0x2f, 0x33, 0xd4, 0x2f, 0x2d, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x92, 0x86, 0xa8, 0xd3, 0x03, 0xab, 0xd3, 0xcb, 0x4d, 0x49, 0xd2, 0x83, 0xaa, + 0xd3, 0x2b, 0x33, 0x94, 0x92, 0x45, 0x31, 0xa4, 0x2c, 0x31, 0x27, 0x33, 0x25, 0xb1, 0x24, 0x33, + 0x3f, 0x0f, 0xa2, 0x57, 0xa9, 0x85, 0x91, 0x8b, 0x25, 0xb4, 0x38, 0xb5, 0x48, 0x48, 0x88, 0x8b, + 0x25, 0x2f, 0x31, 0x37, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc, 0x16, 0x92, 0xe5, + 0xe2, 0x4a, 0xce, 0x29, 0x2d, 0x2e, 0x49, 0x2d, 0x8a, 0xcf, 0x4c, 0x91, 0x60, 0x02, 0xcb, 0x70, + 0x42, 0x45, 0x3c, 0x53, 0x84, 0x3c, 0xb9, 0xb8, 0x0b, 0x52, 0x8b, 0x72, 0x33, 0x8b, 0x8b, 0x33, + 0xf3, 0xf3, 0x8a, 0x25, 0x98, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xd4, 0xf5, 0xf0, 0xb8, 0x46, 0x2f, + 0x00, 0xae, 0x3e, 0x08, 0x59, 0xaf, 0x92, 0x3b, 0x17, 0x17, 0x42, 0x4a, 0x48, 0x99, 0x8b, 0x37, + 0x25, 0xb1, 0x24, 0x31, 0x29, 0xb1, 0x38, 0x35, 0x1e, 0xc9, 0x51, 0x3c, 0x30, 0x41, 0x3f, 0x90, + 0xe3, 0x44, 0xb8, 0x58, 0x8b, 0xf2, 0x73, 0x52, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0x38, 0x83, + 0x20, 0x1c, 0xa5, 0xcd, 0x8c, 0x5c, 0x1c, 0x20, 0xff, 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x19, 0x22, + 0xfb, 0xc9, 0x49, 0xf6, 0xc5, 0x71, 0x43, 0xc6, 0x4f, 0xc7, 0x0d, 0x79, 0xa3, 0x13, 0x75, 0xab, + 0x1c, 0x75, 0xa3, 0x0c, 0x74, 0x2d, 0xe3, 0x63, 0xb5, 0xba, 0x4e, 0x18, 0xb2, 0xd8, 0xd8, 0x9a, + 0x19, 0x43, 0xbd, 0xac, 0xc9, 0xc5, 0x51, 0x90, 0x58, 0x5c, 0x5c, 0x9e, 0x5f, 0x04, 0xf5, 0xb0, + 0x13, 0x2f, 0x48, 0x5b, 0xd7, 0x09, 0x43, 0x56, 0x0b, 0x5d, 0x43, 0x23, 0x8b, 0x20, 0xb8, 0x34, + 0x15, 0xbd, 0xef, 0xe4, 0x19, 0xe5, 0x9e, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, + 0xab, 0x0f, 0x31, 0x41, 0x17, 0x12, 0x63, 0xe9, 0xf9, 0xba, 0xe9, 0xa9, 0x79, 0xe0, 0xc8, 0xd2, + 0xc7, 0x93, 0x1e, 0xac, 0xa1, 0xcc, 0x24, 0x36, 0xb0, 0x52, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xb7, 0x04, 0x46, 0xbf, 0x3d, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user_service.pb.go new file mode 100644 index 000000000..070cae5e8 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1/user_service.pb.go @@ -0,0 +1,1097 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/mongodb/v1/user_service.proto + +package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetUserRequest struct { + // ID of the MongoDB cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the MongoDB User resource to return. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUserRequest) Reset() { *m = GetUserRequest{} } +func (m *GetUserRequest) String() string { return proto.CompactTextString(m) } +func (*GetUserRequest) ProtoMessage() {} +func (*GetUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{0} +} +func (m *GetUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUserRequest.Unmarshal(m, b) +} +func (m *GetUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUserRequest.Marshal(b, m, deterministic) +} +func (dst *GetUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUserRequest.Merge(dst, src) +} +func (m *GetUserRequest) XXX_Size() int { + return xxx_messageInfo_GetUserRequest.Size(m) +} +func (m *GetUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUserRequest proto.InternalMessageInfo + +func (m *GetUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type ListUsersRequest struct { + // ID of the cluster to list MongoDB users in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListUsersResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsersRequest) Reset() { *m = ListUsersRequest{} } +func (m *ListUsersRequest) String() string { return proto.CompactTextString(m) } +func (*ListUsersRequest) ProtoMessage() {} +func (*ListUsersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{1} +} +func (m *ListUsersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsersRequest.Unmarshal(m, b) +} +func (m *ListUsersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsersRequest.Marshal(b, m, deterministic) +} +func (dst *ListUsersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsersRequest.Merge(dst, src) +} +func (m *ListUsersRequest) XXX_Size() int { + return xxx_messageInfo_ListUsersRequest.Size(m) +} +func (m *ListUsersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsersRequest proto.InternalMessageInfo + +func (m *ListUsersRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListUsersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUsersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListUsersResponse struct { + // List of MongoDB User resources. + Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value + // for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsersResponse) Reset() { *m = ListUsersResponse{} } +func (m *ListUsersResponse) String() string { return proto.CompactTextString(m) } +func (*ListUsersResponse) ProtoMessage() {} +func (*ListUsersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{2} +} +func (m *ListUsersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsersResponse.Unmarshal(m, b) +} +func (m *ListUsersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsersResponse.Marshal(b, m, deterministic) +} +func (dst *ListUsersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsersResponse.Merge(dst, src) +} +func (m *ListUsersResponse) XXX_Size() int { + return xxx_messageInfo_ListUsersResponse.Size(m) +} +func (m *ListUsersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsersResponse proto.InternalMessageInfo + +func (m *ListUsersResponse) GetUsers() []*User { + if m != nil { + return m.Users + } + return nil +} + +func (m *ListUsersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateUserRequest struct { + // ID of the MongoDB cluster to create a user in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Properties of the user to be created. + UserSpec *UserSpec `protobuf:"bytes,2,opt,name=user_spec,json=userSpec,proto3" json:"user_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserRequest) Reset() { *m = CreateUserRequest{} } +func (m *CreateUserRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUserRequest) ProtoMessage() {} +func (*CreateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{3} +} +func (m *CreateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserRequest.Unmarshal(m, b) +} +func (m *CreateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserRequest.Marshal(b, m, deterministic) +} +func (dst *CreateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserRequest.Merge(dst, src) +} +func (m *CreateUserRequest) XXX_Size() int { + return xxx_messageInfo_CreateUserRequest.Size(m) +} +func (m *CreateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserRequest proto.InternalMessageInfo + +func (m *CreateUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateUserRequest) GetUserSpec() *UserSpec { + if m != nil { + return m.UserSpec + } + return nil +} + +type CreateUserMetadata struct { + // ID of the MongoDB cluster the user is being created in. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being created. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserMetadata) Reset() { *m = CreateUserMetadata{} } +func (m *CreateUserMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateUserMetadata) ProtoMessage() {} +func (*CreateUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{4} +} +func (m *CreateUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserMetadata.Unmarshal(m, b) +} +func (m *CreateUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserMetadata.Merge(dst, src) +} +func (m *CreateUserMetadata) XXX_Size() int { + return xxx_messageInfo_CreateUserMetadata.Size(m) +} +func (m *CreateUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserMetadata proto.InternalMessageInfo + +func (m *CreateUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type UpdateUserRequest struct { + // ID of the MongoDB cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to be updated. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Field mask that specifies which fields of the MongoDB User resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New password for the user. + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + // New set of permissions for the user. 
+ Permissions []*Permission `protobuf:"bytes,5,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserRequest) Reset() { *m = UpdateUserRequest{} } +func (m *UpdateUserRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUserRequest) ProtoMessage() {} +func (*UpdateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{5} +} +func (m *UpdateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserRequest.Unmarshal(m, b) +} +func (m *UpdateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserRequest.Merge(dst, src) +} +func (m *UpdateUserRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUserRequest.Size(m) +} +func (m *UpdateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserRequest proto.InternalMessageInfo + +func (m *UpdateUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *UpdateUserRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUserRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *UpdateUserRequest) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +type UpdateUserMetadata struct { + // ID of the MongoDB cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being updated. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserMetadata) Reset() { *m = UpdateUserMetadata{} } +func (m *UpdateUserMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateUserMetadata) ProtoMessage() {} +func (*UpdateUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{6} +} +func (m *UpdateUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserMetadata.Unmarshal(m, b) +} +func (m *UpdateUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserMetadata.Merge(dst, src) +} +func (m *UpdateUserMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateUserMetadata.Size(m) +} +func (m *UpdateUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserMetadata proto.InternalMessageInfo + +func (m *UpdateUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type DeleteUserRequest struct { + // ID of the MongoDB cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to delete. + // To get the name of the user use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserRequest) Reset() { *m = DeleteUserRequest{} } +func (m *DeleteUserRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUserRequest) ProtoMessage() {} +func (*DeleteUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{7} +} +func (m *DeleteUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserRequest.Unmarshal(m, b) +} +func (m *DeleteUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUserRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserRequest.Merge(dst, src) +} +func (m *DeleteUserRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUserRequest.Size(m) +} +func (m *DeleteUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserRequest proto.InternalMessageInfo + +func (m *DeleteUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type DeleteUserMetadata struct { + // ID of the MongoDB cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being deleted. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserMetadata) Reset() { *m = DeleteUserMetadata{} } +func (m *DeleteUserMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteUserMetadata) ProtoMessage() {} +func (*DeleteUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{8} +} +func (m *DeleteUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserMetadata.Unmarshal(m, b) +} +func (m *DeleteUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUserMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserMetadata.Merge(dst, src) +} +func (m *DeleteUserMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteUserMetadata.Size(m) +} +func (m *DeleteUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserMetadata proto.InternalMessageInfo + +func (m *DeleteUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type GrantUserPermissionRequest struct { + // ID of the MongoDB cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to grant the permission to. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Permission that should be granted to the specified user. 
+ Permission *Permission `protobuf:"bytes,3,opt,name=permission,proto3" json:"permission,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrantUserPermissionRequest) Reset() { *m = GrantUserPermissionRequest{} } +func (m *GrantUserPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*GrantUserPermissionRequest) ProtoMessage() {} +func (*GrantUserPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{9} +} +func (m *GrantUserPermissionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrantUserPermissionRequest.Unmarshal(m, b) +} +func (m *GrantUserPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrantUserPermissionRequest.Marshal(b, m, deterministic) +} +func (dst *GrantUserPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrantUserPermissionRequest.Merge(dst, src) +} +func (m *GrantUserPermissionRequest) XXX_Size() int { + return xxx_messageInfo_GrantUserPermissionRequest.Size(m) +} +func (m *GrantUserPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GrantUserPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GrantUserPermissionRequest proto.InternalMessageInfo + +func (m *GrantUserPermissionRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GrantUserPermissionRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *GrantUserPermissionRequest) GetPermission() *Permission { + if m != nil { + return m.Permission + } + return nil +} + +type GrantUserPermissionMetadata struct { + // ID of the MongoDB cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being granted a permission. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrantUserPermissionMetadata) Reset() { *m = GrantUserPermissionMetadata{} } +func (m *GrantUserPermissionMetadata) String() string { return proto.CompactTextString(m) } +func (*GrantUserPermissionMetadata) ProtoMessage() {} +func (*GrantUserPermissionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{10} +} +func (m *GrantUserPermissionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrantUserPermissionMetadata.Unmarshal(m, b) +} +func (m *GrantUserPermissionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrantUserPermissionMetadata.Marshal(b, m, deterministic) +} +func (dst *GrantUserPermissionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrantUserPermissionMetadata.Merge(dst, src) +} +func (m *GrantUserPermissionMetadata) XXX_Size() int { + return xxx_messageInfo_GrantUserPermissionMetadata.Size(m) +} +func (m *GrantUserPermissionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_GrantUserPermissionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_GrantUserPermissionMetadata proto.InternalMessageInfo + +func (m *GrantUserPermissionMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GrantUserPermissionMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type RevokeUserPermissionRequest struct { + // ID of the MongoDB cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user to revoke a permission from. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Name of the database that the user should lose access to. 
+ DatabaseName string `protobuf:"bytes,3,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserPermissionRequest) Reset() { *m = RevokeUserPermissionRequest{} } +func (m *RevokeUserPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*RevokeUserPermissionRequest) ProtoMessage() {} +func (*RevokeUserPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{11} +} +func (m *RevokeUserPermissionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserPermissionRequest.Unmarshal(m, b) +} +func (m *RevokeUserPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserPermissionRequest.Marshal(b, m, deterministic) +} +func (dst *RevokeUserPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserPermissionRequest.Merge(dst, src) +} +func (m *RevokeUserPermissionRequest) XXX_Size() int { + return xxx_messageInfo_RevokeUserPermissionRequest.Size(m) +} +func (m *RevokeUserPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserPermissionRequest proto.InternalMessageInfo + +func (m *RevokeUserPermissionRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RevokeUserPermissionRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *RevokeUserPermissionRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type RevokeUserPermissionMetadata struct { + // ID of the MongoDB cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user whose permission is being revoked. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserPermissionMetadata) Reset() { *m = RevokeUserPermissionMetadata{} } +func (m *RevokeUserPermissionMetadata) String() string { return proto.CompactTextString(m) } +func (*RevokeUserPermissionMetadata) ProtoMessage() {} +func (*RevokeUserPermissionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_74333e64510eb416, []int{12} +} +func (m *RevokeUserPermissionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserPermissionMetadata.Unmarshal(m, b) +} +func (m *RevokeUserPermissionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserPermissionMetadata.Marshal(b, m, deterministic) +} +func (dst *RevokeUserPermissionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserPermissionMetadata.Merge(dst, src) +} +func (m *RevokeUserPermissionMetadata) XXX_Size() int { + return xxx_messageInfo_RevokeUserPermissionMetadata.Size(m) +} +func (m *RevokeUserPermissionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserPermissionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserPermissionMetadata proto.InternalMessageInfo + +func (m *RevokeUserPermissionMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RevokeUserPermissionMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func init() { + proto.RegisterType((*GetUserRequest)(nil), "yandex.cloud.mdb.mongodb.v1.GetUserRequest") + proto.RegisterType((*ListUsersRequest)(nil), "yandex.cloud.mdb.mongodb.v1.ListUsersRequest") + proto.RegisterType((*ListUsersResponse)(nil), "yandex.cloud.mdb.mongodb.v1.ListUsersResponse") + proto.RegisterType((*CreateUserRequest)(nil), "yandex.cloud.mdb.mongodb.v1.CreateUserRequest") + proto.RegisterType((*CreateUserMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.CreateUserMetadata") + proto.RegisterType((*UpdateUserRequest)(nil), "yandex.cloud.mdb.mongodb.v1.UpdateUserRequest") + proto.RegisterType((*UpdateUserMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.UpdateUserMetadata") + proto.RegisterType((*DeleteUserRequest)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteUserRequest") + proto.RegisterType((*DeleteUserMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.DeleteUserMetadata") + proto.RegisterType((*GrantUserPermissionRequest)(nil), "yandex.cloud.mdb.mongodb.v1.GrantUserPermissionRequest") + proto.RegisterType((*GrantUserPermissionMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.GrantUserPermissionMetadata") + proto.RegisterType((*RevokeUserPermissionRequest)(nil), "yandex.cloud.mdb.mongodb.v1.RevokeUserPermissionRequest") + proto.RegisterType((*RevokeUserPermissionMetadata)(nil), "yandex.cloud.mdb.mongodb.v1.RevokeUserPermissionMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UserServiceClient is the client API for UserService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type UserServiceClient interface { + // Returns the specified MongoDB User resource. + // + // To get the list of available MongoDB User resources, make a [List] request. + Get(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*User, error) + // Retrieves the list of MongoDB User resources in the specified cluster. + List(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) + // Creates a MongoDB user in the specified cluster. + Create(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified MongoDB user. + Update(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified MongoDB user. + Delete(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Grants permission to the specified MongoDB user. + GrantPermission(ctx context.Context, in *GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Revokes permission from the specified MongoDB user. + RevokePermission(ctx context.Context, in *RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type userServiceClient struct { + cc *grpc.ClientConn +} + +func NewUserServiceClient(cc *grpc.ClientConn) UserServiceClient { + return &userServiceClient{cc} +} + +func (c *userServiceClient) Get(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*User, error) { + out := new(User) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) List(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) { + out := new(ListUsersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Create(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Update(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Delete(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) GrantPermission(ctx context.Context, in *GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/GrantPermission", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) RevokePermission(ctx context.Context, in *RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.mongodb.v1.UserService/RevokePermission", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserServiceServer is the server API for UserService service. +type UserServiceServer interface { + // Returns the specified MongoDB User resource. + // + // To get the list of available MongoDB User resources, make a [List] request. + Get(context.Context, *GetUserRequest) (*User, error) + // Retrieves the list of MongoDB User resources in the specified cluster. + List(context.Context, *ListUsersRequest) (*ListUsersResponse, error) + // Creates a MongoDB user in the specified cluster. + Create(context.Context, *CreateUserRequest) (*operation.Operation, error) + // Updates the specified MongoDB user. + Update(context.Context, *UpdateUserRequest) (*operation.Operation, error) + // Deletes the specified MongoDB user. + Delete(context.Context, *DeleteUserRequest) (*operation.Operation, error) + // Grants permission to the specified MongoDB user. + GrantPermission(context.Context, *GrantUserPermissionRequest) (*operation.Operation, error) + // Revokes permission from the specified MongoDB user. + RevokePermission(context.Context, *RevokeUserPermissionRequest) (*operation.Operation, error) +} + +func RegisterUserServiceServer(s *grpc.Server, srv UserServiceServer) { + s.RegisterService(&_UserService_serviceDesc, srv) +} + +func _UserService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Get(ctx, req.(*GetUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).List(ctx, req.(*ListUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Create(ctx, req.(*CreateUserRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _UserService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Update(ctx, req.(*UpdateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Delete(ctx, req.(*DeleteUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_GrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GrantUserPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).GrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/GrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).GrantPermission(ctx, req.(*GrantUserPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_RevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeUserPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).RevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.mongodb.v1.UserService/RevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).RevokePermission(ctx, req.(*RevokeUserPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UserService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.mongodb.v1.UserService", + HandlerType: (*UserServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _UserService_Get_Handler, + }, + { + MethodName: "List", + Handler: _UserService_List_Handler, + }, + { + MethodName: "Create", + Handler: _UserService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _UserService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _UserService_Delete_Handler, + }, + { + MethodName: "GrantPermission", + Handler: _UserService_GrantPermission_Handler, + }, + { + MethodName: "RevokePermission", + Handler: _UserService_RevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"yandex/cloud/mdb/mongodb/v1/user_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/user_service.proto", fileDescriptor_user_service_74333e64510eb416) +} + +var fileDescriptor_user_service_74333e64510eb416 = []byte{ + // 987 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4f, 0x6f, 0x1b, 0xc5, + 0x1b, 0xd6, 0x24, 0x8e, 0x15, 0xbf, 0x6e, 0xda, 0x66, 0xa4, 0x9f, 0x64, 0x39, 0xcd, 0x4f, 0x61, + 0xa1, 0x6d, 0xe4, 0x68, 0x77, 0xbd, 0x2e, 0x90, 0x90, 0x34, 0x12, 0x38, 0x40, 0x14, 0xd1, 0x40, + 0xd8, 0x52, 0x09, 0x82, 0x90, 0x35, 0xf6, 0x4e, 0x97, 0x55, 0xbc, 0x7f, 0xd8, 0x59, 0x9b, 0x26, + 0x25, 0x42, 0x42, 0x9c, 0x7a, 0x84, 0x1b, 0x07, 0xbe, 0x00, 0xb7, 0x48, 0x7c, 0x00, 0x4e, 0xa9, + 0xc4, 0x01, 0x29, 0xf0, 0x11, 0x38, 0x70, 0xe0, 0x80, 0x38, 0xc2, 0x05, 0xcd, 0xcc, 0x26, 0xbb, + 0xb1, 0xcd, 0xda, 0xad, 0x23, 0x7a, 0x1b, 0xfb, 0x7d, 0x66, 0xde, 0xe7, 0x79, 0xff, 0xda, 0xa0, + 0xed, 0x13, 0xcf, 0xa2, 0x0f, 0xf4, 0x56, 0xdb, 0xef, 0x58, 0xba, 0x6b, 0x35, 0x75, 0xd7, 0xf7, + 0x6c, 0xdf, 0x6a, 0xea, 0x5d, 0x43, 0xef, 0x30, 0x1a, 0x36, 0x18, 0x0d, 0xbb, 0x4e, 0x8b, 0x6a, + 0x41, 0xe8, 0x47, 0x3e, 0x9e, 0x93, 0x78, 0x4d, 0xe0, 0x35, 0xd7, 0x6a, 0x6a, 0x31, 0x5e, 0xeb, + 0x1a, 0xe5, 0x6b, 0xb6, 0xef, 0xdb, 0x6d, 0xaa, 0x93, 0xc0, 0xd1, 0x89, 0xe7, 0xf9, 0x11, 0x89, + 0x1c, 0xdf, 0x63, 0xf2, 0x6a, 0x79, 0x21, 0xb6, 0x8a, 0x4f, 0xcd, 0xce, 0x7d, 0xfd, 0xbe, 0x43, + 0xdb, 0x56, 0xc3, 0x25, 0x6c, 0x2f, 0x46, 0xcc, 0x9f, 0x23, 0xd3, 0x25, 0x6d, 0xc7, 0x12, 0x2f, + 0xc4, 0xe6, 0x1b, 0xe7, 0xcc, 0x7e, 0x40, 0x43, 0x61, 0x4d, 0x4e, 0x03, 0x71, 0x03, 0x34, 0xc5, + 0xb8, 0x72, 0x8c, 0xe3, 0x74, 0x7b, 0xde, 0x50, 0xf6, 0xe1, 0xf2, 0x26, 0x8d, 0xee, 0x31, 0x1a, + 0x9a, 0xf4, 0x93, 0x0e, 0x65, 0x11, 0x5e, 0x02, 0x68, 0xb5, 0x3b, 0x2c, 0xa2, 0x61, 0xc3, 0xb1, + 0x4a, 0x68, 0x01, 0x2d, 0x16, 0xea, 0x97, 0x7e, 0x3b, 0x36, 0xd0, 0xa3, 0xc7, 0x46, 0xee, 0xf6, + 0xfa, 0x4b, 0x55, 0xb3, 0x10, 0xdb, 0xb7, 0x2c, 0xbc, 0x0a, 0x05, 0x11, 0x3c, 0x8f, 0xb8, 0xb4, + 0x34, 0x21, 0xb0, 0xf3, 0x1c, 0xfb, 0xe7, 0xb1, 0x31, 0xf3, 0x21, 0x51, 0x0f, 0x5e, 0x53, 0x77, + 0xab, 0xea, 0x2b, 0x8d, 0x8f, 0x2a, 0xf2, 0xf2, 0xcb, 0xb7, 0xcc, 0x69, 0x8e, 0x7f, 0x9b, 0xb8, + 0x54, 0xf9, 0x0a, 0xc1, 0xd5, 0x3b, 0x0e, 0x13, 0xce, 0xd9, 0x53, 0x79, 0xbf, 0x09, 0x85, 0x80, + 0xd8, 0xb4, 0xc1, 0x9c, 0x03, 0xe9, 0x7d, 0xb2, 0x0e, 0x7f, 0x1d, 0x1b, 0xf9, 0xdb, 0xeb, 0x46, + 0xb5, 0x5a, 0x35, 0xa7, 0xb9, 0xf1, 0xae, 0x73, 0x40, 0xf1, 0x22, 0x80, 0x00, 0x46, 0xfe, 0x1e, + 0xf5, 0x4a, 0x93, 0xe2, 0xd5, 0xc2, 0xa3, 0xc7, 0xc6, 0x94, 0x40, 0x9a, 0xe2, 0x95, 0xf7, 0xb8, + 0x4d, 0x89, 0x60, 0x36, 0xc5, 0x89, 0x05, 0xbe, 0xc7, 0x28, 0x5e, 0x86, 0x29, 0xce, 0x9a, 0x95, + 0xd0, 0xc2, 0xe4, 0x62, 0xb1, 0xf6, 0x9c, 0x96, 0x51, 0x1c, 0x9a, 0x88, 0xa5, 0xc4, 0xe3, 0x1b, + 0x70, 0xc5, 0xa3, 0x0f, 0xa2, 0x46, 0xca, 0xb9, 0x08, 0x92, 0x39, 0xc3, 0xbf, 0xde, 0x39, 0xf3, + 0xfa, 0x25, 0x82, 0xd9, 0x8d, 0x90, 0x92, 0x88, 0x3e, 0x75, 0x26, 0xea, 0x71, 0x26, 0x58, 0x40, + 0x5b, 0xc2, 0x49, 0xb1, 0x76, 0x7d, 0x28, 0xcf, 0xbb, 0x01, 0x6d, 0xc9, 0x8c, 0xf0, 0x93, 0xb2, + 0x03, 0x38, 0x61, 0xb1, 0x4d, 0x23, 0x62, 0x91, 0x88, 0xe0, 0xf9, 0x7e, 0x1a, 0x69, 0xc7, 0x73, + 0x7d, 0x25, 0x90, 0xca, 0xf1, 0x77, 0x13, 0x30, 0x7b, 0x2f, 0xb0, 0xc6, 0x11, 0x36, 0x46, 0x89, + 0xe1, 0x35, 0x28, 0x76, 0x84, 0x77, 0xd1, 0x7d, 0x22, 0xf1, 0xc5, 0x5a, 0x59, 0x93, 0x0d, 0xaa, + 0x9d, 0x36, 0xa8, 0xf6, 0x26, 0x6f, 0xd0, 0x6d, 0xc2, 0xf6, 0x4c, 0x90, 0x70, 0x7e, 0xc6, 0xd7, + 0x61, 0x3a, 0x20, 
0x8c, 0x7d, 0xea, 0x87, 0x56, 0x29, 0x97, 0x94, 0xcc, 0x8a, 0x6a, 0xd4, 0x56, + 0xcc, 0x33, 0x13, 0xde, 0x82, 0x62, 0x40, 0x43, 0xd7, 0x61, 0x8c, 0xcf, 0x80, 0xd2, 0x94, 0x28, + 0x91, 0x9b, 0x99, 0xa1, 0xdf, 0x39, 0xc3, 0x9b, 0xe9, 0xbb, 0x3c, 0xfe, 0x49, 0xb0, 0x2e, 0x24, + 0xfe, 0x9f, 0xc1, 0xec, 0xeb, 0xb4, 0x4d, 0x9f, 0x4d, 0xf8, 0xb9, 0x9e, 0xc4, 0xfb, 0x85, 0xe8, + 0xf9, 0x05, 0x41, 0x79, 0x33, 0x24, 0x9e, 0x68, 0xd0, 0x54, 0x18, 0xff, 0xeb, 0xc2, 0xda, 0x06, + 0x48, 0x12, 0x17, 0xd7, 0xd5, 0xa8, 0x39, 0xaf, 0xe7, 0xb8, 0x17, 0x33, 0xf5, 0x80, 0xf2, 0x01, + 0xcc, 0x0d, 0x50, 0x75, 0x21, 0x11, 0xfb, 0x11, 0xc1, 0x9c, 0x49, 0xbb, 0xfe, 0x1e, 0x7d, 0xc6, + 0x21, 0xdb, 0x80, 0x19, 0x2e, 0xa6, 0x49, 0x18, 0x95, 0xf7, 0xe5, 0x18, 0xfe, 0x7f, 0x7c, 0xff, + 0x72, 0xea, 0xbe, 0x9a, 0x7a, 0xe0, 0xd2, 0xe9, 0x25, 0xa1, 0x66, 0x17, 0xae, 0x0d, 0x12, 0x73, + 0x11, 0x91, 0xaa, 0xfd, 0x0d, 0x50, 0x14, 0x43, 0x51, 0xfe, 0x10, 0xc0, 0xdf, 0x20, 0x98, 0xdc, + 0xa4, 0x11, 0x5e, 0xca, 0xcc, 0xeb, 0xf9, 0xed, 0x59, 0x1e, 0xbe, 0x1b, 0x94, 0x8d, 0x2f, 0x7e, + 0xfe, 0xf5, 0xeb, 0x89, 0x75, 0xbc, 0xa6, 0xbb, 0xc4, 0x23, 0x36, 0xb5, 0xd4, 0xd4, 0xda, 0x8e, + 0xb9, 0x32, 0xfd, 0x61, 0xa2, 0xe3, 0x50, 0x2c, 0x73, 0xa6, 0x3f, 0x3c, 0xe3, 0x7e, 0x88, 0xbf, + 0x45, 0x90, 0xe3, 0x8b, 0x0a, 0xab, 0x99, 0x0e, 0x7b, 0xf7, 0x6b, 0x59, 0x1b, 0x15, 0x2e, 0x57, + 0x9f, 0xb2, 0x2c, 0xc8, 0x1a, 0x58, 0x7f, 0x42, 0xb2, 0xf8, 0x7b, 0x04, 0x79, 0xb9, 0x4c, 0x70, + 0xb6, 0xcf, 0xbe, 0xbd, 0xd7, 0x1b, 0xc3, 0xe4, 0x27, 0xcb, 0x3b, 0xa7, 0x27, 0xe5, 0xfd, 0xa3, + 0x93, 0x4a, 0x79, 0xe0, 0xb6, 0xca, 0xf1, 0x4f, 0x82, 0xf4, 0x8b, 0xca, 0x93, 0x92, 0x5e, 0x45, + 0x15, 0xfc, 0x03, 0x82, 0xbc, 0x1c, 0xc2, 0x43, 0x78, 0xf7, 0xad, 0xb5, 0x51, 0x78, 0x5b, 0x92, + 0xf7, 0x80, 0x29, 0x9f, 0xf0, 0x7e, 0xb5, 0x36, 0x4e, 0x65, 0x70, 0x0d, 0x3f, 0x21, 0xc8, 0xcb, + 0xc1, 0x3b, 0x44, 0x43, 0xdf, 0x6e, 0x18, 0x45, 0x43, 0x78, 0x74, 0x52, 0x59, 0x1a, 0x38, 0xd9, + 0xff, 0xd7, 0xbb, 0x58, 0xdf, 0x70, 0x83, 0x68, 0x5f, 0x96, 0x7b, 0x65, 0xac, 0x72, 0xff, 0x1d, + 0xc1, 0x15, 0x31, 0x21, 0x93, 0x9e, 0xc7, 0xcb, 0xd9, 0x7d, 0xf9, 0xaf, 0x5b, 0x62, 0x14, 0x8d, + 0x87, 0x47, 0x27, 0x95, 0xe7, 0xb3, 0x87, 0x72, 0x92, 0xb0, 0x1d, 0xe5, 0xad, 0x71, 0x12, 0x66, + 0x9f, 0xd7, 0xc5, 0x13, 0xf8, 0x07, 0x82, 0xab, 0x72, 0xce, 0xa5, 0xf4, 0xae, 0x64, 0xea, 0xcd, + 0x98, 0xf1, 0xa3, 0x08, 0xfe, 0xfc, 0xe8, 0xa4, 0xf2, 0xc2, 0x90, 0xe1, 0x9a, 0x28, 0x7e, 0x57, + 0xb9, 0x33, 0x8e, 0xe2, 0xb0, 0x47, 0xda, 0x2a, 0xaa, 0xd4, 0xb7, 0x76, 0x37, 0x6d, 0x27, 0xfa, + 0xb8, 0xd3, 0xd4, 0x5a, 0xbe, 0xab, 0x4b, 0xbe, 0xaa, 0xfc, 0x67, 0x63, 0xfb, 0xaa, 0x4d, 0x3d, + 0x51, 0x54, 0x7a, 0xc6, 0x5f, 0x9e, 0xb5, 0xf8, 0xd8, 0xcc, 0x0b, 0xe8, 0xad, 0x7f, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xb8, 0x9c, 0xe7, 0x3e, 0xf4, 0x0d, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup.pb.go new file mode 100644 index 000000000..62d4bd254 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/postgresql/v1/backup.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A PostgreSQL Backup resource. For more information, see +// the [Developer's Guide](/docs/managed-postgresql/concepts/backup). +type Backup struct { + // ID of the backup. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the backup belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + // (i.e. when the backup operation was completed). + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // ID of the PostgreSQL cluster that the backup was created for. + SourceClusterId string `protobuf:"bytes,4,opt,name=source_cluster_id,json=sourceClusterId,proto3" json:"source_cluster_id,omitempty"` + // Time when the backup operation was started. + StartedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup) Reset() { *m = Backup{} } +func (m *Backup) String() string { return proto.CompactTextString(m) } +func (*Backup) ProtoMessage() {} +func (*Backup) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_201d65cfeeef8a8a, []int{0} +} +func (m *Backup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Backup.Unmarshal(m, b) +} +func (m *Backup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Backup.Marshal(b, m, deterministic) +} +func (dst *Backup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup.Merge(dst, src) +} +func (m *Backup) XXX_Size() int { + return xxx_messageInfo_Backup.Size(m) +} +func (m *Backup) XXX_DiscardUnknown() { + xxx_messageInfo_Backup.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup proto.InternalMessageInfo + +func (m *Backup) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Backup) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Backup) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Backup) GetSourceClusterId() string { + if m != nil { + return m.SourceClusterId + } + return "" +} + +func (m *Backup) GetStartedAt() *timestamp.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func init() { + proto.RegisterType((*Backup)(nil), "yandex.cloud.mdb.postgresql.v1.Backup") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/backup.proto", fileDescriptor_backup_201d65cfeeef8a8a) +} + +var fileDescriptor_backup_201d65cfeeef8a8a = 
[]byte{ + // 268 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xc1, 0x4a, 0x33, 0x31, + 0x14, 0x85, 0x99, 0xf9, 0x7f, 0x8b, 0x13, 0x41, 0x71, 0x56, 0x43, 0x05, 0x2d, 0xae, 0x8a, 0xd2, + 0x84, 0xea, 0x4a, 0x5c, 0xb5, 0xae, 0x5c, 0x88, 0x50, 0x5c, 0xb9, 0x19, 0x92, 0xdc, 0x34, 0x0e, + 0xce, 0xf4, 0x8e, 0xc9, 0x4d, 0xd1, 0x27, 0xf5, 0x75, 0x84, 0x64, 0x4a, 0x77, 0xba, 0xcc, 0xc9, + 0x77, 0xcf, 0x07, 0x87, 0x5d, 0x7f, 0xc9, 0x0d, 0x98, 0x4f, 0xa1, 0x5b, 0x0c, 0x20, 0x3a, 0x50, + 0xa2, 0x47, 0x4f, 0xd6, 0x19, 0xff, 0xd1, 0x8a, 0xed, 0x5c, 0x28, 0xa9, 0xdf, 0x43, 0xcf, 0x7b, + 0x87, 0x84, 0xe5, 0x79, 0x82, 0x79, 0x84, 0x79, 0x07, 0x8a, 0xef, 0x61, 0xbe, 0x9d, 0x8f, 0x2f, + 0x2c, 0xa2, 0x6d, 0x8d, 0x88, 0xb4, 0x0a, 0x6b, 0x41, 0x4d, 0x67, 0x3c, 0xc9, 0x6e, 0x28, 0xb8, + 0xfc, 0xce, 0xd8, 0x68, 0x19, 0x1b, 0xcb, 0x63, 0x96, 0x37, 0x50, 0x65, 0x93, 0x6c, 0x5a, 0xac, + 0xf2, 0x06, 0xca, 0x33, 0x56, 0xac, 0xb1, 0x05, 0xe3, 0xea, 0x06, 0xaa, 0x3c, 0xc6, 0x87, 0x29, + 0x78, 0x84, 0xf2, 0x8e, 0x31, 0xed, 0x8c, 0x24, 0x03, 0xb5, 0xa4, 0xea, 0xdf, 0x24, 0x9b, 0x1e, + 0xdd, 0x8c, 0x79, 0xb2, 0xf1, 0x9d, 0x8d, 0xbf, 0xec, 0x6c, 0xab, 0x62, 0xa0, 0x17, 0x54, 0x5e, + 0xb1, 0x53, 0x8f, 0xc1, 0x69, 0x53, 0xeb, 0x36, 0x78, 0x4a, 0xfd, 0xff, 0x63, 0xff, 0x49, 0xfa, + 0x78, 0x48, 0x79, 0xd2, 0x78, 0x92, 0x6e, 0xd0, 0x1c, 0xfc, 0xad, 0x19, 0xe8, 0x05, 0x2d, 0x9f, + 0x5f, 0x9f, 0x6c, 0x43, 0x6f, 0x41, 0x71, 0x8d, 0x9d, 0x48, 0x3b, 0xcd, 0xd2, 0xa8, 0x16, 0x67, + 0xd6, 0x6c, 0xe2, 0xb9, 0xf8, 0x7d, 0xed, 0xfb, 0xfd, 0x4b, 0x8d, 0xe2, 0xc1, 0xed, 0x4f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xed, 0x24, 0x15, 0xfb, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup_service.pb.go new file mode 100644 index 000000000..00e4837f9 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/backup_service.pb.go @@ -0,0 +1,335 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/backup_service.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetBackupRequest struct { + // ID of the backup to return information about. + // To get the backup ID, use a [ClusterService.ListBackups] request. 
+ BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBackupRequest) Reset() { *m = GetBackupRequest{} } +func (m *GetBackupRequest) String() string { return proto.CompactTextString(m) } +func (*GetBackupRequest) ProtoMessage() {} +func (*GetBackupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_76dfe2452a94567c, []int{0} +} +func (m *GetBackupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetBackupRequest.Unmarshal(m, b) +} +func (m *GetBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetBackupRequest.Marshal(b, m, deterministic) +} +func (dst *GetBackupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBackupRequest.Merge(dst, src) +} +func (m *GetBackupRequest) XXX_Size() int { + return xxx_messageInfo_GetBackupRequest.Size(m) +} +func (m *GetBackupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetBackupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBackupRequest proto.InternalMessageInfo + +func (m *GetBackupRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type ListBackupsRequest struct { + // ID of the folder to list backups in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, Set [page_token] to the [ListBackupsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsRequest) Reset() { *m = ListBackupsRequest{} } +func (m *ListBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBackupsRequest) ProtoMessage() {} +func (*ListBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_76dfe2452a94567c, []int{1} +} +func (m *ListBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsRequest.Unmarshal(m, b) +} +func (m *ListBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsRequest.Merge(dst, src) +} +func (m *ListBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListBackupsRequest.Size(m) +} +func (m *ListBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsRequest proto.InternalMessageInfo + +func (m *ListBackupsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListBackupsResponse struct { + // List of PostgreSQL Backup resources. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsResponse) Reset() { *m = ListBackupsResponse{} } +func (m *ListBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBackupsResponse) ProtoMessage() {} +func (*ListBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_76dfe2452a94567c, []int{2} +} +func (m *ListBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsResponse.Unmarshal(m, b) +} +func (m *ListBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsResponse.Merge(dst, src) +} +func (m *ListBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListBackupsResponse.Size(m) +} +func (m *ListBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsResponse proto.InternalMessageInfo + +func (m *ListBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetBackupRequest)(nil), "yandex.cloud.mdb.postgresql.v1.GetBackupRequest") + proto.RegisterType((*ListBackupsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListBackupsRequest") + proto.RegisterType((*ListBackupsResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListBackupsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BackupServiceClient interface { + // Returns the specified PostgreSQL Backup resource. + // + // To get the list of available PostgreSQL Backup resources, make a [List] request. + Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) + // Retrieves the list of Backup resources available for the specified folder. + List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) +} + +type backupServiceClient struct { + cc *grpc.ClientConn +} + +func NewBackupServiceClient(cc *grpc.ClientConn) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) { + out := new(Backup) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.BackupService/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) { + out := new(ListBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.BackupService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +type BackupServiceServer interface { + // Returns the specified PostgreSQL Backup resource. + // + // To get the list of available PostgreSQL Backup resources, make a [List] request. + Get(context.Context, *GetBackupRequest) (*Backup, error) + // Retrieves the list of Backup resources available for the specified folder. + List(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) +} + +func RegisterBackupServiceServer(s *grpc.Server, srv BackupServiceServer) { + s.RegisterService(&_BackupService_serviceDesc, srv) +} + +func _BackupService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.BackupService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).Get(ctx, req.(*GetBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.BackupService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).List(ctx, req.(*ListBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BackupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.postgresql.v1.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _BackupService_Get_Handler, + }, + { + MethodName: "List", + Handler: _BackupService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/postgresql/v1/backup_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/backup_service.proto", fileDescriptor_backup_service_76dfe2452a94567c) +} + +var fileDescriptor_backup_service_76dfe2452a94567c = []byte{ + // 466 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xbf, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xe5, 0x24, 0x94, 0x9c, 0xa1, 0x02, 0x99, 0x25, 0x8a, 0xa0, 0x0a, 0x37, 0x94, 0xf0, + 0x23, 0xe7, 0xbb, 0x44, 0x9d, 0x68, 0x25, 0x94, 0xa5, 0xaa, 0x04, 0x02, 0x5d, 0x99, 0x58, 0x22, + 0x5f, 0xfc, 0x30, 0x56, 0x2f, 0xf6, 0x35, 0x76, 0xa2, 0x52, 0xc4, 0xc2, 0x98, 0x91, 0x0e, 0xfc, + 0x39, 0x8c, 0xed, 0xce, 0xbf, 0xc0, 0xc0, 0xdf, 0xc0, 0x84, 0xce, 0x4e, 0x08, 0x2d, 0x28, 0x2d, + 0xa3, 0xfd, 0x7d, 0x9f, 0xf7, 0xbe, 0x7a, 0xdf, 0x87, 0x7b, 
0xef, 0x99, 0xe2, 0x70, 0x44, 0x87, + 0xb9, 0x9e, 0x70, 0x3a, 0xe2, 0x19, 0x2d, 0xb4, 0xb1, 0x62, 0x0c, 0xe6, 0x30, 0xa7, 0xd3, 0x84, + 0x66, 0x6c, 0x78, 0x30, 0x29, 0x06, 0x06, 0xc6, 0x53, 0x39, 0x84, 0xa8, 0x18, 0x6b, 0xab, 0xc9, + 0x86, 0x87, 0x22, 0x07, 0x45, 0x23, 0x9e, 0x45, 0x4b, 0x28, 0x9a, 0x26, 0xcd, 0xbb, 0x42, 0x6b, + 0x91, 0x03, 0x65, 0x85, 0xa4, 0x4c, 0x29, 0x6d, 0x99, 0x95, 0x5a, 0x19, 0x4f, 0x37, 0xef, 0x9d, + 0x1b, 0x39, 0x65, 0xb9, 0xe4, 0x4e, 0x9f, 0xcb, 0x8f, 0xaf, 0xe4, 0xc8, 0x17, 0x87, 0x5b, 0xf8, + 0xf6, 0x2e, 0xd8, 0xbe, 0xfb, 0x4a, 0xe1, 0x70, 0x02, 0xc6, 0x92, 0xfb, 0x38, 0x98, 0xbb, 0x96, + 0xbc, 0x81, 0x5a, 0xa8, 0x1d, 0xf4, 0x6b, 0x3f, 0x4e, 0x13, 0x94, 0xd6, 0xfd, 0xf7, 0x1e, 0x0f, + 0x3f, 0x23, 0x4c, 0x9e, 0x4b, 0x33, 0x07, 0xcd, 0x82, 0x7c, 0x88, 0x83, 0xb7, 0x3a, 0xe7, 0x30, + 0x5e, 0x92, 0x37, 0x4b, 0x72, 0x76, 0x96, 0xd4, 0xb6, 0x77, 0xb6, 0xe2, 0xb4, 0xee, 0xe5, 0x3d, + 0x4e, 0x1e, 0xe0, 0xa0, 0x60, 0x02, 0x06, 0x46, 0x1e, 0x43, 0xa3, 0xd2, 0x42, 0xed, 0x6a, 0x1f, + 0xff, 0x3c, 0x4d, 0xd6, 0xb6, 0x77, 0x92, 0x38, 0x8e, 0xd3, 0x7a, 0x29, 0xee, 0xcb, 0x63, 0x20, + 0x6d, 0x8c, 0x5d, 0xa1, 0xd5, 0x07, 0xa0, 0x1a, 0x55, 0xd7, 0x34, 0x98, 0x9d, 0x25, 0xd7, 0x5c, + 0x65, 0xea, 0xba, 0xbc, 0x2e, 0xb5, 0x70, 0x86, 0xf0, 0x9d, 0x73, 0xa6, 0x4c, 0xa1, 0x95, 0x01, + 0xf2, 0x0c, 0x5f, 0xf7, 0xc6, 0x4d, 0x03, 0xb5, 0xaa, 0xed, 0x1b, 0xdd, 0xcd, 0x68, 0xf5, 0xfe, + 0xa3, 0xf9, 0x3e, 0x16, 0x18, 0x49, 0xf0, 0x2d, 0x05, 0x47, 0x76, 0xf0, 0x87, 0x91, 0xca, 0x45, + 0x23, 0xeb, 0x65, 0xc5, 0xab, 0x85, 0x99, 0xee, 0xd7, 0x0a, 0x5e, 0xf7, 0x6d, 0xf6, 0x7d, 0xf4, + 0xe4, 0x04, 0xe1, 0xea, 0x2e, 0x58, 0x12, 0x5f, 0x36, 0xfd, 0x62, 0x20, 0xcd, 0x2b, 0xfa, 0x0d, + 0xbb, 0x9f, 0xbe, 0x7d, 0x3f, 0xa9, 0x3c, 0x21, 0x8f, 0xe8, 0x88, 0x29, 0x26, 0x80, 0x77, 0xfe, + 0x95, 0xbc, 0xa1, 0x1f, 0x7e, 0xc7, 0xfb, 0x91, 0x7c, 0x41, 0xb8, 0x56, 0x2e, 0x8d, 0x74, 0x2f, + 0x1b, 0xf2, 0x77, 0xde, 0xcd, 0xde, 0x7f, 0x31, 0x3e, 0x8e, 0x70, 0xd3, 0xb9, 0x6c, 0x91, 0x8d, + 0xd5, 0x2e, 0xfb, 0x2f, 0xdf, 0xbc, 0x10, 0xd2, 0xbe, 0x9b, 0x64, 0xd1, 0x50, 0x8f, 0xa8, 0x1f, + 0xd4, 0xf1, 0x47, 0x2d, 0x74, 0x47, 0x80, 0x72, 0x17, 0x4c, 0x57, 0x5f, 0xfb, 0xd3, 0xe5, 0x2b, + 0x5b, 0x73, 0x40, 0xef, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0xd4, 0x32, 0xc6, 0xb3, 0x03, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster.pb.go new file mode 100644 index 000000000..dfd366568 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster.pb.go @@ -0,0 +1,1331 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/cluster.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Cluster_Environment int32 + +const ( + Cluster_ENVIRONMENT_UNSPECIFIED Cluster_Environment = 0 + // Stable environment with a conservative update policy: + // only hotfixes are applied during regular maintenance. + Cluster_PRODUCTION Cluster_Environment = 1 + // Environment with more aggressive update policy: new versions + // are rolled out irrespective of backward compatibility. + Cluster_PRESTABLE Cluster_Environment = 2 +) + +var Cluster_Environment_name = map[int32]string{ + 0: "ENVIRONMENT_UNSPECIFIED", + 1: "PRODUCTION", + 2: "PRESTABLE", +} +var Cluster_Environment_value = map[string]int32{ + "ENVIRONMENT_UNSPECIFIED": 0, + "PRODUCTION": 1, + "PRESTABLE": 2, +} + +func (x Cluster_Environment) String() string { + return proto.EnumName(Cluster_Environment_name, int32(x)) +} +func (Cluster_Environment) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{0, 0} +} + +type Cluster_Health int32 + +const ( + // State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). + Cluster_HEALTH_UNKNOWN Cluster_Health = 0 + // Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). + Cluster_ALIVE Cluster_Health = 1 + // Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). + Cluster_DEAD Cluster_Health = 2 + // Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). + Cluster_DEGRADED Cluster_Health = 3 +) + +var Cluster_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Cluster_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Cluster_Health) String() string { + return proto.EnumName(Cluster_Health_name, int32(x)) +} +func (Cluster_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{0, 1} +} + +type Cluster_Status int32 + +const ( + // Cluster state is unknown. + Cluster_STATUS_UNKNOWN Cluster_Status = 0 + // Cluster is being created. + Cluster_CREATING Cluster_Status = 1 + // Cluster is running normally. + Cluster_RUNNING Cluster_Status = 2 + // Cluster encountered a problem and cannot operate. + Cluster_ERROR Cluster_Status = 3 + // Cluster is being updated. + Cluster_UPDATING Cluster_Status = 4 + // Cluster is stopping. + Cluster_STOPPING Cluster_Status = 5 + // Cluster stopped. + Cluster_STOPPED Cluster_Status = 6 + // Cluster is starting. + Cluster_STARTING Cluster_Status = 7 +) + +var Cluster_Status_name = map[int32]string{ + 0: "STATUS_UNKNOWN", + 1: "CREATING", + 2: "RUNNING", + 3: "ERROR", + 4: "UPDATING", + 5: "STOPPING", + 6: "STOPPED", + 7: "STARTING", +} +var Cluster_Status_value = map[string]int32{ + "STATUS_UNKNOWN": 0, + "CREATING": 1, + "RUNNING": 2, + "ERROR": 3, + "UPDATING": 4, + "STOPPING": 5, + "STOPPED": 6, + "STARTING": 7, +} + +func (x Cluster_Status) String() string { + return proto.EnumName(Cluster_Status_name, int32(x)) +} +func (Cluster_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{0, 2} +} + +type ConnectionPoolerConfig_PoolingMode int32 + +const ( + ConnectionPoolerConfig_POOLING_MODE_UNSPECIFIED ConnectionPoolerConfig_PoolingMode = 0 + // Session pooling mode. 
+ ConnectionPoolerConfig_SESSION ConnectionPoolerConfig_PoolingMode = 1 + // Transaction pooling mode. + ConnectionPoolerConfig_TRANSACTION ConnectionPoolerConfig_PoolingMode = 2 + // Statement pooling mode. + ConnectionPoolerConfig_STATEMENT ConnectionPoolerConfig_PoolingMode = 3 +) + +var ConnectionPoolerConfig_PoolingMode_name = map[int32]string{ + 0: "POOLING_MODE_UNSPECIFIED", + 1: "SESSION", + 2: "TRANSACTION", + 3: "STATEMENT", +} +var ConnectionPoolerConfig_PoolingMode_value = map[string]int32{ + "POOLING_MODE_UNSPECIFIED": 0, + "SESSION": 1, + "TRANSACTION": 2, + "STATEMENT": 3, +} + +func (x ConnectionPoolerConfig_PoolingMode) String() string { + return proto.EnumName(ConnectionPoolerConfig_PoolingMode_name, int32(x)) +} +func (ConnectionPoolerConfig_PoolingMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{3, 0} +} + +type Host_Role int32 + +const ( + // Role of the host in the cluster is unknown. + Host_ROLE_UNKNOWN Host_Role = 0 + // Host is the master PostgreSQL server in the cluster. + Host_MASTER Host_Role = 1 + // Host is a replica (standby) PostgreSQL server in the cluster. + Host_REPLICA Host_Role = 2 +) + +var Host_Role_name = map[int32]string{ + 0: "ROLE_UNKNOWN", + 1: "MASTER", + 2: "REPLICA", +} +var Host_Role_value = map[string]int32{ + "ROLE_UNKNOWN": 0, + "MASTER": 1, + "REPLICA": 2, +} + +func (x Host_Role) String() string { + return proto.EnumName(Host_Role_name, int32(x)) +} +func (Host_Role) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{4, 0} +} + +type Host_ReplicaType int32 + +const ( + Host_REPLICA_TYPE_UNKNOWN Host_ReplicaType = 0 + Host_ASYNC Host_ReplicaType = 1 + Host_SYNC Host_ReplicaType = 2 +) + +var Host_ReplicaType_name = map[int32]string{ + 0: "REPLICA_TYPE_UNKNOWN", + 1: "ASYNC", + 2: "SYNC", +} +var Host_ReplicaType_value = map[string]int32{ + "REPLICA_TYPE_UNKNOWN": 0, + "ASYNC": 1, + "SYNC": 2, +} + +func (x Host_ReplicaType) String() string { + return proto.EnumName(Host_ReplicaType_name, int32(x)) +} +func (Host_ReplicaType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{4, 1} +} + +type Host_Health int32 + +const ( + // Health of the host is unknown. + Host_HEALTH_UNKNOWN Host_Health = 0 + // The host is performing all its functions normally. + Host_ALIVE Host_Health = 1 + // The host is inoperable, and cannot perform any of its essential functions. + Host_DEAD Host_Health = 2 + // The host is degraded, and can perform only some of its essential functions. + Host_DEGRADED Host_Health = 3 +) + +var Host_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Host_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Host_Health) String() string { + return proto.EnumName(Host_Health_name, int32(x)) +} +func (Host_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{4, 2} +} + +type Service_Type int32 + +const ( + Service_TYPE_UNSPECIFIED Service_Type = 0 + // The host is a PostgreSQL server. + Service_POSTGRESQL Service_Type = 1 + // The host is a PgBouncer server. 
+ Service_POOLER Service_Type = 2 +) + +var Service_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "POSTGRESQL", + 2: "POOLER", +} +var Service_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "POSTGRESQL": 1, + "POOLER": 2, +} + +func (x Service_Type) String() string { + return proto.EnumName(Service_Type_name, int32(x)) +} +func (Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{6, 0} +} + +type Service_Health int32 + +const ( + // Health of the server is unknown. + Service_HEALTH_UNKNOWN Service_Health = 0 + // The server is working normally. + Service_ALIVE Service_Health = 1 + // The server is dead or unresponsive. + Service_DEAD Service_Health = 2 +) + +var Service_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", +} +var Service_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, +} + +func (x Service_Health) String() string { + return proto.EnumName(Service_Health_name, int32(x)) +} +func (Service_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{6, 1} +} + +// A PostgreSQL Cluster resource. For more information, see +// the [Concepts](/docs/managed-postgresql/concepts) section of the documentation. +type Cluster struct { + // ID of the PostgreSQL cluster. + // This ID is assigned by MDB at creation time. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the PostgreSQL cluster belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the PostgreSQL cluster. + // The name is unique within the folder. 1-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the PostgreSQL cluster. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the PostgreSQL cluster as `` key:value `` pairs. + // Maximum 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the PostgreSQL cluster. + Environment Cluster_Environment `protobuf:"varint,7,opt,name=environment,proto3,enum=yandex.cloud.mdb.postgresql.v1.Cluster_Environment" json:"environment,omitempty"` + // Description of monitoring systems relevant to the PostgreSQL cluster. + Monitoring []*Monitoring `protobuf:"bytes,8,rep,name=monitoring,proto3" json:"monitoring,omitempty"` + // Configuration of the PostgreSQL cluster. + Config *ClusterConfig `protobuf:"bytes,9,opt,name=config,proto3" json:"config,omitempty"` + // ID of the network that the cluster belongs to. + NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Aggregated cluster health. + Health Cluster_Health `protobuf:"varint,11,opt,name=health,proto3,enum=yandex.cloud.mdb.postgresql.v1.Cluster_Health" json:"health,omitempty"` + // Current state of the cluster. 
+ Status Cluster_Status `protobuf:"varint,12,opt,name=status,proto3,enum=yandex.cloud.mdb.postgresql.v1.Cluster_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{0} +} +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (dst *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(dst, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Cluster) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Cluster) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Cluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Cluster) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *Cluster) GetMonitoring() []*Monitoring { + if m != nil { + return m.Monitoring + } + return nil +} + +func (m *Cluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Cluster) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *Cluster) GetHealth() Cluster_Health { + if m != nil { + return m.Health + } + return Cluster_HEALTH_UNKNOWN +} + +func (m *Cluster) GetStatus() Cluster_Status { + if m != nil { + return m.Status + } + return Cluster_STATUS_UNKNOWN +} + +// Monitoring system. +type Monitoring struct { + // Name of the monitoring system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the monitoring system. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Link to the monitoring system charts for the PostgreSQL cluster. 
+ Link string `protobuf:"bytes,3,opt,name=link,proto3" json:"link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Monitoring) Reset() { *m = Monitoring{} } +func (m *Monitoring) String() string { return proto.CompactTextString(m) } +func (*Monitoring) ProtoMessage() {} +func (*Monitoring) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{1} +} +func (m *Monitoring) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Monitoring.Unmarshal(m, b) +} +func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Monitoring.Marshal(b, m, deterministic) +} +func (dst *Monitoring) XXX_Merge(src proto.Message) { + xxx_messageInfo_Monitoring.Merge(dst, src) +} +func (m *Monitoring) XXX_Size() int { + return xxx_messageInfo_Monitoring.Size(m) +} +func (m *Monitoring) XXX_DiscardUnknown() { + xxx_messageInfo_Monitoring.DiscardUnknown(m) +} + +var xxx_messageInfo_Monitoring proto.InternalMessageInfo + +func (m *Monitoring) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Monitoring) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Monitoring) GetLink() string { + if m != nil { + return m.Link + } + return "" +} + +type ClusterConfig struct { + // Version of PostgreSQL server software. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Configuration for PostgreSQL servers in the cluster. + // + // Types that are valid to be assigned to PostgresqlConfig: + // *ClusterConfig_PostgresqlConfig_9_6 + // *ClusterConfig_PostgresqlConfig_10 + // *ClusterConfig_PostgresqlConfig_11 + PostgresqlConfig isClusterConfig_PostgresqlConfig `protobuf_oneof:"postgresql_config"` + // Configuration of the connection pooler. + PoolerConfig *ConnectionPoolerConfig `protobuf:"bytes,4,opt,name=pooler_config,json=poolerConfig,proto3" json:"pooler_config,omitempty"` + // Resources allocated to PostgreSQL hosts. + Resources *Resources `protobuf:"bytes,5,opt,name=resources,proto3" json:"resources,omitempty"` + // Configuration setting which enables/disables autofailover in cluster. 
+ Autofailover *wrappers.BoolValue `protobuf:"bytes,6,opt,name=autofailover,proto3" json:"autofailover,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } +func (m *ClusterConfig) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig) ProtoMessage() {} +func (*ClusterConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{2} +} +func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterConfig.Unmarshal(m, b) +} +func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterConfig.Marshal(b, m, deterministic) +} +func (dst *ClusterConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterConfig.Merge(dst, src) +} +func (m *ClusterConfig) XXX_Size() int { + return xxx_messageInfo_ClusterConfig.Size(m) +} +func (m *ClusterConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo + +func (m *ClusterConfig) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type isClusterConfig_PostgresqlConfig interface { + isClusterConfig_PostgresqlConfig() +} + +type ClusterConfig_PostgresqlConfig_9_6 struct { + PostgresqlConfig_9_6 *config.PostgresqlConfigSet9_6 `protobuf:"bytes,2,opt,name=postgresql_config_9_6,json=postgresqlConfig96,proto3,oneof"` +} + +type ClusterConfig_PostgresqlConfig_10 struct { + PostgresqlConfig_10 *config.PostgresqlConfigSet10 `protobuf:"bytes,3,opt,name=postgresql_config_10,json=postgresqlConfig10,proto3,oneof"` +} + +type ClusterConfig_PostgresqlConfig_11 struct { + PostgresqlConfig_11 *config.PostgresqlConfigSet11 `protobuf:"bytes,8,opt,name=postgresql_config_11,json=postgresqlConfig11,proto3,oneof"` +} + +func (*ClusterConfig_PostgresqlConfig_9_6) isClusterConfig_PostgresqlConfig() {} + +func (*ClusterConfig_PostgresqlConfig_10) isClusterConfig_PostgresqlConfig() {} + +func (*ClusterConfig_PostgresqlConfig_11) isClusterConfig_PostgresqlConfig() {} + +func (m *ClusterConfig) GetPostgresqlConfig() isClusterConfig_PostgresqlConfig { + if m != nil { + return m.PostgresqlConfig + } + return nil +} + +func (m *ClusterConfig) GetPostgresqlConfig_9_6() *config.PostgresqlConfigSet9_6 { + if x, ok := m.GetPostgresqlConfig().(*ClusterConfig_PostgresqlConfig_9_6); ok { + return x.PostgresqlConfig_9_6 + } + return nil +} + +func (m *ClusterConfig) GetPostgresqlConfig_10() *config.PostgresqlConfigSet10 { + if x, ok := m.GetPostgresqlConfig().(*ClusterConfig_PostgresqlConfig_10); ok { + return x.PostgresqlConfig_10 + } + return nil +} + +func (m *ClusterConfig) GetPostgresqlConfig_11() *config.PostgresqlConfigSet11 { + if x, ok := m.GetPostgresqlConfig().(*ClusterConfig_PostgresqlConfig_11); ok { + return x.PostgresqlConfig_11 + } + return nil +} + +func (m *ClusterConfig) GetPoolerConfig() *ConnectionPoolerConfig { + if m != nil { + return m.PoolerConfig + } + return nil +} + +func (m *ClusterConfig) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ClusterConfig) GetAutofailover() *wrappers.BoolValue { + if m != nil { + return m.Autofailover + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ClusterConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClusterConfig_OneofMarshaler, _ClusterConfig_OneofUnmarshaler, _ClusterConfig_OneofSizer, []interface{}{ + (*ClusterConfig_PostgresqlConfig_9_6)(nil), + (*ClusterConfig_PostgresqlConfig_10)(nil), + (*ClusterConfig_PostgresqlConfig_11)(nil), + } +} + +func _ClusterConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClusterConfig) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *ClusterConfig_PostgresqlConfig_9_6: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_9_6); err != nil { + return err + } + case *ClusterConfig_PostgresqlConfig_10: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_10); err != nil { + return err + } + case *ClusterConfig_PostgresqlConfig_11: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_11); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClusterConfig.PostgresqlConfig has unexpected type %T", x) + } + return nil +} + +func _ClusterConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClusterConfig) + switch tag { + case 2: // postgresql_config.postgresql_config_9_6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlConfigSet9_6) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ClusterConfig_PostgresqlConfig_9_6{msg} + return true, err + case 3: // postgresql_config.postgresql_config_10 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlConfigSet10) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ClusterConfig_PostgresqlConfig_10{msg} + return true, err + case 8: // postgresql_config.postgresql_config_11 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlConfigSet11) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ClusterConfig_PostgresqlConfig_11{msg} + return true, err + default: + return false, nil + } +} + +func _ClusterConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClusterConfig) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *ClusterConfig_PostgresqlConfig_9_6: + s := proto.Size(x.PostgresqlConfig_9_6) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ClusterConfig_PostgresqlConfig_10: + s := proto.Size(x.PostgresqlConfig_10) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ClusterConfig_PostgresqlConfig_11: + s := proto.Size(x.PostgresqlConfig_11) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ConnectionPoolerConfig struct { + // Mode that the connection pooler is working in. + // See descriptions of all modes in the [documentation for PgBouncer](https://pgbouncer.github.io/usage). 
+ PoolingMode ConnectionPoolerConfig_PoolingMode `protobuf:"varint,1,opt,name=pooling_mode,json=poolingMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.ConnectionPoolerConfig_PoolingMode" json:"pooling_mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionPoolerConfig) Reset() { *m = ConnectionPoolerConfig{} } +func (m *ConnectionPoolerConfig) String() string { return proto.CompactTextString(m) } +func (*ConnectionPoolerConfig) ProtoMessage() {} +func (*ConnectionPoolerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{3} +} +func (m *ConnectionPoolerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectionPoolerConfig.Unmarshal(m, b) +} +func (m *ConnectionPoolerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectionPoolerConfig.Marshal(b, m, deterministic) +} +func (dst *ConnectionPoolerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionPoolerConfig.Merge(dst, src) +} +func (m *ConnectionPoolerConfig) XXX_Size() int { + return xxx_messageInfo_ConnectionPoolerConfig.Size(m) +} +func (m *ConnectionPoolerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionPoolerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionPoolerConfig proto.InternalMessageInfo + +func (m *ConnectionPoolerConfig) GetPoolingMode() ConnectionPoolerConfig_PoolingMode { + if m != nil { + return m.PoolingMode + } + return ConnectionPoolerConfig_POOLING_MODE_UNSPECIFIED +} + +type Host struct { + // Name of the PostgreSQL host. The host name is assigned by MDB at creation time, and cannot be changed. + // 1-63 characters long. + // + // The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the PostgreSQL host. The ID is assigned by MDB at creation time. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the availability zone where the PostgreSQL host resides. + ZoneId string `protobuf:"bytes,3,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // Resources allocated to the PostgreSQL host. + Resources *Resources `protobuf:"bytes,4,opt,name=resources,proto3" json:"resources,omitempty"` + // Role of the host in the cluster. + Role Host_Role `protobuf:"varint,5,opt,name=role,proto3,enum=yandex.cloud.mdb.postgresql.v1.Host_Role" json:"role,omitempty"` + // Status code of the aggregated health of the host. + Health Host_Health `protobuf:"varint,6,opt,name=health,proto3,enum=yandex.cloud.mdb.postgresql.v1.Host_Health" json:"health,omitempty"` + // Services provided by the host. + Services []*Service `protobuf:"bytes,7,rep,name=services,proto3" json:"services,omitempty"` + // ID of the subnet that the host belongs to. + SubnetId string `protobuf:"bytes,8,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Name of the host to be used as the replication source for cascading replication. + ReplicationSource string `protobuf:"bytes,9,opt,name=replication_source,json=replicationSource,proto3" json:"replication_source,omitempty"` + // Priority of the host as a replica. Higher value means higher priority. + // + // The host with the highest priority is the synchronous replica. All others are asynchronous. 
+ // The synchronous replica replaces the master when needed. + // + // When a replica becomes the master, its priority is ignored. + Priority *wrappers.Int64Value `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + // Configuration of a PostgreSQL server for the host. + Config *HostConfig `protobuf:"bytes,11,opt,name=config,proto3" json:"config,omitempty"` + // Flag showing public IP assignment status to this host. + AssignPublicIp bool `protobuf:"varint,12,opt,name=assign_public_ip,json=assignPublicIp,proto3" json:"assign_public_ip,omitempty"` + ReplicaType Host_ReplicaType `protobuf:"varint,13,opt,name=replica_type,json=replicaType,proto3,enum=yandex.cloud.mdb.postgresql.v1.Host_ReplicaType" json:"replica_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Host) Reset() { *m = Host{} } +func (m *Host) String() string { return proto.CompactTextString(m) } +func (*Host) ProtoMessage() {} +func (*Host) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{4} +} +func (m *Host) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Host.Unmarshal(m, b) +} +func (m *Host) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Host.Marshal(b, m, deterministic) +} +func (dst *Host) XXX_Merge(src proto.Message) { + xxx_messageInfo_Host.Merge(dst, src) +} +func (m *Host) XXX_Size() int { + return xxx_messageInfo_Host.Size(m) +} +func (m *Host) XXX_DiscardUnknown() { + xxx_messageInfo_Host.DiscardUnknown(m) +} + +var xxx_messageInfo_Host proto.InternalMessageInfo + +func (m *Host) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Host) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *Host) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Host) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Host) GetRole() Host_Role { + if m != nil { + return m.Role + } + return Host_ROLE_UNKNOWN +} + +func (m *Host) GetHealth() Host_Health { + if m != nil { + return m.Health + } + return Host_HEALTH_UNKNOWN +} + +func (m *Host) GetServices() []*Service { + if m != nil { + return m.Services + } + return nil +} + +func (m *Host) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *Host) GetReplicationSource() string { + if m != nil { + return m.ReplicationSource + } + return "" +} + +func (m *Host) GetPriority() *wrappers.Int64Value { + if m != nil { + return m.Priority + } + return nil +} + +func (m *Host) GetConfig() *HostConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Host) GetAssignPublicIp() bool { + if m != nil { + return m.AssignPublicIp + } + return false +} + +func (m *Host) GetReplicaType() Host_ReplicaType { + if m != nil { + return m.ReplicaType + } + return Host_REPLICA_TYPE_UNKNOWN +} + +type HostConfig struct { + // Configuration of a PostgreSQL server for the host. 
+ // + // Types that are valid to be assigned to PostgresqlConfig: + // *HostConfig_PostgresqlConfig_9_6 + // *HostConfig_PostgresqlConfig_10 + // *HostConfig_PostgresqlConfig_11 + PostgresqlConfig isHostConfig_PostgresqlConfig `protobuf_oneof:"postgresql_config"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HostConfig) Reset() { *m = HostConfig{} } +func (m *HostConfig) String() string { return proto.CompactTextString(m) } +func (*HostConfig) ProtoMessage() {} +func (*HostConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{5} +} +func (m *HostConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HostConfig.Unmarshal(m, b) +} +func (m *HostConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HostConfig.Marshal(b, m, deterministic) +} +func (dst *HostConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostConfig.Merge(dst, src) +} +func (m *HostConfig) XXX_Size() int { + return xxx_messageInfo_HostConfig.Size(m) +} +func (m *HostConfig) XXX_DiscardUnknown() { + xxx_messageInfo_HostConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_HostConfig proto.InternalMessageInfo + +type isHostConfig_PostgresqlConfig interface { + isHostConfig_PostgresqlConfig() +} + +type HostConfig_PostgresqlConfig_9_6 struct { + PostgresqlConfig_9_6 *config.PostgresqlHostConfig9_6 `protobuf:"bytes,1,opt,name=postgresql_config_9_6,json=postgresqlConfig96,proto3,oneof"` +} + +type HostConfig_PostgresqlConfig_10 struct { + PostgresqlConfig_10 *config.PostgresqlHostConfig10 `protobuf:"bytes,2,opt,name=postgresql_config_10,json=postgresqlConfig10,proto3,oneof"` +} + +type HostConfig_PostgresqlConfig_11 struct { + PostgresqlConfig_11 *config.PostgresqlHostConfig11 `protobuf:"bytes,3,opt,name=postgresql_config_11,json=postgresqlConfig11,proto3,oneof"` +} + +func (*HostConfig_PostgresqlConfig_9_6) isHostConfig_PostgresqlConfig() {} + +func (*HostConfig_PostgresqlConfig_10) isHostConfig_PostgresqlConfig() {} + +func (*HostConfig_PostgresqlConfig_11) isHostConfig_PostgresqlConfig() {} + +func (m *HostConfig) GetPostgresqlConfig() isHostConfig_PostgresqlConfig { + if m != nil { + return m.PostgresqlConfig + } + return nil +} + +func (m *HostConfig) GetPostgresqlConfig_9_6() *config.PostgresqlHostConfig9_6 { + if x, ok := m.GetPostgresqlConfig().(*HostConfig_PostgresqlConfig_9_6); ok { + return x.PostgresqlConfig_9_6 + } + return nil +} + +func (m *HostConfig) GetPostgresqlConfig_10() *config.PostgresqlHostConfig10 { + if x, ok := m.GetPostgresqlConfig().(*HostConfig_PostgresqlConfig_10); ok { + return x.PostgresqlConfig_10 + } + return nil +} + +func (m *HostConfig) GetPostgresqlConfig_11() *config.PostgresqlHostConfig11 { + if x, ok := m.GetPostgresqlConfig().(*HostConfig_PostgresqlConfig_11); ok { + return x.PostgresqlConfig_11 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*HostConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HostConfig_OneofMarshaler, _HostConfig_OneofUnmarshaler, _HostConfig_OneofSizer, []interface{}{ + (*HostConfig_PostgresqlConfig_9_6)(nil), + (*HostConfig_PostgresqlConfig_10)(nil), + (*HostConfig_PostgresqlConfig_11)(nil), + } +} + +func _HostConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HostConfig) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *HostConfig_PostgresqlConfig_9_6: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_9_6); err != nil { + return err + } + case *HostConfig_PostgresqlConfig_10: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_10); err != nil { + return err + } + case *HostConfig_PostgresqlConfig_11: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_11); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HostConfig.PostgresqlConfig has unexpected type %T", x) + } + return nil +} + +func _HostConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HostConfig) + switch tag { + case 1: // postgresql_config.postgresql_config_9_6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlHostConfig9_6) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &HostConfig_PostgresqlConfig_9_6{msg} + return true, err + case 2: // postgresql_config.postgresql_config_10 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlHostConfig10) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &HostConfig_PostgresqlConfig_10{msg} + return true, err + case 3: // postgresql_config.postgresql_config_11 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlHostConfig11) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &HostConfig_PostgresqlConfig_11{msg} + return true, err + default: + return false, nil + } +} + +func _HostConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HostConfig) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *HostConfig_PostgresqlConfig_9_6: + s := proto.Size(x.PostgresqlConfig_9_6) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *HostConfig_PostgresqlConfig_10: + s := proto.Size(x.PostgresqlConfig_10) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *HostConfig_PostgresqlConfig_11: + s := proto.Size(x.PostgresqlConfig_11) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Service struct { + // Type of the service provided by the host. + Type Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=yandex.cloud.mdb.postgresql.v1.Service_Type" json:"type,omitempty"` + // Status code of server availability. 
+ Health Service_Health `protobuf:"varint,2,opt,name=health,proto3,enum=yandex.cloud.mdb.postgresql.v1.Service_Health" json:"health,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{6} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service.Unmarshal(m, b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) +} +func (dst *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(dst, src) +} +func (m *Service) XXX_Size() int { + return xxx_messageInfo_Service.Size(m) +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetType() Service_Type { + if m != nil { + return m.Type + } + return Service_TYPE_UNSPECIFIED +} + +func (m *Service) GetHealth() Service_Health { + if m != nil { + return m.Health + } + return Service_HEALTH_UNKNOWN +} + +type Resources struct { + // ID of the preset for computational resources available to a host (CPU, memory etc.). + // All available presets are listed in the [documentation](/docs/managed-postgresql/concepts/instance-types). + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + // Volume of the storage available to a host, in bytes. + DiskSize int64 `protobuf:"varint,2,opt,name=disk_size,json=diskSize,proto3" json:"disk_size,omitempty"` + // Type of the storage environment for the host. + // Possible values: + // * network-hdd — network HDD drive, + // * network-nvme — network SSD drive, + // * local-nvme — local SSD storage. 
+ DiskTypeId string `protobuf:"bytes,3,opt,name=disk_type_id,json=diskTypeId,proto3" json:"disk_type_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_59240cab5d3bdea0, []int{7} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (dst *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(dst, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +func (m *Resources) GetDiskSize() int64 { + if m != nil { + return m.DiskSize + } + return 0 +} + +func (m *Resources) GetDiskTypeId() string { + if m != nil { + return m.DiskTypeId + } + return "" +} + +func init() { + proto.RegisterType((*Cluster)(nil), "yandex.cloud.mdb.postgresql.v1.Cluster") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.postgresql.v1.Cluster.LabelsEntry") + proto.RegisterType((*Monitoring)(nil), "yandex.cloud.mdb.postgresql.v1.Monitoring") + proto.RegisterType((*ClusterConfig)(nil), "yandex.cloud.mdb.postgresql.v1.ClusterConfig") + proto.RegisterType((*ConnectionPoolerConfig)(nil), "yandex.cloud.mdb.postgresql.v1.ConnectionPoolerConfig") + proto.RegisterType((*Host)(nil), "yandex.cloud.mdb.postgresql.v1.Host") + proto.RegisterType((*HostConfig)(nil), "yandex.cloud.mdb.postgresql.v1.HostConfig") + proto.RegisterType((*Service)(nil), "yandex.cloud.mdb.postgresql.v1.Service") + proto.RegisterType((*Resources)(nil), "yandex.cloud.mdb.postgresql.v1.Resources") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Cluster_Environment", Cluster_Environment_name, Cluster_Environment_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Cluster_Health", Cluster_Health_name, Cluster_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Cluster_Status", Cluster_Status_name, Cluster_Status_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.ConnectionPoolerConfig_PoolingMode", ConnectionPoolerConfig_PoolingMode_name, ConnectionPoolerConfig_PoolingMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Host_Role", Host_Role_name, Host_Role_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Host_ReplicaType", Host_ReplicaType_name, Host_ReplicaType_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Host_Health", Host_Health_name, Host_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Service_Type", Service_Type_name, Service_Type_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.Service_Health", Service_Health_name, Service_Health_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/cluster.proto", fileDescriptor_cluster_59240cab5d3bdea0) +} + +var fileDescriptor_cluster_59240cab5d3bdea0 = []byte{ + // 1432 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4b, 0x57, 0xdb, 0xc6, + 0x17, 0x8f, 0x1f, 0xf8, 0x71, 0x65, 0xf8, 0x2b, 0xf3, 0xa7, 0x8d, 0x0e, 0x69, 0x52, 0x8e, 0x37, + 0xa5, 0x69, 0x90, 0xb1, 0xd3, 0x43, 0x43, 0x9b, 0xa4, 0x35, 0xb6, 0x02, 0x6a, 0x8c, 0xed, 0x8e, + 0x0c, 0x6d, 0xda, 0x85, 0x8e, 0x6c, 0x0d, 0x46, 0x07, 0x59, 0xa3, 0x48, 0x32, 0x09, 0x59, 0xf5, + 0x1b, 0x76, 0xdd, 0x4d, 0x3f, 0x49, 0x16, 0x3d, 0x33, 0x23, 0x2c, 0xf3, 0x70, 0x30, 0xa4, 0x3b, + 0xdd, 0xc7, 0xef, 0xce, 0xe3, 0x3e, 0x7e, 0x23, 0x78, 0x7c, 0x6a, 0x79, 0x36, 0x79, 0x57, 0x19, + 0xb8, 0x74, 0x6c, 0x57, 0x46, 0x76, 0xbf, 0xe2, 0xd3, 0x30, 0x1a, 0x06, 0x24, 0x7c, 0xe3, 0x56, + 0x4e, 0xaa, 0x95, 0x81, 0x3b, 0x0e, 0x23, 0x12, 0xa8, 0x7e, 0x40, 0x23, 0x8a, 0x1e, 0x0a, 0x6f, + 0x95, 0x7b, 0xab, 0x23, 0xbb, 0xaf, 0x26, 0xde, 0xea, 0x49, 0x75, 0xe5, 0xcb, 0x21, 0xa5, 0x43, + 0x97, 0x54, 0xb8, 0x77, 0x7f, 0x7c, 0x58, 0x89, 0x9c, 0x11, 0x09, 0x23, 0x6b, 0xe4, 0x8b, 0x00, + 0x2b, 0x0f, 0x2f, 0x3a, 0xbc, 0x0d, 0x2c, 0xdf, 0x27, 0x41, 0x18, 0xdb, 0xb7, 0xae, 0xdb, 0x0e, + 0xf5, 0x0e, 0x9d, 0xe1, 0x94, 0x72, 0xcb, 0xdc, 0x8c, 0xa1, 0x4f, 0x6f, 0x0a, 0xad, 0x6e, 0xdc, + 0x1a, 0x59, 0x8d, 0x91, 0x4f, 0xe6, 0x43, 0x1e, 0xd1, 0x30, 0x4a, 0x36, 0x5a, 0x9b, 0x1f, 0x34, + 0xd9, 0xe2, 0x4d, 0x30, 0xf1, 0xe6, 0xca, 0x7f, 0xe5, 0x21, 0xdf, 0x10, 0xe9, 0x43, 0x4b, 0x90, + 0x76, 0x6c, 0x25, 0xb5, 0x9a, 0x5a, 0x2b, 0xe2, 0xb4, 0x63, 0xa3, 0xfb, 0x50, 0x3c, 0xa4, 0xae, + 0x4d, 0x02, 0xd3, 0xb1, 0x95, 0x34, 0x57, 0x17, 0x84, 0x42, 0xb7, 0xd1, 0x16, 0xc0, 0x20, 0x20, + 0x56, 0x44, 0x6c, 0xd3, 0x8a, 0x94, 0xcc, 0x6a, 0x6a, 0x4d, 0xaa, 0xad, 0xa8, 0x22, 0x73, 0xea, + 0x59, 0xe6, 0xd4, 0xde, 0x59, 0x6a, 0x71, 0x31, 0xf6, 0xae, 0x47, 0x08, 0x41, 0xd6, 0xb3, 0x46, + 0x44, 0xc9, 0xf2, 0x90, 0xfc, 0x1b, 0xad, 0x82, 0x64, 0x93, 0x70, 0x10, 0x38, 0x7e, 0xe4, 0x50, + 0x4f, 0x59, 0xe0, 0xa6, 0x69, 0x15, 0x7a, 0x05, 0x39, 0xd7, 0xea, 0x13, 0x37, 0x54, 0x72, 0xab, + 0x99, 0x35, 0xa9, 0xf6, 0x44, 0xfd, 0x78, 0x9d, 0xa9, 0xf1, 0xb1, 0xd4, 0x16, 0x47, 0x69, 0x5e, + 0x14, 0x9c, 0xe2, 0x38, 0x04, 0xda, 0x07, 0x89, 0x78, 0x27, 0x4e, 0x40, 0xbd, 0x11, 0xf1, 0x22, + 0x25, 0xbf, 0x9a, 0x5a, 0x5b, 0x9a, 0x3f, 0xa2, 0x96, 0x40, 0xf1, 0x74, 0x1c, 0xf4, 0x33, 0xc0, + 0x88, 0x7a, 0x4e, 0x44, 0x03, 0xc7, 0x1b, 0x2a, 0x05, 0xbe, 0xcf, 0x47, 0xd7, 0x45, 0xdd, 0x9b, + 0x20, 0xf0, 0x14, 0x1a, 0x69, 0x90, 0x13, 0x09, 0x53, 0x8a, 0xfc, 0x72, 0xd7, 0xe7, 0xdc, 0x5d, + 0x83, 0x83, 0x70, 0x0c, 0x46, 0x0f, 0x00, 0x3c, 0x12, 0xbd, 0xa5, 0xc1, 0x31, 0xcb, 0x22, 0xf0, + 0x7b, 0x2d, 0xc6, 0x1a, 0xdd, 0x46, 0x2f, 0x21, 0x77, 0x44, 0x2c, 0x37, 0x3a, 0x52, 0x24, 0x7e, + 0x07, 0xea, 0xbc, 0x77, 0xb0, 0xcb, 0x51, 0x38, 0x46, 0xb3, 0x38, 0x61, 0x64, 0x45, 0xe3, 0x50, + 0x29, 0xdd, 0x2c, 0x8e, 0xc1, 0x51, 0x38, 0x46, 0xaf, 0x6c, 0x81, 0x34, 0x95, 0x2f, 0x24, 0x43, + 0xe6, 0x98, 0x9c, 0xc6, 0x35, 0xc9, 0x3e, 0xd1, 0x32, 0x2c, 0x9c, 0x58, 0xee, 0x98, 0xc4, 0x05, + 0x29, 0x84, 0xef, 0xd3, 0x4f, 0x53, 0x65, 0x1d, 0xa4, 0xa9, 0xc4, 0xa0, 0xfb, 0x70, 0x4f, 0x6b, + 0x1f, 0xe8, 0xb8, 0xd3, 0xde, 0xd3, 0xda, 0x3d, 0x73, 0xbf, 0x6d, 0x74, 0xb5, 0x86, 0xfe, 0x52, + 0xd7, 0x9a, 0xf2, 0x1d, 0xb4, 0x04, 0xd0, 0xc5, 0x9d, 0xe6, 0x7e, 0xa3, 0xa7, 0x77, 0xda, 0x72, + 0x0a, 0x2d, 0x42, 0xb1, 0x8b, 0x35, 0xa3, 0x57, 0xdf, 0x6e, 0x69, 0x72, 0xba, 0xfc, 0x23, 0xe4, + 0xc4, 0xf9, 0x10, 0x82, 0xa5, 0x5d, 0xad, 0xde, 0xea, 0xed, 0x9a, 0xfb, 0xed, 0x57, 0xed, 0xce, + 0xaf, 0x6d, 0xf9, 0x0e, 0x2a, 0xc2, 0x42, 0xbd, 0xa5, 0x1f, 0x68, 0x72, 0x0a, 0x15, 0x20, 0xdb, + 0xd4, 0xea, 0x4d, 0x39, 0x8d, 0x4a, 
0x50, 0x68, 0x6a, 0x3b, 0xb8, 0xde, 0xd4, 0x9a, 0x72, 0xa6, + 0x7c, 0x0a, 0x39, 0x71, 0x30, 0x16, 0xc0, 0xe8, 0xd5, 0x7b, 0xfb, 0xc6, 0x54, 0x80, 0x12, 0x14, + 0x1a, 0x58, 0xab, 0xf7, 0xf4, 0xf6, 0x8e, 0x9c, 0x42, 0x12, 0xe4, 0xf1, 0x7e, 0xbb, 0xcd, 0x84, + 0x34, 0x8b, 0xad, 0x61, 0xdc, 0xc1, 0x72, 0x86, 0x79, 0xed, 0x77, 0x9b, 0xc2, 0x2b, 0xcb, 0x24, + 0xa3, 0xd7, 0xe9, 0x76, 0x99, 0xb4, 0xc0, 0x30, 0x5c, 0xd2, 0x9a, 0x72, 0x4e, 0x98, 0xea, 0x98, + 0x3b, 0xe6, 0xcb, 0x07, 0x00, 0x49, 0x45, 0x4d, 0x7a, 0x2d, 0x35, 0xbb, 0xd7, 0xd2, 0x97, 0x7b, + 0x0d, 0x41, 0xd6, 0x75, 0xbc, 0x63, 0xde, 0xd6, 0x45, 0xcc, 0xbf, 0xcb, 0x1f, 0xb2, 0xb0, 0x78, + 0xae, 0xc4, 0x90, 0x02, 0xf9, 0x13, 0x12, 0x84, 0x2c, 0x86, 0x08, 0x7f, 0x26, 0xa2, 0x00, 0x3e, + 0x4b, 0xb2, 0x6d, 0x8a, 0x4a, 0x34, 0xb7, 0xcc, 0x4d, 0xbe, 0x96, 0x54, 0x7b, 0x7e, 0x5d, 0x71, + 0x08, 0x84, 0xda, 0x9d, 0x28, 0xc5, 0x8a, 0x06, 0x61, 0x13, 0x72, 0xf7, 0x0e, 0x46, 0xfe, 0x05, + 0xcb, 0xd6, 0x26, 0xf2, 0x61, 0xf9, 0xf2, 0x9a, 0xd5, 0x8d, 0x78, 0x34, 0x3d, 0xbb, 0xfd, 0x92, + 0xd5, 0x8d, 0xab, 0x56, 0xac, 0x6e, 0xcc, 0x58, 0xb1, 0xaa, 0x14, 0x3e, 0x79, 0xc5, 0xea, 0x95, + 0x2b, 0x56, 0xd1, 0x1f, 0xb0, 0xe8, 0x53, 0xea, 0x92, 0x20, 0x5e, 0x8d, 0x8f, 0x50, 0xa9, 0xb6, + 0x79, 0x6d, 0xb3, 0x51, 0xcf, 0x23, 0x03, 0x96, 0xda, 0x2e, 0x87, 0xc7, 0x33, 0xa2, 0xe4, 0x4f, + 0x49, 0x68, 0x07, 0x8a, 0x01, 0x09, 0xe9, 0x38, 0x18, 0x90, 0x90, 0x0f, 0x60, 0xa9, 0xf6, 0xf5, + 0x75, 0x81, 0xf1, 0x19, 0x00, 0x27, 0x58, 0xf4, 0x02, 0x4a, 0xd6, 0x38, 0xa2, 0x87, 0x96, 0xe3, + 0xd2, 0x13, 0x12, 0x28, 0xb9, 0x19, 0xe4, 0xb0, 0x4d, 0xa9, 0x7b, 0xc0, 0xda, 0x17, 0x9f, 0xf3, + 0xdf, 0xfe, 0x3f, 0xdc, 0xbd, 0x74, 0xaf, 0xe5, 0xbf, 0x53, 0xf0, 0xf9, 0xd5, 0xc7, 0x40, 0x04, + 0xf8, 0x41, 0x1c, 0x6f, 0x68, 0x8e, 0xa8, 0x2d, 0x6a, 0x7d, 0xa9, 0xb6, 0x7d, 0xbb, 0x4b, 0x51, + 0xbb, 0x22, 0xd4, 0x1e, 0xb5, 0x09, 0x96, 0xfc, 0x44, 0x28, 0xff, 0x06, 0xd2, 0x94, 0x0d, 0x7d, + 0x01, 0x4a, 0xb7, 0xd3, 0x69, 0xe9, 0xed, 0x1d, 0x73, 0xaf, 0xd3, 0xd4, 0x2e, 0x0c, 0x18, 0xd6, + 0xa0, 0x9a, 0x61, 0x88, 0xe9, 0xf2, 0x3f, 0x90, 0x7a, 0xb8, 0xde, 0x36, 0xea, 0x62, 0xdc, 0xa4, + 0xd9, 0xb8, 0x61, 0x43, 0x41, 0x63, 0x93, 0x49, 0xce, 0x94, 0xff, 0xc9, 0x41, 0x76, 0x97, 0x86, + 0xd1, 0x95, 0xdd, 0xfa, 0x00, 0x20, 0x7e, 0x5f, 0x25, 0x34, 0x5c, 0x8c, 0x35, 0xba, 0x8d, 0xee, + 0x41, 0xfe, 0x3d, 0xf5, 0x08, 0xb3, 0x89, 0x6e, 0xcd, 0x31, 0x51, 0xb7, 0xcf, 0xa7, 0x33, 0xfb, + 0x09, 0xe9, 0x7c, 0x0e, 0xd9, 0x80, 0xba, 0x84, 0x97, 0xc4, 0xd2, 0xf5, 0x31, 0xd8, 0x41, 0x54, + 0x4c, 0x5d, 0x82, 0x39, 0x0c, 0x35, 0x26, 0x0c, 0x93, 0xe3, 0x01, 0xbe, 0x99, 0x2b, 0xc0, 0x05, + 0x7a, 0x69, 0x40, 0x21, 0x24, 0xc1, 0x89, 0xc3, 0xce, 0x92, 0xe7, 0xb4, 0xfa, 0xd5, 0x75, 0x61, + 0x0c, 0xe1, 0x8f, 0x27, 0x40, 0xf6, 0x9e, 0x09, 0xc7, 0x7d, 0x8f, 0x44, 0xec, 0xb2, 0x0a, 0xe2, + 0x3d, 0x23, 0x14, 0xba, 0x8d, 0xd6, 0x01, 0x05, 0xc4, 0x77, 0x9d, 0x81, 0xc5, 0x2a, 0xc2, 0x14, + 0x87, 0xe7, 0xd4, 0x5b, 0xc4, 0x77, 0xa7, 0x2c, 0x06, 0x37, 0xa0, 0xef, 0xa0, 0xe0, 0x07, 0x0e, + 0x0d, 0x9c, 0xe8, 0x94, 0x93, 0xaa, 0x54, 0xbb, 0x7f, 0xa9, 0xbe, 0x75, 0x2f, 0xda, 0xfc, 0x56, + 0x14, 0xf8, 0xc4, 0x19, 0x6d, 0x4f, 0x68, 0x5d, 0xe2, 0xb0, 0x47, 0xf3, 0x5c, 0xc7, 0x05, 0x4e, + 0x5f, 0x03, 0xd9, 0x0a, 0x43, 0x67, 0xe8, 0x99, 0xfe, 0xb8, 0xef, 0x3a, 0x03, 0xd3, 0xf1, 0x39, + 0xed, 0x16, 0xf0, 0x92, 0xd0, 0x77, 0xb9, 0x5a, 0xf7, 0x91, 0x01, 0xa5, 0x78, 0xef, 0x66, 0x74, + 0xea, 0x13, 0x65, 0x91, 0xa7, 0x60, 0x63, 0xbe, 0x1c, 0x0a, 0x60, 0xef, 0xd4, 0x27, 0x58, 0x0a, + 0x12, 0xa1, 0x5c, 0x85, 0x2c, 0xcb, 0x2f, 0x92, 0xa1, 0x84, 
0x3b, 0x2d, 0x6d, 0x8a, 0xd8, 0x00, + 0x72, 0x7b, 0x75, 0xa3, 0xa7, 0xe1, 0x98, 0xd6, 0xb4, 0x6e, 0x4b, 0x6f, 0xd4, 0xe5, 0x74, 0xf9, + 0x19, 0x48, 0x53, 0xe1, 0x90, 0x02, 0xcb, 0xb1, 0xcd, 0xec, 0xbd, 0xee, 0x6a, 0x17, 0xb8, 0xd5, + 0x78, 0xdd, 0x6e, 0x08, 0x6e, 0xe5, 0x5f, 0xff, 0x01, 0x1d, 0x7f, 0x48, 0x03, 0x24, 0xf7, 0x88, + 0xc2, 0x59, 0xf4, 0x94, 0xe2, 0x29, 0x79, 0x71, 0xe3, 0xc9, 0x9d, 0xc4, 0x9e, 0xcd, 0x4f, 0x6f, + 0x66, 0xf0, 0xd3, 0x6d, 0x29, 0x31, 0x59, 0x73, 0x26, 0x41, 0xbd, 0x99, 0x41, 0x50, 0x99, 0x4f, + 0x5f, 0x72, 0x06, 0x43, 0x5d, 0x3d, 0xbb, 0xff, 0x4c, 0x43, 0x3e, 0x6e, 0x47, 0xf4, 0x13, 0x64, + 0x79, 0x25, 0x8a, 0x21, 0xfd, 0x78, 0xce, 0x2e, 0x56, 0x79, 0x15, 0x72, 0xe4, 0xd4, 0x93, 0x35, + 0x3d, 0xdf, 0x53, 0xf3, 0x2c, 0xc6, 0xf9, 0x99, 0x52, 0x7e, 0x0a, 0x59, 0x5e, 0x8c, 0xcb, 0x20, + 0xc7, 0x45, 0x78, 0xf1, 0x85, 0xd8, 0x31, 0x7a, 0x3b, 0x58, 0x33, 0x7e, 0x69, 0xc9, 0x29, 0x56, + 0xda, 0x6c, 0xdc, 0x6b, 0x58, 0x4e, 0x97, 0xab, 0x37, 0xae, 0xc7, 0xf2, 0x3b, 0x28, 0x4e, 0x86, + 0x2b, 0x7a, 0xcc, 0x66, 0x8d, 0x10, 0x4c, 0x3f, 0x20, 0xa1, 0x98, 0x48, 0x62, 0xe8, 0xcb, 0x67, + 0x96, 0x2e, 0x37, 0xe8, 0xfc, 0x37, 0xcc, 0x76, 0xc2, 0x63, 0x33, 0x74, 0xde, 0x8b, 0x57, 0x6f, + 0x06, 0x17, 0x98, 0xc2, 0x70, 0xde, 0xb3, 0xb7, 0x5c, 0x89, 0x1b, 0xd9, 0xcd, 0x24, 0x1c, 0x00, + 0x4c, 0xc7, 0x0e, 0xa7, 0xdb, 0xdb, 0x9d, 0xdf, 0xf7, 0x86, 0x4e, 0x74, 0x34, 0xee, 0xab, 0x03, + 0x3a, 0xaa, 0x88, 0xab, 0x5a, 0x17, 0xbf, 0x88, 0x43, 0xba, 0x3e, 0x24, 0x1e, 0x9f, 0x57, 0x95, + 0x8f, 0xff, 0x3b, 0xfe, 0x90, 0x48, 0xfd, 0x1c, 0x07, 0x3c, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, + 0x71, 0x1d, 0xb4, 0xb6, 0x16, 0x10, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster_service.pb.go new file mode 100644 index 000000000..88185ac7f --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/cluster_service.pb.go @@ -0,0 +1,3216 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/cluster_service.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ListClusterLogsRequest_ServiceType int32 + +const ( + ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED ListClusterLogsRequest_ServiceType = 0 + // Logs of PostgreSQL activity. + ListClusterLogsRequest_POSTGRESQL ListClusterLogsRequest_ServiceType = 1 + // Logs of connection pooler activity. + ListClusterLogsRequest_POOLER ListClusterLogsRequest_ServiceType = 2 +) + +var ListClusterLogsRequest_ServiceType_name = map[int32]string{ + 0: "SERVICE_TYPE_UNSPECIFIED", + 1: "POSTGRESQL", + 2: "POOLER", +} +var ListClusterLogsRequest_ServiceType_value = map[string]int32{ + "SERVICE_TYPE_UNSPECIFIED": 0, + "POSTGRESQL": 1, + "POOLER": 2, +} + +func (x ListClusterLogsRequest_ServiceType) String() string { + return proto.EnumName(ListClusterLogsRequest_ServiceType_name, int32(x)) +} +func (ListClusterLogsRequest_ServiceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{18, 0} +} + +type GetClusterRequest struct { + // ID of the PostgreSQL Cluster resource to return. + // To get the cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{0} +} +func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b) +} +func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic) +} +func (dst *GetClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterRequest.Merge(dst, src) +} +func (m *GetClusterRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterRequest.Size(m) +} +func (m *GetClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo + +func (m *GetClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type ListClustersRequest struct { + // ID of the folder to list PostgreSQL clusters in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. 
The field name. Currently you can only use filtering with the [Cluster.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Мust be 1-63 characters long and match the regular expression `^[a-zA-Z0-9_-]+$`. + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{1} +} +func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b) +} +func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic) +} +func (dst *ListClustersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersRequest.Merge(dst, src) +} +func (m *ListClustersRequest) XXX_Size() int { + return xxx_messageInfo_ListClustersRequest.Size(m) +} +func (m *ListClustersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo + +func (m *ListClustersRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListClustersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListClustersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListClustersResponse struct { + // List of PostgreSQL Cluster resources. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value + // for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{2} +} +func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b) +} +func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic) +} +func (dst *ListClustersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersResponse.Merge(dst, src) +} +func (m *ListClustersResponse) XXX_Size() int { + return xxx_messageInfo_ListClustersResponse.Size(m) +} +func (m *ListClustersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateClusterRequest struct { + // ID of the folder to create the PostgreSQL cluster in. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the PostgreSQL cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the PostgreSQL cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the PostgreSQL cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the PostgreSQL cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.postgresql.v1.Cluster_Environment" json:"environment,omitempty"` + // Configuration and resources for hosts that should be created for the PostgreSQL cluster. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Descriptions of databases to be created in the PostgreSQL cluster. + DatabaseSpecs []*DatabaseSpec `protobuf:"bytes,7,rep,name=database_specs,json=databaseSpecs,proto3" json:"database_specs,omitempty"` + // Descriptions of database users to be created in the PostgreSQL cluster. + UserSpecs []*UserSpec `protobuf:"bytes,8,rep,name=user_specs,json=userSpecs,proto3" json:"user_specs,omitempty"` + // Individual configurations for hosts that should be created for the PostgreSQL cluster. + HostSpecs []*HostSpec `protobuf:"bytes,9,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the cluster in. 
+ NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{3} +} +func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b) +} +func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *CreateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterRequest.Merge(dst, src) +} +func (m *CreateClusterRequest) XXX_Size() int { + return xxx_messageInfo_CreateClusterRequest.Size(m) +} +func (m *CreateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo + +func (m *CreateClusterRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *CreateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *CreateClusterRequest) GetDatabaseSpecs() []*DatabaseSpec { + if m != nil { + return m.DatabaseSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetUserSpecs() []*UserSpec { + if m != nil { + return m.UserSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type CreateClusterMetadata struct { + // ID of the PostgreSQL cluster that is being created. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } +func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateClusterMetadata) ProtoMessage() {} +func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{4} +} +func (m *CreateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterMetadata.Unmarshal(m, b) +} +func (m *CreateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterMetadata.Merge(dst, src) +} +func (m *CreateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_CreateClusterMetadata.Size(m) +} +func (m *CreateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterMetadata proto.InternalMessageInfo + +func (m *CreateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type UpdateClusterRequest struct { + // ID of the PostgreSQL Cluster resource to update. + // To get the PostgreSQL cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Field mask that specifies which fields of the PostgreSQL Cluster resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New description of the PostgreSQL cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the PostgreSQL cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + // + // The new set of labels will completely replace the old ones. To add a label, request the current + // set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // New configuration and resources for hosts in the cluster. 
+ ConfigSpec *ConfigSpec `protobuf:"bytes,5,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{5} +} +func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b) +} +func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterRequest.Merge(dst, src) +} +func (m *UpdateClusterRequest) XXX_Size() int { + return xxx_messageInfo_UpdateClusterRequest.Size(m) +} +func (m *UpdateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo + +func (m *UpdateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *UpdateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type UpdateClusterMetadata struct { + // ID of the PostgreSQL Cluster resource that is being updated. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } +func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterMetadata) ProtoMessage() {} +func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{6} +} +func (m *UpdateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterMetadata.Unmarshal(m, b) +} +func (m *UpdateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterMetadata.Merge(dst, src) +} +func (m *UpdateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateClusterMetadata.Size(m) +} +func (m *UpdateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterMetadata proto.InternalMessageInfo + +func (m *UpdateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterRequest struct { + // ID of the PostgreSQL cluster to delete. + // To get the PostgreSQL cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{7} +} +func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b) +} +func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterRequest.Merge(dst, src) +} +func (m *DeleteClusterRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterRequest.Size(m) +} +func (m *DeleteClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo + +func (m *DeleteClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterMetadata struct { + // ID of the PostgreSQL cluster that is being deleted. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterMetadata) Reset() { *m = DeleteClusterMetadata{} } +func (m *DeleteClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterMetadata) ProtoMessage() {} +func (*DeleteClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{8} +} +func (m *DeleteClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterMetadata.Merge(dst, src) +} +func (m *DeleteClusterMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterMetadata.Size(m) +} +func (m *DeleteClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterMetadata proto.InternalMessageInfo + +func (m *DeleteClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterRequest struct { + // Required. ID of the PostgreSQL cluster to start. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterRequest) Reset() { *m = StartClusterRequest{} } +func (m *StartClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StartClusterRequest) ProtoMessage() {} +func (*StartClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{9} +} +func (m *StartClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterRequest.Unmarshal(m, b) +} +func (m *StartClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StartClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterRequest.Merge(dst, src) +} +func (m *StartClusterRequest) XXX_Size() int { + return xxx_messageInfo_StartClusterRequest.Size(m) +} +func (m *StartClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterRequest proto.InternalMessageInfo + +func (m *StartClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterMetadata struct { + // Required. ID of the PostgreSQL cluster. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterMetadata) Reset() { *m = StartClusterMetadata{} } +func (m *StartClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StartClusterMetadata) ProtoMessage() {} +func (*StartClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{10} +} +func (m *StartClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterMetadata.Unmarshal(m, b) +} +func (m *StartClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StartClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterMetadata.Merge(dst, src) +} +func (m *StartClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StartClusterMetadata.Size(m) +} +func (m *StartClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterMetadata proto.InternalMessageInfo + +func (m *StartClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterRequest struct { + // Required. ID of the PostgreSQL cluster to stop. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterRequest) Reset() { *m = StopClusterRequest{} } +func (m *StopClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StopClusterRequest) ProtoMessage() {} +func (*StopClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{11} +} +func (m *StopClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterRequest.Unmarshal(m, b) +} +func (m *StopClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StopClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterRequest.Merge(dst, src) +} +func (m *StopClusterRequest) XXX_Size() int { + return xxx_messageInfo_StopClusterRequest.Size(m) +} +func (m *StopClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterRequest proto.InternalMessageInfo + +func (m *StopClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterMetadata struct { + // Required. ID of the PostgreSQL cluster. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterMetadata) Reset() { *m = StopClusterMetadata{} } +func (m *StopClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StopClusterMetadata) ProtoMessage() {} +func (*StopClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{12} +} +func (m *StopClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterMetadata.Unmarshal(m, b) +} +func (m *StopClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StopClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterMetadata.Merge(dst, src) +} +func (m *StopClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StopClusterMetadata.Size(m) +} +func (m *StopClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterMetadata proto.InternalMessageInfo + +func (m *StopClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterRequest struct { + // ID of the PostgreSQL cluster to back up. + // To get the PostgreSQL cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterRequest) Reset() { *m = BackupClusterRequest{} } +func (m *BackupClusterRequest) String() string { return proto.CompactTextString(m) } +func (*BackupClusterRequest) ProtoMessage() {} +func (*BackupClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{13} +} +func (m *BackupClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterRequest.Unmarshal(m, b) +} +func (m *BackupClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterRequest.Marshal(b, m, deterministic) +} +func (dst *BackupClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterRequest.Merge(dst, src) +} +func (m *BackupClusterRequest) XXX_Size() int { + return xxx_messageInfo_BackupClusterRequest.Size(m) +} +func (m *BackupClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterRequest proto.InternalMessageInfo + +func (m *BackupClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterMetadata struct { + // ID of the PostgreSQL cluster that is being backed up. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterMetadata) Reset() { *m = BackupClusterMetadata{} } +func (m *BackupClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*BackupClusterMetadata) ProtoMessage() {} +func (*BackupClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{14} +} +func (m *BackupClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterMetadata.Unmarshal(m, b) +} +func (m *BackupClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *BackupClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterMetadata.Merge(dst, src) +} +func (m *BackupClusterMetadata) XXX_Size() int { + return xxx_messageInfo_BackupClusterMetadata.Size(m) +} +func (m *BackupClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterMetadata proto.InternalMessageInfo + +func (m *BackupClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type RestoreClusterRequest struct { + // ID of the backup to create a cluster from. + // To get the backup ID, use a [ClusterService.ListBackups] request. + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + // Timestamp of the moment to which the PostgreSQL cluster should be restored. + Time *timestamp.Timestamp `protobuf:"bytes,2,opt,name=time,proto3" json:"time,omitempty"` + // Flag that indicates whether a database should be restored to the first backup point + // available just after the timestamp specified in the [time] field instead of just before. + // + // Possible values: + // * false (default) — the restore point refers to the first backup moment before [time]. + // * true — the restore point refers to the first backup point after [time]. + TimeInclusive bool `protobuf:"varint,3,opt,name=time_inclusive,json=timeInclusive,proto3" json:"time_inclusive,omitempty"` + // Name of the new PostgreSQL cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the new PostgreSQL cluster. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the PostgreSQL cluster as `` key:value `` pairs. Maximum 64 per resource. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the new PostgreSQL cluster. + Environment Cluster_Environment `protobuf:"varint,7,opt,name=environment,proto3,enum=yandex.cloud.mdb.postgresql.v1.Cluster_Environment" json:"environment,omitempty"` + // Configuration for the PostgreSQL cluster to be created. + ConfigSpec *ConfigSpec `protobuf:"bytes,8,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Configurations for PostgreSQL hosts that should be created for + // the cluster that is being created from the backup. 
+ HostSpecs []*HostSpec `protobuf:"bytes,9,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the PostgreSQL cluster in. + NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterRequest) Reset() { *m = RestoreClusterRequest{} } +func (m *RestoreClusterRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterRequest) ProtoMessage() {} +func (*RestoreClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{15} +} +func (m *RestoreClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterRequest.Unmarshal(m, b) +} +func (m *RestoreClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterRequest.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterRequest.Merge(dst, src) +} +func (m *RestoreClusterRequest) XXX_Size() int { + return xxx_messageInfo_RestoreClusterRequest.Size(m) +} +func (m *RestoreClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterRequest proto.InternalMessageInfo + +func (m *RestoreClusterRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +func (m *RestoreClusterRequest) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +func (m *RestoreClusterRequest) GetTimeInclusive() bool { + if m != nil { + return m.TimeInclusive + } + return false +} + +func (m *RestoreClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RestoreClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *RestoreClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *RestoreClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *RestoreClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *RestoreClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *RestoreClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type RestoreClusterMetadata struct { + // ID of the new PostgreSQL cluster that is being created from a backup. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the backup that is being used for creating a cluster. 
+ BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterMetadata) Reset() { *m = RestoreClusterMetadata{} } +func (m *RestoreClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterMetadata) ProtoMessage() {} +func (*RestoreClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{16} +} +func (m *RestoreClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterMetadata.Unmarshal(m, b) +} +func (m *RestoreClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterMetadata.Merge(dst, src) +} +func (m *RestoreClusterMetadata) XXX_Size() int { + return xxx_messageInfo_RestoreClusterMetadata.Size(m) +} +func (m *RestoreClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterMetadata proto.InternalMessageInfo + +func (m *RestoreClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RestoreClusterMetadata) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type LogRecord struct { + // Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contents of the log record. + Message map[string]string `protobuf:"bytes,2,rep,name=message,proto3" json:"message,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{17} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogRecord.Unmarshal(m, b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) +} +func (dst *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(dst, src) +} +func (m *LogRecord) XXX_Size() int { + return xxx_messageInfo_LogRecord.Size(m) +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *LogRecord) GetMessage() map[string]string { + if m != nil { + return m.Message + } + return nil +} + +type ListClusterLogsRequest struct { + // ID of the PostgreSQL cluster to request logs for. + // To get the PostgreSQL cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Columns from the logs table to request. 
+ // If no columns are specified, entire log records are returned.
+ ColumnFilter []string `protobuf:"bytes,2,rep,name=column_filter,json=columnFilter,proto3" json:"column_filter,omitempty"`
+ // Type of the service to request logs about.
+ ServiceType ListClusterLogsRequest_ServiceType `protobuf:"varint,3,opt,name=service_type,json=serviceType,proto3,enum=yandex.cloud.mdb.postgresql.v1.ListClusterLogsRequest_ServiceType" json:"service_type,omitempty"`
+ // Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ FromTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=from_time,json=fromTime,proto3" json:"from_time,omitempty"`
+ // End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ ToTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=to_time,json=toTime,proto3" json:"to_time,omitempty"`
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ PageSize int64 `protobuf:"varint,6,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Page token. To get the next page of results, set [page_token] to the
+ // [ListClusterLogsResponse.next_page_token] returned by a previous list request.
+ PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Always return `next_page_token`, even if current page is empty.
+ AlwaysNextPageToken bool `protobuf:"varint,8,opt,name=always_next_page_token,json=alwaysNextPageToken,proto3" json:"always_next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListClusterLogsRequest) Reset() { *m = ListClusterLogsRequest{} }
+func (m *ListClusterLogsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListClusterLogsRequest) ProtoMessage() {}
+func (*ListClusterLogsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{18}
+}
+func (m *ListClusterLogsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListClusterLogsRequest.Unmarshal(m, b)
+}
+func (m *ListClusterLogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListClusterLogsRequest.Marshal(b, m, deterministic)
+}
+func (dst *ListClusterLogsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListClusterLogsRequest.Merge(dst, src)
+}
+func (m *ListClusterLogsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListClusterLogsRequest.Size(m)
+}
+func (m *ListClusterLogsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListClusterLogsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListClusterLogsRequest proto.InternalMessageInfo
+
+func (m *ListClusterLogsRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *ListClusterLogsRequest) GetColumnFilter() []string {
+ if m != nil {
+ return m.ColumnFilter
+ }
+ return nil
+}
+
+func (m *ListClusterLogsRequest) GetServiceType() ListClusterLogsRequest_ServiceType {
+ if m != nil {
+ return m.ServiceType
+ }
+ return ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED
+}
+
+func (m *ListClusterLogsRequest) GetFromTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.FromTime
+ }
+ return nil
+}
+
+func (m *ListClusterLogsRequest) GetToTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.ToTime
+ }
+ return nil
+}
+
+func (m *ListClusterLogsRequest) GetPageSize() int64 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListClusterLogsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+func (m *ListClusterLogsRequest) GetAlwaysNextPageToken() bool {
+ if m != nil {
+ return m.AlwaysNextPageToken
+ }
+ return false
+}
+
+type ListClusterLogsResponse struct {
+ // Requested log records.
+ Logs []*LogRecord `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterLogsRequest.page_token] query parameter in the next list request.
+ // Each subsequent list request will have its own [next_page_token] to continue paging through the results.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListClusterLogsResponse) Reset() { *m = ListClusterLogsResponse{} }
+func (m *ListClusterLogsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListClusterLogsResponse) ProtoMessage() {}
+func (*ListClusterLogsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{19}
+}
+func (m *ListClusterLogsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListClusterLogsResponse.Unmarshal(m, b)
+}
+func (m *ListClusterLogsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListClusterLogsResponse.Marshal(b, m, deterministic)
+}
+func (dst *ListClusterLogsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListClusterLogsResponse.Merge(dst, src)
+}
+func (m *ListClusterLogsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListClusterLogsResponse.Size(m)
+}
+func (m *ListClusterLogsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListClusterLogsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListClusterLogsResponse proto.InternalMessageInfo
+
+func (m *ListClusterLogsResponse) GetLogs() []*LogRecord {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+func (m *ListClusterLogsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+type ListClusterOperationsRequest struct {
+ // ID of the PostgreSQL Cluster resource to list operations for.
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token]
+ // returned by a previous list request.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsRequest) Reset() { *m = ListClusterOperationsRequest{} } +func (m *ListClusterOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsRequest) ProtoMessage() {} +func (*ListClusterOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{20} +} +func (m *ListClusterOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsRequest.Unmarshal(m, b) +} +func (m *ListClusterOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsRequest.Merge(dst, src) +} +func (m *ListClusterOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsRequest.Size(m) +} +func (m *ListClusterOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsRequest proto.InternalMessageInfo + +func (m *ListClusterOperationsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterOperationsResponse struct { + // List of Operation resources for the specified PostgreSQL cluster. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsResponse) Reset() { *m = ListClusterOperationsResponse{} } +func (m *ListClusterOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsResponse) ProtoMessage() {} +func (*ListClusterOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{21} +} +func (m *ListClusterOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsResponse.Unmarshal(m, b) +} +func (m *ListClusterOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsResponse.Merge(dst, src) +} +func (m *ListClusterOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsResponse.Size(m) +} +func (m *ListClusterOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsResponse proto.InternalMessageInfo + +func (m *ListClusterOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListClusterOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterBackupsRequest struct { + // ID of the PostgreSQL cluster. + // To get the PostgreSQL cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsRequest) Reset() { *m = ListClusterBackupsRequest{} } +func (m *ListClusterBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsRequest) ProtoMessage() {} +func (*ListClusterBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{22} +} +func (m *ListClusterBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsRequest.Unmarshal(m, b) +} +func (m *ListClusterBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsRequest.Merge(dst, src) +} +func (m *ListClusterBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsRequest.Size(m) +} +func (m *ListClusterBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsRequest proto.InternalMessageInfo + +func (m *ListClusterBackupsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterBackupsResponse struct { + // List of PostgreSQL Backup resources. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsResponse) Reset() { *m = ListClusterBackupsResponse{} } +func (m *ListClusterBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsResponse) ProtoMessage() {} +func (*ListClusterBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{23} +} +func (m *ListClusterBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsResponse.Unmarshal(m, b) +} +func (m *ListClusterBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsResponse.Merge(dst, src) +} +func (m *ListClusterBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsResponse.Size(m) +} +func (m *ListClusterBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsResponse proto.InternalMessageInfo + +func (m *ListClusterBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListClusterBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterHostsRequest struct { + // ID of the PostgreSQL cluster. + // To get the PostgreSQL cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsRequest) Reset() { *m = ListClusterHostsRequest{} } +func (m *ListClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsRequest) ProtoMessage() {} +func (*ListClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{24} +} +func (m *ListClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsRequest.Unmarshal(m, b) +} +func (m *ListClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsRequest.Merge(dst, src) +} +func (m *ListClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsRequest.Size(m) +} +func (m *ListClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsRequest proto.InternalMessageInfo + +func (m *ListClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterHostsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterHostsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterHostsResponse struct { + // List of Host resources. + Hosts []*Host `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsResponse) Reset() { *m = ListClusterHostsResponse{} } +func (m *ListClusterHostsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsResponse) ProtoMessage() {} +func (*ListClusterHostsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{25} +} +func (m *ListClusterHostsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsResponse.Unmarshal(m, b) +} +func (m *ListClusterHostsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsResponse.Merge(dst, src) +} +func (m *ListClusterHostsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsResponse.Size(m) +} +func (m *ListClusterHostsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsResponse proto.InternalMessageInfo + +func (m *ListClusterHostsResponse) GetHosts() []*Host { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *ListClusterHostsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type AddClusterHostsRequest struct { + // ID of the PostgreSQL cluster to add hosts to. + // To get the PostgreSQL cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Configurations for PostgreSQL hosts that should be added to the cluster. 
+ HostSpecs []*HostSpec `protobuf:"bytes,2,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsRequest) Reset() { *m = AddClusterHostsRequest{} } +func (m *AddClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsRequest) ProtoMessage() {} +func (*AddClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{26} +} +func (m *AddClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsRequest.Unmarshal(m, b) +} +func (m *AddClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsRequest.Merge(dst, src) +} +func (m *AddClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsRequest.Size(m) +} +func (m *AddClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsRequest proto.InternalMessageInfo + +func (m *AddClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +type AddClusterHostsMetadata struct { + // ID of the PostgreSQL cluster to which the hosts are being added. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being added to the cluster. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsMetadata) Reset() { *m = AddClusterHostsMetadata{} } +func (m *AddClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsMetadata) ProtoMessage() {} +func (*AddClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{27} +} +func (m *AddClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsMetadata.Unmarshal(m, b) +} +func (m *AddClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsMetadata.Merge(dst, src) +} +func (m *AddClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsMetadata.Size(m) +} +func (m *AddClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsMetadata proto.InternalMessageInfo + +func (m *AddClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsRequest struct { + // ID of the PostgreSQL cluster to remove hosts from. 
+ // To get the PostgreSQL cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts to delete. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsRequest) Reset() { *m = DeleteClusterHostsRequest{} } +func (m *DeleteClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsRequest) ProtoMessage() {} +func (*DeleteClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{28} +} +func (m *DeleteClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsRequest.Unmarshal(m, b) +} +func (m *DeleteClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsRequest.Merge(dst, src) +} +func (m *DeleteClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsRequest.Size(m) +} +func (m *DeleteClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsRequest proto.InternalMessageInfo + +func (m *DeleteClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsRequest) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsMetadata struct { + // ID of the PostgreSQL cluster to remove hosts from. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being deleted. 
+ HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsMetadata) Reset() { *m = DeleteClusterHostsMetadata{} } +func (m *DeleteClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsMetadata) ProtoMessage() {} +func (*DeleteClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{29} +} +func (m *DeleteClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsMetadata.Merge(dst, src) +} +func (m *DeleteClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsMetadata.Size(m) +} +func (m *DeleteClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsMetadata proto.InternalMessageInfo + +func (m *DeleteClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type UpdateClusterHostsRequest struct { + // ID of the PostgreSQL cluster to update hosts in. + // To get the PostgreSQL cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // New configurations to apply to hosts. 
+ UpdateHostSpecs []*UpdateHostSpec `protobuf:"bytes,2,rep,name=update_host_specs,json=updateHostSpecs,proto3" json:"update_host_specs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterHostsRequest) Reset() { *m = UpdateClusterHostsRequest{} } +func (m *UpdateClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterHostsRequest) ProtoMessage() {} +func (*UpdateClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{30} +} +func (m *UpdateClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterHostsRequest.Unmarshal(m, b) +} +func (m *UpdateClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterHostsRequest.Merge(dst, src) +} +func (m *UpdateClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_UpdateClusterHostsRequest.Size(m) +} +func (m *UpdateClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterHostsRequest proto.InternalMessageInfo + +func (m *UpdateClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterHostsRequest) GetUpdateHostSpecs() []*UpdateHostSpec { + if m != nil { + return m.UpdateHostSpecs + } + return nil +} + +type UpdateClusterHostsMetadata struct { + // ID of the PostgreSQL cluster to update hosts in. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being updated. 
+ HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterHostsMetadata) Reset() { *m = UpdateClusterHostsMetadata{} } +func (m *UpdateClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterHostsMetadata) ProtoMessage() {} +func (*UpdateClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{31} +} +func (m *UpdateClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterHostsMetadata.Unmarshal(m, b) +} +func (m *UpdateClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterHostsMetadata.Merge(dst, src) +} +func (m *UpdateClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateClusterHostsMetadata.Size(m) +} +func (m *UpdateClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterHostsMetadata proto.InternalMessageInfo + +func (m *UpdateClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type UpdateHostSpec struct { + // Name of the host to update. + // To get the PostgreSQL host name, use a [ClusterService.ListHosts] request. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // [Host.name] of the host to be used as the replication source (for cascading replication). + // To get the PostgreSQL host name, use a [ClusterService.ListHosts] request. + ReplicationSource string `protobuf:"bytes,2,opt,name=replication_source,json=replicationSource,proto3" json:"replication_source,omitempty"` + // The host with the highest priority is the synchronous replica. All others are asynchronous. + // The synchronous replica replaces the master when needed. + // + // When a replica becomes the master, its priority is ignored. + Priority *wrappers.Int64Value `protobuf:"bytes,3,opt,name=priority,proto3" json:"priority,omitempty"` + // Configuration of a PostgreSQL server for the host. 
+ ConfigSpec *ConfigHostSpec `protobuf:"bytes,4,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateHostSpec) Reset() { *m = UpdateHostSpec{} } +func (m *UpdateHostSpec) String() string { return proto.CompactTextString(m) } +func (*UpdateHostSpec) ProtoMessage() {} +func (*UpdateHostSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{32} +} +func (m *UpdateHostSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateHostSpec.Unmarshal(m, b) +} +func (m *UpdateHostSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateHostSpec.Marshal(b, m, deterministic) +} +func (dst *UpdateHostSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateHostSpec.Merge(dst, src) +} +func (m *UpdateHostSpec) XXX_Size() int { + return xxx_messageInfo_UpdateHostSpec.Size(m) +} +func (m *UpdateHostSpec) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateHostSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateHostSpec proto.InternalMessageInfo + +func (m *UpdateHostSpec) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *UpdateHostSpec) GetReplicationSource() string { + if m != nil { + return m.ReplicationSource + } + return "" +} + +func (m *UpdateHostSpec) GetPriority() *wrappers.Int64Value { + if m != nil { + return m.Priority + } + return nil +} + +func (m *UpdateHostSpec) GetConfigSpec() *ConfigHostSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +type HostSpec struct { + // ID of the availability zone where the host resides. + // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + ZoneId string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // ID of the subnet that the host should belong to. This subnet should be a part + // of the network that the cluster belongs to. + // The ID of the network is set in the field [Cluster.network_id]. + SubnetId string `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Whether the host should get a public IP address on creation. + // + // After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign + // a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + // + // Possible values: + // * false — don't assign a public IP to the host. + // * true — the host should have a public IP address. + AssignPublicIp bool `protobuf:"varint,3,opt,name=assign_public_ip,json=assignPublicIp,proto3" json:"assign_public_ip,omitempty"` + // [Host.name] of the host to be used as the replication source (for cascading replication). + ReplicationSource string `protobuf:"bytes,4,opt,name=replication_source,json=replicationSource,proto3" json:"replication_source,omitempty"` + // Priority of the host as a replica. A higher value corresponds to higher priority. + // + // The host with the highest priority is the synchronous replica. All others are asynchronous. + // The synchronous replica replaces the master when needed. + // + // When a replica becomes the master, its priority is ignored. + Priority *wrappers.Int64Value `protobuf:"bytes,5,opt,name=priority,proto3" json:"priority,omitempty"` + // Configuration of a PostgreSQL server for the host. 
+ ConfigSpec *ConfigHostSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HostSpec) Reset() { *m = HostSpec{} } +func (m *HostSpec) String() string { return proto.CompactTextString(m) } +func (*HostSpec) ProtoMessage() {} +func (*HostSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{33} +} +func (m *HostSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HostSpec.Unmarshal(m, b) +} +func (m *HostSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HostSpec.Marshal(b, m, deterministic) +} +func (dst *HostSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSpec.Merge(dst, src) +} +func (m *HostSpec) XXX_Size() int { + return xxx_messageInfo_HostSpec.Size(m) +} +func (m *HostSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HostSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSpec proto.InternalMessageInfo + +func (m *HostSpec) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *HostSpec) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *HostSpec) GetAssignPublicIp() bool { + if m != nil { + return m.AssignPublicIp + } + return false +} + +func (m *HostSpec) GetReplicationSource() string { + if m != nil { + return m.ReplicationSource + } + return "" +} + +func (m *HostSpec) GetPriority() *wrappers.Int64Value { + if m != nil { + return m.Priority + } + return nil +} + +func (m *HostSpec) GetConfigSpec() *ConfigHostSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +type ConfigSpec struct { + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Configuration of a PostgreSQL cluster. + // + // Types that are valid to be assigned to PostgresqlConfig: + // *ConfigSpec_PostgresqlConfig_9_6 + // *ConfigSpec_PostgresqlConfig_10 + // *ConfigSpec_PostgresqlConfig_11 + PostgresqlConfig isConfigSpec_PostgresqlConfig `protobuf_oneof:"postgresql_config"` + // Configuration of the connection pooler. + PoolerConfig *ConnectionPoolerConfig `protobuf:"bytes,4,opt,name=pooler_config,json=poolerConfig,proto3" json:"pooler_config,omitempty"` + // Resources allocated to PostgreSQL hosts. + Resources *Resources `protobuf:"bytes,5,opt,name=resources,proto3" json:"resources,omitempty"` + // Configuration setting which enables/disables autofailover in cluster. 
+ Autofailover *wrappers.BoolValue `protobuf:"bytes,6,opt,name=autofailover,proto3" json:"autofailover,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (m *ConfigSpec) String() string { return proto.CompactTextString(m) } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{34} +} +func (m *ConfigSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSpec.Unmarshal(m, b) +} +func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSpec.Marshal(b, m, deterministic) +} +func (dst *ConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec.Merge(dst, src) +} +func (m *ConfigSpec) XXX_Size() int { + return xxx_messageInfo_ConfigSpec.Size(m) +} +func (m *ConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo + +func (m *ConfigSpec) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type isConfigSpec_PostgresqlConfig interface { + isConfigSpec_PostgresqlConfig() +} + +type ConfigSpec_PostgresqlConfig_9_6 struct { + PostgresqlConfig_9_6 *config.PostgresqlConfig9_6 `protobuf:"bytes,2,opt,name=postgresql_config_9_6,json=postgresqlConfig96,proto3,oneof"` +} + +type ConfigSpec_PostgresqlConfig_10 struct { + PostgresqlConfig_10 *config.PostgresqlConfig10 `protobuf:"bytes,3,opt,name=postgresql_config_10,json=postgresqlConfig10,proto3,oneof"` +} + +type ConfigSpec_PostgresqlConfig_11 struct { + PostgresqlConfig_11 *config.PostgresqlConfig11 `protobuf:"bytes,7,opt,name=postgresql_config_11,json=postgresqlConfig11,proto3,oneof"` +} + +func (*ConfigSpec_PostgresqlConfig_9_6) isConfigSpec_PostgresqlConfig() {} + +func (*ConfigSpec_PostgresqlConfig_10) isConfigSpec_PostgresqlConfig() {} + +func (*ConfigSpec_PostgresqlConfig_11) isConfigSpec_PostgresqlConfig() {} + +func (m *ConfigSpec) GetPostgresqlConfig() isConfigSpec_PostgresqlConfig { + if m != nil { + return m.PostgresqlConfig + } + return nil +} + +func (m *ConfigSpec) GetPostgresqlConfig_9_6() *config.PostgresqlConfig9_6 { + if x, ok := m.GetPostgresqlConfig().(*ConfigSpec_PostgresqlConfig_9_6); ok { + return x.PostgresqlConfig_9_6 + } + return nil +} + +func (m *ConfigSpec) GetPostgresqlConfig_10() *config.PostgresqlConfig10 { + if x, ok := m.GetPostgresqlConfig().(*ConfigSpec_PostgresqlConfig_10); ok { + return x.PostgresqlConfig_10 + } + return nil +} + +func (m *ConfigSpec) GetPostgresqlConfig_11() *config.PostgresqlConfig11 { + if x, ok := m.GetPostgresqlConfig().(*ConfigSpec_PostgresqlConfig_11); ok { + return x.PostgresqlConfig_11 + } + return nil +} + +func (m *ConfigSpec) GetPoolerConfig() *ConnectionPoolerConfig { + if m != nil { + return m.PoolerConfig + } + return nil +} + +func (m *ConfigSpec) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ConfigSpec) GetAutofailover() *wrappers.BoolValue { + if m != nil { + return m.Autofailover + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
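// Editor's note: the sketch below is illustrative only and is not part of the
// generated file; the helper name is hypothetical. It shows how a caller would
// populate the postgresql_config oneof declared above: the chosen version
// branch is wrapped in its generated single-field struct, and well-known
// wrapper types stand in for optional scalars such as autofailover.
func exampleConfigSpec() *ConfigSpec {
	return &ConfigSpec{
		Version: "11",
		// Select the PostgreSQL 11 branch of the oneof.
		PostgresqlConfig: &ConfigSpec_PostgresqlConfig_11{
			PostgresqlConfig_11: &config.PostgresqlConfig11{},
		},
		// BoolValue distinguishes "explicitly false" from "not set".
		Autofailover: &wrappers.BoolValue{Value: true},
	}
}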
+func (*ConfigSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigSpec_OneofMarshaler, _ConfigSpec_OneofUnmarshaler, _ConfigSpec_OneofSizer, []interface{}{ + (*ConfigSpec_PostgresqlConfig_9_6)(nil), + (*ConfigSpec_PostgresqlConfig_10)(nil), + (*ConfigSpec_PostgresqlConfig_11)(nil), + } +} + +func _ConfigSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigSpec) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *ConfigSpec_PostgresqlConfig_9_6: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_9_6); err != nil { + return err + } + case *ConfigSpec_PostgresqlConfig_10: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_10); err != nil { + return err + } + case *ConfigSpec_PostgresqlConfig_11: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_11); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigSpec.PostgresqlConfig has unexpected type %T", x) + } + return nil +} + +func _ConfigSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigSpec) + switch tag { + case 2: // postgresql_config.postgresql_config_9_6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlConfig9_6) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ConfigSpec_PostgresqlConfig_9_6{msg} + return true, err + case 3: // postgresql_config.postgresql_config_10 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlConfig10) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ConfigSpec_PostgresqlConfig_10{msg} + return true, err + case 7: // postgresql_config.postgresql_config_11 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlConfig11) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ConfigSpec_PostgresqlConfig_11{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigSpec) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *ConfigSpec_PostgresqlConfig_9_6: + s := proto.Size(x.PostgresqlConfig_9_6) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ConfigSpec_PostgresqlConfig_10: + s := proto.Size(x.PostgresqlConfig_10) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ConfigSpec_PostgresqlConfig_11: + s := proto.Size(x.PostgresqlConfig_11) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ConfigHostSpec struct { + // Types that are valid to be assigned to PostgresqlConfig: + // *ConfigHostSpec_PostgresqlConfig_9_6 + // *ConfigHostSpec_PostgresqlConfig_10 + // *ConfigHostSpec_PostgresqlConfig_11 + PostgresqlConfig isConfigHostSpec_PostgresqlConfig `protobuf_oneof:"postgresql_config"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigHostSpec) Reset() { *m = ConfigHostSpec{} } +func (m *ConfigHostSpec) String() string { return proto.CompactTextString(m) } +func (*ConfigHostSpec) 
ProtoMessage() {} +func (*ConfigHostSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_28f92dbbf72ca094, []int{35} +} +func (m *ConfigHostSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigHostSpec.Unmarshal(m, b) +} +func (m *ConfigHostSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigHostSpec.Marshal(b, m, deterministic) +} +func (dst *ConfigHostSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigHostSpec.Merge(dst, src) +} +func (m *ConfigHostSpec) XXX_Size() int { + return xxx_messageInfo_ConfigHostSpec.Size(m) +} +func (m *ConfigHostSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigHostSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigHostSpec proto.InternalMessageInfo + +type isConfigHostSpec_PostgresqlConfig interface { + isConfigHostSpec_PostgresqlConfig() +} + +type ConfigHostSpec_PostgresqlConfig_9_6 struct { + PostgresqlConfig_9_6 *config.PostgresqlHostConfig9_6 `protobuf:"bytes,1,opt,name=postgresql_config_9_6,json=postgresqlConfig96,proto3,oneof"` +} + +type ConfigHostSpec_PostgresqlConfig_10 struct { + PostgresqlConfig_10 *config.PostgresqlHostConfig10 `protobuf:"bytes,2,opt,name=postgresql_config_10,json=postgresqlConfig10,proto3,oneof"` +} + +type ConfigHostSpec_PostgresqlConfig_11 struct { + PostgresqlConfig_11 *config.PostgresqlHostConfig11 `protobuf:"bytes,3,opt,name=postgresql_config_11,json=postgresqlConfig11,proto3,oneof"` +} + +func (*ConfigHostSpec_PostgresqlConfig_9_6) isConfigHostSpec_PostgresqlConfig() {} + +func (*ConfigHostSpec_PostgresqlConfig_10) isConfigHostSpec_PostgresqlConfig() {} + +func (*ConfigHostSpec_PostgresqlConfig_11) isConfigHostSpec_PostgresqlConfig() {} + +func (m *ConfigHostSpec) GetPostgresqlConfig() isConfigHostSpec_PostgresqlConfig { + if m != nil { + return m.PostgresqlConfig + } + return nil +} + +func (m *ConfigHostSpec) GetPostgresqlConfig_9_6() *config.PostgresqlHostConfig9_6 { + if x, ok := m.GetPostgresqlConfig().(*ConfigHostSpec_PostgresqlConfig_9_6); ok { + return x.PostgresqlConfig_9_6 + } + return nil +} + +func (m *ConfigHostSpec) GetPostgresqlConfig_10() *config.PostgresqlHostConfig10 { + if x, ok := m.GetPostgresqlConfig().(*ConfigHostSpec_PostgresqlConfig_10); ok { + return x.PostgresqlConfig_10 + } + return nil +} + +func (m *ConfigHostSpec) GetPostgresqlConfig_11() *config.PostgresqlHostConfig11 { + if x, ok := m.GetPostgresqlConfig().(*ConfigHostSpec_PostgresqlConfig_11); ok { + return x.PostgresqlConfig_11 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
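// Editor's note: illustrative sketch only, not part of the generated file; the
// helper name is hypothetical. Reading a per-host override back out of the
// postgresql_config oneof means type-switching on the generated wrapper
// structs; this reports which PostgreSQL version the override targets.
func exampleHostConfigVersion(spec *ConfigHostSpec) string {
	switch spec.GetPostgresqlConfig().(type) {
	case *ConfigHostSpec_PostgresqlConfig_9_6:
		return "9.6"
	case *ConfigHostSpec_PostgresqlConfig_10:
		return "10"
	case *ConfigHostSpec_PostgresqlConfig_11:
		return "11"
	default:
		// Nil receiver or no variant set.
		return "unspecified"
	}
}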
+func (*ConfigHostSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigHostSpec_OneofMarshaler, _ConfigHostSpec_OneofUnmarshaler, _ConfigHostSpec_OneofSizer, []interface{}{ + (*ConfigHostSpec_PostgresqlConfig_9_6)(nil), + (*ConfigHostSpec_PostgresqlConfig_10)(nil), + (*ConfigHostSpec_PostgresqlConfig_11)(nil), + } +} + +func _ConfigHostSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigHostSpec) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *ConfigHostSpec_PostgresqlConfig_9_6: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_9_6); err != nil { + return err + } + case *ConfigHostSpec_PostgresqlConfig_10: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_10); err != nil { + return err + } + case *ConfigHostSpec_PostgresqlConfig_11: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PostgresqlConfig_11); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigHostSpec.PostgresqlConfig has unexpected type %T", x) + } + return nil +} + +func _ConfigHostSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigHostSpec) + switch tag { + case 1: // postgresql_config.postgresql_config_9_6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlHostConfig9_6) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ConfigHostSpec_PostgresqlConfig_9_6{msg} + return true, err + case 2: // postgresql_config.postgresql_config_10 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlHostConfig10) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ConfigHostSpec_PostgresqlConfig_10{msg} + return true, err + case 3: // postgresql_config.postgresql_config_11 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.PostgresqlHostConfig11) + err := b.DecodeMessage(msg) + m.PostgresqlConfig = &ConfigHostSpec_PostgresqlConfig_11{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigHostSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigHostSpec) + // postgresql_config + switch x := m.PostgresqlConfig.(type) { + case *ConfigHostSpec_PostgresqlConfig_9_6: + s := proto.Size(x.PostgresqlConfig_9_6) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ConfigHostSpec_PostgresqlConfig_10: + s := proto.Size(x.PostgresqlConfig_10) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *ConfigHostSpec_PostgresqlConfig_11: + s := proto.Size(x.PostgresqlConfig_11) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*GetClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListClustersResponse") + proto.RegisterType((*CreateClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.CreateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), 
"yandex.cloud.mdb.postgresql.v1.CreateClusterRequest.LabelsEntry") + proto.RegisterType((*CreateClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.CreateClusterMetadata") + proto.RegisterType((*UpdateClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateClusterRequest.LabelsEntry") + proto.RegisterType((*UpdateClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateClusterMetadata") + proto.RegisterType((*DeleteClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteClusterRequest") + proto.RegisterType((*DeleteClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteClusterMetadata") + proto.RegisterType((*StartClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.StartClusterRequest") + proto.RegisterType((*StartClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.StartClusterMetadata") + proto.RegisterType((*StopClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.StopClusterRequest") + proto.RegisterType((*StopClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.StopClusterMetadata") + proto.RegisterType((*BackupClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.BackupClusterRequest") + proto.RegisterType((*BackupClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.BackupClusterMetadata") + proto.RegisterType((*RestoreClusterRequest)(nil), "yandex.cloud.mdb.postgresql.v1.RestoreClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.postgresql.v1.RestoreClusterRequest.LabelsEntry") + proto.RegisterType((*RestoreClusterMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.RestoreClusterMetadata") + proto.RegisterType((*LogRecord)(nil), "yandex.cloud.mdb.postgresql.v1.LogRecord") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.postgresql.v1.LogRecord.MessageEntry") + proto.RegisterType((*ListClusterLogsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterLogsRequest") + proto.RegisterType((*ListClusterLogsResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterLogsResponse") + proto.RegisterType((*ListClusterOperationsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterOperationsRequest") + proto.RegisterType((*ListClusterOperationsResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterOperationsResponse") + proto.RegisterType((*ListClusterBackupsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterBackupsRequest") + proto.RegisterType((*ListClusterBackupsResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterBackupsResponse") + proto.RegisterType((*ListClusterHostsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterHostsRequest") + proto.RegisterType((*ListClusterHostsResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListClusterHostsResponse") + proto.RegisterType((*AddClusterHostsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.AddClusterHostsRequest") + proto.RegisterType((*AddClusterHostsMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.AddClusterHostsMetadata") + proto.RegisterType((*DeleteClusterHostsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteClusterHostsRequest") + proto.RegisterType((*DeleteClusterHostsMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteClusterHostsMetadata") + proto.RegisterType((*UpdateClusterHostsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateClusterHostsRequest") + proto.RegisterType((*UpdateClusterHostsMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateClusterHostsMetadata") + proto.RegisterType((*UpdateHostSpec)(nil), 
"yandex.cloud.mdb.postgresql.v1.UpdateHostSpec") + proto.RegisterType((*HostSpec)(nil), "yandex.cloud.mdb.postgresql.v1.HostSpec") + proto.RegisterType((*ConfigSpec)(nil), "yandex.cloud.mdb.postgresql.v1.ConfigSpec") + proto.RegisterType((*ConfigHostSpec)(nil), "yandex.cloud.mdb.postgresql.v1.ConfigHostSpec") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.ListClusterLogsRequest_ServiceType", ListClusterLogsRequest_ServiceType_name, ListClusterLogsRequest_ServiceType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ClusterServiceClient is the client API for ClusterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterServiceClient interface { + // Returns the specified PostgreSQL Cluster resource. + // + // To get the list of available PostgreSQL Cluster resources, make a [List] request. + Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Retrieves the list of PostgreSQL Cluster resources that belong + // to the specified folder. + List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Creates a PostgreSQL cluster in the specified folder. + Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified PostgreSQL cluster. + Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified PostgreSQL cluster. + Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Start the specified PostgreSQL cluster. + Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Stop the specified PostgreSQL cluster. + Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a backup for the specified PostgreSQL cluster. + Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a new PostgreSQL cluster using the specified backup. + Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Retrieves logs for the specified PostgreSQL cluster. + // For more information about logs, see the [Logs](/docs/managed-postgresql/concepts/logs) section in the documentation. + ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) + // Retrieves the list of Operation resources for the specified cluster. + ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified PostgreSQL cluster. + ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. 
+ ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. + DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified hosts. + UpdateHosts(ctx context.Context, in *UpdateClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type clusterServiceClient struct { + cc *grpc.ClientConn +} + +func NewClusterServiceClient(cc *grpc.ClientConn) ClusterServiceClient { + return &clusterServiceClient{cc} +} + +func (c *clusterServiceClient) Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Backup", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/Restore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) { + out := new(ListClusterLogsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListLogs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) { + out := new(ListClusterOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) { + out := new(ListClusterBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListBackups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) { + out := new(ListClusterHostsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/AddHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/DeleteHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) UpdateHosts(ctx context.Context, in *UpdateClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ClusterService/UpdateHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServiceServer is the server API for ClusterService service. +type ClusterServiceServer interface { + // Returns the specified PostgreSQL Cluster resource. + // + // To get the list of available PostgreSQL Cluster resources, make a [List] request. + Get(context.Context, *GetClusterRequest) (*Cluster, error) + // Retrieves the list of PostgreSQL Cluster resources that belong + // to the specified folder. + List(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Creates a PostgreSQL cluster in the specified folder. 
+ Create(context.Context, *CreateClusterRequest) (*operation.Operation, error) + // Updates the specified PostgreSQL cluster. + Update(context.Context, *UpdateClusterRequest) (*operation.Operation, error) + // Deletes the specified PostgreSQL cluster. + Delete(context.Context, *DeleteClusterRequest) (*operation.Operation, error) + // Start the specified PostgreSQL cluster. + Start(context.Context, *StartClusterRequest) (*operation.Operation, error) + // Stop the specified PostgreSQL cluster. + Stop(context.Context, *StopClusterRequest) (*operation.Operation, error) + // Creates a backup for the specified PostgreSQL cluster. + Backup(context.Context, *BackupClusterRequest) (*operation.Operation, error) + // Creates a new PostgreSQL cluster using the specified backup. + Restore(context.Context, *RestoreClusterRequest) (*operation.Operation, error) + // Retrieves logs for the specified PostgreSQL cluster. + // For more information about logs, see the [Logs](/docs/managed-postgresql/concepts/logs) section in the documentation. + ListLogs(context.Context, *ListClusterLogsRequest) (*ListClusterLogsResponse, error) + // Retrieves the list of Operation resources for the specified cluster. + ListOperations(context.Context, *ListClusterOperationsRequest) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified PostgreSQL cluster. + ListBackups(context.Context, *ListClusterBackupsRequest) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(context.Context, *ListClusterHostsRequest) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(context.Context, *AddClusterHostsRequest) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. + DeleteHosts(context.Context, *DeleteClusterHostsRequest) (*operation.Operation, error) + // Updates the specified hosts. 
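	// Editor's note: illustrative sketch only, not part of the generated file
	// (kept as a comment so the interface body stays valid). A concrete
	// implementation satisfies this interface and is wired into a *grpc.Server
	// via RegisterClusterServiceServer, defined below; names here are
	// hypothetical:
	//
	//	srv := grpc.NewServer()
	//	RegisterClusterServiceServer(srv, &myClusterService{})
	//	// myClusterService must implement every method of ClusterServiceServer,
	//	// e.g. Get(ctx, *GetClusterRequest) (*Cluster, error).
	//	_ = srv.Serve(lis) // lis is a net.Listener established elsewhere.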
+ UpdateHosts(context.Context, *UpdateClusterHostsRequest) (*operation.Operation, error) +} + +func RegisterClusterServiceServer(s *grpc.Server, srv ClusterServiceServer) { + s.RegisterService(&_ClusterService_serviceDesc, srv) +} + +func _ClusterService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Get(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).List(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Create(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Update(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Delete(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _ClusterService_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Start(ctx, req.(*StartClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Stop(ctx, req.(*StopClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Backup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackupClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Backup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Backup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Backup(ctx, req.(*BackupClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Restore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Restore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/Restore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Restore(ctx, req.(*RestoreClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterLogsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListLogs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListLogs(ctx, req.(*ListClusterLogsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(ListClusterOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListOperations(ctx, req.(*ListClusterOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListBackups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListBackups(ctx, req.(*ListClusterBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/ListHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListHosts(ctx, req.(*ListClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_AddHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).AddHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/AddHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).AddHosts(ctx, req.(*AddClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_DeleteHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).DeleteHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/DeleteHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).DeleteHosts(ctx, req.(*DeleteClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_UpdateHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterHostsRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).UpdateHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ClusterService/UpdateHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).UpdateHosts(ctx, req.(*UpdateClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.postgresql.v1.ClusterService", + HandlerType: (*ClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ClusterService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ClusterService_List_Handler, + }, + { + MethodName: "Create", + Handler: _ClusterService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _ClusterService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _ClusterService_Delete_Handler, + }, + { + MethodName: "Start", + Handler: _ClusterService_Start_Handler, + }, + { + MethodName: "Stop", + Handler: _ClusterService_Stop_Handler, + }, + { + MethodName: "Backup", + Handler: _ClusterService_Backup_Handler, + }, + { + MethodName: "Restore", + Handler: _ClusterService_Restore_Handler, + }, + { + MethodName: "ListLogs", + Handler: _ClusterService_ListLogs_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ClusterService_ListOperations_Handler, + }, + { + MethodName: "ListBackups", + Handler: _ClusterService_ListBackups_Handler, + }, + { + MethodName: "ListHosts", + Handler: _ClusterService_ListHosts_Handler, + }, + { + MethodName: "AddHosts", + Handler: _ClusterService_AddHosts_Handler, + }, + { + MethodName: "DeleteHosts", + Handler: _ClusterService_DeleteHosts_Handler, + }, + { + MethodName: "UpdateHosts", + Handler: _ClusterService_UpdateHosts_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/postgresql/v1/cluster_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/cluster_service.proto", fileDescriptor_cluster_service_28f92dbbf72ca094) +} + +var fileDescriptor_cluster_service_28f92dbbf72ca094 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4f, 0x6f, 0x1b, 0xd7, + 0x11, 0xcf, 0x4a, 0x14, 0x45, 0x0e, 0x65, 0x45, 0x7e, 0x96, 0x1d, 0x86, 0x89, 0x1d, 0x99, 0x4d, + 0x62, 0x99, 0x36, 0xff, 0x4a, 0xa6, 0x4d, 0xc5, 0x76, 0x2c, 0xca, 0xb2, 0x43, 0x54, 0xb6, 0x94, + 0x95, 0x9c, 0xc0, 0x36, 0x0c, 0x62, 0xc9, 0x7d, 0xa2, 0x17, 0x22, 0xb9, 0xeb, 0xdd, 0xa5, 0x6c, + 0xb9, 0x48, 0xe1, 0xba, 0x40, 0x0f, 0x3e, 0x15, 0x28, 0xd0, 0x22, 0x29, 0xd0, 0x4f, 0xd0, 0x4b, + 0x75, 0x48, 0x80, 0xa0, 0xe8, 0x21, 0x17, 0xbb, 0x97, 0x16, 0x50, 0x0e, 0xfd, 0x02, 0x39, 0xf4, + 0x50, 0x14, 0x85, 0x8f, 0x3d, 0x15, 0xef, 0xcf, 0x92, 0xbb, 0x22, 0xa9, 0xdd, 0x25, 0x6d, 0x34, + 0x27, 0x69, 0xdf, 0x7b, 0x33, 0xf3, 0x9b, 0x79, 0xf3, 0xe6, 0xcd, 0xcc, 0x23, 0xcc, 0xef, 0x48, + 0x4d, 0x19, 0x3f, 0x4a, 0x57, 0xeb, 0x6a, 0x4b, 0x4e, 0x37, 0xe4, 0x4a, 0x5a, 0x53, 0x0d, 0xb3, + 0xa6, 0x63, 0xe3, 0x41, 0x3d, 0xbd, 0x9d, 0x4d, 0x57, 0xeb, 0x2d, 0xc3, 0xc4, 0x7a, 0xd9, 0xc0, + 0xfa, 0xb6, 0x52, 0xc5, 0x29, 0x4d, 0x57, 0x4d, 0x15, 0x9d, 0x60, 0x54, 0x29, 0x4a, 0x95, 0x6a, + 0xc8, 0x95, 0x54, 0x87, 0x2a, 0xb5, 0x9d, 0x8d, 0xbd, 0x5b, 0x53, 0xd5, 0x5a, 0x1d, 0xa7, 0x25, + 0x4d, 0x49, 0x4b, 0xcd, 0xa6, 0x6a, 0x4a, 0xa6, 0xa2, 0x36, 0x0d, 
0x46, 0x1d, 0x9b, 0xe1, 0xb3, + 0xf4, 0xab, 0xd2, 0xda, 0x4c, 0x6f, 0x2a, 0xb8, 0x2e, 0x97, 0x1b, 0x92, 0xb1, 0xc5, 0x57, 0xbc, + 0xb7, 0x7f, 0x85, 0xa9, 0x34, 0xb0, 0x61, 0x4a, 0x0d, 0x8d, 0x2f, 0x38, 0xb1, 0x7f, 0xc1, 0x43, + 0x5d, 0xd2, 0x34, 0xac, 0x5b, 0x22, 0x62, 0x5c, 0x2d, 0x02, 0x40, 0xd5, 0xb0, 0x4e, 0xe5, 0xf3, + 0xb9, 0x0f, 0x1d, 0x2a, 0xb7, 0x67, 0xbb, 0xd6, 0x1d, 0x77, 0xac, 0xdb, 0x96, 0xea, 0x8a, 0x6c, + 0x9f, 0x3e, 0xe3, 0x62, 0xb9, 0x8a, 0x54, 0xdd, 0x6a, 0x59, 0x78, 0xcf, 0x7a, 0x33, 0x33, 0x5f, + 0x9d, 0x74, 0x59, 0x2d, 0x4b, 0xa6, 0x54, 0x91, 0x0c, 0xbe, 0x1b, 0xb1, 0xd3, 0x2e, 0xcb, 0x5b, + 0x46, 0x9b, 0x73, 0xc1, 0x0d, 0x87, 0xda, 0xdc, 0x54, 0x6a, 0xb6, 0xc1, 0x42, 0x39, 0xcf, 0x49, + 0x2f, 0xf8, 0x25, 0xcd, 0x66, 0x06, 0xa6, 0xcc, 0x72, 0xca, 0x39, 0x6f, 0x94, 0xf7, 0x55, 0xc3, + 0xec, 0x00, 0xcd, 0x79, 0x27, 0x6a, 0x43, 0xf4, 0x43, 0xc3, 0xc1, 0xc5, 0xaf, 0xc0, 0xe1, 0xeb, + 0xd8, 0x5c, 0x62, 0x3b, 0x27, 0xe2, 0x07, 0x2d, 0x6c, 0x98, 0xe8, 0x0c, 0x80, 0x75, 0x64, 0x14, + 0x39, 0x2a, 0xcc, 0x08, 0xb3, 0xe1, 0xe2, 0xc4, 0x3f, 0x9f, 0x67, 0x85, 0x67, 0x2f, 0xb2, 0x81, + 0x8b, 0x97, 0xce, 0x65, 0xc4, 0x30, 0x9f, 0x2f, 0xc9, 0xf1, 0x6f, 0x04, 0x38, 0xb2, 0xa2, 0x18, + 0x16, 0x0f, 0xc3, 0x62, 0x72, 0x1a, 0xc2, 0x9b, 0x6a, 0x5d, 0xee, 0xcf, 0x23, 0xc4, 0xa6, 0x4b, + 0x32, 0x3a, 0x05, 0x61, 0x4d, 0xaa, 0xe1, 0xb2, 0xa1, 0x3c, 0xc6, 0xd1, 0x91, 0x19, 0x61, 0x76, + 0xb4, 0x08, 0xff, 0x7d, 0x9e, 0x0d, 0x5e, 0xbc, 0x94, 0xcd, 0x64, 0x32, 0x62, 0x88, 0x4c, 0xae, + 0x2b, 0x8f, 0x31, 0x9a, 0x05, 0xa0, 0x0b, 0x4d, 0x75, 0x0b, 0x37, 0xa3, 0xa3, 0x94, 0x69, 0xf8, + 0xd9, 0x8b, 0xec, 0x18, 0x5d, 0x29, 0x52, 0x2e, 0x1b, 0x64, 0x0e, 0xc5, 0x21, 0xb8, 0xa9, 0xd4, + 0x4d, 0xac, 0x47, 0x03, 0x74, 0x15, 0x3c, 0x7b, 0xd1, 0xe6, 0xc7, 0x67, 0xe2, 0xbf, 0x14, 0x60, + 0xda, 0x89, 0xdc, 0xd0, 0xd4, 0xa6, 0x81, 0xd1, 0x12, 0x84, 0xb8, 0x7e, 0x46, 0x54, 0x98, 0x19, + 0x9d, 0x8d, 0xe4, 0x4e, 0xa5, 0x0e, 0x0e, 0x16, 0x29, 0xcb, 0x82, 0x6d, 0x42, 0xf4, 0x21, 0xbc, + 0xd9, 0xc4, 0x8f, 0xcc, 0xb2, 0x0d, 0x30, 0x51, 0x2d, 0x2c, 0x1e, 0x22, 0xc3, 0x6b, 0x16, 0xd2, + 0xf8, 0xdf, 0x83, 0x30, 0xbd, 0xa4, 0x63, 0xc9, 0xc4, 0xfb, 0x76, 0xc1, 0x87, 0x01, 0x73, 0x10, + 0x68, 0x4a, 0x0d, 0x66, 0xbb, 0x70, 0xf1, 0x04, 0x59, 0xf5, 0xf2, 0x79, 0x76, 0xf2, 0xae, 0x94, + 0x7c, 0xbc, 0x98, 0xbc, 0x93, 0x49, 0x16, 0xca, 0xc9, 0x7b, 0x09, 0x46, 0x97, 0x9f, 0x13, 0xe9, + 0x5a, 0x74, 0x06, 0x22, 0x32, 0x36, 0xaa, 0xba, 0xa2, 0x91, 0x78, 0xe0, 0x34, 0x66, 0xee, 0x5c, + 0x5e, 0xb4, 0xcf, 0xa2, 0x2f, 0x05, 0x08, 0xd6, 0xa5, 0x0a, 0xae, 0x1b, 0xd1, 0x00, 0x35, 0xc8, + 0x15, 0x57, 0x83, 0xf4, 0x50, 0x29, 0xb5, 0x42, 0x59, 0x2c, 0x37, 0x4d, 0x7d, 0xa7, 0xf8, 0xf1, + 0xcb, 0xe7, 0xd9, 0xc8, 0xdd, 0x64, 0x39, 0x93, 0x2c, 0x48, 0xc9, 0xc7, 0xf7, 0x12, 0x4f, 0x19, + 0xbc, 0x79, 0x0b, 0xe6, 0xee, 0x8b, 0x6c, 0x30, 0x66, 0xfd, 0x87, 0xd0, 0x14, 0x51, 0xe6, 0x9e, + 0x6d, 0xbd, 0xc8, 0x01, 0xa1, 0xbb, 0x10, 0xc1, 0xcd, 0x6d, 0x45, 0x57, 0x9b, 0x0d, 0xdc, 0x34, + 0xa3, 0x63, 0x33, 0xc2, 0xec, 0x64, 0x6e, 0xce, 0xe3, 0x86, 0xa5, 0x96, 0x3b, 0xa4, 0xc5, 0x00, + 0x31, 0x9c, 0x68, 0xe7, 0x86, 0x3e, 0x85, 0x08, 0x3b, 0x36, 0x65, 0x43, 0xc3, 0xd5, 0x68, 0x70, + 0x46, 0x98, 0x8d, 0xe4, 0x12, 0xae, 0xcc, 0x29, 0xc9, 0xba, 0x86, 0xab, 0x9c, 0x27, 0x54, 0xdb, + 0x23, 0xe8, 0x36, 0x4c, 0x5a, 0xb1, 0x8f, 0x32, 0x35, 0xa2, 0xe3, 0xd4, 0xa4, 0x67, 0xdd, 0xb8, + 0x5e, 0xe5, 0x54, 0x36, 0xbe, 0x87, 0x64, 0xdb, 0x98, 0x81, 0x6e, 0x00, 0x90, 0x38, 0xc9, 0xd9, + 0x86, 0x28, 0xdb, 0x59, 0x37, 0xb6, 0xb7, 0x0c, 0xac, 0xdb, 0x58, 0x86, 0x5b, 0xfc, 0x9b, 
0xb2, + 0x23, 0xc1, 0x82, 0xb3, 0x0b, 0x7b, 0x63, 0xf7, 0x89, 0x6a, 0x98, 0x76, 0x76, 0xf7, 0xf9, 0xb7, + 0x41, 0xc2, 0x4a, 0x13, 0x9b, 0x0f, 0x55, 0x7d, 0x8b, 0x78, 0x34, 0xf4, 0x0a, 0x2b, 0x7c, 0xbe, + 0x24, 0xc7, 0x0a, 0x10, 0xb1, 0x79, 0x0b, 0x9a, 0x82, 0xd1, 0x2d, 0xbc, 0xc3, 0x8e, 0x81, 0x48, + 0xfe, 0x45, 0xd3, 0x30, 0xb6, 0x2d, 0xd5, 0x5b, 0xdc, 0xe9, 0x45, 0xf6, 0xb1, 0x30, 0x72, 0x41, + 0x88, 0xe7, 0xe1, 0xa8, 0xc3, 0xfb, 0x6e, 0x60, 0x53, 0x22, 0x76, 0x42, 0xc7, 0xbb, 0xe3, 0x9a, + 0x3d, 0x92, 0xfd, 0x22, 0x00, 0xd3, 0xb7, 0x34, 0xb9, 0xfb, 0x24, 0xfa, 0x89, 0x87, 0xe8, 0x23, + 0x88, 0xb4, 0x28, 0x13, 0x9a, 0x0b, 0x50, 0x74, 0x91, 0x5c, 0x2c, 0xc5, 0xee, 0xfa, 0x94, 0x75, + 0xd7, 0xa7, 0xae, 0x91, 0x74, 0xe1, 0x86, 0x64, 0x6c, 0x89, 0xc0, 0x96, 0x93, 0xff, 0x5f, 0xf7, + 0xa1, 0xec, 0xa5, 0xdd, 0xeb, 0x39, 0x94, 0x3f, 0x75, 0x9e, 0x9b, 0x31, 0xbf, 0xe7, 0xc6, 0x71, + 0x62, 0x52, 0x3c, 0xbc, 0x05, 0xa9, 0x39, 0x62, 0x6e, 0xa1, 0x6d, 0x48, 0xdf, 0x71, 0x18, 0xc9, + 0xab, 0xef, 0x2c, 0xc1, 0xf4, 0x55, 0x5c, 0xc7, 0x43, 0xb9, 0x0e, 0x11, 0xee, 0x60, 0xe2, 0x55, + 0x78, 0x11, 0x8e, 0xac, 0x9b, 0x92, 0x3e, 0xd4, 0x35, 0x7e, 0x0e, 0xa6, 0xed, 0x3c, 0xbc, 0x8a, + 0x5e, 0x04, 0xb4, 0x6e, 0xaa, 0xda, 0x30, 0x92, 0xe7, 0x09, 0xfa, 0x36, 0x0b, 0x1f, 0x06, 0x2f, + 0xd2, 0xe4, 0x74, 0x48, 0x83, 0x3b, 0x98, 0x78, 0x15, 0xfe, 0x72, 0x0c, 0x8e, 0x8a, 0xd8, 0x30, + 0x55, 0x7d, 0xff, 0x7e, 0x9f, 0x84, 0x30, 0xcb, 0x99, 0x3b, 0xd2, 0x59, 0x1c, 0x0c, 0xb1, 0xe1, + 0x92, 0x8c, 0xe6, 0x21, 0x40, 0x2a, 0x81, 0xbe, 0x91, 0x61, 0xc3, 0x2a, 0x13, 0x38, 0x25, 0x5d, + 0x8d, 0x3e, 0x80, 0x49, 0xf2, 0xb7, 0xac, 0x34, 0x09, 0x0c, 0x65, 0x1b, 0xd3, 0xe0, 0x10, 0x12, + 0x0f, 0x91, 0xd1, 0x92, 0x35, 0xd8, 0xce, 0x04, 0x02, 0x83, 0x67, 0x02, 0x63, 0x07, 0x06, 0x9d, + 0xaf, 0x3a, 0x41, 0x27, 0x48, 0x83, 0xce, 0xa2, 0xdb, 0xa1, 0xee, 0x69, 0xa8, 0xd7, 0x13, 0x75, + 0x6e, 0x39, 0x53, 0x81, 0xf1, 0x81, 0x53, 0x01, 0x67, 0x12, 0xb0, 0x2f, 0x98, 0x85, 0x86, 0x0a, + 0x66, 0xab, 0x43, 0x5d, 0xaa, 0xc1, 0xa7, 0x2f, 0xb2, 0x23, 0x97, 0x33, 0xff, 0x8f, 0x6b, 0x75, + 0x03, 0x8e, 0x39, 0xb7, 0xd2, 0xe3, 0x69, 0x41, 0xef, 0xd8, 0xcf, 0x04, 0x63, 0xdb, 0x3e, 0x0d, + 0xf1, 0xef, 0x05, 0x08, 0xaf, 0xa8, 0x35, 0x11, 0x57, 0x55, 0x5d, 0x46, 0x17, 0x20, 0xdc, 0xae, + 0x92, 0x29, 0xa3, 0x03, 0x0f, 0x88, 0xd8, 0x59, 0x8c, 0xd6, 0x60, 0xbc, 0x81, 0x0d, 0x43, 0xaa, + 0x11, 0xe4, 0xc4, 0xa6, 0x79, 0x37, 0x9b, 0xb6, 0xa5, 0xa6, 0x6e, 0x30, 0x42, 0x6a, 0x12, 0xd1, + 0x62, 0x13, 0x5b, 0x80, 0x09, 0xfb, 0x84, 0x2f, 0x5b, 0xfd, 0x3a, 0x00, 0xc7, 0x6c, 0xa5, 0xc5, + 0x8a, 0x5a, 0x33, 0x06, 0x4a, 0x26, 0x7e, 0x02, 0x87, 0xaa, 0x6a, 0xbd, 0xd5, 0x68, 0x96, 0x79, + 0x35, 0x43, 0x74, 0x0b, 0x8b, 0x13, 0x6c, 0xf0, 0x1a, 0x1d, 0x43, 0x18, 0x26, 0x78, 0x67, 0xa3, + 0x6c, 0xee, 0x68, 0x2c, 0x30, 0x4c, 0xe6, 0x8a, 0xae, 0xfa, 0xf7, 0xc4, 0x97, 0x5a, 0x67, 0xac, + 0x36, 0x76, 0x34, 0x2c, 0x46, 0x8c, 0xce, 0x07, 0x3a, 0x0f, 0xe1, 0x4d, 0x5d, 0x6d, 0x94, 0x69, + 0xf0, 0x0a, 0xb8, 0xee, 0x4d, 0x88, 0x2c, 0x26, 0x9f, 0x68, 0x0e, 0xc6, 0x4d, 0x95, 0x91, 0x8d, + 0xb9, 0x92, 0x05, 0x4d, 0x95, 0x12, 0x39, 0x6a, 0xc2, 0xa0, 0xe7, 0x9a, 0x70, 0xfc, 0x80, 0x9a, + 0x70, 0x0e, 0x8e, 0x49, 0xf5, 0x87, 0xd2, 0x8e, 0x51, 0xde, 0x5f, 0x98, 0x85, 0x68, 0x28, 0x3d, + 0xc2, 0x66, 0x6f, 0x3a, 0xca, 0xb3, 0xeb, 0x10, 0xb1, 0x59, 0x04, 0xbd, 0x0b, 0xd1, 0xf5, 0x65, + 0xf1, 0xb3, 0xd2, 0xd2, 0x72, 0x79, 0xe3, 0xf6, 0xda, 0x72, 0xf9, 0xd6, 0xcd, 0xf5, 0xb5, 0xe5, + 0xa5, 0xd2, 0xb5, 0xd2, 0xf2, 0xd5, 0xa9, 0x37, 0xd0, 0x24, 0xc0, 0xda, 0xea, 0xfa, 0xc6, 0x75, + 0x71, 0x79, 0xfd, 
0xd3, 0x95, 0x29, 0x01, 0x01, 0x04, 0xd7, 0x56, 0x57, 0x57, 0x96, 0xc5, 0xa9, + 0x91, 0xf8, 0x13, 0x01, 0xde, 0xea, 0x32, 0x39, 0x2f, 0x38, 0x2f, 0x41, 0xa0, 0xae, 0xd6, 0xac, + 0x62, 0xf3, 0xb4, 0x67, 0xcf, 0x15, 0x29, 0x99, 0xe7, 0x52, 0xf3, 0x0f, 0x02, 0xbc, 0x6b, 0x83, + 0xb0, 0x6a, 0xf5, 0x8a, 0x06, 0xf3, 0xcd, 0x57, 0x5f, 0xb5, 0xc7, 0x9f, 0x09, 0x70, 0xbc, 0x0f, + 0x40, 0x6e, 0xa9, 0x45, 0x80, 0x76, 0x8b, 0xcb, 0xb2, 0xd7, 0x49, 0xa7, 0xbd, 0x3a, 0x2d, 0xb0, + 0x36, 0xbd, 0x68, 0x23, 0xf2, 0x6c, 0xad, 0xdf, 0x0b, 0xf0, 0xb6, 0x0d, 0x0c, 0x4b, 0x14, 0x7e, + 0x34, 0xa6, 0xfa, 0x95, 0x00, 0xb1, 0x5e, 0xe8, 0xb8, 0x9d, 0xae, 0xc0, 0x38, 0x0b, 0xb1, 0x96, + 0x91, 0x3e, 0x74, 0x73, 0x2a, 0xc6, 0x41, 0xb4, 0xc8, 0x3c, 0x9b, 0xe9, 0x4b, 0xa7, 0x5f, 0x93, + 0x9b, 0xea, 0x47, 0x63, 0xa4, 0x9f, 0x43, 0xb4, 0x1b, 0x1a, 0xb7, 0xd0, 0x02, 0x8c, 0x91, 0x3b, + 0xd4, 0xb2, 0xcf, 0xfb, 0x5e, 0xae, 0x60, 0x91, 0x91, 0x78, 0xb6, 0xcd, 0x6f, 0x05, 0x38, 0xb6, + 0x28, 0xcb, 0x43, 0x9b, 0xc6, 0x99, 0x33, 0x8c, 0x0c, 0x9d, 0x33, 0xc4, 0x3f, 0x87, 0xb7, 0xf6, + 0xe1, 0xf2, 0x7a, 0x99, 0x1f, 0xe7, 0x50, 0x48, 0xe6, 0x68, 0xf0, 0xeb, 0x88, 0x32, 0xbe, 0x49, + 0x06, 0xe2, 0x8f, 0xe0, 0x6d, 0x47, 0x09, 0x33, 0xb8, 0xce, 0xa9, 0x6e, 0x41, 0xc5, 0x37, 0x99, + 0x26, 0x56, 0x6a, 0x3a, 0x67, 0x97, 0x7c, 0x07, 0x62, 0xdd, 0x92, 0x5f, 0x91, 0x56, 0x7f, 0x14, + 0xe0, 0x6d, 0x47, 0x59, 0x38, 0xb8, 0x5a, 0x15, 0x38, 0xcc, 0xdb, 0x03, 0x5d, 0x3b, 0x9a, 0xf2, + 0x56, 0xbe, 0x77, 0xed, 0xeb, 0x9b, 0x2d, 0xc7, 0x38, 0x35, 0x45, 0x37, 0xda, 0x57, 0x64, 0x8a, + 0x7f, 0x09, 0x30, 0xe9, 0xc4, 0x41, 0x6a, 0x9e, 0x36, 0x85, 0xb3, 0xe6, 0xb1, 0xc8, 0x50, 0x12, + 0x90, 0x8e, 0xb5, 0xba, 0x52, 0xa5, 0x31, 0xb8, 0x6c, 0xa8, 0x2d, 0xbd, 0x6a, 0xa5, 0x4d, 0x87, + 0x6d, 0x33, 0xeb, 0x74, 0x02, 0x9d, 0x87, 0x90, 0xa6, 0x2b, 0xaa, 0xae, 0x98, 0x3b, 0xf4, 0x7c, + 0x47, 0x72, 0xef, 0x74, 0xa5, 0x0c, 0xa5, 0xa6, 0x99, 0x9f, 0xff, 0x8c, 0x64, 0x5c, 0x62, 0x7b, + 0x31, 0x5a, 0x75, 0x66, 0xea, 0x2c, 0x4b, 0x49, 0x79, 0xcb, 0xd4, 0x2d, 0x7d, 0xec, 0xd9, 0x7a, + 0xfc, 0x9b, 0x11, 0x08, 0xd9, 0x14, 0x1d, 0x7f, 0xac, 0x36, 0x71, 0x67, 0x97, 0x43, 0xed, 0x1d, + 0x0e, 0x92, 0x89, 0x92, 0x8c, 0x3e, 0x80, 0xb0, 0xd1, 0xaa, 0x34, 0xb1, 0xd9, 0xce, 0x75, 0x6d, + 0x8b, 0x42, 0x6c, 0xaa, 0x24, 0xa3, 0x59, 0x98, 0x92, 0x0c, 0x43, 0xa9, 0x35, 0xcb, 0x5a, 0xab, + 0x52, 0x57, 0xaa, 0x65, 0x45, 0xe3, 0xf5, 0xdc, 0x24, 0x1b, 0x5f, 0xa3, 0xc3, 0x25, 0xad, 0x8f, + 0xe5, 0x02, 0x5e, 0x2c, 0x37, 0x36, 0x84, 0xe5, 0x82, 0x43, 0x5b, 0xee, 0x3f, 0x01, 0x80, 0x4e, + 0x09, 0x84, 0xa2, 0x30, 0xbe, 0x8d, 0x75, 0x83, 0x14, 0x98, 0xcc, 0xe5, 0xac, 0x4f, 0xa4, 0xc2, + 0xd1, 0x0e, 0xd3, 0x32, 0x07, 0x51, 0x28, 0xe7, 0x79, 0x81, 0xbc, 0xe0, 0x86, 0x81, 0x51, 0xa4, + 0xd6, 0xda, 0x83, 0x4c, 0x6a, 0xa1, 0x9c, 0xff, 0xe4, 0x0d, 0x11, 0x69, 0xfb, 0x87, 0xf3, 0xa8, + 0x01, 0xd3, 0xdd, 0x02, 0xb3, 0x19, 0xee, 0x69, 0x85, 0x01, 0xe5, 0x65, 0x33, 0xbd, 0xc4, 0x65, + 0x33, 0x7d, 0xc4, 0x65, 0x69, 0xaa, 0x3a, 0x84, 0xb8, 0x6c, 0x4f, 0x71, 0x59, 0x74, 0x17, 0x0e, + 0x69, 0xaa, 0x5a, 0xc7, 0x3a, 0x17, 0xc5, 0x0f, 0x41, 0xde, 0xc3, 0x56, 0x36, 0x71, 0x95, 0xb8, + 0xd2, 0x1a, 0x25, 0x67, 0x0c, 0xc5, 0x09, 0xcd, 0xf6, 0x85, 0xae, 0x43, 0x58, 0xc7, 0xcc, 0x07, + 0x0d, 0xee, 0x5f, 0xa7, 0x3d, 0xd4, 0xff, 0x8c, 0x40, 0xec, 0xd0, 0xa2, 0xcb, 0x30, 0x21, 0xb5, + 0x4c, 0x75, 0x53, 0x52, 0xea, 0xea, 0x36, 0xd6, 0xb9, 0xbf, 0x75, 0x17, 0x06, 0x45, 0x55, 0xad, + 0x33, 0x57, 0x75, 0xac, 0x2f, 0x1e, 0x81, 0xc3, 0x5d, 0x46, 0x8d, 0x3f, 0x19, 0x85, 0x49, 0xa7, + 0x47, 0x22, 0xa3, 0x9f, 0x73, 0xb1, 0xe2, 
0xf2, 0xb2, 0x6f, 0xeb, 0x13, 0xce, 0x6e, 0x0e, 0xf6, + 0xa0, 0x8f, 0x83, 0x31, 0x87, 0xbe, 0x34, 0x84, 0xcc, 0xbe, 0x4e, 0xf6, 0xa0, 0x8f, 0x93, 0x8d, + 0x0e, 0x2f, 0xb2, 0x8f, 0xa3, 0xf5, 0xdc, 0x82, 0xdc, 0x3f, 0xa2, 0x30, 0xc9, 0x6f, 0x1d, 0x5e, + 0x36, 0xa1, 0xdf, 0x09, 0x30, 0x7a, 0x1d, 0x9b, 0x28, 0xeb, 0x06, 0xa2, 0xeb, 0x21, 0x32, 0xe6, + 0xf5, 0xd9, 0x2d, 0x3e, 0xff, 0xf4, 0xfb, 0x1f, 0x7e, 0x33, 0x92, 0x42, 0x67, 0xd3, 0x0d, 0xa9, + 0x29, 0xd5, 0xb0, 0x9c, 0xec, 0xf9, 0x34, 0x6d, 0xa4, 0x7f, 0xd6, 0xb9, 0xfa, 0xbe, 0x40, 0x5f, + 0x09, 0x10, 0x20, 0xf9, 0x21, 0x9a, 0xf3, 0x51, 0x2b, 0x5b, 0xd7, 0x7e, 0x6c, 0xde, 0x1f, 0x11, + 0x4b, 0x3b, 0xe3, 0xa7, 0x28, 0xd2, 0x93, 0xe8, 0x3d, 0x17, 0xa4, 0xe8, 0x4f, 0x02, 0x04, 0xd9, + 0x33, 0x06, 0x9a, 0x1f, 0xe4, 0xb1, 0x2d, 0xe6, 0x5e, 0x16, 0xc5, 0x6f, 0xee, 0xee, 0x25, 0x66, + 0xfa, 0xbd, 0x96, 0x8c, 0xf3, 0x01, 0x0a, 0xf8, 0xfd, 0xb8, 0x1b, 0xe0, 0x05, 0x21, 0x81, 0xfe, + 0x22, 0x40, 0x90, 0x25, 0x07, 0xee, 0x98, 0x7b, 0xbd, 0x45, 0x78, 0xc1, 0x7c, 0x97, 0x61, 0xee, + 0xdd, 0xa5, 0x77, 0x60, 0xce, 0xe6, 0x7c, 0xb9, 0x03, 0x51, 0xe0, 0xaf, 0x02, 0x04, 0x59, 0x16, + 0xe9, 0xae, 0x40, 0xaf, 0x7e, 0xbf, 0x17, 0x05, 0x36, 0x77, 0xf7, 0x12, 0xa9, 0x7e, 0x9d, 0xfe, + 0xa3, 0xfb, 0x03, 0xe0, 0x72, 0x43, 0x33, 0x77, 0x98, 0x77, 0x27, 0xfc, 0x79, 0xf7, 0xb7, 0x02, + 0x8c, 0xd1, 0x96, 0xbe, 0xbb, 0x7b, 0xf7, 0x78, 0x3d, 0xf0, 0xa2, 0xc9, 0xed, 0xdd, 0xbd, 0xc4, + 0x7b, 0x7d, 0xde, 0x0d, 0x1c, 0x3b, 0x91, 0x89, 0xa7, 0xfa, 0xff, 0x5e, 0x64, 0xdf, 0x2e, 0x18, + 0x14, 0xf2, 0xd7, 0x02, 0x04, 0xd6, 0x4d, 0x55, 0x43, 0x39, 0x77, 0xec, 0xfb, 0x9f, 0x1f, 0xbc, + 0x40, 0xff, 0x7c, 0x77, 0x2f, 0x71, 0xa2, 0xf7, 0xc3, 0x83, 0x03, 0x79, 0x3a, 0x9e, 0xf4, 0x81, + 0x5c, 0xd5, 0xd0, 0x77, 0x02, 0x04, 0x59, 0x29, 0xed, 0xee, 0x41, 0xbd, 0x1e, 0x30, 0xbc, 0x80, + 0x2f, 0xb3, 0x23, 0xd0, 0xfb, 0xe9, 0xc2, 0x01, 0xff, 0x5c, 0x7c, 0xce, 0xd7, 0x11, 0x60, 0xc5, + 0x3f, 0xfa, 0xb3, 0x00, 0xe3, 0xbc, 0xd7, 0x8b, 0xce, 0x0d, 0xd4, 0xdf, 0xf7, 0xba, 0x07, 0x27, + 0xfb, 0x36, 0x95, 0x1d, 0x7a, 0x9c, 0x8d, 0x9f, 0x72, 0x0b, 0x3f, 0x3a, 0x63, 0x43, 0x4e, 0xf1, + 0xd7, 0x02, 0x84, 0x48, 0xf0, 0x5d, 0x51, 0x6b, 0x06, 0xca, 0x0f, 0xd6, 0x07, 0x8d, 0x9d, 0xf7, + 0x4d, 0xc7, 0x23, 0x7c, 0x81, 0x22, 0x9e, 0x43, 0x59, 0x5f, 0x96, 0xa7, 0x8d, 0xbc, 0xbf, 0x09, + 0x30, 0x49, 0xd8, 0x76, 0x1a, 0x5f, 0xe8, 0xa2, 0x0f, 0x18, 0x5d, 0x0d, 0xbd, 0xd8, 0xa5, 0x01, + 0xa9, 0xb9, 0x2a, 0x1f, 0x53, 0x55, 0x0a, 0xe8, 0xbc, 0x1f, 0x55, 0xd2, 0xb6, 0x5e, 0xdb, 0x77, + 0x02, 0x44, 0x88, 0x08, 0xde, 0x9e, 0x42, 0x05, 0x1f, 0x78, 0x9c, 0x0d, 0xb7, 0xd8, 0xc2, 0x20, + 0xa4, 0x5c, 0x8f, 0x8b, 0x54, 0x8f, 0x3c, 0x9a, 0xf7, 0xa5, 0x87, 0xd5, 0x09, 0xfb, 0x56, 0x80, + 0x30, 0x61, 0x4e, 0xcb, 0x68, 0xe4, 0xc7, 0x2f, 0xec, 0x6d, 0x82, 0xd8, 0x05, 0xff, 0x84, 0x1c, + 0xfe, 0x02, 0x85, 0x3f, 0x8f, 0x72, 0xbe, 0xe0, 0xb3, 0x56, 0xd5, 0x0f, 0x02, 0x84, 0x16, 0x65, + 0x99, 0x61, 0x77, 0x3d, 0x0b, 0xbd, 0x9b, 0x55, 0x5e, 0x0e, 0xf3, 0x17, 0xbb, 0x7b, 0x89, 0x4c, + 0xff, 0xae, 0xd2, 0x01, 0xf7, 0xda, 0x52, 0xfc, 0xb2, 0x7f, 0xbd, 0x16, 0x2a, 0x92, 0x59, 0xbd, + 0xcf, 0x12, 0x17, 0x72, 0xe4, 0xff, 0x2d, 0x40, 0x84, 0xdd, 0xa8, 0x4c, 0xd3, 0x82, 0xaf, 0xdb, + 0xdb, 0xaf, 0xb2, 0x4f, 0x84, 0xdd, 0xbd, 0xc4, 0xdc, 0x81, 0x0d, 0xa7, 0xd7, 0xa3, 0x30, 0x13, + 0x68, 0x29, 0xdc, 0xe9, 0xc3, 0x78, 0x50, 0xb8, 0x6f, 0xff, 0xca, 0x8f, 0xc2, 0x07, 0xb4, 0x95, + 0x5e, 0x8f, 0xc2, 0x4c, 0xe0, 0x82, 0x90, 0x28, 0xae, 0xde, 0xb9, 0x51, 0x53, 0xcc, 0xfb, 0xad, + 0x4a, 0xaa, 0xaa, 0x36, 0xd2, 0x0c, 0x71, 0x92, 0xfd, 0xd4, 0xb1, 
0xa6, 0x26, 0x6b, 0xb8, 0x49, + 0x05, 0xa7, 0x0f, 0xfe, 0x0d, 0xe4, 0x47, 0x9d, 0xaf, 0x4a, 0x90, 0x12, 0xcc, 0xfd, 0x2f, 0x00, + 0x00, 0xff, 0xff, 0x6c, 0xa1, 0x6e, 0x70, 0x3e, 0x2c, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host10.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host10.pb.go new file mode 100644 index 000000000..a78d23c19 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host10.pb.go @@ -0,0 +1,945 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/config/host10.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PostgresqlHostConfig10_ConstraintExclusion int32 + +const ( + PostgresqlHostConfig10_CONSTRAINT_EXCLUSION_UNSPECIFIED PostgresqlHostConfig10_ConstraintExclusion = 0 + PostgresqlHostConfig10_CONSTRAINT_EXCLUSION_ON PostgresqlHostConfig10_ConstraintExclusion = 1 + PostgresqlHostConfig10_CONSTRAINT_EXCLUSION_OFF PostgresqlHostConfig10_ConstraintExclusion = 2 + PostgresqlHostConfig10_CONSTRAINT_EXCLUSION_PARTITION PostgresqlHostConfig10_ConstraintExclusion = 3 +) + +var PostgresqlHostConfig10_ConstraintExclusion_name = map[int32]string{ + 0: "CONSTRAINT_EXCLUSION_UNSPECIFIED", + 1: "CONSTRAINT_EXCLUSION_ON", + 2: "CONSTRAINT_EXCLUSION_OFF", + 3: "CONSTRAINT_EXCLUSION_PARTITION", +} +var PostgresqlHostConfig10_ConstraintExclusion_value = map[string]int32{ + "CONSTRAINT_EXCLUSION_UNSPECIFIED": 0, + "CONSTRAINT_EXCLUSION_ON": 1, + "CONSTRAINT_EXCLUSION_OFF": 2, + "CONSTRAINT_EXCLUSION_PARTITION": 3, +} + +func (x PostgresqlHostConfig10_ConstraintExclusion) String() string { + return proto.EnumName(PostgresqlHostConfig10_ConstraintExclusion_name, int32(x)) +} +func (PostgresqlHostConfig10_ConstraintExclusion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 0} +} + +type PostgresqlHostConfig10_ForceParallelMode int32 + +const ( + PostgresqlHostConfig10_FORCE_PARALLEL_MODE_UNSPECIFIED PostgresqlHostConfig10_ForceParallelMode = 0 + PostgresqlHostConfig10_FORCE_PARALLEL_MODE_ON PostgresqlHostConfig10_ForceParallelMode = 1 + PostgresqlHostConfig10_FORCE_PARALLEL_MODE_OFF PostgresqlHostConfig10_ForceParallelMode = 2 + PostgresqlHostConfig10_FORCE_PARALLEL_MODE_REGRESS PostgresqlHostConfig10_ForceParallelMode = 3 +) + +var PostgresqlHostConfig10_ForceParallelMode_name = map[int32]string{ + 0: "FORCE_PARALLEL_MODE_UNSPECIFIED", + 1: "FORCE_PARALLEL_MODE_ON", + 2: "FORCE_PARALLEL_MODE_OFF", + 3: "FORCE_PARALLEL_MODE_REGRESS", +} +var PostgresqlHostConfig10_ForceParallelMode_value = map[string]int32{ + "FORCE_PARALLEL_MODE_UNSPECIFIED": 0, + "FORCE_PARALLEL_MODE_ON": 1, + 
"FORCE_PARALLEL_MODE_OFF": 2, + "FORCE_PARALLEL_MODE_REGRESS": 3, +} + +func (x PostgresqlHostConfig10_ForceParallelMode) String() string { + return proto.EnumName(PostgresqlHostConfig10_ForceParallelMode_name, int32(x)) +} +func (PostgresqlHostConfig10_ForceParallelMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 1} +} + +type PostgresqlHostConfig10_LogLevel int32 + +const ( + PostgresqlHostConfig10_LOG_LEVEL_UNSPECIFIED PostgresqlHostConfig10_LogLevel = 0 + PostgresqlHostConfig10_LOG_LEVEL_DEBUG5 PostgresqlHostConfig10_LogLevel = 1 + PostgresqlHostConfig10_LOG_LEVEL_DEBUG4 PostgresqlHostConfig10_LogLevel = 2 + PostgresqlHostConfig10_LOG_LEVEL_DEBUG3 PostgresqlHostConfig10_LogLevel = 3 + PostgresqlHostConfig10_LOG_LEVEL_DEBUG2 PostgresqlHostConfig10_LogLevel = 4 + PostgresqlHostConfig10_LOG_LEVEL_DEBUG1 PostgresqlHostConfig10_LogLevel = 5 + PostgresqlHostConfig10_LOG_LEVEL_LOG PostgresqlHostConfig10_LogLevel = 6 + PostgresqlHostConfig10_LOG_LEVEL_NOTICE PostgresqlHostConfig10_LogLevel = 7 + PostgresqlHostConfig10_LOG_LEVEL_WARNING PostgresqlHostConfig10_LogLevel = 8 + PostgresqlHostConfig10_LOG_LEVEL_ERROR PostgresqlHostConfig10_LogLevel = 9 + PostgresqlHostConfig10_LOG_LEVEL_FATAL PostgresqlHostConfig10_LogLevel = 10 + PostgresqlHostConfig10_LOG_LEVEL_PANIC PostgresqlHostConfig10_LogLevel = 11 +) + +var PostgresqlHostConfig10_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "LOG_LEVEL_DEBUG5", + 2: "LOG_LEVEL_DEBUG4", + 3: "LOG_LEVEL_DEBUG3", + 4: "LOG_LEVEL_DEBUG2", + 5: "LOG_LEVEL_DEBUG1", + 6: "LOG_LEVEL_LOG", + 7: "LOG_LEVEL_NOTICE", + 8: "LOG_LEVEL_WARNING", + 9: "LOG_LEVEL_ERROR", + 10: "LOG_LEVEL_FATAL", + 11: "LOG_LEVEL_PANIC", +} +var PostgresqlHostConfig10_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "LOG_LEVEL_DEBUG5": 1, + "LOG_LEVEL_DEBUG4": 2, + "LOG_LEVEL_DEBUG3": 3, + "LOG_LEVEL_DEBUG2": 4, + "LOG_LEVEL_DEBUG1": 5, + "LOG_LEVEL_LOG": 6, + "LOG_LEVEL_NOTICE": 7, + "LOG_LEVEL_WARNING": 8, + "LOG_LEVEL_ERROR": 9, + "LOG_LEVEL_FATAL": 10, + "LOG_LEVEL_PANIC": 11, +} + +func (x PostgresqlHostConfig10_LogLevel) String() string { + return proto.EnumName(PostgresqlHostConfig10_LogLevel_name, int32(x)) +} +func (PostgresqlHostConfig10_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 2} +} + +type PostgresqlHostConfig10_LogErrorVerbosity int32 + +const ( + PostgresqlHostConfig10_LOG_ERROR_VERBOSITY_UNSPECIFIED PostgresqlHostConfig10_LogErrorVerbosity = 0 + PostgresqlHostConfig10_LOG_ERROR_VERBOSITY_TERSE PostgresqlHostConfig10_LogErrorVerbosity = 1 + PostgresqlHostConfig10_LOG_ERROR_VERBOSITY_DEFAULT PostgresqlHostConfig10_LogErrorVerbosity = 2 + PostgresqlHostConfig10_LOG_ERROR_VERBOSITY_VERBOSE PostgresqlHostConfig10_LogErrorVerbosity = 3 +) + +var PostgresqlHostConfig10_LogErrorVerbosity_name = map[int32]string{ + 0: "LOG_ERROR_VERBOSITY_UNSPECIFIED", + 1: "LOG_ERROR_VERBOSITY_TERSE", + 2: "LOG_ERROR_VERBOSITY_DEFAULT", + 3: "LOG_ERROR_VERBOSITY_VERBOSE", +} +var PostgresqlHostConfig10_LogErrorVerbosity_value = map[string]int32{ + "LOG_ERROR_VERBOSITY_UNSPECIFIED": 0, + "LOG_ERROR_VERBOSITY_TERSE": 1, + "LOG_ERROR_VERBOSITY_DEFAULT": 2, + "LOG_ERROR_VERBOSITY_VERBOSE": 3, +} + +func (x PostgresqlHostConfig10_LogErrorVerbosity) String() string { + return proto.EnumName(PostgresqlHostConfig10_LogErrorVerbosity_name, int32(x)) +} +func (PostgresqlHostConfig10_LogErrorVerbosity) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_host10_c67a1089e8774ac3, []int{0, 3} +} + +type PostgresqlHostConfig10_LogStatement int32 + +const ( + PostgresqlHostConfig10_LOG_STATEMENT_UNSPECIFIED PostgresqlHostConfig10_LogStatement = 0 + PostgresqlHostConfig10_LOG_STATEMENT_NONE PostgresqlHostConfig10_LogStatement = 1 + PostgresqlHostConfig10_LOG_STATEMENT_DDL PostgresqlHostConfig10_LogStatement = 2 + PostgresqlHostConfig10_LOG_STATEMENT_MOD PostgresqlHostConfig10_LogStatement = 3 + PostgresqlHostConfig10_LOG_STATEMENT_ALL PostgresqlHostConfig10_LogStatement = 4 +) + +var PostgresqlHostConfig10_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: "LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var PostgresqlHostConfig10_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x PostgresqlHostConfig10_LogStatement) String() string { + return proto.EnumName(PostgresqlHostConfig10_LogStatement_name, int32(x)) +} +func (PostgresqlHostConfig10_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 4} +} + +type PostgresqlHostConfig10_TransactionIsolation int32 + +const ( + PostgresqlHostConfig10_TRANSACTION_ISOLATION_UNSPECIFIED PostgresqlHostConfig10_TransactionIsolation = 0 + PostgresqlHostConfig10_TRANSACTION_ISOLATION_READ_UNCOMMITTED PostgresqlHostConfig10_TransactionIsolation = 1 + PostgresqlHostConfig10_TRANSACTION_ISOLATION_READ_COMMITTED PostgresqlHostConfig10_TransactionIsolation = 2 + PostgresqlHostConfig10_TRANSACTION_ISOLATION_REPEATABLE_READ PostgresqlHostConfig10_TransactionIsolation = 3 + PostgresqlHostConfig10_TRANSACTION_ISOLATION_SERIALIZABLE PostgresqlHostConfig10_TransactionIsolation = 4 +) + +var PostgresqlHostConfig10_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var PostgresqlHostConfig10_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x PostgresqlHostConfig10_TransactionIsolation) String() string { + return proto.EnumName(PostgresqlHostConfig10_TransactionIsolation_name, int32(x)) +} +func (PostgresqlHostConfig10_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 5} +} + +type PostgresqlHostConfig10_ByteaOutput int32 + +const ( + PostgresqlHostConfig10_BYTEA_OUTPUT_UNSPECIFIED PostgresqlHostConfig10_ByteaOutput = 0 + PostgresqlHostConfig10_BYTEA_OUTPUT_HEX PostgresqlHostConfig10_ByteaOutput = 1 + PostgresqlHostConfig10_BYTEA_OUTPUT_ESCAPED PostgresqlHostConfig10_ByteaOutput = 2 +) + +var PostgresqlHostConfig10_ByteaOutput_name = map[int32]string{ + 0: "BYTEA_OUTPUT_UNSPECIFIED", + 1: "BYTEA_OUTPUT_HEX", + 2: "BYTEA_OUTPUT_ESCAPED", +} +var PostgresqlHostConfig10_ByteaOutput_value = map[string]int32{ + "BYTEA_OUTPUT_UNSPECIFIED": 0, + "BYTEA_OUTPUT_HEX": 1, + "BYTEA_OUTPUT_ESCAPED": 2, +} + +func (x PostgresqlHostConfig10_ByteaOutput) String() string { + return proto.EnumName(PostgresqlHostConfig10_ByteaOutput_name, 
int32(x)) +} +func (PostgresqlHostConfig10_ByteaOutput) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 6} +} + +type PostgresqlHostConfig10_XmlBinary int32 + +const ( + PostgresqlHostConfig10_XML_BINARY_UNSPECIFIED PostgresqlHostConfig10_XmlBinary = 0 + PostgresqlHostConfig10_XML_BINARY_BASE64 PostgresqlHostConfig10_XmlBinary = 1 + PostgresqlHostConfig10_XML_BINARY_HEX PostgresqlHostConfig10_XmlBinary = 2 +) + +var PostgresqlHostConfig10_XmlBinary_name = map[int32]string{ + 0: "XML_BINARY_UNSPECIFIED", + 1: "XML_BINARY_BASE64", + 2: "XML_BINARY_HEX", +} +var PostgresqlHostConfig10_XmlBinary_value = map[string]int32{ + "XML_BINARY_UNSPECIFIED": 0, + "XML_BINARY_BASE64": 1, + "XML_BINARY_HEX": 2, +} + +func (x PostgresqlHostConfig10_XmlBinary) String() string { + return proto.EnumName(PostgresqlHostConfig10_XmlBinary_name, int32(x)) +} +func (PostgresqlHostConfig10_XmlBinary) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 7} +} + +type PostgresqlHostConfig10_XmlOption int32 + +const ( + PostgresqlHostConfig10_XML_OPTION_UNSPECIFIED PostgresqlHostConfig10_XmlOption = 0 + PostgresqlHostConfig10_XML_OPTION_DOCUMENT PostgresqlHostConfig10_XmlOption = 1 + PostgresqlHostConfig10_XML_OPTION_CONTENT PostgresqlHostConfig10_XmlOption = 2 +) + +var PostgresqlHostConfig10_XmlOption_name = map[int32]string{ + 0: "XML_OPTION_UNSPECIFIED", + 1: "XML_OPTION_DOCUMENT", + 2: "XML_OPTION_CONTENT", +} +var PostgresqlHostConfig10_XmlOption_value = map[string]int32{ + "XML_OPTION_UNSPECIFIED": 0, + "XML_OPTION_DOCUMENT": 1, + "XML_OPTION_CONTENT": 2, +} + +func (x PostgresqlHostConfig10_XmlOption) String() string { + return proto.EnumName(PostgresqlHostConfig10_XmlOption_name, int32(x)) +} +func (PostgresqlHostConfig10_XmlOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 8} +} + +type PostgresqlHostConfig10_BackslashQuote int32 + +const ( + PostgresqlHostConfig10_BACKSLASH_QUOTE_UNSPECIFIED PostgresqlHostConfig10_BackslashQuote = 0 + PostgresqlHostConfig10_BACKSLASH_QUOTE PostgresqlHostConfig10_BackslashQuote = 1 + PostgresqlHostConfig10_BACKSLASH_QUOTE_ON PostgresqlHostConfig10_BackslashQuote = 2 + PostgresqlHostConfig10_BACKSLASH_QUOTE_OFF PostgresqlHostConfig10_BackslashQuote = 3 + PostgresqlHostConfig10_BACKSLASH_QUOTE_SAFE_ENCODING PostgresqlHostConfig10_BackslashQuote = 4 +) + +var PostgresqlHostConfig10_BackslashQuote_name = map[int32]string{ + 0: "BACKSLASH_QUOTE_UNSPECIFIED", + 1: "BACKSLASH_QUOTE", + 2: "BACKSLASH_QUOTE_ON", + 3: "BACKSLASH_QUOTE_OFF", + 4: "BACKSLASH_QUOTE_SAFE_ENCODING", +} +var PostgresqlHostConfig10_BackslashQuote_value = map[string]int32{ + "BACKSLASH_QUOTE_UNSPECIFIED": 0, + "BACKSLASH_QUOTE": 1, + "BACKSLASH_QUOTE_ON": 2, + "BACKSLASH_QUOTE_OFF": 3, + "BACKSLASH_QUOTE_SAFE_ENCODING": 4, +} + +func (x PostgresqlHostConfig10_BackslashQuote) String() string { + return proto.EnumName(PostgresqlHostConfig10_BackslashQuote_name, int32(x)) +} +func (PostgresqlHostConfig10_BackslashQuote) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0, 9} +} + +// Options and structure of `PostgresqlHostConfig` reflects PostgreSQL configuration file +// parameters whose detailed description is available in +// [PostgreSQL documentation](https://www.postgresql.org/docs/10/runtime-config.html). 
+type PostgresqlHostConfig10 struct { + RecoveryMinApplyDelay *wrappers.Int64Value `protobuf:"bytes,1,opt,name=recovery_min_apply_delay,json=recoveryMinApplyDelay,proto3" json:"recovery_min_apply_delay,omitempty"` + SharedBuffers *wrappers.Int64Value `protobuf:"bytes,2,opt,name=shared_buffers,json=sharedBuffers,proto3" json:"shared_buffers,omitempty"` + TempBuffers *wrappers.Int64Value `protobuf:"bytes,3,opt,name=temp_buffers,json=tempBuffers,proto3" json:"temp_buffers,omitempty"` + WorkMem *wrappers.Int64Value `protobuf:"bytes,4,opt,name=work_mem,json=workMem,proto3" json:"work_mem,omitempty"` + ReplacementSortTuples *wrappers.Int64Value `protobuf:"bytes,5,opt,name=replacement_sort_tuples,json=replacementSortTuples,proto3" json:"replacement_sort_tuples,omitempty"` + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,6,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + BackendFlushAfter *wrappers.Int64Value `protobuf:"bytes,7,opt,name=backend_flush_after,json=backendFlushAfter,proto3" json:"backend_flush_after,omitempty"` + OldSnapshotThreshold *wrappers.Int64Value `protobuf:"bytes,8,opt,name=old_snapshot_threshold,json=oldSnapshotThreshold,proto3" json:"old_snapshot_threshold,omitempty"` + MaxStandbyStreamingDelay *wrappers.Int64Value `protobuf:"bytes,9,opt,name=max_standby_streaming_delay,json=maxStandbyStreamingDelay,proto3" json:"max_standby_streaming_delay,omitempty"` + ConstraintExclusion PostgresqlHostConfig10_ConstraintExclusion `protobuf:"varint,10,opt,name=constraint_exclusion,json=constraintExclusion,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_ConstraintExclusion" json:"constraint_exclusion,omitempty"` + CursorTupleFraction *wrappers.DoubleValue `protobuf:"bytes,11,opt,name=cursor_tuple_fraction,json=cursorTupleFraction,proto3" json:"cursor_tuple_fraction,omitempty"` + FromCollapseLimit *wrappers.Int64Value `protobuf:"bytes,12,opt,name=from_collapse_limit,json=fromCollapseLimit,proto3" json:"from_collapse_limit,omitempty"` + JoinCollapseLimit *wrappers.Int64Value `protobuf:"bytes,13,opt,name=join_collapse_limit,json=joinCollapseLimit,proto3" json:"join_collapse_limit,omitempty"` + ForceParallelMode PostgresqlHostConfig10_ForceParallelMode `protobuf:"varint,14,opt,name=force_parallel_mode,json=forceParallelMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_ForceParallelMode" json:"force_parallel_mode,omitempty"` + ClientMinMessages PostgresqlHostConfig10_LogLevel `protobuf:"varint,15,opt,name=client_min_messages,json=clientMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogLevel" json:"client_min_messages,omitempty"` + LogMinMessages PostgresqlHostConfig10_LogLevel `protobuf:"varint,16,opt,name=log_min_messages,json=logMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogLevel" json:"log_min_messages,omitempty"` + LogMinErrorStatement PostgresqlHostConfig10_LogLevel `protobuf:"varint,17,opt,name=log_min_error_statement,json=logMinErrorStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogLevel" json:"log_min_error_statement,omitempty"` + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,18,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" json:"log_min_duration_statement,omitempty"` + LogCheckpoints *wrappers.BoolValue `protobuf:"bytes,19,opt,name=log_checkpoints,json=logCheckpoints,proto3" json:"log_checkpoints,omitempty"` + LogConnections 
*wrappers.BoolValue `protobuf:"bytes,20,opt,name=log_connections,json=logConnections,proto3" json:"log_connections,omitempty"` + LogDisconnections *wrappers.BoolValue `protobuf:"bytes,21,opt,name=log_disconnections,json=logDisconnections,proto3" json:"log_disconnections,omitempty"` + LogDuration *wrappers.BoolValue `protobuf:"bytes,22,opt,name=log_duration,json=logDuration,proto3" json:"log_duration,omitempty"` + LogErrorVerbosity PostgresqlHostConfig10_LogErrorVerbosity `protobuf:"varint,23,opt,name=log_error_verbosity,json=logErrorVerbosity,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogErrorVerbosity" json:"log_error_verbosity,omitempty"` + LogLockWaits *wrappers.BoolValue `protobuf:"bytes,24,opt,name=log_lock_waits,json=logLockWaits,proto3" json:"log_lock_waits,omitempty"` + LogStatement PostgresqlHostConfig10_LogStatement `protobuf:"varint,25,opt,name=log_statement,json=logStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogStatement" json:"log_statement,omitempty"` + LogTempFiles *wrappers.Int64Value `protobuf:"bytes,26,opt,name=log_temp_files,json=logTempFiles,proto3" json:"log_temp_files,omitempty"` + SearchPath string `protobuf:"bytes,27,opt,name=search_path,json=searchPath,proto3" json:"search_path,omitempty"` + RowSecurity *wrappers.BoolValue `protobuf:"bytes,28,opt,name=row_security,json=rowSecurity,proto3" json:"row_security,omitempty"` + DefaultTransactionIsolation PostgresqlHostConfig10_TransactionIsolation `protobuf:"varint,29,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + StatementTimeout *wrappers.Int64Value `protobuf:"bytes,30,opt,name=statement_timeout,json=statementTimeout,proto3" json:"statement_timeout,omitempty"` + LockTimeout *wrappers.Int64Value `protobuf:"bytes,31,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + IdleInTransactionSessionTimeout *wrappers.Int64Value `protobuf:"bytes,32,opt,name=idle_in_transaction_session_timeout,json=idleInTransactionSessionTimeout,proto3" json:"idle_in_transaction_session_timeout,omitempty"` + ByteaOutput PostgresqlHostConfig10_ByteaOutput `protobuf:"varint,33,opt,name=bytea_output,json=byteaOutput,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_ByteaOutput" json:"bytea_output,omitempty"` + Xmlbinary PostgresqlHostConfig10_XmlBinary `protobuf:"varint,34,opt,name=xmlbinary,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_XmlBinary" json:"xmlbinary,omitempty"` + Xmloption PostgresqlHostConfig10_XmlOption `protobuf:"varint,35,opt,name=xmloption,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_XmlOption" json:"xmloption,omitempty"` + GinPendingListLimit *wrappers.Int64Value `protobuf:"bytes,36,opt,name=gin_pending_list_limit,json=ginPendingListLimit,proto3" json:"gin_pending_list_limit,omitempty"` + DeadlockTimeout *wrappers.Int64Value `protobuf:"bytes,37,opt,name=deadlock_timeout,json=deadlockTimeout,proto3" json:"deadlock_timeout,omitempty"` + MaxLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,38,opt,name=max_locks_per_transaction,json=maxLocksPerTransaction,proto3" json:"max_locks_per_transaction,omitempty"` + MaxPredLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,39,opt,name=max_pred_locks_per_transaction,json=maxPredLocksPerTransaction,proto3" 
json:"max_pred_locks_per_transaction,omitempty"` + ArrayNulls *wrappers.BoolValue `protobuf:"bytes,40,opt,name=array_nulls,json=arrayNulls,proto3" json:"array_nulls,omitempty"` + BackslashQuote PostgresqlHostConfig10_BackslashQuote `protobuf:"varint,41,opt,name=backslash_quote,json=backslashQuote,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_BackslashQuote" json:"backslash_quote,omitempty"` + DefaultWithOids *wrappers.BoolValue `protobuf:"bytes,42,opt,name=default_with_oids,json=defaultWithOids,proto3" json:"default_with_oids,omitempty"` + EscapeStringWarning *wrappers.BoolValue `protobuf:"bytes,43,opt,name=escape_string_warning,json=escapeStringWarning,proto3" json:"escape_string_warning,omitempty"` + LoCompatPrivileges *wrappers.BoolValue `protobuf:"bytes,44,opt,name=lo_compat_privileges,json=loCompatPrivileges,proto3" json:"lo_compat_privileges,omitempty"` + OperatorPrecedenceWarning *wrappers.BoolValue `protobuf:"bytes,45,opt,name=operator_precedence_warning,json=operatorPrecedenceWarning,proto3" json:"operator_precedence_warning,omitempty"` + QuoteAllIdentifiers *wrappers.BoolValue `protobuf:"bytes,46,opt,name=quote_all_identifiers,json=quoteAllIdentifiers,proto3" json:"quote_all_identifiers,omitempty"` + StandardConformingStrings *wrappers.BoolValue `protobuf:"bytes,47,opt,name=standard_conforming_strings,json=standardConformingStrings,proto3" json:"standard_conforming_strings,omitempty"` + SynchronizeSeqscans *wrappers.BoolValue `protobuf:"bytes,48,opt,name=synchronize_seqscans,json=synchronizeSeqscans,proto3" json:"synchronize_seqscans,omitempty"` + TransformNullEquals *wrappers.BoolValue `protobuf:"bytes,49,opt,name=transform_null_equals,json=transformNullEquals,proto3" json:"transform_null_equals,omitempty"` + ExitOnError *wrappers.BoolValue `protobuf:"bytes,50,opt,name=exit_on_error,json=exitOnError,proto3" json:"exit_on_error,omitempty"` + SeqPageCost *wrappers.DoubleValue `protobuf:"bytes,51,opt,name=seq_page_cost,json=seqPageCost,proto3" json:"seq_page_cost,omitempty"` + RandomPageCost *wrappers.DoubleValue `protobuf:"bytes,52,opt,name=random_page_cost,json=randomPageCost,proto3" json:"random_page_cost,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlHostConfig10) Reset() { *m = PostgresqlHostConfig10{} } +func (m *PostgresqlHostConfig10) String() string { return proto.CompactTextString(m) } +func (*PostgresqlHostConfig10) ProtoMessage() {} +func (*PostgresqlHostConfig10) Descriptor() ([]byte, []int) { + return fileDescriptor_host10_c67a1089e8774ac3, []int{0} +} +func (m *PostgresqlHostConfig10) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlHostConfig10.Unmarshal(m, b) +} +func (m *PostgresqlHostConfig10) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlHostConfig10.Marshal(b, m, deterministic) +} +func (dst *PostgresqlHostConfig10) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlHostConfig10.Merge(dst, src) +} +func (m *PostgresqlHostConfig10) XXX_Size() int { + return xxx_messageInfo_PostgresqlHostConfig10.Size(m) +} +func (m *PostgresqlHostConfig10) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlHostConfig10.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlHostConfig10 proto.InternalMessageInfo + +func (m *PostgresqlHostConfig10) GetRecoveryMinApplyDelay() *wrappers.Int64Value { + if m != nil { + return m.RecoveryMinApplyDelay + } + return nil +} + +func (m 
*PostgresqlHostConfig10) GetSharedBuffers() *wrappers.Int64Value { + if m != nil { + return m.SharedBuffers + } + return nil +} + +func (m *PostgresqlHostConfig10) GetTempBuffers() *wrappers.Int64Value { + if m != nil { + return m.TempBuffers + } + return nil +} + +func (m *PostgresqlHostConfig10) GetWorkMem() *wrappers.Int64Value { + if m != nil { + return m.WorkMem + } + return nil +} + +func (m *PostgresqlHostConfig10) GetReplacementSortTuples() *wrappers.Int64Value { + if m != nil { + return m.ReplacementSortTuples + } + return nil +} + +func (m *PostgresqlHostConfig10) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func (m *PostgresqlHostConfig10) GetBackendFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BackendFlushAfter + } + return nil +} + +func (m *PostgresqlHostConfig10) GetOldSnapshotThreshold() *wrappers.Int64Value { + if m != nil { + return m.OldSnapshotThreshold + } + return nil +} + +func (m *PostgresqlHostConfig10) GetMaxStandbyStreamingDelay() *wrappers.Int64Value { + if m != nil { + return m.MaxStandbyStreamingDelay + } + return nil +} + +func (m *PostgresqlHostConfig10) GetConstraintExclusion() PostgresqlHostConfig10_ConstraintExclusion { + if m != nil { + return m.ConstraintExclusion + } + return PostgresqlHostConfig10_CONSTRAINT_EXCLUSION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetCursorTupleFraction() *wrappers.DoubleValue { + if m != nil { + return m.CursorTupleFraction + } + return nil +} + +func (m *PostgresqlHostConfig10) GetFromCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.FromCollapseLimit + } + return nil +} + +func (m *PostgresqlHostConfig10) GetJoinCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.JoinCollapseLimit + } + return nil +} + +func (m *PostgresqlHostConfig10) GetForceParallelMode() PostgresqlHostConfig10_ForceParallelMode { + if m != nil { + return m.ForceParallelMode + } + return PostgresqlHostConfig10_FORCE_PARALLEL_MODE_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetClientMinMessages() PostgresqlHostConfig10_LogLevel { + if m != nil { + return m.ClientMinMessages + } + return PostgresqlHostConfig10_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetLogMinMessages() PostgresqlHostConfig10_LogLevel { + if m != nil { + return m.LogMinMessages + } + return PostgresqlHostConfig10_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetLogMinErrorStatement() PostgresqlHostConfig10_LogLevel { + if m != nil { + return m.LogMinErrorStatement + } + return PostgresqlHostConfig10_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLogCheckpoints() *wrappers.BoolValue { + if m != nil { + return m.LogCheckpoints + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLogConnections() *wrappers.BoolValue { + if m != nil { + return m.LogConnections + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLogDisconnections() *wrappers.BoolValue { + if m != nil { + return m.LogDisconnections + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLogDuration() *wrappers.BoolValue { + if m != nil { + return m.LogDuration + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLogErrorVerbosity() PostgresqlHostConfig10_LogErrorVerbosity { + if m != nil { + return m.LogErrorVerbosity + } + return 
PostgresqlHostConfig10_LOG_ERROR_VERBOSITY_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetLogLockWaits() *wrappers.BoolValue { + if m != nil { + return m.LogLockWaits + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLogStatement() PostgresqlHostConfig10_LogStatement { + if m != nil { + return m.LogStatement + } + return PostgresqlHostConfig10_LOG_STATEMENT_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetLogTempFiles() *wrappers.Int64Value { + if m != nil { + return m.LogTempFiles + } + return nil +} + +func (m *PostgresqlHostConfig10) GetSearchPath() string { + if m != nil { + return m.SearchPath + } + return "" +} + +func (m *PostgresqlHostConfig10) GetRowSecurity() *wrappers.BoolValue { + if m != nil { + return m.RowSecurity + } + return nil +} + +func (m *PostgresqlHostConfig10) GetDefaultTransactionIsolation() PostgresqlHostConfig10_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return PostgresqlHostConfig10_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetStatementTimeout() *wrappers.Int64Value { + if m != nil { + return m.StatementTimeout + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *PostgresqlHostConfig10) GetIdleInTransactionSessionTimeout() *wrappers.Int64Value { + if m != nil { + return m.IdleInTransactionSessionTimeout + } + return nil +} + +func (m *PostgresqlHostConfig10) GetByteaOutput() PostgresqlHostConfig10_ByteaOutput { + if m != nil { + return m.ByteaOutput + } + return PostgresqlHostConfig10_BYTEA_OUTPUT_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetXmlbinary() PostgresqlHostConfig10_XmlBinary { + if m != nil { + return m.Xmlbinary + } + return PostgresqlHostConfig10_XML_BINARY_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetXmloption() PostgresqlHostConfig10_XmlOption { + if m != nil { + return m.Xmloption + } + return PostgresqlHostConfig10_XML_OPTION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetGinPendingListLimit() *wrappers.Int64Value { + if m != nil { + return m.GinPendingListLimit + } + return nil +} + +func (m *PostgresqlHostConfig10) GetDeadlockTimeout() *wrappers.Int64Value { + if m != nil { + return m.DeadlockTimeout + } + return nil +} + +func (m *PostgresqlHostConfig10) GetMaxLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxLocksPerTransaction + } + return nil +} + +func (m *PostgresqlHostConfig10) GetMaxPredLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxPredLocksPerTransaction + } + return nil +} + +func (m *PostgresqlHostConfig10) GetArrayNulls() *wrappers.BoolValue { + if m != nil { + return m.ArrayNulls + } + return nil +} + +func (m *PostgresqlHostConfig10) GetBackslashQuote() PostgresqlHostConfig10_BackslashQuote { + if m != nil { + return m.BackslashQuote + } + return PostgresqlHostConfig10_BACKSLASH_QUOTE_UNSPECIFIED +} + +func (m *PostgresqlHostConfig10) GetDefaultWithOids() *wrappers.BoolValue { + if m != nil { + return m.DefaultWithOids + } + return nil +} + +func (m *PostgresqlHostConfig10) GetEscapeStringWarning() *wrappers.BoolValue { + if m != nil { + return m.EscapeStringWarning + } + return nil +} + +func (m *PostgresqlHostConfig10) GetLoCompatPrivileges() *wrappers.BoolValue { + if m != nil { + return m.LoCompatPrivileges + } + return nil +} + +func (m *PostgresqlHostConfig10) GetOperatorPrecedenceWarning() *wrappers.BoolValue { + if m != nil { + return 
m.OperatorPrecedenceWarning + } + return nil +} + +func (m *PostgresqlHostConfig10) GetQuoteAllIdentifiers() *wrappers.BoolValue { + if m != nil { + return m.QuoteAllIdentifiers + } + return nil +} + +func (m *PostgresqlHostConfig10) GetStandardConformingStrings() *wrappers.BoolValue { + if m != nil { + return m.StandardConformingStrings + } + return nil +} + +func (m *PostgresqlHostConfig10) GetSynchronizeSeqscans() *wrappers.BoolValue { + if m != nil { + return m.SynchronizeSeqscans + } + return nil +} + +func (m *PostgresqlHostConfig10) GetTransformNullEquals() *wrappers.BoolValue { + if m != nil { + return m.TransformNullEquals + } + return nil +} + +func (m *PostgresqlHostConfig10) GetExitOnError() *wrappers.BoolValue { + if m != nil { + return m.ExitOnError + } + return nil +} + +func (m *PostgresqlHostConfig10) GetSeqPageCost() *wrappers.DoubleValue { + if m != nil { + return m.SeqPageCost + } + return nil +} + +func (m *PostgresqlHostConfig10) GetRandomPageCost() *wrappers.DoubleValue { + if m != nil { + return m.RandomPageCost + } + return nil +} + +func init() { + proto.RegisterType((*PostgresqlHostConfig10)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_ConstraintExclusion", PostgresqlHostConfig10_ConstraintExclusion_name, PostgresqlHostConfig10_ConstraintExclusion_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_ForceParallelMode", PostgresqlHostConfig10_ForceParallelMode_name, PostgresqlHostConfig10_ForceParallelMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogLevel", PostgresqlHostConfig10_LogLevel_name, PostgresqlHostConfig10_LogLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogErrorVerbosity", PostgresqlHostConfig10_LogErrorVerbosity_name, PostgresqlHostConfig10_LogErrorVerbosity_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_LogStatement", PostgresqlHostConfig10_LogStatement_name, PostgresqlHostConfig10_LogStatement_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_TransactionIsolation", PostgresqlHostConfig10_TransactionIsolation_name, PostgresqlHostConfig10_TransactionIsolation_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_ByteaOutput", PostgresqlHostConfig10_ByteaOutput_name, PostgresqlHostConfig10_ByteaOutput_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_XmlBinary", PostgresqlHostConfig10_XmlBinary_name, PostgresqlHostConfig10_XmlBinary_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_XmlOption", PostgresqlHostConfig10_XmlOption_name, PostgresqlHostConfig10_XmlOption_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig10_BackslashQuote", PostgresqlHostConfig10_BackslashQuote_name, PostgresqlHostConfig10_BackslashQuote_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/config/host10.proto", fileDescriptor_host10_c67a1089e8774ac3) +} + +var fileDescriptor_host10_c67a1089e8774ac3 = []byte{ + // 2210 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x99, 0xdb, 0x6f, 0xdb, 0xc8, + 0xf5, 0xc7, 0x7f, 0xb2, 0xb3, 0xd9, 0x64, 0x7c, 0xa3, 0x46, 0xbe, 0x30, 0xf6, 0xe6, 0xb2, 0xda, + 0x4d, 0x7e, 0xd9, 0x6d, 0x2d, 
0x59, 0x8e, 0x9b, 0x0d, 0xb0, 0xe8, 0x62, 0x29, 0x89, 0x72, 0xd4, + 0x52, 0xa2, 0x42, 0xd2, 0x8e, 0x37, 0xc5, 0x62, 0x30, 0x22, 0x47, 0x12, 0x9b, 0x21, 0x87, 0xe6, + 0x50, 0xbe, 0x14, 0x28, 0xfa, 0xd2, 0xa7, 0x3e, 0xf6, 0xa1, 0x40, 0xfb, 0x0f, 0x05, 0xfd, 0x47, + 0xfa, 0x47, 0xe4, 0xa9, 0x18, 0x52, 0xd4, 0xc5, 0x56, 0x4b, 0xa3, 0xce, 0x5b, 0x74, 0xe6, 0x7c, + 0x3f, 0xe7, 0x70, 0xce, 0x19, 0xf2, 0x4c, 0x0c, 0xf6, 0x2f, 0xb1, 0xef, 0x90, 0x8b, 0xb2, 0x4d, + 0xd9, 0xd0, 0x29, 0x7b, 0x4e, 0xb7, 0x1c, 0x30, 0x1e, 0xf5, 0x43, 0xc2, 0x4f, 0x69, 0xf9, 0xac, + 0x52, 0xb6, 0x99, 0xdf, 0x73, 0xfb, 0xe5, 0x01, 0xe3, 0x51, 0x65, 0xaf, 0x14, 0x84, 0x2c, 0x62, + 0xf0, 0x69, 0xa2, 0x29, 0xc5, 0x9a, 0x92, 0xe7, 0x74, 0x4b, 0x13, 0x4d, 0xe9, 0xac, 0x52, 0x4a, + 0x34, 0xdb, 0x8f, 0xfa, 0x8c, 0xf5, 0x29, 0x29, 0xc7, 0xa2, 0xee, 0xb0, 0x57, 0x3e, 0x0f, 0x71, + 0x10, 0x90, 0x90, 0x27, 0x98, 0xed, 0x87, 0x33, 0xa1, 0xcf, 0x30, 0x75, 0x1d, 0x1c, 0xb9, 0xcc, + 0x4f, 0x96, 0x8b, 0xff, 0x2c, 0x83, 0xcd, 0xce, 0x98, 0xfb, 0x9a, 0xf1, 0xa8, 0x16, 0x73, 0x2b, + 0x7b, 0xd0, 0x02, 0x72, 0x48, 0x6c, 0x76, 0x46, 0xc2, 0x4b, 0xe4, 0xb9, 0x3e, 0xc2, 0x41, 0x40, + 0x2f, 0x91, 0x43, 0x28, 0xbe, 0x94, 0x73, 0x4f, 0x72, 0xcf, 0x97, 0xf6, 0x77, 0x4a, 0x49, 0xf0, + 0x52, 0x1a, 0xbc, 0xd4, 0xf4, 0xa3, 0x97, 0x07, 0xc7, 0x98, 0x0e, 0x89, 0xb1, 0x91, 0x8a, 0x5b, + 0xae, 0xaf, 0x08, 0x69, 0x5d, 0x28, 0x61, 0x15, 0xac, 0xf2, 0x01, 0x0e, 0x89, 0x83, 0xba, 0xc3, + 0x5e, 0x8f, 0x84, 0x5c, 0x5e, 0xc8, 0x66, 0xad, 0x24, 0x92, 0x6a, 0xa2, 0x80, 0x3f, 0x80, 0xe5, + 0x88, 0x78, 0xc1, 0x98, 0xb0, 0x98, 0x4d, 0x58, 0x12, 0x82, 0x54, 0xff, 0x12, 0xdc, 0x3b, 0x67, + 0xe1, 0x7b, 0xe4, 0x11, 0x4f, 0xbe, 0x93, 0xad, 0xfd, 0x5c, 0x38, 0xb7, 0x88, 0x07, 0x4d, 0xb0, + 0x15, 0x92, 0x80, 0x62, 0x9b, 0x78, 0xc4, 0x8f, 0x10, 0x67, 0x61, 0x84, 0xa2, 0x61, 0x40, 0x09, + 0x97, 0x3f, 0xbb, 0xd1, 0x86, 0x8c, 0xb5, 0x26, 0x0b, 0x23, 0x2b, 0x56, 0xc2, 0x1a, 0x58, 0x8b, + 0x1f, 0xa6, 0xe7, 0x52, 0x82, 0xa8, 0xeb, 0xb9, 0x91, 0x7c, 0xf7, 0x06, 0x3b, 0x22, 0x34, 0x0d, + 0x97, 0x12, 0x4d, 0x28, 0xe0, 0x5b, 0x50, 0xe8, 0x62, 0xfb, 0x3d, 0xf1, 0x1d, 0xd4, 0xa3, 0x43, + 0x3e, 0x40, 0xb8, 0x17, 0x91, 0x50, 0xfe, 0x3c, 0x13, 0x54, 0x05, 0x1f, 0x3f, 0x54, 0xee, 0xee, + 0xed, 0xee, 0xef, 0x1d, 0xbc, 0x32, 0xf2, 0x23, 0x46, 0x43, 0x20, 0x14, 0x41, 0x80, 0x08, 0x6c, + 0x32, 0xea, 0x20, 0xee, 0xe3, 0x80, 0x0f, 0x58, 0x84, 0xa2, 0x41, 0x48, 0xf8, 0x80, 0x51, 0x47, + 0xbe, 0x97, 0xcd, 0x5e, 0xfe, 0xf8, 0xa1, 0x72, 0x6f, 0xb7, 0xb2, 0xfb, 0xea, 0xe5, 0xc1, 0xde, + 0x9e, 0xb1, 0xce, 0xa8, 0x63, 0x8e, 0x38, 0x56, 0x8a, 0x81, 0xef, 0xc0, 0x8e, 0x87, 0x2f, 0x10, + 0x8f, 0xb0, 0xef, 0x74, 0x2f, 0x11, 0x8f, 0x42, 0x82, 0x3d, 0xd7, 0xef, 0x8f, 0x1a, 0xed, 0x7e, + 0xf6, 0x56, 0xc8, 0x1e, 0xbe, 0x30, 0x13, 0xb9, 0x99, 0xaa, 0x93, 0x5e, 0xfb, 0x73, 0x0e, 0xac, + 0xdb, 0xcc, 0xe7, 0x51, 0x88, 0x5d, 0x3f, 0x42, 0xe4, 0xc2, 0xa6, 0x43, 0xee, 0x32, 0x5f, 0x06, + 0x4f, 0x72, 0xcf, 0x57, 0xf7, 0xdf, 0x94, 0x6e, 0x74, 0xc4, 0x4a, 0xf3, 0xcf, 0x47, 0xa9, 0x36, + 0x26, 0xab, 0x29, 0xd8, 0x28, 0xd8, 0xd7, 0x8d, 0xb0, 0x03, 0x36, 0xec, 0x61, 0xc8, 0x59, 0x98, + 0x34, 0x0b, 0xea, 0x85, 0xd8, 0x16, 0x47, 0x50, 0x5e, 0x8a, 0x1f, 0xee, 0x8b, 0x6b, 0x0f, 0x57, + 0x67, 0xc3, 0x2e, 0x25, 0xc9, 0xd3, 0x15, 0x12, 0x69, 0xdc, 0x2c, 0x8d, 0x91, 0x10, 0xfe, 0x0c, + 0x0a, 0xbd, 0x90, 0x79, 0xc8, 0x66, 0x94, 0xe2, 0x80, 0xa7, 0x7d, 0xb3, 0x9c, 0x5d, 0x12, 0xe9, + 0xe3, 0x87, 0xca, 0x72, 0x65, 0x77, 0xbf, 0x72, 0xf0, 0xdd, 0xc1, 0xab, 0x17, 0x2f, 0x0f, 0xbe, + 0x33, 0xf2, 0x82, 0x54, 0x1b, 0x81, 0x92, 0x6e, 0xfa, 
0x19, 0x14, 0x7e, 0xcf, 0x5c, 0xff, 0x2a, + 0x7e, 0xe5, 0x7f, 0xc2, 0x0b, 0xd2, 0x2c, 0xfe, 0x4f, 0xa0, 0xd0, 0x63, 0xa1, 0x4d, 0x50, 0x80, + 0x43, 0x4c, 0x29, 0xa1, 0xc8, 0x63, 0x0e, 0x91, 0x57, 0xe3, 0xa2, 0xe8, 0xb7, 0x2b, 0x4a, 0x43, + 0x80, 0x3b, 0x23, 0x6e, 0x8b, 0x39, 0xc4, 0xc8, 0xf7, 0xae, 0x9a, 0xe0, 0x19, 0x28, 0xd8, 0xd4, + 0x15, 0x47, 0x58, 0xbc, 0xd7, 0x3c, 0xc2, 0x39, 0xee, 0x13, 0x2e, 0xaf, 0xc5, 0x09, 0x34, 0x6e, + 0x97, 0x80, 0xc6, 0xfa, 0x1a, 0x39, 0x23, 0xd4, 0xc8, 0x27, 0x21, 0x5a, 0xae, 0xdf, 0x1a, 0x05, + 0x80, 0x01, 0x90, 0x28, 0xeb, 0xcf, 0x06, 0x95, 0x3e, 0x69, 0xd0, 0x55, 0xca, 0xfa, 0xd3, 0x11, + 0xff, 0x08, 0xb6, 0xd2, 0x88, 0x24, 0x0c, 0x59, 0x28, 0xce, 0x59, 0x14, 0xbf, 0x81, 0xe4, 0xfc, + 0x27, 0x0d, 0xbc, 0x9e, 0x04, 0x56, 0x45, 0x10, 0x33, 0x8d, 0x01, 0x4f, 0xc0, 0x76, 0x1a, 0xde, + 0x19, 0x86, 0xf1, 0x77, 0x67, 0x2a, 0x03, 0x98, 0x7d, 0xb6, 0xb7, 0x12, 0x6c, 0x7d, 0x24, 0x9e, + 0x90, 0x6b, 0x60, 0x4d, 0x90, 0xed, 0x01, 0xb1, 0xdf, 0x07, 0xcc, 0xf5, 0x23, 0x2e, 0x17, 0x62, + 0xdc, 0xf6, 0x35, 0x5c, 0x95, 0x31, 0x9a, 0xd0, 0xc4, 0xee, 0xd4, 0x26, 0x8a, 0x31, 0x84, 0xf9, + 0x3e, 0x89, 0x0f, 0x16, 0x97, 0xd7, 0x6f, 0x06, 0x99, 0x28, 0x60, 0x13, 0x40, 0x01, 0x71, 0x5c, + 0x3e, 0xcd, 0xd9, 0xc8, 0xe4, 0xe4, 0x29, 0xeb, 0xd7, 0x67, 0x44, 0xf0, 0xd7, 0x60, 0x39, 0x46, + 0x8d, 0x9e, 0x56, 0xde, 0xcc, 0x84, 0x2c, 0x09, 0xc8, 0xc8, 0x5d, 0x9c, 0x2b, 0x21, 0x4f, 0x0a, + 0x7d, 0x46, 0xc2, 0x2e, 0xe3, 0x6e, 0x74, 0x29, 0x6f, 0x7d, 0x8a, 0x73, 0xa5, 0xb1, 0x7e, 0x5c, + 0xdb, 0xe3, 0x14, 0x1b, 0xe7, 0x3f, 0x6b, 0x82, 0x3f, 0x02, 0xb1, 0x39, 0x88, 0x32, 0xfb, 0x3d, + 0x3a, 0xc7, 0x6e, 0xc4, 0x65, 0x39, 0xf3, 0x09, 0xc4, 0x13, 0x6b, 0xcc, 0x7e, 0xff, 0x56, 0xf8, + 0x43, 0x06, 0x56, 0x04, 0x61, 0xd2, 0x23, 0x0f, 0xe2, 0xe4, 0x7f, 0x73, 0xeb, 0xe4, 0xc7, 0x9d, + 0x13, 0x07, 0x9c, 0xf4, 0x91, 0x92, 0xa4, 0x3c, 0xfe, 0x02, 0x73, 0x79, 0x3b, 0xbb, 0x2b, 0x05, + 0xc2, 0x1a, 0x7d, 0x7f, 0x39, 0x7c, 0x0c, 0x96, 0x38, 0xc1, 0xa1, 0x3d, 0x40, 0x01, 0x8e, 0x06, + 0xf2, 0xce, 0x93, 0xdc, 0xf3, 0xfb, 0x06, 0x48, 0x4c, 0x1d, 0x1c, 0x0d, 0x44, 0x59, 0x43, 0x76, + 0x8e, 0x38, 0xb1, 0x87, 0xa1, 0x28, 0xc8, 0x17, 0xd9, 0x65, 0x0d, 0xd9, 0xb9, 0x39, 0x72, 0x87, + 0x7f, 0xcb, 0x81, 0x87, 0x0e, 0xe9, 0xe1, 0x21, 0x8d, 0x50, 0x14, 0x62, 0x9f, 0x27, 0x1f, 0x01, + 0xe4, 0x72, 0x46, 0x93, 0x3e, 0x79, 0x18, 0x6f, 0x92, 0x71, 0xbb, 0x4d, 0xb2, 0x26, 0xe8, 0x66, + 0x4a, 0x36, 0x76, 0x46, 0x81, 0xe7, 0x2d, 0xc2, 0xd7, 0x20, 0x3f, 0x2e, 0x14, 0x8a, 0x5c, 0x8f, + 0xb0, 0x61, 0x24, 0x3f, 0xca, 0xde, 0x3e, 0x69, 0xac, 0xb2, 0x12, 0x91, 0x18, 0xe8, 0xe2, 0xa6, + 0x49, 0x21, 0x8f, 0x6f, 0x30, 0xd0, 0x09, 0x41, 0xaa, 0x77, 0xc1, 0x57, 0xae, 0x43, 0x09, 0x72, + 0xfd, 0x99, 0x1d, 0xe2, 0x84, 0x8b, 0x0f, 0xf0, 0x18, 0xfb, 0x24, 0x1b, 0xfb, 0x58, 0x70, 0x9a, + 0xfe, 0xd4, 0xf3, 0x9a, 0x09, 0x24, 0x0d, 0x45, 0xc1, 0x72, 0xf7, 0x32, 0x22, 0x18, 0xb1, 0x61, + 0x14, 0x0c, 0x23, 0xf9, 0xcb, 0x78, 0xef, 0x9b, 0xb7, 0xdb, 0xfb, 0xaa, 0x20, 0xea, 0x31, 0xd0, + 0x58, 0xea, 0x4e, 0x7e, 0x40, 0x02, 0xee, 0x5f, 0x78, 0xb4, 0xeb, 0xfa, 0x38, 0xbc, 0x94, 0x8b, + 0x71, 0xa8, 0xc3, 0xdb, 0x85, 0x3a, 0xf1, 0x68, 0x35, 0xc6, 0x19, 0x13, 0xf2, 0x28, 0x0c, 0x0b, + 0xe2, 0x6e, 0xfa, 0xea, 0x13, 0x85, 0xd1, 0x63, 0x9c, 0x31, 0x21, 0xc3, 0x0e, 0xd8, 0xec, 0xbb, + 0x3e, 0x0a, 0x88, 0xef, 0x88, 0x09, 0x8f, 0xba, 0x3c, 0x1a, 0x8d, 0x16, 0x5f, 0x67, 0x57, 0xa6, + 0xd0, 0x77, 0xfd, 0x4e, 0xa2, 0xd4, 0x5c, 0x1e, 0x25, 0xa3, 0x44, 0x03, 0x48, 0x0e, 0xc1, 0xce, + 0x4c, 0xf3, 0x3c, 0xcd, 0x66, 0xad, 0xa5, 0xa2, 0xb4, 0xaa, 0xc7, 0xe0, 0x81, 
0x98, 0x42, 0x85, + 0x89, 0xa3, 0x80, 0x84, 0xd3, 0x6d, 0x24, 0x3f, 0xcb, 0x06, 0x6e, 0x7a, 0xf8, 0x42, 0xbc, 0xc5, + 0x78, 0x87, 0x84, 0x53, 0xbd, 0x03, 0x11, 0x78, 0x24, 0xb8, 0x81, 0xb8, 0xef, 0xcc, 0x87, 0xff, + 0x7f, 0x36, 0x7c, 0xdb, 0xc3, 0x17, 0x9d, 0x90, 0x38, 0xf3, 0x02, 0x7c, 0x0f, 0x96, 0x70, 0x18, + 0xe2, 0x4b, 0xe4, 0x0f, 0x29, 0xe5, 0xf2, 0xf3, 0xcc, 0x57, 0x0b, 0x88, 0xdd, 0xdb, 0xc2, 0x1b, + 0x0e, 0xc1, 0x9a, 0x98, 0xf8, 0x39, 0xc5, 0x7c, 0x80, 0x4e, 0x87, 0x2c, 0x22, 0xf2, 0x37, 0x71, + 0xf1, 0xb5, 0x5b, 0xb6, 0x73, 0x0a, 0x7d, 0x23, 0x98, 0xc6, 0x6a, 0x77, 0xe6, 0x37, 0x6c, 0x80, + 0x7c, 0xfa, 0x3e, 0x3b, 0x77, 0xa3, 0x01, 0x62, 0xae, 0xc3, 0xe5, 0x6f, 0x33, 0x33, 0x5f, 0x1b, + 0x89, 0xde, 0xba, 0xd1, 0x40, 0x77, 0x1d, 0x0e, 0xdb, 0x60, 0x83, 0x70, 0x1b, 0x07, 0x44, 0xdc, + 0x1a, 0x44, 0x43, 0x9d, 0xe3, 0xd0, 0x77, 0xfd, 0xbe, 0xfc, 0x8b, 0x4c, 0x56, 0x21, 0x11, 0x9a, + 0xb1, 0xee, 0x6d, 0x22, 0x83, 0x1a, 0x58, 0xa7, 0x0c, 0xd9, 0xcc, 0x0b, 0x70, 0x84, 0x82, 0xd0, + 0x3d, 0x73, 0x29, 0x11, 0x23, 0xda, 0x2f, 0x33, 0x71, 0x90, 0xb2, 0x5a, 0x2c, 0xeb, 0x8c, 0x55, + 0xe2, 0x62, 0xc3, 0x02, 0x12, 0xe2, 0x88, 0x85, 0xa2, 0xfe, 0x36, 0x71, 0x88, 0x6f, 0x93, 0x71, + 0x8e, 0xbb, 0x99, 0xd0, 0x07, 0xa9, 0xbc, 0x33, 0x56, 0xa7, 0x99, 0xb6, 0xc1, 0x46, 0x5c, 0x2e, + 0x84, 0x29, 0x45, 0xae, 0x43, 0xfc, 0xc8, 0xed, 0xb9, 0xe2, 0x26, 0x5c, 0xca, 0x7e, 0xf2, 0x58, + 0xa8, 0x50, 0xda, 0x9c, 0xc8, 0x44, 0xae, 0xf1, 0x05, 0x0c, 0x87, 0x8e, 0x98, 0x86, 0x7a, 0x2c, + 0x8c, 0xaf, 0x60, 0xc9, 0xb6, 0x72, 0xb9, 0x9c, 0x9d, 0x6b, 0x2a, 0xaf, 0x8d, 0xd5, 0xc9, 0xde, + 0x72, 0xd8, 0x02, 0xeb, 0xfc, 0xd2, 0xb7, 0x07, 0x21, 0xf3, 0xdd, 0x3f, 0x10, 0xc4, 0xc9, 0x29, + 0xb7, 0xb1, 0xcf, 0xe5, 0xbd, 0xec, 0x54, 0xa7, 0x74, 0xe6, 0x48, 0x26, 0x1e, 0x3d, 0x3e, 0x3e, + 0x22, 0x4a, 0xdc, 0xf4, 0x88, 0x9c, 0x0e, 0x31, 0xe5, 0x72, 0x25, 0x9b, 0x37, 0x16, 0x8a, 0xf6, + 0x57, 0x63, 0x19, 0xfc, 0x01, 0xac, 0x90, 0x0b, 0x37, 0x42, 0x6c, 0x34, 0x21, 0xcb, 0xfb, 0xd9, + 0x5f, 0x67, 0x21, 0xd0, 0x93, 0x59, 0x17, 0xfe, 0x08, 0x56, 0x38, 0x39, 0x45, 0x01, 0xee, 0x13, + 0x64, 0x33, 0x1e, 0xc9, 0x2f, 0x6e, 0x70, 0xa9, 0x5b, 0xe2, 0xe4, 0xb4, 0x83, 0xfb, 0xa4, 0xc6, + 0x78, 0xfc, 0x0e, 0x0b, 0xb1, 0xef, 0x30, 0x6f, 0x0a, 0x72, 0x70, 0x03, 0xc8, 0x6a, 0xa2, 0x4a, + 0x39, 0xc5, 0x7f, 0xe4, 0x40, 0x61, 0xce, 0x9d, 0x14, 0x7e, 0x0d, 0x9e, 0xd4, 0xf4, 0xb6, 0x69, + 0x19, 0x4a, 0xb3, 0x6d, 0x21, 0xf5, 0xa4, 0xa6, 0x1d, 0x99, 0x4d, 0xbd, 0x8d, 0x8e, 0xda, 0x66, + 0x47, 0xad, 0x35, 0x1b, 0x4d, 0xb5, 0x2e, 0xfd, 0x1f, 0xdc, 0x01, 0x5b, 0x73, 0xbd, 0xf4, 0xb6, + 0x94, 0x83, 0x5f, 0x00, 0x79, 0xfe, 0x62, 0xa3, 0x21, 0x2d, 0xc0, 0x22, 0x78, 0x34, 0x77, 0xb5, + 0xa3, 0x18, 0x56, 0xd3, 0x6a, 0xea, 0x6d, 0x69, 0xb1, 0xf8, 0xd7, 0x1c, 0xc8, 0x5f, 0xbb, 0x9b, + 0xc1, 0xaf, 0xc0, 0xe3, 0x86, 0x6e, 0xd4, 0x54, 0xe1, 0xaa, 0x68, 0x9a, 0xaa, 0xa1, 0x96, 0x5e, + 0x57, 0xaf, 0x64, 0xb6, 0x0d, 0x36, 0xe7, 0x39, 0xc5, 0x89, 0xed, 0x80, 0xad, 0xb9, 0x6b, 0x71, + 0x5e, 0x8f, 0xc1, 0xce, 0xbc, 0x45, 0x43, 0x3d, 0x34, 0x54, 0xd3, 0x14, 0x49, 0x2d, 0x80, 0x7b, + 0xe9, 0x0d, 0x06, 0x3e, 0x00, 0x1b, 0x9a, 0x7e, 0x88, 0x34, 0xf5, 0x58, 0xd5, 0xae, 0x64, 0xb0, + 0x0e, 0xa4, 0xc9, 0x52, 0x5d, 0xad, 0x1e, 0x1d, 0xfe, 0x4a, 0xca, 0xcd, 0xb1, 0x1e, 0x48, 0x0b, + 0x73, 0xac, 0x2f, 0xa4, 0xc5, 0x39, 0xd6, 0x7d, 0xe9, 0xce, 0x1c, 0x6b, 0x45, 0xfa, 0x0c, 0xe6, + 0xc1, 0xca, 0xc4, 0xaa, 0xe9, 0x87, 0xd2, 0xdd, 0x59, 0xc7, 0xb6, 0x6e, 0x35, 0x6b, 0xaa, 0xf4, + 0x39, 0xdc, 0x00, 0xf9, 0x89, 0xf5, 0xad, 0x62, 0xb4, 0x9b, 0xed, 0x43, 0xe9, 0x1e, 0x2c, 0x80, + 0xb5, 
0x89, 0x59, 0x35, 0x0c, 0xdd, 0x90, 0xee, 0xcf, 0x1a, 0x1b, 0x8a, 0xa5, 0x68, 0x12, 0x98, + 0x35, 0x76, 0x94, 0x76, 0xb3, 0x26, 0x2d, 0x15, 0xff, 0x9e, 0x03, 0xf9, 0x6b, 0xd3, 0xbe, 0xa8, + 0x94, 0x70, 0x8d, 0x71, 0xe8, 0x58, 0x35, 0xaa, 0xba, 0xd9, 0xb4, 0x7e, 0xba, 0xb2, 0x4f, 0x0f, + 0xc1, 0x83, 0x79, 0x4e, 0x96, 0x6a, 0x98, 0xaa, 0x94, 0x13, 0xf5, 0x98, 0xb7, 0x5c, 0x57, 0x1b, + 0xca, 0x91, 0x66, 0x25, 0x05, 0x9b, 0xe7, 0x90, 0xfc, 0x4b, 0x95, 0x16, 0x8b, 0x7f, 0xc9, 0x81, + 0xe5, 0xe9, 0x61, 0x3e, 0x8d, 0x68, 0x5a, 0x8a, 0xa5, 0xb6, 0xd4, 0xb6, 0x75, 0x25, 0xa1, 0x4d, + 0x00, 0x67, 0x97, 0xdb, 0x7a, 0x5b, 0x64, 0x32, 0xda, 0xb9, 0x89, 0xbd, 0x5e, 0xd7, 0xa4, 0x85, + 0xeb, 0xe6, 0x96, 0x5e, 0x97, 0x16, 0xaf, 0x9b, 0x15, 0x4d, 0x93, 0xee, 0x14, 0xff, 0x95, 0x03, + 0xeb, 0x73, 0xe7, 0xe2, 0xa7, 0xe0, 0x4b, 0xcb, 0x50, 0xda, 0xa6, 0x52, 0x13, 0xcd, 0x8f, 0x9a, + 0xa6, 0xae, 0x29, 0xd6, 0xf5, 0x13, 0xf7, 0x2d, 0x78, 0x36, 0xdf, 0xcd, 0x50, 0x95, 0x3a, 0x3a, + 0x6a, 0xd7, 0xf4, 0x56, 0xab, 0x69, 0x59, 0x6a, 0x5d, 0xca, 0xc1, 0xe7, 0xe0, 0xeb, 0xff, 0xe2, + 0x3b, 0xf1, 0x5c, 0x80, 0xdf, 0x80, 0xa7, 0xff, 0xc9, 0xb3, 0xa3, 0x2a, 0x96, 0x52, 0xd5, 0xd4, + 0x58, 0x24, 0x2d, 0xc2, 0x67, 0xa0, 0x38, 0xdf, 0xd5, 0x54, 0x8d, 0xa6, 0xa2, 0x35, 0xdf, 0x09, + 0x67, 0xe9, 0x4e, 0xf1, 0x77, 0x60, 0x69, 0x6a, 0x40, 0x15, 0x2f, 0x83, 0xea, 0x4f, 0x96, 0xaa, + 0x20, 0xfd, 0xc8, 0xea, 0x1c, 0x59, 0xd7, 0xcf, 0xca, 0xcc, 0xea, 0x6b, 0xf5, 0x44, 0xca, 0x41, + 0x19, 0xac, 0xcf, 0x58, 0x55, 0xb3, 0xa6, 0x74, 0x44, 0xbe, 0x45, 0x03, 0xdc, 0x1f, 0x8f, 0xa4, + 0xe2, 0xa8, 0x9f, 0xb4, 0x34, 0x54, 0x6d, 0xb6, 0x15, 0xe3, 0x6a, 0x73, 0x6d, 0x80, 0xfc, 0xd4, + 0x5a, 0x55, 0x31, 0xd5, 0x97, 0x07, 0x52, 0x0e, 0x42, 0xb0, 0x3a, 0x65, 0x16, 0xd1, 0x16, 0x8a, + 0x27, 0x31, 0x33, 0x99, 0x3f, 0x53, 0xa6, 0xde, 0x99, 0x53, 0x82, 0x2d, 0x50, 0x98, 0x5a, 0xab, + 0xeb, 0xb5, 0x23, 0x51, 0x5f, 0x29, 0x27, 0x1a, 0x67, 0x6a, 0xa1, 0xa6, 0xb7, 0x2d, 0x61, 0x5f, + 0x10, 0xef, 0xd8, 0xd5, 0xd9, 0xe9, 0x46, 0x34, 0x6d, 0x55, 0xa9, 0xfd, 0xd6, 0xd4, 0x14, 0xf3, + 0x35, 0x7a, 0x73, 0xa4, 0x5b, 0x57, 0xdf, 0x5f, 0x05, 0xb0, 0x76, 0xc5, 0x21, 0x09, 0x70, 0x55, + 0xa5, 0xb7, 0xa5, 0x05, 0x91, 0xd1, 0x35, 0x7b, 0xa3, 0x21, 0x2d, 0xc2, 0x2f, 0xc1, 0xc3, 0xab, + 0x0b, 0xa6, 0xd2, 0x50, 0x91, 0xda, 0xae, 0xe9, 0x75, 0x71, 0xf0, 0xef, 0x54, 0x8f, 0xdf, 0x59, + 0x7d, 0x37, 0x1a, 0x0c, 0xbb, 0x25, 0x9b, 0x79, 0xe5, 0x64, 0x82, 0xdb, 0x4d, 0xfe, 0xdf, 0xbf, + 0xcf, 0x76, 0xfb, 0xc4, 0x8f, 0x3f, 0x23, 0xe5, 0x1b, 0xfd, 0x2d, 0xe2, 0xfb, 0x89, 0xb1, 0x7b, + 0x37, 0xd6, 0xbd, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x77, 0xeb, 0xa2, 0x8a, 0xc6, 0x18, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host11.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host11.pb.go new file mode 100644 index 000000000..ce7f373ed --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host11.pb.go @@ -0,0 +1,935 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/postgresql/v1/config/host11.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PostgresqlHostConfig11_ConstraintExclusion int32 + +const ( + PostgresqlHostConfig11_CONSTRAINT_EXCLUSION_UNSPECIFIED PostgresqlHostConfig11_ConstraintExclusion = 0 + PostgresqlHostConfig11_CONSTRAINT_EXCLUSION_ON PostgresqlHostConfig11_ConstraintExclusion = 1 + PostgresqlHostConfig11_CONSTRAINT_EXCLUSION_OFF PostgresqlHostConfig11_ConstraintExclusion = 2 + PostgresqlHostConfig11_CONSTRAINT_EXCLUSION_PARTITION PostgresqlHostConfig11_ConstraintExclusion = 3 +) + +var PostgresqlHostConfig11_ConstraintExclusion_name = map[int32]string{ + 0: "CONSTRAINT_EXCLUSION_UNSPECIFIED", + 1: "CONSTRAINT_EXCLUSION_ON", + 2: "CONSTRAINT_EXCLUSION_OFF", + 3: "CONSTRAINT_EXCLUSION_PARTITION", +} +var PostgresqlHostConfig11_ConstraintExclusion_value = map[string]int32{ + "CONSTRAINT_EXCLUSION_UNSPECIFIED": 0, + "CONSTRAINT_EXCLUSION_ON": 1, + "CONSTRAINT_EXCLUSION_OFF": 2, + "CONSTRAINT_EXCLUSION_PARTITION": 3, +} + +func (x PostgresqlHostConfig11_ConstraintExclusion) String() string { + return proto.EnumName(PostgresqlHostConfig11_ConstraintExclusion_name, int32(x)) +} +func (PostgresqlHostConfig11_ConstraintExclusion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 0} +} + +type PostgresqlHostConfig11_ForceParallelMode int32 + +const ( + PostgresqlHostConfig11_FORCE_PARALLEL_MODE_UNSPECIFIED PostgresqlHostConfig11_ForceParallelMode = 0 + PostgresqlHostConfig11_FORCE_PARALLEL_MODE_ON PostgresqlHostConfig11_ForceParallelMode = 1 + PostgresqlHostConfig11_FORCE_PARALLEL_MODE_OFF PostgresqlHostConfig11_ForceParallelMode = 2 + PostgresqlHostConfig11_FORCE_PARALLEL_MODE_REGRESS PostgresqlHostConfig11_ForceParallelMode = 3 +) + +var PostgresqlHostConfig11_ForceParallelMode_name = map[int32]string{ + 0: "FORCE_PARALLEL_MODE_UNSPECIFIED", + 1: "FORCE_PARALLEL_MODE_ON", + 2: "FORCE_PARALLEL_MODE_OFF", + 3: "FORCE_PARALLEL_MODE_REGRESS", +} +var PostgresqlHostConfig11_ForceParallelMode_value = map[string]int32{ + "FORCE_PARALLEL_MODE_UNSPECIFIED": 0, + "FORCE_PARALLEL_MODE_ON": 1, + "FORCE_PARALLEL_MODE_OFF": 2, + "FORCE_PARALLEL_MODE_REGRESS": 3, +} + +func (x PostgresqlHostConfig11_ForceParallelMode) String() string { + return proto.EnumName(PostgresqlHostConfig11_ForceParallelMode_name, int32(x)) +} +func (PostgresqlHostConfig11_ForceParallelMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 1} +} + +type PostgresqlHostConfig11_LogLevel int32 + +const ( + PostgresqlHostConfig11_LOG_LEVEL_UNSPECIFIED PostgresqlHostConfig11_LogLevel = 0 + PostgresqlHostConfig11_LOG_LEVEL_DEBUG5 PostgresqlHostConfig11_LogLevel = 1 + PostgresqlHostConfig11_LOG_LEVEL_DEBUG4 
PostgresqlHostConfig11_LogLevel = 2 + PostgresqlHostConfig11_LOG_LEVEL_DEBUG3 PostgresqlHostConfig11_LogLevel = 3 + PostgresqlHostConfig11_LOG_LEVEL_DEBUG2 PostgresqlHostConfig11_LogLevel = 4 + PostgresqlHostConfig11_LOG_LEVEL_DEBUG1 PostgresqlHostConfig11_LogLevel = 5 + PostgresqlHostConfig11_LOG_LEVEL_LOG PostgresqlHostConfig11_LogLevel = 6 + PostgresqlHostConfig11_LOG_LEVEL_NOTICE PostgresqlHostConfig11_LogLevel = 7 + PostgresqlHostConfig11_LOG_LEVEL_WARNING PostgresqlHostConfig11_LogLevel = 8 + PostgresqlHostConfig11_LOG_LEVEL_ERROR PostgresqlHostConfig11_LogLevel = 9 + PostgresqlHostConfig11_LOG_LEVEL_FATAL PostgresqlHostConfig11_LogLevel = 10 + PostgresqlHostConfig11_LOG_LEVEL_PANIC PostgresqlHostConfig11_LogLevel = 11 +) + +var PostgresqlHostConfig11_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "LOG_LEVEL_DEBUG5", + 2: "LOG_LEVEL_DEBUG4", + 3: "LOG_LEVEL_DEBUG3", + 4: "LOG_LEVEL_DEBUG2", + 5: "LOG_LEVEL_DEBUG1", + 6: "LOG_LEVEL_LOG", + 7: "LOG_LEVEL_NOTICE", + 8: "LOG_LEVEL_WARNING", + 9: "LOG_LEVEL_ERROR", + 10: "LOG_LEVEL_FATAL", + 11: "LOG_LEVEL_PANIC", +} +var PostgresqlHostConfig11_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "LOG_LEVEL_DEBUG5": 1, + "LOG_LEVEL_DEBUG4": 2, + "LOG_LEVEL_DEBUG3": 3, + "LOG_LEVEL_DEBUG2": 4, + "LOG_LEVEL_DEBUG1": 5, + "LOG_LEVEL_LOG": 6, + "LOG_LEVEL_NOTICE": 7, + "LOG_LEVEL_WARNING": 8, + "LOG_LEVEL_ERROR": 9, + "LOG_LEVEL_FATAL": 10, + "LOG_LEVEL_PANIC": 11, +} + +func (x PostgresqlHostConfig11_LogLevel) String() string { + return proto.EnumName(PostgresqlHostConfig11_LogLevel_name, int32(x)) +} +func (PostgresqlHostConfig11_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 2} +} + +type PostgresqlHostConfig11_LogErrorVerbosity int32 + +const ( + PostgresqlHostConfig11_LOG_ERROR_VERBOSITY_UNSPECIFIED PostgresqlHostConfig11_LogErrorVerbosity = 0 + PostgresqlHostConfig11_LOG_ERROR_VERBOSITY_TERSE PostgresqlHostConfig11_LogErrorVerbosity = 1 + PostgresqlHostConfig11_LOG_ERROR_VERBOSITY_DEFAULT PostgresqlHostConfig11_LogErrorVerbosity = 2 + PostgresqlHostConfig11_LOG_ERROR_VERBOSITY_VERBOSE PostgresqlHostConfig11_LogErrorVerbosity = 3 +) + +var PostgresqlHostConfig11_LogErrorVerbosity_name = map[int32]string{ + 0: "LOG_ERROR_VERBOSITY_UNSPECIFIED", + 1: "LOG_ERROR_VERBOSITY_TERSE", + 2: "LOG_ERROR_VERBOSITY_DEFAULT", + 3: "LOG_ERROR_VERBOSITY_VERBOSE", +} +var PostgresqlHostConfig11_LogErrorVerbosity_value = map[string]int32{ + "LOG_ERROR_VERBOSITY_UNSPECIFIED": 0, + "LOG_ERROR_VERBOSITY_TERSE": 1, + "LOG_ERROR_VERBOSITY_DEFAULT": 2, + "LOG_ERROR_VERBOSITY_VERBOSE": 3, +} + +func (x PostgresqlHostConfig11_LogErrorVerbosity) String() string { + return proto.EnumName(PostgresqlHostConfig11_LogErrorVerbosity_name, int32(x)) +} +func (PostgresqlHostConfig11_LogErrorVerbosity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 3} +} + +type PostgresqlHostConfig11_LogStatement int32 + +const ( + PostgresqlHostConfig11_LOG_STATEMENT_UNSPECIFIED PostgresqlHostConfig11_LogStatement = 0 + PostgresqlHostConfig11_LOG_STATEMENT_NONE PostgresqlHostConfig11_LogStatement = 1 + PostgresqlHostConfig11_LOG_STATEMENT_DDL PostgresqlHostConfig11_LogStatement = 2 + PostgresqlHostConfig11_LOG_STATEMENT_MOD PostgresqlHostConfig11_LogStatement = 3 + PostgresqlHostConfig11_LOG_STATEMENT_ALL PostgresqlHostConfig11_LogStatement = 4 +) + +var PostgresqlHostConfig11_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: 
"LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var PostgresqlHostConfig11_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x PostgresqlHostConfig11_LogStatement) String() string { + return proto.EnumName(PostgresqlHostConfig11_LogStatement_name, int32(x)) +} +func (PostgresqlHostConfig11_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 4} +} + +type PostgresqlHostConfig11_TransactionIsolation int32 + +const ( + PostgresqlHostConfig11_TRANSACTION_ISOLATION_UNSPECIFIED PostgresqlHostConfig11_TransactionIsolation = 0 + PostgresqlHostConfig11_TRANSACTION_ISOLATION_READ_UNCOMMITTED PostgresqlHostConfig11_TransactionIsolation = 1 + PostgresqlHostConfig11_TRANSACTION_ISOLATION_READ_COMMITTED PostgresqlHostConfig11_TransactionIsolation = 2 + PostgresqlHostConfig11_TRANSACTION_ISOLATION_REPEATABLE_READ PostgresqlHostConfig11_TransactionIsolation = 3 + PostgresqlHostConfig11_TRANSACTION_ISOLATION_SERIALIZABLE PostgresqlHostConfig11_TransactionIsolation = 4 +) + +var PostgresqlHostConfig11_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var PostgresqlHostConfig11_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x PostgresqlHostConfig11_TransactionIsolation) String() string { + return proto.EnumName(PostgresqlHostConfig11_TransactionIsolation_name, int32(x)) +} +func (PostgresqlHostConfig11_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 5} +} + +type PostgresqlHostConfig11_ByteaOutput int32 + +const ( + PostgresqlHostConfig11_BYTEA_OUTPUT_UNSPECIFIED PostgresqlHostConfig11_ByteaOutput = 0 + PostgresqlHostConfig11_BYTEA_OUTPUT_HEX PostgresqlHostConfig11_ByteaOutput = 1 + PostgresqlHostConfig11_BYTEA_OUTPUT_ESCAPED PostgresqlHostConfig11_ByteaOutput = 2 +) + +var PostgresqlHostConfig11_ByteaOutput_name = map[int32]string{ + 0: "BYTEA_OUTPUT_UNSPECIFIED", + 1: "BYTEA_OUTPUT_HEX", + 2: "BYTEA_OUTPUT_ESCAPED", +} +var PostgresqlHostConfig11_ByteaOutput_value = map[string]int32{ + "BYTEA_OUTPUT_UNSPECIFIED": 0, + "BYTEA_OUTPUT_HEX": 1, + "BYTEA_OUTPUT_ESCAPED": 2, +} + +func (x PostgresqlHostConfig11_ByteaOutput) String() string { + return proto.EnumName(PostgresqlHostConfig11_ByteaOutput_name, int32(x)) +} +func (PostgresqlHostConfig11_ByteaOutput) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 6} +} + +type PostgresqlHostConfig11_XmlBinary int32 + +const ( + PostgresqlHostConfig11_XML_BINARY_UNSPECIFIED PostgresqlHostConfig11_XmlBinary = 0 + PostgresqlHostConfig11_XML_BINARY_BASE64 PostgresqlHostConfig11_XmlBinary = 1 + PostgresqlHostConfig11_XML_BINARY_HEX PostgresqlHostConfig11_XmlBinary = 2 +) + +var PostgresqlHostConfig11_XmlBinary_name = map[int32]string{ + 0: "XML_BINARY_UNSPECIFIED", + 1: "XML_BINARY_BASE64", + 2: "XML_BINARY_HEX", +} +var PostgresqlHostConfig11_XmlBinary_value = 
map[string]int32{ + "XML_BINARY_UNSPECIFIED": 0, + "XML_BINARY_BASE64": 1, + "XML_BINARY_HEX": 2, +} + +func (x PostgresqlHostConfig11_XmlBinary) String() string { + return proto.EnumName(PostgresqlHostConfig11_XmlBinary_name, int32(x)) +} +func (PostgresqlHostConfig11_XmlBinary) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 7} +} + +type PostgresqlHostConfig11_XmlOption int32 + +const ( + PostgresqlHostConfig11_XML_OPTION_UNSPECIFIED PostgresqlHostConfig11_XmlOption = 0 + PostgresqlHostConfig11_XML_OPTION_DOCUMENT PostgresqlHostConfig11_XmlOption = 1 + PostgresqlHostConfig11_XML_OPTION_CONTENT PostgresqlHostConfig11_XmlOption = 2 +) + +var PostgresqlHostConfig11_XmlOption_name = map[int32]string{ + 0: "XML_OPTION_UNSPECIFIED", + 1: "XML_OPTION_DOCUMENT", + 2: "XML_OPTION_CONTENT", +} +var PostgresqlHostConfig11_XmlOption_value = map[string]int32{ + "XML_OPTION_UNSPECIFIED": 0, + "XML_OPTION_DOCUMENT": 1, + "XML_OPTION_CONTENT": 2, +} + +func (x PostgresqlHostConfig11_XmlOption) String() string { + return proto.EnumName(PostgresqlHostConfig11_XmlOption_name, int32(x)) +} +func (PostgresqlHostConfig11_XmlOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 8} +} + +type PostgresqlHostConfig11_BackslashQuote int32 + +const ( + PostgresqlHostConfig11_BACKSLASH_QUOTE_UNSPECIFIED PostgresqlHostConfig11_BackslashQuote = 0 + PostgresqlHostConfig11_BACKSLASH_QUOTE PostgresqlHostConfig11_BackslashQuote = 1 + PostgresqlHostConfig11_BACKSLASH_QUOTE_ON PostgresqlHostConfig11_BackslashQuote = 2 + PostgresqlHostConfig11_BACKSLASH_QUOTE_OFF PostgresqlHostConfig11_BackslashQuote = 3 + PostgresqlHostConfig11_BACKSLASH_QUOTE_SAFE_ENCODING PostgresqlHostConfig11_BackslashQuote = 4 +) + +var PostgresqlHostConfig11_BackslashQuote_name = map[int32]string{ + 0: "BACKSLASH_QUOTE_UNSPECIFIED", + 1: "BACKSLASH_QUOTE", + 2: "BACKSLASH_QUOTE_ON", + 3: "BACKSLASH_QUOTE_OFF", + 4: "BACKSLASH_QUOTE_SAFE_ENCODING", +} +var PostgresqlHostConfig11_BackslashQuote_value = map[string]int32{ + "BACKSLASH_QUOTE_UNSPECIFIED": 0, + "BACKSLASH_QUOTE": 1, + "BACKSLASH_QUOTE_ON": 2, + "BACKSLASH_QUOTE_OFF": 3, + "BACKSLASH_QUOTE_SAFE_ENCODING": 4, +} + +func (x PostgresqlHostConfig11_BackslashQuote) String() string { + return proto.EnumName(PostgresqlHostConfig11_BackslashQuote_name, int32(x)) +} +func (PostgresqlHostConfig11_BackslashQuote) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0, 9} +} + +// Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file +// parameters which detailed description is available in +// [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). 
+type PostgresqlHostConfig11 struct { + RecoveryMinApplyDelay *wrappers.Int64Value `protobuf:"bytes,1,opt,name=recovery_min_apply_delay,json=recoveryMinApplyDelay,proto3" json:"recovery_min_apply_delay,omitempty"` + SharedBuffers *wrappers.Int64Value `protobuf:"bytes,2,opt,name=shared_buffers,json=sharedBuffers,proto3" json:"shared_buffers,omitempty"` + TempBuffers *wrappers.Int64Value `protobuf:"bytes,3,opt,name=temp_buffers,json=tempBuffers,proto3" json:"temp_buffers,omitempty"` + WorkMem *wrappers.Int64Value `protobuf:"bytes,4,opt,name=work_mem,json=workMem,proto3" json:"work_mem,omitempty"` + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,5,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + BackendFlushAfter *wrappers.Int64Value `protobuf:"bytes,6,opt,name=backend_flush_after,json=backendFlushAfter,proto3" json:"backend_flush_after,omitempty"` + OldSnapshotThreshold *wrappers.Int64Value `protobuf:"bytes,7,opt,name=old_snapshot_threshold,json=oldSnapshotThreshold,proto3" json:"old_snapshot_threshold,omitempty"` + MaxStandbyStreamingDelay *wrappers.Int64Value `protobuf:"bytes,8,opt,name=max_standby_streaming_delay,json=maxStandbyStreamingDelay,proto3" json:"max_standby_streaming_delay,omitempty"` + ConstraintExclusion PostgresqlHostConfig11_ConstraintExclusion `protobuf:"varint,9,opt,name=constraint_exclusion,json=constraintExclusion,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_ConstraintExclusion" json:"constraint_exclusion,omitempty"` + CursorTupleFraction *wrappers.DoubleValue `protobuf:"bytes,10,opt,name=cursor_tuple_fraction,json=cursorTupleFraction,proto3" json:"cursor_tuple_fraction,omitempty"` + FromCollapseLimit *wrappers.Int64Value `protobuf:"bytes,11,opt,name=from_collapse_limit,json=fromCollapseLimit,proto3" json:"from_collapse_limit,omitempty"` + JoinCollapseLimit *wrappers.Int64Value `protobuf:"bytes,12,opt,name=join_collapse_limit,json=joinCollapseLimit,proto3" json:"join_collapse_limit,omitempty"` + ForceParallelMode PostgresqlHostConfig11_ForceParallelMode `protobuf:"varint,13,opt,name=force_parallel_mode,json=forceParallelMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_ForceParallelMode" json:"force_parallel_mode,omitempty"` + ClientMinMessages PostgresqlHostConfig11_LogLevel `protobuf:"varint,14,opt,name=client_min_messages,json=clientMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogLevel" json:"client_min_messages,omitempty"` + LogMinMessages PostgresqlHostConfig11_LogLevel `protobuf:"varint,15,opt,name=log_min_messages,json=logMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogLevel" json:"log_min_messages,omitempty"` + LogMinErrorStatement PostgresqlHostConfig11_LogLevel `protobuf:"varint,16,opt,name=log_min_error_statement,json=logMinErrorStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogLevel" json:"log_min_error_statement,omitempty"` + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,17,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" json:"log_min_duration_statement,omitempty"` + LogCheckpoints *wrappers.BoolValue `protobuf:"bytes,18,opt,name=log_checkpoints,json=logCheckpoints,proto3" json:"log_checkpoints,omitempty"` + LogConnections *wrappers.BoolValue `protobuf:"bytes,19,opt,name=log_connections,json=logConnections,proto3" json:"log_connections,omitempty"` + LogDisconnections *wrappers.BoolValue 
`protobuf:"bytes,20,opt,name=log_disconnections,json=logDisconnections,proto3" json:"log_disconnections,omitempty"` + LogDuration *wrappers.BoolValue `protobuf:"bytes,21,opt,name=log_duration,json=logDuration,proto3" json:"log_duration,omitempty"` + LogErrorVerbosity PostgresqlHostConfig11_LogErrorVerbosity `protobuf:"varint,22,opt,name=log_error_verbosity,json=logErrorVerbosity,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogErrorVerbosity" json:"log_error_verbosity,omitempty"` + LogLockWaits *wrappers.BoolValue `protobuf:"bytes,23,opt,name=log_lock_waits,json=logLockWaits,proto3" json:"log_lock_waits,omitempty"` + LogStatement PostgresqlHostConfig11_LogStatement `protobuf:"varint,24,opt,name=log_statement,json=logStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogStatement" json:"log_statement,omitempty"` + LogTempFiles *wrappers.Int64Value `protobuf:"bytes,25,opt,name=log_temp_files,json=logTempFiles,proto3" json:"log_temp_files,omitempty"` + SearchPath string `protobuf:"bytes,26,opt,name=search_path,json=searchPath,proto3" json:"search_path,omitempty"` + RowSecurity *wrappers.BoolValue `protobuf:"bytes,27,opt,name=row_security,json=rowSecurity,proto3" json:"row_security,omitempty"` + DefaultTransactionIsolation PostgresqlHostConfig11_TransactionIsolation `protobuf:"varint,28,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + StatementTimeout *wrappers.Int64Value `protobuf:"bytes,29,opt,name=statement_timeout,json=statementTimeout,proto3" json:"statement_timeout,omitempty"` + LockTimeout *wrappers.Int64Value `protobuf:"bytes,30,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + IdleInTransactionSessionTimeout *wrappers.Int64Value `protobuf:"bytes,31,opt,name=idle_in_transaction_session_timeout,json=idleInTransactionSessionTimeout,proto3" json:"idle_in_transaction_session_timeout,omitempty"` + ByteaOutput PostgresqlHostConfig11_ByteaOutput `protobuf:"varint,32,opt,name=bytea_output,json=byteaOutput,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_ByteaOutput" json:"bytea_output,omitempty"` + Xmlbinary PostgresqlHostConfig11_XmlBinary `protobuf:"varint,33,opt,name=xmlbinary,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_XmlBinary" json:"xmlbinary,omitempty"` + Xmloption PostgresqlHostConfig11_XmlOption `protobuf:"varint,34,opt,name=xmloption,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_XmlOption" json:"xmloption,omitempty"` + GinPendingListLimit *wrappers.Int64Value `protobuf:"bytes,35,opt,name=gin_pending_list_limit,json=ginPendingListLimit,proto3" json:"gin_pending_list_limit,omitempty"` + DeadlockTimeout *wrappers.Int64Value `protobuf:"bytes,36,opt,name=deadlock_timeout,json=deadlockTimeout,proto3" json:"deadlock_timeout,omitempty"` + MaxLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,37,opt,name=max_locks_per_transaction,json=maxLocksPerTransaction,proto3" json:"max_locks_per_transaction,omitempty"` + MaxPredLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,38,opt,name=max_pred_locks_per_transaction,json=maxPredLocksPerTransaction,proto3" json:"max_pred_locks_per_transaction,omitempty"` + ArrayNulls *wrappers.BoolValue `protobuf:"bytes,39,opt,name=array_nulls,json=arrayNulls,proto3" json:"array_nulls,omitempty"` + BackslashQuote 
PostgresqlHostConfig11_BackslashQuote `protobuf:"varint,40,opt,name=backslash_quote,json=backslashQuote,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_BackslashQuote" json:"backslash_quote,omitempty"` + DefaultWithOids *wrappers.BoolValue `protobuf:"bytes,41,opt,name=default_with_oids,json=defaultWithOids,proto3" json:"default_with_oids,omitempty"` + EscapeStringWarning *wrappers.BoolValue `protobuf:"bytes,42,opt,name=escape_string_warning,json=escapeStringWarning,proto3" json:"escape_string_warning,omitempty"` + LoCompatPrivileges *wrappers.BoolValue `protobuf:"bytes,43,opt,name=lo_compat_privileges,json=loCompatPrivileges,proto3" json:"lo_compat_privileges,omitempty"` + OperatorPrecedenceWarning *wrappers.BoolValue `protobuf:"bytes,44,opt,name=operator_precedence_warning,json=operatorPrecedenceWarning,proto3" json:"operator_precedence_warning,omitempty"` + QuoteAllIdentifiers *wrappers.BoolValue `protobuf:"bytes,45,opt,name=quote_all_identifiers,json=quoteAllIdentifiers,proto3" json:"quote_all_identifiers,omitempty"` + StandardConformingStrings *wrappers.BoolValue `protobuf:"bytes,46,opt,name=standard_conforming_strings,json=standardConformingStrings,proto3" json:"standard_conforming_strings,omitempty"` + SynchronizeSeqscans *wrappers.BoolValue `protobuf:"bytes,47,opt,name=synchronize_seqscans,json=synchronizeSeqscans,proto3" json:"synchronize_seqscans,omitempty"` + TransformNullEquals *wrappers.BoolValue `protobuf:"bytes,48,opt,name=transform_null_equals,json=transformNullEquals,proto3" json:"transform_null_equals,omitempty"` + ExitOnError *wrappers.BoolValue `protobuf:"bytes,49,opt,name=exit_on_error,json=exitOnError,proto3" json:"exit_on_error,omitempty"` + SeqPageCost *wrappers.DoubleValue `protobuf:"bytes,50,opt,name=seq_page_cost,json=seqPageCost,proto3" json:"seq_page_cost,omitempty"` + RandomPageCost *wrappers.DoubleValue `protobuf:"bytes,51,opt,name=random_page_cost,json=randomPageCost,proto3" json:"random_page_cost,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlHostConfig11) Reset() { *m = PostgresqlHostConfig11{} } +func (m *PostgresqlHostConfig11) String() string { return proto.CompactTextString(m) } +func (*PostgresqlHostConfig11) ProtoMessage() {} +func (*PostgresqlHostConfig11) Descriptor() ([]byte, []int) { + return fileDescriptor_host11_bb5f848b388f9f62, []int{0} +} +func (m *PostgresqlHostConfig11) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlHostConfig11.Unmarshal(m, b) +} +func (m *PostgresqlHostConfig11) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlHostConfig11.Marshal(b, m, deterministic) +} +func (dst *PostgresqlHostConfig11) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlHostConfig11.Merge(dst, src) +} +func (m *PostgresqlHostConfig11) XXX_Size() int { + return xxx_messageInfo_PostgresqlHostConfig11.Size(m) +} +func (m *PostgresqlHostConfig11) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlHostConfig11.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlHostConfig11 proto.InternalMessageInfo + +func (m *PostgresqlHostConfig11) GetRecoveryMinApplyDelay() *wrappers.Int64Value { + if m != nil { + return m.RecoveryMinApplyDelay + } + return nil +} + +func (m *PostgresqlHostConfig11) GetSharedBuffers() *wrappers.Int64Value { + if m != nil { + return m.SharedBuffers + } + return nil +} + +func (m *PostgresqlHostConfig11) GetTempBuffers() 
*wrappers.Int64Value { + if m != nil { + return m.TempBuffers + } + return nil +} + +func (m *PostgresqlHostConfig11) GetWorkMem() *wrappers.Int64Value { + if m != nil { + return m.WorkMem + } + return nil +} + +func (m *PostgresqlHostConfig11) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func (m *PostgresqlHostConfig11) GetBackendFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BackendFlushAfter + } + return nil +} + +func (m *PostgresqlHostConfig11) GetOldSnapshotThreshold() *wrappers.Int64Value { + if m != nil { + return m.OldSnapshotThreshold + } + return nil +} + +func (m *PostgresqlHostConfig11) GetMaxStandbyStreamingDelay() *wrappers.Int64Value { + if m != nil { + return m.MaxStandbyStreamingDelay + } + return nil +} + +func (m *PostgresqlHostConfig11) GetConstraintExclusion() PostgresqlHostConfig11_ConstraintExclusion { + if m != nil { + return m.ConstraintExclusion + } + return PostgresqlHostConfig11_CONSTRAINT_EXCLUSION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetCursorTupleFraction() *wrappers.DoubleValue { + if m != nil { + return m.CursorTupleFraction + } + return nil +} + +func (m *PostgresqlHostConfig11) GetFromCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.FromCollapseLimit + } + return nil +} + +func (m *PostgresqlHostConfig11) GetJoinCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.JoinCollapseLimit + } + return nil +} + +func (m *PostgresqlHostConfig11) GetForceParallelMode() PostgresqlHostConfig11_ForceParallelMode { + if m != nil { + return m.ForceParallelMode + } + return PostgresqlHostConfig11_FORCE_PARALLEL_MODE_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetClientMinMessages() PostgresqlHostConfig11_LogLevel { + if m != nil { + return m.ClientMinMessages + } + return PostgresqlHostConfig11_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetLogMinMessages() PostgresqlHostConfig11_LogLevel { + if m != nil { + return m.LogMinMessages + } + return PostgresqlHostConfig11_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetLogMinErrorStatement() PostgresqlHostConfig11_LogLevel { + if m != nil { + return m.LogMinErrorStatement + } + return PostgresqlHostConfig11_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLogCheckpoints() *wrappers.BoolValue { + if m != nil { + return m.LogCheckpoints + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLogConnections() *wrappers.BoolValue { + if m != nil { + return m.LogConnections + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLogDisconnections() *wrappers.BoolValue { + if m != nil { + return m.LogDisconnections + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLogDuration() *wrappers.BoolValue { + if m != nil { + return m.LogDuration + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLogErrorVerbosity() PostgresqlHostConfig11_LogErrorVerbosity { + if m != nil { + return m.LogErrorVerbosity + } + return PostgresqlHostConfig11_LOG_ERROR_VERBOSITY_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetLogLockWaits() *wrappers.BoolValue { + if m != nil { + return m.LogLockWaits + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLogStatement() PostgresqlHostConfig11_LogStatement { + if m != nil { + return m.LogStatement + } + return 
PostgresqlHostConfig11_LOG_STATEMENT_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetLogTempFiles() *wrappers.Int64Value { + if m != nil { + return m.LogTempFiles + } + return nil +} + +func (m *PostgresqlHostConfig11) GetSearchPath() string { + if m != nil { + return m.SearchPath + } + return "" +} + +func (m *PostgresqlHostConfig11) GetRowSecurity() *wrappers.BoolValue { + if m != nil { + return m.RowSecurity + } + return nil +} + +func (m *PostgresqlHostConfig11) GetDefaultTransactionIsolation() PostgresqlHostConfig11_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return PostgresqlHostConfig11_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetStatementTimeout() *wrappers.Int64Value { + if m != nil { + return m.StatementTimeout + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *PostgresqlHostConfig11) GetIdleInTransactionSessionTimeout() *wrappers.Int64Value { + if m != nil { + return m.IdleInTransactionSessionTimeout + } + return nil +} + +func (m *PostgresqlHostConfig11) GetByteaOutput() PostgresqlHostConfig11_ByteaOutput { + if m != nil { + return m.ByteaOutput + } + return PostgresqlHostConfig11_BYTEA_OUTPUT_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetXmlbinary() PostgresqlHostConfig11_XmlBinary { + if m != nil { + return m.Xmlbinary + } + return PostgresqlHostConfig11_XML_BINARY_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetXmloption() PostgresqlHostConfig11_XmlOption { + if m != nil { + return m.Xmloption + } + return PostgresqlHostConfig11_XML_OPTION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetGinPendingListLimit() *wrappers.Int64Value { + if m != nil { + return m.GinPendingListLimit + } + return nil +} + +func (m *PostgresqlHostConfig11) GetDeadlockTimeout() *wrappers.Int64Value { + if m != nil { + return m.DeadlockTimeout + } + return nil +} + +func (m *PostgresqlHostConfig11) GetMaxLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxLocksPerTransaction + } + return nil +} + +func (m *PostgresqlHostConfig11) GetMaxPredLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxPredLocksPerTransaction + } + return nil +} + +func (m *PostgresqlHostConfig11) GetArrayNulls() *wrappers.BoolValue { + if m != nil { + return m.ArrayNulls + } + return nil +} + +func (m *PostgresqlHostConfig11) GetBackslashQuote() PostgresqlHostConfig11_BackslashQuote { + if m != nil { + return m.BackslashQuote + } + return PostgresqlHostConfig11_BACKSLASH_QUOTE_UNSPECIFIED +} + +func (m *PostgresqlHostConfig11) GetDefaultWithOids() *wrappers.BoolValue { + if m != nil { + return m.DefaultWithOids + } + return nil +} + +func (m *PostgresqlHostConfig11) GetEscapeStringWarning() *wrappers.BoolValue { + if m != nil { + return m.EscapeStringWarning + } + return nil +} + +func (m *PostgresqlHostConfig11) GetLoCompatPrivileges() *wrappers.BoolValue { + if m != nil { + return m.LoCompatPrivileges + } + return nil +} + +func (m *PostgresqlHostConfig11) GetOperatorPrecedenceWarning() *wrappers.BoolValue { + if m != nil { + return m.OperatorPrecedenceWarning + } + return nil +} + +func (m *PostgresqlHostConfig11) GetQuoteAllIdentifiers() *wrappers.BoolValue { + if m != nil { + return m.QuoteAllIdentifiers + } + return nil +} + +func (m *PostgresqlHostConfig11) GetStandardConformingStrings() *wrappers.BoolValue { + if m != nil { + return m.StandardConformingStrings + } + 
return nil +} + +func (m *PostgresqlHostConfig11) GetSynchronizeSeqscans() *wrappers.BoolValue { + if m != nil { + return m.SynchronizeSeqscans + } + return nil +} + +func (m *PostgresqlHostConfig11) GetTransformNullEquals() *wrappers.BoolValue { + if m != nil { + return m.TransformNullEquals + } + return nil +} + +func (m *PostgresqlHostConfig11) GetExitOnError() *wrappers.BoolValue { + if m != nil { + return m.ExitOnError + } + return nil +} + +func (m *PostgresqlHostConfig11) GetSeqPageCost() *wrappers.DoubleValue { + if m != nil { + return m.SeqPageCost + } + return nil +} + +func (m *PostgresqlHostConfig11) GetRandomPageCost() *wrappers.DoubleValue { + if m != nil { + return m.RandomPageCost + } + return nil +} + +func init() { + proto.RegisterType((*PostgresqlHostConfig11)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_ConstraintExclusion", PostgresqlHostConfig11_ConstraintExclusion_name, PostgresqlHostConfig11_ConstraintExclusion_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_ForceParallelMode", PostgresqlHostConfig11_ForceParallelMode_name, PostgresqlHostConfig11_ForceParallelMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogLevel", PostgresqlHostConfig11_LogLevel_name, PostgresqlHostConfig11_LogLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogErrorVerbosity", PostgresqlHostConfig11_LogErrorVerbosity_name, PostgresqlHostConfig11_LogErrorVerbosity_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_LogStatement", PostgresqlHostConfig11_LogStatement_name, PostgresqlHostConfig11_LogStatement_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_TransactionIsolation", PostgresqlHostConfig11_TransactionIsolation_name, PostgresqlHostConfig11_TransactionIsolation_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_ByteaOutput", PostgresqlHostConfig11_ByteaOutput_name, PostgresqlHostConfig11_ByteaOutput_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_XmlBinary", PostgresqlHostConfig11_XmlBinary_name, PostgresqlHostConfig11_XmlBinary_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_XmlOption", PostgresqlHostConfig11_XmlOption_name, PostgresqlHostConfig11_XmlOption_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_BackslashQuote", PostgresqlHostConfig11_BackslashQuote_name, PostgresqlHostConfig11_BackslashQuote_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/config/host11.proto", fileDescriptor_host11_bb5f848b388f9f62) +} + +var fileDescriptor_host11_bb5f848b388f9f62 = []byte{ + // 2178 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x99, 0x5b, 0x6f, 0xdb, 0xc8, + 0x15, 0xc7, 0x2b, 0x3b, 0x9b, 0xcb, 0xf8, 0x46, 0x8d, 0x7c, 0x61, 0xec, 0x5c, 0x95, 0x4b, 0xb3, + 0xdb, 0x5a, 0x8e, 0x1c, 0x37, 0x1b, 0x60, 0xd1, 0xc5, 0x52, 0x12, 0xe5, 0xa8, 0xa5, 0x44, 0x85, + 0xa4, 0x1d, 0x6f, 0x8a, 0xc5, 0x60, 0x44, 0x8e, 0x24, 0x36, 0x43, 0x0e, 0xcd, 0xa1, 0x7c, 0x29, + 0x50, 0xf4, 0xa5, 0x4f, 0x7d, 0xec, 0x43, 0x81, 0xf6, 0x0b, 0xe5, 0x9b, 0xf4, 0x13, 0xf4, 0x29, + 0x4f, 0xc5, 0x90, 0xa2, 0x2e, 0xb6, 0x5a, 0x1a, 0xeb, 0xbc, 0x45, 0x67, 0xce, 
0xff, 0x77, 0x0e, + 0xe7, 0x9c, 0x21, 0xcf, 0xc4, 0x60, 0xf7, 0x1c, 0xfb, 0x0e, 0x39, 0xdb, 0xb1, 0x29, 0x1b, 0x38, + 0x3b, 0x9e, 0xd3, 0xd9, 0x09, 0x18, 0x8f, 0x7a, 0x21, 0xe1, 0xc7, 0x74, 0xe7, 0xa4, 0xbc, 0x63, + 0x33, 0xbf, 0xeb, 0xf6, 0x76, 0xfa, 0x8c, 0x47, 0xe5, 0x72, 0x29, 0x08, 0x59, 0xc4, 0xe0, 0xb3, + 0x44, 0x53, 0x8a, 0x35, 0x25, 0xcf, 0xe9, 0x94, 0xc6, 0x9a, 0xd2, 0x49, 0xb9, 0x94, 0x68, 0x36, + 0x1f, 0xf4, 0x18, 0xeb, 0x51, 0xb2, 0x13, 0x8b, 0x3a, 0x83, 0xee, 0xce, 0x69, 0x88, 0x83, 0x80, + 0x84, 0x3c, 0xc1, 0x6c, 0xde, 0x9f, 0x0a, 0x7d, 0x82, 0xa9, 0xeb, 0xe0, 0xc8, 0x65, 0x7e, 0xb2, + 0x5c, 0xfc, 0x4f, 0x09, 0xac, 0xb7, 0x47, 0xdc, 0xb7, 0x8c, 0x47, 0xd5, 0x98, 0x5b, 0x2e, 0x43, + 0x0b, 0xc8, 0x21, 0xb1, 0xd9, 0x09, 0x09, 0xcf, 0x91, 0xe7, 0xfa, 0x08, 0x07, 0x01, 0x3d, 0x47, + 0x0e, 0xa1, 0xf8, 0x5c, 0xce, 0x3d, 0xca, 0xbd, 0x58, 0xd8, 0xdd, 0x2a, 0x25, 0xc1, 0x4b, 0x69, + 0xf0, 0x52, 0xc3, 0x8f, 0x5e, 0xef, 0x1d, 0x62, 0x3a, 0x20, 0xc6, 0x5a, 0x2a, 0x6e, 0xba, 0xbe, + 0x22, 0xa4, 0x35, 0xa1, 0x84, 0x15, 0xb0, 0xcc, 0xfb, 0x38, 0x24, 0x0e, 0xea, 0x0c, 0xba, 0x5d, + 0x12, 0x72, 0x79, 0x2e, 0x9b, 0xb5, 0x94, 0x48, 0x2a, 0x89, 0x02, 0x7e, 0x0f, 0x16, 0x23, 0xe2, + 0x05, 0x23, 0xc2, 0x7c, 0x36, 0x61, 0x41, 0x08, 0x52, 0xfd, 0x6b, 0x70, 0xfb, 0x94, 0x85, 0x1f, + 0x91, 0x47, 0x3c, 0xf9, 0x46, 0xb6, 0xf6, 0x96, 0x70, 0x6e, 0x12, 0x0f, 0x56, 0xc1, 0x4a, 0x1c, + 0xb7, 0xeb, 0x52, 0x82, 0xa8, 0xeb, 0xb9, 0x91, 0xfc, 0xd5, 0x15, 0x92, 0x17, 0x9a, 0xba, 0x4b, + 0x89, 0x26, 0x14, 0xf0, 0x3d, 0x28, 0x74, 0xb0, 0xfd, 0x91, 0xf8, 0x0e, 0xea, 0xd2, 0x01, 0xef, + 0x23, 0xdc, 0x8d, 0x48, 0x28, 0xdf, 0xcc, 0x04, 0x55, 0xc0, 0xe7, 0x4f, 0xe5, 0x9b, 0x2f, 0xb7, + 0x77, 0x5f, 0xee, 0xbd, 0x31, 0xf2, 0x43, 0x46, 0x5d, 0x20, 0x14, 0x41, 0x80, 0x08, 0xac, 0x33, + 0xea, 0x20, 0xee, 0xe3, 0x80, 0xf7, 0x59, 0x84, 0xa2, 0x7e, 0x48, 0x78, 0x9f, 0x51, 0x47, 0xbe, + 0x95, 0xcd, 0x5e, 0xfc, 0xfc, 0xa9, 0x7c, 0x7b, 0xbb, 0xbc, 0xfd, 0xe6, 0xf5, 0xde, 0xcb, 0x97, + 0xc6, 0x2a, 0xa3, 0x8e, 0x39, 0xe4, 0x58, 0x29, 0x06, 0x7e, 0x00, 0x5b, 0x1e, 0x3e, 0x43, 0x3c, + 0xc2, 0xbe, 0xd3, 0x39, 0x47, 0x3c, 0x0a, 0x09, 0xf6, 0x5c, 0xbf, 0x37, 0xec, 0x89, 0xdb, 0xd9, + 0x5b, 0x21, 0x7b, 0xf8, 0xcc, 0x4c, 0xe4, 0x66, 0xaa, 0x4e, 0xda, 0xe2, 0xaf, 0x39, 0xb0, 0x6a, + 0x33, 0x9f, 0x47, 0x21, 0x76, 0xfd, 0x08, 0x91, 0x33, 0x9b, 0x0e, 0xb8, 0xcb, 0x7c, 0xf9, 0xce, + 0xa3, 0xdc, 0x8b, 0xe5, 0xdd, 0x77, 0xa5, 0x2b, 0x9d, 0x86, 0xd2, 0xec, 0x56, 0x2e, 0x55, 0x47, + 0x64, 0x35, 0x05, 0x1b, 0x05, 0xfb, 0xb2, 0x11, 0xb6, 0xc1, 0x9a, 0x3d, 0x08, 0x39, 0x0b, 0x51, + 0x34, 0x08, 0x28, 0x41, 0xdd, 0x10, 0xdb, 0xe2, 0xb4, 0xc8, 0x20, 0x7e, 0xb8, 0x7b, 0x97, 0x1e, + 0xae, 0xc6, 0x06, 0x1d, 0x4a, 0x92, 0xa7, 0x2b, 0x24, 0x52, 0x4b, 0x28, 0xeb, 0x43, 0x21, 0xfc, + 0x09, 0x14, 0xba, 0x21, 0xf3, 0x90, 0xcd, 0x28, 0xc5, 0x01, 0x4f, 0xfb, 0x66, 0x21, 0xbb, 0x24, + 0xd2, 0xe7, 0x4f, 0xe5, 0xc5, 0xf2, 0xf6, 0x6e, 0x79, 0xef, 0xdb, 0xbd, 0x37, 0xaf, 0x5e, 0xef, + 0x7d, 0x6b, 0xe4, 0x05, 0xa9, 0x3a, 0x04, 0x25, 0xdd, 0xf4, 0x13, 0x28, 0xfc, 0x91, 0xb9, 0xfe, + 0x45, 0xfc, 0xe2, 0xcf, 0xc2, 0x0b, 0xd2, 0x34, 0xfe, 0x2f, 0xa0, 0xd0, 0x65, 0xa1, 0x4d, 0x50, + 0x80, 0x43, 0x4c, 0x29, 0xa1, 0xc8, 0x63, 0x0e, 0x91, 0x97, 0xe2, 0xa2, 0xe8, 0xd7, 0x2b, 0x4a, + 0x5d, 0x80, 0xdb, 0x43, 0x6e, 0x93, 0x39, 0xc4, 0xc8, 0x77, 0x2f, 0x9a, 0xe0, 0x09, 0x28, 0xd8, + 0xd4, 0x25, 0x7e, 0x14, 0xbf, 0x82, 0x3c, 0xc2, 0x39, 0xee, 0x11, 0x2e, 0x2f, 0xc7, 0x09, 0xd4, + 0xaf, 0x97, 0x80, 0xc6, 0x7a, 0x1a, 0x39, 0x21, 0xd4, 0xc8, 0x27, 0x21, 0x9a, 0xae, 0xdf, 0x1c, + 0x06, 
0x80, 0x01, 0x90, 0x28, 0xeb, 0x4d, 0x07, 0x5d, 0xf9, 0xa2, 0x41, 0x97, 0x29, 0xeb, 0x4d, + 0x46, 0xfc, 0x33, 0xd8, 0x48, 0x23, 0x92, 0x30, 0x64, 0xa1, 0x38, 0x67, 0x11, 0xf1, 0x88, 0x1f, + 0xc9, 0xd2, 0x17, 0x0d, 0xbc, 0x9a, 0x04, 0x56, 0x45, 0x10, 0x33, 0x8d, 0x01, 0x8f, 0xc0, 0x66, + 0x1a, 0xde, 0x19, 0x84, 0xf1, 0x27, 0x62, 0x22, 0x83, 0x7c, 0xf6, 0xd9, 0xde, 0x48, 0xb0, 0xb5, + 0xa1, 0x78, 0x4c, 0xae, 0x82, 0x15, 0x41, 0xb6, 0xfb, 0xc4, 0xfe, 0x18, 0x30, 0xd7, 0x8f, 0xb8, + 0x0c, 0x63, 0xdc, 0xe6, 0x25, 0x5c, 0x85, 0x31, 0x9a, 0xd0, 0xc4, 0xee, 0x54, 0xc7, 0x8a, 0x11, + 0x84, 0xf9, 0x3e, 0x89, 0x0f, 0x16, 0x97, 0x0b, 0x57, 0x83, 0x8c, 0x15, 0xb0, 0x01, 0xa0, 0x80, + 0x38, 0x2e, 0x9f, 0xe4, 0xac, 0x66, 0x72, 0xf2, 0x94, 0xf5, 0x6a, 0x53, 0x22, 0xf8, 0x5b, 0xb0, + 0x18, 0xa3, 0x86, 0x4f, 0x2b, 0xaf, 0x65, 0x42, 0x16, 0x04, 0x64, 0xe8, 0x2e, 0xce, 0x95, 0x90, + 0x27, 0x85, 0x3e, 0x21, 0x61, 0x87, 0x71, 0x37, 0x3a, 0x97, 0xd7, 0xbf, 0xc4, 0xb9, 0xd2, 0x58, + 0x2f, 0xae, 0xed, 0x61, 0x8a, 0x8d, 0xf3, 0x9f, 0x36, 0xc1, 0x1f, 0x80, 0xd8, 0x1c, 0x44, 0x99, + 0xfd, 0x11, 0x9d, 0x62, 0x37, 0xe2, 0xf2, 0x46, 0xe6, 0x13, 0x88, 0x27, 0xd6, 0x98, 0xfd, 0xf1, + 0xbd, 0xf0, 0x87, 0x0c, 0x2c, 0x09, 0xc2, 0xb8, 0x47, 0xe4, 0x38, 0xf9, 0xdf, 0x5d, 0x3b, 0xf9, + 0x51, 0xe7, 0xc4, 0x01, 0xc7, 0x7d, 0xa4, 0x24, 0x29, 0x8f, 0xbe, 0xc0, 0x5c, 0xbe, 0x9b, 0xdd, + 0x95, 0x02, 0x61, 0x0d, 0xbf, 0xbf, 0x1c, 0x3e, 0x04, 0x0b, 0x9c, 0xe0, 0xd0, 0xee, 0xa3, 0x00, + 0x47, 0x7d, 0x79, 0xf3, 0x51, 0xee, 0xc5, 0x1d, 0x03, 0x24, 0xa6, 0x36, 0x8e, 0xfa, 0xa2, 0xac, + 0x21, 0x3b, 0x45, 0x9c, 0xd8, 0x83, 0x50, 0x14, 0x64, 0x2b, 0xbb, 0xac, 0x21, 0x3b, 0x35, 0x87, + 0xee, 0xf0, 0x1f, 0x39, 0x70, 0xdf, 0x21, 0x5d, 0x3c, 0xa0, 0x11, 0x8a, 0x42, 0xec, 0xf3, 0xe4, + 0x23, 0x80, 0x5c, 0xce, 0x68, 0xd2, 0x27, 0xf7, 0xe2, 0x4d, 0x32, 0xae, 0xb7, 0x49, 0xd6, 0x18, + 0xdd, 0x48, 0xc9, 0xc6, 0xd6, 0x30, 0xf0, 0xac, 0x45, 0xf8, 0x16, 0xe4, 0x47, 0x85, 0x42, 0x91, + 0xeb, 0x11, 0x36, 0x88, 0xe4, 0xfb, 0xd9, 0xdb, 0x27, 0x8d, 0x54, 0x56, 0x22, 0x12, 0xb3, 0x57, + 0xdc, 0x34, 0x29, 0xe4, 0xc1, 0x15, 0x66, 0x2f, 0x21, 0x48, 0xf5, 0x2e, 0x78, 0xe2, 0x3a, 0x94, + 0x20, 0xd7, 0x9f, 0xda, 0x21, 0x4e, 0xb8, 0xf8, 0x00, 0x8f, 0xb0, 0x0f, 0xb3, 0xb1, 0x0f, 0x05, + 0xa7, 0xe1, 0x4f, 0x3c, 0xaf, 0x99, 0x40, 0xd2, 0x50, 0x14, 0x2c, 0x76, 0xce, 0x23, 0x82, 0x11, + 0x1b, 0x44, 0xc1, 0x20, 0x92, 0x1f, 0xc5, 0x7b, 0xdf, 0xb8, 0xde, 0xde, 0x57, 0x04, 0x51, 0x8f, + 0x81, 0xc6, 0x42, 0x67, 0xfc, 0x03, 0x12, 0x70, 0xe7, 0xcc, 0xa3, 0x1d, 0xd7, 0xc7, 0xe1, 0xb9, + 0xfc, 0x38, 0x0e, 0xb5, 0x7f, 0xbd, 0x50, 0x47, 0x1e, 0xad, 0xc4, 0x38, 0x63, 0x4c, 0x1e, 0x86, + 0x61, 0x41, 0xdc, 0x4d, 0xc5, 0x2f, 0x14, 0x46, 0x8f, 0x71, 0xc6, 0x98, 0x0c, 0xdb, 0x60, 0xbd, + 0xe7, 0xfa, 0x28, 0x20, 0xbe, 0x23, 0x26, 0x3c, 0xea, 0xf2, 0x68, 0x38, 0x5a, 0x3c, 0xc9, 0xae, + 0x4c, 0xa1, 0xe7, 0xfa, 0xed, 0x44, 0xa9, 0xb9, 0x3c, 0x4a, 0x46, 0x89, 0x3a, 0x90, 0x1c, 0x82, + 0x9d, 0xa9, 0xe6, 0x79, 0x9a, 0xcd, 0x5a, 0x49, 0x45, 0x69, 0x55, 0x0f, 0xc1, 0x5d, 0x31, 0x85, + 0x0a, 0x13, 0x47, 0x01, 0x09, 0x27, 0xdb, 0x48, 0x7e, 0x96, 0x0d, 0x5c, 0xf7, 0xf0, 0x99, 0x78, + 0x8b, 0xf1, 0x36, 0x09, 0x27, 0x7a, 0x07, 0x22, 0xf0, 0x40, 0x70, 0x03, 0x71, 0x35, 0x99, 0x0d, + 0x7f, 0x9e, 0x0d, 0xdf, 0xf4, 0xf0, 0x59, 0x3b, 0x24, 0xce, 0xac, 0x00, 0xdf, 0x81, 0x05, 0x1c, + 0x86, 0xf8, 0x1c, 0xf9, 0x03, 0x4a, 0xb9, 0xfc, 0xcb, 0xcc, 0x57, 0x0b, 0x88, 0xdd, 0x5b, 0xc2, + 0x1b, 0x0e, 0xc0, 0x8a, 0x98, 0xf8, 0x39, 0xc5, 0xbc, 0x8f, 0x8e, 0x07, 0x2c, 0x22, 0xf2, 0x8b, + 0xb8, 0xf8, 0xda, 0x35, 0xdb, 
0x39, 0x85, 0xbe, 0x13, 0x4c, 0x63, 0xb9, 0x33, 0xf5, 0x1b, 0xd6, + 0x41, 0x3e, 0x7d, 0x9f, 0x9d, 0xba, 0x51, 0x1f, 0x31, 0xd7, 0xe1, 0xf2, 0xd7, 0x99, 0x99, 0xaf, + 0x0c, 0x45, 0xef, 0xdd, 0xa8, 0xaf, 0xbb, 0x0e, 0x87, 0x2d, 0xb0, 0x46, 0xb8, 0x8d, 0x03, 0x22, + 0x6e, 0x0d, 0xa2, 0xa1, 0x4e, 0x71, 0xe8, 0xbb, 0x7e, 0x4f, 0xfe, 0x26, 0x93, 0x55, 0x48, 0x84, + 0x66, 0xac, 0x7b, 0x9f, 0xc8, 0xa0, 0x06, 0x56, 0x29, 0x43, 0x36, 0xf3, 0x02, 0x1c, 0xa1, 0x20, + 0x74, 0x4f, 0x5c, 0x4a, 0xc4, 0x88, 0xf6, 0xab, 0x4c, 0x1c, 0xa4, 0xac, 0x1a, 0xcb, 0xda, 0x23, + 0x95, 0xb8, 0xd8, 0xb0, 0x80, 0x84, 0x38, 0x62, 0xa1, 0xa8, 0xbf, 0x4d, 0x1c, 0xe2, 0xdb, 0x64, + 0x94, 0xe3, 0xaf, 0x33, 0xa1, 0x77, 0x53, 0x79, 0x7b, 0xa4, 0x4e, 0x33, 0x6d, 0x81, 0xb5, 0xb8, + 0x5c, 0x08, 0x53, 0x8a, 0x5c, 0x87, 0xf8, 0x91, 0xdb, 0x75, 0xc5, 0xa5, 0x75, 0x3b, 0xfb, 0xc9, + 0x63, 0xa1, 0x42, 0x69, 0x63, 0x2c, 0x13, 0xb9, 0xc6, 0x17, 0x30, 0x1c, 0x3a, 0x62, 0x1a, 0xea, + 0xb2, 0x30, 0xbe, 0x82, 0x25, 0xdb, 0xca, 0xe5, 0x52, 0x76, 0xae, 0xa9, 0xbc, 0x3a, 0x52, 0x27, + 0x7b, 0xcb, 0x61, 0x13, 0xac, 0xf2, 0x73, 0xdf, 0xee, 0x87, 0xcc, 0x77, 0xff, 0x44, 0x10, 0x27, + 0xc7, 0xdc, 0xc6, 0x3e, 0x97, 0x77, 0xb2, 0x53, 0x9d, 0xd0, 0x99, 0x43, 0x99, 0x78, 0xf4, 0xf8, + 0xf8, 0x88, 0x28, 0x71, 0xd3, 0x23, 0x72, 0x3c, 0xc0, 0x94, 0xcb, 0x2f, 0xb3, 0x79, 0x23, 0xa1, + 0x68, 0x7f, 0x35, 0x96, 0xc1, 0xef, 0xc1, 0x12, 0x39, 0x73, 0x23, 0xc4, 0x86, 0x13, 0xb2, 0x5c, + 0xce, 0xfe, 0x3a, 0x0b, 0x81, 0x9e, 0xcc, 0xba, 0xf0, 0x07, 0xb0, 0xc4, 0xc9, 0x31, 0x0a, 0x70, + 0x8f, 0x20, 0x9b, 0xf1, 0x48, 0xde, 0xbd, 0xc2, 0xa5, 0x6e, 0x81, 0x93, 0xe3, 0x36, 0xee, 0x91, + 0x2a, 0xe3, 0xf1, 0x3b, 0x2c, 0xc4, 0xbe, 0xc3, 0xbc, 0x09, 0xc8, 0xab, 0x2b, 0x40, 0x96, 0x13, + 0x55, 0xca, 0x29, 0xfe, 0x2b, 0x07, 0x0a, 0x33, 0xee, 0xa4, 0xf0, 0x29, 0x78, 0x54, 0xd5, 0x5b, + 0xa6, 0x65, 0x28, 0x8d, 0x96, 0x85, 0xd4, 0xa3, 0xaa, 0x76, 0x60, 0x36, 0xf4, 0x16, 0x3a, 0x68, + 0x99, 0x6d, 0xb5, 0xda, 0xa8, 0x37, 0xd4, 0x9a, 0xf4, 0x0b, 0xb8, 0x05, 0x36, 0x66, 0x7a, 0xe9, + 0x2d, 0x29, 0x07, 0xef, 0x01, 0x79, 0xf6, 0x62, 0xbd, 0x2e, 0xcd, 0xc1, 0x22, 0x78, 0x30, 0x73, + 0xb5, 0xad, 0x18, 0x56, 0xc3, 0x6a, 0xe8, 0x2d, 0x69, 0xbe, 0xf8, 0xf7, 0x1c, 0xc8, 0x5f, 0xba, + 0x9b, 0xc1, 0x27, 0xe0, 0x61, 0x5d, 0x37, 0xaa, 0xaa, 0x70, 0x55, 0x34, 0x4d, 0xd5, 0x50, 0x53, + 0xaf, 0xa9, 0x17, 0x32, 0xdb, 0x04, 0xeb, 0xb3, 0x9c, 0xe2, 0xc4, 0xb6, 0xc0, 0xc6, 0xcc, 0xb5, + 0x38, 0xaf, 0x87, 0x60, 0x6b, 0xd6, 0xa2, 0xa1, 0xee, 0x1b, 0xaa, 0x69, 0x8a, 0xa4, 0xe6, 0xc0, + 0xed, 0xf4, 0x06, 0x03, 0xef, 0x82, 0x35, 0x4d, 0xdf, 0x47, 0x9a, 0x7a, 0xa8, 0x6a, 0x17, 0x32, + 0x58, 0x05, 0xd2, 0x78, 0xa9, 0xa6, 0x56, 0x0e, 0xf6, 0x7f, 0x23, 0xe5, 0x66, 0x58, 0xf7, 0xa4, + 0xb9, 0x19, 0xd6, 0x57, 0xd2, 0xfc, 0x0c, 0xeb, 0xae, 0x74, 0x63, 0x86, 0xb5, 0x2c, 0x7d, 0x05, + 0xf3, 0x60, 0x69, 0x6c, 0xd5, 0xf4, 0x7d, 0xe9, 0xe6, 0xb4, 0x63, 0x4b, 0xb7, 0x1a, 0x55, 0x55, + 0xba, 0x05, 0xd7, 0x40, 0x7e, 0x6c, 0x7d, 0xaf, 0x18, 0xad, 0x46, 0x6b, 0x5f, 0xba, 0x0d, 0x0b, + 0x60, 0x65, 0x6c, 0x56, 0x0d, 0x43, 0x37, 0xa4, 0x3b, 0xd3, 0xc6, 0xba, 0x62, 0x29, 0x9a, 0x04, + 0xa6, 0x8d, 0x6d, 0xa5, 0xd5, 0xa8, 0x4a, 0x0b, 0xc5, 0x7f, 0xe6, 0x40, 0xfe, 0xd2, 0xb4, 0x2f, + 0x2a, 0x25, 0x5c, 0x63, 0x1c, 0x3a, 0x54, 0x8d, 0x8a, 0x6e, 0x36, 0xac, 0x1f, 0x2f, 0xec, 0xd3, + 0x7d, 0x70, 0x77, 0x96, 0x93, 0xa5, 0x1a, 0xa6, 0x2a, 0xe5, 0x44, 0x3d, 0x66, 0x2d, 0xd7, 0xd4, + 0xba, 0x72, 0xa0, 0x59, 0x49, 0xc1, 0x66, 0x39, 0x24, 0xff, 0x52, 0xa5, 0xf9, 0xe2, 0xdf, 0x72, + 0x60, 0x71, 0x72, 0x98, 0x4f, 0x23, 0x9a, 0x96, 0x62, 
0xa9, 0x4d, 0xb5, 0x65, 0x5d, 0x48, 0x68, + 0x1d, 0xc0, 0xe9, 0xe5, 0x96, 0xde, 0x12, 0x99, 0x0c, 0x77, 0x6e, 0x6c, 0xaf, 0xd5, 0x34, 0x69, + 0xee, 0xb2, 0xb9, 0xa9, 0xd7, 0xa4, 0xf9, 0xcb, 0x66, 0x45, 0xd3, 0xa4, 0x1b, 0xc5, 0x7f, 0xe7, + 0xc0, 0xea, 0xcc, 0xb9, 0xf8, 0x19, 0x78, 0x6c, 0x19, 0x4a, 0xcb, 0x54, 0xaa, 0xa2, 0xf9, 0x51, + 0xc3, 0xd4, 0x35, 0xc5, 0xba, 0x7c, 0xe2, 0xbe, 0x01, 0xcf, 0x67, 0xbb, 0x19, 0xaa, 0x52, 0x43, + 0x07, 0xad, 0xaa, 0xde, 0x6c, 0x36, 0x2c, 0x4b, 0xad, 0x49, 0x39, 0xf8, 0x02, 0x3c, 0xfd, 0x3f, + 0xbe, 0x63, 0xcf, 0x39, 0xf8, 0x35, 0x78, 0xf6, 0xbf, 0x3c, 0xdb, 0xaa, 0x62, 0x29, 0x15, 0x4d, + 0x8d, 0x45, 0xd2, 0x3c, 0x7c, 0x0e, 0x8a, 0xb3, 0x5d, 0x4d, 0xd5, 0x68, 0x28, 0x5a, 0xe3, 0x83, + 0x70, 0x96, 0x6e, 0x14, 0xff, 0x00, 0x16, 0x26, 0x06, 0x54, 0xf1, 0x32, 0xa8, 0xfc, 0x68, 0xa9, + 0x0a, 0xd2, 0x0f, 0xac, 0xf6, 0x81, 0x75, 0xf9, 0xac, 0x4c, 0xad, 0xbe, 0x55, 0x8f, 0xa4, 0x1c, + 0x94, 0xc1, 0xea, 0x94, 0x55, 0x35, 0xab, 0x4a, 0x5b, 0xe4, 0x5b, 0x34, 0xc0, 0x9d, 0xd1, 0x48, + 0x2a, 0x8e, 0xfa, 0x51, 0x53, 0x43, 0x95, 0x46, 0x4b, 0x31, 0x2e, 0x36, 0xd7, 0x1a, 0xc8, 0x4f, + 0xac, 0x55, 0x14, 0x53, 0x7d, 0xbd, 0x27, 0xe5, 0x20, 0x04, 0xcb, 0x13, 0x66, 0x11, 0x6d, 0xae, + 0x78, 0x14, 0x33, 0x93, 0xf9, 0x33, 0x65, 0xea, 0xed, 0x19, 0x25, 0xd8, 0x00, 0x85, 0x89, 0xb5, + 0x9a, 0x5e, 0x3d, 0x10, 0xf5, 0x95, 0x72, 0xa2, 0x71, 0x26, 0x16, 0xaa, 0x7a, 0xcb, 0x12, 0xf6, + 0x39, 0xf1, 0x8e, 0x5d, 0x9e, 0x9e, 0x6e, 0x44, 0xd3, 0x56, 0x94, 0xea, 0xef, 0x4d, 0x4d, 0x31, + 0xdf, 0xa2, 0x77, 0x07, 0xba, 0x75, 0xf1, 0xfd, 0x55, 0x00, 0x2b, 0x17, 0x1c, 0x92, 0x00, 0x17, + 0x55, 0x7a, 0x4b, 0x9a, 0x13, 0x19, 0x5d, 0xb2, 0xd7, 0xeb, 0xd2, 0x3c, 0x7c, 0x0c, 0xee, 0x5f, + 0x5c, 0x30, 0x95, 0xba, 0x8a, 0xd4, 0x56, 0x55, 0xaf, 0x89, 0x83, 0x7f, 0xa3, 0x72, 0xf8, 0xc1, + 0xea, 0xb9, 0x51, 0x7f, 0xd0, 0x29, 0xd9, 0xcc, 0xdb, 0x49, 0x26, 0xb8, 0xed, 0xe4, 0xbf, 0xe8, + 0x7b, 0x6c, 0xbb, 0x47, 0xfc, 0xf8, 0x33, 0xb2, 0x73, 0xa5, 0x3f, 0x1b, 0x7c, 0x37, 0x36, 0x76, + 0x6e, 0xc6, 0xba, 0x57, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x16, 0x37, 0xd9, 0x71, 0x18, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host9_6.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host9_6.pb.go new file mode 100644 index 000000000..efd8331a6 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/host9_6.pb.go @@ -0,0 +1,955 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/config/host9_6.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PostgresqlHostConfig9_6_ConstraintExclusion int32 + +const ( + PostgresqlHostConfig9_6_CONSTRAINT_EXCLUSION_UNSPECIFIED PostgresqlHostConfig9_6_ConstraintExclusion = 0 + PostgresqlHostConfig9_6_CONSTRAINT_EXCLUSION_ON PostgresqlHostConfig9_6_ConstraintExclusion = 1 + PostgresqlHostConfig9_6_CONSTRAINT_EXCLUSION_OFF PostgresqlHostConfig9_6_ConstraintExclusion = 2 + PostgresqlHostConfig9_6_CONSTRAINT_EXCLUSION_PARTITION PostgresqlHostConfig9_6_ConstraintExclusion = 3 +) + +var PostgresqlHostConfig9_6_ConstraintExclusion_name = map[int32]string{ + 0: "CONSTRAINT_EXCLUSION_UNSPECIFIED", + 1: "CONSTRAINT_EXCLUSION_ON", + 2: "CONSTRAINT_EXCLUSION_OFF", + 3: "CONSTRAINT_EXCLUSION_PARTITION", +} +var PostgresqlHostConfig9_6_ConstraintExclusion_value = map[string]int32{ + "CONSTRAINT_EXCLUSION_UNSPECIFIED": 0, + "CONSTRAINT_EXCLUSION_ON": 1, + "CONSTRAINT_EXCLUSION_OFF": 2, + "CONSTRAINT_EXCLUSION_PARTITION": 3, +} + +func (x PostgresqlHostConfig9_6_ConstraintExclusion) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_ConstraintExclusion_name, int32(x)) +} +func (PostgresqlHostConfig9_6_ConstraintExclusion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 0} +} + +type PostgresqlHostConfig9_6_ForceParallelMode int32 + +const ( + PostgresqlHostConfig9_6_FORCE_PARALLEL_MODE_UNSPECIFIED PostgresqlHostConfig9_6_ForceParallelMode = 0 + PostgresqlHostConfig9_6_FORCE_PARALLEL_MODE_ON PostgresqlHostConfig9_6_ForceParallelMode = 1 + PostgresqlHostConfig9_6_FORCE_PARALLEL_MODE_OFF PostgresqlHostConfig9_6_ForceParallelMode = 2 + PostgresqlHostConfig9_6_FORCE_PARALLEL_MODE_REGRESS PostgresqlHostConfig9_6_ForceParallelMode = 3 +) + +var PostgresqlHostConfig9_6_ForceParallelMode_name = map[int32]string{ + 0: "FORCE_PARALLEL_MODE_UNSPECIFIED", + 1: "FORCE_PARALLEL_MODE_ON", + 2: "FORCE_PARALLEL_MODE_OFF", + 3: "FORCE_PARALLEL_MODE_REGRESS", +} +var PostgresqlHostConfig9_6_ForceParallelMode_value = map[string]int32{ + "FORCE_PARALLEL_MODE_UNSPECIFIED": 0, + "FORCE_PARALLEL_MODE_ON": 1, + "FORCE_PARALLEL_MODE_OFF": 2, + "FORCE_PARALLEL_MODE_REGRESS": 3, +} + +func (x PostgresqlHostConfig9_6_ForceParallelMode) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_ForceParallelMode_name, int32(x)) +} +func (PostgresqlHostConfig9_6_ForceParallelMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 1} +} + +type PostgresqlHostConfig9_6_LogLevel int32 + +const ( + PostgresqlHostConfig9_6_LOG_LEVEL_UNSPECIFIED PostgresqlHostConfig9_6_LogLevel = 0 + PostgresqlHostConfig9_6_LOG_LEVEL_DEBUG5 PostgresqlHostConfig9_6_LogLevel = 1 + PostgresqlHostConfig9_6_LOG_LEVEL_DEBUG4 PostgresqlHostConfig9_6_LogLevel = 2 + PostgresqlHostConfig9_6_LOG_LEVEL_DEBUG3 PostgresqlHostConfig9_6_LogLevel = 3 + PostgresqlHostConfig9_6_LOG_LEVEL_DEBUG2 PostgresqlHostConfig9_6_LogLevel = 4 + PostgresqlHostConfig9_6_LOG_LEVEL_DEBUG1 PostgresqlHostConfig9_6_LogLevel = 5 + PostgresqlHostConfig9_6_LOG_LEVEL_LOG PostgresqlHostConfig9_6_LogLevel = 6 + PostgresqlHostConfig9_6_LOG_LEVEL_NOTICE PostgresqlHostConfig9_6_LogLevel = 7 + PostgresqlHostConfig9_6_LOG_LEVEL_WARNING PostgresqlHostConfig9_6_LogLevel = 8 + PostgresqlHostConfig9_6_LOG_LEVEL_ERROR PostgresqlHostConfig9_6_LogLevel = 9 + PostgresqlHostConfig9_6_LOG_LEVEL_FATAL PostgresqlHostConfig9_6_LogLevel = 10 + PostgresqlHostConfig9_6_LOG_LEVEL_PANIC PostgresqlHostConfig9_6_LogLevel = 11 +) + +var 
PostgresqlHostConfig9_6_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "LOG_LEVEL_DEBUG5", + 2: "LOG_LEVEL_DEBUG4", + 3: "LOG_LEVEL_DEBUG3", + 4: "LOG_LEVEL_DEBUG2", + 5: "LOG_LEVEL_DEBUG1", + 6: "LOG_LEVEL_LOG", + 7: "LOG_LEVEL_NOTICE", + 8: "LOG_LEVEL_WARNING", + 9: "LOG_LEVEL_ERROR", + 10: "LOG_LEVEL_FATAL", + 11: "LOG_LEVEL_PANIC", +} +var PostgresqlHostConfig9_6_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "LOG_LEVEL_DEBUG5": 1, + "LOG_LEVEL_DEBUG4": 2, + "LOG_LEVEL_DEBUG3": 3, + "LOG_LEVEL_DEBUG2": 4, + "LOG_LEVEL_DEBUG1": 5, + "LOG_LEVEL_LOG": 6, + "LOG_LEVEL_NOTICE": 7, + "LOG_LEVEL_WARNING": 8, + "LOG_LEVEL_ERROR": 9, + "LOG_LEVEL_FATAL": 10, + "LOG_LEVEL_PANIC": 11, +} + +func (x PostgresqlHostConfig9_6_LogLevel) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_LogLevel_name, int32(x)) +} +func (PostgresqlHostConfig9_6_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 2} +} + +type PostgresqlHostConfig9_6_LogErrorVerbosity int32 + +const ( + PostgresqlHostConfig9_6_LOG_ERROR_VERBOSITY_UNSPECIFIED PostgresqlHostConfig9_6_LogErrorVerbosity = 0 + PostgresqlHostConfig9_6_LOG_ERROR_VERBOSITY_TERSE PostgresqlHostConfig9_6_LogErrorVerbosity = 1 + PostgresqlHostConfig9_6_LOG_ERROR_VERBOSITY_DEFAULT PostgresqlHostConfig9_6_LogErrorVerbosity = 2 + PostgresqlHostConfig9_6_LOG_ERROR_VERBOSITY_VERBOSE PostgresqlHostConfig9_6_LogErrorVerbosity = 3 +) + +var PostgresqlHostConfig9_6_LogErrorVerbosity_name = map[int32]string{ + 0: "LOG_ERROR_VERBOSITY_UNSPECIFIED", + 1: "LOG_ERROR_VERBOSITY_TERSE", + 2: "LOG_ERROR_VERBOSITY_DEFAULT", + 3: "LOG_ERROR_VERBOSITY_VERBOSE", +} +var PostgresqlHostConfig9_6_LogErrorVerbosity_value = map[string]int32{ + "LOG_ERROR_VERBOSITY_UNSPECIFIED": 0, + "LOG_ERROR_VERBOSITY_TERSE": 1, + "LOG_ERROR_VERBOSITY_DEFAULT": 2, + "LOG_ERROR_VERBOSITY_VERBOSE": 3, +} + +func (x PostgresqlHostConfig9_6_LogErrorVerbosity) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_LogErrorVerbosity_name, int32(x)) +} +func (PostgresqlHostConfig9_6_LogErrorVerbosity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 3} +} + +type PostgresqlHostConfig9_6_LogStatement int32 + +const ( + PostgresqlHostConfig9_6_LOG_STATEMENT_UNSPECIFIED PostgresqlHostConfig9_6_LogStatement = 0 + PostgresqlHostConfig9_6_LOG_STATEMENT_NONE PostgresqlHostConfig9_6_LogStatement = 1 + PostgresqlHostConfig9_6_LOG_STATEMENT_DDL PostgresqlHostConfig9_6_LogStatement = 2 + PostgresqlHostConfig9_6_LOG_STATEMENT_MOD PostgresqlHostConfig9_6_LogStatement = 3 + PostgresqlHostConfig9_6_LOG_STATEMENT_ALL PostgresqlHostConfig9_6_LogStatement = 4 +) + +var PostgresqlHostConfig9_6_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: "LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var PostgresqlHostConfig9_6_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x PostgresqlHostConfig9_6_LogStatement) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_LogStatement_name, int32(x)) +} +func (PostgresqlHostConfig9_6_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 4} +} + +type PostgresqlHostConfig9_6_TransactionIsolation int32 + +const ( + 
PostgresqlHostConfig9_6_TRANSACTION_ISOLATION_UNSPECIFIED PostgresqlHostConfig9_6_TransactionIsolation = 0 + PostgresqlHostConfig9_6_TRANSACTION_ISOLATION_READ_UNCOMMITTED PostgresqlHostConfig9_6_TransactionIsolation = 1 + PostgresqlHostConfig9_6_TRANSACTION_ISOLATION_READ_COMMITTED PostgresqlHostConfig9_6_TransactionIsolation = 2 + PostgresqlHostConfig9_6_TRANSACTION_ISOLATION_REPEATABLE_READ PostgresqlHostConfig9_6_TransactionIsolation = 3 + PostgresqlHostConfig9_6_TRANSACTION_ISOLATION_SERIALIZABLE PostgresqlHostConfig9_6_TransactionIsolation = 4 +) + +var PostgresqlHostConfig9_6_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var PostgresqlHostConfig9_6_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x PostgresqlHostConfig9_6_TransactionIsolation) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_TransactionIsolation_name, int32(x)) +} +func (PostgresqlHostConfig9_6_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 5} +} + +type PostgresqlHostConfig9_6_ByteaOutput int32 + +const ( + PostgresqlHostConfig9_6_BYTEA_OUTPUT_UNSPECIFIED PostgresqlHostConfig9_6_ByteaOutput = 0 + PostgresqlHostConfig9_6_BYTEA_OUTPUT_HEX PostgresqlHostConfig9_6_ByteaOutput = 1 + PostgresqlHostConfig9_6_BYTEA_OUTPUT_ESCAPED PostgresqlHostConfig9_6_ByteaOutput = 2 +) + +var PostgresqlHostConfig9_6_ByteaOutput_name = map[int32]string{ + 0: "BYTEA_OUTPUT_UNSPECIFIED", + 1: "BYTEA_OUTPUT_HEX", + 2: "BYTEA_OUTPUT_ESCAPED", +} +var PostgresqlHostConfig9_6_ByteaOutput_value = map[string]int32{ + "BYTEA_OUTPUT_UNSPECIFIED": 0, + "BYTEA_OUTPUT_HEX": 1, + "BYTEA_OUTPUT_ESCAPED": 2, +} + +func (x PostgresqlHostConfig9_6_ByteaOutput) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_ByteaOutput_name, int32(x)) +} +func (PostgresqlHostConfig9_6_ByteaOutput) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 6} +} + +type PostgresqlHostConfig9_6_XmlBinary int32 + +const ( + PostgresqlHostConfig9_6_XML_BINARY_UNSPECIFIED PostgresqlHostConfig9_6_XmlBinary = 0 + PostgresqlHostConfig9_6_XML_BINARY_BASE64 PostgresqlHostConfig9_6_XmlBinary = 1 + PostgresqlHostConfig9_6_XML_BINARY_HEX PostgresqlHostConfig9_6_XmlBinary = 2 +) + +var PostgresqlHostConfig9_6_XmlBinary_name = map[int32]string{ + 0: "XML_BINARY_UNSPECIFIED", + 1: "XML_BINARY_BASE64", + 2: "XML_BINARY_HEX", +} +var PostgresqlHostConfig9_6_XmlBinary_value = map[string]int32{ + "XML_BINARY_UNSPECIFIED": 0, + "XML_BINARY_BASE64": 1, + "XML_BINARY_HEX": 2, +} + +func (x PostgresqlHostConfig9_6_XmlBinary) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_XmlBinary_name, int32(x)) +} +func (PostgresqlHostConfig9_6_XmlBinary) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 7} +} + +type PostgresqlHostConfig9_6_XmlOption int32 + +const ( + PostgresqlHostConfig9_6_XML_OPTION_UNSPECIFIED PostgresqlHostConfig9_6_XmlOption = 0 + PostgresqlHostConfig9_6_XML_OPTION_DOCUMENT PostgresqlHostConfig9_6_XmlOption = 1 + 
PostgresqlHostConfig9_6_XML_OPTION_CONTENT PostgresqlHostConfig9_6_XmlOption = 2 +) + +var PostgresqlHostConfig9_6_XmlOption_name = map[int32]string{ + 0: "XML_OPTION_UNSPECIFIED", + 1: "XML_OPTION_DOCUMENT", + 2: "XML_OPTION_CONTENT", +} +var PostgresqlHostConfig9_6_XmlOption_value = map[string]int32{ + "XML_OPTION_UNSPECIFIED": 0, + "XML_OPTION_DOCUMENT": 1, + "XML_OPTION_CONTENT": 2, +} + +func (x PostgresqlHostConfig9_6_XmlOption) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_XmlOption_name, int32(x)) +} +func (PostgresqlHostConfig9_6_XmlOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 8} +} + +type PostgresqlHostConfig9_6_BackslashQuote int32 + +const ( + PostgresqlHostConfig9_6_BACKSLASH_QUOTE_UNSPECIFIED PostgresqlHostConfig9_6_BackslashQuote = 0 + PostgresqlHostConfig9_6_BACKSLASH_QUOTE PostgresqlHostConfig9_6_BackslashQuote = 1 + PostgresqlHostConfig9_6_BACKSLASH_QUOTE_ON PostgresqlHostConfig9_6_BackslashQuote = 2 + PostgresqlHostConfig9_6_BACKSLASH_QUOTE_OFF PostgresqlHostConfig9_6_BackslashQuote = 3 + PostgresqlHostConfig9_6_BACKSLASH_QUOTE_SAFE_ENCODING PostgresqlHostConfig9_6_BackslashQuote = 4 +) + +var PostgresqlHostConfig9_6_BackslashQuote_name = map[int32]string{ + 0: "BACKSLASH_QUOTE_UNSPECIFIED", + 1: "BACKSLASH_QUOTE", + 2: "BACKSLASH_QUOTE_ON", + 3: "BACKSLASH_QUOTE_OFF", + 4: "BACKSLASH_QUOTE_SAFE_ENCODING", +} +var PostgresqlHostConfig9_6_BackslashQuote_value = map[string]int32{ + "BACKSLASH_QUOTE_UNSPECIFIED": 0, + "BACKSLASH_QUOTE": 1, + "BACKSLASH_QUOTE_ON": 2, + "BACKSLASH_QUOTE_OFF": 3, + "BACKSLASH_QUOTE_SAFE_ENCODING": 4, +} + +func (x PostgresqlHostConfig9_6_BackslashQuote) String() string { + return proto.EnumName(PostgresqlHostConfig9_6_BackslashQuote_name, int32(x)) +} +func (PostgresqlHostConfig9_6_BackslashQuote) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0, 9} +} + +// Options and structure of `PostgresqlHostConfig` reflects parameters of a PostgreSQL +// configuration file. Detailed description is available in +// [PostgreSQL documentation](https://www.postgresql.org/docs/9.6/runtime-config.html). 
+type PostgresqlHostConfig9_6 struct { + RecoveryMinApplyDelay *wrappers.Int64Value `protobuf:"bytes,1,opt,name=recovery_min_apply_delay,json=recoveryMinApplyDelay,proto3" json:"recovery_min_apply_delay,omitempty"` + SharedBuffers *wrappers.Int64Value `protobuf:"bytes,2,opt,name=shared_buffers,json=sharedBuffers,proto3" json:"shared_buffers,omitempty"` + TempBuffers *wrappers.Int64Value `protobuf:"bytes,3,opt,name=temp_buffers,json=tempBuffers,proto3" json:"temp_buffers,omitempty"` + WorkMem *wrappers.Int64Value `protobuf:"bytes,4,opt,name=work_mem,json=workMem,proto3" json:"work_mem,omitempty"` + ReplacementSortTuples *wrappers.Int64Value `protobuf:"bytes,5,opt,name=replacement_sort_tuples,json=replacementSortTuples,proto3" json:"replacement_sort_tuples,omitempty"` + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,6,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + BackendFlushAfter *wrappers.Int64Value `protobuf:"bytes,7,opt,name=backend_flush_after,json=backendFlushAfter,proto3" json:"backend_flush_after,omitempty"` + OldSnapshotThreshold *wrappers.Int64Value `protobuf:"bytes,8,opt,name=old_snapshot_threshold,json=oldSnapshotThreshold,proto3" json:"old_snapshot_threshold,omitempty"` + MaxStandbyStreamingDelay *wrappers.Int64Value `protobuf:"bytes,9,opt,name=max_standby_streaming_delay,json=maxStandbyStreamingDelay,proto3" json:"max_standby_streaming_delay,omitempty"` + ConstraintExclusion PostgresqlHostConfig9_6_ConstraintExclusion `protobuf:"varint,10,opt,name=constraint_exclusion,json=constraintExclusion,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_ConstraintExclusion" json:"constraint_exclusion,omitempty"` + CursorTupleFraction *wrappers.DoubleValue `protobuf:"bytes,11,opt,name=cursor_tuple_fraction,json=cursorTupleFraction,proto3" json:"cursor_tuple_fraction,omitempty"` + FromCollapseLimit *wrappers.Int64Value `protobuf:"bytes,12,opt,name=from_collapse_limit,json=fromCollapseLimit,proto3" json:"from_collapse_limit,omitempty"` + JoinCollapseLimit *wrappers.Int64Value `protobuf:"bytes,13,opt,name=join_collapse_limit,json=joinCollapseLimit,proto3" json:"join_collapse_limit,omitempty"` + ForceParallelMode PostgresqlHostConfig9_6_ForceParallelMode `protobuf:"varint,14,opt,name=force_parallel_mode,json=forceParallelMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_ForceParallelMode" json:"force_parallel_mode,omitempty"` + ClientMinMessages PostgresqlHostConfig9_6_LogLevel `protobuf:"varint,15,opt,name=client_min_messages,json=clientMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogLevel" json:"client_min_messages,omitempty"` + LogMinMessages PostgresqlHostConfig9_6_LogLevel `protobuf:"varint,16,opt,name=log_min_messages,json=logMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogLevel" json:"log_min_messages,omitempty"` + LogMinErrorStatement PostgresqlHostConfig9_6_LogLevel `protobuf:"varint,17,opt,name=log_min_error_statement,json=logMinErrorStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogLevel" json:"log_min_error_statement,omitempty"` + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,18,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" json:"log_min_duration_statement,omitempty"` + LogCheckpoints *wrappers.BoolValue `protobuf:"bytes,19,opt,name=log_checkpoints,json=logCheckpoints,proto3" json:"log_checkpoints,omitempty"` + LogConnections 
*wrappers.BoolValue `protobuf:"bytes,20,opt,name=log_connections,json=logConnections,proto3" json:"log_connections,omitempty"` + LogDisconnections *wrappers.BoolValue `protobuf:"bytes,21,opt,name=log_disconnections,json=logDisconnections,proto3" json:"log_disconnections,omitempty"` + LogDuration *wrappers.BoolValue `protobuf:"bytes,22,opt,name=log_duration,json=logDuration,proto3" json:"log_duration,omitempty"` + LogErrorVerbosity PostgresqlHostConfig9_6_LogErrorVerbosity `protobuf:"varint,23,opt,name=log_error_verbosity,json=logErrorVerbosity,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogErrorVerbosity" json:"log_error_verbosity,omitempty"` + LogLockWaits *wrappers.BoolValue `protobuf:"bytes,24,opt,name=log_lock_waits,json=logLockWaits,proto3" json:"log_lock_waits,omitempty"` + LogStatement PostgresqlHostConfig9_6_LogStatement `protobuf:"varint,25,opt,name=log_statement,json=logStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogStatement" json:"log_statement,omitempty"` + LogTempFiles *wrappers.Int64Value `protobuf:"bytes,26,opt,name=log_temp_files,json=logTempFiles,proto3" json:"log_temp_files,omitempty"` + SearchPath string `protobuf:"bytes,27,opt,name=search_path,json=searchPath,proto3" json:"search_path,omitempty"` + RowSecurity *wrappers.BoolValue `protobuf:"bytes,28,opt,name=row_security,json=rowSecurity,proto3" json:"row_security,omitempty"` + DefaultTransactionIsolation PostgresqlHostConfig9_6_TransactionIsolation `protobuf:"varint,29,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + StatementTimeout *wrappers.Int64Value `protobuf:"bytes,30,opt,name=statement_timeout,json=statementTimeout,proto3" json:"statement_timeout,omitempty"` + LockTimeout *wrappers.Int64Value `protobuf:"bytes,31,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + IdleInTransactionSessionTimeout *wrappers.Int64Value `protobuf:"bytes,32,opt,name=idle_in_transaction_session_timeout,json=idleInTransactionSessionTimeout,proto3" json:"idle_in_transaction_session_timeout,omitempty"` + ByteaOutput PostgresqlHostConfig9_6_ByteaOutput `protobuf:"varint,33,opt,name=bytea_output,json=byteaOutput,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_ByteaOutput" json:"bytea_output,omitempty"` + Xmlbinary PostgresqlHostConfig9_6_XmlBinary `protobuf:"varint,34,opt,name=xmlbinary,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_XmlBinary" json:"xmlbinary,omitempty"` + Xmloption PostgresqlHostConfig9_6_XmlOption `protobuf:"varint,35,opt,name=xmloption,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_XmlOption" json:"xmloption,omitempty"` + GinPendingListLimit *wrappers.Int64Value `protobuf:"bytes,36,opt,name=gin_pending_list_limit,json=ginPendingListLimit,proto3" json:"gin_pending_list_limit,omitempty"` + DeadlockTimeout *wrappers.Int64Value `protobuf:"bytes,37,opt,name=deadlock_timeout,json=deadlockTimeout,proto3" json:"deadlock_timeout,omitempty"` + MaxLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,38,opt,name=max_locks_per_transaction,json=maxLocksPerTransaction,proto3" json:"max_locks_per_transaction,omitempty"` + MaxPredLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,39,opt,name=max_pred_locks_per_transaction,json=maxPredLocksPerTransaction,proto3" 
json:"max_pred_locks_per_transaction,omitempty"` + ArrayNulls *wrappers.BoolValue `protobuf:"bytes,40,opt,name=array_nulls,json=arrayNulls,proto3" json:"array_nulls,omitempty"` + BackslashQuote PostgresqlHostConfig9_6_BackslashQuote `protobuf:"varint,41,opt,name=backslash_quote,json=backslashQuote,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_BackslashQuote" json:"backslash_quote,omitempty"` + DefaultWithOids *wrappers.BoolValue `protobuf:"bytes,42,opt,name=default_with_oids,json=defaultWithOids,proto3" json:"default_with_oids,omitempty"` + EscapeStringWarning *wrappers.BoolValue `protobuf:"bytes,43,opt,name=escape_string_warning,json=escapeStringWarning,proto3" json:"escape_string_warning,omitempty"` + LoCompatPrivileges *wrappers.BoolValue `protobuf:"bytes,44,opt,name=lo_compat_privileges,json=loCompatPrivileges,proto3" json:"lo_compat_privileges,omitempty"` + OperatorPrecedenceWarning *wrappers.BoolValue `protobuf:"bytes,45,opt,name=operator_precedence_warning,json=operatorPrecedenceWarning,proto3" json:"operator_precedence_warning,omitempty"` + QuoteAllIdentifiers *wrappers.BoolValue `protobuf:"bytes,46,opt,name=quote_all_identifiers,json=quoteAllIdentifiers,proto3" json:"quote_all_identifiers,omitempty"` + StandardConformingStrings *wrappers.BoolValue `protobuf:"bytes,47,opt,name=standard_conforming_strings,json=standardConformingStrings,proto3" json:"standard_conforming_strings,omitempty"` + SynchronizeSeqscans *wrappers.BoolValue `protobuf:"bytes,48,opt,name=synchronize_seqscans,json=synchronizeSeqscans,proto3" json:"synchronize_seqscans,omitempty"` + TransformNullEquals *wrappers.BoolValue `protobuf:"bytes,49,opt,name=transform_null_equals,json=transformNullEquals,proto3" json:"transform_null_equals,omitempty"` + ExitOnError *wrappers.BoolValue `protobuf:"bytes,50,opt,name=exit_on_error,json=exitOnError,proto3" json:"exit_on_error,omitempty"` + SeqPageCost *wrappers.DoubleValue `protobuf:"bytes,51,opt,name=seq_page_cost,json=seqPageCost,proto3" json:"seq_page_cost,omitempty"` + RandomPageCost *wrappers.DoubleValue `protobuf:"bytes,52,opt,name=random_page_cost,json=randomPageCost,proto3" json:"random_page_cost,omitempty"` + // This option has been removed in PostgreSQL 10. 
+ SqlInheritance *wrappers.BoolValue `protobuf:"bytes,53,opt,name=sql_inheritance,json=sqlInheritance,proto3" json:"sql_inheritance,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlHostConfig9_6) Reset() { *m = PostgresqlHostConfig9_6{} } +func (m *PostgresqlHostConfig9_6) String() string { return proto.CompactTextString(m) } +func (*PostgresqlHostConfig9_6) ProtoMessage() {} +func (*PostgresqlHostConfig9_6) Descriptor() ([]byte, []int) { + return fileDescriptor_host9_6_bf21c75a83cd3758, []int{0} +} +func (m *PostgresqlHostConfig9_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlHostConfig9_6.Unmarshal(m, b) +} +func (m *PostgresqlHostConfig9_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlHostConfig9_6.Marshal(b, m, deterministic) +} +func (dst *PostgresqlHostConfig9_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlHostConfig9_6.Merge(dst, src) +} +func (m *PostgresqlHostConfig9_6) XXX_Size() int { + return xxx_messageInfo_PostgresqlHostConfig9_6.Size(m) +} +func (m *PostgresqlHostConfig9_6) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlHostConfig9_6.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlHostConfig9_6 proto.InternalMessageInfo + +func (m *PostgresqlHostConfig9_6) GetRecoveryMinApplyDelay() *wrappers.Int64Value { + if m != nil { + return m.RecoveryMinApplyDelay + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetSharedBuffers() *wrappers.Int64Value { + if m != nil { + return m.SharedBuffers + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetTempBuffers() *wrappers.Int64Value { + if m != nil { + return m.TempBuffers + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetWorkMem() *wrappers.Int64Value { + if m != nil { + return m.WorkMem + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetReplacementSortTuples() *wrappers.Int64Value { + if m != nil { + return m.ReplacementSortTuples + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetBackendFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BackendFlushAfter + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetOldSnapshotThreshold() *wrappers.Int64Value { + if m != nil { + return m.OldSnapshotThreshold + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetMaxStandbyStreamingDelay() *wrappers.Int64Value { + if m != nil { + return m.MaxStandbyStreamingDelay + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetConstraintExclusion() PostgresqlHostConfig9_6_ConstraintExclusion { + if m != nil { + return m.ConstraintExclusion + } + return PostgresqlHostConfig9_6_CONSTRAINT_EXCLUSION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetCursorTupleFraction() *wrappers.DoubleValue { + if m != nil { + return m.CursorTupleFraction + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetFromCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.FromCollapseLimit + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetJoinCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.JoinCollapseLimit + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetForceParallelMode() PostgresqlHostConfig9_6_ForceParallelMode { + if m != nil { + return m.ForceParallelMode + } + return 
PostgresqlHostConfig9_6_FORCE_PARALLEL_MODE_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetClientMinMessages() PostgresqlHostConfig9_6_LogLevel { + if m != nil { + return m.ClientMinMessages + } + return PostgresqlHostConfig9_6_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetLogMinMessages() PostgresqlHostConfig9_6_LogLevel { + if m != nil { + return m.LogMinMessages + } + return PostgresqlHostConfig9_6_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetLogMinErrorStatement() PostgresqlHostConfig9_6_LogLevel { + if m != nil { + return m.LogMinErrorStatement + } + return PostgresqlHostConfig9_6_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLogCheckpoints() *wrappers.BoolValue { + if m != nil { + return m.LogCheckpoints + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLogConnections() *wrappers.BoolValue { + if m != nil { + return m.LogConnections + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLogDisconnections() *wrappers.BoolValue { + if m != nil { + return m.LogDisconnections + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLogDuration() *wrappers.BoolValue { + if m != nil { + return m.LogDuration + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLogErrorVerbosity() PostgresqlHostConfig9_6_LogErrorVerbosity { + if m != nil { + return m.LogErrorVerbosity + } + return PostgresqlHostConfig9_6_LOG_ERROR_VERBOSITY_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetLogLockWaits() *wrappers.BoolValue { + if m != nil { + return m.LogLockWaits + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLogStatement() PostgresqlHostConfig9_6_LogStatement { + if m != nil { + return m.LogStatement + } + return PostgresqlHostConfig9_6_LOG_STATEMENT_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetLogTempFiles() *wrappers.Int64Value { + if m != nil { + return m.LogTempFiles + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetSearchPath() string { + if m != nil { + return m.SearchPath + } + return "" +} + +func (m *PostgresqlHostConfig9_6) GetRowSecurity() *wrappers.BoolValue { + if m != nil { + return m.RowSecurity + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetDefaultTransactionIsolation() PostgresqlHostConfig9_6_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return PostgresqlHostConfig9_6_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetStatementTimeout() *wrappers.Int64Value { + if m != nil { + return m.StatementTimeout + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetIdleInTransactionSessionTimeout() *wrappers.Int64Value { + if m != nil { + return m.IdleInTransactionSessionTimeout + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetByteaOutput() PostgresqlHostConfig9_6_ByteaOutput { + if m != nil { + return m.ByteaOutput + } + return PostgresqlHostConfig9_6_BYTEA_OUTPUT_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetXmlbinary() PostgresqlHostConfig9_6_XmlBinary { + if m != nil { + return m.Xmlbinary + } + return PostgresqlHostConfig9_6_XML_BINARY_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetXmloption() PostgresqlHostConfig9_6_XmlOption { + if m != nil { + return m.Xmloption + 
} + return PostgresqlHostConfig9_6_XML_OPTION_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetGinPendingListLimit() *wrappers.Int64Value { + if m != nil { + return m.GinPendingListLimit + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetDeadlockTimeout() *wrappers.Int64Value { + if m != nil { + return m.DeadlockTimeout + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetMaxLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxLocksPerTransaction + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetMaxPredLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxPredLocksPerTransaction + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetArrayNulls() *wrappers.BoolValue { + if m != nil { + return m.ArrayNulls + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetBackslashQuote() PostgresqlHostConfig9_6_BackslashQuote { + if m != nil { + return m.BackslashQuote + } + return PostgresqlHostConfig9_6_BACKSLASH_QUOTE_UNSPECIFIED +} + +func (m *PostgresqlHostConfig9_6) GetDefaultWithOids() *wrappers.BoolValue { + if m != nil { + return m.DefaultWithOids + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetEscapeStringWarning() *wrappers.BoolValue { + if m != nil { + return m.EscapeStringWarning + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetLoCompatPrivileges() *wrappers.BoolValue { + if m != nil { + return m.LoCompatPrivileges + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetOperatorPrecedenceWarning() *wrappers.BoolValue { + if m != nil { + return m.OperatorPrecedenceWarning + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetQuoteAllIdentifiers() *wrappers.BoolValue { + if m != nil { + return m.QuoteAllIdentifiers + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetStandardConformingStrings() *wrappers.BoolValue { + if m != nil { + return m.StandardConformingStrings + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetSynchronizeSeqscans() *wrappers.BoolValue { + if m != nil { + return m.SynchronizeSeqscans + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetTransformNullEquals() *wrappers.BoolValue { + if m != nil { + return m.TransformNullEquals + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetExitOnError() *wrappers.BoolValue { + if m != nil { + return m.ExitOnError + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetSeqPageCost() *wrappers.DoubleValue { + if m != nil { + return m.SeqPageCost + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetRandomPageCost() *wrappers.DoubleValue { + if m != nil { + return m.RandomPageCost + } + return nil +} + +func (m *PostgresqlHostConfig9_6) GetSqlInheritance() *wrappers.BoolValue { + if m != nil { + return m.SqlInheritance + } + return nil +} + +func init() { + proto.RegisterType((*PostgresqlHostConfig9_6)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_ConstraintExclusion", PostgresqlHostConfig9_6_ConstraintExclusion_name, PostgresqlHostConfig9_6_ConstraintExclusion_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_ForceParallelMode", PostgresqlHostConfig9_6_ForceParallelMode_name, PostgresqlHostConfig9_6_ForceParallelMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogLevel", PostgresqlHostConfig9_6_LogLevel_name, PostgresqlHostConfig9_6_LogLevel_value) + 
proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogErrorVerbosity", PostgresqlHostConfig9_6_LogErrorVerbosity_name, PostgresqlHostConfig9_6_LogErrorVerbosity_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_LogStatement", PostgresqlHostConfig9_6_LogStatement_name, PostgresqlHostConfig9_6_LogStatement_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_TransactionIsolation", PostgresqlHostConfig9_6_TransactionIsolation_name, PostgresqlHostConfig9_6_TransactionIsolation_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_ByteaOutput", PostgresqlHostConfig9_6_ByteaOutput_name, PostgresqlHostConfig9_6_ByteaOutput_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_XmlBinary", PostgresqlHostConfig9_6_XmlBinary_name, PostgresqlHostConfig9_6_XmlBinary_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_XmlOption", PostgresqlHostConfig9_6_XmlOption_name, PostgresqlHostConfig9_6_XmlOption_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig9_6_BackslashQuote", PostgresqlHostConfig9_6_BackslashQuote_name, PostgresqlHostConfig9_6_BackslashQuote_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/config/host9_6.proto", fileDescriptor_host9_6_bf21c75a83cd3758) +} + +var fileDescriptor_host9_6_bf21c75a83cd3758 = []byte{ + // 2235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x99, 0x5b, 0x73, 0xdb, 0xb8, + 0x15, 0xc7, 0x2b, 0x3b, 0x9b, 0x4d, 0xe0, 0x1b, 0x05, 0xf9, 0xc2, 0xd8, 0xb9, 0x2a, 0x97, 0x66, + 0xb7, 0xb5, 0x7c, 0x89, 0xd7, 0x9b, 0x9d, 0x9d, 0xee, 0x2c, 0x25, 0x51, 0x8e, 0xba, 0x94, 0xa8, + 0x25, 0x69, 0xc7, 0x9b, 0xce, 0x0e, 0x06, 0x22, 0x21, 0x89, 0x0d, 0x48, 0xd0, 0x04, 0xe5, 0x4b, + 0x1f, 0x3a, 0x7d, 0xe9, 0x4b, 0x1f, 0xfb, 0xd2, 0x69, 0x67, 0xfa, 0x79, 0xf2, 0x4d, 0xfa, 0x21, + 0xf2, 0xd4, 0x01, 0x29, 0xea, 0x62, 0x6b, 0x4b, 0x4f, 0x9d, 0x37, 0xfb, 0xe0, 0xfc, 0x7f, 0xe7, + 0x00, 0x38, 0x00, 0x0f, 0x46, 0xe0, 0xd5, 0x05, 0xf6, 0x1d, 0x72, 0xbe, 0x65, 0x53, 0xd6, 0x77, + 0xb6, 0x3c, 0xa7, 0xbd, 0x15, 0x30, 0x1e, 0x75, 0x43, 0xc2, 0x4f, 0xe8, 0xd6, 0xe9, 0xce, 0x96, + 0xcd, 0xfc, 0x8e, 0xdb, 0xdd, 0xea, 0x31, 0x1e, 0x7d, 0x83, 0xf6, 0x4b, 0x41, 0xc8, 0x22, 0x06, + 0x9f, 0x27, 0xa2, 0x52, 0x2c, 0x2a, 0x79, 0x4e, 0xbb, 0x34, 0x12, 0x95, 0x4e, 0x77, 0x4a, 0x89, + 0x68, 0xfd, 0x61, 0x97, 0xb1, 0x2e, 0x25, 0x5b, 0xb1, 0xa8, 0xdd, 0xef, 0x6c, 0x9d, 0x85, 0x38, + 0x08, 0x48, 0xc8, 0x13, 0xcc, 0xfa, 0x83, 0x89, 0xd8, 0xa7, 0x98, 0xba, 0x0e, 0x8e, 0x5c, 0xe6, + 0x27, 0xc3, 0xc5, 0x7f, 0x6f, 0x83, 0xb5, 0xd6, 0x90, 0xfb, 0x86, 0xf1, 0xa8, 0x12, 0x73, 0xbf, + 0x41, 0xfb, 0xd0, 0x02, 0x72, 0x48, 0x6c, 0x76, 0x4a, 0xc2, 0x0b, 0xe4, 0xb9, 0x3e, 0xc2, 0x41, + 0x40, 0x2f, 0x90, 0x43, 0x28, 0xbe, 0x90, 0x73, 0x8f, 0x73, 0x2f, 0xe7, 0x76, 0x37, 0x4a, 0x49, + 0xf4, 0x52, 0x1a, 0xbd, 0x54, 0xf7, 0xa3, 0xfd, 0xbd, 0x23, 0x4c, 0xfb, 0xc4, 0x58, 0x49, 0xc5, + 0x0d, 0xd7, 0x57, 0x84, 0xb4, 0x2a, 0x94, 0xb0, 0x0c, 0x16, 0x79, 0x0f, 0x87, 0xc4, 0x41, 0xed, + 0x7e, 0xa7, 0x43, 0x42, 0x2e, 0xcf, 0x64, 0xb3, 0x16, 0x12, 0x49, 0x39, 0x51, 0xc0, 0xef, 0xc0, + 0x7c, 0x44, 0xbc, 0x60, 0x48, 0x98, 0xcd, 0x26, 0xcc, 0x09, 0x41, 0xaa, 0xdf, 0x07, 0x77, 0xce, + 0x58, 0xf8, 0x1e, 0x79, 0xc4, 0x93, 0x6f, 0x65, 0x6b, 0x3f, 0x17, 0xce, 0x0d, 0xe2, 0x41, 0x13, + 0xac, 0x85, 0x24, 
0xa0, 0xd8, 0x26, 0x1e, 0xf1, 0x23, 0xc4, 0x59, 0x18, 0xa1, 0xa8, 0x1f, 0x50, + 0xc2, 0xe5, 0xcf, 0xae, 0xb5, 0x20, 0x43, 0xad, 0xc9, 0xc2, 0xc8, 0x8a, 0x95, 0xb0, 0x02, 0x96, + 0xe2, 0xc9, 0x74, 0x5c, 0x4a, 0x10, 0x75, 0x3d, 0x37, 0x92, 0x6f, 0x5f, 0x63, 0x45, 0x84, 0xa6, + 0xe6, 0x52, 0xa2, 0x09, 0x05, 0x7c, 0x0b, 0x0a, 0x6d, 0x6c, 0xbf, 0x27, 0xbe, 0x83, 0x3a, 0xb4, + 0xcf, 0x7b, 0x08, 0x77, 0x22, 0x12, 0xca, 0x9f, 0x67, 0x82, 0xca, 0xe0, 0xe3, 0x87, 0x9d, 0xdb, + 0xdb, 0x9b, 0xbb, 0xdb, 0x7b, 0xaf, 0x8d, 0xfc, 0x80, 0x51, 0x13, 0x08, 0x45, 0x10, 0x20, 0x02, + 0xab, 0x8c, 0x3a, 0x88, 0xfb, 0x38, 0xe0, 0x3d, 0x16, 0xa1, 0xa8, 0x17, 0x12, 0xde, 0x63, 0xd4, + 0x91, 0xef, 0x64, 0xb3, 0xe7, 0x3f, 0x7e, 0xd8, 0xb9, 0xb3, 0xb9, 0xb3, 0xf9, 0x7a, 0x7f, 0x6f, + 0x7b, 0xdb, 0x58, 0x66, 0xd4, 0x31, 0x07, 0x1c, 0x2b, 0xc5, 0xc0, 0x77, 0x60, 0xc3, 0xc3, 0xe7, + 0x88, 0x47, 0xd8, 0x77, 0xda, 0x17, 0x88, 0x47, 0x21, 0xc1, 0x9e, 0xeb, 0x77, 0x07, 0x85, 0x76, + 0x37, 0x7b, 0x29, 0x64, 0x0f, 0x9f, 0x9b, 0x89, 0xdc, 0x4c, 0xd5, 0x49, 0xad, 0xfd, 0x35, 0x07, + 0x96, 0x6d, 0xe6, 0xf3, 0x28, 0xc4, 0xae, 0x1f, 0x21, 0x72, 0x6e, 0xd3, 0x3e, 0x77, 0x99, 0x2f, + 0x83, 0xc7, 0xb9, 0x97, 0x8b, 0xbb, 0x46, 0xe9, 0x5a, 0x67, 0xac, 0xf4, 0x0b, 0x07, 0xa4, 0x54, + 0x19, 0xa2, 0xd5, 0x94, 0x6c, 0x14, 0xec, 0xab, 0x46, 0xd8, 0x02, 0x2b, 0x76, 0x3f, 0xe4, 0x2c, + 0x4c, 0xaa, 0x05, 0x75, 0x42, 0x6c, 0x8b, 0x43, 0x28, 0xcf, 0xc5, 0xb3, 0xbb, 0x7f, 0x65, 0x76, + 0x55, 0xd6, 0x6f, 0x53, 0x92, 0x4c, 0xaf, 0x90, 0x48, 0xe3, 0x6a, 0xa9, 0x0d, 0x84, 0xf0, 0x67, + 0x50, 0xe8, 0x84, 0xcc, 0x43, 0x36, 0xa3, 0x14, 0x07, 0x3c, 0x2d, 0x9c, 0xf9, 0xec, 0x3d, 0x91, + 0x3e, 0x7e, 0xd8, 0x99, 0xdf, 0xd9, 0xdc, 0xdd, 0xd9, 0xfb, 0x7a, 0xef, 0xf5, 0xab, 0xfd, 0xbd, + 0xaf, 0x8d, 0xbc, 0x20, 0x55, 0x06, 0xa0, 0xa4, 0x9c, 0x7e, 0x06, 0x85, 0x3f, 0x32, 0xd7, 0xbf, + 0x8c, 0x5f, 0xf8, 0xbf, 0xf0, 0x82, 0x34, 0x89, 0xff, 0x4b, 0x0e, 0x14, 0x3a, 0x2c, 0xb4, 0x09, + 0x0a, 0x70, 0x88, 0x29, 0x25, 0x14, 0x79, 0xcc, 0x21, 0xf2, 0x62, 0xbc, 0x2d, 0xad, 0x1b, 0x6e, + 0x4b, 0x4d, 0x90, 0x5b, 0x03, 0x70, 0x83, 0x39, 0xc4, 0xc8, 0x77, 0x2e, 0x9b, 0xe0, 0x19, 0x28, + 0xd8, 0xd4, 0x15, 0xa7, 0x58, 0x5c, 0x6d, 0x1e, 0xe1, 0x1c, 0x77, 0x09, 0x97, 0x97, 0xe2, 0x0c, + 0x0e, 0x6e, 0x98, 0x81, 0xc6, 0xba, 0x1a, 0x39, 0x25, 0xd4, 0xc8, 0x27, 0x31, 0x1a, 0xae, 0xdf, + 0x18, 0x44, 0x80, 0x27, 0x40, 0xa2, 0xac, 0x3b, 0x19, 0x55, 0xfa, 0xb4, 0x51, 0x17, 0x29, 0xeb, + 0x8e, 0x87, 0xfc, 0x33, 0x58, 0x4b, 0x43, 0x92, 0x30, 0x64, 0xa1, 0x38, 0x6c, 0x51, 0x7c, 0x0d, + 0xc9, 0xf9, 0x4f, 0x1b, 0x79, 0x39, 0x89, 0xac, 0x8a, 0x28, 0x66, 0x1a, 0x04, 0x1e, 0x83, 0xf5, + 0x34, 0xbe, 0xd3, 0x0f, 0xe3, 0xcf, 0xcf, 0x58, 0x0a, 0x30, 0xfb, 0x84, 0xaf, 0x25, 0xd8, 0xea, + 0x40, 0x3c, 0x22, 0x57, 0xc0, 0x92, 0x20, 0xdb, 0x3d, 0x62, 0xbf, 0x0f, 0x98, 0xeb, 0x47, 0x5c, + 0x2e, 0xc4, 0xb8, 0xf5, 0x2b, 0xb8, 0x32, 0x63, 0x34, 0xa1, 0x89, 0xe5, 0xa9, 0x8c, 0x14, 0x43, + 0x08, 0xf3, 0x7d, 0x12, 0x9f, 0x2e, 0x2e, 0x2f, 0x5f, 0x0f, 0x32, 0x52, 0xc0, 0x3a, 0x80, 0x02, + 0xe2, 0xb8, 0x7c, 0x9c, 0xb3, 0x92, 0xc9, 0xc9, 0x53, 0xd6, 0xad, 0x4e, 0x88, 0xe0, 0xef, 0xc0, + 0x7c, 0x8c, 0x1a, 0xcc, 0x56, 0x5e, 0xcd, 0x84, 0xcc, 0x09, 0xc8, 0xc0, 0x3d, 0x3e, 0x5c, 0x42, + 0x9f, 0x6c, 0xf5, 0x29, 0x09, 0xdb, 0x8c, 0xbb, 0xd1, 0x85, 0xbc, 0xf6, 0x49, 0x0e, 0x97, 0xc6, + 0xba, 0xf1, 0xee, 0x1e, 0xa5, 0xdc, 0x78, 0x06, 0x93, 0x26, 0xf8, 0x3d, 0x10, 0xcb, 0x83, 0x28, + 0xb3, 0xdf, 0xa3, 0x33, 0xec, 0x46, 0x5c, 0x96, 0x33, 0xe7, 0x20, 0xe6, 0xac, 0x31, 0xfb, 0xfd, + 0x5b, 0xe1, 0x0f, 0x03, 0xb0, 0x20, 0x08, 
0xa3, 0x2a, 0xb9, 0x17, 0x67, 0xff, 0xc3, 0xcd, 0xb3, + 0x1f, 0x16, 0x4f, 0x1c, 0x71, 0x54, 0x4a, 0x4a, 0x92, 0xf3, 0xf0, 0x53, 0xcc, 0xe5, 0xf5, 0xec, + 0xc2, 0x14, 0x08, 0x6b, 0xf0, 0x21, 0xe6, 0xf0, 0x11, 0x98, 0xe3, 0x04, 0x87, 0x76, 0x0f, 0x05, + 0x38, 0xea, 0xc9, 0x1b, 0x8f, 0x73, 0x2f, 0xef, 0x1a, 0x20, 0x31, 0xb5, 0x70, 0xd4, 0x13, 0x3b, + 0x1b, 0xb2, 0x33, 0xc4, 0x89, 0xdd, 0x0f, 0xc5, 0x96, 0xdc, 0xcf, 0xde, 0xd9, 0x90, 0x9d, 0x99, + 0x03, 0x77, 0xf8, 0x8f, 0x1c, 0x78, 0xe0, 0x90, 0x0e, 0xee, 0xd3, 0x08, 0x45, 0x21, 0xf6, 0x79, + 0xf2, 0x31, 0x40, 0x2e, 0x67, 0x34, 0x29, 0x95, 0x07, 0xf1, 0x2a, 0x99, 0x37, 0x5c, 0x25, 0x6b, + 0xc4, 0xae, 0xa7, 0x68, 0x63, 0x63, 0x10, 0x79, 0xda, 0x20, 0x7c, 0x03, 0xf2, 0xc3, 0xad, 0x42, + 0x91, 0xeb, 0x11, 0xd6, 0x8f, 0xe4, 0x87, 0xd9, 0xeb, 0x27, 0x0d, 0x55, 0x56, 0x22, 0x12, 0xad, + 0x5d, 0x5c, 0x36, 0x29, 0xe4, 0xd1, 0x35, 0x5a, 0x3b, 0x21, 0x48, 0xf5, 0x2e, 0x78, 0xea, 0x3a, + 0x94, 0x20, 0xd7, 0x9f, 0x58, 0x22, 0x4e, 0xb8, 0xf8, 0x12, 0x0f, 0xb1, 0x8f, 0xb3, 0xb1, 0x8f, + 0x04, 0xa7, 0xee, 0x8f, 0xcd, 0xd7, 0x4c, 0x20, 0x69, 0x28, 0x0f, 0xcc, 0xb7, 0x2f, 0x22, 0x82, + 0x11, 0xeb, 0x47, 0x41, 0x3f, 0x92, 0x9f, 0xc4, 0x8b, 0xff, 0xfb, 0x1b, 0x2e, 0x7e, 0x59, 0x20, + 0xf5, 0x98, 0x68, 0xcc, 0xb5, 0x47, 0xff, 0xc0, 0x0e, 0xb8, 0x7b, 0xee, 0xd1, 0xb6, 0xeb, 0xe3, + 0xf0, 0x42, 0x2e, 0xc6, 0xb1, 0xde, 0xdc, 0x30, 0xd6, 0xb1, 0x47, 0xcb, 0x31, 0xcf, 0x18, 0xa1, + 0x07, 0x71, 0x58, 0x10, 0x17, 0xd4, 0xd3, 0x4f, 0x15, 0x47, 0x8f, 0x79, 0xc6, 0x08, 0x0d, 0x5b, + 0x60, 0xb5, 0xeb, 0xfa, 0x28, 0x20, 0xbe, 0x23, 0xda, 0x3d, 0xea, 0xf2, 0x68, 0xd0, 0x66, 0x3c, + 0xcb, 0xde, 0x9c, 0x42, 0xd7, 0xf5, 0x5b, 0x89, 0x52, 0x73, 0x79, 0x94, 0xb4, 0x15, 0x35, 0x20, + 0x39, 0x04, 0x3b, 0x13, 0xf5, 0xf3, 0x3c, 0x9b, 0xb5, 0x94, 0x8a, 0xd2, 0x8d, 0x3d, 0x02, 0xf7, + 0x44, 0x4b, 0x2a, 0x4c, 0x1c, 0x05, 0x24, 0x1c, 0xaf, 0x24, 0xf9, 0x45, 0x36, 0x70, 0xd5, 0xc3, + 0xe7, 0xe2, 0x2a, 0xe3, 0x2d, 0x12, 0x8e, 0x95, 0x0f, 0x44, 0xe0, 0xa1, 0xe0, 0x06, 0xe2, 0xf1, + 0x33, 0x1d, 0xfe, 0xeb, 0x6c, 0xf8, 0xba, 0x87, 0xcf, 0x5b, 0x21, 0x71, 0xa6, 0x05, 0xf8, 0x16, + 0xcc, 0xe1, 0x30, 0xc4, 0x17, 0xc8, 0xef, 0x53, 0xca, 0xe5, 0x97, 0x99, 0xd7, 0x0b, 0x88, 0xdd, + 0x9b, 0xc2, 0x1b, 0x9e, 0x82, 0x25, 0xd1, 0xfe, 0x73, 0x8a, 0x79, 0x0f, 0x9d, 0xf4, 0x59, 0x44, + 0xe4, 0x2f, 0xe2, 0xdd, 0x6f, 0xdc, 0xb4, 0xa2, 0x53, 0xea, 0x8f, 0x02, 0x6a, 0x2c, 0xb6, 0x27, + 0xfe, 0x87, 0x35, 0x90, 0x4f, 0x2f, 0xb5, 0x33, 0x37, 0xea, 0x21, 0xe6, 0x3a, 0x5c, 0xfe, 0x32, + 0x33, 0xf5, 0xa5, 0x81, 0xe8, 0xad, 0x1b, 0xf5, 0x74, 0xd7, 0xe1, 0xb0, 0x09, 0x56, 0x08, 0xb7, + 0x71, 0x40, 0xc4, 0x1b, 0x42, 0x54, 0xd4, 0x19, 0x0e, 0x7d, 0xd7, 0xef, 0xca, 0xbf, 0xc9, 0x64, + 0x15, 0x12, 0xa1, 0x19, 0xeb, 0xde, 0x26, 0x32, 0xa8, 0x81, 0x65, 0xca, 0x90, 0xcd, 0xbc, 0x00, + 0x47, 0x28, 0x08, 0xdd, 0x53, 0x97, 0x12, 0xd1, 0xac, 0xfd, 0x36, 0x13, 0x07, 0x29, 0xab, 0xc4, + 0xb2, 0xd6, 0x50, 0x25, 0x9e, 0x39, 0x2c, 0x20, 0x21, 0x8e, 0x58, 0x28, 0x0a, 0xc0, 0x26, 0x0e, + 0xf1, 0x6d, 0x32, 0xcc, 0x71, 0x33, 0x13, 0x7a, 0x2f, 0x95, 0xb7, 0x86, 0xea, 0x34, 0xd3, 0x26, + 0x58, 0x89, 0xf7, 0x0b, 0x61, 0x4a, 0x91, 0xeb, 0x10, 0x3f, 0x72, 0x3b, 0xae, 0x78, 0x17, 0x97, + 0xb2, 0x67, 0x1e, 0x0b, 0x15, 0x4a, 0xeb, 0x23, 0x99, 0xc8, 0x35, 0x7e, 0x8e, 0xe1, 0xd0, 0x11, + 0x5d, 0x51, 0x87, 0x85, 0xf1, 0x83, 0x2c, 0x59, 0x56, 0x2e, 0x6f, 0x65, 0xe7, 0x9a, 0xca, 0x2b, + 0x43, 0x75, 0xb2, 0xb6, 0x1c, 0x36, 0xc0, 0x32, 0xbf, 0xf0, 0xed, 0x5e, 0xc8, 0x7c, 0xf7, 0x4f, + 0x04, 0x71, 0x72, 0xc2, 0x6d, 0xec, 0x73, 0x79, 0x3b, 0x3b, 0xd5, 
0x31, 0x9d, 0x39, 0x90, 0x89, + 0xa9, 0xc7, 0xe7, 0x47, 0x44, 0x89, 0xab, 0x1e, 0x91, 0x93, 0x3e, 0xa6, 0x5c, 0xde, 0xc9, 0xe6, + 0x0d, 0x85, 0xa2, 0xfe, 0xd5, 0x58, 0x06, 0xbf, 0x03, 0x0b, 0xe4, 0xdc, 0x8d, 0x10, 0x1b, 0xb4, + 0xca, 0xf2, 0x6e, 0xf6, 0x27, 0x5a, 0x08, 0xf4, 0xa4, 0xe7, 0x85, 0xdf, 0x83, 0x05, 0x4e, 0x4e, + 0x50, 0x80, 0xbb, 0x04, 0xd9, 0x8c, 0x47, 0xf2, 0xab, 0x6b, 0xbc, 0xf0, 0xe6, 0x38, 0x39, 0x69, + 0xe1, 0x2e, 0xa9, 0x30, 0x1e, 0x5f, 0x62, 0x21, 0xf6, 0x1d, 0xe6, 0x8d, 0x41, 0xf6, 0xae, 0x01, + 0x59, 0x4c, 0x54, 0x43, 0x4e, 0x05, 0x2c, 0xf1, 0x13, 0x8a, 0x5c, 0xbf, 0x47, 0x42, 0x37, 0xc2, + 0xbe, 0x4d, 0xe4, 0xaf, 0xb2, 0xbb, 0x5a, 0x7e, 0x42, 0xeb, 0x23, 0x45, 0xf1, 0x5f, 0x39, 0x50, + 0x98, 0xf2, 0xca, 0x85, 0xcf, 0xc0, 0xe3, 0x8a, 0xde, 0x34, 0x2d, 0x43, 0xa9, 0x37, 0x2d, 0xa4, + 0x1e, 0x57, 0xb4, 0x43, 0xb3, 0xae, 0x37, 0xd1, 0x61, 0xd3, 0x6c, 0xa9, 0x95, 0x7a, 0xad, 0xae, + 0x56, 0xa5, 0x5f, 0xc1, 0x0d, 0xb0, 0x36, 0xd5, 0x4b, 0x6f, 0x4a, 0x39, 0x78, 0x1f, 0xc8, 0xd3, + 0x07, 0x6b, 0x35, 0x69, 0x06, 0x16, 0xc1, 0xc3, 0xa9, 0xa3, 0x2d, 0xc5, 0xb0, 0xea, 0x56, 0x5d, + 0x6f, 0x4a, 0xb3, 0xc5, 0xbf, 0xe7, 0x40, 0xfe, 0xca, 0x5b, 0x0f, 0x3e, 0x05, 0x8f, 0x6a, 0xba, + 0x51, 0x51, 0x85, 0xab, 0xa2, 0x69, 0xaa, 0x86, 0x1a, 0x7a, 0x55, 0xbd, 0x94, 0xd9, 0x3a, 0x58, + 0x9d, 0xe6, 0x14, 0x27, 0xb6, 0x01, 0xd6, 0xa6, 0x8e, 0xc5, 0x79, 0x3d, 0x02, 0x1b, 0xd3, 0x06, + 0x0d, 0xf5, 0xc0, 0x50, 0x4d, 0x53, 0x24, 0x35, 0x03, 0xee, 0xa4, 0xcf, 0x21, 0x78, 0x0f, 0xac, + 0x68, 0xfa, 0x01, 0xd2, 0xd4, 0x23, 0x55, 0xbb, 0x94, 0xc1, 0x32, 0x90, 0x46, 0x43, 0x55, 0xb5, + 0x7c, 0x78, 0xf0, 0x95, 0x94, 0x9b, 0x62, 0xdd, 0x93, 0x66, 0xa6, 0x58, 0x5f, 0x49, 0xb3, 0x53, + 0xac, 0xbb, 0xd2, 0xad, 0x29, 0xd6, 0x1d, 0xe9, 0x33, 0x98, 0x07, 0x0b, 0x23, 0xab, 0xa6, 0x1f, + 0x48, 0xb7, 0x27, 0x1d, 0x9b, 0xba, 0x55, 0xaf, 0xa8, 0xd2, 0xe7, 0x70, 0x05, 0xe4, 0x47, 0xd6, + 0xb7, 0x8a, 0xd1, 0xac, 0x37, 0x0f, 0xa4, 0x3b, 0xb0, 0x00, 0x96, 0x46, 0x66, 0xd5, 0x30, 0x74, + 0x43, 0xba, 0x3b, 0x69, 0xac, 0x29, 0x96, 0xa2, 0x49, 0x60, 0xd2, 0xd8, 0x52, 0x9a, 0xf5, 0x8a, + 0x34, 0x57, 0xfc, 0x67, 0x0e, 0xe4, 0xaf, 0x3c, 0x1c, 0xc4, 0x4e, 0x09, 0xd7, 0x18, 0x87, 0x8e, + 0x54, 0xa3, 0xac, 0x9b, 0x75, 0xeb, 0xa7, 0x4b, 0xeb, 0xf4, 0x00, 0xdc, 0x9b, 0xe6, 0x64, 0xa9, + 0x86, 0xa9, 0x4a, 0x39, 0xb1, 0x1f, 0xd3, 0x86, 0xab, 0x6a, 0x4d, 0x39, 0xd4, 0xac, 0x64, 0xc3, + 0xa6, 0x39, 0x24, 0x7f, 0xa9, 0xd2, 0x6c, 0xf1, 0x6f, 0x39, 0x30, 0x3f, 0xfe, 0x2c, 0x48, 0x23, + 0x9a, 0x96, 0x62, 0xa9, 0x0d, 0xb5, 0x69, 0x5d, 0x4a, 0x68, 0x15, 0xc0, 0xc9, 0xe1, 0xa6, 0xde, + 0x14, 0x99, 0x0c, 0x56, 0x6e, 0x64, 0xaf, 0x56, 0x35, 0x69, 0xe6, 0xaa, 0xb9, 0xa1, 0x57, 0xa5, + 0xd9, 0xab, 0x66, 0x45, 0xd3, 0xa4, 0x5b, 0xc5, 0xff, 0xe4, 0xc0, 0xf2, 0xd4, 0x06, 0xfb, 0x39, + 0x78, 0x62, 0x19, 0x4a, 0xd3, 0x54, 0x2a, 0xa2, 0xf8, 0x51, 0xdd, 0xd4, 0x35, 0xc5, 0xba, 0x7a, + 0xe2, 0xbe, 0x04, 0x2f, 0xa6, 0xbb, 0x19, 0xaa, 0x52, 0x45, 0x87, 0xcd, 0x8a, 0xde, 0x68, 0xd4, + 0x2d, 0x4b, 0xad, 0x4a, 0x39, 0xf8, 0x12, 0x3c, 0xfb, 0x1f, 0xbe, 0x23, 0xcf, 0x19, 0xf8, 0x05, + 0x78, 0xfe, 0x4b, 0x9e, 0x2d, 0x55, 0xb1, 0x94, 0xb2, 0xa6, 0xc6, 0x22, 0x69, 0x16, 0xbe, 0x00, + 0xc5, 0xe9, 0xae, 0xa6, 0x6a, 0xd4, 0x15, 0xad, 0xfe, 0x4e, 0x38, 0x4b, 0xb7, 0x8a, 0x7f, 0x00, + 0x73, 0x63, 0x8d, 0xae, 0xb8, 0x0c, 0xca, 0x3f, 0x59, 0xaa, 0x82, 0xf4, 0x43, 0xab, 0x75, 0x68, + 0x5d, 0x3d, 0x2b, 0x13, 0xa3, 0x6f, 0xd4, 0x63, 0x29, 0x07, 0x65, 0xb0, 0x3c, 0x61, 0x55, 0xcd, + 0x8a, 0xd2, 0x12, 0xf9, 0x16, 0x0d, 0x70, 0x77, 0xd8, 0xd9, 0x8a, 0xa3, 0x7e, 0xdc, 0xd0, 
0x50, + 0xb9, 0xde, 0x54, 0x8c, 0xcb, 0xc5, 0xb5, 0x02, 0xf2, 0x63, 0x63, 0x65, 0xc5, 0x54, 0xf7, 0xf7, + 0xa4, 0x1c, 0x84, 0x60, 0x71, 0xcc, 0x2c, 0xa2, 0xcd, 0x14, 0x8f, 0x63, 0x66, 0xd2, 0xc5, 0xa6, + 0x4c, 0xbd, 0x35, 0x65, 0x0b, 0xd6, 0x40, 0x61, 0x6c, 0xac, 0xaa, 0x57, 0x0e, 0xc5, 0xfe, 0x4a, + 0x39, 0x51, 0x38, 0x63, 0x03, 0x15, 0xbd, 0x69, 0x09, 0xfb, 0x8c, 0xb8, 0x63, 0x17, 0x27, 0x5b, + 0x24, 0x51, 0xb4, 0x65, 0xa5, 0xf2, 0x83, 0xa9, 0x29, 0xe6, 0x1b, 0xf4, 0xe3, 0xa1, 0x6e, 0x5d, + 0xbe, 0xbf, 0x0a, 0x60, 0xe9, 0x92, 0x43, 0x12, 0xe0, 0xb2, 0x4a, 0x6f, 0x4a, 0x33, 0x22, 0xa3, + 0x2b, 0xf6, 0x5a, 0x4d, 0x9a, 0x85, 0x4f, 0xc0, 0x83, 0xcb, 0x03, 0xa6, 0x52, 0x53, 0x91, 0xda, + 0xac, 0xe8, 0x55, 0x71, 0xf0, 0x6f, 0x95, 0x8f, 0xde, 0x59, 0x5d, 0x37, 0xea, 0xf5, 0xdb, 0x25, + 0x9b, 0x79, 0x5b, 0x49, 0x1f, 0xb8, 0x99, 0xfc, 0x96, 0xd0, 0x65, 0x9b, 0x5d, 0xe2, 0xc7, 0x1f, + 0x91, 0xad, 0x6b, 0xfd, 0xc0, 0xf1, 0xed, 0xc8, 0xd8, 0xbe, 0x1d, 0xeb, 0x5e, 0xfd, 0x37, 0x00, + 0x00, 0xff, 0xff, 0xc7, 0x30, 0xd1, 0x37, 0x1b, 0x19, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql10.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql10.pb.go new file mode 100644 index 000000000..e41b02776 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql10.pb.go @@ -0,0 +1,1301 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/config/postgresql10.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PostgresqlConfig10_WalLevel int32 + +const ( + PostgresqlConfig10_WAL_LEVEL_UNSPECIFIED PostgresqlConfig10_WalLevel = 0 + PostgresqlConfig10_WAL_LEVEL_REPLICA PostgresqlConfig10_WalLevel = 1 + PostgresqlConfig10_WAL_LEVEL_LOGICAL PostgresqlConfig10_WalLevel = 2 +) + +var PostgresqlConfig10_WalLevel_name = map[int32]string{ + 0: "WAL_LEVEL_UNSPECIFIED", + 1: "WAL_LEVEL_REPLICA", + 2: "WAL_LEVEL_LOGICAL", +} +var PostgresqlConfig10_WalLevel_value = map[string]int32{ + "WAL_LEVEL_UNSPECIFIED": 0, + "WAL_LEVEL_REPLICA": 1, + "WAL_LEVEL_LOGICAL": 2, +} + +func (x PostgresqlConfig10_WalLevel) String() string { + return proto.EnumName(PostgresqlConfig10_WalLevel_name, int32(x)) +} +func (PostgresqlConfig10_WalLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 0} +} + +type PostgresqlConfig10_SynchronousCommit int32 + +const ( + PostgresqlConfig10_SYNCHRONOUS_COMMIT_UNSPECIFIED PostgresqlConfig10_SynchronousCommit = 0 + PostgresqlConfig10_SYNCHRONOUS_COMMIT_ON PostgresqlConfig10_SynchronousCommit = 1 + PostgresqlConfig10_SYNCHRONOUS_COMMIT_OFF PostgresqlConfig10_SynchronousCommit = 2 + PostgresqlConfig10_SYNCHRONOUS_COMMIT_LOCAL PostgresqlConfig10_SynchronousCommit = 3 + PostgresqlConfig10_SYNCHRONOUS_COMMIT_REMOTE_WRITE PostgresqlConfig10_SynchronousCommit = 4 + PostgresqlConfig10_SYNCHRONOUS_COMMIT_REMOTE_APPLY PostgresqlConfig10_SynchronousCommit = 5 +) + +var PostgresqlConfig10_SynchronousCommit_name = map[int32]string{ + 0: "SYNCHRONOUS_COMMIT_UNSPECIFIED", + 1: "SYNCHRONOUS_COMMIT_ON", + 2: "SYNCHRONOUS_COMMIT_OFF", + 3: "SYNCHRONOUS_COMMIT_LOCAL", + 4: "SYNCHRONOUS_COMMIT_REMOTE_WRITE", + 5: "SYNCHRONOUS_COMMIT_REMOTE_APPLY", +} +var PostgresqlConfig10_SynchronousCommit_value = map[string]int32{ + "SYNCHRONOUS_COMMIT_UNSPECIFIED": 0, + "SYNCHRONOUS_COMMIT_ON": 1, + "SYNCHRONOUS_COMMIT_OFF": 2, + "SYNCHRONOUS_COMMIT_LOCAL": 3, + "SYNCHRONOUS_COMMIT_REMOTE_WRITE": 4, + "SYNCHRONOUS_COMMIT_REMOTE_APPLY": 5, +} + +func (x PostgresqlConfig10_SynchronousCommit) String() string { + return proto.EnumName(PostgresqlConfig10_SynchronousCommit_name, int32(x)) +} +func (PostgresqlConfig10_SynchronousCommit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 1} +} + +type PostgresqlConfig10_ConstraintExclusion int32 + +const ( + PostgresqlConfig10_CONSTRAINT_EXCLUSION_UNSPECIFIED PostgresqlConfig10_ConstraintExclusion = 0 + PostgresqlConfig10_CONSTRAINT_EXCLUSION_ON PostgresqlConfig10_ConstraintExclusion = 1 + PostgresqlConfig10_CONSTRAINT_EXCLUSION_OFF PostgresqlConfig10_ConstraintExclusion = 2 + PostgresqlConfig10_CONSTRAINT_EXCLUSION_PARTITION PostgresqlConfig10_ConstraintExclusion = 3 +) + +var PostgresqlConfig10_ConstraintExclusion_name = map[int32]string{ + 0: "CONSTRAINT_EXCLUSION_UNSPECIFIED", + 1: "CONSTRAINT_EXCLUSION_ON", + 2: "CONSTRAINT_EXCLUSION_OFF", + 3: "CONSTRAINT_EXCLUSION_PARTITION", +} +var PostgresqlConfig10_ConstraintExclusion_value = map[string]int32{ + "CONSTRAINT_EXCLUSION_UNSPECIFIED": 0, + "CONSTRAINT_EXCLUSION_ON": 1, + "CONSTRAINT_EXCLUSION_OFF": 2, + "CONSTRAINT_EXCLUSION_PARTITION": 3, +} + +func (x PostgresqlConfig10_ConstraintExclusion) String() string { + return proto.EnumName(PostgresqlConfig10_ConstraintExclusion_name, int32(x)) +} +func (PostgresqlConfig10_ConstraintExclusion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 2} +} + 
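
The following is an illustrative sketch only, not part of the vendored files: the generated `*_name`/`*_value` maps and `String()` methods emitted for these enums are the usual way to translate between the proto names and the Go constants. It assumes nothing beyond the import path declared in this package.

package main

import (
	"fmt"

	postgresql "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config"
)

func main() {
	// Resolve a textual setting to its generated constant via the *_value map.
	v, ok := postgresql.PostgresqlConfig10_SynchronousCommit_value["SYNCHRONOUS_COMMIT_REMOTE_WRITE"]
	if !ok {
		panic("unknown synchronous_commit value")
	}
	sc := postgresql.PostgresqlConfig10_SynchronousCommit(v)

	// String() round-trips back to the proto name.
	fmt.Println(sc.String()) // SYNCHRONOUS_COMMIT_REMOTE_WRITE
}
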
+type PostgresqlConfig10_ForceParallelMode int32 + +const ( + PostgresqlConfig10_FORCE_PARALLEL_MODE_UNSPECIFIED PostgresqlConfig10_ForceParallelMode = 0 + PostgresqlConfig10_FORCE_PARALLEL_MODE_ON PostgresqlConfig10_ForceParallelMode = 1 + PostgresqlConfig10_FORCE_PARALLEL_MODE_OFF PostgresqlConfig10_ForceParallelMode = 2 + PostgresqlConfig10_FORCE_PARALLEL_MODE_REGRESS PostgresqlConfig10_ForceParallelMode = 3 +) + +var PostgresqlConfig10_ForceParallelMode_name = map[int32]string{ + 0: "FORCE_PARALLEL_MODE_UNSPECIFIED", + 1: "FORCE_PARALLEL_MODE_ON", + 2: "FORCE_PARALLEL_MODE_OFF", + 3: "FORCE_PARALLEL_MODE_REGRESS", +} +var PostgresqlConfig10_ForceParallelMode_value = map[string]int32{ + "FORCE_PARALLEL_MODE_UNSPECIFIED": 0, + "FORCE_PARALLEL_MODE_ON": 1, + "FORCE_PARALLEL_MODE_OFF": 2, + "FORCE_PARALLEL_MODE_REGRESS": 3, +} + +func (x PostgresqlConfig10_ForceParallelMode) String() string { + return proto.EnumName(PostgresqlConfig10_ForceParallelMode_name, int32(x)) +} +func (PostgresqlConfig10_ForceParallelMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 3} +} + +type PostgresqlConfig10_LogLevel int32 + +const ( + PostgresqlConfig10_LOG_LEVEL_UNSPECIFIED PostgresqlConfig10_LogLevel = 0 + PostgresqlConfig10_LOG_LEVEL_DEBUG5 PostgresqlConfig10_LogLevel = 1 + PostgresqlConfig10_LOG_LEVEL_DEBUG4 PostgresqlConfig10_LogLevel = 2 + PostgresqlConfig10_LOG_LEVEL_DEBUG3 PostgresqlConfig10_LogLevel = 3 + PostgresqlConfig10_LOG_LEVEL_DEBUG2 PostgresqlConfig10_LogLevel = 4 + PostgresqlConfig10_LOG_LEVEL_DEBUG1 PostgresqlConfig10_LogLevel = 5 + PostgresqlConfig10_LOG_LEVEL_LOG PostgresqlConfig10_LogLevel = 6 + PostgresqlConfig10_LOG_LEVEL_NOTICE PostgresqlConfig10_LogLevel = 7 + PostgresqlConfig10_LOG_LEVEL_WARNING PostgresqlConfig10_LogLevel = 8 + PostgresqlConfig10_LOG_LEVEL_ERROR PostgresqlConfig10_LogLevel = 9 + PostgresqlConfig10_LOG_LEVEL_FATAL PostgresqlConfig10_LogLevel = 10 + PostgresqlConfig10_LOG_LEVEL_PANIC PostgresqlConfig10_LogLevel = 11 +) + +var PostgresqlConfig10_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "LOG_LEVEL_DEBUG5", + 2: "LOG_LEVEL_DEBUG4", + 3: "LOG_LEVEL_DEBUG3", + 4: "LOG_LEVEL_DEBUG2", + 5: "LOG_LEVEL_DEBUG1", + 6: "LOG_LEVEL_LOG", + 7: "LOG_LEVEL_NOTICE", + 8: "LOG_LEVEL_WARNING", + 9: "LOG_LEVEL_ERROR", + 10: "LOG_LEVEL_FATAL", + 11: "LOG_LEVEL_PANIC", +} +var PostgresqlConfig10_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "LOG_LEVEL_DEBUG5": 1, + "LOG_LEVEL_DEBUG4": 2, + "LOG_LEVEL_DEBUG3": 3, + "LOG_LEVEL_DEBUG2": 4, + "LOG_LEVEL_DEBUG1": 5, + "LOG_LEVEL_LOG": 6, + "LOG_LEVEL_NOTICE": 7, + "LOG_LEVEL_WARNING": 8, + "LOG_LEVEL_ERROR": 9, + "LOG_LEVEL_FATAL": 10, + "LOG_LEVEL_PANIC": 11, +} + +func (x PostgresqlConfig10_LogLevel) String() string { + return proto.EnumName(PostgresqlConfig10_LogLevel_name, int32(x)) +} +func (PostgresqlConfig10_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 4} +} + +type PostgresqlConfig10_LogErrorVerbosity int32 + +const ( + PostgresqlConfig10_LOG_ERROR_VERBOSITY_UNSPECIFIED PostgresqlConfig10_LogErrorVerbosity = 0 + PostgresqlConfig10_LOG_ERROR_VERBOSITY_TERSE PostgresqlConfig10_LogErrorVerbosity = 1 + PostgresqlConfig10_LOG_ERROR_VERBOSITY_DEFAULT PostgresqlConfig10_LogErrorVerbosity = 2 + PostgresqlConfig10_LOG_ERROR_VERBOSITY_VERBOSE PostgresqlConfig10_LogErrorVerbosity = 3 +) + +var PostgresqlConfig10_LogErrorVerbosity_name = map[int32]string{ + 0: 
"LOG_ERROR_VERBOSITY_UNSPECIFIED", + 1: "LOG_ERROR_VERBOSITY_TERSE", + 2: "LOG_ERROR_VERBOSITY_DEFAULT", + 3: "LOG_ERROR_VERBOSITY_VERBOSE", +} +var PostgresqlConfig10_LogErrorVerbosity_value = map[string]int32{ + "LOG_ERROR_VERBOSITY_UNSPECIFIED": 0, + "LOG_ERROR_VERBOSITY_TERSE": 1, + "LOG_ERROR_VERBOSITY_DEFAULT": 2, + "LOG_ERROR_VERBOSITY_VERBOSE": 3, +} + +func (x PostgresqlConfig10_LogErrorVerbosity) String() string { + return proto.EnumName(PostgresqlConfig10_LogErrorVerbosity_name, int32(x)) +} +func (PostgresqlConfig10_LogErrorVerbosity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 5} +} + +type PostgresqlConfig10_LogStatement int32 + +const ( + PostgresqlConfig10_LOG_STATEMENT_UNSPECIFIED PostgresqlConfig10_LogStatement = 0 + PostgresqlConfig10_LOG_STATEMENT_NONE PostgresqlConfig10_LogStatement = 1 + PostgresqlConfig10_LOG_STATEMENT_DDL PostgresqlConfig10_LogStatement = 2 + PostgresqlConfig10_LOG_STATEMENT_MOD PostgresqlConfig10_LogStatement = 3 + PostgresqlConfig10_LOG_STATEMENT_ALL PostgresqlConfig10_LogStatement = 4 +) + +var PostgresqlConfig10_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: "LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var PostgresqlConfig10_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x PostgresqlConfig10_LogStatement) String() string { + return proto.EnumName(PostgresqlConfig10_LogStatement_name, int32(x)) +} +func (PostgresqlConfig10_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 6} +} + +type PostgresqlConfig10_TransactionIsolation int32 + +const ( + PostgresqlConfig10_TRANSACTION_ISOLATION_UNSPECIFIED PostgresqlConfig10_TransactionIsolation = 0 + PostgresqlConfig10_TRANSACTION_ISOLATION_READ_UNCOMMITTED PostgresqlConfig10_TransactionIsolation = 1 + PostgresqlConfig10_TRANSACTION_ISOLATION_READ_COMMITTED PostgresqlConfig10_TransactionIsolation = 2 + PostgresqlConfig10_TRANSACTION_ISOLATION_REPEATABLE_READ PostgresqlConfig10_TransactionIsolation = 3 + PostgresqlConfig10_TRANSACTION_ISOLATION_SERIALIZABLE PostgresqlConfig10_TransactionIsolation = 4 +) + +var PostgresqlConfig10_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var PostgresqlConfig10_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x PostgresqlConfig10_TransactionIsolation) String() string { + return proto.EnumName(PostgresqlConfig10_TransactionIsolation_name, int32(x)) +} +func (PostgresqlConfig10_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 7} +} + +type PostgresqlConfig10_ByteaOutput int32 + +const ( + PostgresqlConfig10_BYTEA_OUTPUT_UNSPECIFIED PostgresqlConfig10_ByteaOutput = 0 + PostgresqlConfig10_BYTEA_OUTPUT_HEX PostgresqlConfig10_ByteaOutput = 1 + PostgresqlConfig10_BYTEA_OUTPUT_ESCAPED PostgresqlConfig10_ByteaOutput 
= 2 +) + +var PostgresqlConfig10_ByteaOutput_name = map[int32]string{ + 0: "BYTEA_OUTPUT_UNSPECIFIED", + 1: "BYTEA_OUTPUT_HEX", + 2: "BYTEA_OUTPUT_ESCAPED", +} +var PostgresqlConfig10_ByteaOutput_value = map[string]int32{ + "BYTEA_OUTPUT_UNSPECIFIED": 0, + "BYTEA_OUTPUT_HEX": 1, + "BYTEA_OUTPUT_ESCAPED": 2, +} + +func (x PostgresqlConfig10_ByteaOutput) String() string { + return proto.EnumName(PostgresqlConfig10_ByteaOutput_name, int32(x)) +} +func (PostgresqlConfig10_ByteaOutput) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 8} +} + +type PostgresqlConfig10_XmlBinary int32 + +const ( + PostgresqlConfig10_XML_BINARY_UNSPECIFIED PostgresqlConfig10_XmlBinary = 0 + PostgresqlConfig10_XML_BINARY_BASE64 PostgresqlConfig10_XmlBinary = 1 + PostgresqlConfig10_XML_BINARY_HEX PostgresqlConfig10_XmlBinary = 2 +) + +var PostgresqlConfig10_XmlBinary_name = map[int32]string{ + 0: "XML_BINARY_UNSPECIFIED", + 1: "XML_BINARY_BASE64", + 2: "XML_BINARY_HEX", +} +var PostgresqlConfig10_XmlBinary_value = map[string]int32{ + "XML_BINARY_UNSPECIFIED": 0, + "XML_BINARY_BASE64": 1, + "XML_BINARY_HEX": 2, +} + +func (x PostgresqlConfig10_XmlBinary) String() string { + return proto.EnumName(PostgresqlConfig10_XmlBinary_name, int32(x)) +} +func (PostgresqlConfig10_XmlBinary) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 9} +} + +type PostgresqlConfig10_XmlOption int32 + +const ( + PostgresqlConfig10_XML_OPTION_UNSPECIFIED PostgresqlConfig10_XmlOption = 0 + PostgresqlConfig10_XML_OPTION_DOCUMENT PostgresqlConfig10_XmlOption = 1 + PostgresqlConfig10_XML_OPTION_CONTENT PostgresqlConfig10_XmlOption = 2 +) + +var PostgresqlConfig10_XmlOption_name = map[int32]string{ + 0: "XML_OPTION_UNSPECIFIED", + 1: "XML_OPTION_DOCUMENT", + 2: "XML_OPTION_CONTENT", +} +var PostgresqlConfig10_XmlOption_value = map[string]int32{ + "XML_OPTION_UNSPECIFIED": 0, + "XML_OPTION_DOCUMENT": 1, + "XML_OPTION_CONTENT": 2, +} + +func (x PostgresqlConfig10_XmlOption) String() string { + return proto.EnumName(PostgresqlConfig10_XmlOption_name, int32(x)) +} +func (PostgresqlConfig10_XmlOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 10} +} + +type PostgresqlConfig10_BackslashQuote int32 + +const ( + PostgresqlConfig10_BACKSLASH_QUOTE_UNSPECIFIED PostgresqlConfig10_BackslashQuote = 0 + PostgresqlConfig10_BACKSLASH_QUOTE PostgresqlConfig10_BackslashQuote = 1 + PostgresqlConfig10_BACKSLASH_QUOTE_ON PostgresqlConfig10_BackslashQuote = 2 + PostgresqlConfig10_BACKSLASH_QUOTE_OFF PostgresqlConfig10_BackslashQuote = 3 + PostgresqlConfig10_BACKSLASH_QUOTE_SAFE_ENCODING PostgresqlConfig10_BackslashQuote = 4 +) + +var PostgresqlConfig10_BackslashQuote_name = map[int32]string{ + 0: "BACKSLASH_QUOTE_UNSPECIFIED", + 1: "BACKSLASH_QUOTE", + 2: "BACKSLASH_QUOTE_ON", + 3: "BACKSLASH_QUOTE_OFF", + 4: "BACKSLASH_QUOTE_SAFE_ENCODING", +} +var PostgresqlConfig10_BackslashQuote_value = map[string]int32{ + "BACKSLASH_QUOTE_UNSPECIFIED": 0, + "BACKSLASH_QUOTE": 1, + "BACKSLASH_QUOTE_ON": 2, + "BACKSLASH_QUOTE_OFF": 3, + "BACKSLASH_QUOTE_SAFE_ENCODING": 4, +} + +func (x PostgresqlConfig10_BackslashQuote) String() string { + return proto.EnumName(PostgresqlConfig10_BackslashQuote_name, int32(x)) +} +func (PostgresqlConfig10_BackslashQuote) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0, 11} +} + +// Options and structure of `PostgresqlConfig` reflects PostgreSQL 
configuration file +// parameters whose detailed description is available in +// [PostgreSQL documentation](https://www.postgresql.org/docs/10/runtime-config.html). +type PostgresqlConfig10 struct { + MaxConnections *wrappers.Int64Value `protobuf:"bytes,1,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + SharedBuffers *wrappers.Int64Value `protobuf:"bytes,2,opt,name=shared_buffers,json=sharedBuffers,proto3" json:"shared_buffers,omitempty"` + TempBuffers *wrappers.Int64Value `protobuf:"bytes,3,opt,name=temp_buffers,json=tempBuffers,proto3" json:"temp_buffers,omitempty"` + MaxPreparedTransactions *wrappers.Int64Value `protobuf:"bytes,4,opt,name=max_prepared_transactions,json=maxPreparedTransactions,proto3" json:"max_prepared_transactions,omitempty"` + WorkMem *wrappers.Int64Value `protobuf:"bytes,5,opt,name=work_mem,json=workMem,proto3" json:"work_mem,omitempty"` + MaintenanceWorkMem *wrappers.Int64Value `protobuf:"bytes,6,opt,name=maintenance_work_mem,json=maintenanceWorkMem,proto3" json:"maintenance_work_mem,omitempty"` + ReplacementSortTuples *wrappers.Int64Value `protobuf:"bytes,7,opt,name=replacement_sort_tuples,json=replacementSortTuples,proto3" json:"replacement_sort_tuples,omitempty"` + AutovacuumWorkMem *wrappers.Int64Value `protobuf:"bytes,8,opt,name=autovacuum_work_mem,json=autovacuumWorkMem,proto3" json:"autovacuum_work_mem,omitempty"` + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,9,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + VacuumCostDelay *wrappers.Int64Value `protobuf:"bytes,10,opt,name=vacuum_cost_delay,json=vacuumCostDelay,proto3" json:"vacuum_cost_delay,omitempty"` + VacuumCostPageHit *wrappers.Int64Value `protobuf:"bytes,11,opt,name=vacuum_cost_page_hit,json=vacuumCostPageHit,proto3" json:"vacuum_cost_page_hit,omitempty"` + VacuumCostPageMiss *wrappers.Int64Value `protobuf:"bytes,12,opt,name=vacuum_cost_page_miss,json=vacuumCostPageMiss,proto3" json:"vacuum_cost_page_miss,omitempty"` + VacuumCostPageDirty *wrappers.Int64Value `protobuf:"bytes,13,opt,name=vacuum_cost_page_dirty,json=vacuumCostPageDirty,proto3" json:"vacuum_cost_page_dirty,omitempty"` + VacuumCostLimit *wrappers.Int64Value `protobuf:"bytes,14,opt,name=vacuum_cost_limit,json=vacuumCostLimit,proto3" json:"vacuum_cost_limit,omitempty"` + BgwriterDelay *wrappers.Int64Value `protobuf:"bytes,15,opt,name=bgwriter_delay,json=bgwriterDelay,proto3" json:"bgwriter_delay,omitempty"` + BgwriterLruMaxpages *wrappers.Int64Value `protobuf:"bytes,16,opt,name=bgwriter_lru_maxpages,json=bgwriterLruMaxpages,proto3" json:"bgwriter_lru_maxpages,omitempty"` + BgwriterLruMultiplier *wrappers.DoubleValue `protobuf:"bytes,17,opt,name=bgwriter_lru_multiplier,json=bgwriterLruMultiplier,proto3" json:"bgwriter_lru_multiplier,omitempty"` + BgwriterFlushAfter *wrappers.Int64Value `protobuf:"bytes,18,opt,name=bgwriter_flush_after,json=bgwriterFlushAfter,proto3" json:"bgwriter_flush_after,omitempty"` + BackendFlushAfter *wrappers.Int64Value `protobuf:"bytes,19,opt,name=backend_flush_after,json=backendFlushAfter,proto3" json:"backend_flush_after,omitempty"` + OldSnapshotThreshold *wrappers.Int64Value `protobuf:"bytes,20,opt,name=old_snapshot_threshold,json=oldSnapshotThreshold,proto3" json:"old_snapshot_threshold,omitempty"` + WalLevel PostgresqlConfig10_WalLevel `protobuf:"varint,21,opt,name=wal_level,json=walLevel,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_WalLevel" json:"wal_level,omitempty"` + SynchronousCommit 
PostgresqlConfig10_SynchronousCommit `protobuf:"varint,22,opt,name=synchronous_commit,json=synchronousCommit,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_SynchronousCommit" json:"synchronous_commit,omitempty"` + CheckpointTimeout *wrappers.Int64Value `protobuf:"bytes,23,opt,name=checkpoint_timeout,json=checkpointTimeout,proto3" json:"checkpoint_timeout,omitempty"` + CheckpointCompletionTarget *wrappers.DoubleValue `protobuf:"bytes,24,opt,name=checkpoint_completion_target,json=checkpointCompletionTarget,proto3" json:"checkpoint_completion_target,omitempty"` + CheckpointFlushAfter *wrappers.Int64Value `protobuf:"bytes,25,opt,name=checkpoint_flush_after,json=checkpointFlushAfter,proto3" json:"checkpoint_flush_after,omitempty"` + MaxWalSize *wrappers.Int64Value `protobuf:"bytes,26,opt,name=max_wal_size,json=maxWalSize,proto3" json:"max_wal_size,omitempty"` + MinWalSize *wrappers.Int64Value `protobuf:"bytes,27,opt,name=min_wal_size,json=minWalSize,proto3" json:"min_wal_size,omitempty"` + MaxStandbyStreamingDelay *wrappers.Int64Value `protobuf:"bytes,28,opt,name=max_standby_streaming_delay,json=maxStandbyStreamingDelay,proto3" json:"max_standby_streaming_delay,omitempty"` + DefaultStatisticsTarget *wrappers.Int64Value `protobuf:"bytes,29,opt,name=default_statistics_target,json=defaultStatisticsTarget,proto3" json:"default_statistics_target,omitempty"` + ConstraintExclusion PostgresqlConfig10_ConstraintExclusion `protobuf:"varint,30,opt,name=constraint_exclusion,json=constraintExclusion,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_ConstraintExclusion" json:"constraint_exclusion,omitempty"` + CursorTupleFraction *wrappers.DoubleValue `protobuf:"bytes,31,opt,name=cursor_tuple_fraction,json=cursorTupleFraction,proto3" json:"cursor_tuple_fraction,omitempty"` + FromCollapseLimit *wrappers.Int64Value `protobuf:"bytes,32,opt,name=from_collapse_limit,json=fromCollapseLimit,proto3" json:"from_collapse_limit,omitempty"` + JoinCollapseLimit *wrappers.Int64Value `protobuf:"bytes,33,opt,name=join_collapse_limit,json=joinCollapseLimit,proto3" json:"join_collapse_limit,omitempty"` + ForceParallelMode PostgresqlConfig10_ForceParallelMode `protobuf:"varint,34,opt,name=force_parallel_mode,json=forceParallelMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_ForceParallelMode" json:"force_parallel_mode,omitempty"` + ClientMinMessages PostgresqlConfig10_LogLevel `protobuf:"varint,35,opt,name=client_min_messages,json=clientMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogLevel" json:"client_min_messages,omitempty"` + LogMinMessages PostgresqlConfig10_LogLevel `protobuf:"varint,36,opt,name=log_min_messages,json=logMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogLevel" json:"log_min_messages,omitempty"` + LogMinErrorStatement PostgresqlConfig10_LogLevel `protobuf:"varint,37,opt,name=log_min_error_statement,json=logMinErrorStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogLevel" json:"log_min_error_statement,omitempty"` + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,38,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" json:"log_min_duration_statement,omitempty"` + LogCheckpoints *wrappers.BoolValue `protobuf:"bytes,39,opt,name=log_checkpoints,json=logCheckpoints,proto3" json:"log_checkpoints,omitempty"` + LogConnections *wrappers.BoolValue `protobuf:"bytes,40,opt,name=log_connections,json=logConnections,proto3" 
json:"log_connections,omitempty"` + LogDisconnections *wrappers.BoolValue `protobuf:"bytes,41,opt,name=log_disconnections,json=logDisconnections,proto3" json:"log_disconnections,omitempty"` + LogDuration *wrappers.BoolValue `protobuf:"bytes,42,opt,name=log_duration,json=logDuration,proto3" json:"log_duration,omitempty"` + LogErrorVerbosity PostgresqlConfig10_LogErrorVerbosity `protobuf:"varint,43,opt,name=log_error_verbosity,json=logErrorVerbosity,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogErrorVerbosity" json:"log_error_verbosity,omitempty"` + LogLockWaits *wrappers.BoolValue `protobuf:"bytes,44,opt,name=log_lock_waits,json=logLockWaits,proto3" json:"log_lock_waits,omitempty"` + LogStatement PostgresqlConfig10_LogStatement `protobuf:"varint,45,opt,name=log_statement,json=logStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogStatement" json:"log_statement,omitempty"` + LogTempFiles *wrappers.Int64Value `protobuf:"bytes,46,opt,name=log_temp_files,json=logTempFiles,proto3" json:"log_temp_files,omitempty"` + SearchPath string `protobuf:"bytes,47,opt,name=search_path,json=searchPath,proto3" json:"search_path,omitempty"` + RowSecurity *wrappers.BoolValue `protobuf:"bytes,48,opt,name=row_security,json=rowSecurity,proto3" json:"row_security,omitempty"` + DefaultTransactionIsolation PostgresqlConfig10_TransactionIsolation `protobuf:"varint,49,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + StatementTimeout *wrappers.Int64Value `protobuf:"bytes,50,opt,name=statement_timeout,json=statementTimeout,proto3" json:"statement_timeout,omitempty"` + LockTimeout *wrappers.Int64Value `protobuf:"bytes,51,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + IdleInTransactionSessionTimeout *wrappers.Int64Value `protobuf:"bytes,52,opt,name=idle_in_transaction_session_timeout,json=idleInTransactionSessionTimeout,proto3" json:"idle_in_transaction_session_timeout,omitempty"` + ByteaOutput PostgresqlConfig10_ByteaOutput `protobuf:"varint,53,opt,name=bytea_output,json=byteaOutput,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_ByteaOutput" json:"bytea_output,omitempty"` + Xmlbinary PostgresqlConfig10_XmlBinary `protobuf:"varint,54,opt,name=xmlbinary,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_XmlBinary" json:"xmlbinary,omitempty"` + Xmloption PostgresqlConfig10_XmlOption `protobuf:"varint,55,opt,name=xmloption,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_XmlOption" json:"xmloption,omitempty"` + GinPendingListLimit *wrappers.Int64Value `protobuf:"bytes,56,opt,name=gin_pending_list_limit,json=ginPendingListLimit,proto3" json:"gin_pending_list_limit,omitempty"` + DeadlockTimeout *wrappers.Int64Value `protobuf:"bytes,57,opt,name=deadlock_timeout,json=deadlockTimeout,proto3" json:"deadlock_timeout,omitempty"` + MaxLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,58,opt,name=max_locks_per_transaction,json=maxLocksPerTransaction,proto3" json:"max_locks_per_transaction,omitempty"` + MaxPredLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,59,opt,name=max_pred_locks_per_transaction,json=maxPredLocksPerTransaction,proto3" json:"max_pred_locks_per_transaction,omitempty"` + ArrayNulls *wrappers.BoolValue `protobuf:"bytes,60,opt,name=array_nulls,json=arrayNulls,proto3" json:"array_nulls,omitempty"` 
+ BackslashQuote PostgresqlConfig10_BackslashQuote `protobuf:"varint,61,opt,name=backslash_quote,json=backslashQuote,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_BackslashQuote" json:"backslash_quote,omitempty"` + DefaultWithOids *wrappers.BoolValue `protobuf:"bytes,62,opt,name=default_with_oids,json=defaultWithOids,proto3" json:"default_with_oids,omitempty"` + EscapeStringWarning *wrappers.BoolValue `protobuf:"bytes,63,opt,name=escape_string_warning,json=escapeStringWarning,proto3" json:"escape_string_warning,omitempty"` + LoCompatPrivileges *wrappers.BoolValue `protobuf:"bytes,64,opt,name=lo_compat_privileges,json=loCompatPrivileges,proto3" json:"lo_compat_privileges,omitempty"` + OperatorPrecedenceWarning *wrappers.BoolValue `protobuf:"bytes,65,opt,name=operator_precedence_warning,json=operatorPrecedenceWarning,proto3" json:"operator_precedence_warning,omitempty"` + QuoteAllIdentifiers *wrappers.BoolValue `protobuf:"bytes,66,opt,name=quote_all_identifiers,json=quoteAllIdentifiers,proto3" json:"quote_all_identifiers,omitempty"` + StandardConformingStrings *wrappers.BoolValue `protobuf:"bytes,67,opt,name=standard_conforming_strings,json=standardConformingStrings,proto3" json:"standard_conforming_strings,omitempty"` + SynchronizeSeqscans *wrappers.BoolValue `protobuf:"bytes,68,opt,name=synchronize_seqscans,json=synchronizeSeqscans,proto3" json:"synchronize_seqscans,omitempty"` + TransformNullEquals *wrappers.BoolValue `protobuf:"bytes,69,opt,name=transform_null_equals,json=transformNullEquals,proto3" json:"transform_null_equals,omitempty"` + ExitOnError *wrappers.BoolValue `protobuf:"bytes,70,opt,name=exit_on_error,json=exitOnError,proto3" json:"exit_on_error,omitempty"` + SeqPageCost *wrappers.DoubleValue `protobuf:"bytes,71,opt,name=seq_page_cost,json=seqPageCost,proto3" json:"seq_page_cost,omitempty"` + RandomPageCost *wrappers.DoubleValue `protobuf:"bytes,72,opt,name=random_page_cost,json=randomPageCost,proto3" json:"random_page_cost,omitempty"` + AutovacuumMaxWorkers *wrappers.Int64Value `protobuf:"bytes,73,opt,name=autovacuum_max_workers,json=autovacuumMaxWorkers,proto3" json:"autovacuum_max_workers,omitempty"` + AutovacuumVacuumCostDelay *wrappers.Int64Value `protobuf:"bytes,74,opt,name=autovacuum_vacuum_cost_delay,json=autovacuumVacuumCostDelay,proto3" json:"autovacuum_vacuum_cost_delay,omitempty"` + AutovacuumVacuumCostLimit *wrappers.Int64Value `protobuf:"bytes,75,opt,name=autovacuum_vacuum_cost_limit,json=autovacuumVacuumCostLimit,proto3" json:"autovacuum_vacuum_cost_limit,omitempty"` + AutovacuumNaptime *wrappers.Int64Value `protobuf:"bytes,76,opt,name=autovacuum_naptime,json=autovacuumNaptime,proto3" json:"autovacuum_naptime,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlConfig10) Reset() { *m = PostgresqlConfig10{} } +func (m *PostgresqlConfig10) String() string { return proto.CompactTextString(m) } +func (*PostgresqlConfig10) ProtoMessage() {} +func (*PostgresqlConfig10) Descriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{0} +} +func (m *PostgresqlConfig10) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlConfig10.Unmarshal(m, b) +} +func (m *PostgresqlConfig10) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlConfig10.Marshal(b, m, deterministic) +} +func (dst *PostgresqlConfig10) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PostgresqlConfig10.Merge(dst, src) +} +func (m *PostgresqlConfig10) XXX_Size() int { + return xxx_messageInfo_PostgresqlConfig10.Size(m) +} +func (m *PostgresqlConfig10) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlConfig10.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlConfig10 proto.InternalMessageInfo + +func (m *PostgresqlConfig10) GetMaxConnections() *wrappers.Int64Value { + if m != nil { + return m.MaxConnections + } + return nil +} + +func (m *PostgresqlConfig10) GetSharedBuffers() *wrappers.Int64Value { + if m != nil { + return m.SharedBuffers + } + return nil +} + +func (m *PostgresqlConfig10) GetTempBuffers() *wrappers.Int64Value { + if m != nil { + return m.TempBuffers + } + return nil +} + +func (m *PostgresqlConfig10) GetMaxPreparedTransactions() *wrappers.Int64Value { + if m != nil { + return m.MaxPreparedTransactions + } + return nil +} + +func (m *PostgresqlConfig10) GetWorkMem() *wrappers.Int64Value { + if m != nil { + return m.WorkMem + } + return nil +} + +func (m *PostgresqlConfig10) GetMaintenanceWorkMem() *wrappers.Int64Value { + if m != nil { + return m.MaintenanceWorkMem + } + return nil +} + +func (m *PostgresqlConfig10) GetReplacementSortTuples() *wrappers.Int64Value { + if m != nil { + return m.ReplacementSortTuples + } + return nil +} + +func (m *PostgresqlConfig10) GetAutovacuumWorkMem() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumWorkMem + } + return nil +} + +func (m *PostgresqlConfig10) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func (m *PostgresqlConfig10) GetVacuumCostDelay() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostDelay + } + return nil +} + +func (m *PostgresqlConfig10) GetVacuumCostPageHit() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageHit + } + return nil +} + +func (m *PostgresqlConfig10) GetVacuumCostPageMiss() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageMiss + } + return nil +} + +func (m *PostgresqlConfig10) GetVacuumCostPageDirty() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageDirty + } + return nil +} + +func (m *PostgresqlConfig10) GetVacuumCostLimit() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostLimit + } + return nil +} + +func (m *PostgresqlConfig10) GetBgwriterDelay() *wrappers.Int64Value { + if m != nil { + return m.BgwriterDelay + } + return nil +} + +func (m *PostgresqlConfig10) GetBgwriterLruMaxpages() *wrappers.Int64Value { + if m != nil { + return m.BgwriterLruMaxpages + } + return nil +} + +func (m *PostgresqlConfig10) GetBgwriterLruMultiplier() *wrappers.DoubleValue { + if m != nil { + return m.BgwriterLruMultiplier + } + return nil +} + +func (m *PostgresqlConfig10) GetBgwriterFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BgwriterFlushAfter + } + return nil +} + +func (m *PostgresqlConfig10) GetBackendFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BackendFlushAfter + } + return nil +} + +func (m *PostgresqlConfig10) GetOldSnapshotThreshold() *wrappers.Int64Value { + if m != nil { + return m.OldSnapshotThreshold + } + return nil +} + +func (m *PostgresqlConfig10) GetWalLevel() PostgresqlConfig10_WalLevel { + if m != nil { + return m.WalLevel + } + return PostgresqlConfig10_WAL_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetSynchronousCommit() PostgresqlConfig10_SynchronousCommit { + if m != nil { + return m.SynchronousCommit + } + return PostgresqlConfig10_SYNCHRONOUS_COMMIT_UNSPECIFIED +} + 
+func (m *PostgresqlConfig10) GetCheckpointTimeout() *wrappers.Int64Value { + if m != nil { + return m.CheckpointTimeout + } + return nil +} + +func (m *PostgresqlConfig10) GetCheckpointCompletionTarget() *wrappers.DoubleValue { + if m != nil { + return m.CheckpointCompletionTarget + } + return nil +} + +func (m *PostgresqlConfig10) GetCheckpointFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.CheckpointFlushAfter + } + return nil +} + +func (m *PostgresqlConfig10) GetMaxWalSize() *wrappers.Int64Value { + if m != nil { + return m.MaxWalSize + } + return nil +} + +func (m *PostgresqlConfig10) GetMinWalSize() *wrappers.Int64Value { + if m != nil { + return m.MinWalSize + } + return nil +} + +func (m *PostgresqlConfig10) GetMaxStandbyStreamingDelay() *wrappers.Int64Value { + if m != nil { + return m.MaxStandbyStreamingDelay + } + return nil +} + +func (m *PostgresqlConfig10) GetDefaultStatisticsTarget() *wrappers.Int64Value { + if m != nil { + return m.DefaultStatisticsTarget + } + return nil +} + +func (m *PostgresqlConfig10) GetConstraintExclusion() PostgresqlConfig10_ConstraintExclusion { + if m != nil { + return m.ConstraintExclusion + } + return PostgresqlConfig10_CONSTRAINT_EXCLUSION_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetCursorTupleFraction() *wrappers.DoubleValue { + if m != nil { + return m.CursorTupleFraction + } + return nil +} + +func (m *PostgresqlConfig10) GetFromCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.FromCollapseLimit + } + return nil +} + +func (m *PostgresqlConfig10) GetJoinCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.JoinCollapseLimit + } + return nil +} + +func (m *PostgresqlConfig10) GetForceParallelMode() PostgresqlConfig10_ForceParallelMode { + if m != nil { + return m.ForceParallelMode + } + return PostgresqlConfig10_FORCE_PARALLEL_MODE_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetClientMinMessages() PostgresqlConfig10_LogLevel { + if m != nil { + return m.ClientMinMessages + } + return PostgresqlConfig10_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetLogMinMessages() PostgresqlConfig10_LogLevel { + if m != nil { + return m.LogMinMessages + } + return PostgresqlConfig10_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetLogMinErrorStatement() PostgresqlConfig10_LogLevel { + if m != nil { + return m.LogMinErrorStatement + } + return PostgresqlConfig10_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *PostgresqlConfig10) GetLogCheckpoints() *wrappers.BoolValue { + if m != nil { + return m.LogCheckpoints + } + return nil +} + +func (m *PostgresqlConfig10) GetLogConnections() *wrappers.BoolValue { + if m != nil { + return m.LogConnections + } + return nil +} + +func (m *PostgresqlConfig10) GetLogDisconnections() *wrappers.BoolValue { + if m != nil { + return m.LogDisconnections + } + return nil +} + +func (m *PostgresqlConfig10) GetLogDuration() *wrappers.BoolValue { + if m != nil { + return m.LogDuration + } + return nil +} + +func (m *PostgresqlConfig10) GetLogErrorVerbosity() PostgresqlConfig10_LogErrorVerbosity { + if m != nil { + return m.LogErrorVerbosity + } + return PostgresqlConfig10_LOG_ERROR_VERBOSITY_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetLogLockWaits() *wrappers.BoolValue { + if m != nil { + return m.LogLockWaits + } + return nil +} + +func (m *PostgresqlConfig10) GetLogStatement() PostgresqlConfig10_LogStatement 
{ + if m != nil { + return m.LogStatement + } + return PostgresqlConfig10_LOG_STATEMENT_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetLogTempFiles() *wrappers.Int64Value { + if m != nil { + return m.LogTempFiles + } + return nil +} + +func (m *PostgresqlConfig10) GetSearchPath() string { + if m != nil { + return m.SearchPath + } + return "" +} + +func (m *PostgresqlConfig10) GetRowSecurity() *wrappers.BoolValue { + if m != nil { + return m.RowSecurity + } + return nil +} + +func (m *PostgresqlConfig10) GetDefaultTransactionIsolation() PostgresqlConfig10_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return PostgresqlConfig10_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetStatementTimeout() *wrappers.Int64Value { + if m != nil { + return m.StatementTimeout + } + return nil +} + +func (m *PostgresqlConfig10) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *PostgresqlConfig10) GetIdleInTransactionSessionTimeout() *wrappers.Int64Value { + if m != nil { + return m.IdleInTransactionSessionTimeout + } + return nil +} + +func (m *PostgresqlConfig10) GetByteaOutput() PostgresqlConfig10_ByteaOutput { + if m != nil { + return m.ByteaOutput + } + return PostgresqlConfig10_BYTEA_OUTPUT_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetXmlbinary() PostgresqlConfig10_XmlBinary { + if m != nil { + return m.Xmlbinary + } + return PostgresqlConfig10_XML_BINARY_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetXmloption() PostgresqlConfig10_XmlOption { + if m != nil { + return m.Xmloption + } + return PostgresqlConfig10_XML_OPTION_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetGinPendingListLimit() *wrappers.Int64Value { + if m != nil { + return m.GinPendingListLimit + } + return nil +} + +func (m *PostgresqlConfig10) GetDeadlockTimeout() *wrappers.Int64Value { + if m != nil { + return m.DeadlockTimeout + } + return nil +} + +func (m *PostgresqlConfig10) GetMaxLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxLocksPerTransaction + } + return nil +} + +func (m *PostgresqlConfig10) GetMaxPredLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxPredLocksPerTransaction + } + return nil +} + +func (m *PostgresqlConfig10) GetArrayNulls() *wrappers.BoolValue { + if m != nil { + return m.ArrayNulls + } + return nil +} + +func (m *PostgresqlConfig10) GetBackslashQuote() PostgresqlConfig10_BackslashQuote { + if m != nil { + return m.BackslashQuote + } + return PostgresqlConfig10_BACKSLASH_QUOTE_UNSPECIFIED +} + +func (m *PostgresqlConfig10) GetDefaultWithOids() *wrappers.BoolValue { + if m != nil { + return m.DefaultWithOids + } + return nil +} + +func (m *PostgresqlConfig10) GetEscapeStringWarning() *wrappers.BoolValue { + if m != nil { + return m.EscapeStringWarning + } + return nil +} + +func (m *PostgresqlConfig10) GetLoCompatPrivileges() *wrappers.BoolValue { + if m != nil { + return m.LoCompatPrivileges + } + return nil +} + +func (m *PostgresqlConfig10) GetOperatorPrecedenceWarning() *wrappers.BoolValue { + if m != nil { + return m.OperatorPrecedenceWarning + } + return nil +} + +func (m *PostgresqlConfig10) GetQuoteAllIdentifiers() *wrappers.BoolValue { + if m != nil { + return m.QuoteAllIdentifiers + } + return nil +} + +func (m *PostgresqlConfig10) GetStandardConformingStrings() *wrappers.BoolValue { + if m != nil { + return m.StandardConformingStrings + } + return nil +} + +func (m *PostgresqlConfig10) GetSynchronizeSeqscans() 
*wrappers.BoolValue { + if m != nil { + return m.SynchronizeSeqscans + } + return nil +} + +func (m *PostgresqlConfig10) GetTransformNullEquals() *wrappers.BoolValue { + if m != nil { + return m.TransformNullEquals + } + return nil +} + +func (m *PostgresqlConfig10) GetExitOnError() *wrappers.BoolValue { + if m != nil { + return m.ExitOnError + } + return nil +} + +func (m *PostgresqlConfig10) GetSeqPageCost() *wrappers.DoubleValue { + if m != nil { + return m.SeqPageCost + } + return nil +} + +func (m *PostgresqlConfig10) GetRandomPageCost() *wrappers.DoubleValue { + if m != nil { + return m.RandomPageCost + } + return nil +} + +func (m *PostgresqlConfig10) GetAutovacuumMaxWorkers() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumMaxWorkers + } + return nil +} + +func (m *PostgresqlConfig10) GetAutovacuumVacuumCostDelay() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumVacuumCostDelay + } + return nil +} + +func (m *PostgresqlConfig10) GetAutovacuumVacuumCostLimit() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumVacuumCostLimit + } + return nil +} + +func (m *PostgresqlConfig10) GetAutovacuumNaptime() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumNaptime + } + return nil +} + +type PostgresqlConfigSet10 struct { + // Effective settings for a PostgreSQL 10 cluster (a combination of settings defined + // in [user_config] and [default_config]). + EffectiveConfig *PostgresqlConfig10 `protobuf:"bytes,1,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` + // User-defined settings for a PostgreSQL 10 cluster. + UserConfig *PostgresqlConfig10 `protobuf:"bytes,2,opt,name=user_config,json=userConfig,proto3" json:"user_config,omitempty"` + // Default configuration for a PostgreSQL 10 cluster. 
+ DefaultConfig *PostgresqlConfig10 `protobuf:"bytes,3,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlConfigSet10) Reset() { *m = PostgresqlConfigSet10{} } +func (m *PostgresqlConfigSet10) String() string { return proto.CompactTextString(m) } +func (*PostgresqlConfigSet10) ProtoMessage() {} +func (*PostgresqlConfigSet10) Descriptor() ([]byte, []int) { + return fileDescriptor_postgresql10_fca60c3578566090, []int{1} +} +func (m *PostgresqlConfigSet10) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlConfigSet10.Unmarshal(m, b) +} +func (m *PostgresqlConfigSet10) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlConfigSet10.Marshal(b, m, deterministic) +} +func (dst *PostgresqlConfigSet10) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlConfigSet10.Merge(dst, src) +} +func (m *PostgresqlConfigSet10) XXX_Size() int { + return xxx_messageInfo_PostgresqlConfigSet10.Size(m) +} +func (m *PostgresqlConfigSet10) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlConfigSet10.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlConfigSet10 proto.InternalMessageInfo + +func (m *PostgresqlConfigSet10) GetEffectiveConfig() *PostgresqlConfig10 { + if m != nil { + return m.EffectiveConfig + } + return nil +} + +func (m *PostgresqlConfigSet10) GetUserConfig() *PostgresqlConfig10 { + if m != nil { + return m.UserConfig + } + return nil +} + +func (m *PostgresqlConfigSet10) GetDefaultConfig() *PostgresqlConfig10 { + if m != nil { + return m.DefaultConfig + } + return nil +} + +func init() { + proto.RegisterType((*PostgresqlConfig10)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10") + proto.RegisterType((*PostgresqlConfigSet10)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet10") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_WalLevel", PostgresqlConfig10_WalLevel_name, PostgresqlConfig10_WalLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_SynchronousCommit", PostgresqlConfig10_SynchronousCommit_name, PostgresqlConfig10_SynchronousCommit_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_ConstraintExclusion", PostgresqlConfig10_ConstraintExclusion_name, PostgresqlConfig10_ConstraintExclusion_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_ForceParallelMode", PostgresqlConfig10_ForceParallelMode_name, PostgresqlConfig10_ForceParallelMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogLevel", PostgresqlConfig10_LogLevel_name, PostgresqlConfig10_LogLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogErrorVerbosity", PostgresqlConfig10_LogErrorVerbosity_name, PostgresqlConfig10_LogErrorVerbosity_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_LogStatement", PostgresqlConfig10_LogStatement_name, PostgresqlConfig10_LogStatement_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_TransactionIsolation", PostgresqlConfig10_TransactionIsolation_name, PostgresqlConfig10_TransactionIsolation_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_ByteaOutput", PostgresqlConfig10_ByteaOutput_name, 
PostgresqlConfig10_ByteaOutput_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_XmlBinary", PostgresqlConfig10_XmlBinary_name, PostgresqlConfig10_XmlBinary_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_XmlOption", PostgresqlConfig10_XmlOption_name, PostgresqlConfig10_XmlOption_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig10_BackslashQuote", PostgresqlConfig10_BackslashQuote_name, PostgresqlConfig10_BackslashQuote_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/config/postgresql10.proto", fileDescriptor_postgresql10_fca60c3578566090) +} + +var fileDescriptor_postgresql10_fca60c3578566090 = []byte{ + // 2893 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x9a, 0xdb, 0x7a, 0xdb, 0x36, + 0xb6, 0xc7, 0xb7, 0x6c, 0x37, 0x75, 0xe0, 0xd8, 0xa6, 0x20, 0x1f, 0x68, 0xe7, 0x58, 0xb5, 0xe9, + 0x4e, 0xbb, 0xb7, 0x0f, 0x4a, 0xdc, 0x34, 0x9d, 0x4e, 0x33, 0xa5, 0x28, 0xca, 0xd1, 0x84, 0x12, + 0x55, 0x92, 0x8e, 0x93, 0xf4, 0x6b, 0x39, 0x10, 0x09, 0x49, 0xac, 0x29, 0x42, 0x21, 0x28, 0x1f, + 0x32, 0x37, 0x73, 0x3d, 0x97, 0xbd, 0x9b, 0x79, 0x9b, 0xb9, 0xf2, 0x1b, 0xcc, 0x23, 0xf4, 0x9b, + 0x67, 0xc8, 0xd5, 0x7c, 0xe0, 0x41, 0xa4, 0x0e, 0x29, 0xd5, 0xa4, 0x77, 0xf6, 0x02, 0xfe, 0xbf, + 0x05, 0x60, 0x2d, 0x80, 0x58, 0xf8, 0x04, 0x1e, 0x5d, 0x20, 0xd7, 0xc2, 0xe7, 0x7b, 0xa6, 0x43, + 0x06, 0xd6, 0x5e, 0xcf, 0x6a, 0xed, 0xf5, 0x09, 0xf5, 0x3b, 0x1e, 0xa6, 0xaf, 0x9c, 0xbd, 0xd3, + 0xd2, 0x9e, 0x49, 0xdc, 0xb6, 0xdd, 0x49, 0x19, 0x4b, 0xfb, 0xbb, 0x7d, 0x8f, 0xf8, 0x04, 0xde, + 0x0d, 0x95, 0xbb, 0x81, 0x72, 0xb7, 0x67, 0xb5, 0x76, 0x93, 0x4e, 0xbb, 0xa7, 0xa5, 0xdd, 0x50, + 0xb9, 0x7d, 0xab, 0x43, 0x48, 0xc7, 0xc1, 0x7b, 0x81, 0xa8, 0x35, 0x68, 0xef, 0x9d, 0x79, 0xa8, + 0xdf, 0xc7, 0x1e, 0x0d, 0x31, 0xdb, 0x37, 0x47, 0x06, 0x70, 0x8a, 0x1c, 0xdb, 0x42, 0xbe, 0x4d, + 0xdc, 0xb0, 0xb9, 0xf8, 0x8b, 0x00, 0x60, 0x73, 0xc8, 0x15, 0x03, 0x66, 0x69, 0x1f, 0x56, 0xc0, + 0x6a, 0x0f, 0x9d, 0x1b, 0x26, 0x71, 0x5d, 0x6c, 0xb2, 0xee, 0x94, 0xcf, 0xdd, 0xc9, 0xdd, 0x5b, + 0xba, 0x7f, 0x7d, 0x37, 0xf4, 0xb7, 0x1b, 0xfb, 0xdb, 0xad, 0xb9, 0xfe, 0xc3, 0x83, 0x67, 0xc8, + 0x19, 0x60, 0x75, 0xa5, 0x87, 0xce, 0xc5, 0x44, 0x02, 0xcb, 0x60, 0x85, 0x76, 0x91, 0x87, 0x2d, + 0xa3, 0x35, 0x68, 0xb7, 0xb1, 0x47, 0xf9, 0xb9, 0x6c, 0xc8, 0x72, 0x28, 0x29, 0x87, 0x0a, 0xf8, + 0x18, 0x5c, 0xf3, 0x71, 0xaf, 0x3f, 0x24, 0xcc, 0x67, 0x13, 0x96, 0x98, 0x20, 0xd6, 0x1f, 0x83, + 0x2d, 0x36, 0x93, 0xbe, 0x87, 0xfb, 0xc1, 0x48, 0x7c, 0x0f, 0xb9, 0x14, 0x45, 0x73, 0x5a, 0xc8, + 0x86, 0x6d, 0xf6, 0xd0, 0x79, 0x33, 0x12, 0xeb, 0x29, 0x2d, 0x7c, 0x08, 0x16, 0xcf, 0x88, 0x77, + 0x62, 0xf4, 0x70, 0x8f, 0xff, 0x20, 0x9b, 0xf3, 0x21, 0xeb, 0x5c, 0xc7, 0x3d, 0x58, 0x07, 0x6b, + 0x3d, 0x64, 0xbb, 0x3e, 0x76, 0x91, 0x6b, 0x62, 0x63, 0xc8, 0xb8, 0x92, 0xcd, 0x80, 0x29, 0xe1, + 0x71, 0x84, 0xd3, 0xc0, 0xa6, 0x87, 0xfb, 0x0e, 0x32, 0x71, 0x0f, 0xbb, 0xbe, 0x41, 0x89, 0xe7, + 0x1b, 0xfe, 0xa0, 0xef, 0x60, 0xca, 0x7f, 0x98, 0x4d, 0x5c, 0x4f, 0x69, 0x35, 0xe2, 0xf9, 0x7a, + 0xa0, 0x84, 0x4f, 0x41, 0x01, 0x0d, 0x7c, 0x72, 0x8a, 0xcc, 0xc1, 0xa0, 0x97, 0x0c, 0x71, 0x31, + 0x1b, 0x98, 0x4f, 0x74, 0xf1, 0x08, 0x45, 0xb0, 0x1a, 0x44, 0xb0, 0x6d, 0x3b, 0xd8, 0x70, 0xec, + 0x9e, 0xed, 0xf3, 0x57, 0x67, 0x48, 0x03, 0xa6, 0xa9, 0xda, 0x0e, 0x96, 0x99, 0x02, 0x1e, 0x82, + 0x7c, 0x34, 0x1a, 0x93, 0x50, 0xdf, 0xb0, 0xb0, 0x83, 0x2e, 0x78, 0x90, 0x8d, 0x59, 0x0d, 0x55, + 0x22, 
0xa1, 0x7e, 0x85, 0x69, 0xa0, 0x0c, 0xd6, 0xd2, 0xa0, 0x3e, 0xea, 0x60, 0xa3, 0x6b, 0xfb, + 0xfc, 0xd2, 0x0c, 0x73, 0x4b, 0x58, 0x4d, 0xd4, 0xc1, 0x4f, 0x6c, 0x1f, 0x36, 0xc0, 0xfa, 0x04, + 0xad, 0x67, 0x53, 0xca, 0x5f, 0x9b, 0x21, 0x9a, 0xa3, 0xb8, 0xba, 0x4d, 0x29, 0x6c, 0x82, 0x8d, + 0x09, 0x9e, 0x65, 0x7b, 0xfe, 0x05, 0xbf, 0x9c, 0x0d, 0x2c, 0x8c, 0x02, 0x2b, 0x4c, 0x37, 0xbe, + 0x70, 0xe1, 0xfa, 0xaf, 0xfc, 0xa6, 0x85, 0x0b, 0x23, 0xd0, 0x04, 0x2b, 0xad, 0xce, 0x99, 0x67, + 0xfb, 0xd8, 0x8b, 0x96, 0x7f, 0x35, 0x93, 0x52, 0xbe, 0xf6, 0xe6, 0xb2, 0xb4, 0x58, 0xda, 0xdf, + 0x29, 0xed, 0xef, 0xef, 0xef, 0xab, 0xcb, 0x31, 0x20, 0x0c, 0x85, 0x02, 0xd6, 0x87, 0x44, 0xc7, + 0x1b, 0x18, 0x3d, 0x74, 0xce, 0x26, 0x4c, 0x79, 0x6e, 0x86, 0xb9, 0xc6, 0x4a, 0xd9, 0x1b, 0xd4, + 0x23, 0x1d, 0xd4, 0xc1, 0xe6, 0x28, 0x70, 0xe0, 0xf8, 0x76, 0xdf, 0xb1, 0xb1, 0xc7, 0xe7, 0x03, + 0xe4, 0x8d, 0x09, 0x64, 0x85, 0x0c, 0x5a, 0x0e, 0x8e, 0x36, 0x43, 0x9a, 0x39, 0x94, 0xc2, 0x17, + 0x60, 0x6d, 0x48, 0x6d, 0x3b, 0x03, 0xda, 0x35, 0x50, 0xdb, 0xc7, 0x1e, 0x0f, 0xb3, 0xa7, 0x0f, + 0xde, 0x5c, 0x96, 0xae, 0xec, 0xef, 0xdc, 0xdf, 0x3f, 0x78, 0xa4, 0xc2, 0x18, 0x52, 0x65, 0x0c, + 0x81, 0x21, 0xe0, 0x31, 0x28, 0xb4, 0x90, 0x79, 0x82, 0x5d, 0x6b, 0x84, 0x5c, 0xf8, 0x6d, 0xe4, + 0x7c, 0xc4, 0x48, 0x81, 0x5b, 0x60, 0x83, 0x38, 0x96, 0x41, 0x5d, 0xd4, 0xa7, 0x5d, 0xe2, 0x1b, + 0x7e, 0xd7, 0xc3, 0xb4, 0x4b, 0x1c, 0x8b, 0x5f, 0xcb, 0x66, 0xaf, 0xbe, 0xb9, 0x2c, 0x2d, 0xed, + 0x94, 0x76, 0x1e, 0x3d, 0x3c, 0xd8, 0x0f, 0xe2, 0xb6, 0x46, 0x1c, 0x4b, 0x8b, 0x50, 0x7a, 0x4c, + 0x82, 0x06, 0xb8, 0x7a, 0x86, 0x1c, 0xc3, 0xc1, 0xa7, 0xd8, 0xe1, 0xd7, 0xef, 0xe4, 0xee, 0xad, + 0xdc, 0x2f, 0xef, 0xce, 0xf4, 0xd1, 0xda, 0x9d, 0xfc, 0xe2, 0xec, 0x1e, 0x23, 0x47, 0x66, 0x24, + 0x75, 0xf1, 0x2c, 0xfa, 0x0b, 0xbe, 0x06, 0x90, 0x5e, 0xb8, 0x66, 0xd7, 0x23, 0x2e, 0x19, 0x50, + 0xc3, 0x24, 0x3d, 0x96, 0xbb, 0x1b, 0x81, 0xa7, 0xa7, 0xef, 0xee, 0x49, 0x4b, 0x98, 0x62, 0x80, + 0x54, 0xf3, 0x74, 0xdc, 0x04, 0x7f, 0x04, 0xd0, 0xec, 0x62, 0xf3, 0xa4, 0x4f, 0x6c, 0xd7, 0x37, + 0x7c, 0xbb, 0x87, 0xc9, 0xc0, 0xe7, 0x37, 0xb3, 0x17, 0x0f, 0xbe, 0xb9, 0x2c, 0xad, 0x3c, 0x60, + 0xcb, 0x96, 0xac, 0x5f, 0x3e, 0x41, 0xe9, 0x21, 0x09, 0xfe, 0x08, 0x6e, 0xa4, 0xf8, 0x26, 0xe9, + 0xf5, 0x1d, 0xcc, 0xbe, 0x2b, 0x86, 0x8f, 0xbc, 0x0e, 0xf6, 0x79, 0x7e, 0x86, 0x7c, 0xdd, 0x4e, + 0x08, 0xe2, 0x10, 0xa0, 0x07, 0x7a, 0xf8, 0x03, 0xd8, 0x48, 0xf1, 0xd3, 0xc9, 0xb5, 0xf5, 0xdb, + 0x92, 0x6b, 0x2d, 0xc1, 0xa4, 0xf2, 0xeb, 0x1b, 0x70, 0x8d, 0x7d, 0x55, 0x59, 0xfc, 0xa9, 0xfd, + 0x1a, 0xf3, 0xdb, 0xd9, 0x3b, 0x16, 0xf4, 0xd0, 0xf9, 0x31, 0x72, 0x34, 0xfb, 0x35, 0x0e, 0xe4, + 0xb6, 0x9b, 0xc8, 0xaf, 0xcf, 0x22, 0xb7, 0xdd, 0x58, 0xfe, 0x12, 0x5c, 0x67, 0xde, 0xa9, 0x8f, + 0x5c, 0xab, 0x75, 0x61, 0x50, 0xdf, 0xc3, 0xa8, 0x67, 0xbb, 0x9d, 0xe8, 0x5c, 0xba, 0x91, 0x4d, + 0xe3, 0x7b, 0xe8, 0x5c, 0x0b, 0xe5, 0x5a, 0xac, 0x0e, 0x0f, 0xa5, 0x63, 0xb0, 0x65, 0xe1, 0x36, + 0x1a, 0x38, 0x3e, 0xe3, 0xfb, 0x36, 0xf5, 0x6d, 0x93, 0xc6, 0x51, 0xb9, 0x39, 0xc3, 0x7d, 0x21, + 0x52, 0x6b, 0x43, 0x71, 0x14, 0x91, 0xbf, 0xe5, 0xc0, 0x9a, 0x49, 0x5c, 0xea, 0x7b, 0xec, 0x23, + 0x6e, 0xe0, 0x73, 0xd3, 0x19, 0x50, 0x9b, 0xb8, 0xfc, 0xad, 0x20, 0xa1, 0xeb, 0xef, 0x9e, 0xd0, + 0xe2, 0x90, 0x2a, 0xc5, 0x50, 0xb5, 0x60, 0x4e, 0x1a, 0x61, 0x13, 0xac, 0x9b, 0x03, 0x8f, 0x12, + 0x2f, 0xbc, 0x21, 0x18, 0x6d, 0x2f, 0xbc, 0xcc, 0xf0, 0xb7, 0x67, 0xc8, 0xb6, 0x42, 0x28, 0x0d, + 0x6e, 0x08, 0xd5, 0x48, 0x08, 0x7f, 0x00, 0x85, 0xb6, 0x47, 0xd8, 0xb7, 0xc5, 0x71, 0x50, 0x9f, + 0xc6, 0xdf, 0xf7, 0x3b, 0xd9, 
0x39, 0xc6, 0xbd, 0xb9, 0x2c, 0x5d, 0x2b, 0xed, 0xdc, 0x2f, 0x1d, + 0x7c, 0x79, 0xf0, 0xe8, 0xc1, 0xc3, 0x83, 0x2f, 0xd5, 0x3c, 0x23, 0x89, 0x11, 0x28, 0xfc, 0xe6, + 0xfc, 0x00, 0x0a, 0x3f, 0x11, 0xdb, 0x1d, 0xc7, 0x7f, 0xf4, 0x4e, 0x78, 0x46, 0x1a, 0xc5, 0xff, + 0x15, 0x14, 0xda, 0xc4, 0x33, 0xb1, 0xd1, 0x47, 0x1e, 0x72, 0x1c, 0xec, 0x18, 0x3d, 0x62, 0x61, + 0xbe, 0xf8, 0xbe, 0x27, 0x4c, 0x95, 0x41, 0x9b, 0x11, 0xb3, 0x4e, 0x2c, 0xac, 0xe6, 0xdb, 0xe3, + 0x26, 0xe8, 0x81, 0x82, 0xe9, 0xd8, 0xec, 0xce, 0xc6, 0xb6, 0x42, 0x0f, 0x53, 0x1a, 0x7c, 0xfb, + 0x3e, 0x7e, 0xdf, 0x83, 0x54, 0x26, 0x9d, 0xf0, 0x20, 0xcd, 0x87, 0xf8, 0xba, 0xed, 0xd6, 0x23, + 0x38, 0x74, 0x00, 0xe7, 0x90, 0xce, 0xa8, 0xc3, 0x4f, 0x7e, 0x37, 0x87, 0x2b, 0x0e, 0xe9, 0xa4, + 0xbd, 0x5d, 0x80, 0xcd, 0xd8, 0x1b, 0xf6, 0x3c, 0xe2, 0x05, 0x1b, 0x2a, 0xb8, 0x6a, 0xf2, 0x77, + 0x7f, 0x37, 0xa7, 0x6b, 0xa1, 0x53, 0x89, 0x39, 0xd0, 0x62, 0x3e, 0x7c, 0x0e, 0xb6, 0x63, 0xd7, + 0xd6, 0xc0, 0x0b, 0x0a, 0x9e, 0x94, 0xf7, 0x4f, 0x67, 0xd8, 0xc6, 0x21, 0xb6, 0x12, 0x89, 0x13, + 0xb2, 0x08, 0x56, 0x19, 0x39, 0x39, 0x15, 0x29, 0xff, 0xbf, 0x01, 0x6e, 0x7b, 0x02, 0x57, 0x26, + 0xc4, 0x89, 0x0a, 0x23, 0x87, 0x74, 0xc4, 0x44, 0x31, 0x84, 0xa4, 0xca, 0xab, 0x7b, 0xb3, 0x41, + 0x52, 0xd5, 0x55, 0x0d, 0x40, 0x06, 0xb1, 0x6c, 0x9a, 0xe6, 0x7c, 0x96, 0xc9, 0xc9, 0x3b, 0xa4, + 0x53, 0x19, 0x11, 0xb1, 0xf3, 0x38, 0x40, 0x45, 0xb3, 0xe5, 0x3f, 0xcf, 0x84, 0x2c, 0x31, 0x48, + 0xd4, 0x9d, 0xed, 0x23, 0x26, 0x0f, 0x83, 0x7c, 0x8a, 0xbd, 0x16, 0xa1, 0xb6, 0x7f, 0xc1, 0xff, + 0xdf, 0xfb, 0xee, 0x23, 0x99, 0x74, 0x82, 0xb8, 0x3e, 0x8b, 0x91, 0xc1, 0xd8, 0x47, 0x4d, 0xf0, + 0x5b, 0xc0, 0x16, 0xc6, 0x70, 0x88, 0x79, 0x62, 0x9c, 0x21, 0xdb, 0xa7, 0xfc, 0xff, 0x67, 0x8e, + 0x9e, 0xcd, 0x56, 0x26, 0xe6, 0xc9, 0x31, 0xeb, 0x0f, 0x4f, 0xc0, 0x32, 0x23, 0x24, 0xf9, 0xb1, + 0x13, 0x0c, 0xbc, 0xfa, 0x5e, 0x03, 0x1f, 0x66, 0x4c, 0xe0, 0x2c, 0xc9, 0x1f, 0x21, 0x1c, 0xee, + 0xb0, 0x22, 0xa2, 0xfc, 0x6e, 0x76, 0x36, 0x32, 0x84, 0x1e, 0xd5, 0x43, 0x14, 0xde, 0x06, 0x4b, + 0x14, 0x23, 0xcf, 0xec, 0x1a, 0x7d, 0xe4, 0x77, 0xf9, 0xbd, 0x3b, 0xb9, 0x7b, 0x57, 0x55, 0x10, + 0x9a, 0x9a, 0xc8, 0xef, 0xb2, 0x70, 0x7a, 0xe4, 0xcc, 0xa0, 0xd8, 0x1c, 0x78, 0x2c, 0x10, 0xfb, + 0xd9, 0xe1, 0xf4, 0xc8, 0x99, 0x16, 0x75, 0x87, 0x3f, 0xe7, 0xc0, 0xcd, 0xf8, 0x1b, 0x98, 0x2a, + 0x97, 0x0d, 0x9b, 0x12, 0x27, 0xcc, 0x8f, 0x52, 0xb0, 0x40, 0x8d, 0x77, 0x5f, 0xa0, 0x54, 0x25, + 0x5d, 0x8b, 0xa9, 0xea, 0xf5, 0xc8, 0xe9, 0xb4, 0x46, 0xf8, 0x04, 0xe4, 0x87, 0x01, 0x1a, 0xde, + 0xc7, 0xee, 0x67, 0x2f, 0x1d, 0x37, 0x54, 0xc5, 0x57, 0xaf, 0xc7, 0x2c, 0xd9, 0xcd, 0x93, 0x21, + 0xe4, 0xc1, 0x0c, 0x2f, 0x0a, 0x4c, 0x10, 0xeb, 0x6d, 0xf0, 0xb1, 0x6d, 0x39, 0xd8, 0xb0, 0xdd, + 0x91, 0xd5, 0xa1, 0x98, 0xd2, 0xe0, 0x02, 0x17, 0x61, 0x0f, 0xb2, 0xb1, 0xb7, 0x19, 0xa7, 0xe6, + 0xa6, 0xe6, 0xab, 0x85, 0x90, 0xd8, 0x55, 0x17, 0x5c, 0x6b, 0x5d, 0xf8, 0x18, 0x19, 0x64, 0xe0, + 0xf7, 0x07, 0x3e, 0xff, 0x45, 0xb0, 0xee, 0xd2, 0xbb, 0xaf, 0x7b, 0x99, 0xd1, 0x94, 0x00, 0xa6, + 0x2e, 0xb5, 0x92, 0x7f, 0x20, 0x02, 0x57, 0xcf, 0x7b, 0x4e, 0xcb, 0x76, 0x91, 0x77, 0xc1, 0x3f, + 0x0c, 0xdc, 0x88, 0xef, 0xee, 0xe6, 0x79, 0xcf, 0x29, 0x07, 0x28, 0x35, 0xa1, 0x46, 0x2e, 0x48, + 0x3f, 0xc8, 0xa0, 0x2f, 0x7f, 0x07, 0x17, 0x4a, 0x80, 0x52, 0x13, 0x2a, 0x2b, 0x9f, 0x3b, 0xb6, + 0x6b, 0xf4, 0xb1, 0x6b, 0xb1, 0xeb, 0xa0, 0x63, 0x0f, 0x2b, 0xde, 0x47, 0x33, 0x94, 0x94, 0x1d, + 0xdb, 0x6d, 0x86, 0x4a, 0xd9, 0x8e, 0xab, 0xde, 0x2a, 0xe0, 0x2c, 0x8c, 0xac, 0x91, 0x84, 0xf9, + 0x6a, 0x86, 0xea, 0x39, 0x16, 0xc5, 0x91, 0x7c, 0x16, 
0x3e, 0x43, 0x31, 0x13, 0x35, 0xfa, 0xd8, + 0x4b, 0xa7, 0x0e, 0xff, 0x87, 0x6c, 0xe0, 0x46, 0x0f, 0x9d, 0xb3, 0x13, 0x8b, 0x36, 0xb1, 0x97, + 0xca, 0x17, 0x68, 0x80, 0x5b, 0xd1, 0xf3, 0x96, 0xf5, 0x16, 0xf8, 0xd7, 0xd9, 0xf0, 0xed, 0xf0, + 0x8d, 0xcb, 0x9a, 0xe6, 0xe0, 0x6b, 0xb0, 0x84, 0x3c, 0x0f, 0x5d, 0x18, 0xee, 0xc0, 0x71, 0x28, + 0xff, 0xc7, 0xcc, 0xa3, 0x04, 0x04, 0xdd, 0x1b, 0xac, 0x37, 0x7c, 0x05, 0x56, 0x59, 0x6d, 0x4a, + 0x1d, 0x44, 0xbb, 0xc6, 0xab, 0x01, 0xf1, 0x31, 0xff, 0x4d, 0x10, 0xf8, 0x27, 0xef, 0x91, 0xc2, + 0x31, 0xf0, 0x3b, 0xc6, 0x53, 0x57, 0x5a, 0x23, 0xff, 0xc3, 0x2a, 0xc8, 0xc7, 0x67, 0xd7, 0x99, + 0xed, 0x77, 0x0d, 0x62, 0x5b, 0x94, 0x7f, 0x9c, 0x39, 0xea, 0xd5, 0x48, 0x74, 0x6c, 0xfb, 0x5d, + 0xc5, 0xb6, 0x28, 0x6c, 0x80, 0x75, 0x4c, 0x4d, 0xd4, 0xc7, 0xac, 0xbc, 0x60, 0xc9, 0x74, 0x86, + 0x3c, 0xd7, 0x76, 0x3b, 0xfc, 0x9f, 0x32, 0x59, 0x85, 0x50, 0xa8, 0x05, 0xba, 0xe3, 0x50, 0x06, + 0x65, 0xb0, 0xe6, 0x90, 0xa0, 0xd0, 0x43, 0xbe, 0xd1, 0xf7, 0xec, 0x53, 0xdb, 0xc1, 0xec, 0xfa, + 0xf5, 0x6d, 0x26, 0x0e, 0x3a, 0x44, 0x0c, 0x64, 0xcd, 0xa1, 0x8a, 0x55, 0x40, 0xa4, 0x8f, 0x3d, + 0xe4, 0x13, 0x8f, 0xc5, 0xde, 0xc4, 0x16, 0x0e, 0x1e, 0x13, 0xa3, 0x31, 0x0a, 0x99, 0xd0, 0xad, + 0x58, 0xde, 0x1c, 0xaa, 0xe3, 0x91, 0x36, 0xc0, 0x7a, 0x10, 0x2a, 0x03, 0x39, 0x8e, 0x61, 0x5b, + 0xd8, 0xf5, 0xed, 0xb6, 0x8d, 0x3d, 0xca, 0x97, 0xb3, 0x67, 0x1e, 0x08, 0x05, 0xc7, 0xa9, 0x25, + 0x32, 0x36, 0xd6, 0xa0, 0x52, 0x43, 0x9e, 0xc5, 0x6e, 0x3c, 0x6d, 0xe2, 0x05, 0xb5, 0x5a, 0xb8, + 0xac, 0x94, 0x17, 0xb3, 0xc7, 0x1a, 0xcb, 0xc5, 0xa1, 0x3a, 0x5c, 0x5b, 0x0a, 0xeb, 0x60, 0x2d, + 0xae, 0xdd, 0xed, 0xd7, 0xd8, 0xa0, 0xf8, 0x15, 0x35, 0x91, 0x4b, 0xf9, 0x4a, 0xf6, 0x50, 0x53, + 0x3a, 0x2d, 0x92, 0xb1, 0xa9, 0x07, 0x5b, 0x87, 0x79, 0x09, 0x12, 0xde, 0xc0, 0xaf, 0x06, 0xc8, + 0xa1, 0xbc, 0x94, 0xcd, 0x1b, 0x0a, 0x59, 0xea, 0x4b, 0x81, 0x0c, 0x3e, 0x06, 0xcb, 0xf8, 0xdc, + 0xf6, 0x0d, 0x12, 0xdd, 0x80, 0xf9, 0x6a, 0xf6, 0x97, 0x98, 0x09, 0x94, 0xf0, 0x3e, 0x0b, 0xbf, + 0x05, 0xcb, 0x14, 0xbf, 0x0a, 0x9f, 0x01, 0x4d, 0x42, 0x7d, 0xfe, 0x70, 0x86, 0x42, 0x6d, 0x89, + 0xe2, 0x57, 0x4d, 0xd4, 0xc1, 0x22, 0xa1, 0xc1, 0xf9, 0xe5, 0x21, 0xd7, 0x22, 0xbd, 0x14, 0xe4, + 0xc9, 0x0c, 0x90, 0x95, 0x50, 0x35, 0xe4, 0x7c, 0x0f, 0x36, 0x52, 0x2f, 0xc2, 0x41, 0xed, 0x4f, + 0xbc, 0x13, 0x96, 0x15, 0xb5, 0xec, 0x62, 0x6c, 0xf1, 0xcd, 0x65, 0x69, 0xa1, 0xb4, 0xf3, 0xe0, + 0xbe, 0xba, 0x96, 0x40, 0xea, 0xe8, 0xfc, 0x38, 0x44, 0xc0, 0x2e, 0xb8, 0x91, 0x82, 0x4f, 0xbe, + 0xf3, 0xfe, 0x79, 0xc6, 0x27, 0x8b, 0x9d, 0xd2, 0x4e, 0x69, 0x7f, 0x5f, 0xdd, 0x4a, 0x60, 0xcf, + 0xc6, 0x5e, 0x7f, 0x7f, 0x7a, 0xab, 0xa7, 0xf0, 0x33, 0xf1, 0x74, 0xc6, 0x27, 0xcd, 0xd0, 0xd3, + 0xdb, 0x7c, 0xc5, 0xc5, 0x2b, 0x4c, 0xf9, 0x72, 0x51, 0x9f, 0x7d, 0x3f, 0x78, 0x39, 0xdb, 0x43, + 0xfe, 0xcd, 0x65, 0x69, 0xb9, 0x34, 0xfa, 0x82, 0x94, 0x90, 0x1a, 0x21, 0xa8, 0xa8, 0x81, 0xc5, + 0xf8, 0xcd, 0x0c, 0x6e, 0x81, 0xf5, 0x63, 0x41, 0x36, 0x64, 0xe9, 0x99, 0x24, 0x1b, 0x47, 0x0d, + 0xad, 0x29, 0x89, 0xb5, 0x6a, 0x4d, 0xaa, 0x70, 0xff, 0x03, 0xd7, 0x41, 0x3e, 0x69, 0x52, 0xa5, + 0xa6, 0x5c, 0x13, 0x05, 0x2e, 0x37, 0x6a, 0x96, 0x95, 0xc3, 0x9a, 0x28, 0xc8, 0xdc, 0x5c, 0xf1, + 0xdf, 0x39, 0x90, 0x9f, 0x78, 0x1f, 0x83, 0x45, 0x70, 0x4b, 0x7b, 0xd1, 0x10, 0x9f, 0xa8, 0x4a, + 0x43, 0x39, 0xd2, 0x0c, 0x51, 0xa9, 0xd7, 0x6b, 0xfa, 0x98, 0x9f, 0x2d, 0xb0, 0x3e, 0xa5, 0x8f, + 0xd2, 0xe0, 0x72, 0x70, 0x1b, 0x6c, 0x4c, 0x6b, 0xaa, 0x56, 0xb9, 0x39, 0x78, 0x03, 0xf0, 0x53, + 0xda, 0x64, 0x85, 0x0d, 0x67, 0x1e, 0x7e, 0x0c, 0x6e, 0x4f, 0x69, 0x55, 0xa5, 
0xba, 0xa2, 0x4b, + 0xc6, 0xb1, 0x5a, 0xd3, 0x25, 0x6e, 0xe1, 0xd7, 0x3b, 0x09, 0xcd, 0xa6, 0xfc, 0x82, 0xfb, 0xa0, + 0xf8, 0xcf, 0x1c, 0x28, 0x4c, 0x79, 0x27, 0x81, 0x9f, 0x80, 0x3b, 0xa2, 0xd2, 0xd0, 0x74, 0x55, + 0xa8, 0x35, 0x74, 0x43, 0x7a, 0x2e, 0xca, 0x47, 0x5a, 0x4d, 0x69, 0x8c, 0x4d, 0xee, 0x3a, 0xd8, + 0x9c, 0xda, 0x2b, 0x98, 0xde, 0x0d, 0xc0, 0x4f, 0x6f, 0x0c, 0x26, 0x58, 0x04, 0xb7, 0xa6, 0xb6, + 0x36, 0x05, 0x55, 0xaf, 0xe9, 0x35, 0xa5, 0xc1, 0xcd, 0x17, 0x7f, 0xce, 0x81, 0xfc, 0xc4, 0x9b, + 0x01, 0x9b, 0x57, 0x55, 0x51, 0x45, 0x89, 0x75, 0x15, 0x64, 0x59, 0x92, 0x8d, 0xba, 0x52, 0x91, + 0xc6, 0x46, 0xb6, 0x0d, 0x36, 0xa6, 0x75, 0x0a, 0x06, 0x76, 0x1d, 0x6c, 0x4e, 0x6d, 0x0b, 0xc6, + 0x75, 0x1b, 0x5c, 0x9f, 0xd6, 0xa8, 0x4a, 0x87, 0xaa, 0xa4, 0x69, 0x6c, 0x50, 0x73, 0x60, 0x31, + 0xae, 0xb2, 0x59, 0x74, 0x65, 0xe5, 0x70, 0x6a, 0x82, 0xad, 0x01, 0x2e, 0x69, 0xaa, 0x48, 0xe5, + 0xa3, 0xc3, 0x2f, 0xb8, 0xdc, 0x14, 0xeb, 0x01, 0x37, 0x37, 0xc5, 0xfa, 0x80, 0x9b, 0x9f, 0x62, + 0xbd, 0xcf, 0x2d, 0x4c, 0xb1, 0x96, 0xb8, 0x0f, 0x60, 0x1e, 0x2c, 0x27, 0x56, 0x59, 0x39, 0xe4, + 0xae, 0x8c, 0x76, 0x6c, 0x28, 0x7a, 0x4d, 0x94, 0xb8, 0x0f, 0x59, 0x82, 0x27, 0xd6, 0x63, 0x41, + 0x6d, 0xd4, 0x1a, 0x87, 0xdc, 0x22, 0x2c, 0x80, 0xd5, 0xc4, 0x2c, 0xa9, 0xaa, 0xa2, 0x72, 0x57, + 0x47, 0x8d, 0x55, 0x41, 0x17, 0x64, 0x0e, 0x8c, 0x1a, 0x9b, 0x42, 0xa3, 0x26, 0x72, 0x4b, 0xc5, + 0x7f, 0xe4, 0x40, 0x7e, 0xa2, 0x2a, 0x65, 0x91, 0x62, 0x5d, 0x03, 0x9c, 0xf1, 0x4c, 0x52, 0xcb, + 0x8a, 0x56, 0xd3, 0x5f, 0x8c, 0xad, 0xd3, 0x4d, 0xb0, 0x35, 0xad, 0x93, 0x2e, 0xa9, 0x9a, 0xc4, + 0xe5, 0x58, 0x3c, 0xa6, 0x35, 0x57, 0xa4, 0xaa, 0x70, 0x24, 0xeb, 0x61, 0xc0, 0xa6, 0x75, 0x08, + 0xff, 0x92, 0xb8, 0xf9, 0xe2, 0xdf, 0x73, 0xe0, 0x5a, 0xba, 0xf0, 0x8c, 0x3d, 0x6a, 0xba, 0xa0, + 0x4b, 0x75, 0xa9, 0x31, 0xbe, 0x63, 0x37, 0x00, 0x1c, 0x6d, 0x6e, 0x28, 0x0d, 0x29, 0x3c, 0x1a, + 0x46, 0xed, 0x95, 0x8a, 0xcc, 0xcd, 0x4d, 0x9a, 0xeb, 0x4a, 0x85, 0x9b, 0x9f, 0x34, 0x0b, 0xb2, + 0xcc, 0x2d, 0x14, 0x7f, 0xc9, 0x81, 0xb5, 0xa9, 0x75, 0xdc, 0x5d, 0xf0, 0x91, 0xae, 0x0a, 0x0d, + 0x4d, 0x10, 0x59, 0xf2, 0x1b, 0x35, 0x4d, 0x91, 0x05, 0x7d, 0x72, 0xc7, 0x7d, 0x0e, 0x3e, 0x9d, + 0xde, 0x4d, 0x95, 0x84, 0x8a, 0x71, 0xd4, 0x08, 0x77, 0xb9, 0x2e, 0x55, 0xb8, 0x1c, 0xbc, 0x07, + 0x3e, 0xf9, 0x95, 0xbe, 0x49, 0xcf, 0x39, 0xf8, 0x19, 0xb8, 0xfb, 0xb6, 0x9e, 0x4d, 0x49, 0xd0, + 0x85, 0xb2, 0x2c, 0x05, 0x22, 0x6e, 0x1e, 0x7e, 0x0a, 0x8a, 0xd3, 0xbb, 0x6a, 0x92, 0x5a, 0x13, + 0xe4, 0xda, 0x4b, 0xd6, 0x99, 0x5b, 0x28, 0x7e, 0x0f, 0x96, 0x52, 0x45, 0x15, 0x3b, 0x0c, 0xca, + 0x2f, 0x74, 0x49, 0x30, 0x94, 0x23, 0xbd, 0x79, 0xa4, 0x4f, 0xee, 0x95, 0x91, 0xd6, 0x27, 0xd2, + 0x73, 0x2e, 0x07, 0x79, 0xb0, 0x36, 0x62, 0x95, 0x34, 0x51, 0x68, 0xb2, 0xf1, 0x16, 0x55, 0x70, + 0x75, 0x58, 0x4a, 0xb1, 0xad, 0xfe, 0xbc, 0x2e, 0x1b, 0xe5, 0x5a, 0x43, 0x50, 0x5f, 0x4c, 0x9e, + 0xf2, 0xa9, 0xb6, 0xb2, 0xa0, 0x49, 0x0f, 0x0f, 0xb8, 0x1c, 0x84, 0x60, 0x25, 0x65, 0x66, 0xde, + 0xe6, 0x8a, 0xcf, 0x03, 0x66, 0x58, 0x3b, 0xc5, 0x4c, 0xa5, 0x39, 0x25, 0x04, 0x9b, 0xa0, 0x90, + 0x6a, 0xab, 0x28, 0xe2, 0x11, 0x8b, 0x2f, 0x97, 0x63, 0x89, 0x93, 0x6a, 0x10, 0x95, 0x86, 0xce, + 0xec, 0x73, 0xec, 0x8c, 0x5d, 0x19, 0xbd, 0x9d, 0xb3, 0xa4, 0x2d, 0x0b, 0xe2, 0x53, 0x4d, 0x16, + 0xb4, 0x27, 0xc6, 0x77, 0x47, 0xec, 0x44, 0x1e, 0x75, 0x52, 0x00, 0xab, 0x63, 0x1d, 0x42, 0x07, + 0xe3, 0x2a, 0xa5, 0xc1, 0xcd, 0xb1, 0x11, 0x4d, 0xd8, 0xab, 0x55, 0x6e, 0x1e, 0x7e, 0x04, 0x6e, + 0x8e, 0x37, 0x68, 0x42, 0x55, 0x32, 0xa4, 0x86, 0xa8, 0x54, 0xd8, 0xc6, 0x5f, 0x28, 0xfe, 0x6b, + 0x0e, 
0xac, 0x8f, 0x57, 0x13, 0x1a, 0xf6, 0x4b, 0xfb, 0xf0, 0x27, 0xc0, 0xe1, 0x76, 0x1b, 0x9b, + 0xbe, 0x7d, 0x8a, 0x8d, 0xb0, 0xee, 0x88, 0x7e, 0xec, 0xf0, 0xd5, 0x3b, 0x57, 0x29, 0xe5, 0x85, + 0xff, 0x5c, 0x96, 0x72, 0xea, 0xea, 0x10, 0x1c, 0x36, 0xc0, 0x97, 0x60, 0x69, 0x40, 0xb1, 0x17, + 0xbb, 0x99, 0x7b, 0x4f, 0x37, 0x2a, 0x60, 0xb4, 0x88, 0xfd, 0x17, 0xb0, 0x12, 0x57, 0x3e, 0x11, + 0x7e, 0xfe, 0x7d, 0xf1, 0xcb, 0x11, 0x30, 0x34, 0x94, 0x9f, 0xbd, 0xd4, 0x3b, 0xb6, 0xdf, 0x1d, + 0xb4, 0x76, 0x4d, 0xd2, 0xdb, 0x0b, 0xa9, 0x3b, 0xe1, 0x0f, 0x4b, 0x3a, 0x64, 0xa7, 0x83, 0xdd, + 0xe0, 0x36, 0xb3, 0x37, 0xd3, 0x4f, 0x5e, 0xbe, 0x4e, 0x8c, 0xad, 0x2b, 0x81, 0xee, 0xc1, 0x7f, + 0x03, 0x00, 0x00, 0xff, 0xff, 0x4f, 0x7e, 0x0d, 0x7d, 0x2d, 0x23, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql11.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql11.pb.go new file mode 100644 index 000000000..0d87dd10a --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql11.pb.go @@ -0,0 +1,1291 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/config/postgresql11.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PostgresqlConfig11_WalLevel int32 + +const ( + PostgresqlConfig11_WAL_LEVEL_UNSPECIFIED PostgresqlConfig11_WalLevel = 0 + PostgresqlConfig11_WAL_LEVEL_REPLICA PostgresqlConfig11_WalLevel = 1 + PostgresqlConfig11_WAL_LEVEL_LOGICAL PostgresqlConfig11_WalLevel = 2 +) + +var PostgresqlConfig11_WalLevel_name = map[int32]string{ + 0: "WAL_LEVEL_UNSPECIFIED", + 1: "WAL_LEVEL_REPLICA", + 2: "WAL_LEVEL_LOGICAL", +} +var PostgresqlConfig11_WalLevel_value = map[string]int32{ + "WAL_LEVEL_UNSPECIFIED": 0, + "WAL_LEVEL_REPLICA": 1, + "WAL_LEVEL_LOGICAL": 2, +} + +func (x PostgresqlConfig11_WalLevel) String() string { + return proto.EnumName(PostgresqlConfig11_WalLevel_name, int32(x)) +} +func (PostgresqlConfig11_WalLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 0} +} + +type PostgresqlConfig11_SynchronousCommit int32 + +const ( + PostgresqlConfig11_SYNCHRONOUS_COMMIT_UNSPECIFIED PostgresqlConfig11_SynchronousCommit = 0 + PostgresqlConfig11_SYNCHRONOUS_COMMIT_ON PostgresqlConfig11_SynchronousCommit = 1 + PostgresqlConfig11_SYNCHRONOUS_COMMIT_OFF PostgresqlConfig11_SynchronousCommit = 2 + PostgresqlConfig11_SYNCHRONOUS_COMMIT_LOCAL PostgresqlConfig11_SynchronousCommit = 3 + PostgresqlConfig11_SYNCHRONOUS_COMMIT_REMOTE_WRITE PostgresqlConfig11_SynchronousCommit = 4 + PostgresqlConfig11_SYNCHRONOUS_COMMIT_REMOTE_APPLY PostgresqlConfig11_SynchronousCommit = 5 +) + +var PostgresqlConfig11_SynchronousCommit_name = map[int32]string{ + 0: "SYNCHRONOUS_COMMIT_UNSPECIFIED", + 1: "SYNCHRONOUS_COMMIT_ON", + 2: "SYNCHRONOUS_COMMIT_OFF", + 3: "SYNCHRONOUS_COMMIT_LOCAL", + 4: "SYNCHRONOUS_COMMIT_REMOTE_WRITE", + 5: "SYNCHRONOUS_COMMIT_REMOTE_APPLY", +} +var PostgresqlConfig11_SynchronousCommit_value = map[string]int32{ + "SYNCHRONOUS_COMMIT_UNSPECIFIED": 0, + "SYNCHRONOUS_COMMIT_ON": 1, + "SYNCHRONOUS_COMMIT_OFF": 2, + "SYNCHRONOUS_COMMIT_LOCAL": 3, + "SYNCHRONOUS_COMMIT_REMOTE_WRITE": 4, + "SYNCHRONOUS_COMMIT_REMOTE_APPLY": 5, +} + +func (x PostgresqlConfig11_SynchronousCommit) String() string { + return proto.EnumName(PostgresqlConfig11_SynchronousCommit_name, int32(x)) +} +func (PostgresqlConfig11_SynchronousCommit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 1} +} + +type PostgresqlConfig11_ConstraintExclusion int32 + +const ( + PostgresqlConfig11_CONSTRAINT_EXCLUSION_UNSPECIFIED PostgresqlConfig11_ConstraintExclusion = 0 + PostgresqlConfig11_CONSTRAINT_EXCLUSION_ON PostgresqlConfig11_ConstraintExclusion = 1 + PostgresqlConfig11_CONSTRAINT_EXCLUSION_OFF PostgresqlConfig11_ConstraintExclusion = 2 + PostgresqlConfig11_CONSTRAINT_EXCLUSION_PARTITION PostgresqlConfig11_ConstraintExclusion = 3 +) + +var PostgresqlConfig11_ConstraintExclusion_name = map[int32]string{ + 0: "CONSTRAINT_EXCLUSION_UNSPECIFIED", + 1: "CONSTRAINT_EXCLUSION_ON", + 2: "CONSTRAINT_EXCLUSION_OFF", + 3: "CONSTRAINT_EXCLUSION_PARTITION", +} +var PostgresqlConfig11_ConstraintExclusion_value = map[string]int32{ + "CONSTRAINT_EXCLUSION_UNSPECIFIED": 0, + "CONSTRAINT_EXCLUSION_ON": 1, + "CONSTRAINT_EXCLUSION_OFF": 2, + "CONSTRAINT_EXCLUSION_PARTITION": 3, +} + +func (x PostgresqlConfig11_ConstraintExclusion) String() string { + return proto.EnumName(PostgresqlConfig11_ConstraintExclusion_name, int32(x)) +} +func (PostgresqlConfig11_ConstraintExclusion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 2} +} + 
+type PostgresqlConfig11_ForceParallelMode int32 + +const ( + PostgresqlConfig11_FORCE_PARALLEL_MODE_UNSPECIFIED PostgresqlConfig11_ForceParallelMode = 0 + PostgresqlConfig11_FORCE_PARALLEL_MODE_ON PostgresqlConfig11_ForceParallelMode = 1 + PostgresqlConfig11_FORCE_PARALLEL_MODE_OFF PostgresqlConfig11_ForceParallelMode = 2 + PostgresqlConfig11_FORCE_PARALLEL_MODE_REGRESS PostgresqlConfig11_ForceParallelMode = 3 +) + +var PostgresqlConfig11_ForceParallelMode_name = map[int32]string{ + 0: "FORCE_PARALLEL_MODE_UNSPECIFIED", + 1: "FORCE_PARALLEL_MODE_ON", + 2: "FORCE_PARALLEL_MODE_OFF", + 3: "FORCE_PARALLEL_MODE_REGRESS", +} +var PostgresqlConfig11_ForceParallelMode_value = map[string]int32{ + "FORCE_PARALLEL_MODE_UNSPECIFIED": 0, + "FORCE_PARALLEL_MODE_ON": 1, + "FORCE_PARALLEL_MODE_OFF": 2, + "FORCE_PARALLEL_MODE_REGRESS": 3, +} + +func (x PostgresqlConfig11_ForceParallelMode) String() string { + return proto.EnumName(PostgresqlConfig11_ForceParallelMode_name, int32(x)) +} +func (PostgresqlConfig11_ForceParallelMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 3} +} + +type PostgresqlConfig11_LogLevel int32 + +const ( + PostgresqlConfig11_LOG_LEVEL_UNSPECIFIED PostgresqlConfig11_LogLevel = 0 + PostgresqlConfig11_LOG_LEVEL_DEBUG5 PostgresqlConfig11_LogLevel = 1 + PostgresqlConfig11_LOG_LEVEL_DEBUG4 PostgresqlConfig11_LogLevel = 2 + PostgresqlConfig11_LOG_LEVEL_DEBUG3 PostgresqlConfig11_LogLevel = 3 + PostgresqlConfig11_LOG_LEVEL_DEBUG2 PostgresqlConfig11_LogLevel = 4 + PostgresqlConfig11_LOG_LEVEL_DEBUG1 PostgresqlConfig11_LogLevel = 5 + PostgresqlConfig11_LOG_LEVEL_LOG PostgresqlConfig11_LogLevel = 6 + PostgresqlConfig11_LOG_LEVEL_NOTICE PostgresqlConfig11_LogLevel = 7 + PostgresqlConfig11_LOG_LEVEL_WARNING PostgresqlConfig11_LogLevel = 8 + PostgresqlConfig11_LOG_LEVEL_ERROR PostgresqlConfig11_LogLevel = 9 + PostgresqlConfig11_LOG_LEVEL_FATAL PostgresqlConfig11_LogLevel = 10 + PostgresqlConfig11_LOG_LEVEL_PANIC PostgresqlConfig11_LogLevel = 11 +) + +var PostgresqlConfig11_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "LOG_LEVEL_DEBUG5", + 2: "LOG_LEVEL_DEBUG4", + 3: "LOG_LEVEL_DEBUG3", + 4: "LOG_LEVEL_DEBUG2", + 5: "LOG_LEVEL_DEBUG1", + 6: "LOG_LEVEL_LOG", + 7: "LOG_LEVEL_NOTICE", + 8: "LOG_LEVEL_WARNING", + 9: "LOG_LEVEL_ERROR", + 10: "LOG_LEVEL_FATAL", + 11: "LOG_LEVEL_PANIC", +} +var PostgresqlConfig11_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "LOG_LEVEL_DEBUG5": 1, + "LOG_LEVEL_DEBUG4": 2, + "LOG_LEVEL_DEBUG3": 3, + "LOG_LEVEL_DEBUG2": 4, + "LOG_LEVEL_DEBUG1": 5, + "LOG_LEVEL_LOG": 6, + "LOG_LEVEL_NOTICE": 7, + "LOG_LEVEL_WARNING": 8, + "LOG_LEVEL_ERROR": 9, + "LOG_LEVEL_FATAL": 10, + "LOG_LEVEL_PANIC": 11, +} + +func (x PostgresqlConfig11_LogLevel) String() string { + return proto.EnumName(PostgresqlConfig11_LogLevel_name, int32(x)) +} +func (PostgresqlConfig11_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 4} +} + +type PostgresqlConfig11_LogErrorVerbosity int32 + +const ( + PostgresqlConfig11_LOG_ERROR_VERBOSITY_UNSPECIFIED PostgresqlConfig11_LogErrorVerbosity = 0 + PostgresqlConfig11_LOG_ERROR_VERBOSITY_TERSE PostgresqlConfig11_LogErrorVerbosity = 1 + PostgresqlConfig11_LOG_ERROR_VERBOSITY_DEFAULT PostgresqlConfig11_LogErrorVerbosity = 2 + PostgresqlConfig11_LOG_ERROR_VERBOSITY_VERBOSE PostgresqlConfig11_LogErrorVerbosity = 3 +) + +var PostgresqlConfig11_LogErrorVerbosity_name = map[int32]string{ + 0: 
"LOG_ERROR_VERBOSITY_UNSPECIFIED", + 1: "LOG_ERROR_VERBOSITY_TERSE", + 2: "LOG_ERROR_VERBOSITY_DEFAULT", + 3: "LOG_ERROR_VERBOSITY_VERBOSE", +} +var PostgresqlConfig11_LogErrorVerbosity_value = map[string]int32{ + "LOG_ERROR_VERBOSITY_UNSPECIFIED": 0, + "LOG_ERROR_VERBOSITY_TERSE": 1, + "LOG_ERROR_VERBOSITY_DEFAULT": 2, + "LOG_ERROR_VERBOSITY_VERBOSE": 3, +} + +func (x PostgresqlConfig11_LogErrorVerbosity) String() string { + return proto.EnumName(PostgresqlConfig11_LogErrorVerbosity_name, int32(x)) +} +func (PostgresqlConfig11_LogErrorVerbosity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 5} +} + +type PostgresqlConfig11_LogStatement int32 + +const ( + PostgresqlConfig11_LOG_STATEMENT_UNSPECIFIED PostgresqlConfig11_LogStatement = 0 + PostgresqlConfig11_LOG_STATEMENT_NONE PostgresqlConfig11_LogStatement = 1 + PostgresqlConfig11_LOG_STATEMENT_DDL PostgresqlConfig11_LogStatement = 2 + PostgresqlConfig11_LOG_STATEMENT_MOD PostgresqlConfig11_LogStatement = 3 + PostgresqlConfig11_LOG_STATEMENT_ALL PostgresqlConfig11_LogStatement = 4 +) + +var PostgresqlConfig11_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: "LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var PostgresqlConfig11_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x PostgresqlConfig11_LogStatement) String() string { + return proto.EnumName(PostgresqlConfig11_LogStatement_name, int32(x)) +} +func (PostgresqlConfig11_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 6} +} + +type PostgresqlConfig11_TransactionIsolation int32 + +const ( + PostgresqlConfig11_TRANSACTION_ISOLATION_UNSPECIFIED PostgresqlConfig11_TransactionIsolation = 0 + PostgresqlConfig11_TRANSACTION_ISOLATION_READ_UNCOMMITTED PostgresqlConfig11_TransactionIsolation = 1 + PostgresqlConfig11_TRANSACTION_ISOLATION_READ_COMMITTED PostgresqlConfig11_TransactionIsolation = 2 + PostgresqlConfig11_TRANSACTION_ISOLATION_REPEATABLE_READ PostgresqlConfig11_TransactionIsolation = 3 + PostgresqlConfig11_TRANSACTION_ISOLATION_SERIALIZABLE PostgresqlConfig11_TransactionIsolation = 4 +) + +var PostgresqlConfig11_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var PostgresqlConfig11_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x PostgresqlConfig11_TransactionIsolation) String() string { + return proto.EnumName(PostgresqlConfig11_TransactionIsolation_name, int32(x)) +} +func (PostgresqlConfig11_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 7} +} + +type PostgresqlConfig11_ByteaOutput int32 + +const ( + PostgresqlConfig11_BYTEA_OUTPUT_UNSPECIFIED PostgresqlConfig11_ByteaOutput = 0 + PostgresqlConfig11_BYTEA_OUTPUT_HEX PostgresqlConfig11_ByteaOutput = 1 + PostgresqlConfig11_BYTEA_OUTPUT_ESCAPED PostgresqlConfig11_ByteaOutput 
= 2 +) + +var PostgresqlConfig11_ByteaOutput_name = map[int32]string{ + 0: "BYTEA_OUTPUT_UNSPECIFIED", + 1: "BYTEA_OUTPUT_HEX", + 2: "BYTEA_OUTPUT_ESCAPED", +} +var PostgresqlConfig11_ByteaOutput_value = map[string]int32{ + "BYTEA_OUTPUT_UNSPECIFIED": 0, + "BYTEA_OUTPUT_HEX": 1, + "BYTEA_OUTPUT_ESCAPED": 2, +} + +func (x PostgresqlConfig11_ByteaOutput) String() string { + return proto.EnumName(PostgresqlConfig11_ByteaOutput_name, int32(x)) +} +func (PostgresqlConfig11_ByteaOutput) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 8} +} + +type PostgresqlConfig11_XmlBinary int32 + +const ( + PostgresqlConfig11_XML_BINARY_UNSPECIFIED PostgresqlConfig11_XmlBinary = 0 + PostgresqlConfig11_XML_BINARY_BASE64 PostgresqlConfig11_XmlBinary = 1 + PostgresqlConfig11_XML_BINARY_HEX PostgresqlConfig11_XmlBinary = 2 +) + +var PostgresqlConfig11_XmlBinary_name = map[int32]string{ + 0: "XML_BINARY_UNSPECIFIED", + 1: "XML_BINARY_BASE64", + 2: "XML_BINARY_HEX", +} +var PostgresqlConfig11_XmlBinary_value = map[string]int32{ + "XML_BINARY_UNSPECIFIED": 0, + "XML_BINARY_BASE64": 1, + "XML_BINARY_HEX": 2, +} + +func (x PostgresqlConfig11_XmlBinary) String() string { + return proto.EnumName(PostgresqlConfig11_XmlBinary_name, int32(x)) +} +func (PostgresqlConfig11_XmlBinary) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 9} +} + +type PostgresqlConfig11_XmlOption int32 + +const ( + PostgresqlConfig11_XML_OPTION_UNSPECIFIED PostgresqlConfig11_XmlOption = 0 + PostgresqlConfig11_XML_OPTION_DOCUMENT PostgresqlConfig11_XmlOption = 1 + PostgresqlConfig11_XML_OPTION_CONTENT PostgresqlConfig11_XmlOption = 2 +) + +var PostgresqlConfig11_XmlOption_name = map[int32]string{ + 0: "XML_OPTION_UNSPECIFIED", + 1: "XML_OPTION_DOCUMENT", + 2: "XML_OPTION_CONTENT", +} +var PostgresqlConfig11_XmlOption_value = map[string]int32{ + "XML_OPTION_UNSPECIFIED": 0, + "XML_OPTION_DOCUMENT": 1, + "XML_OPTION_CONTENT": 2, +} + +func (x PostgresqlConfig11_XmlOption) String() string { + return proto.EnumName(PostgresqlConfig11_XmlOption_name, int32(x)) +} +func (PostgresqlConfig11_XmlOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 10} +} + +type PostgresqlConfig11_BackslashQuote int32 + +const ( + PostgresqlConfig11_BACKSLASH_QUOTE_UNSPECIFIED PostgresqlConfig11_BackslashQuote = 0 + PostgresqlConfig11_BACKSLASH_QUOTE PostgresqlConfig11_BackslashQuote = 1 + PostgresqlConfig11_BACKSLASH_QUOTE_ON PostgresqlConfig11_BackslashQuote = 2 + PostgresqlConfig11_BACKSLASH_QUOTE_OFF PostgresqlConfig11_BackslashQuote = 3 + PostgresqlConfig11_BACKSLASH_QUOTE_SAFE_ENCODING PostgresqlConfig11_BackslashQuote = 4 +) + +var PostgresqlConfig11_BackslashQuote_name = map[int32]string{ + 0: "BACKSLASH_QUOTE_UNSPECIFIED", + 1: "BACKSLASH_QUOTE", + 2: "BACKSLASH_QUOTE_ON", + 3: "BACKSLASH_QUOTE_OFF", + 4: "BACKSLASH_QUOTE_SAFE_ENCODING", +} +var PostgresqlConfig11_BackslashQuote_value = map[string]int32{ + "BACKSLASH_QUOTE_UNSPECIFIED": 0, + "BACKSLASH_QUOTE": 1, + "BACKSLASH_QUOTE_ON": 2, + "BACKSLASH_QUOTE_OFF": 3, + "BACKSLASH_QUOTE_SAFE_ENCODING": 4, +} + +func (x PostgresqlConfig11_BackslashQuote) String() string { + return proto.EnumName(PostgresqlConfig11_BackslashQuote_name, int32(x)) +} +func (PostgresqlConfig11_BackslashQuote) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0, 11} +} + +// Options and structure of `PostgresqlConfig` reflects PostgreSQL 
configuration file +// parameters which detailed description is available in +// [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). +type PostgresqlConfig11 struct { + MaxConnections *wrappers.Int64Value `protobuf:"bytes,1,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + SharedBuffers *wrappers.Int64Value `protobuf:"bytes,2,opt,name=shared_buffers,json=sharedBuffers,proto3" json:"shared_buffers,omitempty"` + TempBuffers *wrappers.Int64Value `protobuf:"bytes,3,opt,name=temp_buffers,json=tempBuffers,proto3" json:"temp_buffers,omitempty"` + MaxPreparedTransactions *wrappers.Int64Value `protobuf:"bytes,4,opt,name=max_prepared_transactions,json=maxPreparedTransactions,proto3" json:"max_prepared_transactions,omitempty"` + WorkMem *wrappers.Int64Value `protobuf:"bytes,5,opt,name=work_mem,json=workMem,proto3" json:"work_mem,omitempty"` + MaintenanceWorkMem *wrappers.Int64Value `protobuf:"bytes,6,opt,name=maintenance_work_mem,json=maintenanceWorkMem,proto3" json:"maintenance_work_mem,omitempty"` + AutovacuumWorkMem *wrappers.Int64Value `protobuf:"bytes,7,opt,name=autovacuum_work_mem,json=autovacuumWorkMem,proto3" json:"autovacuum_work_mem,omitempty"` + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,8,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + VacuumCostDelay *wrappers.Int64Value `protobuf:"bytes,9,opt,name=vacuum_cost_delay,json=vacuumCostDelay,proto3" json:"vacuum_cost_delay,omitempty"` + VacuumCostPageHit *wrappers.Int64Value `protobuf:"bytes,10,opt,name=vacuum_cost_page_hit,json=vacuumCostPageHit,proto3" json:"vacuum_cost_page_hit,omitempty"` + VacuumCostPageMiss *wrappers.Int64Value `protobuf:"bytes,11,opt,name=vacuum_cost_page_miss,json=vacuumCostPageMiss,proto3" json:"vacuum_cost_page_miss,omitempty"` + VacuumCostPageDirty *wrappers.Int64Value `protobuf:"bytes,12,opt,name=vacuum_cost_page_dirty,json=vacuumCostPageDirty,proto3" json:"vacuum_cost_page_dirty,omitempty"` + VacuumCostLimit *wrappers.Int64Value `protobuf:"bytes,13,opt,name=vacuum_cost_limit,json=vacuumCostLimit,proto3" json:"vacuum_cost_limit,omitempty"` + BgwriterDelay *wrappers.Int64Value `protobuf:"bytes,14,opt,name=bgwriter_delay,json=bgwriterDelay,proto3" json:"bgwriter_delay,omitempty"` + BgwriterLruMaxpages *wrappers.Int64Value `protobuf:"bytes,15,opt,name=bgwriter_lru_maxpages,json=bgwriterLruMaxpages,proto3" json:"bgwriter_lru_maxpages,omitempty"` + BgwriterLruMultiplier *wrappers.DoubleValue `protobuf:"bytes,16,opt,name=bgwriter_lru_multiplier,json=bgwriterLruMultiplier,proto3" json:"bgwriter_lru_multiplier,omitempty"` + BgwriterFlushAfter *wrappers.Int64Value `protobuf:"bytes,17,opt,name=bgwriter_flush_after,json=bgwriterFlushAfter,proto3" json:"bgwriter_flush_after,omitempty"` + BackendFlushAfter *wrappers.Int64Value `protobuf:"bytes,18,opt,name=backend_flush_after,json=backendFlushAfter,proto3" json:"backend_flush_after,omitempty"` + OldSnapshotThreshold *wrappers.Int64Value `protobuf:"bytes,19,opt,name=old_snapshot_threshold,json=oldSnapshotThreshold,proto3" json:"old_snapshot_threshold,omitempty"` + WalLevel PostgresqlConfig11_WalLevel `protobuf:"varint,20,opt,name=wal_level,json=walLevel,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_WalLevel" json:"wal_level,omitempty"` + SynchronousCommit PostgresqlConfig11_SynchronousCommit 
`protobuf:"varint,21,opt,name=synchronous_commit,json=synchronousCommit,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_SynchronousCommit" json:"synchronous_commit,omitempty"` + CheckpointTimeout *wrappers.Int64Value `protobuf:"bytes,22,opt,name=checkpoint_timeout,json=checkpointTimeout,proto3" json:"checkpoint_timeout,omitempty"` + CheckpointCompletionTarget *wrappers.DoubleValue `protobuf:"bytes,23,opt,name=checkpoint_completion_target,json=checkpointCompletionTarget,proto3" json:"checkpoint_completion_target,omitempty"` + CheckpointFlushAfter *wrappers.Int64Value `protobuf:"bytes,24,opt,name=checkpoint_flush_after,json=checkpointFlushAfter,proto3" json:"checkpoint_flush_after,omitempty"` + MaxWalSize *wrappers.Int64Value `protobuf:"bytes,25,opt,name=max_wal_size,json=maxWalSize,proto3" json:"max_wal_size,omitempty"` + MinWalSize *wrappers.Int64Value `protobuf:"bytes,26,opt,name=min_wal_size,json=minWalSize,proto3" json:"min_wal_size,omitempty"` + MaxStandbyStreamingDelay *wrappers.Int64Value `protobuf:"bytes,27,opt,name=max_standby_streaming_delay,json=maxStandbyStreamingDelay,proto3" json:"max_standby_streaming_delay,omitempty"` + DefaultStatisticsTarget *wrappers.Int64Value `protobuf:"bytes,28,opt,name=default_statistics_target,json=defaultStatisticsTarget,proto3" json:"default_statistics_target,omitempty"` + ConstraintExclusion PostgresqlConfig11_ConstraintExclusion `protobuf:"varint,29,opt,name=constraint_exclusion,json=constraintExclusion,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_ConstraintExclusion" json:"constraint_exclusion,omitempty"` + CursorTupleFraction *wrappers.DoubleValue `protobuf:"bytes,30,opt,name=cursor_tuple_fraction,json=cursorTupleFraction,proto3" json:"cursor_tuple_fraction,omitempty"` + FromCollapseLimit *wrappers.Int64Value `protobuf:"bytes,31,opt,name=from_collapse_limit,json=fromCollapseLimit,proto3" json:"from_collapse_limit,omitempty"` + JoinCollapseLimit *wrappers.Int64Value `protobuf:"bytes,32,opt,name=join_collapse_limit,json=joinCollapseLimit,proto3" json:"join_collapse_limit,omitempty"` + ForceParallelMode PostgresqlConfig11_ForceParallelMode `protobuf:"varint,33,opt,name=force_parallel_mode,json=forceParallelMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_ForceParallelMode" json:"force_parallel_mode,omitempty"` + ClientMinMessages PostgresqlConfig11_LogLevel `protobuf:"varint,34,opt,name=client_min_messages,json=clientMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogLevel" json:"client_min_messages,omitempty"` + LogMinMessages PostgresqlConfig11_LogLevel `protobuf:"varint,35,opt,name=log_min_messages,json=logMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogLevel" json:"log_min_messages,omitempty"` + LogMinErrorStatement PostgresqlConfig11_LogLevel `protobuf:"varint,36,opt,name=log_min_error_statement,json=logMinErrorStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogLevel" json:"log_min_error_statement,omitempty"` + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,37,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" json:"log_min_duration_statement,omitempty"` + LogCheckpoints *wrappers.BoolValue `protobuf:"bytes,38,opt,name=log_checkpoints,json=logCheckpoints,proto3" json:"log_checkpoints,omitempty"` + LogConnections *wrappers.BoolValue `protobuf:"bytes,39,opt,name=log_connections,json=logConnections,proto3" json:"log_connections,omitempty"` + 
LogDisconnections *wrappers.BoolValue `protobuf:"bytes,40,opt,name=log_disconnections,json=logDisconnections,proto3" json:"log_disconnections,omitempty"` + LogDuration *wrappers.BoolValue `protobuf:"bytes,41,opt,name=log_duration,json=logDuration,proto3" json:"log_duration,omitempty"` + LogErrorVerbosity PostgresqlConfig11_LogErrorVerbosity `protobuf:"varint,42,opt,name=log_error_verbosity,json=logErrorVerbosity,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogErrorVerbosity" json:"log_error_verbosity,omitempty"` + LogLockWaits *wrappers.BoolValue `protobuf:"bytes,43,opt,name=log_lock_waits,json=logLockWaits,proto3" json:"log_lock_waits,omitempty"` + LogStatement PostgresqlConfig11_LogStatement `protobuf:"varint,44,opt,name=log_statement,json=logStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogStatement" json:"log_statement,omitempty"` + LogTempFiles *wrappers.Int64Value `protobuf:"bytes,45,opt,name=log_temp_files,json=logTempFiles,proto3" json:"log_temp_files,omitempty"` + SearchPath string `protobuf:"bytes,46,opt,name=search_path,json=searchPath,proto3" json:"search_path,omitempty"` + RowSecurity *wrappers.BoolValue `protobuf:"bytes,47,opt,name=row_security,json=rowSecurity,proto3" json:"row_security,omitempty"` + DefaultTransactionIsolation PostgresqlConfig11_TransactionIsolation `protobuf:"varint,48,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + StatementTimeout *wrappers.Int64Value `protobuf:"bytes,49,opt,name=statement_timeout,json=statementTimeout,proto3" json:"statement_timeout,omitempty"` + LockTimeout *wrappers.Int64Value `protobuf:"bytes,50,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + IdleInTransactionSessionTimeout *wrappers.Int64Value `protobuf:"bytes,51,opt,name=idle_in_transaction_session_timeout,json=idleInTransactionSessionTimeout,proto3" json:"idle_in_transaction_session_timeout,omitempty"` + ByteaOutput PostgresqlConfig11_ByteaOutput `protobuf:"varint,52,opt,name=bytea_output,json=byteaOutput,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_ByteaOutput" json:"bytea_output,omitempty"` + Xmlbinary PostgresqlConfig11_XmlBinary `protobuf:"varint,53,opt,name=xmlbinary,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_XmlBinary" json:"xmlbinary,omitempty"` + Xmloption PostgresqlConfig11_XmlOption `protobuf:"varint,54,opt,name=xmloption,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_XmlOption" json:"xmloption,omitempty"` + GinPendingListLimit *wrappers.Int64Value `protobuf:"bytes,55,opt,name=gin_pending_list_limit,json=ginPendingListLimit,proto3" json:"gin_pending_list_limit,omitempty"` + DeadlockTimeout *wrappers.Int64Value `protobuf:"bytes,56,opt,name=deadlock_timeout,json=deadlockTimeout,proto3" json:"deadlock_timeout,omitempty"` + MaxLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,57,opt,name=max_locks_per_transaction,json=maxLocksPerTransaction,proto3" json:"max_locks_per_transaction,omitempty"` + MaxPredLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,58,opt,name=max_pred_locks_per_transaction,json=maxPredLocksPerTransaction,proto3" json:"max_pred_locks_per_transaction,omitempty"` + ArrayNulls *wrappers.BoolValue `protobuf:"bytes,59,opt,name=array_nulls,json=arrayNulls,proto3" json:"array_nulls,omitempty"` + BackslashQuote 
PostgresqlConfig11_BackslashQuote `protobuf:"varint,60,opt,name=backslash_quote,json=backslashQuote,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_BackslashQuote" json:"backslash_quote,omitempty"` + DefaultWithOids *wrappers.BoolValue `protobuf:"bytes,61,opt,name=default_with_oids,json=defaultWithOids,proto3" json:"default_with_oids,omitempty"` + EscapeStringWarning *wrappers.BoolValue `protobuf:"bytes,62,opt,name=escape_string_warning,json=escapeStringWarning,proto3" json:"escape_string_warning,omitempty"` + LoCompatPrivileges *wrappers.BoolValue `protobuf:"bytes,63,opt,name=lo_compat_privileges,json=loCompatPrivileges,proto3" json:"lo_compat_privileges,omitempty"` + OperatorPrecedenceWarning *wrappers.BoolValue `protobuf:"bytes,64,opt,name=operator_precedence_warning,json=operatorPrecedenceWarning,proto3" json:"operator_precedence_warning,omitempty"` + QuoteAllIdentifiers *wrappers.BoolValue `protobuf:"bytes,65,opt,name=quote_all_identifiers,json=quoteAllIdentifiers,proto3" json:"quote_all_identifiers,omitempty"` + StandardConformingStrings *wrappers.BoolValue `protobuf:"bytes,66,opt,name=standard_conforming_strings,json=standardConformingStrings,proto3" json:"standard_conforming_strings,omitempty"` + SynchronizeSeqscans *wrappers.BoolValue `protobuf:"bytes,67,opt,name=synchronize_seqscans,json=synchronizeSeqscans,proto3" json:"synchronize_seqscans,omitempty"` + TransformNullEquals *wrappers.BoolValue `protobuf:"bytes,68,opt,name=transform_null_equals,json=transformNullEquals,proto3" json:"transform_null_equals,omitempty"` + ExitOnError *wrappers.BoolValue `protobuf:"bytes,69,opt,name=exit_on_error,json=exitOnError,proto3" json:"exit_on_error,omitempty"` + SeqPageCost *wrappers.DoubleValue `protobuf:"bytes,70,opt,name=seq_page_cost,json=seqPageCost,proto3" json:"seq_page_cost,omitempty"` + RandomPageCost *wrappers.DoubleValue `protobuf:"bytes,71,opt,name=random_page_cost,json=randomPageCost,proto3" json:"random_page_cost,omitempty"` + AutovacuumMaxWorkers *wrappers.Int64Value `protobuf:"bytes,72,opt,name=autovacuum_max_workers,json=autovacuumMaxWorkers,proto3" json:"autovacuum_max_workers,omitempty"` + AutovacuumVacuumCostDelay *wrappers.Int64Value `protobuf:"bytes,73,opt,name=autovacuum_vacuum_cost_delay,json=autovacuumVacuumCostDelay,proto3" json:"autovacuum_vacuum_cost_delay,omitempty"` + AutovacuumVacuumCostLimit *wrappers.Int64Value `protobuf:"bytes,74,opt,name=autovacuum_vacuum_cost_limit,json=autovacuumVacuumCostLimit,proto3" json:"autovacuum_vacuum_cost_limit,omitempty"` + AutovacuumNaptime *wrappers.Int64Value `protobuf:"bytes,75,opt,name=autovacuum_naptime,json=autovacuumNaptime,proto3" json:"autovacuum_naptime,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlConfig11) Reset() { *m = PostgresqlConfig11{} } +func (m *PostgresqlConfig11) String() string { return proto.CompactTextString(m) } +func (*PostgresqlConfig11) ProtoMessage() {} +func (*PostgresqlConfig11) Descriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{0} +} +func (m *PostgresqlConfig11) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlConfig11.Unmarshal(m, b) +} +func (m *PostgresqlConfig11) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlConfig11.Marshal(b, m, deterministic) +} +func (dst *PostgresqlConfig11) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlConfig11.Merge(dst, src) 
+} +func (m *PostgresqlConfig11) XXX_Size() int { + return xxx_messageInfo_PostgresqlConfig11.Size(m) +} +func (m *PostgresqlConfig11) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlConfig11.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlConfig11 proto.InternalMessageInfo + +func (m *PostgresqlConfig11) GetMaxConnections() *wrappers.Int64Value { + if m != nil { + return m.MaxConnections + } + return nil +} + +func (m *PostgresqlConfig11) GetSharedBuffers() *wrappers.Int64Value { + if m != nil { + return m.SharedBuffers + } + return nil +} + +func (m *PostgresqlConfig11) GetTempBuffers() *wrappers.Int64Value { + if m != nil { + return m.TempBuffers + } + return nil +} + +func (m *PostgresqlConfig11) GetMaxPreparedTransactions() *wrappers.Int64Value { + if m != nil { + return m.MaxPreparedTransactions + } + return nil +} + +func (m *PostgresqlConfig11) GetWorkMem() *wrappers.Int64Value { + if m != nil { + return m.WorkMem + } + return nil +} + +func (m *PostgresqlConfig11) GetMaintenanceWorkMem() *wrappers.Int64Value { + if m != nil { + return m.MaintenanceWorkMem + } + return nil +} + +func (m *PostgresqlConfig11) GetAutovacuumWorkMem() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumWorkMem + } + return nil +} + +func (m *PostgresqlConfig11) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func (m *PostgresqlConfig11) GetVacuumCostDelay() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostDelay + } + return nil +} + +func (m *PostgresqlConfig11) GetVacuumCostPageHit() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageHit + } + return nil +} + +func (m *PostgresqlConfig11) GetVacuumCostPageMiss() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageMiss + } + return nil +} + +func (m *PostgresqlConfig11) GetVacuumCostPageDirty() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageDirty + } + return nil +} + +func (m *PostgresqlConfig11) GetVacuumCostLimit() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostLimit + } + return nil +} + +func (m *PostgresqlConfig11) GetBgwriterDelay() *wrappers.Int64Value { + if m != nil { + return m.BgwriterDelay + } + return nil +} + +func (m *PostgresqlConfig11) GetBgwriterLruMaxpages() *wrappers.Int64Value { + if m != nil { + return m.BgwriterLruMaxpages + } + return nil +} + +func (m *PostgresqlConfig11) GetBgwriterLruMultiplier() *wrappers.DoubleValue { + if m != nil { + return m.BgwriterLruMultiplier + } + return nil +} + +func (m *PostgresqlConfig11) GetBgwriterFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BgwriterFlushAfter + } + return nil +} + +func (m *PostgresqlConfig11) GetBackendFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BackendFlushAfter + } + return nil +} + +func (m *PostgresqlConfig11) GetOldSnapshotThreshold() *wrappers.Int64Value { + if m != nil { + return m.OldSnapshotThreshold + } + return nil +} + +func (m *PostgresqlConfig11) GetWalLevel() PostgresqlConfig11_WalLevel { + if m != nil { + return m.WalLevel + } + return PostgresqlConfig11_WAL_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetSynchronousCommit() PostgresqlConfig11_SynchronousCommit { + if m != nil { + return m.SynchronousCommit + } + return PostgresqlConfig11_SYNCHRONOUS_COMMIT_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetCheckpointTimeout() *wrappers.Int64Value { + if m != nil { + return m.CheckpointTimeout + } + return nil +} + +func (m *PostgresqlConfig11) GetCheckpointCompletionTarget() 
*wrappers.DoubleValue { + if m != nil { + return m.CheckpointCompletionTarget + } + return nil +} + +func (m *PostgresqlConfig11) GetCheckpointFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.CheckpointFlushAfter + } + return nil +} + +func (m *PostgresqlConfig11) GetMaxWalSize() *wrappers.Int64Value { + if m != nil { + return m.MaxWalSize + } + return nil +} + +func (m *PostgresqlConfig11) GetMinWalSize() *wrappers.Int64Value { + if m != nil { + return m.MinWalSize + } + return nil +} + +func (m *PostgresqlConfig11) GetMaxStandbyStreamingDelay() *wrappers.Int64Value { + if m != nil { + return m.MaxStandbyStreamingDelay + } + return nil +} + +func (m *PostgresqlConfig11) GetDefaultStatisticsTarget() *wrappers.Int64Value { + if m != nil { + return m.DefaultStatisticsTarget + } + return nil +} + +func (m *PostgresqlConfig11) GetConstraintExclusion() PostgresqlConfig11_ConstraintExclusion { + if m != nil { + return m.ConstraintExclusion + } + return PostgresqlConfig11_CONSTRAINT_EXCLUSION_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetCursorTupleFraction() *wrappers.DoubleValue { + if m != nil { + return m.CursorTupleFraction + } + return nil +} + +func (m *PostgresqlConfig11) GetFromCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.FromCollapseLimit + } + return nil +} + +func (m *PostgresqlConfig11) GetJoinCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.JoinCollapseLimit + } + return nil +} + +func (m *PostgresqlConfig11) GetForceParallelMode() PostgresqlConfig11_ForceParallelMode { + if m != nil { + return m.ForceParallelMode + } + return PostgresqlConfig11_FORCE_PARALLEL_MODE_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetClientMinMessages() PostgresqlConfig11_LogLevel { + if m != nil { + return m.ClientMinMessages + } + return PostgresqlConfig11_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetLogMinMessages() PostgresqlConfig11_LogLevel { + if m != nil { + return m.LogMinMessages + } + return PostgresqlConfig11_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetLogMinErrorStatement() PostgresqlConfig11_LogLevel { + if m != nil { + return m.LogMinErrorStatement + } + return PostgresqlConfig11_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *PostgresqlConfig11) GetLogCheckpoints() *wrappers.BoolValue { + if m != nil { + return m.LogCheckpoints + } + return nil +} + +func (m *PostgresqlConfig11) GetLogConnections() *wrappers.BoolValue { + if m != nil { + return m.LogConnections + } + return nil +} + +func (m *PostgresqlConfig11) GetLogDisconnections() *wrappers.BoolValue { + if m != nil { + return m.LogDisconnections + } + return nil +} + +func (m *PostgresqlConfig11) GetLogDuration() *wrappers.BoolValue { + if m != nil { + return m.LogDuration + } + return nil +} + +func (m *PostgresqlConfig11) GetLogErrorVerbosity() PostgresqlConfig11_LogErrorVerbosity { + if m != nil { + return m.LogErrorVerbosity + } + return PostgresqlConfig11_LOG_ERROR_VERBOSITY_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetLogLockWaits() *wrappers.BoolValue { + if m != nil { + return m.LogLockWaits + } + return nil +} + +func (m *PostgresqlConfig11) GetLogStatement() PostgresqlConfig11_LogStatement { + if m != nil { + return m.LogStatement + } + return PostgresqlConfig11_LOG_STATEMENT_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetLogTempFiles() *wrappers.Int64Value { + if m != nil { + return 
m.LogTempFiles + } + return nil +} + +func (m *PostgresqlConfig11) GetSearchPath() string { + if m != nil { + return m.SearchPath + } + return "" +} + +func (m *PostgresqlConfig11) GetRowSecurity() *wrappers.BoolValue { + if m != nil { + return m.RowSecurity + } + return nil +} + +func (m *PostgresqlConfig11) GetDefaultTransactionIsolation() PostgresqlConfig11_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return PostgresqlConfig11_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetStatementTimeout() *wrappers.Int64Value { + if m != nil { + return m.StatementTimeout + } + return nil +} + +func (m *PostgresqlConfig11) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *PostgresqlConfig11) GetIdleInTransactionSessionTimeout() *wrappers.Int64Value { + if m != nil { + return m.IdleInTransactionSessionTimeout + } + return nil +} + +func (m *PostgresqlConfig11) GetByteaOutput() PostgresqlConfig11_ByteaOutput { + if m != nil { + return m.ByteaOutput + } + return PostgresqlConfig11_BYTEA_OUTPUT_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetXmlbinary() PostgresqlConfig11_XmlBinary { + if m != nil { + return m.Xmlbinary + } + return PostgresqlConfig11_XML_BINARY_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetXmloption() PostgresqlConfig11_XmlOption { + if m != nil { + return m.Xmloption + } + return PostgresqlConfig11_XML_OPTION_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetGinPendingListLimit() *wrappers.Int64Value { + if m != nil { + return m.GinPendingListLimit + } + return nil +} + +func (m *PostgresqlConfig11) GetDeadlockTimeout() *wrappers.Int64Value { + if m != nil { + return m.DeadlockTimeout + } + return nil +} + +func (m *PostgresqlConfig11) GetMaxLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxLocksPerTransaction + } + return nil +} + +func (m *PostgresqlConfig11) GetMaxPredLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxPredLocksPerTransaction + } + return nil +} + +func (m *PostgresqlConfig11) GetArrayNulls() *wrappers.BoolValue { + if m != nil { + return m.ArrayNulls + } + return nil +} + +func (m *PostgresqlConfig11) GetBackslashQuote() PostgresqlConfig11_BackslashQuote { + if m != nil { + return m.BackslashQuote + } + return PostgresqlConfig11_BACKSLASH_QUOTE_UNSPECIFIED +} + +func (m *PostgresqlConfig11) GetDefaultWithOids() *wrappers.BoolValue { + if m != nil { + return m.DefaultWithOids + } + return nil +} + +func (m *PostgresqlConfig11) GetEscapeStringWarning() *wrappers.BoolValue { + if m != nil { + return m.EscapeStringWarning + } + return nil +} + +func (m *PostgresqlConfig11) GetLoCompatPrivileges() *wrappers.BoolValue { + if m != nil { + return m.LoCompatPrivileges + } + return nil +} + +func (m *PostgresqlConfig11) GetOperatorPrecedenceWarning() *wrappers.BoolValue { + if m != nil { + return m.OperatorPrecedenceWarning + } + return nil +} + +func (m *PostgresqlConfig11) GetQuoteAllIdentifiers() *wrappers.BoolValue { + if m != nil { + return m.QuoteAllIdentifiers + } + return nil +} + +func (m *PostgresqlConfig11) GetStandardConformingStrings() *wrappers.BoolValue { + if m != nil { + return m.StandardConformingStrings + } + return nil +} + +func (m *PostgresqlConfig11) GetSynchronizeSeqscans() *wrappers.BoolValue { + if m != nil { + return m.SynchronizeSeqscans + } + return nil +} + +func (m *PostgresqlConfig11) GetTransformNullEquals() *wrappers.BoolValue { + if m != nil { + return 
m.TransformNullEquals + } + return nil +} + +func (m *PostgresqlConfig11) GetExitOnError() *wrappers.BoolValue { + if m != nil { + return m.ExitOnError + } + return nil +} + +func (m *PostgresqlConfig11) GetSeqPageCost() *wrappers.DoubleValue { + if m != nil { + return m.SeqPageCost + } + return nil +} + +func (m *PostgresqlConfig11) GetRandomPageCost() *wrappers.DoubleValue { + if m != nil { + return m.RandomPageCost + } + return nil +} + +func (m *PostgresqlConfig11) GetAutovacuumMaxWorkers() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumMaxWorkers + } + return nil +} + +func (m *PostgresqlConfig11) GetAutovacuumVacuumCostDelay() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumVacuumCostDelay + } + return nil +} + +func (m *PostgresqlConfig11) GetAutovacuumVacuumCostLimit() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumVacuumCostLimit + } + return nil +} + +func (m *PostgresqlConfig11) GetAutovacuumNaptime() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumNaptime + } + return nil +} + +type PostgresqlConfigSet11 struct { + // Effective settings for a PostgreSQL 11 cluster (a combination of settings defined + // in [user_config] and [default_config]). + EffectiveConfig *PostgresqlConfig11 `protobuf:"bytes,1,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` + // User-defined settings for a PostgreSQL 11 cluster. + UserConfig *PostgresqlConfig11 `protobuf:"bytes,2,opt,name=user_config,json=userConfig,proto3" json:"user_config,omitempty"` + // Default configuration for a PostgreSQL 11 cluster. + DefaultConfig *PostgresqlConfig11 `protobuf:"bytes,3,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlConfigSet11) Reset() { *m = PostgresqlConfigSet11{} } +func (m *PostgresqlConfigSet11) String() string { return proto.CompactTextString(m) } +func (*PostgresqlConfigSet11) ProtoMessage() {} +func (*PostgresqlConfigSet11) Descriptor() ([]byte, []int) { + return fileDescriptor_postgresql11_b03c44c8761ca1c0, []int{1} +} +func (m *PostgresqlConfigSet11) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlConfigSet11.Unmarshal(m, b) +} +func (m *PostgresqlConfigSet11) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlConfigSet11.Marshal(b, m, deterministic) +} +func (dst *PostgresqlConfigSet11) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlConfigSet11.Merge(dst, src) +} +func (m *PostgresqlConfigSet11) XXX_Size() int { + return xxx_messageInfo_PostgresqlConfigSet11.Size(m) +} +func (m *PostgresqlConfigSet11) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlConfigSet11.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlConfigSet11 proto.InternalMessageInfo + +func (m *PostgresqlConfigSet11) GetEffectiveConfig() *PostgresqlConfig11 { + if m != nil { + return m.EffectiveConfig + } + return nil +} + +func (m *PostgresqlConfigSet11) GetUserConfig() *PostgresqlConfig11 { + if m != nil { + return m.UserConfig + } + return nil +} + +func (m *PostgresqlConfigSet11) GetDefaultConfig() *PostgresqlConfig11 { + if m != nil { + return m.DefaultConfig + } + return nil +} + +func init() { + proto.RegisterType((*PostgresqlConfig11)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11") + proto.RegisterType((*PostgresqlConfigSet11)(nil), 
"yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet11") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_WalLevel", PostgresqlConfig11_WalLevel_name, PostgresqlConfig11_WalLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_SynchronousCommit", PostgresqlConfig11_SynchronousCommit_name, PostgresqlConfig11_SynchronousCommit_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_ConstraintExclusion", PostgresqlConfig11_ConstraintExclusion_name, PostgresqlConfig11_ConstraintExclusion_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_ForceParallelMode", PostgresqlConfig11_ForceParallelMode_name, PostgresqlConfig11_ForceParallelMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogLevel", PostgresqlConfig11_LogLevel_name, PostgresqlConfig11_LogLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogErrorVerbosity", PostgresqlConfig11_LogErrorVerbosity_name, PostgresqlConfig11_LogErrorVerbosity_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_LogStatement", PostgresqlConfig11_LogStatement_name, PostgresqlConfig11_LogStatement_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_TransactionIsolation", PostgresqlConfig11_TransactionIsolation_name, PostgresqlConfig11_TransactionIsolation_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_ByteaOutput", PostgresqlConfig11_ByteaOutput_name, PostgresqlConfig11_ByteaOutput_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_XmlBinary", PostgresqlConfig11_XmlBinary_name, PostgresqlConfig11_XmlBinary_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_XmlOption", PostgresqlConfig11_XmlOption_name, PostgresqlConfig11_XmlOption_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig11_BackslashQuote", PostgresqlConfig11_BackslashQuote_name, PostgresqlConfig11_BackslashQuote_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/config/postgresql11.proto", fileDescriptor_postgresql11_b03c44c8761ca1c0) +} + +var fileDescriptor_postgresql11_b03c44c8761ca1c0 = []byte{ + // 2855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x9a, 0xdb, 0x76, 0xdb, 0xc6, + 0xd5, 0xc7, 0x3f, 0x4a, 0x8e, 0x23, 0x8f, 0x2c, 0x09, 0x1c, 0x4a, 0x32, 0x24, 0x1f, 0xc3, 0xc4, + 0xf9, 0x9c, 0xb4, 0x3a, 0xd0, 0x56, 0x1c, 0xa7, 0x69, 0xdc, 0x80, 0x20, 0x28, 0xb3, 0x06, 0x09, + 0x06, 0x80, 0x2c, 0xdb, 0x59, 0x09, 0x3a, 0x04, 0x86, 0x24, 0x22, 0x00, 0x43, 0x63, 0x40, 0x1d, + 0xdc, 0x9b, 0x5e, 0xf7, 0xae, 0xb9, 0x6b, 0x5f, 0xa5, 0x0f, 0xe0, 0x37, 0xe8, 0x23, 0xf4, 0x21, + 0x7c, 0xd5, 0x35, 0x38, 0x10, 0xa0, 0x48, 0x07, 0x8c, 0x95, 0x3b, 0x69, 0xcf, 0xfe, 0xff, 0xf6, + 0x1c, 0xf6, 0xcc, 0x60, 0xcf, 0x22, 0x78, 0x74, 0x86, 0x3c, 0x0b, 0x9f, 0xee, 0x98, 0x0e, 0x19, + 0x5a, 0x3b, 0xae, 0xd5, 0xd9, 0x19, 0x10, 0x1a, 0xf4, 0x7c, 0x4c, 0x5f, 0x39, 0x3b, 0xc7, 0x95, + 0x1d, 0x93, 0x78, 0x5d, 0xbb, 0x97, 0x31, 0x56, 0x2a, 0xdb, 0x03, 0x9f, 0x04, 0x04, 0xde, 0x8d, + 0x94, 0xdb, 0xa1, 0x72, 0xdb, 0xb5, 0x3a, 0xdb, 0xa9, 0xd3, 0xf6, 0x71, 0x65, 0x3b, 0x52, 0x6e, + 0xde, 0xea, 0x11, 0xd2, 0x73, 0xf0, 0x4e, 0x28, 0xea, 0x0c, 0xbb, 0x3b, 0x27, 0x3e, 0x1a, 0x0c, + 0xb0, 0x4f, 0x23, 0xcc, 0xe6, 0xcd, 0xb1, 0x0e, 
0x1c, 0x23, 0xc7, 0xb6, 0x50, 0x60, 0x13, 0x2f, + 0x6a, 0x2e, 0xff, 0x43, 0x00, 0xb0, 0x3d, 0xe2, 0x8a, 0x21, 0xb3, 0x52, 0x81, 0x35, 0xb0, 0xe2, + 0xa2, 0x53, 0xc3, 0x24, 0x9e, 0x87, 0x4d, 0xe6, 0x4e, 0xf9, 0xc2, 0x9d, 0xc2, 0xbd, 0xc5, 0xfb, + 0xd7, 0xb7, 0xa3, 0x78, 0xdb, 0x49, 0xbc, 0xed, 0x86, 0x17, 0x3c, 0xdc, 0x7b, 0x86, 0x9c, 0x21, + 0x56, 0x97, 0x5d, 0x74, 0x2a, 0xa6, 0x12, 0x58, 0x05, 0xcb, 0xb4, 0x8f, 0x7c, 0x6c, 0x19, 0x9d, + 0x61, 0xb7, 0x8b, 0x7d, 0xca, 0xcf, 0xe5, 0x43, 0x96, 0x22, 0x49, 0x35, 0x52, 0xc0, 0xc7, 0xe0, + 0x6a, 0x80, 0xdd, 0xc1, 0x88, 0x30, 0x9f, 0x4f, 0x58, 0x64, 0x82, 0x44, 0x7f, 0x08, 0x36, 0xd8, + 0x48, 0x06, 0x3e, 0x1e, 0x84, 0x3d, 0x09, 0x7c, 0xe4, 0x51, 0x14, 0x8f, 0xe9, 0x52, 0x3e, 0xec, + 0x9a, 0x8b, 0x4e, 0xdb, 0xb1, 0x58, 0xcf, 0x68, 0xe1, 0x43, 0xb0, 0x70, 0x42, 0xfc, 0x23, 0xc3, + 0xc5, 0x2e, 0xff, 0x41, 0x3e, 0xe7, 0x43, 0xe6, 0xdc, 0xc4, 0x2e, 0x6c, 0x82, 0x55, 0x17, 0xd9, + 0x5e, 0x80, 0x3d, 0xe4, 0x99, 0xd8, 0x18, 0x31, 0x2e, 0xe7, 0x33, 0x60, 0x46, 0x78, 0x18, 0xe3, + 0x9e, 0x82, 0x12, 0x1a, 0x06, 0xe4, 0x18, 0x99, 0xc3, 0xa1, 0x9b, 0xd2, 0x3e, 0xcc, 0xa7, 0x15, + 0x53, 0x5d, 0x02, 0x13, 0xc1, 0x4a, 0x38, 0xd9, 0x5d, 0xdb, 0xc1, 0x86, 0x63, 0xbb, 0x76, 0xc0, + 0x2f, 0xcc, 0xb0, 0x62, 0x4c, 0x53, 0xb7, 0x1d, 0x2c, 0x33, 0x05, 0xdc, 0x07, 0xc5, 0xb8, 0x37, + 0x26, 0xa1, 0x81, 0x61, 0x61, 0x07, 0x9d, 0xf1, 0x57, 0xf2, 0x31, 0x2b, 0x91, 0x4a, 0x24, 0x34, + 0xa8, 0x31, 0x0d, 0x94, 0xc1, 0x6a, 0x16, 0x34, 0x40, 0x3d, 0x6c, 0xf4, 0xed, 0x80, 0x07, 0x33, + 0x8c, 0x2d, 0x65, 0xb5, 0x51, 0x0f, 0x3f, 0xb1, 0x03, 0xd8, 0x02, 0x6b, 0x13, 0x34, 0xd7, 0xa6, + 0x94, 0x5f, 0x9c, 0x61, 0xe2, 0xc7, 0x71, 0x4d, 0x9b, 0x52, 0xd8, 0x06, 0xeb, 0x13, 0x3c, 0xcb, + 0xf6, 0x83, 0x33, 0xfe, 0x6a, 0x3e, 0xb0, 0x34, 0x0e, 0xac, 0x31, 0xdd, 0xf9, 0x89, 0x8b, 0xe6, + 0x7f, 0xe9, 0x57, 0x4d, 0x5c, 0xb4, 0x02, 0x6d, 0xb0, 0xdc, 0xe9, 0x9d, 0xf8, 0x76, 0x80, 0xfd, + 0x78, 0xfa, 0x97, 0x73, 0x29, 0xd5, 0xab, 0x6f, 0xdf, 0x54, 0x16, 0x2a, 0xbb, 0x5b, 0x95, 0xdd, + 0xdd, 0xdd, 0x5d, 0x75, 0x29, 0x01, 0x44, 0x4b, 0xa1, 0x80, 0xb5, 0x11, 0xd1, 0xf1, 0x87, 0x86, + 0x8b, 0x4e, 0xd9, 0x80, 0x29, 0xbf, 0x32, 0xc3, 0x58, 0x13, 0xa5, 0xec, 0x0f, 0x9b, 0xb1, 0x0e, + 0xea, 0xe0, 0xda, 0x38, 0x70, 0xe8, 0x04, 0xf6, 0xc0, 0xb1, 0xb1, 0xcf, 0x73, 0x21, 0xf2, 0xc6, + 0x04, 0xb2, 0x46, 0x86, 0x1d, 0x07, 0x47, 0xcc, 0xb5, 0x2c, 0x73, 0x24, 0x85, 0x2f, 0xc0, 0xea, + 0x88, 0xda, 0x75, 0x86, 0xb4, 0x6f, 0xa0, 0x6e, 0x80, 0x7d, 0xbe, 0x98, 0x3f, 0x7c, 0xf0, 0xf6, + 0x4d, 0xe5, 0xf2, 0xee, 0xd6, 0xfd, 0xdd, 0xbd, 0x47, 0x2a, 0x4c, 0x20, 0x75, 0xc6, 0x10, 0x18, + 0x02, 0x1e, 0x82, 0x52, 0x07, 0x99, 0x47, 0xd8, 0xb3, 0xc6, 0xc8, 0xf0, 0xd7, 0x91, 0x8b, 0x31, + 0x23, 0x03, 0xee, 0x80, 0x75, 0xe2, 0x58, 0x06, 0xf5, 0xd0, 0x80, 0xf6, 0x49, 0x60, 0x04, 0x7d, + 0x1f, 0xd3, 0x3e, 0x71, 0x2c, 0xbe, 0x94, 0xcf, 0x5e, 0x79, 0xfb, 0xa6, 0xb2, 0xb8, 0x55, 0xd9, + 0x7a, 0xf4, 0x70, 0x6f, 0x37, 0x5c, 0xb7, 0x55, 0xe2, 0x58, 0x5a, 0x8c, 0xd2, 0x13, 0x12, 0x34, + 0xc0, 0x95, 0x13, 0xe4, 0x18, 0x0e, 0x3e, 0xc6, 0x0e, 0xbf, 0x7a, 0xa7, 0x70, 0x6f, 0xf9, 0x7e, + 0x75, 0x7b, 0xa6, 0xfb, 0x65, 0x7b, 0xf2, 0x72, 0xd8, 0x3e, 0x44, 0x8e, 0xcc, 0x48, 0xea, 0xc2, + 0x49, 0xfc, 0x17, 0x7c, 0x0d, 0x20, 0x3d, 0xf3, 0xcc, 0xbe, 0x4f, 0x3c, 0x32, 0xa4, 0x86, 0x49, + 0x5c, 0x96, 0xbb, 0x6b, 0x61, 0xa4, 0xa7, 0xef, 0x1f, 0x49, 0x4b, 0x99, 0x62, 0x88, 0x54, 0x8b, + 0xf4, 0xbc, 0x09, 0xfe, 0x08, 0xa0, 0xd9, 0xc7, 0xe6, 0xd1, 0x80, 0xd8, 0x5e, 0x60, 0x04, 0xb6, + 0x8b, 0xc9, 0x30, 0xe0, 0xd7, 0xf3, 0x27, 0x0f, 0xbe, 0x7d, 0x53, 0x59, 
0x7e, 0xc0, 0xa6, 0x2d, + 0x9d, 0xbf, 0x62, 0x8a, 0xd2, 0x23, 0x12, 0xfc, 0x11, 0xdc, 0xc8, 0xf0, 0x4d, 0xe2, 0x0e, 0x1c, + 0xcc, 0xae, 0x00, 0x23, 0x40, 0x7e, 0x0f, 0x07, 0xfc, 0xb5, 0x19, 0xf2, 0x75, 0x33, 0x25, 0x88, + 0x23, 0x80, 0x1e, 0xea, 0xe1, 0x0f, 0x60, 0x3d, 0xc3, 0xcf, 0x26, 0x17, 0xff, 0xeb, 0x92, 0x6b, + 0x35, 0xc5, 0x64, 0xf2, 0xeb, 0x1b, 0x70, 0x95, 0x5d, 0x80, 0x6c, 0xfd, 0xa9, 0xfd, 0x1a, 0xf3, + 0x1b, 0xf9, 0x3b, 0x16, 0xb8, 0xe8, 0xf4, 0x10, 0x39, 0x9a, 0xfd, 0x1a, 0x87, 0x72, 0xdb, 0x4b, + 0xe5, 0x9b, 0xb3, 0xc8, 0x6d, 0x2f, 0x91, 0xbf, 0x04, 0xd7, 0x59, 0x74, 0x1a, 0x20, 0xcf, 0xea, + 0x9c, 0x19, 0x34, 0xf0, 0x31, 0x72, 0x6d, 0xaf, 0x17, 0x9f, 0x4b, 0xd7, 0xf3, 0x69, 0xbc, 0x8b, + 0x4e, 0xb5, 0x48, 0xae, 0x25, 0xea, 0xe8, 0x50, 0x3a, 0x04, 0x1b, 0x16, 0xee, 0xa2, 0xa1, 0x13, + 0x30, 0x7e, 0x60, 0xd3, 0xc0, 0x36, 0x69, 0xb2, 0x2a, 0x37, 0x66, 0xb8, 0xda, 0x63, 0xb5, 0x36, + 0x12, 0xc7, 0x2b, 0xf2, 0xb7, 0x02, 0x58, 0x35, 0x89, 0x47, 0x03, 0x9f, 0xdd, 0xb7, 0x06, 0x3e, + 0x35, 0x9d, 0x21, 0xb5, 0x89, 0xc7, 0xdf, 0x0c, 0x13, 0xba, 0xf9, 0xfe, 0x09, 0x2d, 0x8e, 0xa8, + 0x52, 0x02, 0x55, 0x4b, 0xe6, 0xa4, 0x11, 0xb6, 0xc1, 0x9a, 0x39, 0xf4, 0x29, 0xf1, 0x8d, 0x60, + 0x38, 0x70, 0xb0, 0xd1, 0xf5, 0xa3, 0xef, 0x0e, 0xfe, 0xd6, 0x0c, 0xd9, 0x56, 0x8a, 0xa4, 0x3a, + 0x53, 0xd6, 0x63, 0x21, 0xfc, 0x01, 0x94, 0xba, 0x3e, 0x61, 0x77, 0x8b, 0xe3, 0xa0, 0x01, 0x4d, + 0xee, 0xf7, 0xdb, 0xf9, 0x39, 0xc6, 0xbd, 0x7d, 0x53, 0xb9, 0x5a, 0xd9, 0xba, 0x5f, 0xd9, 0xfb, + 0x72, 0xef, 0xd1, 0x83, 0x87, 0x7b, 0x5f, 0xaa, 0x45, 0x46, 0x12, 0x63, 0x50, 0x74, 0xe7, 0xfc, + 0x00, 0x4a, 0x3f, 0x11, 0xdb, 0x3b, 0x8f, 0xbf, 0xf3, 0x5e, 0x78, 0x46, 0x1a, 0xc7, 0xff, 0x15, + 0x94, 0xba, 0xc4, 0x37, 0xb1, 0x31, 0x40, 0x3e, 0x72, 0x1c, 0xec, 0x18, 0x2e, 0xb1, 0x30, 0xff, + 0xd1, 0x45, 0x4f, 0x98, 0x3a, 0x83, 0xb6, 0x63, 0x66, 0x93, 0x58, 0x58, 0x2d, 0x76, 0xcf, 0x9b, + 0xa0, 0x0f, 0x4a, 0xa6, 0x63, 0x63, 0x2f, 0x30, 0xd8, 0x56, 0x70, 0x31, 0xa5, 0xe1, 0xdd, 0x57, + 0xbe, 0xe8, 0x41, 0x2a, 0x93, 0x5e, 0x74, 0x90, 0x16, 0x23, 0x7c, 0xd3, 0xf6, 0x9a, 0x31, 0x1c, + 0x3a, 0x80, 0x73, 0x48, 0x6f, 0x3c, 0xe0, 0xc7, 0xbf, 0x59, 0xc0, 0x65, 0x87, 0xf4, 0xb2, 0xd1, + 0xce, 0xc0, 0xb5, 0x24, 0x1a, 0xf6, 0x7d, 0xe2, 0x87, 0x1b, 0x0a, 0xbb, 0xd8, 0x0b, 0xf8, 0x4f, + 0x7e, 0xb3, 0xa0, 0xab, 0x51, 0x50, 0x89, 0x05, 0xd0, 0x12, 0x3e, 0x7c, 0x0e, 0x36, 0x93, 0xd0, + 0xd6, 0xd0, 0x0f, 0x6b, 0x93, 0x4c, 0xf4, 0xbb, 0x33, 0x6c, 0xe3, 0x08, 0x5b, 0x8b, 0xc5, 0x29, + 0x59, 0x04, 0x2b, 0x8c, 0x9c, 0x9e, 0x8a, 0x94, 0xff, 0x34, 0xc4, 0x6d, 0x4e, 0xe0, 0xaa, 0x84, + 0x38, 0x71, 0x0d, 0xe3, 0x90, 0x9e, 0x98, 0x2a, 0x46, 0x90, 0x4c, 0x25, 0xf4, 0xff, 0xb3, 0x41, + 0x32, 0x85, 0x50, 0x03, 0x40, 0x06, 0xb1, 0x6c, 0x9a, 0xe5, 0xdc, 0xcb, 0xe5, 0x14, 0x1d, 0xd2, + 0xab, 0x8d, 0x89, 0xd8, 0x79, 0x1c, 0xa2, 0xe2, 0xd1, 0xf2, 0x9f, 0xe5, 0x42, 0x16, 0x19, 0x24, + 0x76, 0x67, 0xfb, 0x88, 0xc9, 0xa3, 0x45, 0x3e, 0xc6, 0x7e, 0x87, 0x50, 0x3b, 0x38, 0xe3, 0x3f, + 0xbf, 0xe8, 0x3e, 0x92, 0x49, 0x2f, 0x5c, 0xd7, 0x67, 0x09, 0x32, 0xec, 0xfb, 0xb8, 0x09, 0x7e, + 0x0b, 0xd8, 0xc4, 0x18, 0x0e, 0x31, 0x8f, 0x8c, 0x13, 0x64, 0x07, 0x94, 0xff, 0x5d, 0x6e, 0xef, + 0xd9, 0x68, 0x65, 0x62, 0x1e, 0x1d, 0x32, 0x7f, 0x78, 0x04, 0x96, 0x18, 0x21, 0xcd, 0x8f, 0xdf, + 0x87, 0x1d, 0xaf, 0x5f, 0xa8, 0xe3, 0xa3, 0x8c, 0x09, 0x83, 0xa5, 0xf9, 0x23, 0x44, 0xdd, 0x1d, + 0x55, 0x44, 0x94, 0xdf, 0xca, 0xcf, 0x46, 0x86, 0xd0, 0xe3, 0x7a, 0x88, 0xc2, 0xdb, 0x60, 0x91, + 0x62, 0xe4, 0x9b, 0x7d, 0x63, 0x80, 0x82, 0x3e, 0xbf, 0x7d, 0xa7, 0x70, 0xef, 0x8a, 0x0a, 0x22, + 
0x53, 0x1b, 0x05, 0x7d, 0xb6, 0x9c, 0x3e, 0x39, 0x31, 0x28, 0x36, 0x87, 0x3e, 0x5b, 0x88, 0x9d, + 0xfc, 0xe5, 0xf4, 0xc9, 0x89, 0x16, 0xbb, 0xc3, 0x9f, 0x0b, 0xe0, 0x66, 0x72, 0x07, 0x66, 0x2a, + 0x5b, 0xc3, 0xa6, 0xc4, 0x89, 0xf2, 0x63, 0x37, 0x9c, 0xa0, 0xd6, 0xfb, 0x4f, 0x50, 0xa6, 0xe8, + 0x6d, 0x24, 0x54, 0xf5, 0x7a, 0x1c, 0x74, 0x5a, 0x23, 0x7c, 0x02, 0x8a, 0xa3, 0x05, 0x1a, 0x7d, + 0x8f, 0x55, 0xf2, 0xa7, 0x8e, 0x1b, 0xa9, 0x92, 0x4f, 0xaf, 0xc7, 0x2c, 0xd9, 0xcd, 0xa3, 0x11, + 0xe4, 0xfe, 0x0c, 0xc5, 0x3f, 0x13, 0x24, 0x7a, 0x1b, 0x7c, 0x6c, 0x5b, 0x0e, 0x36, 0x6c, 0x6f, + 0x6c, 0x76, 0x28, 0xa6, 0x34, 0xfc, 0x80, 0x8b, 0xb1, 0x0f, 0xf2, 0xb1, 0xb7, 0x19, 0xa7, 0xe1, + 0x65, 0xc6, 0xab, 0x45, 0x90, 0x24, 0x54, 0x1f, 0x5c, 0xed, 0x9c, 0x05, 0x18, 0x19, 0x64, 0x18, + 0x0c, 0x86, 0x01, 0xbf, 0x17, 0xce, 0xbb, 0xf4, 0xfe, 0xf3, 0x5e, 0x65, 0x34, 0x25, 0x84, 0xa9, + 0x8b, 0x9d, 0xf4, 0x1f, 0x88, 0xc0, 0x95, 0x53, 0xd7, 0xe9, 0xd8, 0x1e, 0xf2, 0xcf, 0xf8, 0x2f, + 0xc2, 0x30, 0xe2, 0xfb, 0x87, 0x79, 0xee, 0x3a, 0xd5, 0x10, 0xa5, 0xa6, 0xd4, 0x38, 0x04, 0x19, + 0x84, 0x19, 0xf4, 0xf0, 0x37, 0x08, 0xa1, 0x84, 0x28, 0x35, 0xa5, 0xb2, 0xf2, 0xb9, 0x67, 0x7b, + 0xc6, 0x00, 0x7b, 0x16, 0xfb, 0x1c, 0x74, 0xec, 0x51, 0xc5, 0xfb, 0xe5, 0x0c, 0x25, 0x65, 0xcf, + 0xf6, 0xda, 0x91, 0x52, 0xb6, 0x93, 0xaa, 0xb7, 0x0e, 0x38, 0x0b, 0x23, 0x6b, 0x2c, 0x61, 0x1e, + 0xcd, 0x50, 0x3d, 0x27, 0xa2, 0x64, 0x25, 0x9f, 0x45, 0x2f, 0x46, 0xcc, 0x44, 0x8d, 0x01, 0xf6, + 0xb3, 0xa9, 0xc3, 0x7f, 0x95, 0x0f, 0x5c, 0x77, 0xd1, 0x29, 0x3b, 0xb1, 0x68, 0x1b, 0xfb, 0x99, + 0x7c, 0x81, 0x06, 0xb8, 0x15, 0xbf, 0x44, 0x59, 0xef, 0x80, 0xff, 0x21, 0x1f, 0xbe, 0x19, 0x3d, + 0x47, 0x59, 0xd3, 0x02, 0x7c, 0x0d, 0x16, 0x91, 0xef, 0xa3, 0x33, 0xc3, 0x1b, 0x3a, 0x0e, 0xe5, + 0xbf, 0xce, 0x3d, 0x4a, 0x40, 0xe8, 0xde, 0x62, 0xde, 0xf0, 0x15, 0x58, 0x61, 0xb5, 0x29, 0x75, + 0x10, 0xed, 0x1b, 0xaf, 0x86, 0x24, 0xc0, 0xfc, 0x1f, 0xc3, 0x85, 0x7f, 0x72, 0x81, 0x14, 0x4e, + 0x80, 0xdf, 0x31, 0x9e, 0xba, 0xdc, 0x19, 0xfb, 0x1f, 0xd6, 0x41, 0x31, 0x39, 0xbb, 0x4e, 0xec, + 0xa0, 0x6f, 0x10, 0xdb, 0xa2, 0xfc, 0x37, 0xb9, 0xbd, 0x5e, 0x89, 0x45, 0x87, 0x76, 0xd0, 0x57, + 0x6c, 0x8b, 0xc2, 0x16, 0x58, 0xc3, 0xd4, 0x44, 0x03, 0xcc, 0xca, 0x0b, 0x96, 0x4c, 0x27, 0xc8, + 0xf7, 0x6c, 0xaf, 0xc7, 0x3f, 0xce, 0x65, 0x95, 0x22, 0xa1, 0x16, 0xea, 0x0e, 0x23, 0x19, 0x94, + 0xc1, 0xaa, 0x43, 0xc2, 0x42, 0x0f, 0x05, 0xc6, 0xc0, 0xb7, 0x8f, 0x6d, 0x07, 0xb3, 0xcf, 0xaf, + 0x3f, 0xe5, 0xe2, 0xa0, 0x43, 0xc4, 0x50, 0xd6, 0x1e, 0xa9, 0x58, 0x05, 0x44, 0x06, 0xd8, 0x47, + 0x01, 0xf1, 0xd9, 0xda, 0x9b, 0xd8, 0xc2, 0xe1, 0xbb, 0x5f, 0xdc, 0xc7, 0x6f, 0x73, 0xa1, 0x1b, + 0x89, 0xbc, 0x3d, 0x52, 0x27, 0x3d, 0x6d, 0x81, 0xb5, 0x70, 0xa9, 0x0c, 0xe4, 0x38, 0x86, 0x6d, + 0x61, 0x2f, 0xb0, 0xbb, 0x36, 0xf6, 0x29, 0x2f, 0xe4, 0x8f, 0x3c, 0x14, 0x0a, 0x8e, 0xd3, 0x48, + 0x65, 0xac, 0xaf, 0x61, 0xa5, 0x86, 0x7c, 0x8b, 0x7d, 0xf1, 0x74, 0x89, 0x1f, 0xd6, 0x6a, 0xd1, + 0xb4, 0x52, 0xbe, 0x9a, 0xdf, 0xd7, 0x44, 0x2e, 0x8e, 0xd4, 0xd1, 0xdc, 0x52, 0xd8, 0x04, 0xab, + 0x49, 0xed, 0x6e, 0xbf, 0xc6, 0x06, 0xc5, 0xaf, 0xa8, 0x89, 0x3c, 0xca, 0x8b, 0xf9, 0x5d, 0xcd, + 0xe8, 0xb4, 0x58, 0xc6, 0x86, 0x1e, 0x6e, 0x1d, 0x16, 0x25, 0x4c, 0x78, 0x03, 0xbf, 0x1a, 0x22, + 0x87, 0xf2, 0xb5, 0x7c, 0xde, 0x48, 0xc8, 0x52, 0x5f, 0x0a, 0x65, 0xf0, 0x31, 0x58, 0xc2, 0xa7, + 0x76, 0x60, 0x90, 0xf8, 0x0b, 0x98, 0x97, 0xf2, 0x6f, 0x62, 0x26, 0x50, 0xa2, 0xef, 0x59, 0xf8, + 0x2d, 0x58, 0xa2, 0xf8, 0x55, 0xf4, 0x0c, 0x68, 0x12, 0x1a, 0xf0, 0xf5, 0x19, 0x0a, 0xb5, 0x45, + 0x8a, 0x5f, 0xb5, 0x51, 
0x0f, 0x8b, 0x84, 0x86, 0xe7, 0x97, 0x8f, 0x3c, 0x8b, 0xb8, 0x19, 0xc8, + 0xfe, 0x0c, 0x90, 0xe5, 0x48, 0x35, 0xe2, 0x7c, 0x0f, 0xd6, 0x33, 0x2f, 0xc2, 0x61, 0xed, 0x4f, + 0xfc, 0x23, 0x96, 0x15, 0x4f, 0xf2, 0x8b, 0xb1, 0x85, 0xb7, 0x6f, 0x2a, 0x97, 0x2a, 0x5b, 0x0f, + 0xee, 0xab, 0xab, 0x29, 0xa4, 0x89, 0x4e, 0x0f, 0x23, 0x04, 0xec, 0x83, 0x1b, 0x19, 0xf8, 0xe4, + 0x3b, 0x6f, 0x63, 0xc6, 0x27, 0x8b, 0xad, 0xca, 0x56, 0x65, 0x77, 0x57, 0xdd, 0x48, 0x61, 0xcf, + 0xce, 0xbd, 0xfe, 0xfe, 0xf4, 0xce, 0x48, 0xd1, 0x35, 0xf1, 0xe7, 0x19, 0x9f, 0x34, 0xa3, 0x48, + 0xef, 0x8a, 0x95, 0x14, 0xaf, 0x30, 0x13, 0xcb, 0x43, 0x03, 0x76, 0x7f, 0xf0, 0x4f, 0xf3, 0x23, + 0x14, 0xdf, 0xbe, 0xa9, 0x2c, 0x55, 0xc6, 0x5f, 0x90, 0x52, 0x52, 0x2b, 0x02, 0x95, 0x35, 0xb0, + 0x90, 0xbc, 0x99, 0xc1, 0x0d, 0xb0, 0x76, 0x28, 0xc8, 0x86, 0x2c, 0x3d, 0x93, 0x64, 0xe3, 0xa0, + 0xa5, 0xb5, 0x25, 0xb1, 0x51, 0x6f, 0x48, 0x35, 0xee, 0xff, 0xe0, 0x1a, 0x28, 0xa6, 0x4d, 0xaa, + 0xd4, 0x96, 0x1b, 0xa2, 0xc0, 0x15, 0xc6, 0xcd, 0xb2, 0xb2, 0xdf, 0x10, 0x05, 0x99, 0x9b, 0x2b, + 0xff, 0xa7, 0x00, 0x8a, 0x13, 0xef, 0x63, 0xb0, 0x0c, 0x6e, 0x69, 0x2f, 0x5a, 0xe2, 0x13, 0x55, + 0x69, 0x29, 0x07, 0x9a, 0x21, 0x2a, 0xcd, 0x66, 0x43, 0x3f, 0x17, 0x67, 0x03, 0xac, 0x4d, 0xf1, + 0x51, 0x5a, 0x5c, 0x01, 0x6e, 0x82, 0xf5, 0x69, 0x4d, 0xf5, 0x3a, 0x37, 0x07, 0x6f, 0x00, 0x7e, + 0x4a, 0x9b, 0xac, 0xb0, 0xee, 0xcc, 0xc3, 0x8f, 0xc1, 0xed, 0x29, 0xad, 0xaa, 0xd4, 0x54, 0x74, + 0xc9, 0x38, 0x54, 0x1b, 0xba, 0xc4, 0x5d, 0xfa, 0x65, 0x27, 0xa1, 0xdd, 0x96, 0x5f, 0x70, 0x1f, + 0x94, 0xff, 0x55, 0x00, 0xa5, 0x29, 0xef, 0x24, 0xf0, 0x13, 0x70, 0x47, 0x54, 0x5a, 0x9a, 0xae, + 0x0a, 0x8d, 0x96, 0x6e, 0x48, 0xcf, 0x45, 0xf9, 0x40, 0x6b, 0x28, 0xad, 0x73, 0x83, 0xbb, 0x0e, + 0xae, 0x4d, 0xf5, 0x0a, 0x87, 0x77, 0x03, 0xf0, 0xd3, 0x1b, 0xc3, 0x01, 0x96, 0xc1, 0xad, 0xa9, + 0xad, 0x6d, 0x41, 0xd5, 0x1b, 0x7a, 0x43, 0x69, 0x71, 0xf3, 0xe5, 0x9f, 0x0b, 0xa0, 0x38, 0xf1, + 0x66, 0xc0, 0xc6, 0x55, 0x57, 0x54, 0x51, 0x62, 0xae, 0x82, 0x2c, 0x4b, 0xb2, 0xd1, 0x54, 0x6a, + 0xd2, 0xb9, 0x9e, 0x6d, 0x82, 0xf5, 0x69, 0x4e, 0x61, 0xc7, 0xae, 0x83, 0x6b, 0x53, 0xdb, 0xc2, + 0x7e, 0xdd, 0x06, 0xd7, 0xa7, 0x35, 0xaa, 0xd2, 0xbe, 0x2a, 0x69, 0x1a, 0xeb, 0xd4, 0x1c, 0x58, + 0x48, 0xaa, 0x6c, 0xb6, 0xba, 0xb2, 0xb2, 0x3f, 0x35, 0xc1, 0x56, 0x01, 0x97, 0x36, 0xd5, 0xa4, + 0xea, 0xc1, 0xfe, 0x17, 0x5c, 0x61, 0x8a, 0x75, 0x8f, 0x9b, 0x9b, 0x62, 0x7d, 0xc0, 0xcd, 0x4f, + 0xb1, 0xde, 0xe7, 0x2e, 0x4d, 0xb1, 0x56, 0xb8, 0x0f, 0x60, 0x11, 0x2c, 0xa5, 0x56, 0x59, 0xd9, + 0xe7, 0x2e, 0x8f, 0x3b, 0xb6, 0x14, 0xbd, 0x21, 0x4a, 0xdc, 0x87, 0x2c, 0xc1, 0x53, 0xeb, 0xa1, + 0xa0, 0xb6, 0x1a, 0xad, 0x7d, 0x6e, 0x01, 0x96, 0xc0, 0x4a, 0x6a, 0x96, 0x54, 0x55, 0x51, 0xb9, + 0x2b, 0xe3, 0xc6, 0xba, 0xa0, 0x0b, 0x32, 0x07, 0xc6, 0x8d, 0x6d, 0xa1, 0xd5, 0x10, 0xb9, 0xc5, + 0xf2, 0x3f, 0x0b, 0xa0, 0x38, 0x51, 0x95, 0xb2, 0x95, 0x62, 0xae, 0x21, 0xce, 0x78, 0x26, 0xa9, + 0x55, 0x45, 0x6b, 0xe8, 0x2f, 0xce, 0xcd, 0xd3, 0x4d, 0xb0, 0x31, 0xcd, 0x49, 0x97, 0x54, 0x4d, + 0xe2, 0x0a, 0x6c, 0x3d, 0xa6, 0x35, 0xd7, 0xa4, 0xba, 0x70, 0x20, 0xeb, 0xd1, 0x82, 0x4d, 0x73, + 0x88, 0xfe, 0x92, 0xb8, 0xf9, 0xf2, 0xdf, 0x0b, 0xe0, 0x6a, 0xb6, 0xf0, 0x4c, 0x22, 0x6a, 0xba, + 0xa0, 0x4b, 0x4d, 0xa9, 0x75, 0x7e, 0xc7, 0xae, 0x03, 0x38, 0xde, 0xdc, 0x52, 0x5a, 0x52, 0x74, + 0x34, 0x8c, 0xdb, 0x6b, 0x35, 0x99, 0x9b, 0x9b, 0x34, 0x37, 0x95, 0x1a, 0x37, 0x3f, 0x69, 0x16, + 0x64, 0x99, 0xbb, 0x54, 0xfe, 0x6f, 0x01, 0xac, 0x4e, 0xad, 0xe3, 0xee, 0x82, 0x8f, 0x74, 0x55, + 0x68, 0x69, 0x82, 0xc8, 0x92, 0xdf, 0x68, 0x68, 
0x8a, 0x2c, 0xe8, 0x93, 0x3b, 0xee, 0x73, 0xf0, + 0xe9, 0x74, 0x37, 0x55, 0x12, 0x6a, 0xc6, 0x41, 0x2b, 0xda, 0xe5, 0xba, 0x54, 0xe3, 0x0a, 0xf0, + 0x1e, 0xf8, 0xe4, 0x17, 0x7c, 0x53, 0xcf, 0x39, 0xf8, 0x19, 0xb8, 0xfb, 0x2e, 0xcf, 0xb6, 0x24, + 0xe8, 0x42, 0x55, 0x96, 0x42, 0x11, 0x37, 0x0f, 0x3f, 0x05, 0xe5, 0xe9, 0xae, 0x9a, 0xa4, 0x36, + 0x04, 0xb9, 0xf1, 0x92, 0x39, 0x73, 0x97, 0xca, 0xdf, 0x83, 0xc5, 0x4c, 0x51, 0xc5, 0x0e, 0x83, + 0xea, 0x0b, 0x5d, 0x12, 0x0c, 0xe5, 0x40, 0x6f, 0x1f, 0xe8, 0x93, 0x7b, 0x65, 0xac, 0xf5, 0x89, + 0xf4, 0x9c, 0x2b, 0x40, 0x1e, 0xac, 0x8e, 0x59, 0x25, 0x4d, 0x14, 0xda, 0xac, 0xbf, 0x65, 0x15, + 0x5c, 0x19, 0x95, 0x52, 0x6c, 0xab, 0x3f, 0x6f, 0xca, 0x46, 0xb5, 0xd1, 0x12, 0xd4, 0x17, 0x93, + 0xa7, 0x7c, 0xa6, 0xad, 0x2a, 0x68, 0xd2, 0xc3, 0x3d, 0xae, 0x00, 0x21, 0x58, 0xce, 0x98, 0x59, + 0xb4, 0xb9, 0xf2, 0xf3, 0x90, 0x19, 0xd5, 0x4e, 0x09, 0x53, 0x69, 0x4f, 0x59, 0x82, 0x6b, 0xa0, + 0x94, 0x69, 0xab, 0x29, 0xe2, 0x01, 0x5b, 0x5f, 0xae, 0xc0, 0x12, 0x27, 0xd3, 0x20, 0x2a, 0x2d, + 0x9d, 0xd9, 0xe7, 0xd8, 0x19, 0xbb, 0x3c, 0xfe, 0x75, 0xce, 0x92, 0xb6, 0x2a, 0x88, 0x4f, 0x35, + 0x59, 0xd0, 0x9e, 0x18, 0xdf, 0x1d, 0xb0, 0x13, 0x79, 0x3c, 0x48, 0x09, 0xac, 0x9c, 0x73, 0x88, + 0x02, 0x9c, 0x57, 0x29, 0x2d, 0x6e, 0x8e, 0xf5, 0x68, 0xc2, 0x5e, 0xaf, 0x73, 0xf3, 0xf0, 0x23, + 0x70, 0xf3, 0x7c, 0x83, 0x26, 0xd4, 0x25, 0x43, 0x6a, 0x89, 0x4a, 0x8d, 0x6d, 0xfc, 0x4b, 0xe5, + 0x7f, 0xcf, 0x81, 0xb5, 0xf3, 0xd5, 0x84, 0x86, 0x83, 0x4a, 0x05, 0x5a, 0x80, 0xc3, 0xdd, 0x2e, + 0x36, 0x03, 0xfb, 0x18, 0x1b, 0x51, 0xdd, 0x11, 0xff, 0x2e, 0xe1, 0xab, 0xf7, 0xae, 0x52, 0xd4, + 0x95, 0x11, 0x32, 0x32, 0xc1, 0x97, 0x60, 0x71, 0x48, 0xb1, 0x9f, 0x04, 0x98, 0xbb, 0x68, 0x00, + 0xc0, 0x68, 0x31, 0xfb, 0x2f, 0x60, 0x39, 0xa9, 0x79, 0x62, 0xfc, 0xfc, 0x45, 0xf1, 0x4b, 0x31, + 0x30, 0x32, 0x54, 0x9f, 0xbd, 0xd4, 0x7b, 0x76, 0xd0, 0x1f, 0x76, 0xb6, 0x4d, 0xe2, 0xee, 0x44, + 0xd4, 0xad, 0xe8, 0xd7, 0x1f, 0x3d, 0xb2, 0xd5, 0xc3, 0x5e, 0xf8, 0x1d, 0xb3, 0x33, 0xd3, 0xef, + 0x52, 0xbe, 0x4e, 0x8d, 0x9d, 0xcb, 0xa1, 0xee, 0xc1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x82, + 0x1c, 0x97, 0x00, 0xd2, 0x22, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql9_6.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql9_6.pb.go new file mode 100644 index 000000000..56cc44edf --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config/postgresql9_6.pb.go @@ -0,0 +1,1312 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/config/postgresql9_6.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PostgresqlConfig9_6_WalLevel int32 + +const ( + PostgresqlConfig9_6_WAL_LEVEL_UNSPECIFIED PostgresqlConfig9_6_WalLevel = 0 + PostgresqlConfig9_6_WAL_LEVEL_REPLICA PostgresqlConfig9_6_WalLevel = 1 + PostgresqlConfig9_6_WAL_LEVEL_LOGICAL PostgresqlConfig9_6_WalLevel = 2 +) + +var PostgresqlConfig9_6_WalLevel_name = map[int32]string{ + 0: "WAL_LEVEL_UNSPECIFIED", + 1: "WAL_LEVEL_REPLICA", + 2: "WAL_LEVEL_LOGICAL", +} +var PostgresqlConfig9_6_WalLevel_value = map[string]int32{ + "WAL_LEVEL_UNSPECIFIED": 0, + "WAL_LEVEL_REPLICA": 1, + "WAL_LEVEL_LOGICAL": 2, +} + +func (x PostgresqlConfig9_6_WalLevel) String() string { + return proto.EnumName(PostgresqlConfig9_6_WalLevel_name, int32(x)) +} +func (PostgresqlConfig9_6_WalLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 0} +} + +type PostgresqlConfig9_6_SynchronousCommit int32 + +const ( + PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_UNSPECIFIED PostgresqlConfig9_6_SynchronousCommit = 0 + PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_ON PostgresqlConfig9_6_SynchronousCommit = 1 + PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_OFF PostgresqlConfig9_6_SynchronousCommit = 2 + PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_LOCAL PostgresqlConfig9_6_SynchronousCommit = 3 + PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_REMOTE_WRITE PostgresqlConfig9_6_SynchronousCommit = 4 + PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_REMOTE_APPLY PostgresqlConfig9_6_SynchronousCommit = 5 +) + +var PostgresqlConfig9_6_SynchronousCommit_name = map[int32]string{ + 0: "SYNCHRONOUS_COMMIT_UNSPECIFIED", + 1: "SYNCHRONOUS_COMMIT_ON", + 2: "SYNCHRONOUS_COMMIT_OFF", + 3: "SYNCHRONOUS_COMMIT_LOCAL", + 4: "SYNCHRONOUS_COMMIT_REMOTE_WRITE", + 5: "SYNCHRONOUS_COMMIT_REMOTE_APPLY", +} +var PostgresqlConfig9_6_SynchronousCommit_value = map[string]int32{ + "SYNCHRONOUS_COMMIT_UNSPECIFIED": 0, + "SYNCHRONOUS_COMMIT_ON": 1, + "SYNCHRONOUS_COMMIT_OFF": 2, + "SYNCHRONOUS_COMMIT_LOCAL": 3, + "SYNCHRONOUS_COMMIT_REMOTE_WRITE": 4, + "SYNCHRONOUS_COMMIT_REMOTE_APPLY": 5, +} + +func (x PostgresqlConfig9_6_SynchronousCommit) String() string { + return proto.EnumName(PostgresqlConfig9_6_SynchronousCommit_name, int32(x)) +} +func (PostgresqlConfig9_6_SynchronousCommit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 1} +} + +type PostgresqlConfig9_6_ConstraintExclusion int32 + +const ( + PostgresqlConfig9_6_CONSTRAINT_EXCLUSION_UNSPECIFIED PostgresqlConfig9_6_ConstraintExclusion = 0 + PostgresqlConfig9_6_CONSTRAINT_EXCLUSION_ON PostgresqlConfig9_6_ConstraintExclusion = 1 + PostgresqlConfig9_6_CONSTRAINT_EXCLUSION_OFF PostgresqlConfig9_6_ConstraintExclusion = 2 + PostgresqlConfig9_6_CONSTRAINT_EXCLUSION_PARTITION PostgresqlConfig9_6_ConstraintExclusion = 3 +) + +var PostgresqlConfig9_6_ConstraintExclusion_name = map[int32]string{ + 0: "CONSTRAINT_EXCLUSION_UNSPECIFIED", + 1: "CONSTRAINT_EXCLUSION_ON", + 2: "CONSTRAINT_EXCLUSION_OFF", + 3: "CONSTRAINT_EXCLUSION_PARTITION", +} +var PostgresqlConfig9_6_ConstraintExclusion_value = map[string]int32{ + "CONSTRAINT_EXCLUSION_UNSPECIFIED": 0, + "CONSTRAINT_EXCLUSION_ON": 1, + "CONSTRAINT_EXCLUSION_OFF": 2, + "CONSTRAINT_EXCLUSION_PARTITION": 3, +} + +func (x PostgresqlConfig9_6_ConstraintExclusion) String() string { + return proto.EnumName(PostgresqlConfig9_6_ConstraintExclusion_name, int32(x)) +} +func (PostgresqlConfig9_6_ConstraintExclusion) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 2} +} + +type PostgresqlConfig9_6_ForceParallelMode int32 + +const ( + PostgresqlConfig9_6_FORCE_PARALLEL_MODE_UNSPECIFIED PostgresqlConfig9_6_ForceParallelMode = 0 + PostgresqlConfig9_6_FORCE_PARALLEL_MODE_ON PostgresqlConfig9_6_ForceParallelMode = 1 + PostgresqlConfig9_6_FORCE_PARALLEL_MODE_OFF PostgresqlConfig9_6_ForceParallelMode = 2 + PostgresqlConfig9_6_FORCE_PARALLEL_MODE_REGRESS PostgresqlConfig9_6_ForceParallelMode = 3 +) + +var PostgresqlConfig9_6_ForceParallelMode_name = map[int32]string{ + 0: "FORCE_PARALLEL_MODE_UNSPECIFIED", + 1: "FORCE_PARALLEL_MODE_ON", + 2: "FORCE_PARALLEL_MODE_OFF", + 3: "FORCE_PARALLEL_MODE_REGRESS", +} +var PostgresqlConfig9_6_ForceParallelMode_value = map[string]int32{ + "FORCE_PARALLEL_MODE_UNSPECIFIED": 0, + "FORCE_PARALLEL_MODE_ON": 1, + "FORCE_PARALLEL_MODE_OFF": 2, + "FORCE_PARALLEL_MODE_REGRESS": 3, +} + +func (x PostgresqlConfig9_6_ForceParallelMode) String() string { + return proto.EnumName(PostgresqlConfig9_6_ForceParallelMode_name, int32(x)) +} +func (PostgresqlConfig9_6_ForceParallelMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 3} +} + +type PostgresqlConfig9_6_LogLevel int32 + +const ( + PostgresqlConfig9_6_LOG_LEVEL_UNSPECIFIED PostgresqlConfig9_6_LogLevel = 0 + PostgresqlConfig9_6_LOG_LEVEL_DEBUG5 PostgresqlConfig9_6_LogLevel = 1 + PostgresqlConfig9_6_LOG_LEVEL_DEBUG4 PostgresqlConfig9_6_LogLevel = 2 + PostgresqlConfig9_6_LOG_LEVEL_DEBUG3 PostgresqlConfig9_6_LogLevel = 3 + PostgresqlConfig9_6_LOG_LEVEL_DEBUG2 PostgresqlConfig9_6_LogLevel = 4 + PostgresqlConfig9_6_LOG_LEVEL_DEBUG1 PostgresqlConfig9_6_LogLevel = 5 + PostgresqlConfig9_6_LOG_LEVEL_LOG PostgresqlConfig9_6_LogLevel = 6 + PostgresqlConfig9_6_LOG_LEVEL_NOTICE PostgresqlConfig9_6_LogLevel = 7 + PostgresqlConfig9_6_LOG_LEVEL_WARNING PostgresqlConfig9_6_LogLevel = 8 + PostgresqlConfig9_6_LOG_LEVEL_ERROR PostgresqlConfig9_6_LogLevel = 9 + PostgresqlConfig9_6_LOG_LEVEL_FATAL PostgresqlConfig9_6_LogLevel = 10 + PostgresqlConfig9_6_LOG_LEVEL_PANIC PostgresqlConfig9_6_LogLevel = 11 +) + +var PostgresqlConfig9_6_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNSPECIFIED", + 1: "LOG_LEVEL_DEBUG5", + 2: "LOG_LEVEL_DEBUG4", + 3: "LOG_LEVEL_DEBUG3", + 4: "LOG_LEVEL_DEBUG2", + 5: "LOG_LEVEL_DEBUG1", + 6: "LOG_LEVEL_LOG", + 7: "LOG_LEVEL_NOTICE", + 8: "LOG_LEVEL_WARNING", + 9: "LOG_LEVEL_ERROR", + 10: "LOG_LEVEL_FATAL", + 11: "LOG_LEVEL_PANIC", +} +var PostgresqlConfig9_6_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNSPECIFIED": 0, + "LOG_LEVEL_DEBUG5": 1, + "LOG_LEVEL_DEBUG4": 2, + "LOG_LEVEL_DEBUG3": 3, + "LOG_LEVEL_DEBUG2": 4, + "LOG_LEVEL_DEBUG1": 5, + "LOG_LEVEL_LOG": 6, + "LOG_LEVEL_NOTICE": 7, + "LOG_LEVEL_WARNING": 8, + "LOG_LEVEL_ERROR": 9, + "LOG_LEVEL_FATAL": 10, + "LOG_LEVEL_PANIC": 11, +} + +func (x PostgresqlConfig9_6_LogLevel) String() string { + return proto.EnumName(PostgresqlConfig9_6_LogLevel_name, int32(x)) +} +func (PostgresqlConfig9_6_LogLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 4} +} + +type PostgresqlConfig9_6_LogErrorVerbosity int32 + +const ( + PostgresqlConfig9_6_LOG_ERROR_VERBOSITY_UNSPECIFIED PostgresqlConfig9_6_LogErrorVerbosity = 0 + PostgresqlConfig9_6_LOG_ERROR_VERBOSITY_TERSE PostgresqlConfig9_6_LogErrorVerbosity = 1 + PostgresqlConfig9_6_LOG_ERROR_VERBOSITY_DEFAULT PostgresqlConfig9_6_LogErrorVerbosity = 2 + PostgresqlConfig9_6_LOG_ERROR_VERBOSITY_VERBOSE 
PostgresqlConfig9_6_LogErrorVerbosity = 3 +) + +var PostgresqlConfig9_6_LogErrorVerbosity_name = map[int32]string{ + 0: "LOG_ERROR_VERBOSITY_UNSPECIFIED", + 1: "LOG_ERROR_VERBOSITY_TERSE", + 2: "LOG_ERROR_VERBOSITY_DEFAULT", + 3: "LOG_ERROR_VERBOSITY_VERBOSE", +} +var PostgresqlConfig9_6_LogErrorVerbosity_value = map[string]int32{ + "LOG_ERROR_VERBOSITY_UNSPECIFIED": 0, + "LOG_ERROR_VERBOSITY_TERSE": 1, + "LOG_ERROR_VERBOSITY_DEFAULT": 2, + "LOG_ERROR_VERBOSITY_VERBOSE": 3, +} + +func (x PostgresqlConfig9_6_LogErrorVerbosity) String() string { + return proto.EnumName(PostgresqlConfig9_6_LogErrorVerbosity_name, int32(x)) +} +func (PostgresqlConfig9_6_LogErrorVerbosity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 5} +} + +type PostgresqlConfig9_6_LogStatement int32 + +const ( + PostgresqlConfig9_6_LOG_STATEMENT_UNSPECIFIED PostgresqlConfig9_6_LogStatement = 0 + PostgresqlConfig9_6_LOG_STATEMENT_NONE PostgresqlConfig9_6_LogStatement = 1 + PostgresqlConfig9_6_LOG_STATEMENT_DDL PostgresqlConfig9_6_LogStatement = 2 + PostgresqlConfig9_6_LOG_STATEMENT_MOD PostgresqlConfig9_6_LogStatement = 3 + PostgresqlConfig9_6_LOG_STATEMENT_ALL PostgresqlConfig9_6_LogStatement = 4 +) + +var PostgresqlConfig9_6_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: "LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var PostgresqlConfig9_6_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x PostgresqlConfig9_6_LogStatement) String() string { + return proto.EnumName(PostgresqlConfig9_6_LogStatement_name, int32(x)) +} +func (PostgresqlConfig9_6_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 6} +} + +type PostgresqlConfig9_6_TransactionIsolation int32 + +const ( + PostgresqlConfig9_6_TRANSACTION_ISOLATION_UNSPECIFIED PostgresqlConfig9_6_TransactionIsolation = 0 + PostgresqlConfig9_6_TRANSACTION_ISOLATION_READ_UNCOMMITTED PostgresqlConfig9_6_TransactionIsolation = 1 + PostgresqlConfig9_6_TRANSACTION_ISOLATION_READ_COMMITTED PostgresqlConfig9_6_TransactionIsolation = 2 + PostgresqlConfig9_6_TRANSACTION_ISOLATION_REPEATABLE_READ PostgresqlConfig9_6_TransactionIsolation = 3 + PostgresqlConfig9_6_TRANSACTION_ISOLATION_SERIALIZABLE PostgresqlConfig9_6_TransactionIsolation = 4 +) + +var PostgresqlConfig9_6_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var PostgresqlConfig9_6_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x PostgresqlConfig9_6_TransactionIsolation) String() string { + return proto.EnumName(PostgresqlConfig9_6_TransactionIsolation_name, int32(x)) +} +func (PostgresqlConfig9_6_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 7} +} + +type PostgresqlConfig9_6_ByteaOutput int32 + +const ( + PostgresqlConfig9_6_BYTEA_OUTPUT_UNSPECIFIED 
PostgresqlConfig9_6_ByteaOutput = 0 + PostgresqlConfig9_6_BYTEA_OUTPUT_HEX PostgresqlConfig9_6_ByteaOutput = 1 + PostgresqlConfig9_6_BYTEA_OUTPUT_ESCAPED PostgresqlConfig9_6_ByteaOutput = 2 +) + +var PostgresqlConfig9_6_ByteaOutput_name = map[int32]string{ + 0: "BYTEA_OUTPUT_UNSPECIFIED", + 1: "BYTEA_OUTPUT_HEX", + 2: "BYTEA_OUTPUT_ESCAPED", +} +var PostgresqlConfig9_6_ByteaOutput_value = map[string]int32{ + "BYTEA_OUTPUT_UNSPECIFIED": 0, + "BYTEA_OUTPUT_HEX": 1, + "BYTEA_OUTPUT_ESCAPED": 2, +} + +func (x PostgresqlConfig9_6_ByteaOutput) String() string { + return proto.EnumName(PostgresqlConfig9_6_ByteaOutput_name, int32(x)) +} +func (PostgresqlConfig9_6_ByteaOutput) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 8} +} + +type PostgresqlConfig9_6_XmlBinary int32 + +const ( + PostgresqlConfig9_6_XML_BINARY_UNSPECIFIED PostgresqlConfig9_6_XmlBinary = 0 + PostgresqlConfig9_6_XML_BINARY_BASE64 PostgresqlConfig9_6_XmlBinary = 1 + PostgresqlConfig9_6_XML_BINARY_HEX PostgresqlConfig9_6_XmlBinary = 2 +) + +var PostgresqlConfig9_6_XmlBinary_name = map[int32]string{ + 0: "XML_BINARY_UNSPECIFIED", + 1: "XML_BINARY_BASE64", + 2: "XML_BINARY_HEX", +} +var PostgresqlConfig9_6_XmlBinary_value = map[string]int32{ + "XML_BINARY_UNSPECIFIED": 0, + "XML_BINARY_BASE64": 1, + "XML_BINARY_HEX": 2, +} + +func (x PostgresqlConfig9_6_XmlBinary) String() string { + return proto.EnumName(PostgresqlConfig9_6_XmlBinary_name, int32(x)) +} +func (PostgresqlConfig9_6_XmlBinary) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 9} +} + +type PostgresqlConfig9_6_XmlOption int32 + +const ( + PostgresqlConfig9_6_XML_OPTION_UNSPECIFIED PostgresqlConfig9_6_XmlOption = 0 + PostgresqlConfig9_6_XML_OPTION_DOCUMENT PostgresqlConfig9_6_XmlOption = 1 + PostgresqlConfig9_6_XML_OPTION_CONTENT PostgresqlConfig9_6_XmlOption = 2 +) + +var PostgresqlConfig9_6_XmlOption_name = map[int32]string{ + 0: "XML_OPTION_UNSPECIFIED", + 1: "XML_OPTION_DOCUMENT", + 2: "XML_OPTION_CONTENT", +} +var PostgresqlConfig9_6_XmlOption_value = map[string]int32{ + "XML_OPTION_UNSPECIFIED": 0, + "XML_OPTION_DOCUMENT": 1, + "XML_OPTION_CONTENT": 2, +} + +func (x PostgresqlConfig9_6_XmlOption) String() string { + return proto.EnumName(PostgresqlConfig9_6_XmlOption_name, int32(x)) +} +func (PostgresqlConfig9_6_XmlOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 10} +} + +type PostgresqlConfig9_6_BackslashQuote int32 + +const ( + PostgresqlConfig9_6_BACKSLASH_QUOTE_UNSPECIFIED PostgresqlConfig9_6_BackslashQuote = 0 + PostgresqlConfig9_6_BACKSLASH_QUOTE PostgresqlConfig9_6_BackslashQuote = 1 + PostgresqlConfig9_6_BACKSLASH_QUOTE_ON PostgresqlConfig9_6_BackslashQuote = 2 + PostgresqlConfig9_6_BACKSLASH_QUOTE_OFF PostgresqlConfig9_6_BackslashQuote = 3 + PostgresqlConfig9_6_BACKSLASH_QUOTE_SAFE_ENCODING PostgresqlConfig9_6_BackslashQuote = 4 +) + +var PostgresqlConfig9_6_BackslashQuote_name = map[int32]string{ + 0: "BACKSLASH_QUOTE_UNSPECIFIED", + 1: "BACKSLASH_QUOTE", + 2: "BACKSLASH_QUOTE_ON", + 3: "BACKSLASH_QUOTE_OFF", + 4: "BACKSLASH_QUOTE_SAFE_ENCODING", +} +var PostgresqlConfig9_6_BackslashQuote_value = map[string]int32{ + "BACKSLASH_QUOTE_UNSPECIFIED": 0, + "BACKSLASH_QUOTE": 1, + "BACKSLASH_QUOTE_ON": 2, + "BACKSLASH_QUOTE_OFF": 3, + "BACKSLASH_QUOTE_SAFE_ENCODING": 4, +} + +func (x PostgresqlConfig9_6_BackslashQuote) String() string { + return proto.EnumName(PostgresqlConfig9_6_BackslashQuote_name, 
int32(x)) +} +func (PostgresqlConfig9_6_BackslashQuote) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0, 11} +} + +// Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file +// parameters whose detailed description is available in +// [PostgreSQL documentation](https://www.postgresql.org/docs/9.6/static/runtime-config). +type PostgresqlConfig9_6 struct { + MaxConnections *wrappers.Int64Value `protobuf:"bytes,1,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + SharedBuffers *wrappers.Int64Value `protobuf:"bytes,2,opt,name=shared_buffers,json=sharedBuffers,proto3" json:"shared_buffers,omitempty"` + TempBuffers *wrappers.Int64Value `protobuf:"bytes,3,opt,name=temp_buffers,json=tempBuffers,proto3" json:"temp_buffers,omitempty"` + MaxPreparedTransactions *wrappers.Int64Value `protobuf:"bytes,4,opt,name=max_prepared_transactions,json=maxPreparedTransactions,proto3" json:"max_prepared_transactions,omitempty"` + WorkMem *wrappers.Int64Value `protobuf:"bytes,5,opt,name=work_mem,json=workMem,proto3" json:"work_mem,omitempty"` + MaintenanceWorkMem *wrappers.Int64Value `protobuf:"bytes,6,opt,name=maintenance_work_mem,json=maintenanceWorkMem,proto3" json:"maintenance_work_mem,omitempty"` + ReplacementSortTuples *wrappers.Int64Value `protobuf:"bytes,7,opt,name=replacement_sort_tuples,json=replacementSortTuples,proto3" json:"replacement_sort_tuples,omitempty"` + AutovacuumWorkMem *wrappers.Int64Value `protobuf:"bytes,8,opt,name=autovacuum_work_mem,json=autovacuumWorkMem,proto3" json:"autovacuum_work_mem,omitempty"` + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,9,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + VacuumCostDelay *wrappers.Int64Value `protobuf:"bytes,10,opt,name=vacuum_cost_delay,json=vacuumCostDelay,proto3" json:"vacuum_cost_delay,omitempty"` + VacuumCostPageHit *wrappers.Int64Value `protobuf:"bytes,11,opt,name=vacuum_cost_page_hit,json=vacuumCostPageHit,proto3" json:"vacuum_cost_page_hit,omitempty"` + VacuumCostPageMiss *wrappers.Int64Value `protobuf:"bytes,12,opt,name=vacuum_cost_page_miss,json=vacuumCostPageMiss,proto3" json:"vacuum_cost_page_miss,omitempty"` + VacuumCostPageDirty *wrappers.Int64Value `protobuf:"bytes,13,opt,name=vacuum_cost_page_dirty,json=vacuumCostPageDirty,proto3" json:"vacuum_cost_page_dirty,omitempty"` + VacuumCostLimit *wrappers.Int64Value `protobuf:"bytes,14,opt,name=vacuum_cost_limit,json=vacuumCostLimit,proto3" json:"vacuum_cost_limit,omitempty"` + BgwriterDelay *wrappers.Int64Value `protobuf:"bytes,15,opt,name=bgwriter_delay,json=bgwriterDelay,proto3" json:"bgwriter_delay,omitempty"` + BgwriterLruMaxpages *wrappers.Int64Value `protobuf:"bytes,16,opt,name=bgwriter_lru_maxpages,json=bgwriterLruMaxpages,proto3" json:"bgwriter_lru_maxpages,omitempty"` + BgwriterLruMultiplier *wrappers.DoubleValue `protobuf:"bytes,17,opt,name=bgwriter_lru_multiplier,json=bgwriterLruMultiplier,proto3" json:"bgwriter_lru_multiplier,omitempty"` + BgwriterFlushAfter *wrappers.Int64Value `protobuf:"bytes,18,opt,name=bgwriter_flush_after,json=bgwriterFlushAfter,proto3" json:"bgwriter_flush_after,omitempty"` + BackendFlushAfter *wrappers.Int64Value `protobuf:"bytes,19,opt,name=backend_flush_after,json=backendFlushAfter,proto3" json:"backend_flush_after,omitempty"` + OldSnapshotThreshold *wrappers.Int64Value `protobuf:"bytes,20,opt,name=old_snapshot_threshold,json=oldSnapshotThreshold,proto3" 
json:"old_snapshot_threshold,omitempty"` + WalLevel PostgresqlConfig9_6_WalLevel `protobuf:"varint,21,opt,name=wal_level,json=walLevel,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_WalLevel" json:"wal_level,omitempty"` + SynchronousCommit PostgresqlConfig9_6_SynchronousCommit `protobuf:"varint,22,opt,name=synchronous_commit,json=synchronousCommit,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_SynchronousCommit" json:"synchronous_commit,omitempty"` + CheckpointTimeout *wrappers.Int64Value `protobuf:"bytes,23,opt,name=checkpoint_timeout,json=checkpointTimeout,proto3" json:"checkpoint_timeout,omitempty"` + CheckpointCompletionTarget *wrappers.DoubleValue `protobuf:"bytes,24,opt,name=checkpoint_completion_target,json=checkpointCompletionTarget,proto3" json:"checkpoint_completion_target,omitempty"` + CheckpointFlushAfter *wrappers.Int64Value `protobuf:"bytes,25,opt,name=checkpoint_flush_after,json=checkpointFlushAfter,proto3" json:"checkpoint_flush_after,omitempty"` + MaxWalSize *wrappers.Int64Value `protobuf:"bytes,26,opt,name=max_wal_size,json=maxWalSize,proto3" json:"max_wal_size,omitempty"` + MinWalSize *wrappers.Int64Value `protobuf:"bytes,27,opt,name=min_wal_size,json=minWalSize,proto3" json:"min_wal_size,omitempty"` + MaxStandbyStreamingDelay *wrappers.Int64Value `protobuf:"bytes,28,opt,name=max_standby_streaming_delay,json=maxStandbyStreamingDelay,proto3" json:"max_standby_streaming_delay,omitempty"` + DefaultStatisticsTarget *wrappers.Int64Value `protobuf:"bytes,29,opt,name=default_statistics_target,json=defaultStatisticsTarget,proto3" json:"default_statistics_target,omitempty"` + ConstraintExclusion PostgresqlConfig9_6_ConstraintExclusion `protobuf:"varint,30,opt,name=constraint_exclusion,json=constraintExclusion,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_ConstraintExclusion" json:"constraint_exclusion,omitempty"` + CursorTupleFraction *wrappers.DoubleValue `protobuf:"bytes,31,opt,name=cursor_tuple_fraction,json=cursorTupleFraction,proto3" json:"cursor_tuple_fraction,omitempty"` + FromCollapseLimit *wrappers.Int64Value `protobuf:"bytes,32,opt,name=from_collapse_limit,json=fromCollapseLimit,proto3" json:"from_collapse_limit,omitempty"` + JoinCollapseLimit *wrappers.Int64Value `protobuf:"bytes,33,opt,name=join_collapse_limit,json=joinCollapseLimit,proto3" json:"join_collapse_limit,omitempty"` + ForceParallelMode PostgresqlConfig9_6_ForceParallelMode `protobuf:"varint,34,opt,name=force_parallel_mode,json=forceParallelMode,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_ForceParallelMode" json:"force_parallel_mode,omitempty"` + ClientMinMessages PostgresqlConfig9_6_LogLevel `protobuf:"varint,35,opt,name=client_min_messages,json=clientMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogLevel" json:"client_min_messages,omitempty"` + LogMinMessages PostgresqlConfig9_6_LogLevel `protobuf:"varint,36,opt,name=log_min_messages,json=logMinMessages,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogLevel" json:"log_min_messages,omitempty"` + LogMinErrorStatement PostgresqlConfig9_6_LogLevel `protobuf:"varint,37,opt,name=log_min_error_statement,json=logMinErrorStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogLevel" json:"log_min_error_statement,omitempty"` + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,38,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" 
json:"log_min_duration_statement,omitempty"` + LogCheckpoints *wrappers.BoolValue `protobuf:"bytes,39,opt,name=log_checkpoints,json=logCheckpoints,proto3" json:"log_checkpoints,omitempty"` + LogConnections *wrappers.BoolValue `protobuf:"bytes,40,opt,name=log_connections,json=logConnections,proto3" json:"log_connections,omitempty"` + LogDisconnections *wrappers.BoolValue `protobuf:"bytes,41,opt,name=log_disconnections,json=logDisconnections,proto3" json:"log_disconnections,omitempty"` + LogDuration *wrappers.BoolValue `protobuf:"bytes,42,opt,name=log_duration,json=logDuration,proto3" json:"log_duration,omitempty"` + LogErrorVerbosity PostgresqlConfig9_6_LogErrorVerbosity `protobuf:"varint,43,opt,name=log_error_verbosity,json=logErrorVerbosity,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogErrorVerbosity" json:"log_error_verbosity,omitempty"` + LogLockWaits *wrappers.BoolValue `protobuf:"bytes,44,opt,name=log_lock_waits,json=logLockWaits,proto3" json:"log_lock_waits,omitempty"` + LogStatement PostgresqlConfig9_6_LogStatement `protobuf:"varint,45,opt,name=log_statement,json=logStatement,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogStatement" json:"log_statement,omitempty"` + LogTempFiles *wrappers.Int64Value `protobuf:"bytes,46,opt,name=log_temp_files,json=logTempFiles,proto3" json:"log_temp_files,omitempty"` + SearchPath string `protobuf:"bytes,47,opt,name=search_path,json=searchPath,proto3" json:"search_path,omitempty"` + RowSecurity *wrappers.BoolValue `protobuf:"bytes,48,opt,name=row_security,json=rowSecurity,proto3" json:"row_security,omitempty"` + DefaultTransactionIsolation PostgresqlConfig9_6_TransactionIsolation `protobuf:"varint,49,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + StatementTimeout *wrappers.Int64Value `protobuf:"bytes,50,opt,name=statement_timeout,json=statementTimeout,proto3" json:"statement_timeout,omitempty"` + LockTimeout *wrappers.Int64Value `protobuf:"bytes,51,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + IdleInTransactionSessionTimeout *wrappers.Int64Value `protobuf:"bytes,52,opt,name=idle_in_transaction_session_timeout,json=idleInTransactionSessionTimeout,proto3" json:"idle_in_transaction_session_timeout,omitempty"` + ByteaOutput PostgresqlConfig9_6_ByteaOutput `protobuf:"varint,53,opt,name=bytea_output,json=byteaOutput,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_ByteaOutput" json:"bytea_output,omitempty"` + Xmlbinary PostgresqlConfig9_6_XmlBinary `protobuf:"varint,54,opt,name=xmlbinary,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_XmlBinary" json:"xmlbinary,omitempty"` + Xmloption PostgresqlConfig9_6_XmlOption `protobuf:"varint,55,opt,name=xmloption,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_XmlOption" json:"xmloption,omitempty"` + GinPendingListLimit *wrappers.Int64Value `protobuf:"bytes,56,opt,name=gin_pending_list_limit,json=ginPendingListLimit,proto3" json:"gin_pending_list_limit,omitempty"` + DeadlockTimeout *wrappers.Int64Value `protobuf:"bytes,57,opt,name=deadlock_timeout,json=deadlockTimeout,proto3" json:"deadlock_timeout,omitempty"` + MaxLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,58,opt,name=max_locks_per_transaction,json=maxLocksPerTransaction,proto3" json:"max_locks_per_transaction,omitempty"` + 
MaxPredLocksPerTransaction *wrappers.Int64Value `protobuf:"bytes,59,opt,name=max_pred_locks_per_transaction,json=maxPredLocksPerTransaction,proto3" json:"max_pred_locks_per_transaction,omitempty"` + ArrayNulls *wrappers.BoolValue `protobuf:"bytes,60,opt,name=array_nulls,json=arrayNulls,proto3" json:"array_nulls,omitempty"` + BackslashQuote PostgresqlConfig9_6_BackslashQuote `protobuf:"varint,61,opt,name=backslash_quote,json=backslashQuote,proto3,enum=yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_BackslashQuote" json:"backslash_quote,omitempty"` + DefaultWithOids *wrappers.BoolValue `protobuf:"bytes,62,opt,name=default_with_oids,json=defaultWithOids,proto3" json:"default_with_oids,omitempty"` + EscapeStringWarning *wrappers.BoolValue `protobuf:"bytes,63,opt,name=escape_string_warning,json=escapeStringWarning,proto3" json:"escape_string_warning,omitempty"` + LoCompatPrivileges *wrappers.BoolValue `protobuf:"bytes,64,opt,name=lo_compat_privileges,json=loCompatPrivileges,proto3" json:"lo_compat_privileges,omitempty"` + OperatorPrecedenceWarning *wrappers.BoolValue `protobuf:"bytes,65,opt,name=operator_precedence_warning,json=operatorPrecedenceWarning,proto3" json:"operator_precedence_warning,omitempty"` + QuoteAllIdentifiers *wrappers.BoolValue `protobuf:"bytes,66,opt,name=quote_all_identifiers,json=quoteAllIdentifiers,proto3" json:"quote_all_identifiers,omitempty"` + StandardConformingStrings *wrappers.BoolValue `protobuf:"bytes,67,opt,name=standard_conforming_strings,json=standardConformingStrings,proto3" json:"standard_conforming_strings,omitempty"` + SynchronizeSeqscans *wrappers.BoolValue `protobuf:"bytes,68,opt,name=synchronize_seqscans,json=synchronizeSeqscans,proto3" json:"synchronize_seqscans,omitempty"` + TransformNullEquals *wrappers.BoolValue `protobuf:"bytes,69,opt,name=transform_null_equals,json=transformNullEquals,proto3" json:"transform_null_equals,omitempty"` + ExitOnError *wrappers.BoolValue `protobuf:"bytes,70,opt,name=exit_on_error,json=exitOnError,proto3" json:"exit_on_error,omitempty"` + SeqPageCost *wrappers.DoubleValue `protobuf:"bytes,71,opt,name=seq_page_cost,json=seqPageCost,proto3" json:"seq_page_cost,omitempty"` + RandomPageCost *wrappers.DoubleValue `protobuf:"bytes,72,opt,name=random_page_cost,json=randomPageCost,proto3" json:"random_page_cost,omitempty"` + // This option has been removed in PostgreSQL 10. 
+ SqlInheritance *wrappers.BoolValue `protobuf:"bytes,73,opt,name=sql_inheritance,json=sqlInheritance,proto3" json:"sql_inheritance,omitempty"` + AutovacuumMaxWorkers *wrappers.Int64Value `protobuf:"bytes,74,opt,name=autovacuum_max_workers,json=autovacuumMaxWorkers,proto3" json:"autovacuum_max_workers,omitempty"` + AutovacuumVacuumCostDelay *wrappers.Int64Value `protobuf:"bytes,75,opt,name=autovacuum_vacuum_cost_delay,json=autovacuumVacuumCostDelay,proto3" json:"autovacuum_vacuum_cost_delay,omitempty"` + AutovacuumVacuumCostLimit *wrappers.Int64Value `protobuf:"bytes,76,opt,name=autovacuum_vacuum_cost_limit,json=autovacuumVacuumCostLimit,proto3" json:"autovacuum_vacuum_cost_limit,omitempty"` + AutovacuumNaptime *wrappers.Int64Value `protobuf:"bytes,77,opt,name=autovacuum_naptime,json=autovacuumNaptime,proto3" json:"autovacuum_naptime,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlConfig9_6) Reset() { *m = PostgresqlConfig9_6{} } +func (m *PostgresqlConfig9_6) String() string { return proto.CompactTextString(m) } +func (*PostgresqlConfig9_6) ProtoMessage() {} +func (*PostgresqlConfig9_6) Descriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{0} +} +func (m *PostgresqlConfig9_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlConfig9_6.Unmarshal(m, b) +} +func (m *PostgresqlConfig9_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlConfig9_6.Marshal(b, m, deterministic) +} +func (dst *PostgresqlConfig9_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlConfig9_6.Merge(dst, src) +} +func (m *PostgresqlConfig9_6) XXX_Size() int { + return xxx_messageInfo_PostgresqlConfig9_6.Size(m) +} +func (m *PostgresqlConfig9_6) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlConfig9_6.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlConfig9_6 proto.InternalMessageInfo + +func (m *PostgresqlConfig9_6) GetMaxConnections() *wrappers.Int64Value { + if m != nil { + return m.MaxConnections + } + return nil +} + +func (m *PostgresqlConfig9_6) GetSharedBuffers() *wrappers.Int64Value { + if m != nil { + return m.SharedBuffers + } + return nil +} + +func (m *PostgresqlConfig9_6) GetTempBuffers() *wrappers.Int64Value { + if m != nil { + return m.TempBuffers + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMaxPreparedTransactions() *wrappers.Int64Value { + if m != nil { + return m.MaxPreparedTransactions + } + return nil +} + +func (m *PostgresqlConfig9_6) GetWorkMem() *wrappers.Int64Value { + if m != nil { + return m.WorkMem + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMaintenanceWorkMem() *wrappers.Int64Value { + if m != nil { + return m.MaintenanceWorkMem + } + return nil +} + +func (m *PostgresqlConfig9_6) GetReplacementSortTuples() *wrappers.Int64Value { + if m != nil { + return m.ReplacementSortTuples + } + return nil +} + +func (m *PostgresqlConfig9_6) GetAutovacuumWorkMem() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumWorkMem + } + return nil +} + +func (m *PostgresqlConfig9_6) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func (m *PostgresqlConfig9_6) GetVacuumCostDelay() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostDelay + } + return nil +} + +func (m *PostgresqlConfig9_6) GetVacuumCostPageHit() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageHit + } + return nil 
+} + +func (m *PostgresqlConfig9_6) GetVacuumCostPageMiss() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageMiss + } + return nil +} + +func (m *PostgresqlConfig9_6) GetVacuumCostPageDirty() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostPageDirty + } + return nil +} + +func (m *PostgresqlConfig9_6) GetVacuumCostLimit() *wrappers.Int64Value { + if m != nil { + return m.VacuumCostLimit + } + return nil +} + +func (m *PostgresqlConfig9_6) GetBgwriterDelay() *wrappers.Int64Value { + if m != nil { + return m.BgwriterDelay + } + return nil +} + +func (m *PostgresqlConfig9_6) GetBgwriterLruMaxpages() *wrappers.Int64Value { + if m != nil { + return m.BgwriterLruMaxpages + } + return nil +} + +func (m *PostgresqlConfig9_6) GetBgwriterLruMultiplier() *wrappers.DoubleValue { + if m != nil { + return m.BgwriterLruMultiplier + } + return nil +} + +func (m *PostgresqlConfig9_6) GetBgwriterFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BgwriterFlushAfter + } + return nil +} + +func (m *PostgresqlConfig9_6) GetBackendFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.BackendFlushAfter + } + return nil +} + +func (m *PostgresqlConfig9_6) GetOldSnapshotThreshold() *wrappers.Int64Value { + if m != nil { + return m.OldSnapshotThreshold + } + return nil +} + +func (m *PostgresqlConfig9_6) GetWalLevel() PostgresqlConfig9_6_WalLevel { + if m != nil { + return m.WalLevel + } + return PostgresqlConfig9_6_WAL_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetSynchronousCommit() PostgresqlConfig9_6_SynchronousCommit { + if m != nil { + return m.SynchronousCommit + } + return PostgresqlConfig9_6_SYNCHRONOUS_COMMIT_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetCheckpointTimeout() *wrappers.Int64Value { + if m != nil { + return m.CheckpointTimeout + } + return nil +} + +func (m *PostgresqlConfig9_6) GetCheckpointCompletionTarget() *wrappers.DoubleValue { + if m != nil { + return m.CheckpointCompletionTarget + } + return nil +} + +func (m *PostgresqlConfig9_6) GetCheckpointFlushAfter() *wrappers.Int64Value { + if m != nil { + return m.CheckpointFlushAfter + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMaxWalSize() *wrappers.Int64Value { + if m != nil { + return m.MaxWalSize + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMinWalSize() *wrappers.Int64Value { + if m != nil { + return m.MinWalSize + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMaxStandbyStreamingDelay() *wrappers.Int64Value { + if m != nil { + return m.MaxStandbyStreamingDelay + } + return nil +} + +func (m *PostgresqlConfig9_6) GetDefaultStatisticsTarget() *wrappers.Int64Value { + if m != nil { + return m.DefaultStatisticsTarget + } + return nil +} + +func (m *PostgresqlConfig9_6) GetConstraintExclusion() PostgresqlConfig9_6_ConstraintExclusion { + if m != nil { + return m.ConstraintExclusion + } + return PostgresqlConfig9_6_CONSTRAINT_EXCLUSION_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetCursorTupleFraction() *wrappers.DoubleValue { + if m != nil { + return m.CursorTupleFraction + } + return nil +} + +func (m *PostgresqlConfig9_6) GetFromCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.FromCollapseLimit + } + return nil +} + +func (m *PostgresqlConfig9_6) GetJoinCollapseLimit() *wrappers.Int64Value { + if m != nil { + return m.JoinCollapseLimit + } + return nil +} + +func (m *PostgresqlConfig9_6) GetForceParallelMode() PostgresqlConfig9_6_ForceParallelMode { + if m != nil { + return m.ForceParallelMode + } + return 
PostgresqlConfig9_6_FORCE_PARALLEL_MODE_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetClientMinMessages() PostgresqlConfig9_6_LogLevel { + if m != nil { + return m.ClientMinMessages + } + return PostgresqlConfig9_6_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetLogMinMessages() PostgresqlConfig9_6_LogLevel { + if m != nil { + return m.LogMinMessages + } + return PostgresqlConfig9_6_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetLogMinErrorStatement() PostgresqlConfig9_6_LogLevel { + if m != nil { + return m.LogMinErrorStatement + } + return PostgresqlConfig9_6_LOG_LEVEL_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLogCheckpoints() *wrappers.BoolValue { + if m != nil { + return m.LogCheckpoints + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLogConnections() *wrappers.BoolValue { + if m != nil { + return m.LogConnections + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLogDisconnections() *wrappers.BoolValue { + if m != nil { + return m.LogDisconnections + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLogDuration() *wrappers.BoolValue { + if m != nil { + return m.LogDuration + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLogErrorVerbosity() PostgresqlConfig9_6_LogErrorVerbosity { + if m != nil { + return m.LogErrorVerbosity + } + return PostgresqlConfig9_6_LOG_ERROR_VERBOSITY_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetLogLockWaits() *wrappers.BoolValue { + if m != nil { + return m.LogLockWaits + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLogStatement() PostgresqlConfig9_6_LogStatement { + if m != nil { + return m.LogStatement + } + return PostgresqlConfig9_6_LOG_STATEMENT_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetLogTempFiles() *wrappers.Int64Value { + if m != nil { + return m.LogTempFiles + } + return nil +} + +func (m *PostgresqlConfig9_6) GetSearchPath() string { + if m != nil { + return m.SearchPath + } + return "" +} + +func (m *PostgresqlConfig9_6) GetRowSecurity() *wrappers.BoolValue { + if m != nil { + return m.RowSecurity + } + return nil +} + +func (m *PostgresqlConfig9_6) GetDefaultTransactionIsolation() PostgresqlConfig9_6_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return PostgresqlConfig9_6_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetStatementTimeout() *wrappers.Int64Value { + if m != nil { + return m.StatementTimeout + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *PostgresqlConfig9_6) GetIdleInTransactionSessionTimeout() *wrappers.Int64Value { + if m != nil { + return m.IdleInTransactionSessionTimeout + } + return nil +} + +func (m *PostgresqlConfig9_6) GetByteaOutput() PostgresqlConfig9_6_ByteaOutput { + if m != nil { + return m.ByteaOutput + } + return PostgresqlConfig9_6_BYTEA_OUTPUT_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetXmlbinary() PostgresqlConfig9_6_XmlBinary { + if m != nil { + return m.Xmlbinary + } + return PostgresqlConfig9_6_XML_BINARY_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetXmloption() PostgresqlConfig9_6_XmlOption { + if m != nil { + return m.Xmloption + } + return PostgresqlConfig9_6_XML_OPTION_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetGinPendingListLimit() *wrappers.Int64Value { + if m != nil { + 
return m.GinPendingListLimit + } + return nil +} + +func (m *PostgresqlConfig9_6) GetDeadlockTimeout() *wrappers.Int64Value { + if m != nil { + return m.DeadlockTimeout + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMaxLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxLocksPerTransaction + } + return nil +} + +func (m *PostgresqlConfig9_6) GetMaxPredLocksPerTransaction() *wrappers.Int64Value { + if m != nil { + return m.MaxPredLocksPerTransaction + } + return nil +} + +func (m *PostgresqlConfig9_6) GetArrayNulls() *wrappers.BoolValue { + if m != nil { + return m.ArrayNulls + } + return nil +} + +func (m *PostgresqlConfig9_6) GetBackslashQuote() PostgresqlConfig9_6_BackslashQuote { + if m != nil { + return m.BackslashQuote + } + return PostgresqlConfig9_6_BACKSLASH_QUOTE_UNSPECIFIED +} + +func (m *PostgresqlConfig9_6) GetDefaultWithOids() *wrappers.BoolValue { + if m != nil { + return m.DefaultWithOids + } + return nil +} + +func (m *PostgresqlConfig9_6) GetEscapeStringWarning() *wrappers.BoolValue { + if m != nil { + return m.EscapeStringWarning + } + return nil +} + +func (m *PostgresqlConfig9_6) GetLoCompatPrivileges() *wrappers.BoolValue { + if m != nil { + return m.LoCompatPrivileges + } + return nil +} + +func (m *PostgresqlConfig9_6) GetOperatorPrecedenceWarning() *wrappers.BoolValue { + if m != nil { + return m.OperatorPrecedenceWarning + } + return nil +} + +func (m *PostgresqlConfig9_6) GetQuoteAllIdentifiers() *wrappers.BoolValue { + if m != nil { + return m.QuoteAllIdentifiers + } + return nil +} + +func (m *PostgresqlConfig9_6) GetStandardConformingStrings() *wrappers.BoolValue { + if m != nil { + return m.StandardConformingStrings + } + return nil +} + +func (m *PostgresqlConfig9_6) GetSynchronizeSeqscans() *wrappers.BoolValue { + if m != nil { + return m.SynchronizeSeqscans + } + return nil +} + +func (m *PostgresqlConfig9_6) GetTransformNullEquals() *wrappers.BoolValue { + if m != nil { + return m.TransformNullEquals + } + return nil +} + +func (m *PostgresqlConfig9_6) GetExitOnError() *wrappers.BoolValue { + if m != nil { + return m.ExitOnError + } + return nil +} + +func (m *PostgresqlConfig9_6) GetSeqPageCost() *wrappers.DoubleValue { + if m != nil { + return m.SeqPageCost + } + return nil +} + +func (m *PostgresqlConfig9_6) GetRandomPageCost() *wrappers.DoubleValue { + if m != nil { + return m.RandomPageCost + } + return nil +} + +func (m *PostgresqlConfig9_6) GetSqlInheritance() *wrappers.BoolValue { + if m != nil { + return m.SqlInheritance + } + return nil +} + +func (m *PostgresqlConfig9_6) GetAutovacuumMaxWorkers() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumMaxWorkers + } + return nil +} + +func (m *PostgresqlConfig9_6) GetAutovacuumVacuumCostDelay() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumVacuumCostDelay + } + return nil +} + +func (m *PostgresqlConfig9_6) GetAutovacuumVacuumCostLimit() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumVacuumCostLimit + } + return nil +} + +func (m *PostgresqlConfig9_6) GetAutovacuumNaptime() *wrappers.Int64Value { + if m != nil { + return m.AutovacuumNaptime + } + return nil +} + +type PostgresqlConfigSet9_6 struct { + // Effective settings for a PostgreSQL 9.6 cluster (a combination of settings defined + // in [user_config] and [default_config]). + EffectiveConfig *PostgresqlConfig9_6 `protobuf:"bytes,1,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` + // User-defined settings for a PostgreSQL 9.6 cluster. 
+ UserConfig *PostgresqlConfig9_6 `protobuf:"bytes,2,opt,name=user_config,json=userConfig,proto3" json:"user_config,omitempty"` + // Default configuration for a PostgreSQL 9.6 cluster. + DefaultConfig *PostgresqlConfig9_6 `protobuf:"bytes,3,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PostgresqlConfigSet9_6) Reset() { *m = PostgresqlConfigSet9_6{} } +func (m *PostgresqlConfigSet9_6) String() string { return proto.CompactTextString(m) } +func (*PostgresqlConfigSet9_6) ProtoMessage() {} +func (*PostgresqlConfigSet9_6) Descriptor() ([]byte, []int) { + return fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a, []int{1} +} +func (m *PostgresqlConfigSet9_6) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PostgresqlConfigSet9_6.Unmarshal(m, b) +} +func (m *PostgresqlConfigSet9_6) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PostgresqlConfigSet9_6.Marshal(b, m, deterministic) +} +func (dst *PostgresqlConfigSet9_6) XXX_Merge(src proto.Message) { + xxx_messageInfo_PostgresqlConfigSet9_6.Merge(dst, src) +} +func (m *PostgresqlConfigSet9_6) XXX_Size() int { + return xxx_messageInfo_PostgresqlConfigSet9_6.Size(m) +} +func (m *PostgresqlConfigSet9_6) XXX_DiscardUnknown() { + xxx_messageInfo_PostgresqlConfigSet9_6.DiscardUnknown(m) +} + +var xxx_messageInfo_PostgresqlConfigSet9_6 proto.InternalMessageInfo + +func (m *PostgresqlConfigSet9_6) GetEffectiveConfig() *PostgresqlConfig9_6 { + if m != nil { + return m.EffectiveConfig + } + return nil +} + +func (m *PostgresqlConfigSet9_6) GetUserConfig() *PostgresqlConfig9_6 { + if m != nil { + return m.UserConfig + } + return nil +} + +func (m *PostgresqlConfigSet9_6) GetDefaultConfig() *PostgresqlConfig9_6 { + if m != nil { + return m.DefaultConfig + } + return nil +} + +func init() { + proto.RegisterType((*PostgresqlConfig9_6)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6") + proto.RegisterType((*PostgresqlConfigSet9_6)(nil), "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet9_6") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_WalLevel", PostgresqlConfig9_6_WalLevel_name, PostgresqlConfig9_6_WalLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_SynchronousCommit", PostgresqlConfig9_6_SynchronousCommit_name, PostgresqlConfig9_6_SynchronousCommit_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_ConstraintExclusion", PostgresqlConfig9_6_ConstraintExclusion_name, PostgresqlConfig9_6_ConstraintExclusion_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_ForceParallelMode", PostgresqlConfig9_6_ForceParallelMode_name, PostgresqlConfig9_6_ForceParallelMode_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogLevel", PostgresqlConfig9_6_LogLevel_name, PostgresqlConfig9_6_LogLevel_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogErrorVerbosity", PostgresqlConfig9_6_LogErrorVerbosity_name, PostgresqlConfig9_6_LogErrorVerbosity_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_LogStatement", PostgresqlConfig9_6_LogStatement_name, PostgresqlConfig9_6_LogStatement_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_TransactionIsolation", 
PostgresqlConfig9_6_TransactionIsolation_name, PostgresqlConfig9_6_TransactionIsolation_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_ByteaOutput", PostgresqlConfig9_6_ByteaOutput_name, PostgresqlConfig9_6_ByteaOutput_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_XmlBinary", PostgresqlConfig9_6_XmlBinary_name, PostgresqlConfig9_6_XmlBinary_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_XmlOption", PostgresqlConfig9_6_XmlOption_name, PostgresqlConfig9_6_XmlOption_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig9_6_BackslashQuote", PostgresqlConfig9_6_BackslashQuote_name, PostgresqlConfig9_6_BackslashQuote_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/config/postgresql9_6.proto", fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a) +} + +var fileDescriptor_postgresql9_6_5d4e3f8156d9ce8a = []byte{ + // 2921 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x9a, 0x4b, 0x77, 0xdb, 0x46, + 0x96, 0xc7, 0x87, 0x92, 0xe2, 0xc8, 0x25, 0x4b, 0x02, 0x8b, 0x7a, 0x40, 0xf2, 0x33, 0x4c, 0x9c, + 0x71, 0x32, 0xa3, 0x07, 0x6d, 0x45, 0xb1, 0x93, 0x89, 0x27, 0x20, 0x08, 0xca, 0x9c, 0x80, 0x04, + 0x03, 0x40, 0x96, 0x9d, 0x9c, 0x04, 0x53, 0x04, 0x8a, 0x24, 0x22, 0x10, 0x45, 0xa1, 0x40, 0x3d, + 0xdc, 0xbd, 0xe9, 0x6d, 0x2f, 0xbb, 0x57, 0xdd, 0x1f, 0xa5, 0xf7, 0x7d, 0xf4, 0x0d, 0xfa, 0x23, + 0xf4, 0xe9, 0xcf, 0xe0, 0x55, 0x9f, 0xc2, 0x83, 0x00, 0x45, 0x3a, 0x60, 0xa2, 0xec, 0xa4, 0x5b, + 0xf7, 0xff, 0xbb, 0xf5, 0xb8, 0x55, 0xa8, 0x5b, 0x87, 0xe0, 0xd9, 0x05, 0x72, 0x2d, 0x7c, 0xbe, + 0x63, 0x3a, 0x64, 0x60, 0xed, 0xf4, 0xac, 0xd6, 0x4e, 0x9f, 0x50, 0xbf, 0xe3, 0x61, 0x7a, 0xe2, + 0xec, 0x9c, 0x96, 0x76, 0x4c, 0xe2, 0xb6, 0xed, 0x4e, 0xca, 0xf8, 0xcc, 0xd8, 0xdf, 0xee, 0x7b, + 0xc4, 0x27, 0xf0, 0x61, 0x28, 0xdd, 0x0e, 0xa4, 0xdb, 0x3d, 0xab, 0xb5, 0x9d, 0x78, 0x6d, 0x9f, + 0x96, 0xb6, 0x43, 0xe9, 0xe6, 0xbd, 0x0e, 0x21, 0x1d, 0x07, 0xef, 0x04, 0xa2, 0xd6, 0xa0, 0xbd, + 0x73, 0xe6, 0xa1, 0x7e, 0x1f, 0x7b, 0x34, 0xc4, 0x6c, 0xde, 0x1d, 0xe9, 0xc1, 0x29, 0x72, 0x6c, + 0x0b, 0xf9, 0x36, 0x71, 0xc3, 0xe6, 0xe2, 0xdf, 0xca, 0xa0, 0xd0, 0x1c, 0x72, 0xc5, 0x80, 0xf9, + 0xcc, 0xd8, 0x87, 0x15, 0xb0, 0xdc, 0x43, 0xe7, 0x86, 0x49, 0x5c, 0x17, 0x9b, 0xcc, 0x9f, 0xf2, + 0xb9, 0x07, 0xb9, 0x47, 0x0b, 0x8f, 0x6f, 0x6f, 0x87, 0x01, 0xb7, 0xe3, 0x80, 0xdb, 0x35, 0xd7, + 0xdf, 0xdf, 0x7b, 0x89, 0x9c, 0x01, 0x56, 0x97, 0x7a, 0xe8, 0x5c, 0x4c, 0x24, 0xb0, 0x0c, 0x96, + 0x68, 0x17, 0x79, 0xd8, 0x32, 0x5a, 0x83, 0x76, 0x1b, 0x7b, 0x94, 0x9f, 0xc9, 0x86, 0x2c, 0x86, + 0x92, 0x72, 0xa8, 0x80, 0xcf, 0xc1, 0x2d, 0x1f, 0xf7, 0xfa, 0x43, 0xc2, 0x6c, 0x36, 0x61, 0x81, + 0x09, 0x62, 0xfd, 0x11, 0xd8, 0x60, 0x23, 0xe9, 0x7b, 0xb8, 0x1f, 0xf4, 0xc4, 0xf7, 0x90, 0x4b, + 0x51, 0x34, 0xa6, 0xb9, 0x6c, 0xd8, 0x7a, 0x0f, 0x9d, 0x37, 0x23, 0xb1, 0x9e, 0xd2, 0xc2, 0x7d, + 0x30, 0x7f, 0x46, 0xbc, 0x63, 0xa3, 0x87, 0x7b, 0xfc, 0x7b, 0xd9, 0x9c, 0xf7, 0x99, 0x73, 0x1d, + 0xf7, 0x60, 0x1d, 0xac, 0xf4, 0x90, 0xed, 0xfa, 0xd8, 0x45, 0xae, 0x89, 0x8d, 0x21, 0xe3, 0x46, + 0x36, 0x03, 0xa6, 0x84, 0x47, 0x11, 0x4e, 0x03, 0xeb, 0x1e, 0xee, 0x3b, 0xc8, 0xc4, 0x3d, 0xec, + 0xfa, 0x06, 0x25, 0x9e, 0x6f, 0xf8, 0x83, 0xbe, 0x83, 0x29, 0xff, 0x7e, 0x36, 0x71, 0x35, 0xa5, + 0xd5, 0x88, 0xe7, 0xeb, 0x81, 0x12, 0x7e, 0x03, 0x0a, 0x68, 0xe0, 0x93, 0x53, 0x64, 0x0e, 0x06, + 0xbd, 0xa4, 0x8b, 0xf3, 0xd9, 0xc0, 0x7c, 0xa2, 0x8b, 0x7b, 
0x28, 0x82, 0xe5, 0x60, 0x05, 0xdb, + 0xb6, 0x83, 0x0d, 0xc7, 0xee, 0xd9, 0x3e, 0x7f, 0x73, 0x8a, 0x34, 0x60, 0x9a, 0xaa, 0xed, 0x60, + 0x99, 0x29, 0xe0, 0x01, 0xc8, 0x47, 0xbd, 0x31, 0x09, 0xf5, 0x0d, 0x0b, 0x3b, 0xe8, 0x82, 0x07, + 0xd9, 0x98, 0xe5, 0x50, 0x25, 0x12, 0xea, 0x57, 0x98, 0x06, 0xca, 0x60, 0x25, 0x0d, 0xea, 0xa3, + 0x0e, 0x36, 0xba, 0xb6, 0xcf, 0x2f, 0x4c, 0x31, 0xb6, 0x84, 0xd5, 0x44, 0x1d, 0xfc, 0xc2, 0xf6, + 0x61, 0x03, 0xac, 0x8e, 0xd1, 0x7a, 0x36, 0xa5, 0xfc, 0xad, 0x29, 0x56, 0x73, 0x14, 0x57, 0xb7, + 0x29, 0x85, 0x4d, 0xb0, 0x36, 0xc6, 0xb3, 0x6c, 0xcf, 0xbf, 0xe0, 0x17, 0xb3, 0x81, 0x85, 0x51, + 0x60, 0x85, 0xe9, 0xae, 0x4e, 0x5c, 0x38, 0xff, 0x4b, 0xbf, 0x68, 0xe2, 0xc2, 0x15, 0x68, 0x82, + 0xa5, 0x56, 0xe7, 0xcc, 0xb3, 0x7d, 0xec, 0x45, 0xd3, 0xbf, 0x9c, 0x49, 0x29, 0xdf, 0x7a, 0x7b, + 0x59, 0x9a, 0x2f, 0xed, 0x6e, 0x95, 0x76, 0x77, 0x77, 0x77, 0xd5, 0xc5, 0x18, 0x10, 0x2e, 0x85, + 0x02, 0x56, 0x87, 0x44, 0xc7, 0x1b, 0x18, 0x3d, 0x74, 0xce, 0x06, 0x4c, 0x79, 0x6e, 0x8a, 0xb1, + 0xc6, 0x4a, 0xd9, 0x1b, 0xd4, 0x23, 0x1d, 0xd4, 0xc1, 0xfa, 0x28, 0x70, 0xe0, 0xf8, 0x76, 0xdf, + 0xb1, 0xb1, 0xc7, 0xe7, 0x03, 0xe4, 0x9d, 0x31, 0x64, 0x85, 0x0c, 0x5a, 0x0e, 0x8e, 0x36, 0x43, + 0x9a, 0x39, 0x94, 0xc2, 0xd7, 0x60, 0x65, 0x48, 0x6d, 0x3b, 0x03, 0xda, 0x35, 0x50, 0xdb, 0xc7, + 0x1e, 0x0f, 0xb3, 0x87, 0x0f, 0xde, 0x5e, 0x96, 0x6e, 0xec, 0x6e, 0x3d, 0xde, 0xdd, 0x7b, 0xaa, + 0xc2, 0x18, 0x52, 0x65, 0x0c, 0x81, 0x21, 0xe0, 0x11, 0x28, 0xb4, 0x90, 0x79, 0x8c, 0x5d, 0x6b, + 0x84, 0x5c, 0xf8, 0x65, 0xe4, 0x7c, 0xc4, 0x48, 0x81, 0x5b, 0x60, 0x8d, 0x38, 0x96, 0x41, 0x5d, + 0xd4, 0xa7, 0x5d, 0xe2, 0x1b, 0x7e, 0xd7, 0xc3, 0xb4, 0x4b, 0x1c, 0x8b, 0x5f, 0xc9, 0x66, 0x2f, + 0xbf, 0xbd, 0x2c, 0x2d, 0x6c, 0x95, 0xb6, 0x9e, 0xee, 0xef, 0xed, 0x06, 0xeb, 0xb6, 0x42, 0x1c, + 0x4b, 0x8b, 0x50, 0x7a, 0x4c, 0x82, 0xff, 0x0f, 0x6e, 0x9e, 0x21, 0xc7, 0x70, 0xf0, 0x29, 0x76, + 0xf8, 0xd5, 0x07, 0xb9, 0x47, 0x4b, 0x8f, 0xc5, 0xed, 0xa9, 0xbe, 0x5a, 0xdb, 0x13, 0x3e, 0x39, + 0xdb, 0x47, 0xc8, 0x91, 0x19, 0x4a, 0x9d, 0x3f, 0x8b, 0xfe, 0x82, 0xbf, 0x03, 0x90, 0x5e, 0xb8, + 0x66, 0xd7, 0x23, 0x2e, 0x19, 0x50, 0xc3, 0x24, 0x3d, 0x96, 0xbc, 0x6b, 0x41, 0x28, 0xf9, 0x1a, + 0xa1, 0xb4, 0x04, 0x2a, 0x06, 0x4c, 0x35, 0x4f, 0xaf, 0x9a, 0xe0, 0x8f, 0x00, 0x9a, 0x5d, 0x6c, + 0x1e, 0xf7, 0x89, 0xed, 0xfa, 0x86, 0x6f, 0xf7, 0x30, 0x19, 0xf8, 0xfc, 0x7a, 0xf6, 0xf4, 0xc1, + 0xb7, 0x97, 0xa5, 0xa5, 0x27, 0x6c, 0xe2, 0x92, 0x19, 0xcc, 0x27, 0x28, 0x3d, 0x24, 0xc1, 0x1f, + 0xc1, 0x9d, 0x14, 0xdf, 0x24, 0xbd, 0xbe, 0x83, 0xd9, 0x97, 0xc5, 0xf0, 0x91, 0xd7, 0xc1, 0x3e, + 0xcf, 0x4f, 0x91, 0xb1, 0x9b, 0x09, 0x41, 0x1c, 0x02, 0xf4, 0x40, 0x0f, 0x7f, 0x00, 0x6b, 0x29, + 0x7e, 0x3a, 0xbd, 0x36, 0x7e, 0x59, 0x7a, 0xad, 0x24, 0x98, 0x54, 0x86, 0x7d, 0x05, 0x6e, 0xb1, + 0xef, 0x2a, 0xcb, 0x00, 0x6a, 0xbf, 0xc1, 0xfc, 0x66, 0xf6, 0x9e, 0x05, 0x3d, 0x74, 0x7e, 0x84, + 0x1c, 0xcd, 0x7e, 0x83, 0x03, 0xb9, 0xed, 0x26, 0xf2, 0xdb, 0xd3, 0xc8, 0x6d, 0x37, 0x96, 0x7f, + 0x07, 0x6e, 0xb3, 0xe8, 0xd4, 0x47, 0xae, 0xd5, 0xba, 0x30, 0xa8, 0xef, 0x61, 0xd4, 0xb3, 0xdd, + 0x4e, 0x74, 0x32, 0xdd, 0xc9, 0xa6, 0xf1, 0x3d, 0x74, 0xae, 0x85, 0x72, 0x2d, 0x56, 0x87, 0xc7, + 0xd2, 0x11, 0xd8, 0xb0, 0x70, 0x1b, 0x0d, 0x1c, 0x9f, 0xf1, 0x7d, 0x9b, 0xfa, 0xb6, 0x49, 0xe3, + 0x55, 0xb9, 0x3b, 0xc5, 0x8d, 0x21, 0x52, 0x6b, 0x43, 0x71, 0xb4, 0x22, 0x7f, 0xc8, 0x81, 0x15, + 0x93, 0xb8, 0xd4, 0xf7, 0xd8, 0x67, 0xdc, 0xc0, 0xe7, 0xa6, 0x33, 0xa0, 0x36, 0x71, 0xf9, 0x7b, + 0x41, 0x46, 0x37, 0xae, 0x91, 0xd1, 0xe2, 0x10, 0x2b, 0xc5, 0x54, 0xb5, 0x60, 0x8e, 
0x1b, 0x61, + 0x13, 0xac, 0x9a, 0x03, 0x8f, 0x12, 0x2f, 0xbc, 0x24, 0x18, 0x6d, 0x2f, 0xbc, 0xcf, 0xf0, 0xf7, + 0xa7, 0x48, 0xb7, 0x42, 0x28, 0x0d, 0x2e, 0x09, 0xd5, 0x48, 0x08, 0x7f, 0x00, 0x85, 0xb6, 0x47, + 0xd8, 0xe7, 0xc5, 0x71, 0x50, 0x9f, 0xc6, 0x9f, 0xf8, 0x07, 0xd9, 0x49, 0xc6, 0xbd, 0xbd, 0x2c, + 0xdd, 0x2a, 0x6d, 0x3d, 0x2e, 0xed, 0x7d, 0xbe, 0xf7, 0xf4, 0xc9, 0xfe, 0xde, 0xe7, 0x6a, 0x9e, + 0x91, 0xc4, 0x08, 0x14, 0x7e, 0x76, 0x7e, 0x00, 0x85, 0x9f, 0x88, 0xed, 0x5e, 0xc5, 0x7f, 0xf0, + 0xab, 0xf0, 0x8c, 0x34, 0x8a, 0xff, 0x3d, 0x28, 0xb4, 0x89, 0x67, 0x62, 0xa3, 0x8f, 0x3c, 0xe4, + 0x38, 0xd8, 0x31, 0x7a, 0xc4, 0xc2, 0x7c, 0xf1, 0xda, 0x67, 0x4c, 0x95, 0x51, 0x9b, 0x11, 0xb4, + 0x4e, 0x2c, 0xac, 0xe6, 0xdb, 0x57, 0x4d, 0x90, 0x82, 0x82, 0xe9, 0xd8, 0xec, 0xde, 0xc6, 0x36, + 0x43, 0x0f, 0x53, 0x1a, 0x7c, 0xff, 0x3e, 0xbc, 0xf6, 0x61, 0x2a, 0x93, 0x4e, 0x78, 0x98, 0xe6, + 0x43, 0x7e, 0xdd, 0x76, 0xeb, 0x11, 0x1d, 0xf6, 0x00, 0xe7, 0x90, 0xce, 0x68, 0xc4, 0x8f, 0x7e, + 0xbb, 0x88, 0x4b, 0x0e, 0xe9, 0xa4, 0xc3, 0xbd, 0x01, 0xeb, 0x71, 0x38, 0xec, 0x79, 0xc4, 0x0b, + 0x36, 0x55, 0x70, 0xe1, 0xe4, 0x1f, 0xfe, 0x76, 0x51, 0x57, 0xc2, 0xa8, 0x12, 0x8b, 0xa0, 0xc5, + 0x01, 0xe0, 0x2b, 0xb0, 0x19, 0xc7, 0xb6, 0x06, 0x5e, 0x50, 0xf8, 0xa4, 0xc2, 0x7f, 0x3c, 0xc5, + 0x5e, 0x0e, 0xb1, 0x95, 0x48, 0x9c, 0x90, 0x45, 0xb0, 0xcc, 0xc8, 0xc9, 0xd1, 0x48, 0xf9, 0xff, + 0x0c, 0x70, 0x9b, 0x63, 0xb8, 0x32, 0x21, 0x4e, 0x54, 0x1f, 0x39, 0xa4, 0x23, 0x26, 0x8a, 0x21, + 0x24, 0x55, 0x65, 0x3d, 0x9a, 0x0e, 0x92, 0x2a, 0xb2, 0x6a, 0x00, 0x32, 0x88, 0x65, 0xd3, 0x34, + 0xe7, 0x93, 0x4c, 0x4e, 0xde, 0x21, 0x9d, 0xca, 0x88, 0x88, 0x1d, 0xca, 0x01, 0x2a, 0x1a, 0x2d, + 0xff, 0x69, 0x26, 0x64, 0x81, 0x41, 0x22, 0x77, 0xb6, 0x97, 0x98, 0x3c, 0x5c, 0xe5, 0x53, 0xec, + 0xb5, 0x08, 0xb5, 0xfd, 0x0b, 0xfe, 0xbf, 0xae, 0xbd, 0x97, 0x64, 0xd2, 0x09, 0x16, 0xf6, 0x65, + 0xcc, 0x0c, 0x3a, 0x3f, 0x6a, 0x82, 0x5f, 0x03, 0x36, 0x33, 0x86, 0x43, 0xcc, 0x63, 0xe3, 0x0c, + 0xd9, 0x3e, 0xe5, 0xff, 0x3b, 0xb3, 0xfb, 0x6c, 0xb8, 0x32, 0x31, 0x8f, 0x8f, 0x98, 0x3f, 0x74, + 0xc0, 0x22, 0x23, 0x24, 0x09, 0xb2, 0x15, 0xf4, 0xfc, 0xe0, 0x7a, 0x3d, 0x1f, 0xe6, 0x4c, 0x10, + 0x2d, 0xc9, 0x20, 0x21, 0xec, 0xef, 0xb0, 0x34, 0xa2, 0xfc, 0x76, 0x76, 0x3e, 0x32, 0x84, 0x1e, + 0x15, 0x46, 0x14, 0xde, 0x07, 0x0b, 0x14, 0x23, 0xcf, 0xec, 0x1a, 0x7d, 0xe4, 0x77, 0xf9, 0x9d, + 0x07, 0xb9, 0x47, 0x37, 0x55, 0x10, 0x9a, 0x9a, 0xc8, 0xef, 0xb2, 0x05, 0xf5, 0xc8, 0x99, 0x41, + 0xb1, 0x39, 0xf0, 0xd8, 0x52, 0xec, 0x66, 0x2f, 0xa8, 0x47, 0xce, 0xb4, 0xc8, 0x1d, 0xfe, 0x39, + 0x07, 0xee, 0xc6, 0x9f, 0xc2, 0x54, 0xdd, 0x6c, 0xd8, 0x94, 0x38, 0x61, 0x86, 0x94, 0x82, 0x19, + 0x52, 0xae, 0x31, 0x43, 0xa9, 0x9a, 0xba, 0x16, 0x63, 0xd5, 0xdb, 0x51, 0xd4, 0x49, 0x8d, 0xf0, + 0x05, 0xc8, 0x0f, 0x97, 0x68, 0x78, 0x2f, 0x7b, 0x9c, 0x3d, 0x77, 0xdc, 0x50, 0x15, 0x5f, 0xc1, + 0x9e, 0xb3, 0x7c, 0x37, 0x8f, 0x87, 0x90, 0x27, 0x53, 0xbc, 0x2d, 0x30, 0x41, 0xac, 0xb7, 0xc1, + 0x87, 0xb6, 0xe5, 0x60, 0xc3, 0x76, 0x47, 0xa6, 0x87, 0x62, 0x4a, 0x83, 0x8b, 0x5c, 0x84, 0xdd, + 0xcb, 0xc6, 0xde, 0x67, 0x9c, 0x9a, 0x9b, 0x1a, 0xaf, 0x16, 0x42, 0x92, 0x50, 0xb7, 0x5a, 0x17, + 0x3e, 0x46, 0x06, 0x19, 0xf8, 0xfd, 0x81, 0xcf, 0x7f, 0x16, 0x4c, 0x7c, 0xf5, 0x1a, 0x13, 0x5f, + 0x66, 0x38, 0x25, 0xa0, 0xa9, 0x0b, 0xad, 0xe4, 0x1f, 0xd8, 0x02, 0x37, 0xcf, 0x7b, 0x4e, 0xcb, + 0x76, 0x91, 0x77, 0xc1, 0xef, 0x07, 0x71, 0x2a, 0xd7, 0x88, 0xf3, 0xaa, 0xe7, 0x94, 0x03, 0x96, + 0x9a, 0x60, 0xa3, 0x18, 0xa4, 0x1f, 0x24, 0xd1, 0xe7, 0xbf, 0x45, 0x0c, 0x25, 0x60, 0xa9, 0x09, + 0x96, 0xd5, 
0xd2, 0x1d, 0xdb, 0x35, 0xfa, 0xd8, 0xb5, 0xd8, 0xcd, 0xd0, 0xb1, 0x87, 0xe5, 0xef, + 0xd3, 0x29, 0xea, 0xcb, 0x8e, 0xed, 0x36, 0x43, 0xa5, 0x6c, 0xc7, 0x25, 0x70, 0x15, 0x70, 0x16, + 0x46, 0xd6, 0x48, 0xce, 0x3c, 0x9b, 0xa2, 0x94, 0x8e, 0x45, 0xf1, 0x62, 0xbe, 0x0c, 0xdf, 0xa4, + 0x98, 0x89, 0x1a, 0x7d, 0xec, 0xa5, 0xb3, 0x87, 0xff, 0x22, 0x1b, 0xb8, 0xd6, 0x43, 0xe7, 0xec, + 0xd8, 0xa2, 0x4d, 0xec, 0xa5, 0x52, 0x06, 0x1a, 0xe0, 0x5e, 0xf4, 0xd6, 0x65, 0xbd, 0x03, 0xfe, + 0x65, 0x36, 0x7c, 0x33, 0x7c, 0xf0, 0xb2, 0x26, 0x05, 0xf8, 0x12, 0x2c, 0x20, 0xcf, 0x43, 0x17, + 0x86, 0x3b, 0x70, 0x1c, 0xca, 0xff, 0x4f, 0xe6, 0x71, 0x02, 0x02, 0xf7, 0x06, 0xf3, 0x86, 0x1e, + 0x58, 0x66, 0x85, 0x2a, 0x75, 0x10, 0xed, 0x1a, 0x27, 0x03, 0xe2, 0x63, 0xfe, 0xab, 0x60, 0xe5, + 0x6b, 0xd7, 0xc9, 0xe2, 0x98, 0xf8, 0x2d, 0x03, 0xaa, 0x4b, 0xad, 0x91, 0xff, 0x61, 0x15, 0xe4, + 0xe3, 0x03, 0xec, 0xcc, 0xf6, 0xbb, 0x06, 0xb1, 0x2d, 0xca, 0x3f, 0xcf, 0xec, 0xf6, 0x72, 0x24, + 0x3a, 0xb2, 0xfd, 0xae, 0x62, 0x5b, 0x14, 0x36, 0xc0, 0x2a, 0xa6, 0x26, 0xea, 0x63, 0x56, 0x6a, + 0xb0, 0x6c, 0x3a, 0x43, 0x9e, 0x6b, 0xbb, 0x1d, 0xfe, 0x7f, 0x33, 0x59, 0x85, 0x50, 0xa8, 0x05, + 0xba, 0xa3, 0x50, 0x06, 0x65, 0xb0, 0xe2, 0x90, 0xa0, 0xe8, 0x43, 0xbe, 0xd1, 0xf7, 0xec, 0x53, + 0xdb, 0xc1, 0xec, 0x1e, 0xf6, 0x75, 0x26, 0x0e, 0x3a, 0x44, 0x0c, 0x64, 0xcd, 0xa1, 0x8a, 0x55, + 0x43, 0xa4, 0x8f, 0x3d, 0xe4, 0x13, 0x8f, 0x2d, 0xbe, 0x89, 0x2d, 0x1c, 0x3c, 0x2d, 0x46, 0x7d, + 0x14, 0x32, 0xa1, 0x1b, 0xb1, 0xbc, 0x39, 0x54, 0xc7, 0x3d, 0x6d, 0x80, 0xd5, 0x60, 0xad, 0x0c, + 0xe4, 0x38, 0x86, 0x6d, 0x61, 0xd7, 0xb7, 0xdb, 0x36, 0xf6, 0x28, 0x5f, 0xce, 0x1e, 0x79, 0x20, + 0x14, 0x1c, 0xa7, 0x96, 0xc8, 0x58, 0x5f, 0x83, 0xaa, 0x0d, 0x79, 0x16, 0xbb, 0xf8, 0xb4, 0x89, + 0x17, 0xd4, 0x6d, 0xe1, 0xb4, 0x52, 0x5e, 0xcc, 0xee, 0x6b, 0x2c, 0x17, 0x87, 0xea, 0x70, 0x6e, + 0x29, 0xac, 0x83, 0x95, 0xb8, 0x8e, 0xb7, 0xdf, 0x60, 0x83, 0xe2, 0x13, 0x6a, 0x22, 0x97, 0xf2, + 0x95, 0xec, 0xae, 0xa6, 0x74, 0x5a, 0x24, 0x63, 0x43, 0x0f, 0xf6, 0x0e, 0x8b, 0x12, 0x64, 0xbc, + 0x81, 0x4f, 0x06, 0xc8, 0xa1, 0xbc, 0x94, 0xcd, 0x1b, 0x0a, 0x59, 0xee, 0x4b, 0x81, 0x0c, 0x3e, + 0x07, 0x8b, 0xf8, 0xdc, 0xf6, 0x0d, 0x12, 0xdd, 0x84, 0xf9, 0x6a, 0xf6, 0xe7, 0x98, 0x09, 0x94, + 0xf0, 0x5a, 0x0b, 0xbf, 0x06, 0x8b, 0x14, 0x9f, 0x84, 0x8f, 0x82, 0x26, 0xa1, 0x3e, 0x7f, 0x30, + 0x45, 0xcd, 0xb6, 0x40, 0xf1, 0x49, 0x13, 0x75, 0xb0, 0x48, 0x68, 0x70, 0x80, 0x79, 0xc8, 0xb5, + 0x48, 0x2f, 0x05, 0x79, 0x31, 0x05, 0x64, 0x29, 0x54, 0x0d, 0x39, 0x22, 0x58, 0xa6, 0x27, 0x8e, + 0x61, 0xbb, 0x5d, 0xec, 0xd9, 0x3e, 0x72, 0x4d, 0xcc, 0xd7, 0xb2, 0x2f, 0xae, 0xf4, 0xc4, 0xa9, + 0x25, 0x0a, 0xf8, 0x3d, 0x58, 0x4b, 0x3d, 0x32, 0x07, 0x8f, 0x09, 0xc4, 0x3b, 0x66, 0xa9, 0xf5, + 0x7f, 0xd9, 0xc5, 0xdd, 0xfc, 0xdb, 0xcb, 0xd2, 0x5c, 0x69, 0xeb, 0xc9, 0x63, 0x75, 0x25, 0x81, + 0xd4, 0xd1, 0xf9, 0x51, 0x88, 0x80, 0x5d, 0x70, 0x27, 0x05, 0x1f, 0x7f, 0x3a, 0xfe, 0x66, 0xca, + 0x37, 0x90, 0xad, 0xd2, 0x56, 0x69, 0x77, 0x57, 0xdd, 0x48, 0x60, 0x2f, 0xaf, 0x3c, 0x28, 0xff, + 0xf4, 0xce, 0x48, 0xe1, 0xc7, 0x46, 0x9e, 0xf2, 0x95, 0x34, 0x8c, 0xf4, 0xae, 0x58, 0x71, 0x31, + 0x0c, 0x53, 0xb1, 0x5c, 0xd4, 0x67, 0x5f, 0x21, 0xbe, 0x9e, 0x1d, 0x21, 0xff, 0xf6, 0xb2, 0xb4, + 0x58, 0x1a, 0x7d, 0x92, 0x4a, 0x48, 0x8d, 0x10, 0x54, 0xd4, 0xc0, 0x7c, 0xfc, 0x0a, 0x07, 0x37, + 0xc0, 0xea, 0x91, 0x20, 0x1b, 0xb2, 0xf4, 0x52, 0x92, 0x8d, 0xc3, 0x86, 0xd6, 0x94, 0xc4, 0x5a, + 0xb5, 0x26, 0x55, 0xb8, 0xff, 0x80, 0xab, 0x20, 0x9f, 0x34, 0xa9, 0x52, 0x53, 0xae, 0x89, 0x02, + 0x97, 0x1b, 0x35, 0xcb, 0xca, 0x41, 
0x4d, 0x14, 0x64, 0x6e, 0xa6, 0xf8, 0x8f, 0x1c, 0xc8, 0x8f, + 0x3d, 0xb8, 0xc1, 0x22, 0xb8, 0xa7, 0xbd, 0x6e, 0x88, 0x2f, 0x54, 0xa5, 0xa1, 0x1c, 0x6a, 0x86, + 0xa8, 0xd4, 0xeb, 0x35, 0xfd, 0x4a, 0x9c, 0x0d, 0xb0, 0x3a, 0xc1, 0x47, 0x69, 0x70, 0x39, 0xb8, + 0x09, 0xd6, 0x26, 0x35, 0x55, 0xab, 0xdc, 0x0c, 0xbc, 0x03, 0xf8, 0x09, 0x6d, 0xb2, 0xc2, 0xba, + 0x33, 0x0b, 0x3f, 0x04, 0xf7, 0x27, 0xb4, 0xaa, 0x52, 0x5d, 0xd1, 0x25, 0xe3, 0x48, 0xad, 0xe9, + 0x12, 0x37, 0xf7, 0xf3, 0x4e, 0x42, 0xb3, 0x29, 0xbf, 0xe6, 0xde, 0x2b, 0xfe, 0x35, 0x07, 0x0a, + 0x13, 0xde, 0x5d, 0xe0, 0x47, 0xe0, 0x81, 0xa8, 0x34, 0x34, 0x5d, 0x15, 0x6a, 0x0d, 0xdd, 0x90, + 0x5e, 0x89, 0xf2, 0xa1, 0x56, 0x53, 0x1a, 0x57, 0x06, 0x77, 0x1b, 0xac, 0x4f, 0xf4, 0x0a, 0x86, + 0x77, 0x07, 0xf0, 0x93, 0x1b, 0x83, 0x01, 0x16, 0xc1, 0xbd, 0x89, 0xad, 0x4d, 0x41, 0xd5, 0x6b, + 0x7a, 0x4d, 0x69, 0x70, 0xb3, 0xc5, 0x3f, 0xe5, 0x40, 0x7e, 0xec, 0x09, 0x82, 0x8d, 0xab, 0xaa, + 0xa8, 0xa2, 0xc4, 0x5c, 0x05, 0x59, 0x96, 0x64, 0xa3, 0xae, 0x54, 0xa4, 0x2b, 0x3d, 0xdb, 0x04, + 0x6b, 0x93, 0x9c, 0x82, 0x8e, 0xdd, 0x06, 0xeb, 0x13, 0xdb, 0x82, 0x7e, 0xdd, 0x07, 0xb7, 0x27, + 0x35, 0xaa, 0xd2, 0x81, 0x2a, 0x69, 0x1a, 0xeb, 0xd4, 0x0c, 0x98, 0x8f, 0x2b, 0x76, 0xb6, 0xba, + 0xb2, 0x72, 0x30, 0x31, 0xc1, 0x56, 0x00, 0x97, 0x34, 0x55, 0xa4, 0xf2, 0xe1, 0xc1, 0x67, 0x5c, + 0x6e, 0x82, 0x75, 0x8f, 0x9b, 0x99, 0x60, 0x7d, 0xc2, 0xcd, 0x4e, 0xb0, 0x3e, 0xe6, 0xe6, 0x26, + 0x58, 0x4b, 0xdc, 0x7b, 0x30, 0x0f, 0x16, 0x13, 0xab, 0xac, 0x1c, 0x70, 0x37, 0x46, 0x1d, 0x1b, + 0x8a, 0x5e, 0x13, 0x25, 0xee, 0x7d, 0x96, 0xe0, 0x89, 0xf5, 0x48, 0x50, 0x1b, 0xb5, 0xc6, 0x01, + 0x37, 0x0f, 0x0b, 0x60, 0x39, 0x31, 0x4b, 0xaa, 0xaa, 0xa8, 0xdc, 0xcd, 0x51, 0x63, 0x55, 0xd0, + 0x05, 0x99, 0x03, 0xa3, 0xc6, 0xa6, 0xd0, 0xa8, 0x89, 0xdc, 0x42, 0xf1, 0x2f, 0x39, 0x90, 0x1f, + 0x2b, 0x70, 0xd9, 0x4a, 0x31, 0xd7, 0x00, 0x67, 0xbc, 0x94, 0xd4, 0xb2, 0xa2, 0xd5, 0xf4, 0xd7, + 0x57, 0xe6, 0xe9, 0x2e, 0xd8, 0x98, 0xe4, 0xa4, 0x4b, 0xaa, 0x26, 0x71, 0x39, 0xb6, 0x1e, 0x93, + 0x9a, 0x2b, 0x52, 0x55, 0x38, 0x94, 0xf5, 0x70, 0xc1, 0x26, 0x39, 0x84, 0x7f, 0x49, 0xdc, 0x6c, + 0xf1, 0x8f, 0x39, 0x70, 0x2b, 0x5d, 0xc2, 0xc6, 0x11, 0x35, 0x5d, 0xd0, 0xa5, 0xba, 0xd4, 0xb8, + 0xba, 0x63, 0xd7, 0x00, 0x1c, 0x6d, 0x6e, 0x28, 0x0d, 0x29, 0x3c, 0x1a, 0x46, 0xed, 0x95, 0x8a, + 0xcc, 0xcd, 0x8c, 0x9b, 0xeb, 0x4a, 0x85, 0x9b, 0x1d, 0x37, 0x0b, 0xb2, 0xcc, 0xcd, 0x15, 0xff, + 0x99, 0x03, 0x2b, 0x13, 0x0b, 0xc2, 0x87, 0xe0, 0x03, 0x5d, 0x15, 0x1a, 0x9a, 0x20, 0xb2, 0xe4, + 0x37, 0x6a, 0x9a, 0x22, 0x0b, 0xfa, 0xf8, 0x8e, 0xfb, 0x14, 0x7c, 0x3c, 0xd9, 0x4d, 0x95, 0x84, + 0x8a, 0x71, 0xd8, 0x08, 0x77, 0xb9, 0x2e, 0x55, 0xb8, 0x1c, 0x7c, 0x04, 0x3e, 0xfa, 0x19, 0xdf, + 0xc4, 0x73, 0x06, 0x7e, 0x02, 0x1e, 0xbe, 0xcb, 0xb3, 0x29, 0x09, 0xba, 0x50, 0x96, 0xa5, 0x40, + 0xc4, 0xcd, 0xc2, 0x8f, 0x41, 0x71, 0xb2, 0xab, 0x26, 0xa9, 0x35, 0x41, 0xae, 0x7d, 0xc7, 0x9c, + 0xb9, 0xb9, 0xe2, 0xf7, 0x60, 0x21, 0x55, 0x9c, 0xb1, 0xc3, 0xa0, 0xfc, 0x5a, 0x97, 0x04, 0x43, + 0x39, 0xd4, 0x9b, 0x87, 0xfa, 0xf8, 0x5e, 0x19, 0x69, 0x7d, 0x21, 0xbd, 0xe2, 0x72, 0x90, 0x07, + 0x2b, 0x23, 0x56, 0x49, 0x13, 0x85, 0x26, 0xeb, 0x6f, 0x51, 0x05, 0x37, 0x87, 0x15, 0x19, 0xdb, + 0xea, 0xaf, 0xea, 0xb2, 0x51, 0xae, 0x35, 0x04, 0xf5, 0xf5, 0xf8, 0x29, 0x9f, 0x6a, 0x2b, 0x0b, + 0x9a, 0xb4, 0xbf, 0xc7, 0xe5, 0x20, 0x04, 0x4b, 0x29, 0x33, 0x8b, 0x36, 0x53, 0x7c, 0x15, 0x30, + 0xc3, 0x0a, 0x2c, 0x66, 0x2a, 0xcd, 0x09, 0x4b, 0xb0, 0x0e, 0x0a, 0xa9, 0xb6, 0x8a, 0x22, 0x1e, + 0xb2, 0xf5, 0xe5, 0x72, 0x2c, 0x71, 0x52, 0x0d, 0xa2, 0xd2, 
0xd0, 0x99, 0x7d, 0x86, 0x9d, 0xb1, + 0x4b, 0xa3, 0x57, 0x7c, 0x96, 0xb4, 0x65, 0x41, 0xfc, 0x46, 0x93, 0x05, 0xed, 0x85, 0xf1, 0xed, + 0x21, 0x3b, 0x91, 0x47, 0x83, 0x14, 0xc0, 0xf2, 0x15, 0x87, 0x30, 0xc0, 0x55, 0x95, 0xd2, 0xe0, + 0x66, 0x58, 0x8f, 0xc6, 0xec, 0xd5, 0x2a, 0x37, 0x0b, 0x3f, 0x00, 0x77, 0xaf, 0x36, 0x68, 0x42, + 0x55, 0x32, 0xa4, 0x86, 0xa8, 0x54, 0xd8, 0xc6, 0x9f, 0x2b, 0xfe, 0x7d, 0x06, 0xac, 0x5d, 0xad, + 0x49, 0x34, 0xec, 0x3f, 0x33, 0xf6, 0xe1, 0x31, 0xe0, 0x70, 0xbb, 0x8d, 0x4d, 0xdf, 0x3e, 0xc5, + 0x46, 0x58, 0xbe, 0x44, 0x3f, 0xa0, 0xf8, 0xe2, 0xd7, 0x17, 0x3b, 0xe5, 0xb9, 0x7f, 0x5d, 0x96, + 0x72, 0xea, 0xf2, 0x90, 0x1c, 0xb6, 0xc0, 0xef, 0xc1, 0xc2, 0x80, 0x62, 0x2f, 0x8e, 0x33, 0x73, + 0xdd, 0x38, 0x2a, 0x60, 0xb8, 0x08, 0x8e, 0xc0, 0x52, 0x5c, 0x41, 0x45, 0xfc, 0xd9, 0x6b, 0xf3, + 0x17, 0x23, 0x62, 0x68, 0x29, 0xbf, 0xfc, 0x4e, 0xef, 0xd8, 0x7e, 0x77, 0xd0, 0xda, 0x36, 0x49, + 0x6f, 0x27, 0xc4, 0x6e, 0x85, 0x3f, 0x58, 0xe9, 0x90, 0xad, 0x0e, 0x76, 0x83, 0x1b, 0xcd, 0xce, + 0x54, 0xbf, 0xa5, 0xf9, 0x32, 0x31, 0xb6, 0x6e, 0x04, 0xba, 0x27, 0xff, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x3d, 0xfa, 0x11, 0x47, 0x86, 0x23, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database.pb.go new file mode 100644 index 000000000..98c117ca1 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/database.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A PostgreSQL Database resource. For more information, see +// the [Developer's Guide](/docs/managed-postgresql/concepts). +type Database struct { + // Name of the database. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the PostgreSQL cluster that the database belongs to. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user assigned as the owner of the database. + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // POSIX locale for string sorting order. + // Can only be set at creation time. + LcCollate string `protobuf:"bytes,4,opt,name=lc_collate,json=lcCollate,proto3" json:"lc_collate,omitempty"` + // POSIX locale for character classification. + // Can only be set at creation time. + LcCtype string `protobuf:"bytes,5,opt,name=lc_ctype,json=lcCtype,proto3" json:"lc_ctype,omitempty"` + // PostgreSQL extensions enabled for the database. 
+ Extensions []*Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Database) Reset() { *m = Database{} } +func (m *Database) String() string { return proto.CompactTextString(m) } +func (*Database) ProtoMessage() {} +func (*Database) Descriptor() ([]byte, []int) { + return fileDescriptor_database_b1308b0a06a4388e, []int{0} +} +func (m *Database) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Database.Unmarshal(m, b) +} +func (m *Database) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Database.Marshal(b, m, deterministic) +} +func (dst *Database) XXX_Merge(src proto.Message) { + xxx_messageInfo_Database.Merge(dst, src) +} +func (m *Database) XXX_Size() int { + return xxx_messageInfo_Database.Size(m) +} +func (m *Database) XXX_DiscardUnknown() { + xxx_messageInfo_Database.DiscardUnknown(m) +} + +var xxx_messageInfo_Database proto.InternalMessageInfo + +func (m *Database) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Database) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *Database) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *Database) GetLcCollate() string { + if m != nil { + return m.LcCollate + } + return "" +} + +func (m *Database) GetLcCtype() string { + if m != nil { + return m.LcCtype + } + return "" +} + +func (m *Database) GetExtensions() []*Extension { + if m != nil { + return m.Extensions + } + return nil +} + +type Extension struct { + // Name of the extension, e.g. `pg_trgm` or `pg_btree`. + // Extensions supported by MDB are [listed in the Developer's Guide](/docs/managed-postgresql/concepts). + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Version of the extension. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (m *Extension) String() string { return proto.CompactTextString(m) } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { + return fileDescriptor_database_b1308b0a06a4388e, []int{1} +} +func (m *Extension) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Extension.Unmarshal(m, b) +} +func (m *Extension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Extension.Marshal(b, m, deterministic) +} +func (dst *Extension) XXX_Merge(src proto.Message) { + xxx_messageInfo_Extension.Merge(dst, src) +} +func (m *Extension) XXX_Size() int { + return xxx_messageInfo_Extension.Size(m) +} +func (m *Extension) XXX_DiscardUnknown() { + xxx_messageInfo_Extension.DiscardUnknown(m) +} + +var xxx_messageInfo_Extension proto.InternalMessageInfo + +func (m *Extension) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Extension) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type DatabaseSpec struct { + // Name of the PostgreSQL database. 1-63 characters long. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name of the user to be assigned as the owner of the database. 
+ // To get the list of available PostgreSQL users, make a [UserService.List] request. + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` + // POSIX locale for string sorting order. + // Can only be set at creation time. + LcCollate string `protobuf:"bytes,3,opt,name=lc_collate,json=lcCollate,proto3" json:"lc_collate,omitempty"` + // POSIX locale for character classification. + // Can only be set at creation time. + LcCtype string `protobuf:"bytes,4,opt,name=lc_ctype,json=lcCtype,proto3" json:"lc_ctype,omitempty"` + // PostgreSQL extensions to be enabled for the database. + Extensions []*Extension `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DatabaseSpec) Reset() { *m = DatabaseSpec{} } +func (m *DatabaseSpec) String() string { return proto.CompactTextString(m) } +func (*DatabaseSpec) ProtoMessage() {} +func (*DatabaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_database_b1308b0a06a4388e, []int{2} +} +func (m *DatabaseSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DatabaseSpec.Unmarshal(m, b) +} +func (m *DatabaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DatabaseSpec.Marshal(b, m, deterministic) +} +func (dst *DatabaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DatabaseSpec.Merge(dst, src) +} +func (m *DatabaseSpec) XXX_Size() int { + return xxx_messageInfo_DatabaseSpec.Size(m) +} +func (m *DatabaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DatabaseSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DatabaseSpec proto.InternalMessageInfo + +func (m *DatabaseSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DatabaseSpec) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *DatabaseSpec) GetLcCollate() string { + if m != nil { + return m.LcCollate + } + return "" +} + +func (m *DatabaseSpec) GetLcCtype() string { + if m != nil { + return m.LcCtype + } + return "" +} + +func (m *DatabaseSpec) GetExtensions() []*Extension { + if m != nil { + return m.Extensions + } + return nil +} + +func init() { + proto.RegisterType((*Database)(nil), "yandex.cloud.mdb.postgresql.v1.Database") + proto.RegisterType((*Extension)(nil), "yandex.cloud.mdb.postgresql.v1.Extension") + proto.RegisterType((*DatabaseSpec)(nil), "yandex.cloud.mdb.postgresql.v1.DatabaseSpec") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/database.proto", fileDescriptor_database_b1308b0a06a4388e) +} + +var fileDescriptor_database_b1308b0a06a4388e = []byte{ + // 407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcf, 0xaa, 0xd3, 0x40, + 0x14, 0x87, 0xc9, 0xbd, 0xe9, 0xbd, 0xb7, 0xe3, 0x9f, 0xc5, 0x28, 0x18, 0x85, 0x96, 0xd2, 0x55, + 0x55, 0x66, 0x62, 0x5a, 0x28, 0x16, 0x75, 0x61, 0xaa, 0x42, 0x17, 0x22, 0x54, 0xdd, 0x54, 0x4a, + 0x98, 0xcc, 0x0c, 0x31, 0x30, 0xc9, 0xc4, 0x64, 0x1a, 0x5b, 0xe9, 0x13, 0xf8, 0x30, 0xbe, 0x46, + 0x7d, 0x04, 0x1f, 0xc1, 0x75, 0x9f, 0x40, 0x32, 0x49, 0x6c, 0x0a, 0xda, 0xcd, 0xdd, 0xcd, 0x99, + 0xdf, 0xf9, 0x0e, 0x9c, 0x8f, 0x03, 0xd0, 0x86, 0xc4, 0x8c, 0xaf, 0x6d, 0x2a, 0xe4, 0x8a, 0xd9, + 0x11, 0xf3, 0xed, 0x44, 0x66, 0x2a, 0x48, 0x79, 0xf6, 0x45, 0xd8, 0xb9, 0x63, 0x33, 0xa2, 0x88, + 0x4f, 0x32, 0x8e, 0x93, 0x54, 0x2a, 0x09, 0xbb, 0x65, 0x3b, 0xd6, 0xed, 0x38, 0x62, 0x3e, 
0x3e, + 0xb4, 0xe3, 0xdc, 0x79, 0xd0, 0x39, 0x1a, 0x97, 0x13, 0x11, 0x32, 0xa2, 0x42, 0x19, 0x97, 0x78, + 0xff, 0x97, 0x01, 0xae, 0x5e, 0x55, 0x13, 0x21, 0x04, 0x66, 0x4c, 0x22, 0x6e, 0x19, 0x3d, 0x63, + 0xd0, 0x9e, 0xeb, 0x37, 0xec, 0x00, 0x40, 0xc5, 0x2a, 0x53, 0x3c, 0xf5, 0x42, 0x66, 0x9d, 0xe9, + 0xa4, 0x5d, 0xfd, 0xcc, 0x18, 0xbc, 0x0b, 0x5a, 0xf2, 0x6b, 0xcc, 0x53, 0xeb, 0x5c, 0x27, 0x65, + 0x51, 0x40, 0x82, 0x7a, 0x54, 0x0a, 0x41, 0x14, 0xb7, 0xcc, 0x12, 0x12, 0x74, 0x5a, 0x7e, 0xc0, + 0xfb, 0xe0, 0xaa, 0x88, 0xd5, 0x26, 0xe1, 0x56, 0x4b, 0x87, 0x97, 0x82, 0x4e, 0x8b, 0x12, 0xce, + 0x00, 0xe0, 0x6b, 0xc5, 0xe3, 0x2c, 0x94, 0x71, 0x66, 0x5d, 0xf4, 0xce, 0x07, 0x37, 0x86, 0x0f, + 0xf1, 0xe9, 0x1d, 0xf1, 0xeb, 0x9a, 0x98, 0x37, 0xe0, 0xfe, 0x04, 0xb4, 0xff, 0x06, 0xff, 0x5c, + 0xcd, 0x02, 0x97, 0x39, 0x4f, 0x8b, 0xb8, 0xda, 0xab, 0x2e, 0xfb, 0x3f, 0xce, 0xc0, 0xcd, 0xda, + 0xca, 0xfb, 0x84, 0x53, 0x38, 0x6c, 0xe2, 0x6e, 0xf7, 0xf7, 0xce, 0x31, 0xf6, 0x3b, 0xe7, 0xf6, + 0x27, 0x82, 0xbe, 0xbd, 0x44, 0x8b, 0x27, 0x68, 0xe2, 0xa1, 0xe5, 0xa3, 0xef, 0x3f, 0x1d, 0xf3, + 0xf9, 0x8b, 0xf1, 0xa8, 0x1a, 0x3f, 0xaa, 0xd5, 0xe8, 0xe1, 0x6e, 0xa7, 0x82, 0x6e, 0x35, 0xa0, + 0x06, 0x53, 0x99, 0x1b, 0x1f, 0x99, 0xd3, 0x52, 0xdd, 0x7b, 0xfb, 0x9d, 0x73, 0x67, 0x5b, 0x61, + 0xde, 0xf2, 0x31, 0xfe, 0xf8, 0xe1, 0x0d, 0x7a, 0xba, 0x9d, 0x36, 0x95, 0x0e, 0x1b, 0x4a, 0xcd, + 0xd3, 0xd4, 0x7f, 0x5c, 0xb7, 0xae, 0xe1, 0xda, 0x7d, 0xb7, 0x78, 0x1b, 0x84, 0xea, 0xf3, 0xca, + 0xc7, 0x54, 0x46, 0x76, 0x39, 0x02, 0x95, 0x27, 0x17, 0x48, 0x14, 0xf0, 0x58, 0x5f, 0x9b, 0x7d, + 0xfa, 0xb4, 0x9f, 0x1d, 0x2a, 0xff, 0x42, 0x03, 0xa3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x01, + 0x69, 0x06, 0xe9, 0x0e, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database_service.pb.go new file mode 100644 index 000000000..75fba98f0 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/database_service.pb.go @@ -0,0 +1,792 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/database_service.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetDatabaseRequest struct { + // ID of the PostgreSQL cluster that the database belongs to. + // To get the cluster ID use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the PostgreSQL Database resource to return. + // To get the name of the database use a [DatabaseService.List] request. + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDatabaseRequest) Reset() { *m = GetDatabaseRequest{} } +func (m *GetDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseRequest) ProtoMessage() {} +func (*GetDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{0} +} +func (m *GetDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDatabaseRequest.Unmarshal(m, b) +} +func (m *GetDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *GetDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDatabaseRequest.Merge(dst, src) +} +func (m *GetDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_GetDatabaseRequest.Size(m) +} +func (m *GetDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDatabaseRequest proto.InternalMessageInfo + +func (m *GetDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type ListDatabasesRequest struct { + // ID of the PostgreSQL cluster to list databases in. + // To get the cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, Set [page_token] to the [ListDatabasesResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDatabasesRequest) Reset() { *m = ListDatabasesRequest{} } +func (m *ListDatabasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesRequest) ProtoMessage() {} +func (*ListDatabasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{1} +} +func (m *ListDatabasesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDatabasesRequest.Unmarshal(m, b) +} +func (m *ListDatabasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDatabasesRequest.Marshal(b, m, deterministic) +} +func (dst *ListDatabasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDatabasesRequest.Merge(dst, src) +} +func (m *ListDatabasesRequest) XXX_Size() int { + return xxx_messageInfo_ListDatabasesRequest.Size(m) +} +func (m *ListDatabasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDatabasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDatabasesRequest proto.InternalMessageInfo + +func (m *ListDatabasesRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListDatabasesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDatabasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListDatabasesResponse struct { + // List of PostgreSQL Database resources. + Databases []*Database `protobuf:"bytes,1,rep,name=databases,proto3" json:"databases,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value + // for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDatabasesResponse) Reset() { *m = ListDatabasesResponse{} } +func (m *ListDatabasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesResponse) ProtoMessage() {} +func (*ListDatabasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{2} +} +func (m *ListDatabasesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDatabasesResponse.Unmarshal(m, b) +} +func (m *ListDatabasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDatabasesResponse.Marshal(b, m, deterministic) +} +func (dst *ListDatabasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDatabasesResponse.Merge(dst, src) +} +func (m *ListDatabasesResponse) XXX_Size() int { + return xxx_messageInfo_ListDatabasesResponse.Size(m) +} +func (m *ListDatabasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDatabasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDatabasesResponse proto.InternalMessageInfo + +func (m *ListDatabasesResponse) GetDatabases() []*Database { + if m != nil { + return m.Databases + } + return nil +} + +func (m *ListDatabasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateDatabaseRequest struct { + // Required. ID of the PostgreSQL cluster to create a database in. + // To get the cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Configuration of the database to create. 
+ DatabaseSpec *DatabaseSpec `protobuf:"bytes,2,opt,name=database_spec,json=databaseSpec,proto3" json:"database_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDatabaseRequest) Reset() { *m = CreateDatabaseRequest{} } +func (m *CreateDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseRequest) ProtoMessage() {} +func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{3} +} +func (m *CreateDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDatabaseRequest.Unmarshal(m, b) +} +func (m *CreateDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *CreateDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDatabaseRequest.Merge(dst, src) +} +func (m *CreateDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_CreateDatabaseRequest.Size(m) +} +func (m *CreateDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDatabaseRequest proto.InternalMessageInfo + +func (m *CreateDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateDatabaseRequest) GetDatabaseSpec() *DatabaseSpec { + if m != nil { + return m.DatabaseSpec + } + return nil +} + +type CreateDatabaseMetadata struct { + // ID of the PostgreSQL cluster where a database is being created. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the PostgreSQL database that is being created. + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateDatabaseMetadata) Reset() { *m = CreateDatabaseMetadata{} } +func (m *CreateDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseMetadata) ProtoMessage() {} +func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{4} +} +func (m *CreateDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateDatabaseMetadata.Unmarshal(m, b) +} +func (m *CreateDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateDatabaseMetadata.Merge(dst, src) +} +func (m *CreateDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_CreateDatabaseMetadata.Size(m) +} +func (m *CreateDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateDatabaseMetadata proto.InternalMessageInfo + +func (m *CreateDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type UpdateDatabaseRequest struct { + // Required. ID of the PostgreSQL cluster to update a database in. 
+ // To get the cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the database to update. + // To get the name of the database use a [DatabaseService.List] request. + DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + // Field mask that specifies which fields of the Database resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // PostgreSQL extensions that should be enabled for the database. + // + // If the field is sent, the list of enabled extensions is rewritten entirely. + // Therefore, to disable an active extension you should simply send the list omitting this extension. + Extensions []*Extension `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateDatabaseRequest) Reset() { *m = UpdateDatabaseRequest{} } +func (m *UpdateDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDatabaseRequest) ProtoMessage() {} +func (*UpdateDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{5} +} +func (m *UpdateDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateDatabaseRequest.Unmarshal(m, b) +} +func (m *UpdateDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateDatabaseRequest.Merge(dst, src) +} +func (m *UpdateDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_UpdateDatabaseRequest.Size(m) +} +func (m *UpdateDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateDatabaseRequest proto.InternalMessageInfo + +func (m *UpdateDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +func (m *UpdateDatabaseRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateDatabaseRequest) GetExtensions() []*Extension { + if m != nil { + return m.Extensions + } + return nil +} + +type UpdateDatabaseMetadata struct { + // ID of the PostgreSQL cluster where a database is being updated. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the PostgreSQL database that is being updated. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateDatabaseMetadata) Reset() { *m = UpdateDatabaseMetadata{} } +func (m *UpdateDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateDatabaseMetadata) ProtoMessage() {} +func (*UpdateDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{6} +} +func (m *UpdateDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateDatabaseMetadata.Unmarshal(m, b) +} +func (m *UpdateDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateDatabaseMetadata.Merge(dst, src) +} +func (m *UpdateDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateDatabaseMetadata.Size(m) +} +func (m *UpdateDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateDatabaseMetadata proto.InternalMessageInfo + +func (m *UpdateDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type DeleteDatabaseRequest struct { + // Required. ID of the PostgreSQL cluster to delete a database in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the database to delete. + // To get the name of the database, use a [DatabaseService.List] request. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDatabaseRequest) Reset() { *m = DeleteDatabaseRequest{} } +func (m *DeleteDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDatabaseRequest) ProtoMessage() {} +func (*DeleteDatabaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{7} +} +func (m *DeleteDatabaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDatabaseRequest.Unmarshal(m, b) +} +func (m *DeleteDatabaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDatabaseRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteDatabaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDatabaseRequest.Merge(dst, src) +} +func (m *DeleteDatabaseRequest) XXX_Size() int { + return xxx_messageInfo_DeleteDatabaseRequest.Size(m) +} +func (m *DeleteDatabaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDatabaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDatabaseRequest proto.InternalMessageInfo + +func (m *DeleteDatabaseRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteDatabaseRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type DeleteDatabaseMetadata struct { + // ID of the PostgreSQL cluster where a database is being deleted. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the PostgreSQL database that is being deleted. 
+ DatabaseName string `protobuf:"bytes,2,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDatabaseMetadata) Reset() { *m = DeleteDatabaseMetadata{} } +func (m *DeleteDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteDatabaseMetadata) ProtoMessage() {} +func (*DeleteDatabaseMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_database_service_0cedf08b74fe82ed, []int{8} +} +func (m *DeleteDatabaseMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDatabaseMetadata.Unmarshal(m, b) +} +func (m *DeleteDatabaseMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDatabaseMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteDatabaseMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDatabaseMetadata.Merge(dst, src) +} +func (m *DeleteDatabaseMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteDatabaseMetadata.Size(m) +} +func (m *DeleteDatabaseMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDatabaseMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDatabaseMetadata proto.InternalMessageInfo + +func (m *DeleteDatabaseMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteDatabaseMetadata) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +func init() { + proto.RegisterType((*GetDatabaseRequest)(nil), "yandex.cloud.mdb.postgresql.v1.GetDatabaseRequest") + proto.RegisterType((*ListDatabasesRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListDatabasesRequest") + proto.RegisterType((*ListDatabasesResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListDatabasesResponse") + proto.RegisterType((*CreateDatabaseRequest)(nil), "yandex.cloud.mdb.postgresql.v1.CreateDatabaseRequest") + proto.RegisterType((*CreateDatabaseMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.CreateDatabaseMetadata") + proto.RegisterType((*UpdateDatabaseRequest)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateDatabaseRequest") + proto.RegisterType((*UpdateDatabaseMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateDatabaseMetadata") + proto.RegisterType((*DeleteDatabaseRequest)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteDatabaseRequest") + proto.RegisterType((*DeleteDatabaseMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteDatabaseMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DatabaseServiceClient is the client API for DatabaseService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DatabaseServiceClient interface { + // Returns the specified PostgreSQL Database resource. + // + // To get the list of available PostgreSQL Database resources, make a [List] request. + Get(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) + // Retrieves the list of PostgreSQL Database resources in the specified cluster. 
+ List(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) + // Creates a new PostgreSQL database in the specified cluster. + Create(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified PostgreSQL database. + Update(ctx context.Context, in *UpdateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified PostgreSQL database. + Delete(ctx context.Context, in *DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type databaseServiceClient struct { + cc *grpc.ClientConn +} + +func NewDatabaseServiceClient(cc *grpc.ClientConn) DatabaseServiceClient { + return &databaseServiceClient{cc} +} + +func (c *databaseServiceClient) Get(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) { + out := new(Database) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) List(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) { + out := new(ListDatabasesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.DatabaseService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Create(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Update(ctx context.Context, in *UpdateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseServiceClient) Delete(ctx context.Context, in *DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServiceServer is the server API for DatabaseService service. +type DatabaseServiceServer interface { + // Returns the specified PostgreSQL Database resource. + // + // To get the list of available PostgreSQL Database resources, make a [List] request. + Get(context.Context, *GetDatabaseRequest) (*Database, error) + // Retrieves the list of PostgreSQL Database resources in the specified cluster. + List(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) + // Creates a new PostgreSQL database in the specified cluster. + Create(context.Context, *CreateDatabaseRequest) (*operation.Operation, error) + // Updates the specified PostgreSQL database. + Update(context.Context, *UpdateDatabaseRequest) (*operation.Operation, error) + // Deletes the specified PostgreSQL database. 
+ Delete(context.Context, *DeleteDatabaseRequest) (*operation.Operation, error) +} + +func RegisterDatabaseServiceServer(s *grpc.Server, srv DatabaseServiceServer) { + s.RegisterService(&_DatabaseService_serviceDesc, srv) +} + +func _DatabaseService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Get(ctx, req.(*GetDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatabasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.DatabaseService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).List(ctx, req.(*ListDatabasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Create(ctx, req.(*CreateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Update(ctx, req.(*UpdateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.DatabaseService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).Delete(ctx, req.(*DeleteDatabaseRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +var _DatabaseService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.postgresql.v1.DatabaseService", + HandlerType: (*DatabaseServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _DatabaseService_Get_Handler, + }, + { + MethodName: "List", + Handler: _DatabaseService_List_Handler, + }, + { + MethodName: "Create", + Handler: _DatabaseService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _DatabaseService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _DatabaseService_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/postgresql/v1/database_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/database_service.proto", fileDescriptor_database_service_0cedf08b74fe82ed) +} + +var fileDescriptor_database_service_0cedf08b74fe82ed = []byte{ + // 818 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x4d, 0x4f, 0x1b, 0x47, + 0x18, 0xd6, 0x62, 0xd7, 0xc2, 0x63, 0x28, 0xd2, 0xa8, 0xae, 0x2c, 0xab, 0x20, 0xba, 0x95, 0xa8, + 0xeb, 0x76, 0x77, 0xbd, 0xa6, 0xa0, 0xb6, 0x40, 0xa5, 0x9a, 0xaf, 0xa2, 0x04, 0x88, 0x96, 0x44, + 0x91, 0x48, 0x22, 0x6b, 0xec, 0x1d, 0x36, 0x2b, 0xbc, 0x1f, 0x78, 0xc6, 0x0e, 0x1f, 0xe2, 0x90, + 0x1c, 0x12, 0x85, 0x5b, 0x12, 0x29, 0xb7, 0xfc, 0x09, 0xf2, 0x23, 0x40, 0xca, 0x8d, 0x1c, 0x73, + 0x8d, 0xa2, 0x9c, 0x73, 0xcc, 0x29, 0x9a, 0x19, 0x7f, 0x2d, 0x18, 0xec, 0x80, 0x0f, 0xb9, 0xed, + 0xce, 0xfb, 0x3e, 0xb3, 0xcf, 0xf3, 0xce, 0xfb, 0x3e, 0x3b, 0x60, 0x62, 0x07, 0xb9, 0x26, 0xde, + 0xd6, 0x8a, 0x25, 0xaf, 0x62, 0x6a, 0x8e, 0x59, 0xd0, 0x7c, 0x8f, 0x50, 0xab, 0x8c, 0xc9, 0x56, + 0x49, 0xab, 0xea, 0x9a, 0x89, 0x28, 0x2a, 0x20, 0x82, 0xf3, 0x04, 0x97, 0xab, 0x76, 0x11, 0xab, + 0x7e, 0xd9, 0xa3, 0x1e, 0x1c, 0x11, 0x30, 0x95, 0xc3, 0x54, 0xc7, 0x2c, 0xa8, 0x4d, 0x98, 0x5a, + 0xd5, 0x93, 0x3f, 0x59, 0x9e, 0x67, 0x95, 0xb0, 0x86, 0x7c, 0x5b, 0x43, 0xae, 0xeb, 0x51, 0x44, + 0x6d, 0xcf, 0x25, 0x02, 0x9d, 0x1c, 0xad, 0x45, 0xf9, 0x5b, 0xa1, 0xb2, 0xa1, 0x6d, 0xd8, 0xb8, + 0x64, 0xe6, 0x1d, 0x44, 0x36, 0x6b, 0x19, 0xc9, 0x1a, 0x2d, 0x86, 0xf7, 0x7c, 0x5c, 0xe6, 0xf0, + 0x5a, 0x6c, 0x38, 0x40, 0xb9, 0x8a, 0x4a, 0xb6, 0xd9, 0x1a, 0x1e, 0x0b, 0x84, 0x1b, 0xe0, 0x33, + 0xdb, 0x28, 0x5d, 0x2a, 0x17, 0xe9, 0xf2, 0x63, 0x09, 0xc0, 0x45, 0x4c, 0xe7, 0x6a, 0xab, 0x06, + 0xde, 0xaa, 0x60, 0x42, 0xe1, 0xef, 0x00, 0x14, 0x4b, 0x15, 0x42, 0x71, 0x39, 0x6f, 0x9b, 0x09, + 0x69, 0x54, 0x4a, 0x45, 0x73, 0x03, 0x1f, 0x8f, 0x74, 0xe9, 0xe0, 0x58, 0x0f, 0x4f, 0xcf, 0x4c, + 0x64, 0x8c, 0x68, 0x2d, 0xbe, 0x64, 0xc2, 0x59, 0x30, 0xd8, 0xa8, 0xa7, 0x8b, 0x1c, 0x9c, 0xe8, + 0xe3, 0xf9, 0x23, 0x2c, 0xff, 0xd3, 0x91, 0xfe, 0xfd, 0x1d, 0xa4, 0xec, 0xfe, 0xa7, 0xac, 0x67, + 0x94, 0xbf, 0xf3, 0xca, 0xbd, 0xb4, 0xd8, 0x61, 0x72, 0xdc, 0x18, 0xa8, 0x83, 0x56, 0x90, 0x83, + 0xe5, 0x97, 0x12, 0xf8, 0xe1, 0xba, 0x4d, 0x1a, 0x4c, 0xc8, 0xa5, 0xa8, 0xfc, 0x0a, 0xa2, 0x3e, + 0xb2, 0x70, 0x9e, 0xd8, 0xbb, 0x82, 0x46, 0x28, 0x07, 0x3e, 0x1f, 0xe9, 0x91, 0xe9, 0x19, 0x3d, + 0x93, 0xc9, 0x18, 0xfd, 0x2c, 0xb8, 0x66, 0xef, 0x62, 0x98, 0x02, 0x80, 0x27, 0x52, 0x6f, 0x13, + 0xbb, 0x89, 0x10, 0xdf, 0x35, 0x7a, 0x70, 0xac, 0x7f, 0xc7, 0x33, 0x0d, 0xbe, 0xcb, 0x4d, 0x16, + 0x93, 0x9f, 0x48, 0x20, 0x7e, 0x8a, 0x18, 0xf1, 0x3d, 0x97, 0x60, 0xb8, 0x00, 0xa2, 0x75, 0x09, + 0x24, 0x21, 0x8d, 0x86, 0x52, 0xb1, 0x6c, 0x4a, 0xbd, 0xb8, 0x83, 0xd4, 0x46, 0xa1, 0x9b, 0x50, + 0x38, 0x06, 0x86, 
0x5c, 0xbc, 0x4d, 0xf3, 0x2d, 0x84, 0x78, 0x05, 0x8d, 0x41, 0xb6, 0x7c, 0xa3, + 0xc1, 0xe4, 0x95, 0x04, 0xe2, 0xb3, 0x65, 0x8c, 0x28, 0xbe, 0xd2, 0x71, 0xdd, 0x6e, 0x39, 0x2e, + 0xe2, 0xe3, 0x22, 0xff, 0x58, 0x2c, 0xfb, 0x47, 0xb7, 0xd4, 0xd7, 0x7c, 0x5c, 0xcc, 0x85, 0xd9, + 0xee, 0xcd, 0x23, 0x64, 0x6b, 0xf2, 0x5d, 0xf0, 0x63, 0x90, 0xde, 0x32, 0xa6, 0x88, 0x65, 0xc0, + 0xe1, 0xb3, 0xfc, 0x5a, 0x19, 0xfd, 0xd2, 0xb6, 0x81, 0x4e, 0x35, 0xc8, 0xb3, 0x3e, 0x10, 0xbf, + 0xe5, 0x9b, 0x57, 0x55, 0xdf, 0x8b, 0x66, 0x85, 0x53, 0x20, 0x56, 0xe1, 0x54, 0xf8, 0x70, 0xf3, + 0xf6, 0x89, 0x65, 0x93, 0xaa, 0x98, 0x7f, 0xb5, 0x3e, 0xff, 0xea, 0x02, 0x9b, 0xff, 0x65, 0x44, + 0x36, 0x0d, 0x20, 0xd2, 0xd9, 0x33, 0x5c, 0x02, 0x00, 0x6f, 0x53, 0xec, 0x12, 0x66, 0x1d, 0x89, + 0x30, 0xef, 0x9b, 0xdf, 0x3a, 0x15, 0x7f, 0xbe, 0x8e, 0x30, 0x5a, 0xc0, 0xac, 0xe2, 0xc1, 0x92, + 0xf4, 0xb4, 0xe2, 0x4f, 0x25, 0x10, 0x9f, 0xc3, 0x25, 0xfc, 0x0d, 0x54, 0x9c, 0x29, 0x0d, 0x52, + 0xe9, 0xa5, 0xd2, 0xec, 0xf3, 0x7e, 0x30, 0xd4, 0x68, 0x6f, 0xf1, 0x47, 0x80, 0xaf, 0x25, 0x10, + 0x5a, 0xc4, 0x14, 0x66, 0x3b, 0x1d, 0xcd, 0x59, 0xfb, 0x4c, 0x76, 0x6d, 0x03, 0xf2, 0xca, 0xa3, + 0xb7, 0xef, 0x5f, 0xf4, 0xfd, 0x0f, 0x17, 0x34, 0x07, 0xb9, 0xc8, 0xc2, 0xa6, 0x12, 0xb4, 0xeb, + 0x9a, 0x10, 0xa2, 0xed, 0x35, 0x45, 0xee, 0x37, 0x4c, 0x9c, 0x68, 0x7b, 0x01, 0x71, 0xfb, 0x8c, + 0x75, 0x98, 0xb9, 0x15, 0xfc, 0xb3, 0x13, 0x85, 0x76, 0x66, 0x9b, 0x9c, 0xf8, 0x4a, 0x94, 0x70, + 0x42, 0xf9, 0x5f, 0xae, 0xe2, 0x2f, 0x38, 0x79, 0x39, 0x15, 0xf0, 0x8d, 0x04, 0x22, 0xc2, 0x3a, + 0x60, 0x47, 0x06, 0x6d, 0x1d, 0x30, 0xf9, 0x73, 0x10, 0xd6, 0xfc, 0x2b, 0xae, 0xd6, 0x9f, 0x64, + 0xeb, 0xf0, 0x24, 0x2d, 0x9f, 0x6b, 0x51, 0xfd, 0xf5, 0x15, 0x2e, 0x65, 0x4a, 0xbe, 0xa4, 0x94, + 0x7f, 0xa4, 0x34, 0x7c, 0x27, 0x81, 0x88, 0x18, 0xcb, 0xce, 0x6a, 0xda, 0x3a, 0x5a, 0x37, 0x6a, + 0x1e, 0x08, 0x35, 0xe7, 0x8c, 0x7f, 0x50, 0xcd, 0xb5, 0x6c, 0x8f, 0xda, 0x8b, 0xa9, 0xfb, 0x20, + 0x81, 0x88, 0x18, 0xc5, 0xce, 0xea, 0xda, 0xba, 0x47, 0x37, 0xea, 0x1e, 0x4a, 0x87, 0x27, 0x69, + 0xed, 0xdc, 0x99, 0x8f, 0x9f, 0x76, 0xda, 0x79, 0xc7, 0xa7, 0x3b, 0x62, 0x94, 0xd2, 0x3d, 0xd2, + 0x9a, 0x5b, 0x5d, 0x5f, 0xb6, 0x6c, 0x7a, 0xbf, 0x52, 0x50, 0x8b, 0x9e, 0xa3, 0x09, 0xca, 0x8a, + 0xb8, 0x56, 0x59, 0x9e, 0x62, 0x61, 0x97, 0x7f, 0x5d, 0xbb, 0xf8, 0xbe, 0x35, 0xd5, 0x7c, 0x2b, + 0x44, 0x38, 0x60, 0xfc, 0x4b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xef, 0xdf, 0x75, 0x9d, 0x0a, + 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset.pb.go new file mode 100644 index 000000000..8a0df6d95 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset.pb.go @@ -0,0 +1,112 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/resource_preset.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A ResourcePreset resource for describing hardware configuration presets. +type ResourcePreset struct { + // ID of the ResourcePreset resource. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // IDs of availability zones where the resource preset is available. + ZoneIds []string `protobuf:"bytes,2,rep,name=zone_ids,json=zoneIds,proto3" json:"zone_ids,omitempty"` + // Number of CPU cores for a PostgreSQL host created with the preset. + Cores int64 `protobuf:"varint,3,opt,name=cores,proto3" json:"cores,omitempty"` + // RAM volume for a PostgreSQL host created with the preset, in bytes. + Memory int64 `protobuf:"varint,4,opt,name=memory,proto3" json:"memory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourcePreset) Reset() { *m = ResourcePreset{} } +func (m *ResourcePreset) String() string { return proto.CompactTextString(m) } +func (*ResourcePreset) ProtoMessage() {} +func (*ResourcePreset) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_4cba9c3462d84462, []int{0} +} +func (m *ResourcePreset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourcePreset.Unmarshal(m, b) +} +func (m *ResourcePreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourcePreset.Marshal(b, m, deterministic) +} +func (dst *ResourcePreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePreset.Merge(dst, src) +} +func (m *ResourcePreset) XXX_Size() int { + return xxx_messageInfo_ResourcePreset.Size(m) +} +func (m *ResourcePreset) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePreset.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePreset proto.InternalMessageInfo + +func (m *ResourcePreset) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ResourcePreset) GetZoneIds() []string { + if m != nil { + return m.ZoneIds + } + return nil +} + +func (m *ResourcePreset) GetCores() int64 { + if m != nil { + return m.Cores + } + return 0 +} + +func (m *ResourcePreset) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func init() { + proto.RegisterType((*ResourcePreset)(nil), "yandex.cloud.mdb.postgresql.v1.ResourcePreset") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/resource_preset.proto", fileDescriptor_resource_preset_4cba9c3462d84462) +} + +var fileDescriptor_resource_preset_4cba9c3462d84462 = []byte{ + // 215 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0xcf, 0xb1, 0x4b, 0x03, 0x31, + 0x14, 0xc7, 0x71, 0xee, 0x4e, 0xab, 0xcd, 0xd0, 0x21, 0x88, 0xc4, 0x45, 0x0e, 0xa7, 0x5b, 0x9a, + 0x50, 0x74, 0x73, 0x73, 0x73, 0x10, 0x25, 0xa3, 0x4b, 0x31, 0x79, 0x8f, 0x18, 0x68, 0xee, 0x9d, + 0x49, 0xae, 0x58, 0xff, 0x7a, 0x31, 0x29, 0x74, 0x73, 0xfc, 0x3e, 0xf8, 0xc0, 0xfb, 0xb1, 0x87, + 0xc3, 0xc7, 0x08, 0xf8, 0xad, 0xec, 0x8e, 0x66, 0x50, 0x01, 0x8c, 0x9a, 0x28, 0x65, 0x17, 0x31, + 0x7d, 0xed, 0xd4, 0x7e, 0xa3, 0x22, 0x26, 0x9a, 0xa3, 0xc5, 0xed, 0x14, 0x31, 0x61, 0x96, 0x53, + 0xa4, 0x4c, 0xfc, 0xb6, 0x2a, 0x59, 0x94, 0x0c, 0x60, 0xe4, 0x49, 0xc9, 0xfd, 0xe6, 0xce, 0xb3, + 0x95, 0x3e, 0xc2, 0xb7, 0xe2, 0xf8, 0x8a, 0xb5, 0x1e, 0x44, 0xd3, 0x37, 0xc3, 0x52, 0xb7, 0x1e, + 0xf8, 0x0d, 0xbb, 0xfc, 0xa1, 0x11, 0xb7, 0x1e, 0x92, 0x68, 0xfb, 0x6e, 0x58, 0xea, 0x8b, 0xbf, + 0x7e, 0x86, 0xc4, 0xaf, 0xd8, 0xb9, 0xa5, 
0x88, 0x49, 0x74, 0x7d, 0x33, 0x74, 0xba, 0x06, 0xbf, + 0x66, 0x8b, 0x80, 0x81, 0xe2, 0x41, 0x9c, 0x95, 0xf3, 0xb1, 0x9e, 0x5e, 0xdf, 0x5f, 0x9c, 0xcf, + 0x9f, 0xb3, 0x91, 0x96, 0x82, 0xaa, 0x7f, 0xad, 0xeb, 0x1a, 0x47, 0x6b, 0x87, 0x63, 0xf9, 0x58, + 0xfd, 0x3f, 0xf3, 0xf1, 0x54, 0x66, 0x51, 0xc0, 0xfd, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6f, + 0x56, 0x82, 0xa9, 0x1a, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset_service.pb.go new file mode 100644 index 000000000..f93364bbe --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/resource_preset_service.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/resource_preset_service.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetResourcePresetRequest struct { + // Required. ID of the resource preset to return. + // To get the resource preset ID, use a [ResourcePresetService.List] request. 
+ ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResourcePresetRequest) Reset() { *m = GetResourcePresetRequest{} } +func (m *GetResourcePresetRequest) String() string { return proto.CompactTextString(m) } +func (*GetResourcePresetRequest) ProtoMessage() {} +func (*GetResourcePresetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_5dc2866710ef140a, []int{0} +} +func (m *GetResourcePresetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResourcePresetRequest.Unmarshal(m, b) +} +func (m *GetResourcePresetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResourcePresetRequest.Marshal(b, m, deterministic) +} +func (dst *GetResourcePresetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResourcePresetRequest.Merge(dst, src) +} +func (m *GetResourcePresetRequest) XXX_Size() int { + return xxx_messageInfo_GetResourcePresetRequest.Size(m) +} +func (m *GetResourcePresetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetResourcePresetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResourcePresetRequest proto.InternalMessageInfo + +func (m *GetResourcePresetRequest) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +type ListResourcePresetsRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsRequest) Reset() { *m = ListResourcePresetsRequest{} } +func (m *ListResourcePresetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsRequest) ProtoMessage() {} +func (*ListResourcePresetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_5dc2866710ef140a, []int{1} +} +func (m *ListResourcePresetsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsRequest.Unmarshal(m, b) +} +func (m *ListResourcePresetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsRequest.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsRequest.Merge(dst, src) +} +func (m *ListResourcePresetsRequest) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsRequest.Size(m) +} +func (m *ListResourcePresetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsRequest proto.InternalMessageInfo + +func (m *ListResourcePresetsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListResourcePresetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListResourcePresetsResponse struct { + // List of ResourcePreset resources. + ResourcePresets []*ResourcePreset `protobuf:"bytes,1,rep,name=resource_presets,json=resourcePresets,proto3" json:"resource_presets,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + // for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsResponse) Reset() { *m = ListResourcePresetsResponse{} } +func (m *ListResourcePresetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsResponse) ProtoMessage() {} +func (*ListResourcePresetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_5dc2866710ef140a, []int{2} +} +func (m *ListResourcePresetsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsResponse.Unmarshal(m, b) +} +func (m *ListResourcePresetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsResponse.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsResponse.Merge(dst, src) +} +func (m *ListResourcePresetsResponse) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsResponse.Size(m) +} +func (m *ListResourcePresetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsResponse proto.InternalMessageInfo + +func (m *ListResourcePresetsResponse) GetResourcePresets() []*ResourcePreset { + if m != nil { + return m.ResourcePresets + } + return nil +} + +func (m *ListResourcePresetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetResourcePresetRequest)(nil), "yandex.cloud.mdb.postgresql.v1.GetResourcePresetRequest") + proto.RegisterType((*ListResourcePresetsRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListResourcePresetsRequest") + proto.RegisterType((*ListResourcePresetsResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListResourcePresetsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ResourcePresetServiceClient is the client API for ResourcePresetService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ResourcePresetServiceClient interface { + // Returns the specified ResourcePreset resource. + // + // To get the list of available ResourcePreset resources, make a [List] request. + Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) + // Retrieves the list of available ResourcePreset resources. 
+ List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) +} + +type resourcePresetServiceClient struct { + cc *grpc.ClientConn +} + +func NewResourcePresetServiceClient(cc *grpc.ClientConn) ResourcePresetServiceClient { + return &resourcePresetServiceClient{cc} +} + +func (c *resourcePresetServiceClient) Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) { + out := new(ResourcePreset) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ResourcePresetService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePresetServiceClient) List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) { + out := new(ListResourcePresetsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.ResourcePresetService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ResourcePresetServiceServer is the server API for ResourcePresetService service. +type ResourcePresetServiceServer interface { + // Returns the specified ResourcePreset resource. + // + // To get the list of available ResourcePreset resources, make a [List] request. + Get(context.Context, *GetResourcePresetRequest) (*ResourcePreset, error) + // Retrieves the list of available ResourcePreset resources. + List(context.Context, *ListResourcePresetsRequest) (*ListResourcePresetsResponse, error) +} + +func RegisterResourcePresetServiceServer(s *grpc.Server, srv ResourcePresetServiceServer) { + s.RegisterService(&_ResourcePresetService_serviceDesc, srv) +} + +func _ResourcePresetService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourcePresetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ResourcePresetService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).Get(ctx, req.(*GetResourcePresetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePresetService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcePresetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.ResourcePresetService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).List(ctx, req.(*ListResourcePresetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourcePresetService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.postgresql.v1.ResourcePresetService", + HandlerType: (*ResourcePresetServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ResourcePresetService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ResourcePresetService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"yandex/cloud/mdb/postgresql/v1/resource_preset_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/resource_preset_service.proto", fileDescriptor_resource_preset_service_5dc2866710ef140a) +} + +var fileDescriptor_resource_preset_service_5dc2866710ef140a = []byte{ + // 459 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcb, 0x6e, 0x13, 0x31, + 0x14, 0x95, 0x33, 0xa5, 0x22, 0x46, 0xa8, 0x95, 0x25, 0xa4, 0xd1, 0xf0, 0x50, 0x34, 0x0b, 0x98, + 0x4d, 0xec, 0x4c, 0x60, 0x81, 0x48, 0xbb, 0x09, 0x8b, 0x0a, 0x89, 0x47, 0x35, 0x65, 0x03, 0x9b, + 0xc8, 0x89, 0xaf, 0x8c, 0x45, 0x62, 0x4f, 0xc7, 0x4e, 0x54, 0x8a, 0x90, 0x10, 0x4b, 0xb6, 0x7c, + 0x06, 0x0b, 0x36, 0xfc, 0x43, 0xd9, 0xf3, 0x0b, 0x2c, 0xf8, 0x06, 0x56, 0x68, 0x3c, 0xa9, 0xca, + 0x0c, 0x7d, 0x10, 0x96, 0xd6, 0xb9, 0xe7, 0x9e, 0x73, 0x7c, 0xef, 0xc5, 0x5b, 0x6f, 0xb8, 0x16, + 0x70, 0xc0, 0x26, 0x53, 0x33, 0x17, 0x6c, 0x26, 0xc6, 0x2c, 0x37, 0xd6, 0xc9, 0x02, 0xec, 0xfe, + 0x94, 0x2d, 0x52, 0x56, 0x80, 0x35, 0xf3, 0x62, 0x02, 0xa3, 0xbc, 0x00, 0x0b, 0x6e, 0x64, 0xa1, + 0x58, 0xa8, 0x09, 0xd0, 0xbc, 0x30, 0xce, 0x90, 0x5b, 0x15, 0x9b, 0x7a, 0x36, 0x9d, 0x89, 0x31, + 0x3d, 0x61, 0xd3, 0x45, 0x1a, 0xdd, 0x90, 0xc6, 0xc8, 0x29, 0x30, 0x9e, 0x2b, 0xc6, 0xb5, 0x36, + 0x8e, 0x3b, 0x65, 0xb4, 0xad, 0xd8, 0xd1, 0xcd, 0x9a, 0xf6, 0x82, 0x4f, 0x95, 0xf0, 0xf8, 0x12, + 0xbe, 0xb7, 0x9a, 0xb5, 0x8a, 0x15, 0x3f, 0xc5, 0xe1, 0x0e, 0xb8, 0x6c, 0x89, 0xed, 0x7a, 0x28, + 0x83, 0xfd, 0x39, 0x58, 0x47, 0xfa, 0x98, 0x34, 0xf3, 0x28, 0x11, 0xa2, 0x0e, 0x4a, 0xda, 0xc3, + 0xb5, 0x9f, 0x47, 0x29, 0xca, 0x36, 0x8b, 0x1a, 0xf1, 0x91, 0x88, 0x0d, 0x8e, 0x1e, 0x2b, 0xdb, + 0x68, 0x68, 0x8f, 0x3b, 0xde, 0xc1, 0xed, 0x9c, 0x4b, 0x18, 0x59, 0x75, 0x08, 0x61, 0xab, 0x83, + 0x92, 0x60, 0x88, 0x7f, 0x1d, 0xa5, 0xeb, 0x5b, 0xdb, 0x69, 0xaf, 0xd7, 0xcb, 0x2e, 0x97, 0xe0, + 0x9e, 0x3a, 0x04, 0x92, 0x60, 0xec, 0x0b, 0x9d, 0x79, 0x0d, 0x3a, 0x0c, 0xbc, 0x64, 0xfb, 0xe3, + 0xb7, 0xf4, 0x92, 0xaf, 0xcc, 0x7c, 0x97, 0xe7, 0x25, 0x16, 0x7f, 0x46, 0xf8, 0xfa, 0xa9, 0x8a, + 0x36, 0x37, 0xda, 0x02, 0x79, 0x81, 0x37, 0x1b, 0x21, 0x6c, 0x88, 0x3a, 0x41, 0x72, 0xa5, 0x4f, + 0xe9, 0xf9, 0xe3, 0xa0, 0x8d, 0x5f, 0xd9, 0xa8, 0x87, 0xb5, 0x24, 0xc5, 0x1b, 0x1a, 0x0e, 0xdc, + 0xe8, 0x0f, 0xa7, 0xad, 0xa6, 0xd3, 0xab, 0x65, 0xc5, 0xee, 0xb1, 0xdb, 0xfe, 0xfb, 0x00, 0x5f, + 0xab, 0xb7, 0xdd, 0xab, 0x36, 0x84, 0x7c, 0x45, 0x38, 0xd8, 0x01, 0x47, 0xee, 0x5f, 0xe4, 0xea, + 0xac, 0x71, 0x45, 0x2b, 0xe6, 0x89, 0x1f, 0x7e, 0xf8, 0xfe, 0xe3, 0x53, 0x6b, 0x9b, 0x0c, 0xd8, + 0x8c, 0x6b, 0x2e, 0x41, 0x74, 0x4f, 0x5f, 0x98, 0x65, 0x5c, 0xf6, 0xf6, 0xef, 0x65, 0x78, 0x47, + 0xbe, 0x20, 0xbc, 0x56, 0x7e, 0x3f, 0x79, 0x70, 0x91, 0xfa, 0xd9, 0x6b, 0x11, 0x0d, 0xfe, 0x8b, + 0x5b, 0x0d, 0x38, 0xa6, 0x3e, 0x46, 0x42, 0x6e, 0xff, 0x5b, 0x8c, 0xe1, 0xb3, 0x97, 0x4f, 0xa4, + 0x72, 0xaf, 0xe6, 0x63, 0x3a, 0x31, 0x33, 0x56, 0x09, 0x77, 0xab, 0xa3, 0x91, 0xa6, 0x2b, 0x41, + 0xfb, 0xc3, 0x60, 0xe7, 0x5f, 0xd3, 0xe0, 0xe4, 0x35, 0x5e, 0xf7, 0x84, 0xbb, 0xbf, 0x03, 0x00, + 0x00, 0xff, 0xff, 0xb8, 0x37, 0xeb, 0x6f, 0x1c, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user.pb.go new file mode 100644 index 000000000..aeb92acc2 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user.pb.go @@ -0,0 +1,456 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/user.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type UserSettings_SynchronousCommit int32 + +const ( + UserSettings_SYNCHRONOUS_COMMIT_UNSPECIFIED UserSettings_SynchronousCommit = 0 + UserSettings_SYNCHRONOUS_COMMIT_ON UserSettings_SynchronousCommit = 1 + UserSettings_SYNCHRONOUS_COMMIT_OFF UserSettings_SynchronousCommit = 2 + UserSettings_SYNCHRONOUS_COMMIT_LOCAL UserSettings_SynchronousCommit = 3 + UserSettings_SYNCHRONOUS_COMMIT_REMOTE_WRITE UserSettings_SynchronousCommit = 4 + UserSettings_SYNCHRONOUS_COMMIT_REMOTE_APPLY UserSettings_SynchronousCommit = 5 +) + +var UserSettings_SynchronousCommit_name = map[int32]string{ + 0: "SYNCHRONOUS_COMMIT_UNSPECIFIED", + 1: "SYNCHRONOUS_COMMIT_ON", + 2: "SYNCHRONOUS_COMMIT_OFF", + 3: "SYNCHRONOUS_COMMIT_LOCAL", + 4: "SYNCHRONOUS_COMMIT_REMOTE_WRITE", + 5: "SYNCHRONOUS_COMMIT_REMOTE_APPLY", +} +var UserSettings_SynchronousCommit_value = map[string]int32{ + "SYNCHRONOUS_COMMIT_UNSPECIFIED": 0, + "SYNCHRONOUS_COMMIT_ON": 1, + "SYNCHRONOUS_COMMIT_OFF": 2, + "SYNCHRONOUS_COMMIT_LOCAL": 3, + "SYNCHRONOUS_COMMIT_REMOTE_WRITE": 4, + "SYNCHRONOUS_COMMIT_REMOTE_APPLY": 5, +} + +func (x UserSettings_SynchronousCommit) String() string { + return proto.EnumName(UserSettings_SynchronousCommit_name, int32(x)) +} +func (UserSettings_SynchronousCommit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{3, 0} +} + +type UserSettings_LogStatement int32 + +const ( + UserSettings_LOG_STATEMENT_UNSPECIFIED UserSettings_LogStatement = 0 + UserSettings_LOG_STATEMENT_NONE UserSettings_LogStatement = 1 + UserSettings_LOG_STATEMENT_DDL UserSettings_LogStatement = 2 + UserSettings_LOG_STATEMENT_MOD UserSettings_LogStatement = 3 + UserSettings_LOG_STATEMENT_ALL UserSettings_LogStatement = 4 +) + +var UserSettings_LogStatement_name = map[int32]string{ + 0: "LOG_STATEMENT_UNSPECIFIED", + 1: "LOG_STATEMENT_NONE", + 2: "LOG_STATEMENT_DDL", + 3: "LOG_STATEMENT_MOD", + 4: "LOG_STATEMENT_ALL", +} +var UserSettings_LogStatement_value = map[string]int32{ + "LOG_STATEMENT_UNSPECIFIED": 0, + "LOG_STATEMENT_NONE": 1, + "LOG_STATEMENT_DDL": 2, + "LOG_STATEMENT_MOD": 3, + "LOG_STATEMENT_ALL": 4, +} + +func (x UserSettings_LogStatement) String() string { + return proto.EnumName(UserSettings_LogStatement_name, int32(x)) +} +func (UserSettings_LogStatement) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{3, 1} +} + +type UserSettings_TransactionIsolation int32 + +const ( + UserSettings_TRANSACTION_ISOLATION_UNSPECIFIED UserSettings_TransactionIsolation = 0 + UserSettings_TRANSACTION_ISOLATION_READ_UNCOMMITTED UserSettings_TransactionIsolation = 1 + UserSettings_TRANSACTION_ISOLATION_READ_COMMITTED 
UserSettings_TransactionIsolation = 2 + UserSettings_TRANSACTION_ISOLATION_REPEATABLE_READ UserSettings_TransactionIsolation = 3 + UserSettings_TRANSACTION_ISOLATION_SERIALIZABLE UserSettings_TransactionIsolation = 4 +) + +var UserSettings_TransactionIsolation_name = map[int32]string{ + 0: "TRANSACTION_ISOLATION_UNSPECIFIED", + 1: "TRANSACTION_ISOLATION_READ_UNCOMMITTED", + 2: "TRANSACTION_ISOLATION_READ_COMMITTED", + 3: "TRANSACTION_ISOLATION_REPEATABLE_READ", + 4: "TRANSACTION_ISOLATION_SERIALIZABLE", +} +var UserSettings_TransactionIsolation_value = map[string]int32{ + "TRANSACTION_ISOLATION_UNSPECIFIED": 0, + "TRANSACTION_ISOLATION_READ_UNCOMMITTED": 1, + "TRANSACTION_ISOLATION_READ_COMMITTED": 2, + "TRANSACTION_ISOLATION_REPEATABLE_READ": 3, + "TRANSACTION_ISOLATION_SERIALIZABLE": 4, +} + +func (x UserSettings_TransactionIsolation) String() string { + return proto.EnumName(UserSettings_TransactionIsolation_name, int32(x)) +} +func (UserSettings_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{3, 2} +} + +// A PostgreSQL User resource. For more information, see +// the [Developer's Guide](/docs/managed-postgresql/concepts). +type User struct { + // Name of the PostgreSQL user. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the PostgreSQL cluster the user belongs to. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Set of permissions granted to the user. + Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` + // Number of database connections available to the user. + ConnLimit int64 `protobuf:"varint,4,opt,name=conn_limit,json=connLimit,proto3" json:"conn_limit,omitempty"` + // Postgresql settings for this user + Settings *UserSettings `protobuf:"bytes,5,opt,name=settings,proto3" json:"settings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{0} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_User.Unmarshal(m, b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_User.Marshal(b, m, deterministic) +} +func (dst *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(dst, src) +} +func (m *User) XXX_Size() int { + return xxx_messageInfo_User.Size(m) +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *User) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *User) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *User) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +func (m *User) GetConnLimit() int64 { + if m != nil { + return m.ConnLimit + } + return 0 +} + +func (m *User) GetSettings() *UserSettings { + if m != nil { + return m.Settings + } + return nil +} + +type Permission struct { + // Name of the database that the permission grants access to. 
+ DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{1} +} +func (m *Permission) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Permission.Unmarshal(m, b) +} +func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Permission.Marshal(b, m, deterministic) +} +func (dst *Permission) XXX_Merge(src proto.Message) { + xxx_messageInfo_Permission.Merge(dst, src) +} +func (m *Permission) XXX_Size() int { + return xxx_messageInfo_Permission.Size(m) +} +func (m *Permission) XXX_DiscardUnknown() { + xxx_messageInfo_Permission.DiscardUnknown(m) +} + +var xxx_messageInfo_Permission proto.InternalMessageInfo + +func (m *Permission) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type UserSpec struct { + // Name of the PostgreSQL user. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Password of the PostgreSQL user. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // Set of permissions to grant to the user. + Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` + // Number of database connections that should be available to the user. + ConnLimit *wrappers.Int64Value `protobuf:"bytes,4,opt,name=conn_limit,json=connLimit,proto3" json:"conn_limit,omitempty"` + // Postgresql settings for this user + Settings *UserSettings `protobuf:"bytes,5,opt,name=settings,proto3" json:"settings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserSpec) Reset() { *m = UserSpec{} } +func (m *UserSpec) String() string { return proto.CompactTextString(m) } +func (*UserSpec) ProtoMessage() {} +func (*UserSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{2} +} +func (m *UserSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserSpec.Unmarshal(m, b) +} +func (m *UserSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserSpec.Marshal(b, m, deterministic) +} +func (dst *UserSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSpec.Merge(dst, src) +} +func (m *UserSpec) XXX_Size() int { + return xxx_messageInfo_UserSpec.Size(m) +} +func (m *UserSpec) XXX_DiscardUnknown() { + xxx_messageInfo_UserSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSpec proto.InternalMessageInfo + +func (m *UserSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UserSpec) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *UserSpec) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +func (m *UserSpec) GetConnLimit() *wrappers.Int64Value { + if m != nil { + return m.ConnLimit + } + return nil +} + +func (m *UserSpec) GetSettings() *UserSettings { + if m != nil { + return m.Settings + } + return nil +} + +// Postgresql user settings config +type UserSettings struct { + 
DefaultTransactionIsolation UserSettings_TransactionIsolation `protobuf:"varint,1,opt,name=default_transaction_isolation,json=defaultTransactionIsolation,proto3,enum=yandex.cloud.mdb.postgresql.v1.UserSettings_TransactionIsolation" json:"default_transaction_isolation,omitempty"` + // in milliseconds. + LockTimeout *wrappers.Int64Value `protobuf:"bytes,2,opt,name=lock_timeout,json=lockTimeout,proto3" json:"lock_timeout,omitempty"` + // in milliseconds. + LogMinDurationStatement *wrappers.Int64Value `protobuf:"bytes,3,opt,name=log_min_duration_statement,json=logMinDurationStatement,proto3" json:"log_min_duration_statement,omitempty"` + SynchronousCommit UserSettings_SynchronousCommit `protobuf:"varint,4,opt,name=synchronous_commit,json=synchronousCommit,proto3,enum=yandex.cloud.mdb.postgresql.v1.UserSettings_SynchronousCommit" json:"synchronous_commit,omitempty"` + // in bytes. + TempFileLimit *wrappers.Int64Value `protobuf:"bytes,5,opt,name=temp_file_limit,json=tempFileLimit,proto3" json:"temp_file_limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserSettings) Reset() { *m = UserSettings{} } +func (m *UserSettings) String() string { return proto.CompactTextString(m) } +func (*UserSettings) ProtoMessage() {} +func (*UserSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_user_86f81a23ffeb3ce2, []int{3} +} +func (m *UserSettings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserSettings.Unmarshal(m, b) +} +func (m *UserSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserSettings.Marshal(b, m, deterministic) +} +func (dst *UserSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSettings.Merge(dst, src) +} +func (m *UserSettings) XXX_Size() int { + return xxx_messageInfo_UserSettings.Size(m) +} +func (m *UserSettings) XXX_DiscardUnknown() { + xxx_messageInfo_UserSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSettings proto.InternalMessageInfo + +func (m *UserSettings) GetDefaultTransactionIsolation() UserSettings_TransactionIsolation { + if m != nil { + return m.DefaultTransactionIsolation + } + return UserSettings_TRANSACTION_ISOLATION_UNSPECIFIED +} + +func (m *UserSettings) GetLockTimeout() *wrappers.Int64Value { + if m != nil { + return m.LockTimeout + } + return nil +} + +func (m *UserSettings) GetLogMinDurationStatement() *wrappers.Int64Value { + if m != nil { + return m.LogMinDurationStatement + } + return nil +} + +func (m *UserSettings) GetSynchronousCommit() UserSettings_SynchronousCommit { + if m != nil { + return m.SynchronousCommit + } + return UserSettings_SYNCHRONOUS_COMMIT_UNSPECIFIED +} + +func (m *UserSettings) GetTempFileLimit() *wrappers.Int64Value { + if m != nil { + return m.TempFileLimit + } + return nil +} + +func init() { + proto.RegisterType((*User)(nil), "yandex.cloud.mdb.postgresql.v1.User") + proto.RegisterType((*Permission)(nil), "yandex.cloud.mdb.postgresql.v1.Permission") + proto.RegisterType((*UserSpec)(nil), "yandex.cloud.mdb.postgresql.v1.UserSpec") + proto.RegisterType((*UserSettings)(nil), "yandex.cloud.mdb.postgresql.v1.UserSettings") + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.UserSettings_SynchronousCommit", UserSettings_SynchronousCommit_name, UserSettings_SynchronousCommit_value) + proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.UserSettings_LogStatement", UserSettings_LogStatement_name, UserSettings_LogStatement_value) + 
proto.RegisterEnum("yandex.cloud.mdb.postgresql.v1.UserSettings_TransactionIsolation", UserSettings_TransactionIsolation_name, UserSettings_TransactionIsolation_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/user.proto", fileDescriptor_user_86f81a23ffeb3ce2) +} + +var fileDescriptor_user_86f81a23ffeb3ce2 = []byte{ + // 848 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xdf, 0x6e, 0xdb, 0x54, + 0x18, 0xc7, 0x49, 0x8a, 0xd2, 0x2f, 0xed, 0x70, 0x8f, 0xd8, 0xc8, 0x3a, 0x52, 0x4a, 0xc6, 0xa6, + 0xb4, 0x22, 0xf6, 0x9c, 0xa1, 0x69, 0x08, 0x56, 0xc9, 0x49, 0x5c, 0x66, 0xc9, 0xb1, 0x23, 0xdb, + 0x05, 0x56, 0x84, 0x8e, 0x9c, 0xf8, 0xd4, 0xb3, 0xb0, 0x7d, 0x82, 0xcf, 0x71, 0xc7, 0xb8, 0xe7, + 0xa6, 0xaf, 0xc3, 0x3b, 0xd0, 0x3d, 0x01, 0x8f, 0x80, 0xb8, 0x44, 0x5c, 0x72, 0x85, 0xe2, 0x24, + 0x4d, 0xbb, 0x84, 0x54, 0x93, 0xd8, 0x9d, 0xfd, 0xfb, 0xf3, 0x7d, 0xc7, 0xbf, 0xf3, 0x93, 0x0c, + 0x7b, 0x2f, 0xbd, 0xc4, 0x27, 0x3f, 0xc9, 0xc3, 0x88, 0x66, 0xbe, 0x1c, 0xfb, 0x03, 0x79, 0x44, + 0x19, 0x0f, 0x52, 0xc2, 0x7e, 0x8c, 0xe4, 0x53, 0x45, 0xce, 0x18, 0x49, 0xa5, 0x51, 0x4a, 0x39, + 0x45, 0x3b, 0x13, 0xa9, 0x94, 0x4b, 0xa5, 0xd8, 0x1f, 0x48, 0x73, 0xa9, 0x74, 0xaa, 0x6c, 0xef, + 0x04, 0x94, 0x06, 0x11, 0x91, 0x73, 0xf5, 0x20, 0x3b, 0x91, 0x5f, 0xa4, 0xde, 0x68, 0x44, 0x52, + 0x36, 0xf1, 0x6f, 0xd7, 0xae, 0xac, 0x3a, 0xf5, 0xa2, 0xd0, 0xf7, 0x78, 0x48, 0x93, 0x09, 0x5d, + 0xff, 0x4b, 0x80, 0xd2, 0x11, 0x23, 0x29, 0x42, 0x50, 0x4a, 0xbc, 0x98, 0x54, 0x85, 0x5d, 0xa1, + 0xb1, 0x6e, 0xe7, 0xcf, 0xa8, 0x06, 0x30, 0x8c, 0x32, 0xc6, 0x49, 0x8a, 0x43, 0xbf, 0x5a, 0xc8, + 0x99, 0xf5, 0x29, 0xa2, 0xfb, 0xc8, 0x80, 0xca, 0x88, 0xa4, 0x71, 0xc8, 0x58, 0x48, 0x13, 0x56, + 0x2d, 0xee, 0x16, 0x1b, 0x95, 0xd6, 0xbe, 0xb4, 0xfa, 0xc0, 0x52, 0xff, 0xc2, 0x62, 0x5f, 0xb6, + 0xe7, 0xcb, 0x68, 0x92, 0xe0, 0x28, 0x8c, 0x43, 0x5e, 0x2d, 0xed, 0x0a, 0x8d, 0xa2, 0xbd, 0x3e, + 0x46, 0x8c, 0x31, 0x80, 0x9e, 0x42, 0x99, 0x11, 0xce, 0xc3, 0x24, 0x60, 0xd5, 0xb5, 0x5d, 0xa1, + 0x51, 0x69, 0x7d, 0x7a, 0xdd, 0xa6, 0xf1, 0x77, 0x39, 0x53, 0x8f, 0x7d, 0xe1, 0xae, 0x2b, 0x00, + 0xf3, 0x33, 0xa0, 0xbb, 0xb0, 0xe9, 0x7b, 0xdc, 0x1b, 0x78, 0x8c, 0xe0, 0x4b, 0x01, 0x6c, 0xcc, + 0x40, 0xd3, 0x8b, 0x49, 0xfd, 0xb7, 0x02, 0x94, 0xf3, 0x69, 0x23, 0x32, 0x44, 0xca, 0xe5, 0xa4, + 0xda, 0xb5, 0x3f, 0xcf, 0x15, 0xe1, 0xef, 0x73, 0x65, 0xf3, 0x3b, 0xaf, 0xf9, 0xb3, 0xda, 0x3c, + 0x7e, 0xd0, 0xfc, 0x1c, 0x7f, 0xbf, 0x7f, 0xf6, 0x4a, 0x29, 0x7d, 0xf9, 0xe4, 0xd1, 0xc3, 0x69, + 0x90, 0x7b, 0x50, 0x1e, 0x79, 0x8c, 0xbd, 0xa0, 0xe9, 0x34, 0xc6, 0xf6, 0xe6, 0xd8, 0x76, 0xf6, + 0x4a, 0x59, 0x7b, 0xdc, 0x54, 0x5a, 0x8f, 0xed, 0x0b, 0xfa, 0x7f, 0x0e, 0xb5, 0xbb, 0x10, 0x6a, + 0xa5, 0x75, 0x47, 0x9a, 0x54, 0x46, 0x9a, 0x55, 0x46, 0xd2, 0x13, 0xfe, 0xe8, 0xb3, 0xaf, 0xbd, + 0x28, 0x23, 0xed, 0xf2, 0x3f, 0xe7, 0x4a, 0xe9, 0xe0, 0x89, 0xf2, 0xe0, 0xed, 0x64, 0xff, 0x6b, + 0x19, 0x36, 0x2e, 0x53, 0xe8, 0x17, 0x01, 0x6a, 0x3e, 0x39, 0xf1, 0xb2, 0x88, 0x63, 0x9e, 0x7a, + 0x09, 0xf3, 0x86, 0xe3, 0x76, 0xe2, 0x90, 0xd1, 0x28, 0xef, 0x69, 0x1e, 0xf3, 0x8d, 0x96, 0xfa, + 0x26, 0x0b, 0x25, 0x77, 0x3e, 0x49, 0x9f, 0x0d, 0xb2, 0xef, 0x4c, 0xf7, 0x2c, 0x23, 0xd1, 0x01, + 0x6c, 0x44, 0x74, 0xf8, 0x03, 0xe6, 0x61, 0x4c, 0x68, 0xc6, 0xf3, 0x5b, 0x5a, 0x1d, 0x95, 0x5d, + 0x19, 0x1b, 0xdc, 0x89, 0x1e, 0x7d, 0x0b, 0xdb, 0x11, 0x0d, 0x70, 0x1c, 0x26, 0xd8, 0xcf, 0xd2, + 0x7c, 0x26, 0x66, 0xdc, 0xe3, 0x24, 0x26, 0x09, 0xaf, 0x16, 0xaf, 0x9f, 0xf6, 0x41, 0x44, 0x83, + 0x5e, 0x98, 0x74, 
0xa7, 0x66, 0x67, 0xe6, 0x45, 0x31, 0x20, 0xf6, 0x32, 0x19, 0x3e, 0x4f, 0x69, + 0x42, 0x33, 0x86, 0x87, 0x34, 0x9e, 0x5d, 0xe5, 0x8d, 0xd6, 0xc1, 0x1b, 0xa5, 0xe2, 0xcc, 0xc7, + 0x74, 0xf2, 0x29, 0xf6, 0x16, 0x7b, 0x1d, 0x42, 0x1d, 0x78, 0x8f, 0x93, 0x78, 0x84, 0x4f, 0xc2, + 0x88, 0x4c, 0x6b, 0xb3, 0x76, 0xfd, 0xe9, 0x37, 0xc7, 0x9e, 0xc3, 0x30, 0x22, 0x79, 0x61, 0xea, + 0xbf, 0x0b, 0xb0, 0xb5, 0xb0, 0x0d, 0xd5, 0x61, 0xc7, 0x79, 0x66, 0x76, 0x9e, 0xda, 0x96, 0x69, + 0x1d, 0x39, 0xb8, 0x63, 0xf5, 0x7a, 0xba, 0x8b, 0x8f, 0x4c, 0xa7, 0xaf, 0x75, 0xf4, 0x43, 0x5d, + 0xeb, 0x8a, 0xef, 0xa0, 0xdb, 0x70, 0x73, 0x89, 0xc6, 0x32, 0x45, 0x01, 0x6d, 0xc3, 0xad, 0x65, + 0xd4, 0xe1, 0xa1, 0x58, 0x40, 0x1f, 0x42, 0x75, 0x09, 0x67, 0x58, 0x1d, 0xd5, 0x10, 0x8b, 0xe8, + 0x2e, 0x7c, 0xb4, 0x84, 0xb5, 0xb5, 0x9e, 0xe5, 0x6a, 0xf8, 0x1b, 0x5b, 0x77, 0x35, 0xb1, 0xb4, + 0x5a, 0xa4, 0xf6, 0xfb, 0xc6, 0x33, 0x71, 0xad, 0x7e, 0x26, 0xc0, 0x86, 0x41, 0x83, 0xf9, 0xed, + 0xd4, 0xe0, 0xb6, 0x61, 0x7d, 0x85, 0x1d, 0x57, 0x75, 0xb5, 0x9e, 0x66, 0xbe, 0xfe, 0x39, 0xb7, + 0x00, 0x5d, 0xa5, 0x4d, 0xcb, 0xd4, 0x44, 0x01, 0xdd, 0x84, 0xad, 0xab, 0x78, 0xb7, 0x6b, 0x88, + 0x85, 0x45, 0xb8, 0x67, 0x75, 0xc5, 0xe2, 0x22, 0xac, 0x1a, 0x86, 0x58, 0xaa, 0xff, 0x21, 0xc0, + 0xfb, 0x4b, 0xcb, 0x7c, 0x0f, 0x3e, 0x76, 0x6d, 0xd5, 0x74, 0xd4, 0x8e, 0xab, 0x5b, 0x26, 0xd6, + 0x1d, 0xcb, 0x50, 0xf3, 0xa7, 0xab, 0x87, 0xdb, 0x87, 0xfb, 0xcb, 0x65, 0xb6, 0xa6, 0x76, 0xf1, + 0x91, 0x39, 0x89, 0xc0, 0xd5, 0xba, 0xa2, 0x80, 0x1a, 0xf0, 0xc9, 0x0a, 0xed, 0x5c, 0x59, 0x40, + 0x7b, 0x70, 0xef, 0xbf, 0x94, 0x7d, 0x4d, 0x75, 0xd5, 0xb6, 0xa1, 0xe5, 0x26, 0xb1, 0x88, 0xee, + 0x43, 0x7d, 0xb9, 0xd4, 0xd1, 0x6c, 0x5d, 0x35, 0xf4, 0xe3, 0xb1, 0x58, 0x2c, 0xb5, 0xad, 0xe3, + 0x5e, 0x10, 0xf2, 0xe7, 0xd9, 0x40, 0x1a, 0xd2, 0x58, 0x9e, 0x54, 0xbe, 0x39, 0xf9, 0xa1, 0x05, + 0xb4, 0x19, 0x90, 0x24, 0xaf, 0xa4, 0xbc, 0xfa, 0xa7, 0xfa, 0xc5, 0xfc, 0x6d, 0xf0, 0x6e, 0x6e, + 0x78, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x72, 0x36, 0x79, 0x88, 0x07, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user_service.pb.go new file mode 100644 index 000000000..d3a80f9d6 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1/user_service.pb.go @@ -0,0 +1,1122 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/postgresql/v1/user_service.proto + +package postgresql // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetUserRequest struct { + // Required. ID of the PostgreSQL cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the PostgreSQL User resource to return. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUserRequest) Reset() { *m = GetUserRequest{} } +func (m *GetUserRequest) String() string { return proto.CompactTextString(m) } +func (*GetUserRequest) ProtoMessage() {} +func (*GetUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{0} +} +func (m *GetUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUserRequest.Unmarshal(m, b) +} +func (m *GetUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUserRequest.Marshal(b, m, deterministic) +} +func (dst *GetUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUserRequest.Merge(dst, src) +} +func (m *GetUserRequest) XXX_Size() int { + return xxx_messageInfo_GetUserRequest.Size(m) +} +func (m *GetUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUserRequest proto.InternalMessageInfo + +func (m *GetUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type ListUsersRequest struct { + // Required. ID of the cluster to list PostgreSQL users in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsersRequest) Reset() { *m = ListUsersRequest{} } +func (m *ListUsersRequest) String() string { return proto.CompactTextString(m) } +func (*ListUsersRequest) ProtoMessage() {} +func (*ListUsersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{1} +} +func (m *ListUsersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsersRequest.Unmarshal(m, b) +} +func (m *ListUsersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsersRequest.Marshal(b, m, deterministic) +} +func (dst *ListUsersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsersRequest.Merge(dst, src) +} +func (m *ListUsersRequest) XXX_Size() int { + return xxx_messageInfo_ListUsersRequest.Size(m) +} +func (m *ListUsersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsersRequest proto.InternalMessageInfo + +func (m *ListUsersRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListUsersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUsersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListUsersResponse struct { + // List of PostgreSQL User resources. + Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value + // for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsersResponse) Reset() { *m = ListUsersResponse{} } +func (m *ListUsersResponse) String() string { return proto.CompactTextString(m) } +func (*ListUsersResponse) ProtoMessage() {} +func (*ListUsersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{2} +} +func (m *ListUsersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsersResponse.Unmarshal(m, b) +} +func (m *ListUsersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsersResponse.Marshal(b, m, deterministic) +} +func (dst *ListUsersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsersResponse.Merge(dst, src) +} +func (m *ListUsersResponse) XXX_Size() int { + return xxx_messageInfo_ListUsersResponse.Size(m) +} +func (m *ListUsersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsersResponse proto.InternalMessageInfo + +func (m *ListUsersResponse) GetUsers() []*User { + if m != nil { + return m.Users + } + return nil +} + +func (m *ListUsersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateUserRequest struct { + // Required. ID of the PostgreSQL cluster to create a user in. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Properties of the user to be created. + UserSpec *UserSpec `protobuf:"bytes,2,opt,name=user_spec,json=userSpec,proto3" json:"user_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserRequest) Reset() { *m = CreateUserRequest{} } +func (m *CreateUserRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUserRequest) ProtoMessage() {} +func (*CreateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{3} +} +func (m *CreateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserRequest.Unmarshal(m, b) +} +func (m *CreateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserRequest.Marshal(b, m, deterministic) +} +func (dst *CreateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserRequest.Merge(dst, src) +} +func (m *CreateUserRequest) XXX_Size() int { + return xxx_messageInfo_CreateUserRequest.Size(m) +} +func (m *CreateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserRequest proto.InternalMessageInfo + +func (m *CreateUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateUserRequest) GetUserSpec() *UserSpec { + if m != nil { + return m.UserSpec + } + return nil +} + +type CreateUserMetadata struct { + // ID of the PostgreSQL cluster the user is being created in. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the user that is being created. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserMetadata) Reset() { *m = CreateUserMetadata{} } +func (m *CreateUserMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateUserMetadata) ProtoMessage() {} +func (*CreateUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{4} +} +func (m *CreateUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserMetadata.Unmarshal(m, b) +} +func (m *CreateUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserMetadata.Merge(dst, src) +} +func (m *CreateUserMetadata) XXX_Size() int { + return xxx_messageInfo_CreateUserMetadata.Size(m) +} +func (m *CreateUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserMetadata proto.InternalMessageInfo + +func (m *CreateUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type UpdateUserRequest struct { + // Required. ID of the PostgreSQL cluster the user belongs to. + // To get the cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the user to be updated. + // To get the name of the user use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Field mask that specifies which fields of the PostgreSQL User resource should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New password for the user. + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + // New set of permissions for the user. + Permissions []*Permission `protobuf:"bytes,5,rep,name=permissions,proto3" json:"permissions,omitempty"` + // Number of connections that should be available to the user. 
+ ConnLimit int64 `protobuf:"varint,6,opt,name=conn_limit,json=connLimit,proto3" json:"conn_limit,omitempty"` + // Postgresql settings for this user + Settings *UserSettings `protobuf:"bytes,7,opt,name=settings,proto3" json:"settings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserRequest) Reset() { *m = UpdateUserRequest{} } +func (m *UpdateUserRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUserRequest) ProtoMessage() {} +func (*UpdateUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{5} +} +func (m *UpdateUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserRequest.Unmarshal(m, b) +} +func (m *UpdateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserRequest.Merge(dst, src) +} +func (m *UpdateUserRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUserRequest.Size(m) +} +func (m *UpdateUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserRequest proto.InternalMessageInfo + +func (m *UpdateUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *UpdateUserRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUserRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *UpdateUserRequest) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +func (m *UpdateUserRequest) GetConnLimit() int64 { + if m != nil { + return m.ConnLimit + } + return 0 +} + +func (m *UpdateUserRequest) GetSettings() *UserSettings { + if m != nil { + return m.Settings + } + return nil +} + +type UpdateUserMetadata struct { + // ID of the PostgreSQL cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being updated. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserMetadata) Reset() { *m = UpdateUserMetadata{} } +func (m *UpdateUserMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateUserMetadata) ProtoMessage() {} +func (*UpdateUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{6} +} +func (m *UpdateUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserMetadata.Unmarshal(m, b) +} +func (m *UpdateUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserMetadata.Merge(dst, src) +} +func (m *UpdateUserMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateUserMetadata.Size(m) +} +func (m *UpdateUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserMetadata proto.InternalMessageInfo + +func (m *UpdateUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type DeleteUserRequest struct { + // Required. ID of the PostgreSQL cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the user to delete. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserRequest) Reset() { *m = DeleteUserRequest{} } +func (m *DeleteUserRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUserRequest) ProtoMessage() {} +func (*DeleteUserRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{7} +} +func (m *DeleteUserRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserRequest.Unmarshal(m, b) +} +func (m *DeleteUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUserRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteUserRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserRequest.Merge(dst, src) +} +func (m *DeleteUserRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUserRequest.Size(m) +} +func (m *DeleteUserRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserRequest proto.InternalMessageInfo + +func (m *DeleteUserRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteUserRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type DeleteUserMetadata struct { + // ID of the PostgreSQL cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being deleted. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserMetadata) Reset() { *m = DeleteUserMetadata{} } +func (m *DeleteUserMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteUserMetadata) ProtoMessage() {} +func (*DeleteUserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{8} +} +func (m *DeleteUserMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserMetadata.Unmarshal(m, b) +} +func (m *DeleteUserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUserMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteUserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserMetadata.Merge(dst, src) +} +func (m *DeleteUserMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteUserMetadata.Size(m) +} +func (m *DeleteUserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserMetadata proto.InternalMessageInfo + +func (m *DeleteUserMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteUserMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type GrantUserPermissionRequest struct { + // Required. ID of the PostgreSQL cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the user to grant the permission to. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Permission that should be granted to the specified user. 
+ Permission *Permission `protobuf:"bytes,3,opt,name=permission,proto3" json:"permission,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrantUserPermissionRequest) Reset() { *m = GrantUserPermissionRequest{} } +func (m *GrantUserPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*GrantUserPermissionRequest) ProtoMessage() {} +func (*GrantUserPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{9} +} +func (m *GrantUserPermissionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrantUserPermissionRequest.Unmarshal(m, b) +} +func (m *GrantUserPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrantUserPermissionRequest.Marshal(b, m, deterministic) +} +func (dst *GrantUserPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrantUserPermissionRequest.Merge(dst, src) +} +func (m *GrantUserPermissionRequest) XXX_Size() int { + return xxx_messageInfo_GrantUserPermissionRequest.Size(m) +} +func (m *GrantUserPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GrantUserPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GrantUserPermissionRequest proto.InternalMessageInfo + +func (m *GrantUserPermissionRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GrantUserPermissionRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *GrantUserPermissionRequest) GetPermission() *Permission { + if m != nil { + return m.Permission + } + return nil +} + +type GrantUserPermissionMetadata struct { + // ID of the PostgreSQL cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user that is being granted a permission. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrantUserPermissionMetadata) Reset() { *m = GrantUserPermissionMetadata{} } +func (m *GrantUserPermissionMetadata) String() string { return proto.CompactTextString(m) } +func (*GrantUserPermissionMetadata) ProtoMessage() {} +func (*GrantUserPermissionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{10} +} +func (m *GrantUserPermissionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrantUserPermissionMetadata.Unmarshal(m, b) +} +func (m *GrantUserPermissionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrantUserPermissionMetadata.Marshal(b, m, deterministic) +} +func (dst *GrantUserPermissionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrantUserPermissionMetadata.Merge(dst, src) +} +func (m *GrantUserPermissionMetadata) XXX_Size() int { + return xxx_messageInfo_GrantUserPermissionMetadata.Size(m) +} +func (m *GrantUserPermissionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_GrantUserPermissionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_GrantUserPermissionMetadata proto.InternalMessageInfo + +func (m *GrantUserPermissionMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GrantUserPermissionMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +type RevokeUserPermissionRequest struct { + // Required. ID of the PostgreSQL cluster the user belongs to. + // To get the cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. Name of the user to revoke a permission from. + // To get the name of the user, use a [UserService.List] request. + UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + // Name of the database that the user should lose access to. 
+ DatabaseName string `protobuf:"bytes,3,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserPermissionRequest) Reset() { *m = RevokeUserPermissionRequest{} } +func (m *RevokeUserPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*RevokeUserPermissionRequest) ProtoMessage() {} +func (*RevokeUserPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{11} +} +func (m *RevokeUserPermissionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserPermissionRequest.Unmarshal(m, b) +} +func (m *RevokeUserPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserPermissionRequest.Marshal(b, m, deterministic) +} +func (dst *RevokeUserPermissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserPermissionRequest.Merge(dst, src) +} +func (m *RevokeUserPermissionRequest) XXX_Size() int { + return xxx_messageInfo_RevokeUserPermissionRequest.Size(m) +} +func (m *RevokeUserPermissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserPermissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserPermissionRequest proto.InternalMessageInfo + +func (m *RevokeUserPermissionRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RevokeUserPermissionRequest) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func (m *RevokeUserPermissionRequest) GetDatabaseName() string { + if m != nil { + return m.DatabaseName + } + return "" +} + +type RevokeUserPermissionMetadata struct { + // ID of the PostgreSQL cluster the user belongs to. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Name of the user whose permission is being revoked. 
+ UserName string `protobuf:"bytes,2,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RevokeUserPermissionMetadata) Reset() { *m = RevokeUserPermissionMetadata{} } +func (m *RevokeUserPermissionMetadata) String() string { return proto.CompactTextString(m) } +func (*RevokeUserPermissionMetadata) ProtoMessage() {} +func (*RevokeUserPermissionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_user_service_6f25ccbd661d407e, []int{12} +} +func (m *RevokeUserPermissionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RevokeUserPermissionMetadata.Unmarshal(m, b) +} +func (m *RevokeUserPermissionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RevokeUserPermissionMetadata.Marshal(b, m, deterministic) +} +func (dst *RevokeUserPermissionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RevokeUserPermissionMetadata.Merge(dst, src) +} +func (m *RevokeUserPermissionMetadata) XXX_Size() int { + return xxx_messageInfo_RevokeUserPermissionMetadata.Size(m) +} +func (m *RevokeUserPermissionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RevokeUserPermissionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RevokeUserPermissionMetadata proto.InternalMessageInfo + +func (m *RevokeUserPermissionMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RevokeUserPermissionMetadata) GetUserName() string { + if m != nil { + return m.UserName + } + return "" +} + +func init() { + proto.RegisterType((*GetUserRequest)(nil), "yandex.cloud.mdb.postgresql.v1.GetUserRequest") + proto.RegisterType((*ListUsersRequest)(nil), "yandex.cloud.mdb.postgresql.v1.ListUsersRequest") + proto.RegisterType((*ListUsersResponse)(nil), "yandex.cloud.mdb.postgresql.v1.ListUsersResponse") + proto.RegisterType((*CreateUserRequest)(nil), "yandex.cloud.mdb.postgresql.v1.CreateUserRequest") + proto.RegisterType((*CreateUserMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.CreateUserMetadata") + proto.RegisterType((*UpdateUserRequest)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateUserRequest") + proto.RegisterType((*UpdateUserMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.UpdateUserMetadata") + proto.RegisterType((*DeleteUserRequest)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteUserRequest") + proto.RegisterType((*DeleteUserMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.DeleteUserMetadata") + proto.RegisterType((*GrantUserPermissionRequest)(nil), "yandex.cloud.mdb.postgresql.v1.GrantUserPermissionRequest") + proto.RegisterType((*GrantUserPermissionMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.GrantUserPermissionMetadata") + proto.RegisterType((*RevokeUserPermissionRequest)(nil), "yandex.cloud.mdb.postgresql.v1.RevokeUserPermissionRequest") + proto.RegisterType((*RevokeUserPermissionMetadata)(nil), "yandex.cloud.mdb.postgresql.v1.RevokeUserPermissionMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UserServiceClient is the client API for UserService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UserServiceClient interface { + // Returns the specified PostgreSQL User resource. + // + // To get the list of available PostgreSQL User resources, make a [List] request. + Get(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*User, error) + // Retrieves the list of PostgreSQL User resources in the specified cluster. + List(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) + // Creates a PostgreSQL user in the specified cluster. + Create(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified PostgreSQL user. + Update(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified PostgreSQL user. + Delete(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Grants permission to the specified PostgreSQL user. + GrantPermission(ctx context.Context, in *GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Revokes permission from the specified PostgreSQL user. + RevokePermission(ctx context.Context, in *RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type userServiceClient struct { + cc *grpc.ClientConn +} + +func NewUserServiceClient(cc *grpc.ClientConn) UserServiceClient { + return &userServiceClient{cc} +} + +func (c *userServiceClient) Get(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*User, error) { + out := new(User) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) List(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) { + out := new(ListUsersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Create(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Update(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) Delete(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) GrantPermission(ctx context.Context, in *GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/GrantPermission", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) RevokePermission(ctx context.Context, in *RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.postgresql.v1.UserService/RevokePermission", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserServiceServer is the server API for UserService service. +type UserServiceServer interface { + // Returns the specified PostgreSQL User resource. + // + // To get the list of available PostgreSQL User resources, make a [List] request. + Get(context.Context, *GetUserRequest) (*User, error) + // Retrieves the list of PostgreSQL User resources in the specified cluster. + List(context.Context, *ListUsersRequest) (*ListUsersResponse, error) + // Creates a PostgreSQL user in the specified cluster. + Create(context.Context, *CreateUserRequest) (*operation.Operation, error) + // Updates the specified PostgreSQL user. + Update(context.Context, *UpdateUserRequest) (*operation.Operation, error) + // Deletes the specified PostgreSQL user. + Delete(context.Context, *DeleteUserRequest) (*operation.Operation, error) + // Grants permission to the specified PostgreSQL user. + GrantPermission(context.Context, *GrantUserPermissionRequest) (*operation.Operation, error) + // Revokes permission from the specified PostgreSQL user. + RevokePermission(context.Context, *RevokeUserPermissionRequest) (*operation.Operation, error) +} + +func RegisterUserServiceServer(s *grpc.Server, srv UserServiceServer) { + s.RegisterService(&_UserService_serviceDesc, srv) +} + +func _UserService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Get(ctx, req.(*GetUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).List(ctx, req.(*ListUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Create(ctx, 
req.(*CreateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Update(ctx, req.(*UpdateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).Delete(ctx, req.(*DeleteUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_GrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GrantUserPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).GrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/GrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).GrantPermission(ctx, req.(*GrantUserPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_RevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeUserPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).RevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.postgresql.v1.UserService/RevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).RevokePermission(ctx, req.(*RevokeUserPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UserService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.postgresql.v1.UserService", + HandlerType: (*UserServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _UserService_Get_Handler, + }, + { + MethodName: "List", + Handler: _UserService_List_Handler, + }, + { + MethodName: "Create", + Handler: _UserService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _UserService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _UserService_Delete_Handler, + }, + { + MethodName: "GrantPermission", + Handler: _UserService_GrantPermission_Handler, + }, + { + MethodName: "RevokePermission", + Handler: _UserService_RevokePermission_Handler, + }, + }, + Streams: 
[]grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/postgresql/v1/user_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/postgresql/v1/user_service.proto", fileDescriptor_user_service_6f25ccbd661d407e) +} + +var fileDescriptor_user_service_6f25ccbd661d407e = []byte{ + // 1043 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xd6, 0xd6, 0x8e, 0xb1, 0x5f, 0x37, 0x6d, 0x33, 0x12, 0x92, 0xe5, 0x34, 0x55, 0x58, 0x4a, + 0x1b, 0x5c, 0xbc, 0xeb, 0x75, 0xf9, 0x28, 0x4e, 0x53, 0x41, 0x02, 0x0d, 0x88, 0xa4, 0x89, 0x36, + 0xed, 0x81, 0x54, 0xc8, 0x1a, 0x7b, 0xa7, 0xcb, 0x2a, 0xde, 0x8f, 0xee, 0xac, 0xdd, 0x26, 0xa5, + 0x12, 0xea, 0xb1, 0x12, 0x17, 0xf8, 0x05, 0x88, 0x03, 0x27, 0x2e, 0xb9, 0xf2, 0x03, 0x5c, 0x89, + 0x5b, 0x10, 0xe2, 0x0f, 0x70, 0xe0, 0x0c, 0x07, 0xa4, 0x9e, 0xd0, 0xcc, 0x6c, 0xbc, 0xeb, 0xd8, + 0x64, 0x9d, 0x38, 0x22, 0xb7, 0x1d, 0xcf, 0xfb, 0xcc, 0xfb, 0x3c, 0xf3, 0x7e, 0x8d, 0x41, 0xdb, + 0xc6, 0x8e, 0x41, 0x1e, 0xab, 0xcd, 0x96, 0xdb, 0x36, 0x54, 0xdb, 0x68, 0xa8, 0x9e, 0x4b, 0x03, + 0xd3, 0x27, 0xf4, 0x61, 0x4b, 0xed, 0x68, 0x6a, 0x9b, 0x12, 0xbf, 0x4e, 0x89, 0xdf, 0xb1, 0x9a, + 0x44, 0xf1, 0x7c, 0x37, 0x70, 0xd1, 0x25, 0x01, 0x51, 0x38, 0x44, 0xb1, 0x8d, 0x86, 0x12, 0x41, + 0x94, 0x8e, 0x56, 0xbc, 0x68, 0xba, 0xae, 0xd9, 0x22, 0x2a, 0xf6, 0x2c, 0x15, 0x3b, 0x8e, 0x1b, + 0xe0, 0xc0, 0x72, 0x1d, 0x2a, 0xd0, 0xc5, 0xd9, 0x70, 0x97, 0xaf, 0x1a, 0xed, 0x07, 0xea, 0x03, + 0x8b, 0xb4, 0x8c, 0xba, 0x8d, 0xe9, 0x56, 0x68, 0x71, 0xa5, 0x8f, 0x92, 0xeb, 0x11, 0x9f, 0x1f, + 0x10, 0x7d, 0x85, 0x76, 0x33, 0x7d, 0x76, 0x1d, 0xdc, 0xb2, 0x8c, 0xf8, 0xf6, 0x9b, 0x23, 0x28, + 0x0b, 0x4d, 0x8b, 0xa1, 0x29, 0x63, 0x7c, 0xc0, 0x8b, 0xbc, 0x0d, 0xe7, 0x96, 0x49, 0x70, 0x8f, + 0x12, 0x5f, 0x27, 0x0f, 0xdb, 0x84, 0x06, 0xe8, 0x1a, 0x40, 0xb3, 0xd5, 0xa6, 0x01, 0xf1, 0xeb, + 0x96, 0x51, 0x90, 0x66, 0xa5, 0xb9, 0xdc, 0xe2, 0xd9, 0x3f, 0xbb, 0x9a, 0xf4, 0xfc, 0x85, 0x96, + 0xbe, 0xb9, 0xf0, 0x4e, 0x45, 0xcf, 0x85, 0xfb, 0x9f, 0x1a, 0xa8, 0x06, 0x39, 0x7e, 0x85, 0x0e, + 0xb6, 0x49, 0xe1, 0x0c, 0xb7, 0x9d, 0x61, 0xb6, 0x7f, 0x75, 0xb5, 0xc9, 0xfb, 0xb8, 0xbc, 0xf3, + 0x61, 0x79, 0xb3, 0x52, 0x7e, 0xbf, 0xfe, 0x45, 0x49, 0x80, 0xdf, 0xbd, 0xae, 0x67, 0x99, 0xfd, + 0x1d, 0x6c, 0x13, 0xf9, 0x5b, 0x09, 0x2e, 0xac, 0x58, 0x94, 0x3b, 0xa7, 0xc7, 0xf2, 0x7e, 0x15, + 0x72, 0x1e, 0x36, 0x49, 0x9d, 0x5a, 0x3b, 0xc2, 0x7b, 0x6a, 0x11, 0x5e, 0x76, 0xb5, 0xcc, 0xcd, + 0x05, 0xad, 0x52, 0xa9, 0xe8, 0x59, 0xb6, 0xb9, 0x61, 0xed, 0x10, 0x34, 0x07, 0xc0, 0x0d, 0x03, + 0x77, 0x8b, 0x38, 0x85, 0x14, 0x3f, 0x35, 0xf7, 0xfc, 0x85, 0x36, 0xc1, 0x2d, 0x75, 0x7e, 0xca, + 0x5d, 0xb6, 0x27, 0x3f, 0x82, 0xa9, 0x18, 0x27, 0xea, 0xb9, 0x0e, 0x25, 0xa8, 0x06, 0x13, 0x8c, + 0x35, 0x2d, 0x48, 0xb3, 0xa9, 0xb9, 0x7c, 0xf5, 0xb2, 0x72, 0x78, 0x8a, 0x28, 0xfc, 0x3a, 0x05, + 0x04, 0x5d, 0x81, 0xf3, 0x0e, 0x79, 0x1c, 0xd4, 0x63, 0xfe, 0xf9, 0x3d, 0xe9, 0x93, 0xec, 0xe7, + 0xf5, 0x9e, 0xe3, 0x6f, 0x24, 0x98, 0x5a, 0xf2, 0x09, 0x0e, 0xc8, 0xb1, 0x83, 0xf1, 0x59, 0x18, + 0x0c, 0xea, 0x91, 0x26, 0x77, 0x92, 0xaf, 0xce, 0x8d, 0x42, 0x75, 0xc3, 0x23, 0xcd, 0xc5, 0x34, + 0x3b, 0x55, 0x44, 0x87, 0xad, 0xe5, 0x75, 0x40, 0x11, 0x9d, 0x55, 0x12, 0x60, 0x03, 0x07, 0x18, + 0xcd, 0x0c, 0xf2, 0x89, 0x33, 0x98, 0x1e, 0x48, 0x87, 0x58, 0xbc, 0xbf, 0x4f, 0xc1, 0xd4, 0x3d, + 0xcf, 0x18, 0x47, 0xe1, 0x18, 0xe9, 0x86, 0xe6, 0x21, 0xdf, 0xe6, 0xde, 0x79, 0x31, 0xf2, 0x24, + 0xc8, 0x57, 0x8b, 0x8a, 0xa8, 0x57, 0x65, 0xbf, 0x5e, 0x95, 0xdb, 0xac, 
0x5e, 0x57, 0x31, 0xdd, + 0xd2, 0x41, 0x98, 0xb3, 0x6f, 0xf4, 0x06, 0x64, 0x3d, 0x4c, 0xe9, 0x23, 0xd7, 0x37, 0x0a, 0xe9, + 0x28, 0x7d, 0x6e, 0x94, 0xb5, 0xea, 0x0d, 0xbd, 0xb7, 0x85, 0x56, 0x20, 0xef, 0x11, 0xdf, 0xb6, + 0x28, 0x65, 0x2d, 0xa1, 0x30, 0xc1, 0xd3, 0xa5, 0x94, 0x14, 0x83, 0xf5, 0x1e, 0x44, 0x8f, 0xc3, + 0xd1, 0x55, 0x80, 0xa6, 0xeb, 0x38, 0xf5, 0x96, 0x65, 0x5b, 0x41, 0x21, 0xc3, 0xf3, 0x3b, 0xfb, + 0xb2, 0xab, 0xa5, 0x6f, 0x2d, 0x68, 0xec, 0x5a, 0x5c, 0xc7, 0x59, 0x61, 0x5b, 0xe8, 0x13, 0xc8, + 0x52, 0x12, 0x04, 0x96, 0x63, 0xd2, 0xc2, 0x2b, 0x5c, 0xd7, 0x5b, 0x23, 0xc5, 0x3d, 0xc4, 0xe8, + 0x3d, 0x34, 0x8b, 0x7a, 0x14, 0xa2, 0x13, 0x89, 0xfa, 0x57, 0x30, 0xf5, 0x11, 0x69, 0x91, 0xd3, + 0x09, 0x3a, 0xd3, 0x13, 0x79, 0x3f, 0x11, 0x3d, 0xbf, 0x4b, 0x50, 0x5c, 0xf6, 0xb1, 0xc3, 0x5b, + 0x44, 0x2c, 0x72, 0xff, 0x77, 0x3a, 0xaf, 0x03, 0x44, 0xb9, 0x12, 0x66, 0xf3, 0x11, 0x32, 0x2d, + 0xac, 0xf7, 0xd8, 0x19, 0xf2, 0xe7, 0x30, 0x3d, 0x44, 0xd8, 0x89, 0x5c, 0xda, 0x2f, 0x12, 0x4c, + 0xeb, 0xa4, 0xe3, 0x6e, 0x91, 0x53, 0xbe, 0xb5, 0x25, 0x98, 0x64, 0x62, 0x1a, 0x98, 0x12, 0x81, + 0x17, 0xb3, 0xe0, 0x52, 0x88, 0x3f, 0x17, 0xc3, 0x97, 0x63, 0x07, 0x9c, 0xdd, 0x07, 0x71, 0x35, + 0x9b, 0x70, 0x71, 0x98, 0x98, 0x93, 0xb8, 0xa9, 0xea, 0x4f, 0x79, 0xc8, 0x8b, 0xda, 0xe4, 0x6f, + 0x12, 0xf4, 0x83, 0x04, 0xa9, 0x65, 0x12, 0x20, 0x25, 0x29, 0xb4, 0xfd, 0x53, 0xbc, 0x38, 0xd2, + 0x8c, 0x92, 0x6f, 0x3f, 0xfb, 0xf5, 0x8f, 0xef, 0xce, 0x7c, 0x80, 0x6e, 0xa9, 0x36, 0x76, 0xb0, + 0x49, 0x8c, 0x72, 0xff, 0x23, 0x22, 0x24, 0x4d, 0xd5, 0x27, 0x91, 0xa0, 0xa7, 0xfc, 0x69, 0x41, + 0xd5, 0x27, 0x3d, 0x11, 0x4f, 0xd1, 0x8f, 0x12, 0xa4, 0xd9, 0xd8, 0x44, 0x95, 0x24, 0xb7, 0x07, + 0x07, 0x7e, 0x51, 0x3b, 0x02, 0x42, 0x8c, 0x63, 0xb9, 0xc6, 0x59, 0xbf, 0x8d, 0xaa, 0x47, 0x67, + 0x8d, 0x7e, 0x96, 0x20, 0x23, 0xe6, 0x1a, 0x4a, 0xf4, 0x3c, 0x30, 0x8e, 0x8b, 0xaf, 0xf5, 0x43, + 0xa2, 0xc7, 0xd4, 0xda, 0xfe, 0x97, 0x7c, 0x7f, 0x77, 0xaf, 0x54, 0x1c, 0x3a, 0x3b, 0xd3, 0x6c, + 0xc5, 0xa9, 0xbf, 0x27, 0x1f, 0x83, 0x7a, 0x4d, 0x2a, 0xa1, 0xae, 0x04, 0x19, 0xd1, 0x9f, 0x93, + 0xd9, 0x0f, 0x8c, 0xda, 0x51, 0xd8, 0x9b, 0x82, 0xfd, 0x90, 0x19, 0x10, 0xb1, 0x5f, 0xaa, 0x8e, + 0x99, 0x2e, 0x4c, 0xc9, 0x6f, 0x12, 0x64, 0x44, 0x67, 0x4e, 0x56, 0x32, 0x30, 0x3f, 0x46, 0x51, + 0xd2, 0xde, 0xdd, 0x2b, 0x5d, 0x1b, 0xda, 0xfd, 0x5f, 0x3d, 0x38, 0xf2, 0x3f, 0xb6, 0xbd, 0x60, + 0x5b, 0x54, 0x42, 0x69, 0xdc, 0x4a, 0xf8, 0x5b, 0x82, 0xf3, 0xbc, 0x8b, 0x46, 0x7d, 0x01, 0xd5, + 0x12, 0x6b, 0xf7, 0x3f, 0xe7, 0xc9, 0x28, 0x4a, 0xbf, 0x96, 0x76, 0xf7, 0x4a, 0xaf, 0x1f, 0xde, + 0xbc, 0xa3, 0xe8, 0x6d, 0xc8, 0x77, 0xc6, 0x8c, 0x9e, 0xd9, 0x2f, 0x8f, 0x45, 0xf3, 0x1f, 0x09, + 0x2e, 0x88, 0x96, 0x18, 0x93, 0x3d, 0x9f, 0x24, 0xfb, 0x90, 0x89, 0x30, 0x8a, 0xee, 0x67, 0x4c, + 0xf7, 0xe5, 0x84, 0x5e, 0x1c, 0x09, 0xbf, 0x2b, 0xaf, 0x8d, 0x29, 0xdc, 0x3f, 0xa0, 0xb0, 0x26, + 0x95, 0x16, 0xd7, 0x36, 0x57, 0x4d, 0x2b, 0xf8, 0xb2, 0xdd, 0x50, 0x9a, 0xae, 0xad, 0x0a, 0xce, + 0x65, 0xf1, 0x9f, 0xcc, 0x74, 0xcb, 0x26, 0x71, 0x78, 0x96, 0xa9, 0x87, 0xff, 0x59, 0x9b, 0x8f, + 0x56, 0x8d, 0x0c, 0x07, 0x5c, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0x5c, 0xec, 0x33, 0x47, 0xba, + 0x0e, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup.pb.go new file mode 100644 index 000000000..d9a5d9ea6 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup.pb.go @@ -0,0 +1,128 @@ +// Code 
generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/redis/v1alpha/backup.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Description of a Redis backup. For more information, see +// the Managed Service for Redis [documentation](/docs/managed-redis/concepts/backup). +type Backup struct { + // ID of the backup. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the backup belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + // (i.e. when the backup operation was completed). + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // ID of the Redis cluster that the backup was created for. + SourceClusterId string `protobuf:"bytes,4,opt,name=source_cluster_id,json=sourceClusterId,proto3" json:"source_cluster_id,omitempty"` + // Start timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + // (i.e. when the backup operation was started). 
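// Usage sketch (illustrative, not part of the generated code): created_at and
// started_at above are protobuf Timestamp messages rather than time.Time values.
// Assuming a *Backup named b and the github.com/golang/protobuf/ptypes helper
// package (plus fmt and time) available in the calling code, they convert like this:
//
//	if ts := b.GetCreatedAt(); ts != nil {
//		createdAt, err := ptypes.Timestamp(ts) // *timestamp.Timestamp -> time.Time (UTC)
//		if err == nil {
//			fmt.Println("backup created at", createdAt.Format(time.RFC3339))
//		}
//	}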
+ StartedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup) Reset() { *m = Backup{} } +func (m *Backup) String() string { return proto.CompactTextString(m) } +func (*Backup) ProtoMessage() {} +func (*Backup) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_0460337759388c73, []int{0} +} +func (m *Backup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Backup.Unmarshal(m, b) +} +func (m *Backup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Backup.Marshal(b, m, deterministic) +} +func (dst *Backup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup.Merge(dst, src) +} +func (m *Backup) XXX_Size() int { + return xxx_messageInfo_Backup.Size(m) +} +func (m *Backup) XXX_DiscardUnknown() { + xxx_messageInfo_Backup.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup proto.InternalMessageInfo + +func (m *Backup) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Backup) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Backup) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Backup) GetSourceClusterId() string { + if m != nil { + return m.SourceClusterId + } + return "" +} + +func (m *Backup) GetStartedAt() *timestamp.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func init() { + proto.RegisterType((*Backup)(nil), "yandex.cloud.mdb.redis.v1alpha.Backup") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/backup.proto", fileDescriptor_backup_0460337759388c73) +} + +var fileDescriptor_backup_0460337759388c73 = []byte{ + // 264 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xc1, 0x4a, 0x33, 0x31, + 0x14, 0x85, 0x99, 0xf9, 0x7f, 0x8b, 0x13, 0x41, 0x71, 0x56, 0x43, 0x05, 0x2d, 0xae, 0x8a, 0xd2, + 0x04, 0x75, 0x25, 0xae, 0x5a, 0x37, 0xea, 0xb2, 0xb8, 0x72, 0x33, 0x24, 0xb9, 0xe9, 0x34, 0x98, + 0x34, 0x43, 0x72, 0x23, 0xfa, 0xa4, 0xbe, 0x8e, 0x90, 0xa4, 0x5b, 0x5d, 0xe6, 0xe4, 0xbb, 0xe7, + 0x83, 0x43, 0xae, 0xbf, 0xf8, 0x0e, 0xd4, 0x27, 0x93, 0xc6, 0x45, 0x60, 0x16, 0x04, 0xf3, 0x0a, + 0x74, 0x60, 0x1f, 0x37, 0xdc, 0x8c, 0x5b, 0xce, 0x04, 0x97, 0xef, 0x71, 0xa4, 0xa3, 0x77, 0xe8, + 0xda, 0xf3, 0x0c, 0xd3, 0x04, 0x53, 0x0b, 0x82, 0x26, 0x98, 0x16, 0x78, 0x7a, 0x31, 0x38, 0x37, + 0x18, 0xc5, 0x12, 0x2d, 0xe2, 0x86, 0xa1, 0xb6, 0x2a, 0x20, 0xb7, 0xa5, 0xe0, 0xf2, 0xbb, 0x22, + 0x93, 0x55, 0x6a, 0x6c, 0x8f, 0x49, 0xad, 0xa1, 0xab, 0x66, 0xd5, 0xbc, 0x59, 0xd7, 0x1a, 0xda, + 0x33, 0xd2, 0x6c, 0x9c, 0x01, 0xe5, 0x7b, 0x0d, 0x5d, 0x9d, 0xe2, 0xc3, 0x1c, 0x3c, 0x43, 0x7b, + 0x4f, 0x88, 0xf4, 0x8a, 0xa3, 0x82, 0x9e, 0x63, 0xf7, 0x6f, 0x56, 0xcd, 0x8f, 0x6e, 0xa7, 0x34, + 0xdb, 0xe8, 0xde, 0x46, 0x5f, 0xf7, 0xb6, 0x75, 0x53, 0xe8, 0x25, 0xb6, 0x57, 0xe4, 0x34, 0xb8, + 0xe8, 0xa5, 0xea, 0xa5, 0x89, 0x01, 0x73, 0xff, 0xff, 0xd4, 0x7f, 0x92, 0x3f, 0x1e, 0x73, 0x9e, + 0x35, 0x01, 0xb9, 0x2f, 0x9a, 0x83, 0xbf, 0x35, 0x85, 0x5e, 0xe2, 0xea, 0xe5, 0xed, 0x69, 0xd0, + 0xb8, 0x8d, 0x82, 0x4a, 0x67, 0x59, 0xde, 0x69, 0x91, 0x47, 0x1d, 0xdc, 0x62, 0x50, 0xbb, 0x74, + 0xce, 0x7e, 0x5f, 0xfb, 0x21, 0xbd, 0xc4, 0x24, 0xb1, 0x77, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xe5, 0xf6, 0x76, 0x8c, 0x9c, 0x01, 0x00, 0x00, +} diff --git 
a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup_service.pb.go new file mode 100644 index 000000000..8d882b11e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/backup_service.pb.go @@ -0,0 +1,334 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/redis/v1alpha/backup_service.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetBackupRequest struct { + // ID of the Redis backup to return. + // To get the backup ID, use a [ClusterService.ListBackups] request. + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBackupRequest) Reset() { *m = GetBackupRequest{} } +func (m *GetBackupRequest) String() string { return proto.CompactTextString(m) } +func (*GetBackupRequest) ProtoMessage() {} +func (*GetBackupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_fa97172451e1ee31, []int{0} +} +func (m *GetBackupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetBackupRequest.Unmarshal(m, b) +} +func (m *GetBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetBackupRequest.Marshal(b, m, deterministic) +} +func (dst *GetBackupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBackupRequest.Merge(dst, src) +} +func (m *GetBackupRequest) XXX_Size() int { + return xxx_messageInfo_GetBackupRequest.Size(m) +} +func (m *GetBackupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetBackupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBackupRequest proto.InternalMessageInfo + +func (m *GetBackupRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type ListBackupsRequest struct { + // ID of the folder to list backups in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. 
To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsRequest) Reset() { *m = ListBackupsRequest{} } +func (m *ListBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBackupsRequest) ProtoMessage() {} +func (*ListBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_fa97172451e1ee31, []int{1} +} +func (m *ListBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsRequest.Unmarshal(m, b) +} +func (m *ListBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsRequest.Merge(dst, src) +} +func (m *ListBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListBackupsRequest.Size(m) +} +func (m *ListBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsRequest proto.InternalMessageInfo + +func (m *ListBackupsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListBackupsResponse struct { + // Requested list of backups. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
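// Usage sketch (illustrative, not part of the generated code): the page_token
// handshake documented above is driven entirely by the client. Assuming an
// established *grpc.ClientConn named conn, a context.Context named ctx, a
// placeholder folder ID, and the standard fmt import in the calling code, a
// caller pages through backups like this (BackupServiceClient and the
// request/response types are defined below in this file):
//
//	client := NewBackupServiceClient(conn)
//	req := &ListBackupsRequest{FolderId: "example-folder-id", PageSize: 100}
//	for {
//		resp, err := client.List(ctx, req)
//		if err != nil {
//			return err
//		}
//		for _, b := range resp.GetBackups() {
//			fmt.Println(b.GetId(), b.GetSourceClusterId())
//		}
//		if resp.GetNextPageToken() == "" {
//			break // last page reached
//		}
//		req.PageToken = resp.GetNextPageToken()
//	}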
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListBackupsResponse) Reset() { *m = ListBackupsResponse{} } +func (m *ListBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBackupsResponse) ProtoMessage() {} +func (*ListBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_backup_service_fa97172451e1ee31, []int{2} +} +func (m *ListBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListBackupsResponse.Unmarshal(m, b) +} +func (m *ListBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBackupsResponse.Merge(dst, src) +} +func (m *ListBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListBackupsResponse.Size(m) +} +func (m *ListBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBackupsResponse proto.InternalMessageInfo + +func (m *ListBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetBackupRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.GetBackupRequest") + proto.RegisterType((*ListBackupsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListBackupsRequest") + proto.RegisterType((*ListBackupsResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListBackupsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BackupServiceClient interface { + // Returns the specified Redis backup. + // + // To get the list of available Redis backups, make a [List] request. + Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) + // Retrieves the list of Redis backups available for the specified folder. + List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) +} + +type backupServiceClient struct { + cc *grpc.ClientConn +} + +func NewBackupServiceClient(cc *grpc.ClientConn) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) Get(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) { + out := new(Backup) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.BackupService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) List(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) { + out := new(ListBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.BackupService/List", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +type BackupServiceServer interface { + // Returns the specified Redis backup. + // + // To get the list of available Redis backups, make a [List] request. + Get(context.Context, *GetBackupRequest) (*Backup, error) + // Retrieves the list of Redis backups available for the specified folder. + List(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) +} + +func RegisterBackupServiceServer(s *grpc.Server, srv BackupServiceServer) { + s.RegisterService(&_BackupService_serviceDesc, srv) +} + +func _BackupService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.BackupService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).Get(ctx, req.(*GetBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.BackupService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).List(ctx, req.(*ListBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BackupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.redis.v1alpha.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _BackupService_Get_Handler, + }, + { + MethodName: "List", + Handler: _BackupService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/redis/v1alpha/backup_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/backup_service.proto", fileDescriptor_backup_service_fa97172451e1ee31) +} + +var fileDescriptor_backup_service_fa97172451e1ee31 = []byte{ + // 460 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0x80, 0x95, 0xb6, 0x8c, 0xc6, 0x30, 0x81, 0xcc, 0xa5, 0x8a, 0x06, 0x2a, 0x39, 0x94, 0x20, + 0x54, 0x3b, 0x69, 0xb5, 0x13, 0x9b, 0x84, 0x7a, 0x19, 0x43, 0x1c, 0x50, 0xc6, 0x89, 0x4b, 0xe5, + 0xd4, 0x8f, 0xcc, 0x5a, 0x6a, 0x87, 0xda, 0xad, 0xc6, 0x10, 0x42, 0xe2, 0xb8, 0x0b, 0x12, 0xfb, + 0x31, 0xfc, 0x84, 0xed, 0xce, 0x5f, 0xe0, 0xc0, 0x6f, 0xe0, 0x84, 0x62, 0xa7, 0xc0, 0x40, 0xdb, + 0xca, 0xd1, 0xef, 0xbd, 0xcf, 0xef, 0xd3, 0x7b, 0x0f, 0x0d, 0xdf, 0x32, 0xc9, 0xe1, 0x90, 0x4e, + 0x0a, 0x35, 0xe7, 0x74, 0xca, 0x33, 0x3a, 0x03, 0x2e, 0x34, 0x5d, 0x24, 0xac, 0x28, 0xf7, 0x19, + 0xcd, 0xd8, 0xe4, 0x60, 0x5e, 0x8e, 0x35, 0xcc, 0x16, 0x62, 0x02, 0xa4, 0x9c, 0x29, 0xa3, 0xf0, + 0x3d, 0x07, 0x11, 0x0b, 0x91, 0x29, 0xcf, 0x88, 0x85, 0x48, 0x0d, 0x05, 0x1b, 0xb9, 0x52, 0x79, + 0x01, 0x94, 0x95, 0x82, 0x32, 
0x29, 0x95, 0x61, 0x46, 0x28, 0xa9, 0x1d, 0x1d, 0xdc, 0x3d, 0xd7, + 0x72, 0xc1, 0x0a, 0xc1, 0x6d, 0xbe, 0x4e, 0x3f, 0x5a, 0xc9, 0xc8, 0x15, 0x87, 0x9b, 0xe8, 0xf6, + 0x0e, 0x98, 0x91, 0x0d, 0xa5, 0xf0, 0x66, 0x0e, 0xda, 0xe0, 0xfb, 0xc8, 0xaf, 0xad, 0x05, 0xef, + 0x78, 0x5d, 0x2f, 0xf2, 0x47, 0xad, 0xef, 0xa7, 0x89, 0x97, 0xb6, 0x5d, 0x78, 0x97, 0x87, 0x9f, + 0x3d, 0x84, 0x9f, 0x0b, 0x5d, 0x83, 0x7a, 0x49, 0x3e, 0x44, 0xfe, 0x6b, 0x55, 0x70, 0x98, 0xfd, + 0x26, 0x6f, 0x56, 0xe4, 0xf1, 0x59, 0xd2, 0xda, 0xda, 0xde, 0x8c, 0xd3, 0xb6, 0x4b, 0xef, 0x72, + 0xfc, 0x00, 0xf9, 0x25, 0xcb, 0x61, 0xac, 0xc5, 0x11, 0x74, 0x1a, 0x5d, 0x2f, 0x6a, 0x8e, 0xd0, + 0x8f, 0xd3, 0x64, 0x2d, 0xee, 0x27, 0x71, 0x1c, 0xa7, 0xed, 0x2a, 0xb9, 0x27, 0x8e, 0x00, 0x47, + 0x08, 0xd9, 0x42, 0xa3, 0x0e, 0x40, 0x76, 0x9a, 0xf6, 0x53, 0xff, 0xf8, 0x2c, 0xb9, 0xb6, 0xb5, + 0x9d, 0xc4, 0x71, 0x6a, 0x7f, 0x79, 0x59, 0xe5, 0xc2, 0x0f, 0xe8, 0xce, 0x39, 0x27, 0x5d, 0x2a, + 0xa9, 0x01, 0x3f, 0x41, 0xd7, 0x9d, 0xb7, 0xee, 0x78, 0xdd, 0x66, 0x74, 0x63, 0xd0, 0x23, 0x97, + 0x8f, 0x9f, 0xd4, 0xe3, 0x58, 0x62, 0xb8, 0x87, 0x6e, 0x49, 0x38, 0x34, 0xe3, 0x3f, 0x3c, 0x2a, + 0x63, 0x3f, 0x5d, 0xaf, 0xc2, 0x2f, 0x96, 0x02, 0x83, 0x2f, 0x0d, 0xb4, 0xee, 0xd8, 0x3d, 0xb7, + 0x6e, 0xfc, 0xc9, 0x43, 0xcd, 0x1d, 0x30, 0x38, 0xbe, 0xaa, 0xe5, 0xdf, 0x4b, 0x08, 0x56, 0x94, + 0x0c, 0xc9, 0xc7, 0xaf, 0xdf, 0x4e, 0x1a, 0x11, 0xee, 0x5d, 0xb8, 0x69, 0x4d, 0xdf, 0xfd, 0x5a, + 0xe7, 0x7b, 0x7c, 0xe2, 0xa1, 0x56, 0x35, 0x25, 0x3c, 0xb8, 0xaa, 0xc1, 0xbf, 0xfb, 0x0d, 0x86, + 0xff, 0xc5, 0xb8, 0xf9, 0x87, 0xa1, 0x35, 0xdc, 0xc0, 0xc1, 0xc5, 0x86, 0xa3, 0x67, 0xaf, 0x9e, + 0xe6, 0xc2, 0xec, 0xcf, 0x33, 0x32, 0x51, 0x53, 0xea, 0x9a, 0xf4, 0xdd, 0x01, 0xe7, 0xaa, 0x9f, + 0x83, 0xb4, 0xd7, 0x4a, 0x2f, 0xbf, 0xec, 0xc7, 0xf6, 0x95, 0xad, 0xd9, 0xda, 0xe1, 0xcf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x9d, 0x9c, 0x2e, 0x16, 0x9a, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster.pb.go new file mode 100644 index 000000000..a65a28e87 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/redis/v1alpha/cluster.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Cluster_Environment int32 + +const ( + Cluster_ENVIRONMENT_UNSPECIFIED Cluster_Environment = 0 + // Stable environment with a conservative update policy: + // only hotfixes are applied during regular maintenance. 
+ Cluster_PRODUCTION Cluster_Environment = 1 + // Environment with more aggressive update policy: new versions + // are rolled out irrespective of backward compatibility. + Cluster_PRESTABLE Cluster_Environment = 2 +) + +var Cluster_Environment_name = map[int32]string{ + 0: "ENVIRONMENT_UNSPECIFIED", + 1: "PRODUCTION", + 2: "PRESTABLE", +} +var Cluster_Environment_value = map[string]int32{ + "ENVIRONMENT_UNSPECIFIED": 0, + "PRODUCTION": 1, + "PRESTABLE": 2, +} + +func (x Cluster_Environment) String() string { + return proto.EnumName(Cluster_Environment_name, int32(x)) +} +func (Cluster_Environment) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{0, 0} +} + +type Cluster_Health int32 + +const ( + // Cluster is in unknown state (we have no data) + Cluster_HEALTH_UNKNOWN Cluster_Health = 0 + // Cluster is alive and well (all hosts are alive) + Cluster_ALIVE Cluster_Health = 1 + // Cluster is inoperable (it cannot perform any of its essential functions) + Cluster_DEAD Cluster_Health = 2 + // Cluster is partially alive (it can perform some of its essential functions) + Cluster_DEGRADED Cluster_Health = 3 +) + +var Cluster_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Cluster_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Cluster_Health) String() string { + return proto.EnumName(Cluster_Health_name, int32(x)) +} +func (Cluster_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{0, 1} +} + +type Cluster_Status int32 + +const ( + // Cluster status is unknown + Cluster_STATUS_UNKNOWN Cluster_Status = 0 + // Cluster is being created + Cluster_CREATING Cluster_Status = 1 + // Cluster is running + Cluster_RUNNING Cluster_Status = 2 + // Cluster failed + Cluster_ERROR Cluster_Status = 3 + // Cluster is being updated. + Cluster_UPDATING Cluster_Status = 4 + // Cluster is stopping. + Cluster_STOPPING Cluster_Status = 5 + // Cluster stopped. + Cluster_STOPPED Cluster_Status = 6 + // Cluster is starting. + Cluster_STARTING Cluster_Status = 7 +) + +var Cluster_Status_name = map[int32]string{ + 0: "STATUS_UNKNOWN", + 1: "CREATING", + 2: "RUNNING", + 3: "ERROR", + 4: "UPDATING", + 5: "STOPPING", + 6: "STOPPED", + 7: "STARTING", +} +var Cluster_Status_value = map[string]int32{ + "STATUS_UNKNOWN": 0, + "CREATING": 1, + "RUNNING": 2, + "ERROR": 3, + "UPDATING": 4, + "STOPPING": 5, + "STOPPED": 6, + "STARTING": 7, +} + +func (x Cluster_Status) String() string { + return proto.EnumName(Cluster_Status_name, int32(x)) +} +func (Cluster_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{0, 2} +} + +type Host_Role int32 + +const ( + // Role of the host in the cluster is unknown. + Host_ROLE_UNKNOWN Host_Role = 0 + // Host is the master Redis server in the cluster. + Host_MASTER Host_Role = 1 + // Host is a replica (standby) Redis server in the cluster. + Host_REPLICA Host_Role = 2 +) + +var Host_Role_name = map[int32]string{ + 0: "ROLE_UNKNOWN", + 1: "MASTER", + 2: "REPLICA", +} +var Host_Role_value = map[string]int32{ + "ROLE_UNKNOWN": 0, + "MASTER": 1, + "REPLICA": 2, +} + +func (x Host_Role) String() string { + return proto.EnumName(Host_Role_name, int32(x)) +} +func (Host_Role) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{3, 0} +} + +type Host_Health int32 + +const ( + // Health of the host is unknown. 
+ Host_HEALTH_UNKNOWN Host_Health = 0 + // The host is performing all its functions normally. + Host_ALIVE Host_Health = 1 + // The host is inoperable, and cannot perform any of its essential functions. + Host_DEAD Host_Health = 2 + // The host is degraded, and can perform only some of its essential functions. + Host_DEGRADED Host_Health = 3 +) + +var Host_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", + 3: "DEGRADED", +} +var Host_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, + "DEGRADED": 3, +} + +func (x Host_Health) String() string { + return proto.EnumName(Host_Health_name, int32(x)) +} +func (Host_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{3, 1} +} + +type Service_Type int32 + +const ( + Service_TYPE_UNSPECIFIED Service_Type = 0 + // The host is a Redis server. + Service_REDIS Service_Type = 1 + // The host provides a Sentinel service. + Service_SENTINEL Service_Type = 2 +) + +var Service_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "REDIS", + 2: "SENTINEL", +} +var Service_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "REDIS": 1, + "SENTINEL": 2, +} + +func (x Service_Type) String() string { + return proto.EnumName(Service_Type_name, int32(x)) +} +func (Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{4, 0} +} + +type Service_Health int32 + +const ( + // Health of the server is unknown. + Service_HEALTH_UNKNOWN Service_Health = 0 + // The server is working normally. + Service_ALIVE Service_Health = 1 + // The server is dead or unresponsive. + Service_DEAD Service_Health = 2 +) + +var Service_Health_name = map[int32]string{ + 0: "HEALTH_UNKNOWN", + 1: "ALIVE", + 2: "DEAD", +} +var Service_Health_value = map[string]int32{ + "HEALTH_UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, +} + +func (x Service_Health) String() string { + return proto.EnumName(Service_Health_name, int32(x)) +} +func (Service_Health) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{4, 1} +} + +// Description of a Redis cluster. For more information, see +// the Managed Service for Redis [documentation](/docs/managed-redis/concepts/). +type Cluster struct { + // ID of the Redis cluster. + // This ID is assigned by MDB at creation time. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the Redis cluster belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the Redis cluster. + // The name is unique within the folder. 3-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Description of the Redis cluster. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the Redis cluster as `key:value` pairs. + // Maximum 64 per cluster. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the Redis cluster. 
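// Usage sketch (illustrative, not part of the generated code): each enum above
// (Cluster_Environment, Cluster_Health, Cluster_Status, Host_Role, Host_Health,
// Service_Type, Service_Health) gets a String() method backed by its name map,
// so the values print readably. Assuming a *Cluster named c and the standard
// log import:
//
//	log.Printf("cluster %s: env=%s status=%s health=%s",
//		c.GetName(), c.GetEnvironment(), c.GetStatus(), c.GetHealth())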
+ Environment Cluster_Environment `protobuf:"varint,7,opt,name=environment,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Cluster_Environment" json:"environment,omitempty"` + // Description of monitoring systems relevant to the Redis cluster. + Monitoring []*Monitoring `protobuf:"bytes,8,rep,name=monitoring,proto3" json:"monitoring,omitempty"` + // Configuration of the Redis cluster. + Config *ClusterConfig `protobuf:"bytes,9,opt,name=config,proto3" json:"config,omitempty"` + NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Aggregated cluster health + Health Cluster_Health `protobuf:"varint,11,opt,name=health,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Cluster_Health" json:"health,omitempty"` + // Cluster status + Status Cluster_Status `protobuf:"varint,12,opt,name=status,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Cluster_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{0} +} +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (dst *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(dst, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Cluster) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Cluster) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Cluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Cluster) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *Cluster) GetMonitoring() []*Monitoring { + if m != nil { + return m.Monitoring + } + return nil +} + +func (m *Cluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Cluster) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *Cluster) GetHealth() Cluster_Health { + if m != nil { + return m.Health + } + return Cluster_HEALTH_UNKNOWN +} + +func (m *Cluster) GetStatus() Cluster_Status { + if m != nil { + return m.Status + } + return Cluster_STATUS_UNKNOWN +} + +type Monitoring struct { + // Name of the monitoring system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the monitoring system. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Link to the monitoring system charts for the Redis cluster. 
+ Link string `protobuf:"bytes,3,opt,name=link,proto3" json:"link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Monitoring) Reset() { *m = Monitoring{} } +func (m *Monitoring) String() string { return proto.CompactTextString(m) } +func (*Monitoring) ProtoMessage() {} +func (*Monitoring) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{1} +} +func (m *Monitoring) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Monitoring.Unmarshal(m, b) +} +func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Monitoring.Marshal(b, m, deterministic) +} +func (dst *Monitoring) XXX_Merge(src proto.Message) { + xxx_messageInfo_Monitoring.Merge(dst, src) +} +func (m *Monitoring) XXX_Size() int { + return xxx_messageInfo_Monitoring.Size(m) +} +func (m *Monitoring) XXX_DiscardUnknown() { + xxx_messageInfo_Monitoring.DiscardUnknown(m) +} + +var xxx_messageInfo_Monitoring proto.InternalMessageInfo + +func (m *Monitoring) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Monitoring) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Monitoring) GetLink() string { + if m != nil { + return m.Link + } + return "" +} + +type ClusterConfig struct { + // Version of Redis server software. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Configuration for Redis servers in the cluster. + // + // Types that are valid to be assigned to RedisConfig: + // *ClusterConfig_RedisConfig_5_0 + RedisConfig isClusterConfig_RedisConfig `protobuf_oneof:"redis_config"` + // Resources allocated to Redis hosts. 
+ Resources *Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } +func (m *ClusterConfig) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig) ProtoMessage() {} +func (*ClusterConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{2} +} +func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterConfig.Unmarshal(m, b) +} +func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterConfig.Marshal(b, m, deterministic) +} +func (dst *ClusterConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterConfig.Merge(dst, src) +} +func (m *ClusterConfig) XXX_Size() int { + return xxx_messageInfo_ClusterConfig.Size(m) +} +func (m *ClusterConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo + +func (m *ClusterConfig) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type isClusterConfig_RedisConfig interface { + isClusterConfig_RedisConfig() +} + +type ClusterConfig_RedisConfig_5_0 struct { + RedisConfig_5_0 *config.RedisConfigSet5_0 `protobuf:"bytes,2,opt,name=redis_config_5_0,json=redisConfig50,proto3,oneof"` +} + +func (*ClusterConfig_RedisConfig_5_0) isClusterConfig_RedisConfig() {} + +func (m *ClusterConfig) GetRedisConfig() isClusterConfig_RedisConfig { + if m != nil { + return m.RedisConfig + } + return nil +} + +func (m *ClusterConfig) GetRedisConfig_5_0() *config.RedisConfigSet5_0 { + if x, ok := m.GetRedisConfig().(*ClusterConfig_RedisConfig_5_0); ok { + return x.RedisConfig_5_0 + } + return nil +} + +func (m *ClusterConfig) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
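// Usage sketch (illustrative, not part of the generated code): redis_config is a
// proto oneof, so at most one version-specific settings message is attached to a
// ClusterConfig. Assuming a *ClusterConfig named cfg, callers either type-switch
// on the wrapper or use the typed accessor:
//
//	switch cfg.GetRedisConfig().(type) {
//	case *ClusterConfig_RedisConfig_5_0:
//		// Redis 5.0 settings are present; read them via cfg.GetRedisConfig_5_0().
//	default:
//		// No version-specific settings are attached.
//	}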
+func (*ClusterConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClusterConfig_OneofMarshaler, _ClusterConfig_OneofUnmarshaler, _ClusterConfig_OneofSizer, []interface{}{ + (*ClusterConfig_RedisConfig_5_0)(nil), + } +} + +func _ClusterConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClusterConfig) + // redis_config + switch x := m.RedisConfig.(type) { + case *ClusterConfig_RedisConfig_5_0: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RedisConfig_5_0); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClusterConfig.RedisConfig has unexpected type %T", x) + } + return nil +} + +func _ClusterConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClusterConfig) + switch tag { + case 2: // redis_config.redis_config_5_0 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.RedisConfigSet5_0) + err := b.DecodeMessage(msg) + m.RedisConfig = &ClusterConfig_RedisConfig_5_0{msg} + return true, err + default: + return false, nil + } +} + +func _ClusterConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClusterConfig) + // redis_config + switch x := m.RedisConfig.(type) { + case *ClusterConfig_RedisConfig_5_0: + s := proto.Size(x.RedisConfig_5_0) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Host struct { + // Name of the Redis host. The host name is assigned by MDB at creation time, and cannot be changed. + // 1-63 characters long. + // + // The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // ID of the Redis host. The ID is assigned by MDB at creation time. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the availability zone where the Redis host resides. + ZoneId string `protobuf:"bytes,3,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // ID of the subnet that the host belongs to. + SubnetId string `protobuf:"bytes,4,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Resources allocated to the Redis host. + Resources *Resources `protobuf:"bytes,5,opt,name=resources,proto3" json:"resources,omitempty"` + // Role of the host in the cluster. + Role Host_Role `protobuf:"varint,6,opt,name=role,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Host_Role" json:"role,omitempty"` + // Status code of the aggregated health of the host. + Health Host_Health `protobuf:"varint,7,opt,name=health,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Host_Health" json:"health,omitempty"` + // Services provided by the host. 
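// Usage sketch (illustrative, not part of the generated code): the role, health
// and per-service fields described above make it straightforward to pick out the
// healthy master of a cluster. Assuming a slice of *Host values named hosts and
// the standard fmt import:
//
//	for _, h := range hosts {
//		if h.GetRole() == Host_MASTER && h.GetHealth() == Host_ALIVE {
//			fmt.Printf("master %s (zone %s, subnet %s)\n", h.GetName(), h.GetZoneId(), h.GetSubnetId())
//		}
//	}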
+ Services []*Service `protobuf:"bytes,8,rep,name=services,proto3" json:"services,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Host) Reset() { *m = Host{} } +func (m *Host) String() string { return proto.CompactTextString(m) } +func (*Host) ProtoMessage() {} +func (*Host) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{3} +} +func (m *Host) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Host.Unmarshal(m, b) +} +func (m *Host) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Host.Marshal(b, m, deterministic) +} +func (dst *Host) XXX_Merge(src proto.Message) { + xxx_messageInfo_Host.Merge(dst, src) +} +func (m *Host) XXX_Size() int { + return xxx_messageInfo_Host.Size(m) +} +func (m *Host) XXX_DiscardUnknown() { + xxx_messageInfo_Host.DiscardUnknown(m) +} + +var xxx_messageInfo_Host proto.InternalMessageInfo + +func (m *Host) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Host) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *Host) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Host) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *Host) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Host) GetRole() Host_Role { + if m != nil { + return m.Role + } + return Host_ROLE_UNKNOWN +} + +func (m *Host) GetHealth() Host_Health { + if m != nil { + return m.Health + } + return Host_HEALTH_UNKNOWN +} + +func (m *Host) GetServices() []*Service { + if m != nil { + return m.Services + } + return nil +} + +type Service struct { + // Type of the service provided by the host. + Type Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Service_Type" json:"type,omitempty"` + // Status code of server availability. + Health Service_Health `protobuf:"varint,2,opt,name=health,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Service_Health" json:"health,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{4} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service.Unmarshal(m, b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) +} +func (dst *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(dst, src) +} +func (m *Service) XXX_Size() int { + return xxx_messageInfo_Service.Size(m) +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetType() Service_Type { + if m != nil { + return m.Type + } + return Service_TYPE_UNSPECIFIED +} + +func (m *Service) GetHealth() Service_Health { + if m != nil { + return m.Health + } + return Service_HEALTH_UNKNOWN +} + +type Resources struct { + // ID of the preset for computational resources available to a host (CPU, memory etc.). 
+ // All available presets are listed in the [documentation](/docs/managed-redis/concepts/instance-types). + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + // Volume of the storage available to a host, in bytes. + DiskSize int64 `protobuf:"varint,2,opt,name=disk_size,json=diskSize,proto3" json:"disk_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_4cd4fa078a636ae8, []int{5} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (dst *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(dst, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +func (m *Resources) GetDiskSize() int64 { + if m != nil { + return m.DiskSize + } + return 0 +} + +func init() { + proto.RegisterType((*Cluster)(nil), "yandex.cloud.mdb.redis.v1alpha.Cluster") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.redis.v1alpha.Cluster.LabelsEntry") + proto.RegisterType((*Monitoring)(nil), "yandex.cloud.mdb.redis.v1alpha.Monitoring") + proto.RegisterType((*ClusterConfig)(nil), "yandex.cloud.mdb.redis.v1alpha.ClusterConfig") + proto.RegisterType((*Host)(nil), "yandex.cloud.mdb.redis.v1alpha.Host") + proto.RegisterType((*Service)(nil), "yandex.cloud.mdb.redis.v1alpha.Service") + proto.RegisterType((*Resources)(nil), "yandex.cloud.mdb.redis.v1alpha.Resources") + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Cluster_Environment", Cluster_Environment_name, Cluster_Environment_value) + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Cluster_Health", Cluster_Health_name, Cluster_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Cluster_Status", Cluster_Status_name, Cluster_Status_value) + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Host_Role", Host_Role_name, Host_Role_value) + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Host_Health", Host_Health_name, Host_Health_value) + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Service_Type", Service_Type_name, Service_Type_value) + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.Service_Health", Service_Health_name, Service_Health_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/cluster.proto", fileDescriptor_cluster_4cd4fa078a636ae8) +} + +var fileDescriptor_cluster_4cd4fa078a636ae8 = []byte{ + // 1014 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0xdb, 0x46, + 0x13, 0x35, 0xf5, 0x43, 0x89, 0x23, 0x5b, 0x20, 0x16, 0x01, 0x42, 0x38, 0xc8, 0xf7, 0x19, 0xbc, + 0xa9, 0xdb, 0xda, 0x94, 0xed, 0xd4, 0x40, 0xd2, 0xa2, 0x68, 0x69, 0x69, 0x63, 0x31, 0x91, 0x29, + 0x61, 0x49, 
0xb9, 0x68, 0x6f, 0x08, 0x4a, 0x5c, 0xcb, 0x84, 0x29, 0x52, 0x20, 0x29, 0xb7, 0xf2, + 0x4b, 0xf6, 0x31, 0xfa, 0x04, 0xbd, 0x2c, 0x50, 0xec, 0x2e, 0x65, 0xc9, 0x69, 0x1b, 0x29, 0x45, + 0xef, 0x38, 0xb3, 0x73, 0x0e, 0x67, 0x66, 0x67, 0x0e, 0x16, 0x8e, 0x16, 0x7e, 0x1c, 0xd0, 0x5f, + 0x5a, 0xe3, 0x28, 0x99, 0x07, 0xad, 0x69, 0x30, 0x6a, 0xa5, 0x34, 0x08, 0xb3, 0xd6, 0xfd, 0xa9, + 0x1f, 0xcd, 0x6e, 0xfd, 0xd6, 0x38, 0x9a, 0x67, 0x39, 0x4d, 0x8d, 0x59, 0x9a, 0xe4, 0x09, 0xfa, + 0x9f, 0x88, 0x36, 0x78, 0xb4, 0x31, 0x0d, 0x46, 0x06, 0x8f, 0x36, 0x8a, 0xe8, 0xfd, 0xff, 0x4f, + 0x92, 0x64, 0x12, 0xd1, 0x16, 0x8f, 0x1e, 0xcd, 0x6f, 0x5a, 0x79, 0x38, 0xa5, 0x59, 0xee, 0x4f, + 0x67, 0x82, 0x60, 0xff, 0xab, 0x4d, 0xbf, 0x4b, 0xe2, 0x9b, 0x70, 0x22, 0x9c, 0xe7, 0xde, 0x89, + 0x40, 0xe9, 0xbf, 0xd6, 0xa0, 0xd6, 0x16, 0x89, 0xa0, 0x26, 0x94, 0xc2, 0x40, 0x93, 0x0e, 0xa4, + 0x43, 0x85, 0x94, 0xc2, 0x00, 0xbd, 0x00, 0xe5, 0x26, 0x89, 0x02, 0x9a, 0x7a, 0x61, 0xa0, 0x95, + 0xb8, 0xbb, 0x2e, 0x1c, 0x56, 0x80, 0xde, 0x00, 0x8c, 0x53, 0xea, 0xe7, 0x34, 0xf0, 0xfc, 0x5c, + 0x2b, 0x1f, 0x48, 0x87, 0x8d, 0xb3, 0x7d, 0x43, 0x24, 0x69, 0x2c, 0x93, 0x34, 0xdc, 0x65, 0x92, + 0x44, 0x29, 0xa2, 0xcd, 0x1c, 0x21, 0xa8, 0xc4, 0xfe, 0x94, 0x6a, 0x15, 0x4e, 0xc9, 0xbf, 0xd1, + 0x01, 0x34, 0x02, 0x9a, 0x8d, 0xd3, 0x70, 0x96, 0x87, 0x49, 0xac, 0x55, 0xf9, 0xd1, 0xba, 0x0b, + 0xbd, 0x07, 0x39, 0xf2, 0x47, 0x34, 0xca, 0x34, 0xf9, 0xa0, 0x7c, 0xd8, 0x38, 0x7b, 0x65, 0x7c, + 0xbc, 0x63, 0x46, 0x51, 0x96, 0xd1, 0xe3, 0x28, 0x1c, 0xe7, 0xe9, 0x82, 0x14, 0x14, 0x68, 0x08, + 0x0d, 0x1a, 0xdf, 0x87, 0x69, 0x12, 0x4f, 0x69, 0x9c, 0x6b, 0xb5, 0x03, 0xe9, 0xb0, 0xb9, 0x3d, + 0x23, 0x5e, 0x41, 0xc9, 0x3a, 0x0f, 0x7a, 0x07, 0x30, 0x4d, 0xe2, 0x30, 0x4f, 0xd2, 0x30, 0x9e, + 0x68, 0x75, 0x9e, 0xe7, 0x17, 0x9b, 0x58, 0xaf, 0x1e, 0x11, 0x64, 0x0d, 0x8d, 0x30, 0xc8, 0xe2, + 0xca, 0x34, 0x85, 0x37, 0xf7, 0x78, 0xcb, 0xec, 0xda, 0x1c, 0x44, 0x0a, 0x30, 0x7a, 0x09, 0x10, + 0xd3, 0xfc, 0xe7, 0x24, 0xbd, 0x63, 0xb7, 0x08, 0xbc, 0xaf, 0x4a, 0xe1, 0xb1, 0x02, 0xf4, 0x16, + 0xe4, 0x5b, 0xea, 0x47, 0xf9, 0xad, 0xd6, 0xe0, 0x3d, 0x30, 0xb6, 0xed, 0x41, 0x97, 0xa3, 0x48, + 0x81, 0x66, 0x3c, 0x59, 0xee, 0xe7, 0xf3, 0x4c, 0xdb, 0xfd, 0x34, 0x1e, 0x87, 0xa3, 0x48, 0x81, + 0xde, 0x7f, 0x03, 0x8d, 0xb5, 0xfb, 0x42, 0x2a, 0x94, 0xef, 0xe8, 0xa2, 0x98, 0x49, 0xf6, 0x89, + 0x9e, 0x41, 0xf5, 0xde, 0x8f, 0xe6, 0xb4, 0x18, 0x48, 0x61, 0x7c, 0x5d, 0x7a, 0x2d, 0xe9, 0x16, + 0x34, 0xd6, 0x2e, 0x06, 0xbd, 0x80, 0xe7, 0xd8, 0xbe, 0xb6, 0x48, 0xdf, 0xbe, 0xc2, 0xb6, 0xeb, + 0x0d, 0x6d, 0x67, 0x80, 0xdb, 0xd6, 0x5b, 0x0b, 0x77, 0xd4, 0x1d, 0xd4, 0x04, 0x18, 0x90, 0x7e, + 0x67, 0xd8, 0x76, 0xad, 0xbe, 0xad, 0x4a, 0x68, 0x0f, 0x94, 0x01, 0xc1, 0x8e, 0x6b, 0x5e, 0xf4, + 0xb0, 0x5a, 0xd2, 0xbf, 0x03, 0x59, 0xd4, 0x87, 0x10, 0x34, 0xbb, 0xd8, 0xec, 0xb9, 0x5d, 0x6f, + 0x68, 0xbf, 0xb7, 0xfb, 0x3f, 0xd8, 0xea, 0x0e, 0x52, 0xa0, 0x6a, 0xf6, 0xac, 0x6b, 0xac, 0x4a, + 0xa8, 0x0e, 0x95, 0x0e, 0x36, 0x3b, 0x6a, 0x09, 0xed, 0x42, 0xbd, 0x83, 0x2f, 0x89, 0xd9, 0xc1, + 0x1d, 0xb5, 0xac, 0x2f, 0x40, 0x16, 0x85, 0x31, 0x02, 0xc7, 0x35, 0xdd, 0xa1, 0xb3, 0x46, 0xb0, + 0x0b, 0xf5, 0x36, 0xc1, 0xa6, 0x6b, 0xd9, 0x97, 0xaa, 0x84, 0x1a, 0x50, 0x23, 0x43, 0xdb, 0x66, + 0x46, 0x89, 0x71, 0x63, 0x42, 0xfa, 0x44, 0x2d, 0xb3, 0xa8, 0xe1, 0xa0, 0x23, 0xa2, 0x2a, 0xcc, + 0x72, 0xdc, 0xfe, 0x60, 0xc0, 0xac, 0x2a, 0xc3, 0x70, 0x0b, 0x77, 0x54, 0x59, 0x1c, 0x99, 0x84, + 0x07, 0xd6, 0xf4, 0x6b, 0x80, 0xd5, 0x44, 0x3d, 0xee, 0x9a, 0xf4, 0xcf, 0xbb, 0x56, 0xfa, 0xeb, + 0xae, 0x21, 0xa8, 0x44, 0x61, 0x7c, 
0xc7, 0xd7, 0x5a, 0x21, 0xfc, 0x5b, 0xff, 0x4d, 0x82, 0xbd, + 0x27, 0x23, 0x86, 0x34, 0xa8, 0xdd, 0xd3, 0x34, 0x63, 0x1c, 0x82, 0x7e, 0x69, 0xa2, 0x31, 0xa8, + 0xfc, 0xb6, 0x3d, 0x31, 0x84, 0xde, 0xb9, 0x77, 0xc2, 0x7f, 0xd3, 0x38, 0x7b, 0xbd, 0x69, 0x2e, + 0x04, 0xc2, 0x20, 0xcc, 0x29, 0xfe, 0xe3, 0xd0, 0xfc, 0xdc, 0x3b, 0xe9, 0xee, 0x90, 0xbd, 0x74, + 0xe5, 0x3c, 0x3f, 0x41, 0x97, 0xa0, 0xa4, 0x34, 0x4b, 0xe6, 0xe9, 0x98, 0x66, 0x85, 0x00, 0x7d, + 0xbe, 0x89, 0x9d, 0x2c, 0x01, 0x64, 0x85, 0xbd, 0x68, 0xc2, 0xee, 0x7a, 0xb6, 0xfa, 0xef, 0x65, + 0xa8, 0x74, 0x93, 0x2c, 0xff, 0xdb, 0xe6, 0xbd, 0x04, 0x28, 0x84, 0x7b, 0xa5, 0x8a, 0x4a, 0xe1, + 0xb1, 0x02, 0xf4, 0x1c, 0x6a, 0x0f, 0x49, 0x4c, 0xd9, 0x99, 0x68, 0x9e, 0xcc, 0x4c, 0x8b, 0x8b, + 0x69, 0x36, 0x1f, 0xc5, 0x34, 0x67, 0x47, 0x42, 0xf9, 0xea, 0xc2, 0x61, 0x05, 0x4f, 0x4b, 0xa9, + 0xfe, 0xfb, 0x52, 0xd0, 0xb7, 0x50, 0x49, 0x93, 0x88, 0x6a, 0x32, 0x5f, 0xc2, 0x8d, 0x1c, 0xac, + 0x4a, 0x83, 0x24, 0x11, 0x25, 0x1c, 0x86, 0xda, 0x8f, 0x6a, 0x20, 0x14, 0xf1, 0xcb, 0xad, 0x08, + 0x3e, 0x90, 0x82, 0x36, 0xd4, 0x33, 0x9a, 0xde, 0x87, 0xac, 0x16, 0x21, 0x81, 0x9f, 0x6d, 0xa2, + 0x71, 0x44, 0x3c, 0x79, 0x04, 0xea, 0xa7, 0x50, 0x61, 0x79, 0x21, 0x15, 0x76, 0x49, 0xbf, 0x87, + 0xd7, 0x96, 0x07, 0x40, 0xbe, 0x32, 0x1d, 0x17, 0x93, 0x62, 0x75, 0xf0, 0xa0, 0x67, 0xb5, 0xcd, + 0xff, 0x62, 0x69, 0xff, 0x90, 0xa0, 0x56, 0x64, 0x82, 0xbe, 0x87, 0x4a, 0xbe, 0x98, 0x89, 0xab, + 0x6f, 0x9e, 0x1d, 0x6d, 0x59, 0x80, 0xe1, 0x2e, 0x66, 0x94, 0x70, 0xe4, 0x9a, 0xb2, 0x96, 0xb6, + 0x53, 0xc4, 0x25, 0xc7, 0xd3, 0x76, 0xea, 0xe7, 0x50, 0x61, 0xac, 0xe8, 0x19, 0xa8, 0xee, 0x8f, + 0x03, 0xfc, 0x81, 0x90, 0x29, 0x50, 0x25, 0xb8, 0x63, 0x39, 0xaa, 0xc4, 0x65, 0x00, 0xdb, 0xae, + 0x65, 0xe3, 0x9e, 0x5a, 0xd2, 0x4f, 0x3f, 0xb9, 0x1b, 0xfa, 0x35, 0x28, 0x8f, 0x43, 0x85, 0x8e, + 0x00, 0x2d, 0xc7, 0xca, 0x9b, 0xa5, 0x34, 0x13, 0x83, 0x2b, 0x36, 0x41, 0x5d, 0x9e, 0x0c, 0xf8, + 0x81, 0x98, 0xee, 0x20, 0xcc, 0xee, 0xbc, 0x2c, 0x7c, 0x10, 0xca, 0x5c, 0x26, 0x75, 0xe6, 0x70, + 0xc2, 0x07, 0x7a, 0xf1, 0xee, 0xa7, 0xee, 0x24, 0xcc, 0x6f, 0xe7, 0x23, 0x63, 0x9c, 0x4c, 0x5b, + 0xa2, 0x0b, 0xc7, 0xe2, 0x99, 0x32, 0x49, 0x8e, 0x27, 0x34, 0xe6, 0xcf, 0x85, 0xd6, 0xc7, 0xdf, + 0x2f, 0xdf, 0x70, 0x6b, 0x24, 0xf3, 0xd8, 0x57, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x5e, + 0x5c, 0x3f, 0x5d, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster_service.pb.go new file mode 100644 index 000000000..f28bba3e5 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/cluster_service.pb.go @@ -0,0 +1,2615 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: yandex/cloud/mdb/redis/v1alpha/cluster_service.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import config "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ListClusterLogsRequest_ServiceType int32 + +const ( + ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED ListClusterLogsRequest_ServiceType = 0 + // Logs of Redis activity. + ListClusterLogsRequest_REDIS ListClusterLogsRequest_ServiceType = 1 +) + +var ListClusterLogsRequest_ServiceType_name = map[int32]string{ + 0: "SERVICE_TYPE_UNSPECIFIED", + 1: "REDIS", +} +var ListClusterLogsRequest_ServiceType_value = map[string]int32{ + "SERVICE_TYPE_UNSPECIFIED": 0, + "REDIS": 1, +} + +func (x ListClusterLogsRequest_ServiceType) String() string { + return proto.EnumName(ListClusterLogsRequest_ServiceType_name, int32(x)) +} +func (ListClusterLogsRequest_ServiceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{18, 0} +} + +type GetClusterRequest struct { + // ID of the Redis cluster to return. + // To get the cluster ID use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{0} +} +func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b) +} +func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic) +} +func (dst *GetClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterRequest.Merge(dst, src) +} +func (m *GetClusterRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterRequest.Size(m) +} +func (m *GetClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo + +func (m *GetClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type ListClustersRequest struct { + // ID of the folder to list Redis clusters in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] + // returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters clusters listed in the response. + // The expression must specify: + // 1. The field name. Currently you can only use filtering with the [Cluster.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Мust be 3-63 characters long and match the regular expression `^[a-z]([-a-z0-9]{,61}[a-z0-9])?$`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{1} +} +func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b) +} +func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic) +} +func (dst *ListClustersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersRequest.Merge(dst, src) +} +func (m *ListClustersRequest) XXX_Size() int { + return xxx_messageInfo_ListClustersRequest.Size(m) +} +func (m *ListClustersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo + +func (m *ListClustersRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListClustersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListClustersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListClustersResponse struct { + // List of Redis clusters. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value + // for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
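A minimal usage sketch for the list/pagination/filter pattern documented in the comments above (not part of the vendored file): it assumes the ClusterServiceClient interface and NewClusterServiceClient constructor generated further down in this file, the Cluster message from cluster.pb.go in the same package, and an already-dialed *grpc.ClientConn with Yandex.Cloud credentials attached.

package example

import (
	"context"
	"fmt"

	redis "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha"
	"google.golang.org/grpc"
)

// listClusters pages through all Redis clusters in a folder, optionally narrowed
// by the filter expression (only the Cluster.name field is filterable).
func listClusters(ctx context.Context, conn *grpc.ClientConn, folderID string) error {
	client := redis.NewClusterServiceClient(conn)
	pageToken := ""
	for {
		resp, err := client.List(ctx, &redis.ListClustersRequest{
			FolderId:  folderID,
			PageSize:  100,
			PageToken: pageToken,
			Filter:    `name = "my-redis"`, // illustrative filter; the value must match the documented regexp
		})
		if err != nil {
			return err
		}
		for _, c := range resp.Clusters {
			fmt.Println(c.GetId(), c.GetName())
		}
		if resp.NextPageToken == "" {
			return nil
		}
		// next_page_token from the previous response becomes page_token of the next request.
		pageToken = resp.NextPageToken
	}
}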
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{2} +} +func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b) +} +func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic) +} +func (dst *ListClustersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersResponse.Merge(dst, src) +} +func (m *ListClustersResponse) XXX_Size() int { + return xxx_messageInfo_ListClustersResponse.Size(m) +} +func (m *ListClustersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateClusterRequest struct { + // ID of the folder to create the Redis cluster in. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the Redis cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the Redis cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the Redis cluster as `key:value` pairs. Maximum 64 per cluster. + // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the Redis cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Cluster_Environment" json:"environment,omitempty"` + // Configuration and resources for hosts that should be created for the Redis cluster. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Individual configurations for hosts that should be created for the Redis cluster. + HostSpecs []*HostSpec `protobuf:"bytes,7,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the cluster in. 
+ NetworkId string `protobuf:"bytes,10,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{3} +} +func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b) +} +func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *CreateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterRequest.Merge(dst, src) +} +func (m *CreateClusterRequest) XXX_Size() int { + return xxx_messageInfo_CreateClusterRequest.Size(m) +} +func (m *CreateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo + +func (m *CreateClusterRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *CreateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *CreateClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *CreateClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type CreateClusterMetadata struct { + // ID of the Redis cluster that is being created. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } +func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateClusterMetadata) ProtoMessage() {} +func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{4} +} +func (m *CreateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterMetadata.Unmarshal(m, b) +} +func (m *CreateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterMetadata.Merge(dst, src) +} +func (m *CreateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_CreateClusterMetadata.Size(m) +} +func (m *CreateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterMetadata proto.InternalMessageInfo + +func (m *CreateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type UpdateClusterRequest struct { + // ID of the Redis cluster to update. + // To get the Redis cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Field mask that specifies which fields of the Redis cluster should be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // New description of the Redis cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the Redis cluster as `` key:value `` pairs. Maximum 64 per cluster. + // For example, "project": "mvp" or "source": "dictionary". + // + // The new set of labels will completely replace the old ones. To add a label, request the current + // set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // New configuration and resources for hosts in the cluster. + ConfigSpec *ConfigSpec `protobuf:"bytes,5,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // New name for the cluster. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{5} +} +func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b) +} +func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterRequest.Merge(dst, src) +} +func (m *UpdateClusterRequest) XXX_Size() int { + return xxx_messageInfo_UpdateClusterRequest.Size(m) +} +func (m *UpdateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo + +func (m *UpdateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *UpdateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type UpdateClusterMetadata struct { + // ID of the Redis cluster that is being updated. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } +func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterMetadata) ProtoMessage() {} +func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{6} +} +func (m *UpdateClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterMetadata.Unmarshal(m, b) +} +func (m *UpdateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterMetadata.Merge(dst, src) +} +func (m *UpdateClusterMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateClusterMetadata.Size(m) +} +func (m *UpdateClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterMetadata proto.InternalMessageInfo + +func (m *UpdateClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterRequest struct { + // ID of the Redis cluster to delete. 
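The update_mask semantics described above (only the listed paths change; labels, when listed, replace the whole set) can be exercised roughly as follows. This is a minimal sketch, not part of the vendored file; it assumes the generated ClusterServiceClient's Update method and that it returns a long-running yandex.cloud.operation.Operation, as the operation import at the top of this file suggests.

package example

import (
	"context"

	redis "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha"
	"google.golang.org/genproto/protobuf/field_mask"
)

// renameCluster changes only the cluster name: the field mask lists "name", so
// description, labels and config_spec are left untouched on the server side.
func renameCluster(ctx context.Context, client redis.ClusterServiceClient, clusterID, newName string) error {
	op, err := client.Update(ctx, &redis.UpdateClusterRequest{
		ClusterId:  clusterID,
		Name:       newName,
		UpdateMask: &field_mask.FieldMask{Paths: []string{"name"}},
	})
	if err != nil {
		return err
	}
	_ = op // long-running operation; poll it until done before relying on the new name
	return nil
}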
+ // To get the Redis cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{7} +} +func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b) +} +func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterRequest.Merge(dst, src) +} +func (m *DeleteClusterRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterRequest.Size(m) +} +func (m *DeleteClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo + +func (m *DeleteClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type DeleteClusterMetadata struct { + // ID of the Redis cluster that is being deleted. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterMetadata) Reset() { *m = DeleteClusterMetadata{} } +func (m *DeleteClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterMetadata) ProtoMessage() {} +func (*DeleteClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{8} +} +func (m *DeleteClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterMetadata.Merge(dst, src) +} +func (m *DeleteClusterMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterMetadata.Size(m) +} +func (m *DeleteClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterMetadata proto.InternalMessageInfo + +func (m *DeleteClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterRequest struct { + // Required. ID of the Redis cluster to start. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterRequest) Reset() { *m = StartClusterRequest{} } +func (m *StartClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StartClusterRequest) ProtoMessage() {} +func (*StartClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{9} +} +func (m *StartClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterRequest.Unmarshal(m, b) +} +func (m *StartClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StartClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterRequest.Merge(dst, src) +} +func (m *StartClusterRequest) XXX_Size() int { + return xxx_messageInfo_StartClusterRequest.Size(m) +} +func (m *StartClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterRequest proto.InternalMessageInfo + +func (m *StartClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StartClusterMetadata struct { + // Required. ID of the Redis cluster. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClusterMetadata) Reset() { *m = StartClusterMetadata{} } +func (m *StartClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StartClusterMetadata) ProtoMessage() {} +func (*StartClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{10} +} +func (m *StartClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClusterMetadata.Unmarshal(m, b) +} +func (m *StartClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StartClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClusterMetadata.Merge(dst, src) +} +func (m *StartClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StartClusterMetadata.Size(m) +} +func (m *StartClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StartClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClusterMetadata proto.InternalMessageInfo + +func (m *StartClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterRequest struct { + // Required. ID of the Redis cluster to stop. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterRequest) Reset() { *m = StopClusterRequest{} } +func (m *StopClusterRequest) String() string { return proto.CompactTextString(m) } +func (*StopClusterRequest) ProtoMessage() {} +func (*StopClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{11} +} +func (m *StopClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterRequest.Unmarshal(m, b) +} +func (m *StopClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterRequest.Marshal(b, m, deterministic) +} +func (dst *StopClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterRequest.Merge(dst, src) +} +func (m *StopClusterRequest) XXX_Size() int { + return xxx_messageInfo_StopClusterRequest.Size(m) +} +func (m *StopClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterRequest proto.InternalMessageInfo + +func (m *StopClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type StopClusterMetadata struct { + // Required. ID of the Redis cluster. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopClusterMetadata) Reset() { *m = StopClusterMetadata{} } +func (m *StopClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*StopClusterMetadata) ProtoMessage() {} +func (*StopClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{12} +} +func (m *StopClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopClusterMetadata.Unmarshal(m, b) +} +func (m *StopClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *StopClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopClusterMetadata.Merge(dst, src) +} +func (m *StopClusterMetadata) XXX_Size() int { + return xxx_messageInfo_StopClusterMetadata.Size(m) +} +func (m *StopClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StopClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StopClusterMetadata proto.InternalMessageInfo + +func (m *StopClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterRequest struct { + // ID of the Redis cluster to back up. + // To get the Redis cluster ID, use a [ClusterService.List] request. 
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterRequest) Reset() { *m = BackupClusterRequest{} } +func (m *BackupClusterRequest) String() string { return proto.CompactTextString(m) } +func (*BackupClusterRequest) ProtoMessage() {} +func (*BackupClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{13} +} +func (m *BackupClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterRequest.Unmarshal(m, b) +} +func (m *BackupClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterRequest.Marshal(b, m, deterministic) +} +func (dst *BackupClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterRequest.Merge(dst, src) +} +func (m *BackupClusterRequest) XXX_Size() int { + return xxx_messageInfo_BackupClusterRequest.Size(m) +} +func (m *BackupClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterRequest proto.InternalMessageInfo + +func (m *BackupClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type BackupClusterMetadata struct { + // ID of the Redis cluster that is being backed up. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupClusterMetadata) Reset() { *m = BackupClusterMetadata{} } +func (m *BackupClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*BackupClusterMetadata) ProtoMessage() {} +func (*BackupClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{14} +} +func (m *BackupClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupClusterMetadata.Unmarshal(m, b) +} +func (m *BackupClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *BackupClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupClusterMetadata.Merge(dst, src) +} +func (m *BackupClusterMetadata) XXX_Size() int { + return xxx_messageInfo_BackupClusterMetadata.Size(m) +} +func (m *BackupClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BackupClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupClusterMetadata proto.InternalMessageInfo + +func (m *BackupClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +type RestoreClusterRequest struct { + // ID of the backup to create a cluster from. + // To get the backup ID, use a [ClusterService.ListBackups] request. + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + // Name of the new Redis cluster. The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the new Redis cluster. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Custom labels for the Redis cluster as `` key:value `` pairs. Maximum 64 per cluster. 
+ // For example, "project": "mvp" or "source": "dictionary". + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Deployment environment of the new Redis cluster. + Environment Cluster_Environment `protobuf:"varint,5,opt,name=environment,proto3,enum=yandex.cloud.mdb.redis.v1alpha.Cluster_Environment" json:"environment,omitempty"` + // Configuration for the Redis cluster to be created. + ConfigSpec *ConfigSpec `protobuf:"bytes,6,opt,name=config_spec,json=configSpec,proto3" json:"config_spec,omitempty"` + // Configurations for Redis hosts that should be created for + // the cluster that is being created from the backup. + HostSpecs []*HostSpec `protobuf:"bytes,7,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + // ID of the network to create the Redis cluster in. + NetworkId string `protobuf:"bytes,8,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterRequest) Reset() { *m = RestoreClusterRequest{} } +func (m *RestoreClusterRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterRequest) ProtoMessage() {} +func (*RestoreClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{15} +} +func (m *RestoreClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterRequest.Unmarshal(m, b) +} +func (m *RestoreClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterRequest.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterRequest.Merge(dst, src) +} +func (m *RestoreClusterRequest) XXX_Size() int { + return xxx_messageInfo_RestoreClusterRequest.Size(m) +} +func (m *RestoreClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterRequest proto.InternalMessageInfo + +func (m *RestoreClusterRequest) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +func (m *RestoreClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RestoreClusterRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *RestoreClusterRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *RestoreClusterRequest) GetEnvironment() Cluster_Environment { + if m != nil { + return m.Environment + } + return Cluster_ENVIRONMENT_UNSPECIFIED +} + +func (m *RestoreClusterRequest) GetConfigSpec() *ConfigSpec { + if m != nil { + return m.ConfigSpec + } + return nil +} + +func (m *RestoreClusterRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +func (m *RestoreClusterRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type RestoreClusterMetadata struct { + // ID of the new Redis cluster that is being created from a backup. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ID of the backup that is being used for creating a cluster. 
+ BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreClusterMetadata) Reset() { *m = RestoreClusterMetadata{} } +func (m *RestoreClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*RestoreClusterMetadata) ProtoMessage() {} +func (*RestoreClusterMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{16} +} +func (m *RestoreClusterMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreClusterMetadata.Unmarshal(m, b) +} +func (m *RestoreClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreClusterMetadata.Marshal(b, m, deterministic) +} +func (dst *RestoreClusterMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreClusterMetadata.Merge(dst, src) +} +func (m *RestoreClusterMetadata) XXX_Size() int { + return xxx_messageInfo_RestoreClusterMetadata.Size(m) +} +func (m *RestoreClusterMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreClusterMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreClusterMetadata proto.InternalMessageInfo + +func (m *RestoreClusterMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RestoreClusterMetadata) GetBackupId() string { + if m != nil { + return m.BackupId + } + return "" +} + +type LogRecord struct { + // Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contents of the log record. + Message map[string]string `protobuf:"bytes,2,rep,name=message,proto3" json:"message,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{17} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogRecord.Unmarshal(m, b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) +} +func (dst *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(dst, src) +} +func (m *LogRecord) XXX_Size() int { + return xxx_messageInfo_LogRecord.Size(m) +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *LogRecord) GetMessage() map[string]string { + if m != nil { + return m.Message + } + return nil +} + +type ListClusterLogsRequest struct { + // ID of the Redis cluster to request logs for. + // To get the Redis cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Columns from the logs table to request. 
+ // If no columns are specified, entire log records are returned. + ColumnFilter []string `protobuf:"bytes,2,rep,name=column_filter,json=columnFilter,proto3" json:"column_filter,omitempty"` + ServiceType ListClusterLogsRequest_ServiceType `protobuf:"varint,3,opt,name=service_type,json=serviceType,proto3,enum=yandex.cloud.mdb.redis.v1alpha.ListClusterLogsRequest_ServiceType" json:"service_type,omitempty"` + // Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + FromTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=from_time,json=fromTime,proto3" json:"from_time,omitempty"` + // End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + ToTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=to_time,json=toTime,proto3" json:"to_time,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,6,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListClusterLogsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterLogsRequest) Reset() { *m = ListClusterLogsRequest{} } +func (m *ListClusterLogsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterLogsRequest) ProtoMessage() {} +func (*ListClusterLogsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{18} +} +func (m *ListClusterLogsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterLogsRequest.Unmarshal(m, b) +} +func (m *ListClusterLogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterLogsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterLogsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterLogsRequest.Merge(dst, src) +} +func (m *ListClusterLogsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterLogsRequest.Size(m) +} +func (m *ListClusterLogsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterLogsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterLogsRequest proto.InternalMessageInfo + +func (m *ListClusterLogsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterLogsRequest) GetColumnFilter() []string { + if m != nil { + return m.ColumnFilter + } + return nil +} + +func (m *ListClusterLogsRequest) GetServiceType() ListClusterLogsRequest_ServiceType { + if m != nil { + return m.ServiceType + } + return ListClusterLogsRequest_SERVICE_TYPE_UNSPECIFIED +} + +func (m *ListClusterLogsRequest) GetFromTime() *timestamp.Timestamp { + if m != nil { + return m.FromTime + } + return nil +} + +func (m *ListClusterLogsRequest) GetToTime() *timestamp.Timestamp { + if m != nil { + return m.ToTime + } + return nil +} + +func (m *ListClusterLogsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterLogsRequest) GetPageToken() 
string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterLogsResponse struct { + // Requested log records. + Logs []*LogRecord `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterLogsResponse) Reset() { *m = ListClusterLogsResponse{} } +func (m *ListClusterLogsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterLogsResponse) ProtoMessage() {} +func (*ListClusterLogsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{19} +} +func (m *ListClusterLogsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterLogsResponse.Unmarshal(m, b) +} +func (m *ListClusterLogsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterLogsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterLogsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterLogsResponse.Merge(dst, src) +} +func (m *ListClusterLogsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterLogsResponse.Size(m) +} +func (m *ListClusterLogsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterLogsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterLogsResponse proto.InternalMessageInfo + +func (m *ListClusterLogsResponse) GetLogs() []*LogRecord { + if m != nil { + return m.Logs + } + return nil +} + +func (m *ListClusterLogsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterOperationsRequest struct { + // ID of the Redis cluster to list operations for. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] + // returned by a previous list request. 
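For the time-bounded log listing described above, a minimal sketch (not part of the vendored file), assuming the generated client exposes a ListLogs method for ListClusterLogsRequest; the from_time/to_time values are built with the protobuf ptypes helpers already imported by this file's package.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	redis "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha"
)

// printRecentLogs fetches Redis service logs for the last hour of the given cluster.
func printRecentLogs(ctx context.Context, client redis.ClusterServiceClient, clusterID string) error {
	to := time.Now()
	fromTS, err := ptypes.TimestampProto(to.Add(-time.Hour))
	if err != nil {
		return err
	}
	toTS, err := ptypes.TimestampProto(to)
	if err != nil {
		return err
	}
	resp, err := client.ListLogs(ctx, &redis.ListClusterLogsRequest{
		ClusterId:   clusterID,
		ServiceType: redis.ListClusterLogsRequest_REDIS,
		FromTime:    fromTS,
		ToTime:      toTS,
		PageSize:    1000,
	})
	if err != nil {
		return err
	}
	for _, rec := range resp.Logs {
		// Each record is a timestamp plus a column->value map (see LogRecord above).
		fmt.Println(rec.GetTimestamp(), rec.GetMessage())
	}
	return nil
}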
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsRequest) Reset() { *m = ListClusterOperationsRequest{} } +func (m *ListClusterOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsRequest) ProtoMessage() {} +func (*ListClusterOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{20} +} +func (m *ListClusterOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsRequest.Unmarshal(m, b) +} +func (m *ListClusterOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsRequest.Merge(dst, src) +} +func (m *ListClusterOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsRequest.Size(m) +} +func (m *ListClusterOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsRequest proto.InternalMessageInfo + +func (m *ListClusterOperationsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterOperationsResponse struct { + // List of operations for the specified Redis cluster. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterOperationsResponse) Reset() { *m = ListClusterOperationsResponse{} } +func (m *ListClusterOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterOperationsResponse) ProtoMessage() {} +func (*ListClusterOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{21} +} +func (m *ListClusterOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterOperationsResponse.Unmarshal(m, b) +} +func (m *ListClusterOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterOperationsResponse.Merge(dst, src) +} +func (m *ListClusterOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterOperationsResponse.Size(m) +} +func (m *ListClusterOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterOperationsResponse proto.InternalMessageInfo + +func (m *ListClusterOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListClusterOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterBackupsRequest struct { + // ID of the Redis cluster. + // To get the Redis cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsRequest) Reset() { *m = ListClusterBackupsRequest{} } +func (m *ListClusterBackupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsRequest) ProtoMessage() {} +func (*ListClusterBackupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{22} +} +func (m *ListClusterBackupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsRequest.Unmarshal(m, b) +} +func (m *ListClusterBackupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsRequest.Merge(dst, src) +} +func (m *ListClusterBackupsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsRequest.Size(m) +} +func (m *ListClusterBackupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsRequest proto.InternalMessageInfo + +func (m *ListClusterBackupsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterBackupsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterBackupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterBackupsResponse struct { + // List of Redis backups. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterBackupsResponse) Reset() { *m = ListClusterBackupsResponse{} } +func (m *ListClusterBackupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterBackupsResponse) ProtoMessage() {} +func (*ListClusterBackupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{23} +} +func (m *ListClusterBackupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterBackupsResponse.Unmarshal(m, b) +} +func (m *ListClusterBackupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterBackupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterBackupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterBackupsResponse.Merge(dst, src) +} +func (m *ListClusterBackupsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterBackupsResponse.Size(m) +} +func (m *ListClusterBackupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterBackupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterBackupsResponse proto.InternalMessageInfo + +func (m *ListClusterBackupsResponse) GetBackups() []*Backup { + if m != nil { + return m.Backups + } + return nil +} + +func (m *ListClusterBackupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListClusterHostsRequest struct { + // ID of the Redis cluster. + // To get the Redis cluster ID use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsRequest) Reset() { *m = ListClusterHostsRequest{} } +func (m *ListClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsRequest) ProtoMessage() {} +func (*ListClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{24} +} +func (m *ListClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsRequest.Unmarshal(m, b) +} +func (m *ListClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsRequest.Merge(dst, src) +} +func (m *ListClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsRequest.Size(m) +} +func (m *ListClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsRequest proto.InternalMessageInfo + +func (m *ListClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListClusterHostsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClusterHostsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListClusterHostsResponse struct { + // List of hosts for the cluster. + Hosts []*Host `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value + // for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClusterHostsResponse) Reset() { *m = ListClusterHostsResponse{} } +func (m *ListClusterHostsResponse) String() string { return proto.CompactTextString(m) } +func (*ListClusterHostsResponse) ProtoMessage() {} +func (*ListClusterHostsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{25} +} +func (m *ListClusterHostsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClusterHostsResponse.Unmarshal(m, b) +} +func (m *ListClusterHostsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClusterHostsResponse.Marshal(b, m, deterministic) +} +func (dst *ListClusterHostsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClusterHostsResponse.Merge(dst, src) +} +func (m *ListClusterHostsResponse) XXX_Size() int { + return xxx_messageInfo_ListClusterHostsResponse.Size(m) +} +func (m *ListClusterHostsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClusterHostsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClusterHostsResponse proto.InternalMessageInfo + +func (m *ListClusterHostsResponse) GetHosts() []*Host { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *ListClusterHostsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type AddClusterHostsRequest struct { + // ID of the Redis cluster to add hosts to. + // To get the Redis cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Configurations for Redis hosts that should be added to the cluster. 
+ HostSpecs []*HostSpec `protobuf:"bytes,2,rep,name=host_specs,json=hostSpecs,proto3" json:"host_specs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsRequest) Reset() { *m = AddClusterHostsRequest{} } +func (m *AddClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsRequest) ProtoMessage() {} +func (*AddClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{26} +} +func (m *AddClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsRequest.Unmarshal(m, b) +} +func (m *AddClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsRequest.Merge(dst, src) +} +func (m *AddClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsRequest.Size(m) +} +func (m *AddClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsRequest proto.InternalMessageInfo + +func (m *AddClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsRequest) GetHostSpecs() []*HostSpec { + if m != nil { + return m.HostSpecs + } + return nil +} + +type AddClusterHostsMetadata struct { + // ID of the Redis cluster to which the hosts are being added. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being added to the cluster. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddClusterHostsMetadata) Reset() { *m = AddClusterHostsMetadata{} } +func (m *AddClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*AddClusterHostsMetadata) ProtoMessage() {} +func (*AddClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{27} +} +func (m *AddClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddClusterHostsMetadata.Unmarshal(m, b) +} +func (m *AddClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *AddClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddClusterHostsMetadata.Merge(dst, src) +} +func (m *AddClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_AddClusterHostsMetadata.Size(m) +} +func (m *AddClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_AddClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_AddClusterHostsMetadata proto.InternalMessageInfo + +func (m *AddClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *AddClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsRequest struct { + // ID of the Redis cluster to remove hosts from. 
+ // To get the Redis cluster ID, use a [ClusterService.List] request. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts to delete. + HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsRequest) Reset() { *m = DeleteClusterHostsRequest{} } +func (m *DeleteClusterHostsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsRequest) ProtoMessage() {} +func (*DeleteClusterHostsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{28} +} +func (m *DeleteClusterHostsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsRequest.Unmarshal(m, b) +} +func (m *DeleteClusterHostsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsRequest.Merge(dst, src) +} +func (m *DeleteClusterHostsRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsRequest.Size(m) +} +func (m *DeleteClusterHostsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsRequest proto.InternalMessageInfo + +func (m *DeleteClusterHostsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsRequest) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type DeleteClusterHostsMetadata struct { + // ID of the Redis cluster to remove hosts from. + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Names of hosts that are being deleted. 
+ HostNames []string `protobuf:"bytes,2,rep,name=host_names,json=hostNames,proto3" json:"host_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterHostsMetadata) Reset() { *m = DeleteClusterHostsMetadata{} } +func (m *DeleteClusterHostsMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterHostsMetadata) ProtoMessage() {} +func (*DeleteClusterHostsMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{29} +} +func (m *DeleteClusterHostsMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterHostsMetadata.Unmarshal(m, b) +} +func (m *DeleteClusterHostsMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterHostsMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteClusterHostsMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterHostsMetadata.Merge(dst, src) +} +func (m *DeleteClusterHostsMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteClusterHostsMetadata.Size(m) +} +func (m *DeleteClusterHostsMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterHostsMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterHostsMetadata proto.InternalMessageInfo + +func (m *DeleteClusterHostsMetadata) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterHostsMetadata) GetHostNames() []string { + if m != nil { + return m.HostNames + } + return nil +} + +type HostSpec struct { + // ID of the availability zone where the host resides. + // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + ZoneId string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // ID of the subnet that the host should belong to. This subnet should be a part + // of the network that the cluster belongs to. + // The ID of the network is set in the field [Cluster.network_id]. + SubnetId string `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HostSpec) Reset() { *m = HostSpec{} } +func (m *HostSpec) String() string { return proto.CompactTextString(m) } +func (*HostSpec) ProtoMessage() {} +func (*HostSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{30} +} +func (m *HostSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HostSpec.Unmarshal(m, b) +} +func (m *HostSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HostSpec.Marshal(b, m, deterministic) +} +func (dst *HostSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSpec.Merge(dst, src) +} +func (m *HostSpec) XXX_Size() int { + return xxx_messageInfo_HostSpec.Size(m) +} +func (m *HostSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HostSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSpec proto.InternalMessageInfo + +func (m *HostSpec) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *HostSpec) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +type ConfigSpec struct { + // Version of Redis used in the cluster. + // The only possible value is `5.0`. 
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Configuration of a Redis cluster. + // + // Types that are valid to be assigned to RedisSpec: + // *ConfigSpec_RedisConfig_5_0 + RedisSpec isConfigSpec_RedisSpec `protobuf_oneof:"redis_spec"` + // Resources allocated to Redis hosts. + Resources *Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (m *ConfigSpec) String() string { return proto.CompactTextString(m) } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cluster_service_77935d045fe8fb01, []int{31} +} +func (m *ConfigSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSpec.Unmarshal(m, b) +} +func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSpec.Marshal(b, m, deterministic) +} +func (dst *ConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec.Merge(dst, src) +} +func (m *ConfigSpec) XXX_Size() int { + return xxx_messageInfo_ConfigSpec.Size(m) +} +func (m *ConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo + +func (m *ConfigSpec) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type isConfigSpec_RedisSpec interface { + isConfigSpec_RedisSpec() +} + +type ConfigSpec_RedisConfig_5_0 struct { + RedisConfig_5_0 *config.RedisConfig5_0 `protobuf:"bytes,2,opt,name=redis_config_5_0,json=redisConfig50,proto3,oneof"` +} + +func (*ConfigSpec_RedisConfig_5_0) isConfigSpec_RedisSpec() {} + +func (m *ConfigSpec) GetRedisSpec() isConfigSpec_RedisSpec { + if m != nil { + return m.RedisSpec + } + return nil +} + +func (m *ConfigSpec) GetRedisConfig_5_0() *config.RedisConfig5_0 { + if x, ok := m.GetRedisSpec().(*ConfigSpec_RedisConfig_5_0); ok { + return x.RedisConfig_5_0 + } + return nil +} + +func (m *ConfigSpec) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ConfigSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigSpec_OneofMarshaler, _ConfigSpec_OneofUnmarshaler, _ConfigSpec_OneofSizer, []interface{}{ + (*ConfigSpec_RedisConfig_5_0)(nil), + } +} + +func _ConfigSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigSpec) + // redis_spec + switch x := m.RedisSpec.(type) { + case *ConfigSpec_RedisConfig_5_0: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RedisConfig_5_0); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigSpec.RedisSpec has unexpected type %T", x) + } + return nil +} + +func _ConfigSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigSpec) + switch tag { + case 2: // redis_spec.redis_config_5_0 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(config.RedisConfig5_0) + err := b.DecodeMessage(msg) + m.RedisSpec = &ConfigSpec_RedisConfig_5_0{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigSpec) + // redis_spec + switch x := m.RedisSpec.(type) { + case *ConfigSpec_RedisConfig_5_0: + s := proto.Size(x.RedisConfig_5_0) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*GetClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClustersResponse") + proto.RegisterType((*CreateClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.CreateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.redis.v1alpha.CreateClusterRequest.LabelsEntry") + proto.RegisterType((*CreateClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.CreateClusterMetadata") + proto.RegisterType((*UpdateClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.UpdateClusterRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.redis.v1alpha.UpdateClusterRequest.LabelsEntry") + proto.RegisterType((*UpdateClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.UpdateClusterMetadata") + proto.RegisterType((*DeleteClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.DeleteClusterRequest") + proto.RegisterType((*DeleteClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.DeleteClusterMetadata") + proto.RegisterType((*StartClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.StartClusterRequest") + proto.RegisterType((*StartClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.StartClusterMetadata") + proto.RegisterType((*StopClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.StopClusterRequest") + proto.RegisterType((*StopClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.StopClusterMetadata") + proto.RegisterType((*BackupClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.BackupClusterRequest") + proto.RegisterType((*BackupClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.BackupClusterMetadata") + proto.RegisterType((*RestoreClusterRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.RestoreClusterRequest") + 
proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.redis.v1alpha.RestoreClusterRequest.LabelsEntry") + proto.RegisterType((*RestoreClusterMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.RestoreClusterMetadata") + proto.RegisterType((*LogRecord)(nil), "yandex.cloud.mdb.redis.v1alpha.LogRecord") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.mdb.redis.v1alpha.LogRecord.MessageEntry") + proto.RegisterType((*ListClusterLogsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterLogsRequest") + proto.RegisterType((*ListClusterLogsResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterLogsResponse") + proto.RegisterType((*ListClusterOperationsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterOperationsRequest") + proto.RegisterType((*ListClusterOperationsResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterOperationsResponse") + proto.RegisterType((*ListClusterBackupsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterBackupsRequest") + proto.RegisterType((*ListClusterBackupsResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterBackupsResponse") + proto.RegisterType((*ListClusterHostsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterHostsRequest") + proto.RegisterType((*ListClusterHostsResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListClusterHostsResponse") + proto.RegisterType((*AddClusterHostsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.AddClusterHostsRequest") + proto.RegisterType((*AddClusterHostsMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.AddClusterHostsMetadata") + proto.RegisterType((*DeleteClusterHostsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.DeleteClusterHostsRequest") + proto.RegisterType((*DeleteClusterHostsMetadata)(nil), "yandex.cloud.mdb.redis.v1alpha.DeleteClusterHostsMetadata") + proto.RegisterType((*HostSpec)(nil), "yandex.cloud.mdb.redis.v1alpha.HostSpec") + proto.RegisterType((*ConfigSpec)(nil), "yandex.cloud.mdb.redis.v1alpha.ConfigSpec") + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.ListClusterLogsRequest_ServiceType", ListClusterLogsRequest_ServiceType_name, ListClusterLogsRequest_ServiceType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ClusterServiceClient is the client API for ClusterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterServiceClient interface { + // Returns the specified Redis cluster. + // + // To get the list of available Redis clusters, make a [List] request. + Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Retrieves the list of Redis clusters that belong + // to the specified folder. + List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Creates a Redis cluster in the specified folder. + Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified Redis cluster. + Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified Redis cluster. 
+ Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Start the specified Redis cluster. + Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Stop the specified Redis cluster. + Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a backup for the specified Redis cluster. + Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Creates a new Redis cluster using the specified backup. + Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Retrieves logs for the specified Redis cluster. + // For more information about logs, see the [Logs](/docs/managed-redis/concepts/logs) section in the documentation. + ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) + // Retrieves the list of operations for the specified cluster. + ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified Redis cluster. + ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. + DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type clusterServiceClient struct { + cc *grpc.ClientConn +} + +func NewClusterServiceClient(cc *grpc.ClientConn) ClusterServiceClient { + return &clusterServiceClient{cc} +} + +func (c *clusterServiceClient) Get(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) List(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Create(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Update(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Delete(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Start(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Stop(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Backup(ctx context.Context, in *BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Backup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Restore(ctx context.Context, in *RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Restore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListLogs(ctx context.Context, in *ListClusterLogsRequest, opts ...grpc.CallOption) (*ListClusterLogsResponse, error) { + out := new(ListClusterLogsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListLogs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListOperations(ctx context.Context, in *ListClusterOperationsRequest, opts ...grpc.CallOption) (*ListClusterOperationsResponse, error) { + out := new(ListClusterOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListBackups(ctx context.Context, in *ListClusterBackupsRequest, opts ...grpc.CallOption) (*ListClusterBackupsResponse, error) { + out := new(ListClusterBackupsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListBackups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) ListHosts(ctx context.Context, in *ListClusterHostsRequest, opts ...grpc.CallOption) (*ListClusterHostsResponse, error) { + out := new(ListClusterHostsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) AddHosts(ctx context.Context, in *AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/AddHosts", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) DeleteHosts(ctx context.Context, in *DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ClusterService/DeleteHosts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServiceServer is the server API for ClusterService service. +type ClusterServiceServer interface { + // Returns the specified Redis cluster. + // + // To get the list of available Redis clusters, make a [List] request. + Get(context.Context, *GetClusterRequest) (*Cluster, error) + // Retrieves the list of Redis clusters that belong + // to the specified folder. + List(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Creates a Redis cluster in the specified folder. + Create(context.Context, *CreateClusterRequest) (*operation.Operation, error) + // Updates the specified Redis cluster. + Update(context.Context, *UpdateClusterRequest) (*operation.Operation, error) + // Deletes the specified Redis cluster. + Delete(context.Context, *DeleteClusterRequest) (*operation.Operation, error) + // Start the specified Redis cluster. + Start(context.Context, *StartClusterRequest) (*operation.Operation, error) + // Stop the specified Redis cluster. + Stop(context.Context, *StopClusterRequest) (*operation.Operation, error) + // Creates a backup for the specified Redis cluster. + Backup(context.Context, *BackupClusterRequest) (*operation.Operation, error) + // Creates a new Redis cluster using the specified backup. + Restore(context.Context, *RestoreClusterRequest) (*operation.Operation, error) + // Retrieves logs for the specified Redis cluster. + // For more information about logs, see the [Logs](/docs/managed-redis/concepts/logs) section in the documentation. + ListLogs(context.Context, *ListClusterLogsRequest) (*ListClusterLogsResponse, error) + // Retrieves the list of operations for the specified cluster. + ListOperations(context.Context, *ListClusterOperationsRequest) (*ListClusterOperationsResponse, error) + // Retrieves the list of available backups for the specified Redis cluster. + ListBackups(context.Context, *ListClusterBackupsRequest) (*ListClusterBackupsResponse, error) + // Retrieves a list of hosts for the specified cluster. + ListHosts(context.Context, *ListClusterHostsRequest) (*ListClusterHostsResponse, error) + // Creates new hosts for a cluster. + AddHosts(context.Context, *AddClusterHostsRequest) (*operation.Operation, error) + // Deletes the specified hosts for a cluster. 
+ DeleteHosts(context.Context, *DeleteClusterHostsRequest) (*operation.Operation, error) +} + +func RegisterClusterServiceServer(s *grpc.Server, srv ClusterServiceServer) { + s.RegisterService(&_ClusterService_serviceDesc, srv) +} + +func _ClusterService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Get(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).List(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Create(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Update(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Delete(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _ClusterService_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Start(ctx, req.(*StartClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Stop(ctx, req.(*StopClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Backup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackupClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Backup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Backup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Backup(ctx, req.(*BackupClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Restore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Restore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/Restore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Restore(ctx, req.(*RestoreClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterLogsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListLogs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListLogs(ctx, req.(*ListClusterLogsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(ListClusterOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListOperations(ctx, req.(*ListClusterOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListBackups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListBackups(ctx, req.(*ListClusterBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_ListHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).ListHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/ListHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).ListHosts(ctx, req.(*ListClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_AddHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).AddHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/AddHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).AddHosts(ctx, req.(*AddClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_DeleteHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterHostsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).DeleteHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ClusterService/DeleteHosts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).DeleteHosts(ctx, req.(*DeleteClusterHostsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.redis.v1alpha.ClusterService", + HandlerType: (*ClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + 
Handler: _ClusterService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ClusterService_List_Handler, + }, + { + MethodName: "Create", + Handler: _ClusterService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _ClusterService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _ClusterService_Delete_Handler, + }, + { + MethodName: "Start", + Handler: _ClusterService_Start_Handler, + }, + { + MethodName: "Stop", + Handler: _ClusterService_Stop_Handler, + }, + { + MethodName: "Backup", + Handler: _ClusterService_Backup_Handler, + }, + { + MethodName: "Restore", + Handler: _ClusterService_Restore_Handler, + }, + { + MethodName: "ListLogs", + Handler: _ClusterService_ListLogs_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ClusterService_ListOperations_Handler, + }, + { + MethodName: "ListBackups", + Handler: _ClusterService_ListBackups_Handler, + }, + { + MethodName: "ListHosts", + Handler: _ClusterService_ListHosts_Handler, + }, + { + MethodName: "AddHosts", + Handler: _ClusterService_AddHosts_Handler, + }, + { + MethodName: "DeleteHosts", + Handler: _ClusterService_DeleteHosts_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/redis/v1alpha/cluster_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/cluster_service.proto", fileDescriptor_cluster_service_77935d045fe8fb01) +} + +var fileDescriptor_cluster_service_77935d045fe8fb01 = []byte{ + // 2056 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0x15, 0xce, 0x48, 0xe2, 0xdf, 0xa3, 0xac, 0xa8, 0x13, 0xd9, 0x66, 0x58, 0xff, 0xc8, 0xdb, 0xc0, + 0x96, 0x69, 0x73, 0xf9, 0x27, 0xca, 0x96, 0x62, 0x27, 0x16, 0x65, 0xda, 0x56, 0x6b, 0x3b, 0xee, + 0x52, 0x69, 0x50, 0x1b, 0x06, 0xbb, 0xe4, 0x8e, 0x28, 0x42, 0xe4, 0x2e, 0xbb, 0xbb, 0x54, 0x2d, + 0x15, 0x29, 0x52, 0x17, 0xe8, 0xc1, 0xd7, 0x02, 0x2d, 0xd2, 0x02, 0x05, 0x7a, 0xea, 0x39, 0x02, + 0xda, 0x02, 0x45, 0xdb, 0x43, 0x2f, 0x56, 0x7b, 0xe8, 0x41, 0x39, 0xf4, 0xd8, 0x4b, 0x0e, 0x3d, + 0xf5, 0x90, 0x63, 0x4f, 0xc5, 0xcc, 0xec, 0x92, 0xbb, 0x22, 0xa9, 0xdd, 0x95, 0xe2, 0x20, 0x87, + 0xde, 0xb8, 0x33, 0xf3, 0xde, 0x7c, 0xef, 0x9b, 0x37, 0xf3, 0x7e, 0x24, 0x98, 0xdf, 0x96, 0x55, + 0x85, 0x3c, 0xcb, 0xd4, 0x5b, 0x5a, 0x57, 0xc9, 0xb4, 0x95, 0x5a, 0x46, 0x27, 0x4a, 0xd3, 0xc8, + 0x6c, 0xe5, 0xe4, 0x56, 0x67, 0x43, 0xce, 0xd4, 0x5b, 0x5d, 0xc3, 0x24, 0x7a, 0xd5, 0x20, 0xfa, + 0x56, 0xb3, 0x4e, 0xc4, 0x8e, 0xae, 0x99, 0x1a, 0x3e, 0xc7, 0xa5, 0x44, 0x26, 0x25, 0xb6, 0x95, + 0x9a, 0xc8, 0xa4, 0x44, 0x4b, 0x2a, 0x79, 0xa6, 0xa1, 0x69, 0x8d, 0x16, 0xc9, 0xc8, 0x9d, 0x66, + 0x46, 0x56, 0x55, 0xcd, 0x94, 0xcd, 0xa6, 0xa6, 0x1a, 0x5c, 0x3a, 0x39, 0x6b, 0xcd, 0xb2, 0xaf, + 0x5a, 0x77, 0x3d, 0xb3, 0xde, 0x24, 0x2d, 0xa5, 0xda, 0x96, 0x8d, 0x4d, 0x6b, 0xc5, 0xf9, 0x83, + 0x2b, 0xcc, 0x66, 0x9b, 0x18, 0xa6, 0xdc, 0xee, 0x58, 0x0b, 0x92, 0x16, 0x6c, 0xba, 0x81, 0xd6, + 0x21, 0x3a, 0xd3, 0x6f, 0xcd, 0x5d, 0x74, 0x99, 0xd4, 0x9b, 0x1d, 0x58, 0x77, 0xd6, 0xb5, 0x6e, + 0x4b, 0x6e, 0x35, 0x15, 0xe7, 0xf4, 0x15, 0x0f, 0x66, 0x6a, 0x72, 0x7d, 0xb3, 0x6b, 0xe3, 0xb9, + 0xea, 0x8f, 0x46, 0x6b, 0xb5, 0x27, 0xe9, 0x9a, 0xba, 0xde, 0x6c, 0xf0, 0xc1, 0x62, 0x35, 0xcb, + 0xa5, 0x84, 0x5b, 0xf0, 0xb5, 0xbb, 0xc4, 0x5c, 0xe1, 0x9a, 0x24, 0xf2, 0xfd, 0x2e, 0x31, 0x4c, + 0x7c, 0x05, 0xc0, 0x3e, 0xa2, 0xa6, 0x92, 0x40, 0xb3, 0x68, 0x2e, 0x56, 0x9a, 0xfc, 0xf7, 0xcb, + 0x1c, 0x7a, 0xb1, 0x97, 0x9b, 0xb8, 0x71, 0xb3, 0x98, 0x95, 0x62, 0xd6, 
0xfc, 0xaa, 0x22, 0xfc, + 0x01, 0xc1, 0x1b, 0xf7, 0x9b, 0x86, 0xad, 0xc3, 0xb0, 0x95, 0x5c, 0x86, 0xd8, 0xba, 0xd6, 0x52, + 0x46, 0xeb, 0x88, 0xf2, 0xe9, 0x55, 0x05, 0x5f, 0x82, 0x58, 0x47, 0x6e, 0x90, 0xaa, 0xd1, 0xdc, + 0x21, 0x89, 0xb1, 0x59, 0x34, 0x37, 0x5e, 0x82, 0xff, 0xbe, 0xcc, 0x85, 0xb3, 0xe9, 0x5c, 0x36, + 0x9b, 0x95, 0xa2, 0x74, 0xb2, 0xd2, 0xdc, 0x21, 0x78, 0x0e, 0x80, 0x2d, 0x34, 0xb5, 0x4d, 0xa2, + 0x26, 0xc6, 0x99, 0xd2, 0xd8, 0x8b, 0xbd, 0x5c, 0xe8, 0xc6, 0xcd, 0x5c, 0x36, 0x2b, 0x31, 0x2d, + 0x6b, 0x74, 0x0e, 0x0b, 0x10, 0x5e, 0x6f, 0xb6, 0x4c, 0xa2, 0x27, 0x26, 0xd8, 0x2a, 0x78, 0xb1, + 0x97, 0x0b, 0xb3, 0x55, 0x59, 0xc9, 0x9a, 0x11, 0x7e, 0x82, 0x60, 0xc6, 0x8d, 0xdc, 0xe8, 0x68, + 0xaa, 0x41, 0xf0, 0x0a, 0x44, 0x2d, 0xfb, 0x8c, 0x04, 0x9a, 0x1d, 0x9f, 0x8b, 0xe7, 0x2f, 0x89, + 0x87, 0x3b, 0xa7, 0x68, 0x33, 0xd8, 0x13, 0xc4, 0x17, 0xe1, 0x75, 0x95, 0x3c, 0x33, 0xab, 0x0e, + 0xc0, 0xd4, 0xb4, 0x98, 0x74, 0x82, 0x0e, 0x3f, 0xb2, 0x91, 0x0a, 0xbf, 0x0d, 0xc1, 0xcc, 0x8a, + 0x4e, 0x64, 0x93, 0x1c, 0x38, 0x85, 0x00, 0x04, 0xe6, 0x61, 0x42, 0x95, 0xdb, 0x9c, 0xbb, 0x58, + 0xe9, 0x1c, 0x5d, 0xf5, 0xf9, 0xcb, 0xdc, 0xd4, 0x13, 0x39, 0xbd, 0xb3, 0x9c, 0x7e, 0x9c, 0x4d, + 0x2f, 0x56, 0xd3, 0x4f, 0x53, 0x5c, 0x6e, 0xa1, 0x20, 0xb1, 0xb5, 0xf8, 0x0a, 0xc4, 0x15, 0x62, + 0xd4, 0xf5, 0x66, 0x87, 0xfa, 0xa7, 0x9b, 0xcc, 0x7c, 0x71, 0x41, 0x72, 0xce, 0xe2, 0x8f, 0x11, + 0x84, 0x5b, 0x72, 0x8d, 0xb4, 0x8c, 0xc4, 0x04, 0x23, 0xe4, 0x96, 0x27, 0x21, 0x43, 0x4c, 0x12, + 0xef, 0x33, 0x15, 0x65, 0xd5, 0xd4, 0xb7, 0x4b, 0xef, 0x7e, 0xfe, 0x32, 0x17, 0x7f, 0x92, 0xae, + 0x66, 0xd3, 0x8b, 0x72, 0x7a, 0xe7, 0x69, 0xea, 0x39, 0x87, 0x37, 0x6f, 0xc3, 0xdc, 0xdd, 0xcb, + 0x85, 0x93, 0xf6, 0x2f, 0x8c, 0xa7, 0xa9, 0x31, 0x4f, 0x1d, 0xeb, 0x25, 0x0b, 0x10, 0x7e, 0x02, + 0x71, 0xa2, 0x6e, 0x35, 0x75, 0x4d, 0x6d, 0x13, 0xd5, 0x4c, 0x84, 0x66, 0xd1, 0xdc, 0x54, 0xbe, + 0xe0, 0xf3, 0xc0, 0xc4, 0x72, 0x5f, 0xb4, 0x34, 0x41, 0x89, 0x93, 0x9c, 0xda, 0xf0, 0xb7, 0x21, + 0xce, 0x2f, 0x4e, 0xd5, 0xe8, 0x90, 0x7a, 0x22, 0x3c, 0x8b, 0xe6, 0xe2, 0xf9, 0x94, 0xa7, 0x72, + 0x26, 0x52, 0xe9, 0x90, 0xba, 0xa5, 0x13, 0xea, 0xbd, 0x11, 0xfc, 0x1e, 0xc0, 0x86, 0x66, 0x98, + 0x4c, 0xa1, 0x91, 0x88, 0x30, 0x3a, 0xe7, 0xbc, 0x34, 0xde, 0xd3, 0x0c, 0x93, 0xe9, 0x0b, 0x3f, + 0xdf, 0xcb, 0x8d, 0xbd, 0x93, 0x95, 0x62, 0x1b, 0xd6, 0x88, 0x41, 0xaf, 0xab, 0x4a, 0xcc, 0x1f, + 0x68, 0xfa, 0x26, 0xf5, 0x14, 0x18, 0x76, 0x5d, 0xad, 0xf9, 0x55, 0x25, 0xb9, 0x08, 0x71, 0xc7, + 0x29, 0xe0, 0x69, 0x18, 0xdf, 0x24, 0xdb, 0xdc, 0xbd, 0x24, 0xfa, 0x13, 0xcf, 0x40, 0x68, 0x4b, + 0x6e, 0x75, 0x2d, 0x67, 0x92, 0xf8, 0xc7, 0xd2, 0xd8, 0x75, 0x24, 0x2c, 0xc0, 0x49, 0xd7, 0xa9, + 0x3e, 0x20, 0xa6, 0xac, 0xc8, 0xa6, 0x8c, 0xcf, 0x0e, 0xbe, 0x17, 0xce, 0x17, 0xe2, 0xc7, 0x13, + 0x30, 0xf3, 0x7e, 0x47, 0x19, 0xf4, 0xf0, 0x20, 0xef, 0x0c, 0x7e, 0x1b, 0xe2, 0x5d, 0xa6, 0x84, + 0xbd, 0xe9, 0x0c, 0x5d, 0x3c, 0x9f, 0x14, 0xf9, 0xa3, 0x2e, 0xda, 0x8f, 0xba, 0x78, 0x87, 0x3e, + 0xfb, 0x0f, 0x64, 0x63, 0x53, 0x02, 0xbe, 0x9c, 0xfe, 0x7e, 0xd5, 0xce, 0x3e, 0xcc, 0xba, 0x57, + 0xe3, 0xec, 0xdf, 0x72, 0xfb, 0x63, 0x28, 0xa8, 0x3f, 0xba, 0x3c, 0x51, 0xb4, 0x9e, 0x8d, 0x30, + 0xa3, 0x23, 0xe9, 0xf5, 0x64, 0x1c, 0xd3, 0x77, 0x5c, 0x24, 0xf9, 0xf5, 0x9d, 0x15, 0x98, 0xb9, + 0x4d, 0x5a, 0xe4, 0x58, 0xae, 0x43, 0x37, 0x77, 0x29, 0xf1, 0xbb, 0x79, 0x09, 0xde, 0xa8, 0x98, + 0xb2, 0x7e, 0xac, 0xf0, 0x58, 0x84, 0x19, 0xa7, 0x0e, 0xbf, 0x5b, 0x2f, 0x03, 0xae, 0x98, 0x5a, + 0xe7, 0x38, 0x3b, 0xcf, 0x53, 0xf4, 0x3d, 0x15, 0x01, 0x08, 0x2f, 0xb1, 0x24, 0xe4, 0x98, 0x84, + 
0xbb, 0x94, 0xf8, 0xdd, 0xfc, 0x37, 0x21, 0x38, 0x29, 0x11, 0xc3, 0xd4, 0xf4, 0x83, 0xe7, 0x7d, + 0x01, 0x62, 0x3c, 0x37, 0xea, 0xef, 0xce, 0x5f, 0xd6, 0x28, 0x1f, 0xfe, 0x32, 0x82, 0xe0, 0x2f, + 0x0f, 0xbe, 0x0b, 0xcb, 0x5e, 0xf7, 0x6e, 0xa8, 0x2d, 0xff, 0x8f, 0x82, 0x5f, 0x42, 0x14, 0x8c, + 0xbe, 0xb2, 0x28, 0xb8, 0x06, 0xa7, 0xdc, 0xc7, 0xea, 0xd3, 0xb9, 0xf1, 0xd7, 0x9d, 0x2e, 0xcc, + 0xd5, 0xf6, 0x9c, 0x57, 0xf8, 0x14, 0x41, 0xec, 0xbe, 0xd6, 0x90, 0x48, 0x5d, 0xd3, 0x15, 0x7c, + 0x1d, 0x62, 0xbd, 0xe2, 0x84, 0x29, 0x1a, 0x16, 0xe9, 0xd6, 0xec, 0x15, 0x52, 0x7f, 0x31, 0x7e, + 0x04, 0x91, 0x36, 0x31, 0x0c, 0xb9, 0x41, 0x91, 0x53, 0x4e, 0x17, 0xbc, 0x38, 0xed, 0xed, 0x2a, + 0x3e, 0xe0, 0x82, 0x8c, 0x12, 0xc9, 0x56, 0x93, 0x5c, 0x82, 0x49, 0xe7, 0x44, 0x20, 0xae, 0xfe, + 0x3e, 0x0e, 0xa7, 0x1c, 0x19, 0xf6, 0x7d, 0xad, 0x61, 0x1c, 0x29, 0xf6, 0x7f, 0x03, 0x4e, 0xd4, + 0xb5, 0x56, 0xb7, 0xad, 0x56, 0xad, 0xa4, 0x9e, 0xda, 0x16, 0x93, 0x26, 0xf9, 0xe0, 0x1d, 0x36, + 0x86, 0x09, 0x4c, 0x5a, 0x05, 0x65, 0xd5, 0xdc, 0xee, 0x10, 0x76, 0x99, 0xa7, 0xf2, 0x25, 0x4f, + 0xfb, 0x87, 0xe2, 0x13, 0x2b, 0x5c, 0xd5, 0xda, 0x76, 0x87, 0x48, 0x71, 0xa3, 0xff, 0x81, 0xaf, + 0x41, 0x6c, 0x5d, 0xd7, 0xda, 0x55, 0xca, 0x39, 0x2b, 0x2e, 0x0e, 0x3f, 0x9b, 0x28, 0x5d, 0x4c, + 0x3f, 0x71, 0x01, 0x22, 0xa6, 0xc6, 0xc5, 0x42, 0x9e, 0x62, 0x61, 0x53, 0x63, 0x42, 0xae, 0xd2, + 0x28, 0xec, 0xbb, 0x34, 0x8a, 0x8c, 0x2e, 0x8d, 0x84, 0x05, 0x88, 0x3b, 0x8c, 0xc3, 0x67, 0x20, + 0x51, 0x29, 0x4b, 0xdf, 0x59, 0x5d, 0x29, 0x57, 0xd7, 0xbe, 0xfb, 0xa8, 0x5c, 0x7d, 0xff, 0x61, + 0xe5, 0x51, 0x79, 0x65, 0xf5, 0xce, 0x6a, 0xf9, 0xf6, 0xf4, 0x6b, 0x38, 0x06, 0x21, 0xa9, 0x7c, + 0x7b, 0xb5, 0x32, 0x8d, 0x84, 0x8f, 0x10, 0x9c, 0x1e, 0x20, 0xcb, 0xaa, 0x98, 0x6e, 0xc2, 0x44, + 0x4b, 0x6b, 0xd8, 0xd5, 0xd2, 0x65, 0xdf, 0x3e, 0x27, 0x31, 0x31, 0xdf, 0xb5, 0xd2, 0xaf, 0x11, + 0x9c, 0x71, 0x40, 0x78, 0xcf, 0x2e, 0xbe, 0x8f, 0xe6, 0x55, 0x5f, 0x7c, 0xd9, 0x29, 0xbc, 0x40, + 0x70, 0x76, 0x04, 0x40, 0x8b, 0xa9, 0x65, 0x80, 0x5e, 0xcf, 0xc0, 0xe6, 0xeb, 0x82, 0x9b, 0xaf, + 0x7e, 0x4f, 0xa1, 0x27, 0x2f, 0x39, 0x84, 0x7c, 0xb3, 0xf5, 0x2b, 0x04, 0x6f, 0x3a, 0xc0, 0xf0, + 0x88, 0xfc, 0x95, 0xa1, 0xea, 0xa7, 0x08, 0x92, 0xc3, 0xd0, 0x59, 0x3c, 0xdd, 0x82, 0x08, 0x7f, + 0x1c, 0x6d, 0x92, 0x2e, 0x7a, 0x39, 0x15, 0xd7, 0x20, 0xd9, 0x62, 0xbe, 0x69, 0xfa, 0xd8, 0xed, + 0xd7, 0x34, 0xc6, 0x7c, 0x65, 0x48, 0xfa, 0x11, 0x24, 0x06, 0xa1, 0x59, 0x0c, 0x2d, 0x41, 0x88, + 0x46, 0x3f, 0x9b, 0x9f, 0xb7, 0xfc, 0x04, 0x4f, 0x89, 0x8b, 0xf8, 0xe6, 0xe6, 0xe7, 0x08, 0x4e, + 0x2d, 0x2b, 0xca, 0xb1, 0xa9, 0x71, 0x47, 0xfb, 0xb1, 0x63, 0x47, 0x7b, 0xe1, 0x03, 0x38, 0x7d, + 0x00, 0x97, 0xdf, 0x30, 0x7c, 0xd6, 0x82, 0x42, 0xf3, 0x3f, 0xc3, 0x0a, 0x24, 0x4c, 0xf1, 0x43, + 0x3a, 0x20, 0x3c, 0x83, 0x37, 0x5d, 0xb5, 0xc2, 0xd1, 0x6d, 0x16, 0x07, 0x37, 0x2a, 0xbd, 0xce, + 0x2d, 0xb1, 0x13, 0xcc, 0x82, 0x73, 0xe7, 0xc7, 0x90, 0x1c, 0xdc, 0xf9, 0x0b, 0xb2, 0xea, 0x16, + 0x44, 0x6d, 0x36, 0xf1, 0x69, 0x88, 0xec, 0x68, 0x2a, 0xe9, 0xab, 0x09, 0xd3, 0x4f, 0x9e, 0xa0, + 0x18, 0xdd, 0x9a, 0x4a, 0x4c, 0x47, 0x82, 0xc2, 0x07, 0x56, 0x15, 0xe1, 0x5f, 0x08, 0xa0, 0x9f, + 0xd0, 0xe1, 0x04, 0x44, 0xb6, 0x88, 0x6e, 0xd0, 0xa4, 0x99, 0x2b, 0xb1, 0x3f, 0xf1, 0xf7, 0x60, + 0x9a, 0x1d, 0x63, 0xd5, 0xca, 0x18, 0x8b, 0xd5, 0xac, 0x55, 0xac, 0x17, 0xbd, 0x0e, 0x9c, 0x4b, + 0x88, 0x12, 0x1d, 0xe4, 0x7b, 0x15, 0xab, 0xd9, 0x7b, 0xaf, 0x49, 0x27, 0x74, 0xc7, 0x48, 0x16, + 0xdf, 0x85, 0x98, 0x4e, 0x0c, 0xad, 0xab, 0xd7, 0x89, 0xc1, 0x6e, 0x8f, 0x8f, 0x88, 0x23, 0xd9, + 0x02, 0x52, 0x5f, 0xb6, 
0x34, 0x09, 0xc0, 0xa1, 0x52, 0xb7, 0xcc, 0xff, 0xf3, 0x14, 0x4c, 0x59, + 0xd4, 0x5b, 0xf1, 0x11, 0xff, 0x02, 0xc1, 0xf8, 0x5d, 0x62, 0xe2, 0x9c, 0x97, 0xfa, 0x81, 0x1e, + 0x6a, 0xd2, 0x6f, 0xc7, 0x50, 0x98, 0x7f, 0xfe, 0xe9, 0x67, 0x3f, 0x1b, 0x13, 0xf1, 0xd5, 0x4c, + 0x5b, 0x56, 0xe5, 0x06, 0x51, 0xd2, 0x43, 0xbb, 0xbc, 0x46, 0xe6, 0x87, 0xfd, 0xf3, 0xff, 0x90, + 0xd6, 0x22, 0x13, 0xf4, 0x65, 0xc0, 0x85, 0x00, 0xf9, 0x8d, 0xed, 0xc7, 0xc9, 0xf9, 0x60, 0x42, + 0xfc, 0xc1, 0x11, 0x2e, 0x31, 0xa4, 0x17, 0xf0, 0x79, 0x0f, 0xa4, 0xf8, 0x13, 0x04, 0x61, 0xde, + 0x29, 0xc2, 0xf3, 0x47, 0xe9, 0x13, 0x26, 0xbd, 0x03, 0xa2, 0xf0, 0x70, 0x77, 0x3f, 0x35, 0x3b, + 0xaa, 0x21, 0x15, 0xb1, 0x06, 0x18, 0xe0, 0xb7, 0x04, 0x2f, 0xc0, 0x4b, 0x28, 0x85, 0xff, 0x82, + 0x20, 0xcc, 0x3b, 0x14, 0xde, 0x98, 0x87, 0xb5, 0x7b, 0xfc, 0x60, 0x7e, 0xc2, 0x31, 0x0f, 0x6f, + 0x84, 0xb8, 0x30, 0xe7, 0xf2, 0x81, 0xdc, 0x81, 0x1a, 0xf0, 0x37, 0x04, 0x61, 0xfe, 0x7e, 0x78, + 0x1b, 0x30, 0xac, 0xa5, 0xe2, 0xc7, 0x80, 0xf5, 0xdd, 0xfd, 0x94, 0x38, 0xaa, 0x99, 0x72, 0xf2, + 0x60, 0x36, 0x5b, 0x6e, 0x77, 0xcc, 0x6d, 0xee, 0xdd, 0xa9, 0x60, 0xde, 0xfd, 0x3b, 0x04, 0x21, + 0xd6, 0x35, 0xf1, 0x76, 0xef, 0x21, 0x0d, 0x1a, 0x3f, 0x96, 0xac, 0xed, 0xee, 0xa7, 0xce, 0x8f, + 0x68, 0xcd, 0xb8, 0x4e, 0xe2, 0x8a, 0x70, 0xd9, 0xf5, 0xc7, 0x94, 0x11, 0x07, 0x60, 0x30, 0xb4, + 0x9f, 0x20, 0x98, 0xa8, 0x98, 0x5a, 0x07, 0xe7, 0xbd, 0x61, 0x1f, 0x6c, 0xee, 0xf8, 0x41, 0x2d, + 0xed, 0xee, 0xa7, 0xce, 0x0d, 0x6f, 0xeb, 0xb8, 0x40, 0xa7, 0x84, 0x39, 0x7f, 0xa0, 0xb5, 0x0e, + 0xfe, 0x2b, 0x82, 0x30, 0x4f, 0x9d, 0xbc, 0xfd, 0x66, 0x58, 0x67, 0xc8, 0x0f, 0xee, 0x2a, 0x77, + 0xfc, 0xe1, 0x3d, 0x21, 0x17, 0xf2, 0xa2, 0x50, 0x08, 0xe4, 0xf8, 0x3c, 0xd9, 0xc3, 0x7f, 0x42, + 0x10, 0xb1, 0xaa, 0x72, 0x5c, 0x3c, 0x52, 0x57, 0xc6, 0x8f, 0x19, 0x1f, 0xec, 0xee, 0xa7, 0x2e, + 0x8c, 0x2c, 0xff, 0x5d, 0x76, 0x5c, 0x15, 0x2e, 0x79, 0x3d, 0x3a, 0x3a, 0x57, 0x43, 0xef, 0xee, + 0xef, 0x11, 0x44, 0xe9, 0x93, 0x4b, 0x6b, 0x2a, 0xbc, 0x70, 0xb4, 0x8a, 0x35, 0x79, 0x2d, 0xb0, + 0x9c, 0xf5, 0xae, 0x2f, 0x32, 0xc4, 0x05, 0x9c, 0x0b, 0xc4, 0x3c, 0x2b, 0xdc, 0xfe, 0x81, 0x60, + 0x8a, 0xaa, 0xed, 0x17, 0x3a, 0xf8, 0x46, 0x00, 0x18, 0x03, 0x05, 0x5c, 0xf2, 0xe6, 0x11, 0xa5, + 0x2d, 0x53, 0xde, 0x65, 0xa6, 0x2c, 0xe2, 0x6b, 0x41, 0x4c, 0xc9, 0x38, 0x6a, 0xab, 0x3f, 0x23, + 0x88, 0xd3, 0x2d, 0xac, 0x72, 0x04, 0x2f, 0x06, 0xc0, 0xe3, 0x2e, 0xb0, 0x92, 0x4b, 0x47, 0x11, + 0xb5, 0xec, 0xb8, 0xc6, 0xa3, 0x00, 0xce, 0x0c, 0xd8, 0x31, 0xc2, 0x04, 0xbb, 0xe8, 0xf9, 0x23, + 0x82, 0x18, 0xd5, 0xcb, 0x92, 0x47, 0x1c, 0xc4, 0x25, 0x9c, 0x89, 0x6e, 0xf2, 0x7a, 0x70, 0x41, + 0x0b, 0xf9, 0x12, 0x43, 0x3e, 0x8f, 0xf3, 0x81, 0x4e, 0x80, 0x57, 0x25, 0x9f, 0x21, 0x88, 0x2e, + 0x2b, 0x0a, 0xc7, 0xee, 0x79, 0x0d, 0x86, 0xd7, 0x25, 0x7e, 0xee, 0xf1, 0x87, 0xbb, 0xfb, 0xa9, + 0xec, 0xe8, 0x02, 0xe2, 0x90, 0x40, 0xb6, 0x22, 0xbc, 0x13, 0xdc, 0xae, 0xa5, 0x9a, 0x6c, 0xd6, + 0x37, 0x78, 0xa6, 0x42, 0x6f, 0xfb, 0x7f, 0x10, 0xc4, 0x79, 0x08, 0xe5, 0x96, 0x2e, 0x06, 0x0a, + 0xd7, 0x41, 0x8d, 0xfd, 0x08, 0xed, 0xee, 0xa7, 0x0a, 0x87, 0xd6, 0x16, 0xaf, 0xc6, 0x60, 0xbe, + 0xe1, 0x12, 0x4a, 0x95, 0xbe, 0xf9, 0xf8, 0x5e, 0xa3, 0x69, 0x6e, 0x74, 0x6b, 0x62, 0x5d, 0x6b, + 0x67, 0x38, 0xe2, 0x34, 0xff, 0x3f, 0x85, 0x86, 0x96, 0x6e, 0x10, 0x95, 0x6d, 0x9c, 0x39, 0xfc, + 0x1f, 0x18, 0xde, 0x66, 0x5f, 0xb5, 0x30, 0x5b, 0x5b, 0xf8, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x6a, 0x38, 0x7b, 0xbd, 0x64, 0x22, 0x00, 0x00, +} diff --git 
a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config/redis5_0.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config/redis5_0.pb.go new file mode 100644 index 000000000..b139f51aa --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config/redis5_0.pb.go @@ -0,0 +1,240 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/redis/v1alpha/config/redis5_0.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/config" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RedisConfig5_0_MaxmemoryPolicy int32 + +const ( + RedisConfig5_0_MAXMEMORY_POLICY_UNSPECIFIED RedisConfig5_0_MaxmemoryPolicy = 0 + // Try to remove less recently used (LRU) keys with `expire set`. + RedisConfig5_0_VOLATILE_LRU RedisConfig5_0_MaxmemoryPolicy = 1 + // Remove less recently used (LRU) keys. + RedisConfig5_0_ALLKEYS_LRU RedisConfig5_0_MaxmemoryPolicy = 2 + // Try to remove least frequently used (LFU) keys with `expire set`. + RedisConfig5_0_VOLATILE_LFU RedisConfig5_0_MaxmemoryPolicy = 3 + // Remove least frequently used (LFU) keys. + RedisConfig5_0_ALLKEYS_LFU RedisConfig5_0_MaxmemoryPolicy = 4 + // Try to remove keys with `expire set` randomly. + RedisConfig5_0_VOLATILE_RANDOM RedisConfig5_0_MaxmemoryPolicy = 5 + // Remove keys randomly. + RedisConfig5_0_ALLKEYS_RANDOM RedisConfig5_0_MaxmemoryPolicy = 6 + // Try to remove less recently used (LRU) keys with `expire set` + // and shorter TTL first. + RedisConfig5_0_VOLATILE_TTL RedisConfig5_0_MaxmemoryPolicy = 7 + // Return errors when memory limit was reached and commands could require + // more memory to be used. + RedisConfig5_0_NOEVICTION RedisConfig5_0_MaxmemoryPolicy = 8 +) + +var RedisConfig5_0_MaxmemoryPolicy_name = map[int32]string{ + 0: "MAXMEMORY_POLICY_UNSPECIFIED", + 1: "VOLATILE_LRU", + 2: "ALLKEYS_LRU", + 3: "VOLATILE_LFU", + 4: "ALLKEYS_LFU", + 5: "VOLATILE_RANDOM", + 6: "ALLKEYS_RANDOM", + 7: "VOLATILE_TTL", + 8: "NOEVICTION", +} +var RedisConfig5_0_MaxmemoryPolicy_value = map[string]int32{ + "MAXMEMORY_POLICY_UNSPECIFIED": 0, + "VOLATILE_LRU": 1, + "ALLKEYS_LRU": 2, + "VOLATILE_LFU": 3, + "ALLKEYS_LFU": 4, + "VOLATILE_RANDOM": 5, + "ALLKEYS_RANDOM": 6, + "VOLATILE_TTL": 7, + "NOEVICTION": 8, +} + +func (x RedisConfig5_0_MaxmemoryPolicy) String() string { + return proto.EnumName(RedisConfig5_0_MaxmemoryPolicy_name, int32(x)) +} +func (RedisConfig5_0_MaxmemoryPolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_redis5_0_e003f487f83590f3, []int{0, 0} +} + +// Fields and structure of `RedisConfig` reflects Redis configuration file +// parameters. +type RedisConfig5_0 struct { + // Redis key eviction policy for a dataset that reaches maximum memory, + // available to the host. Redis maxmemory setting depends on Managed + // Service for Redis [host class](/docs/managed-redis/concepts/instance-types). 
+ // + // All policies are described in detail in [Redis documentation](https://redis.io/topics/lru-cache). + MaxmemoryPolicy RedisConfig5_0_MaxmemoryPolicy `protobuf:"varint,1,opt,name=maxmemory_policy,json=maxmemoryPolicy,proto3,enum=yandex.cloud.mdb.redis.v1alpha.config.RedisConfig5_0_MaxmemoryPolicy" json:"maxmemory_policy,omitempty"` + // Time that Redis keeps the connection open while the client is idle. + // If no new command is sent during that time, the connection is closed. + Timeout *wrappers.Int64Value `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Authentication password. + Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RedisConfig5_0) Reset() { *m = RedisConfig5_0{} } +func (m *RedisConfig5_0) String() string { return proto.CompactTextString(m) } +func (*RedisConfig5_0) ProtoMessage() {} +func (*RedisConfig5_0) Descriptor() ([]byte, []int) { + return fileDescriptor_redis5_0_e003f487f83590f3, []int{0} +} +func (m *RedisConfig5_0) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RedisConfig5_0.Unmarshal(m, b) +} +func (m *RedisConfig5_0) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RedisConfig5_0.Marshal(b, m, deterministic) +} +func (dst *RedisConfig5_0) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedisConfig5_0.Merge(dst, src) +} +func (m *RedisConfig5_0) XXX_Size() int { + return xxx_messageInfo_RedisConfig5_0.Size(m) +} +func (m *RedisConfig5_0) XXX_DiscardUnknown() { + xxx_messageInfo_RedisConfig5_0.DiscardUnknown(m) +} + +var xxx_messageInfo_RedisConfig5_0 proto.InternalMessageInfo + +func (m *RedisConfig5_0) GetMaxmemoryPolicy() RedisConfig5_0_MaxmemoryPolicy { + if m != nil { + return m.MaxmemoryPolicy + } + return RedisConfig5_0_MAXMEMORY_POLICY_UNSPECIFIED +} + +func (m *RedisConfig5_0) GetTimeout() *wrappers.Int64Value { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *RedisConfig5_0) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type RedisConfigSet5_0 struct { + // Effective settings for a Redis 5.0 cluster (a combination of settings + // defined in [user_config] and [default_config]). + EffectiveConfig *RedisConfig5_0 `protobuf:"bytes,1,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` + // User-defined settings for a Redis 5.0 cluster. + UserConfig *RedisConfig5_0 `protobuf:"bytes,2,opt,name=user_config,json=userConfig,proto3" json:"user_config,omitempty"` + // Default configuration for a Redis 5.0 cluster. 
+ DefaultConfig *RedisConfig5_0 `protobuf:"bytes,3,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RedisConfigSet5_0) Reset() { *m = RedisConfigSet5_0{} } +func (m *RedisConfigSet5_0) String() string { return proto.CompactTextString(m) } +func (*RedisConfigSet5_0) ProtoMessage() {} +func (*RedisConfigSet5_0) Descriptor() ([]byte, []int) { + return fileDescriptor_redis5_0_e003f487f83590f3, []int{1} +} +func (m *RedisConfigSet5_0) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RedisConfigSet5_0.Unmarshal(m, b) +} +func (m *RedisConfigSet5_0) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RedisConfigSet5_0.Marshal(b, m, deterministic) +} +func (dst *RedisConfigSet5_0) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedisConfigSet5_0.Merge(dst, src) +} +func (m *RedisConfigSet5_0) XXX_Size() int { + return xxx_messageInfo_RedisConfigSet5_0.Size(m) +} +func (m *RedisConfigSet5_0) XXX_DiscardUnknown() { + xxx_messageInfo_RedisConfigSet5_0.DiscardUnknown(m) +} + +var xxx_messageInfo_RedisConfigSet5_0 proto.InternalMessageInfo + +func (m *RedisConfigSet5_0) GetEffectiveConfig() *RedisConfig5_0 { + if m != nil { + return m.EffectiveConfig + } + return nil +} + +func (m *RedisConfigSet5_0) GetUserConfig() *RedisConfig5_0 { + if m != nil { + return m.UserConfig + } + return nil +} + +func (m *RedisConfigSet5_0) GetDefaultConfig() *RedisConfig5_0 { + if m != nil { + return m.DefaultConfig + } + return nil +} + +func init() { + proto.RegisterType((*RedisConfig5_0)(nil), "yandex.cloud.mdb.redis.v1alpha.config.RedisConfig5_0") + proto.RegisterType((*RedisConfigSet5_0)(nil), "yandex.cloud.mdb.redis.v1alpha.config.RedisConfigSet5_0") + proto.RegisterEnum("yandex.cloud.mdb.redis.v1alpha.config.RedisConfig5_0_MaxmemoryPolicy", RedisConfig5_0_MaxmemoryPolicy_name, RedisConfig5_0_MaxmemoryPolicy_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/config/redis5_0.proto", fileDescriptor_redis5_0_e003f487f83590f3) +} + +var fileDescriptor_redis5_0_e003f487f83590f3 = []byte{ + // 469 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcb, 0x6e, 0xd3, 0x40, + 0x14, 0x86, 0x71, 0x02, 0x6d, 0x39, 0x01, 0x7b, 0x18, 0x36, 0x55, 0x41, 0x28, 0x8a, 0x84, 0x94, + 0x4d, 0xc7, 0x25, 0x34, 0x6c, 0x58, 0x85, 0xd4, 0x91, 0x2c, 0xec, 0x38, 0x38, 0x17, 0x11, 0x84, + 0x64, 0x7c, 0x19, 0xbb, 0x96, 0xec, 0x8c, 0xe5, 0x4b, 0xdb, 0xbc, 0x0b, 0x6f, 0xc0, 0x6b, 0xf0, + 0x60, 0x28, 0x33, 0x71, 0x84, 0x59, 0x55, 0x74, 0x39, 0xbf, 0xfe, 0xff, 0x3b, 0x47, 0xe7, 0xcc, + 0x81, 0xcb, 0xad, 0xbb, 0x09, 0xe8, 0x9d, 0xea, 0x27, 0xac, 0x0a, 0xd4, 0x34, 0xf0, 0xd4, 0x9c, + 0x06, 0x71, 0xa1, 0xde, 0xbc, 0x73, 0x93, 0xec, 0xda, 0x55, 0x7d, 0xb6, 0x09, 0xe3, 0x48, 0x88, + 0x43, 0xe7, 0x82, 0x64, 0x39, 0x2b, 0x19, 0x7e, 0x2b, 0x52, 0x84, 0xa7, 0x48, 0x1a, 0x78, 0x84, + 0x1b, 0xc8, 0x3e, 0x45, 0x44, 0xea, 0xec, 0x4d, 0xc4, 0x58, 0x94, 0x50, 0x95, 0x87, 0xbc, 0x2a, + 0x54, 0x6f, 0x73, 0x37, 0xcb, 0x68, 0x5e, 0x08, 0x4c, 0xef, 0x67, 0x1b, 0x64, 0x7b, 0x17, 0x1c, + 0x73, 0xff, 0xd0, 0xb9, 0xc0, 0x19, 0xa0, 0xd4, 0xbd, 0x4b, 0x69, 0xca, 0xf2, 0xad, 0x93, 0xb1, + 0x24, 0xf6, 0xb7, 0xa7, 0x52, 0x57, 0xea, 0xcb, 0x03, 0x8d, 0xdc, 0xab, 0x28, 0x69, 0x02, 0x89, + 0x59, 0xd3, 0x66, 0x1c, 0x66, 0x2b, 0x69, 0x53, 0xc0, 0x43, 0x38, 0x2e, 0xe3, 0x94, 0xb2, 0xaa, + 
0x3c, 0x6d, 0x75, 0xa5, 0x7e, 0x67, 0xf0, 0x8a, 0x88, 0xb6, 0x49, 0xdd, 0x36, 0xd1, 0x37, 0xe5, + 0x87, 0xcb, 0x95, 0x9b, 0x54, 0xd4, 0xae, 0xbd, 0xf8, 0x0c, 0x4e, 0x32, 0xb7, 0x28, 0x6e, 0x59, + 0x1e, 0x9c, 0xb6, 0xbb, 0x52, 0xff, 0xa9, 0x7d, 0x78, 0xf7, 0x7e, 0x4b, 0xa0, 0xfc, 0x53, 0x17, + 0x77, 0xe1, 0xb5, 0x39, 0xfa, 0x6a, 0x6a, 0xa6, 0x65, 0xaf, 0x9d, 0x99, 0x65, 0xe8, 0xe3, 0xb5, + 0xb3, 0x9c, 0xce, 0x67, 0xda, 0x58, 0x9f, 0xe8, 0xda, 0x15, 0x7a, 0x84, 0x11, 0x3c, 0x5b, 0x59, + 0xc6, 0x68, 0xa1, 0x1b, 0x9a, 0x63, 0xd8, 0x4b, 0x24, 0x61, 0x05, 0x3a, 0x23, 0xc3, 0xf8, 0xac, + 0xad, 0xe7, 0x5c, 0x68, 0x35, 0x2d, 0x93, 0x25, 0x6a, 0x37, 0x2c, 0x93, 0x25, 0x7a, 0x8c, 0x5f, + 0x82, 0x72, 0xb0, 0xd8, 0xa3, 0xe9, 0x95, 0x65, 0xa2, 0x27, 0x18, 0x83, 0x5c, 0xbb, 0xf6, 0xda, + 0x51, 0x83, 0xb5, 0x58, 0x18, 0xe8, 0x18, 0xcb, 0x00, 0x53, 0x4b, 0x5b, 0xe9, 0xe3, 0x85, 0x6e, + 0x4d, 0xd1, 0x49, 0xef, 0x57, 0x0b, 0x5e, 0xfc, 0x35, 0xcd, 0x39, 0x2d, 0x77, 0x1b, 0xfa, 0x01, + 0x88, 0x86, 0x21, 0xf5, 0xcb, 0xf8, 0x86, 0x3a, 0x62, 0xe6, 0x7c, 0x43, 0x9d, 0xc1, 0xf0, 0xbf, + 0x36, 0x64, 0x2b, 0x07, 0x9c, 0xd0, 0xf0, 0x0a, 0x3a, 0x55, 0x41, 0xf3, 0x1a, 0xde, 0x7a, 0x08, + 0x1c, 0x76, 0xa4, 0x3d, 0xf7, 0x3b, 0xc8, 0x01, 0x0d, 0xdd, 0x2a, 0x29, 0x6b, 0x74, 0xfb, 0x21, + 0xe8, 0xe7, 0x7b, 0x98, 0x50, 0x3e, 0x7d, 0xf9, 0x66, 0x45, 0x71, 0x79, 0x5d, 0x79, 0xc4, 0x67, + 0xa9, 0x2a, 0x88, 0xe7, 0xe2, 0xac, 0x22, 0x76, 0x1e, 0xd1, 0x0d, 0xff, 0x4e, 0xea, 0xbd, 0xee, + 0xed, 0x23, 0x17, 0xbd, 0x23, 0x1e, 0x79, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x43, 0x88, + 0xaf, 0xa5, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset.pb.go new file mode 100644 index 000000000..e26057cc3 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset.pb.go @@ -0,0 +1,112 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/redis/v1alpha/resource_preset.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A resource preset that describes hardware configuration for a host. +type ResourcePreset struct { + // ID of the resource preset. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // IDs of availability zones where the resource preset is available. + ZoneIds []string `protobuf:"bytes,2,rep,name=zone_ids,json=zoneIds,proto3" json:"zone_ids,omitempty"` + // Number of CPU cores for a Redis host created with the preset. + Cores int64 `protobuf:"varint,3,opt,name=cores,proto3" json:"cores,omitempty"` + // RAM volume for a Redis host created with the preset, in bytes. 
+ Memory int64 `protobuf:"varint,4,opt,name=memory,proto3" json:"memory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourcePreset) Reset() { *m = ResourcePreset{} } +func (m *ResourcePreset) String() string { return proto.CompactTextString(m) } +func (*ResourcePreset) ProtoMessage() {} +func (*ResourcePreset) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_a1f85fd0e8a82560, []int{0} +} +func (m *ResourcePreset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourcePreset.Unmarshal(m, b) +} +func (m *ResourcePreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourcePreset.Marshal(b, m, deterministic) +} +func (dst *ResourcePreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePreset.Merge(dst, src) +} +func (m *ResourcePreset) XXX_Size() int { + return xxx_messageInfo_ResourcePreset.Size(m) +} +func (m *ResourcePreset) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePreset.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePreset proto.InternalMessageInfo + +func (m *ResourcePreset) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ResourcePreset) GetZoneIds() []string { + if m != nil { + return m.ZoneIds + } + return nil +} + +func (m *ResourcePreset) GetCores() int64 { + if m != nil { + return m.Cores + } + return 0 +} + +func (m *ResourcePreset) GetMemory() int64 { + if m != nil { + return m.Memory + } + return 0 +} + +func init() { + proto.RegisterType((*ResourcePreset)(nil), "yandex.cloud.mdb.redis.v1alpha.ResourcePreset") +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/resource_preset.proto", fileDescriptor_resource_preset_a1f85fd0e8a82560) +} + +var fileDescriptor_resource_preset_a1f85fd0e8a82560 = []byte{ + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0xcf, 0xb1, 0x4b, 0xc4, 0x30, + 0x14, 0xc7, 0x71, 0xda, 0xea, 0xe9, 0x65, 0xb8, 0x21, 0x88, 0xc4, 0x45, 0x8a, 0x53, 0x97, 0x4b, + 0x10, 0xdd, 0xdc, 0x9c, 0xd4, 0x49, 0x32, 0xba, 0x1c, 0x4d, 0xde, 0xa3, 0x0d, 0x34, 0x7d, 0x25, + 0x69, 0xc5, 0xfa, 0xd7, 0x8b, 0x49, 0xe7, 0x1b, 0xbf, 0x21, 0x1f, 0x78, 0x3f, 0xf6, 0xbc, 0xb6, + 0x23, 0xe0, 0x8f, 0xb2, 0x03, 0x2d, 0xa0, 0x3c, 0x18, 0x15, 0x10, 0x5c, 0x54, 0xdf, 0x8f, 0xed, + 0x30, 0xf5, 0xad, 0x0a, 0x18, 0x69, 0x09, 0x16, 0x4f, 0x53, 0xc0, 0x88, 0xb3, 0x9c, 0x02, 0xcd, + 0xc4, 0xef, 0xb3, 0x92, 0x49, 0x49, 0x0f, 0x46, 0x26, 0x25, 0x37, 0xf5, 0xe0, 0xd8, 0x41, 0x6f, + 0xf0, 0x33, 0x39, 0x7e, 0x60, 0xa5, 0x03, 0x51, 0xd4, 0x45, 0xb3, 0xd7, 0xa5, 0x03, 0x7e, 0xc7, + 0xae, 0x7f, 0x69, 0xc4, 0x93, 0x83, 0x28, 0xca, 0xba, 0x6a, 0xf6, 0xfa, 0xea, 0xbf, 0xdf, 0x21, + 0xf2, 0x1b, 0x76, 0x69, 0x29, 0x60, 0x14, 0x55, 0x5d, 0x34, 0x95, 0xce, 0xc1, 0x6f, 0xd9, 0xce, + 0xa3, 0xa7, 0xb0, 0x8a, 0x8b, 0xf4, 0xbc, 0xd5, 0xeb, 0xc7, 0xd7, 0x5b, 0xe7, 0xe6, 0x7e, 0x31, + 0xd2, 0x92, 0x57, 0xf9, 0xae, 0x63, 0x5e, 0xd3, 0xd1, 0xb1, 0xc3, 0x31, 0x5d, 0xac, 0xce, 0xcf, + 0x7c, 0x49, 0x65, 0x76, 0xe9, 0xef, 0xd3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x36, 0xff, 0xd3, + 0x72, 0x15, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset_service.pb.go new file mode 100644 index 000000000..dd4e8ef82 --- /dev/null +++ 
b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha/resource_preset_service.pb.go @@ -0,0 +1,321 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/mdb/redis/v1alpha/resource_preset_service.proto + +package redis // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetResourcePresetRequest struct { + // Required. ID of the resource preset to return. + // To get the resource preset ID, use a [ResourcePresetService.List] request. + ResourcePresetId string `protobuf:"bytes,1,opt,name=resource_preset_id,json=resourcePresetId,proto3" json:"resource_preset_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResourcePresetRequest) Reset() { *m = GetResourcePresetRequest{} } +func (m *GetResourcePresetRequest) String() string { return proto.CompactTextString(m) } +func (*GetResourcePresetRequest) ProtoMessage() {} +func (*GetResourcePresetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_7ef8dd7aa031ff08, []int{0} +} +func (m *GetResourcePresetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResourcePresetRequest.Unmarshal(m, b) +} +func (m *GetResourcePresetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResourcePresetRequest.Marshal(b, m, deterministic) +} +func (dst *GetResourcePresetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResourcePresetRequest.Merge(dst, src) +} +func (m *GetResourcePresetRequest) XXX_Size() int { + return xxx_messageInfo_GetResourcePresetRequest.Size(m) +} +func (m *GetResourcePresetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetResourcePresetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResourcePresetRequest proto.InternalMessageInfo + +func (m *GetResourcePresetRequest) GetResourcePresetId() string { + if m != nil { + return m.ResourcePresetId + } + return "" +} + +type ListResourcePresetsRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] + // returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsRequest) Reset() { *m = ListResourcePresetsRequest{} } +func (m *ListResourcePresetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsRequest) ProtoMessage() {} +func (*ListResourcePresetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_7ef8dd7aa031ff08, []int{1} +} +func (m *ListResourcePresetsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsRequest.Unmarshal(m, b) +} +func (m *ListResourcePresetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsRequest.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsRequest.Merge(dst, src) +} +func (m *ListResourcePresetsRequest) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsRequest.Size(m) +} +func (m *ListResourcePresetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsRequest proto.InternalMessageInfo + +func (m *ListResourcePresetsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListResourcePresetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListResourcePresetsResponse struct { + // List of resource presets. + ResourcePresets []*ResourcePreset `protobuf:"bytes,1,rep,name=resource_presets,json=resourcePresets,proto3" json:"resource_presets,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + // for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent + // list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcePresetsResponse) Reset() { *m = ListResourcePresetsResponse{} } +func (m *ListResourcePresetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListResourcePresetsResponse) ProtoMessage() {} +func (*ListResourcePresetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_preset_service_7ef8dd7aa031ff08, []int{2} +} +func (m *ListResourcePresetsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResourcePresetsResponse.Unmarshal(m, b) +} +func (m *ListResourcePresetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResourcePresetsResponse.Marshal(b, m, deterministic) +} +func (dst *ListResourcePresetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcePresetsResponse.Merge(dst, src) +} +func (m *ListResourcePresetsResponse) XXX_Size() int { + return xxx_messageInfo_ListResourcePresetsResponse.Size(m) +} +func (m *ListResourcePresetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcePresetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcePresetsResponse proto.InternalMessageInfo + +func (m *ListResourcePresetsResponse) GetResourcePresets() []*ResourcePreset { + if m != nil { + return m.ResourcePresets + } + return nil +} + +func (m *ListResourcePresetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetResourcePresetRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.GetResourcePresetRequest") + proto.RegisterType((*ListResourcePresetsRequest)(nil), "yandex.cloud.mdb.redis.v1alpha.ListResourcePresetsRequest") + proto.RegisterType((*ListResourcePresetsResponse)(nil), "yandex.cloud.mdb.redis.v1alpha.ListResourcePresetsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ResourcePresetServiceClient is the client API for ResourcePresetService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ResourcePresetServiceClient interface { + // Returns the specified resource preset. + // + // To get the list of available resource presets, make a [List] request. + Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) + // Retrieves the list of available resource presets. + List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) +} + +type resourcePresetServiceClient struct { + cc *grpc.ClientConn +} + +func NewResourcePresetServiceClient(cc *grpc.ClientConn) ResourcePresetServiceClient { + return &resourcePresetServiceClient{cc} +} + +func (c *resourcePresetServiceClient) Get(ctx context.Context, in *GetResourcePresetRequest, opts ...grpc.CallOption) (*ResourcePreset, error) { + out := new(ResourcePreset) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ResourcePresetService/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourcePresetServiceClient) List(ctx context.Context, in *ListResourcePresetsRequest, opts ...grpc.CallOption) (*ListResourcePresetsResponse, error) { + out := new(ListResourcePresetsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.mdb.redis.v1alpha.ResourcePresetService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ResourcePresetServiceServer is the server API for ResourcePresetService service. +type ResourcePresetServiceServer interface { + // Returns the specified resource preset. + // + // To get the list of available resource presets, make a [List] request. + Get(context.Context, *GetResourcePresetRequest) (*ResourcePreset, error) + // Retrieves the list of available resource presets. + List(context.Context, *ListResourcePresetsRequest) (*ListResourcePresetsResponse, error) +} + +func RegisterResourcePresetServiceServer(s *grpc.Server, srv ResourcePresetServiceServer) { + s.RegisterService(&_ResourcePresetService_serviceDesc, srv) +} + +func _ResourcePresetService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourcePresetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ResourcePresetService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).Get(ctx, req.(*GetResourcePresetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourcePresetService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcePresetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourcePresetServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.mdb.redis.v1alpha.ResourcePresetService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourcePresetServiceServer).List(ctx, req.(*ListResourcePresetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourcePresetService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.mdb.redis.v1alpha.ResourcePresetService", + HandlerType: (*ResourcePresetServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _ResourcePresetService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ResourcePresetService_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/mdb/redis/v1alpha/resource_preset_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/mdb/redis/v1alpha/resource_preset_service.proto", fileDescriptor_resource_preset_service_7ef8dd7aa031ff08) +} + +var fileDescriptor_resource_preset_service_7ef8dd7aa031ff08 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0xca, 0xd3, 0x40, + 0x14, 0x25, 0x8d, 0x88, 0xdf, 0x88, 0x7c, 0x1f, 0x03, 0x42, 0xc8, 0xa7, 0x52, 0xb2, 0x28, 0x59, + 0xd8, 0x19, 0xac, 0x2e, 0xc4, 0xea, 0x46, 0x17, 0xad, 0xe2, 0xa2, 0xa4, 0x2e, 0xd4, 0x4d, 0x98, + 0x64, 0x2e, 
0xe9, 0x60, 0x33, 0x13, 0x33, 0x93, 0x52, 0x2b, 0x82, 0xf8, 0x0a, 0xbe, 0x80, 0x6f, + 0xe0, 0xc6, 0x37, 0xf1, 0x15, 0x7c, 0x10, 0xc9, 0xa4, 0x05, 0x1b, 0xfb, 0x63, 0x5d, 0xce, 0xbd, + 0x73, 0xee, 0x39, 0x67, 0xce, 0x5c, 0xf4, 0xf8, 0x03, 0x93, 0x1c, 0x96, 0x34, 0x9d, 0xab, 0x8a, + 0xd3, 0x9c, 0x27, 0xb4, 0x04, 0x2e, 0x34, 0x5d, 0xdc, 0x63, 0xf3, 0x62, 0xc6, 0x68, 0x09, 0x5a, + 0x55, 0x65, 0x0a, 0x71, 0x51, 0x82, 0x06, 0x13, 0x6b, 0x28, 0x17, 0x22, 0x05, 0x52, 0x94, 0xca, + 0x28, 0x7c, 0xa7, 0x41, 0x13, 0x8b, 0x26, 0x39, 0x4f, 0x88, 0x45, 0x93, 0x35, 0xda, 0xbf, 0x95, + 0x29, 0x95, 0xcd, 0x81, 0xb2, 0x42, 0x50, 0x26, 0xa5, 0x32, 0xcc, 0x08, 0x25, 0x75, 0x83, 0xf6, + 0x1f, 0x9c, 0xc6, 0xdd, 0xa0, 0x82, 0x31, 0xf2, 0x46, 0x60, 0xa2, 0x75, 0x6f, 0x62, 0x5b, 0x11, + 0xbc, 0xaf, 0x40, 0x1b, 0x7c, 0x17, 0xe1, 0xb6, 0x60, 0xc1, 0x3d, 0xa7, 0xeb, 0x84, 0x67, 0xd1, + 0x45, 0xb9, 0x05, 0x79, 0xce, 0x83, 0xd7, 0xc8, 0x7f, 0x29, 0x74, 0x6b, 0x94, 0xde, 0xcc, 0xba, + 0x44, 0x67, 0x05, 0xcb, 0x20, 0xd6, 0x62, 0x05, 0x5e, 0xa7, 0xeb, 0x84, 0x6e, 0x74, 0xad, 0x2e, + 0x4c, 0xc5, 0x0a, 0xf0, 0x6d, 0x84, 0x6c, 0xd3, 0xa8, 0x77, 0x20, 0x3d, 0xd7, 0x12, 0xd8, 0xeb, + 0xaf, 0xea, 0x42, 0xf0, 0xcd, 0x41, 0x97, 0x3b, 0x47, 0xeb, 0x42, 0x49, 0x0d, 0xf8, 0x0d, 0xba, + 0x68, 0xe9, 0xd4, 0x9e, 0xd3, 0x75, 0xc3, 0xeb, 0x03, 0x42, 0x0e, 0x3f, 0x29, 0x69, 0x19, 0x3f, + 0xdf, 0x76, 0xa5, 0x71, 0x0f, 0x9d, 0x4b, 0x58, 0x9a, 0xf8, 0x0f, 0x79, 0x1d, 0x2b, 0xef, 0x46, + 0x5d, 0x9e, 0x6c, 0x24, 0x0e, 0x3e, 0xbb, 0xe8, 0xe6, 0xf6, 0xac, 0x69, 0x13, 0x2d, 0xfe, 0xe1, + 0x20, 0x77, 0x04, 0x06, 0x3f, 0x3c, 0x26, 0x65, 0x5f, 0x0c, 0xfe, 0x89, 0x26, 0x82, 0x67, 0x5f, + 0x7e, 0xfe, 0xfa, 0xda, 0x79, 0x82, 0x87, 0x34, 0x67, 0x92, 0x65, 0xc0, 0xfb, 0xbb, 0x3f, 0xc2, + 0xda, 0x23, 0xfd, 0xf8, 0x77, 0xc8, 0x9f, 0xf0, 0x77, 0x07, 0x5d, 0xa9, 0xdf, 0x1c, 0x3f, 0x3a, + 0xc6, 0xbe, 0x3f, 0x74, 0x7f, 0xf8, 0x5f, 0xd8, 0x26, 0xd5, 0x80, 0x58, 0x1b, 0x21, 0xee, 0xfd, + 0x9b, 0x8d, 0xa7, 0x2f, 0xde, 0x8e, 0x33, 0x61, 0x66, 0x55, 0x42, 0x52, 0x95, 0xd3, 0x86, 0xb8, + 0xdf, 0x2c, 0x43, 0xa6, 0xfa, 0x19, 0x48, 0xfb, 0xe1, 0xe9, 0xe1, 0x2d, 0x19, 0xda, 0x53, 0x72, + 0xd5, 0xde, 0xbd, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x9e, 0x4d, 0xed, 0xd0, 0x03, 0x00, + 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation.pb.go new file mode 100644 index 000000000..a74a0f5b9 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation.pb.go @@ -0,0 +1,274 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/operation/operation.proto + +package operation // import "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import status "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An Operation resource. 
For more information, see [Operation](/docs/api-design-guide/concepts/operation). +type Operation struct { + // ID of the operation. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Description of the operation. 0-256 characters long. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // ID of the user or service account who initiated the operation. + CreatedBy string `protobuf:"bytes,4,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` + // The time when the Operation resource was last modified. + // This value is in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + ModifiedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=modified_at,json=modifiedAt,proto3" json:"modified_at,omitempty"` + // If the value is `false`, it means the operation is still in progress. + // If `true`, the operation is completed, and either `error` or `response` is available. + Done bool `protobuf:"varint,6,opt,name=done,proto3" json:"done,omitempty"` + // Service-specific metadata associated with the operation. + // It typically contains the ID of the target resource that the operation is performed on. + // Any method that returns a long-running operation should document the metadata type, if any. + Metadata *any.Any `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The operation result. + // If `done == false` and there was no failure detected, neither `error` nor `response` is set. + // If `done == false` and there was a failure detected, `error` is set. + // If `done == true`, exactly one of `error` or `response` is set. 
+ // + // Types that are valid to be assigned to Result: + // *Operation_Error + // *Operation_Response + Result isOperation_Result `protobuf_oneof:"result"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_operation_fa9f2af44e01a2d7, []int{0} +} +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (dst *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(dst, src) +} +func (m *Operation) XXX_Size() int { + return xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) +} + +var xxx_messageInfo_Operation proto.InternalMessageInfo + +func (m *Operation) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Operation) GetCreatedBy() string { + if m != nil { + return m.CreatedBy + } + return "" +} + +func (m *Operation) GetModifiedAt() *timestamp.Timestamp { + if m != nil { + return m.ModifiedAt + } + return nil +} + +func (m *Operation) GetDone() bool { + if m != nil { + return m.Done + } + return false +} + +func (m *Operation) GetMetadata() *any.Any { + if m != nil { + return m.Metadata + } + return nil +} + +type isOperation_Result interface { + isOperation_Result() +} + +type Operation_Error struct { + Error *status.Status `protobuf:"bytes,8,opt,name=error,proto3,oneof"` +} + +type Operation_Response struct { + Response *any.Any `protobuf:"bytes,9,opt,name=response,proto3,oneof"` +} + +func (*Operation_Error) isOperation_Result() {} + +func (*Operation_Response) isOperation_Result() {} + +func (m *Operation) GetResult() isOperation_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *Operation) GetError() *status.Status { + if x, ok := m.GetResult().(*Operation_Error); ok { + return x.Error + } + return nil +} + +func (m *Operation) GetResponse() *any.Any { + if x, ok := m.GetResult().(*Operation_Response); ok { + return x.Response + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Operation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Operation_OneofMarshaler, _Operation_OneofUnmarshaler, _Operation_OneofSizer, []interface{}{ + (*Operation_Error)(nil), + (*Operation_Response)(nil), + } +} + +func _Operation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Operation) + // result + switch x := m.Result.(type) { + case *Operation_Error: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case *Operation_Response: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Operation.Result has unexpected type %T", x) + } + return nil +} + +func _Operation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Operation) + switch tag { + case 8: // result.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(status.Status) + err := b.DecodeMessage(msg) + m.Result = &Operation_Error{msg} + return true, err + case 9: // result.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(any.Any) + err := b.DecodeMessage(msg) + m.Result = &Operation_Response{msg} + return true, err + default: + return false, nil + } +} + +func _Operation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Operation) + // result + switch x := m.Result.(type) { + case *Operation_Error: + s := proto.Size(x.Error) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Operation_Response: + s := proto.Size(x.Response) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Operation)(nil), "yandex.cloud.operation.Operation") +} + +func init() { + proto.RegisterFile("yandex/cloud/operation/operation.proto", fileDescriptor_operation_fa9f2af44e01a2d7) +} + +var fileDescriptor_operation_fa9f2af44e01a2d7 = []byte{ + // 345 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x4d, 0x4b, 0xeb, 0x40, + 0x14, 0x86, 0x9b, 0x7e, 0xdd, 0x64, 0x0a, 0x77, 0x31, 0x5c, 0xae, 0x63, 0x41, 0x0c, 0x2e, 0xa4, + 0x08, 0x9d, 0x48, 0x5d, 0x49, 0x57, 0x2d, 0x22, 0xdd, 0x09, 0xd1, 0x95, 0x1b, 0x99, 0x64, 0x4e, + 0xe3, 0x40, 0x93, 0x09, 0x93, 0x13, 0x30, 0x7b, 0x7f, 0xb8, 0x38, 0xd3, 0xa4, 0xe2, 0x07, 0xee, + 0x0e, 0xf3, 0x3e, 0xe7, 0x79, 0x61, 0x38, 0xe4, 0xbc, 0x11, 0x85, 0x84, 0x97, 0x28, 0xdd, 0xe9, + 0x5a, 0x46, 0xba, 0x04, 0x23, 0x50, 0xe9, 0xe2, 0x30, 0xf1, 0xd2, 0x68, 0xd4, 0xf4, 0xbf, 0xe3, + 0xb8, 0xe5, 0x78, 0x97, 0x4e, 0x8f, 0x33, 0xad, 0xb3, 0x1d, 0x44, 0x96, 0x4a, 0xea, 0x6d, 0x24, + 0x8a, 0xc6, 0xad, 0x4c, 0x8f, 0xf6, 0x91, 0x29, 0xd3, 0xa8, 0x42, 0x81, 0x75, 0xb5, 0x0f, 0x4e, + 0x3f, 0xef, 0xa0, 0xca, 0xa1, 0x42, 0x91, 0x97, 0x0e, 0x38, 0x7b, 0x1d, 0x90, 0xe0, 0xae, 0xad, + 0xa0, 0x7f, 0x49, 0x5f, 0x49, 0xe6, 0x85, 0xde, 0x2c, 0x88, 0xfb, 0x4a, 0xd2, 0x90, 0x4c, 0x24, + 0x54, 0xa9, 0x51, 0xe5, 0x7b, 0xcc, 0xfa, 0x36, 0xf8, 0xf8, 0x44, 0xaf, 0x09, 0x49, 0x0d, 0x08, + 0x04, 0xf9, 0x24, 0x90, 0x0d, 0x42, 0x6f, 0x36, 0x59, 0x4c, 0xb9, 0x6b, 0xe5, 0x6d, 0x2b, 0x7f, + 0x68, 0x5b, 0xe3, 0x60, 0x4f, 0xaf, 0x90, 0x9e, 0x1c, 
0x56, 0x93, 0x86, 0x0d, 0xad, 0xbb, 0x8d, + 0xd7, 0x0d, 0x5d, 0x92, 0x49, 0xae, 0xa5, 0xda, 0x2a, 0xa7, 0x1e, 0xfd, 0xaa, 0x26, 0x2d, 0xbe, + 0x42, 0x4a, 0xc9, 0x50, 0xea, 0x02, 0xd8, 0x38, 0xf4, 0x66, 0x7e, 0x6c, 0x67, 0x7a, 0x49, 0xfc, + 0x1c, 0x50, 0x48, 0x81, 0x82, 0xfd, 0xb1, 0xb6, 0x7f, 0x5f, 0x6c, 0xab, 0xa2, 0x89, 0x3b, 0x8a, + 0x5e, 0x90, 0x11, 0x18, 0xa3, 0x0d, 0xf3, 0x2d, 0x4e, 0x5b, 0xdc, 0x94, 0x29, 0xbf, 0xb7, 0xdf, + 0xbc, 0xe9, 0xc5, 0x0e, 0xa1, 0x0b, 0xe2, 0x1b, 0xa8, 0x4a, 0x5d, 0x54, 0xc0, 0x82, 0x9f, 0xed, + 0x9b, 0x5e, 0xdc, 0x71, 0x6b, 0x9f, 0x8c, 0x0d, 0x54, 0xf5, 0x0e, 0xd7, 0xb7, 0x8f, 0x37, 0x99, + 0xc2, 0xe7, 0x3a, 0xe1, 0xa9, 0xce, 0x23, 0x77, 0x00, 0x73, 0x77, 0x28, 0x99, 0x9e, 0x67, 0x50, + 0x58, 0x47, 0xf4, 0xfd, 0x05, 0x2d, 0xbb, 0x29, 0x19, 0x5b, 0xee, 0xea, 0x2d, 0x00, 0x00, 0xff, + 0xff, 0x31, 0x4a, 0xd9, 0x89, 0x6c, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation_service.pb.go new file mode 100644 index 000000000..92fd69246 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/operation/operation_service.pb.go @@ -0,0 +1,245 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/operation/operation_service.proto + +package operation // import "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetOperationRequest struct { + // ID of the Operation resource to return. 
+ OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetOperationRequest) Reset() { *m = GetOperationRequest{} } +func (m *GetOperationRequest) String() string { return proto.CompactTextString(m) } +func (*GetOperationRequest) ProtoMessage() {} +func (*GetOperationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_operation_service_00da9fe853881983, []int{0} +} +func (m *GetOperationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetOperationRequest.Unmarshal(m, b) +} +func (m *GetOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetOperationRequest.Marshal(b, m, deterministic) +} +func (dst *GetOperationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOperationRequest.Merge(dst, src) +} +func (m *GetOperationRequest) XXX_Size() int { + return xxx_messageInfo_GetOperationRequest.Size(m) +} +func (m *GetOperationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetOperationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOperationRequest proto.InternalMessageInfo + +func (m *GetOperationRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +type CancelOperationRequest struct { + // ID of the operation to cancel. + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CancelOperationRequest) Reset() { *m = CancelOperationRequest{} } +func (m *CancelOperationRequest) String() string { return proto.CompactTextString(m) } +func (*CancelOperationRequest) ProtoMessage() {} +func (*CancelOperationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_operation_service_00da9fe853881983, []int{1} +} +func (m *CancelOperationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CancelOperationRequest.Unmarshal(m, b) +} +func (m *CancelOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CancelOperationRequest.Marshal(b, m, deterministic) +} +func (dst *CancelOperationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CancelOperationRequest.Merge(dst, src) +} +func (m *CancelOperationRequest) XXX_Size() int { + return xxx_messageInfo_CancelOperationRequest.Size(m) +} +func (m *CancelOperationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CancelOperationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CancelOperationRequest proto.InternalMessageInfo + +func (m *CancelOperationRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func init() { + proto.RegisterType((*GetOperationRequest)(nil), "yandex.cloud.operation.GetOperationRequest") + proto.RegisterType((*CancelOperationRequest)(nil), "yandex.cloud.operation.CancelOperationRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// OperationServiceClient is the client API for OperationService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type OperationServiceClient interface { + // Returns the specified Operation resource. + Get(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) + // Cancels the specified operation. + Cancel(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*Operation, error) +} + +type operationServiceClient struct { + cc *grpc.ClientConn +} + +func NewOperationServiceClient(cc *grpc.ClientConn) OperationServiceClient { + return &operationServiceClient{cc} +} + +func (c *operationServiceClient) Get(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.operation.OperationService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationServiceClient) Cancel(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.operation.OperationService/Cancel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OperationServiceServer is the server API for OperationService service. +type OperationServiceServer interface { + // Returns the specified Operation resource. + Get(context.Context, *GetOperationRequest) (*Operation, error) + // Cancels the specified operation. + Cancel(context.Context, *CancelOperationRequest) (*Operation, error) +} + +func RegisterOperationServiceServer(s *grpc.Server, srv OperationServiceServer) { + s.RegisterService(&_OperationService_serviceDesc, srv) +} + +func _OperationService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.operation.OperationService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationServiceServer).Get(ctx, req.(*GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OperationService_Cancel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationServiceServer).Cancel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.operation.OperationService/Cancel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationServiceServer).Cancel(ctx, req.(*CancelOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _OperationService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.operation.OperationService", + HandlerType: (*OperationServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _OperationService_Get_Handler, + }, + { + MethodName: "Cancel", + Handler: _OperationService_Cancel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"yandex/cloud/operation/operation_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/operation/operation_service.proto", fileDescriptor_operation_service_00da9fe853881983) +} + +var fileDescriptor_operation_service_00da9fe853881983 = []byte{ + // 291 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xab, 0x4c, 0xcc, 0x4b, + 0x49, 0xad, 0xd0, 0x4f, 0xce, 0xc9, 0x2f, 0x4d, 0xd1, 0xcf, 0x2f, 0x48, 0x2d, 0x4a, 0x2c, 0xc9, + 0xcc, 0xcf, 0x43, 0xb0, 0xe2, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0xf5, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0xc4, 0x20, 0xea, 0xf5, 0xc0, 0xea, 0xf5, 0xe0, 0xaa, 0xa4, 0x64, 0xd2, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0xc0, 0xc2, + 0xc5, 0x10, 0x5d, 0x52, 0x6a, 0x84, 0x6c, 0x81, 0xaa, 0x93, 0x45, 0x51, 0x57, 0x96, 0x98, 0x93, + 0x99, 0x82, 0x24, 0xad, 0x64, 0xc7, 0x25, 0xec, 0x9e, 0x5a, 0xe2, 0x0f, 0xd3, 0x14, 0x94, 0x5a, + 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0xa4, 0xce, 0xc5, 0x83, 0x70, 0x6e, 0x66, 0x8a, 0x04, 0xa3, 0x02, + 0xa3, 0x06, 0xa7, 0x13, 0xcb, 0x8b, 0xe3, 0x86, 0x8c, 0x41, 0xdc, 0x70, 0x19, 0xcf, 0x14, 0x25, + 0x47, 0x2e, 0x31, 0xe7, 0xc4, 0xbc, 0xe4, 0xd4, 0x1c, 0xb2, 0x8d, 0x30, 0x9a, 0xc6, 0xc4, 0x25, + 0x00, 0xd7, 0x1d, 0x0c, 0x09, 0x1a, 0xa1, 0x4a, 0x2e, 0x66, 0xf7, 0xd4, 0x12, 0x21, 0x6d, 0x3d, + 0xec, 0x81, 0xa3, 0x87, 0xc5, 0xd1, 0x52, 0x8a, 0xb8, 0x14, 0xc3, 0x55, 0x2a, 0x29, 0x35, 0x5d, + 0x7e, 0x32, 0x99, 0x49, 0x46, 0x48, 0x0a, 0x11, 0x4e, 0xc5, 0xfa, 0xd5, 0xc8, 0xee, 0xac, 0x15, + 0x6a, 0x63, 0xe4, 0x62, 0x83, 0xf8, 0x49, 0x48, 0x0f, 0x97, 0x89, 0xd8, 0xfd, 0x4c, 0x8c, 0x0b, + 0x34, 0xc1, 0x2e, 0x50, 0x16, 0x52, 0xc4, 0xed, 0x02, 0xab, 0x64, 0xb0, 0xe9, 0x4e, 0x6e, 0x51, + 0x2e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x93, 0x75, 0x21, + 0xf1, 0x98, 0x9e, 0xaf, 0x9b, 0x9e, 0x9a, 0x07, 0x8e, 0x42, 0x7d, 0xec, 0x09, 0xc1, 0x1a, 0xce, + 0x4a, 0x62, 0x03, 0xab, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xa7, 0x20, 0x3f, 0x99, + 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud.pb.go new file mode 100644 index 000000000..698c3744b --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/resourcemanager/v1/cloud.proto + +package resourcemanager // import "github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Cloud resource. For more information, see [Cloud](/docs/resource-manager/concepts/resources-hierarchy#cloud). +type Cloud struct { + // ID of the cloud. 
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the cloud. 3-63 characters long. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the cloud. 0-256 characters long. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cloud) Reset() { *m = Cloud{} } +func (m *Cloud) String() string { return proto.CompactTextString(m) } +func (*Cloud) ProtoMessage() {} +func (*Cloud) Descriptor() ([]byte, []int) { + return fileDescriptor_cloud_4164113932f10e4f, []int{0} +} +func (m *Cloud) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cloud.Unmarshal(m, b) +} +func (m *Cloud) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cloud.Marshal(b, m, deterministic) +} +func (dst *Cloud) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cloud.Merge(dst, src) +} +func (m *Cloud) XXX_Size() int { + return xxx_messageInfo_Cloud.Size(m) +} +func (m *Cloud) XXX_DiscardUnknown() { + xxx_messageInfo_Cloud.DiscardUnknown(m) +} + +var xxx_messageInfo_Cloud proto.InternalMessageInfo + +func (m *Cloud) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Cloud) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Cloud) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cloud) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*Cloud)(nil), "yandex.cloud.resourcemanager.v1.Cloud") +} + +func init() { + proto.RegisterFile("yandex/cloud/resourcemanager/v1/cloud.proto", fileDescriptor_cloud_4164113932f10e4f) +} + +var fileDescriptor_cloud_4164113932f10e4f = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xc1, 0x4a, 0x03, 0x31, + 0x10, 0x86, 0xd9, 0xb5, 0x0a, 0x4d, 0xc1, 0x43, 0x4e, 0xa1, 0x97, 0x2e, 0x9e, 0x0a, 0xd2, 0x84, + 0xea, 0x49, 0x3c, 0xa9, 0x6f, 0x50, 0xf5, 0xe2, 0x45, 0xb2, 0xc9, 0x18, 0x03, 0x4d, 0x66, 0xc9, + 0x4e, 0x8a, 0xbe, 0x81, 0x8f, 0x2d, 0x26, 0x16, 0x64, 0x2f, 0xde, 0xc2, 0x3f, 0xdf, 0x7c, 0x61, + 0x7e, 0x76, 0xf9, 0xa9, 0xa3, 0x85, 0x0f, 0x65, 0xf6, 0x98, 0xad, 0x4a, 0x30, 0x62, 0x4e, 0x06, + 0x82, 0x8e, 0xda, 0x41, 0x52, 0x87, 0x6d, 0x1d, 0xc8, 0x21, 0x21, 0x21, 0x5f, 0x55, 0x58, 0xd6, + 0x6c, 0x02, 0xcb, 0xc3, 0x76, 0xb9, 0x72, 0x88, 0x6e, 0x0f, 0xaa, 0xe0, 0x7d, 0x7e, 0x53, 0xe4, + 0x03, 0x8c, 0xa4, 0xc3, 0x50, 0x0d, 0x17, 0x5f, 0x0d, 0x3b, 0x7d, 0xf8, 0xd9, 0xe6, 0xe7, 0xac, + 0xf5, 0x56, 0x34, 0x5d, 0xb3, 0x9e, 0xef, 0x5a, 0x6f, 0xf9, 0x0d, 0x63, 0x26, 0x81, 0x26, 0xb0, + 0xaf, 0x9a, 0x44, 0xdb, 0x35, 0xeb, 0xc5, 0xd5, 0x52, 0x56, 0x9f, 0x3c, 0xfa, 0xe4, 0xd3, 0xd1, + 0xb7, 0x9b, 0xff, 0xd2, 0x77, 0xc4, 0x39, 0x9b, 0x45, 0x1d, 0x40, 0x9c, 0x14, 0x59, 0x79, 0xf3, + 0x8e, 0x2d, 0x2c, 0x8c, 0x26, 0xf9, 0x81, 0x3c, 0x46, 0x31, 0x2b, 0xa3, 0xbf, 0xd1, 0xfd, 0xf3, + 0xcb, 0xa3, 0xf3, 0xf4, 0x9e, 0x7b, 0x69, 0x30, 0xa8, 0x7a, 0xd9, 0xa6, 0xd6, 0xe0, 0x70, 0xe3, + 0x20, 0x96, 0x4f, 0xd5, 0x3f, 0xfd, 0xdc, 0x4e, 0xa2, 0xfe, 0xac, 0xac, 0x5d, 
0x7f, 0x07, 0x00, + 0x00, 0xff, 0xff, 0xca, 0x72, 0x49, 0xcc, 0x59, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud_service.pb.go new file mode 100644 index 000000000..1fef90b79 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/cloud_service.pb.go @@ -0,0 +1,616 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/resourcemanager/v1/cloud_service.proto + +package resourcemanager // import "github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import access "github.com/yandex-cloud/go-genproto/yandex/cloud/access" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetCloudRequest struct { + // ID of the Cloud resource to return. + // To get the cloud ID, use a [CloudService.List] request. + CloudId string `protobuf:"bytes,1,opt,name=cloud_id,json=cloudId,proto3" json:"cloud_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCloudRequest) Reset() { *m = GetCloudRequest{} } +func (m *GetCloudRequest) String() string { return proto.CompactTextString(m) } +func (*GetCloudRequest) ProtoMessage() {} +func (*GetCloudRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cloud_service_7c926146593289a6, []int{0} +} +func (m *GetCloudRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCloudRequest.Unmarshal(m, b) +} +func (m *GetCloudRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCloudRequest.Marshal(b, m, deterministic) +} +func (dst *GetCloudRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCloudRequest.Merge(dst, src) +} +func (m *GetCloudRequest) XXX_Size() int { + return xxx_messageInfo_GetCloudRequest.Size(m) +} +func (m *GetCloudRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCloudRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCloudRequest proto.InternalMessageInfo + +func (m *GetCloudRequest) GetCloudId() string { + if m != nil { + return m.CloudId + } + return "" +} + +type ListCloudsRequest struct { + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListCloudsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. 
+ PageSize int64 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. Set [page_token] + // to the [ListCloudsResponse.next_page_token] + // returned by a previous list request to get the next page of results. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [Cloud.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]$`. + Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCloudsRequest) Reset() { *m = ListCloudsRequest{} } +func (m *ListCloudsRequest) String() string { return proto.CompactTextString(m) } +func (*ListCloudsRequest) ProtoMessage() {} +func (*ListCloudsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cloud_service_7c926146593289a6, []int{1} +} +func (m *ListCloudsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCloudsRequest.Unmarshal(m, b) +} +func (m *ListCloudsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCloudsRequest.Marshal(b, m, deterministic) +} +func (dst *ListCloudsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCloudsRequest.Merge(dst, src) +} +func (m *ListCloudsRequest) XXX_Size() int { + return xxx_messageInfo_ListCloudsRequest.Size(m) +} +func (m *ListCloudsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListCloudsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCloudsRequest proto.InternalMessageInfo + +func (m *ListCloudsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListCloudsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListCloudsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListCloudsResponse struct { + // List of Cloud resources. + Clouds []*Cloud `protobuf:"bytes,1,rep,name=clouds,proto3" json:"clouds,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListCloudsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListCloudsRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCloudsResponse) Reset() { *m = ListCloudsResponse{} } +func (m *ListCloudsResponse) String() string { return proto.CompactTextString(m) } +func (*ListCloudsResponse) ProtoMessage() {} +func (*ListCloudsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cloud_service_7c926146593289a6, []int{2} +} +func (m *ListCloudsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCloudsResponse.Unmarshal(m, b) +} +func (m *ListCloudsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCloudsResponse.Marshal(b, m, deterministic) +} +func (dst *ListCloudsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCloudsResponse.Merge(dst, src) +} +func (m *ListCloudsResponse) XXX_Size() int { + return xxx_messageInfo_ListCloudsResponse.Size(m) +} +func (m *ListCloudsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListCloudsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCloudsResponse proto.InternalMessageInfo + +func (m *ListCloudsResponse) GetClouds() []*Cloud { + if m != nil { + return m.Clouds + } + return nil +} + +func (m *ListCloudsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListCloudOperationsRequest struct { + // ID of the Cloud resource to list operations for. + CloudId string `protobuf:"bytes,1,opt,name=cloud_id,json=cloudId,proto3" json:"cloud_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListCloudOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Acceptable values are 0 to 1000, inclusive. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. Set [page_token] + // to the [ListCloudOperationsResponse.next_page_token] + // returned by a previous list request to get the next page of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCloudOperationsRequest) Reset() { *m = ListCloudOperationsRequest{} } +func (m *ListCloudOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListCloudOperationsRequest) ProtoMessage() {} +func (*ListCloudOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cloud_service_7c926146593289a6, []int{3} +} +func (m *ListCloudOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCloudOperationsRequest.Unmarshal(m, b) +} +func (m *ListCloudOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCloudOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListCloudOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCloudOperationsRequest.Merge(dst, src) +} +func (m *ListCloudOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListCloudOperationsRequest.Size(m) +} +func (m *ListCloudOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListCloudOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCloudOperationsRequest proto.InternalMessageInfo + +func (m *ListCloudOperationsRequest) GetCloudId() string { + if m != nil { + return m.CloudId + } + return "" +} + +func (m *ListCloudOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListCloudOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListCloudOperationsResponse struct { + // List of operations for the specified cloud. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListCloudOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListCloudOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListCloudOperationsResponse) Reset() { *m = ListCloudOperationsResponse{} } +func (m *ListCloudOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListCloudOperationsResponse) ProtoMessage() {} +func (*ListCloudOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cloud_service_7c926146593289a6, []int{4} +} +func (m *ListCloudOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListCloudOperationsResponse.Unmarshal(m, b) +} +func (m *ListCloudOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListCloudOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListCloudOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListCloudOperationsResponse.Merge(dst, src) +} +func (m *ListCloudOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListCloudOperationsResponse.Size(m) +} +func (m *ListCloudOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListCloudOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListCloudOperationsResponse proto.InternalMessageInfo + +func (m *ListCloudOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListCloudOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetCloudRequest)(nil), "yandex.cloud.resourcemanager.v1.GetCloudRequest") + proto.RegisterType((*ListCloudsRequest)(nil), "yandex.cloud.resourcemanager.v1.ListCloudsRequest") + proto.RegisterType((*ListCloudsResponse)(nil), "yandex.cloud.resourcemanager.v1.ListCloudsResponse") + proto.RegisterType((*ListCloudOperationsRequest)(nil), "yandex.cloud.resourcemanager.v1.ListCloudOperationsRequest") + proto.RegisterType((*ListCloudOperationsResponse)(nil), "yandex.cloud.resourcemanager.v1.ListCloudOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// CloudServiceClient is the client API for CloudService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type CloudServiceClient interface { + // Returns the specified Cloud resource. + // + // To get the list of available Cloud resources, make a [List] request. + Get(ctx context.Context, in *GetCloudRequest, opts ...grpc.CallOption) (*Cloud, error) + // Retrieves the list of Cloud resources. + List(ctx context.Context, in *ListCloudsRequest, opts ...grpc.CallOption) (*ListCloudsResponse, error) + // Lists operations for the specified cloud. + ListOperations(ctx context.Context, in *ListCloudOperationsRequest, opts ...grpc.CallOption) (*ListCloudOperationsResponse, error) + // Lists access bindings for the specified cloud. 
+ ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) + // Sets access bindings for the specified cloud. + SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates access bindings for the specified cloud. + UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type cloudServiceClient struct { + cc *grpc.ClientConn +} + +func NewCloudServiceClient(cc *grpc.ClientConn) CloudServiceClient { + return &cloudServiceClient{cc} +} + +func (c *cloudServiceClient) Get(ctx context.Context, in *GetCloudRequest, opts ...grpc.CallOption) (*Cloud, error) { + out := new(Cloud) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.CloudService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudServiceClient) List(ctx context.Context, in *ListCloudsRequest, opts ...grpc.CallOption) (*ListCloudsResponse, error) { + out := new(ListCloudsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.CloudService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudServiceClient) ListOperations(ctx context.Context, in *ListCloudOperationsRequest, opts ...grpc.CallOption) (*ListCloudOperationsResponse, error) { + out := new(ListCloudOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.CloudService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudServiceClient) ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) { + out := new(access.ListAccessBindingsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.CloudService/ListAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudServiceClient) SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.CloudService/SetAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudServiceClient) UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.CloudService/UpdateAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CloudServiceServer is the server API for CloudService service. +type CloudServiceServer interface { + // Returns the specified Cloud resource. + // + // To get the list of available Cloud resources, make a [List] request. + Get(context.Context, *GetCloudRequest) (*Cloud, error) + // Retrieves the list of Cloud resources. + List(context.Context, *ListCloudsRequest) (*ListCloudsResponse, error) + // Lists operations for the specified cloud. + ListOperations(context.Context, *ListCloudOperationsRequest) (*ListCloudOperationsResponse, error) + // Lists access bindings for the specified cloud. 
+ ListAccessBindings(context.Context, *access.ListAccessBindingsRequest) (*access.ListAccessBindingsResponse, error) + // Sets access bindings for the specified cloud. + SetAccessBindings(context.Context, *access.SetAccessBindingsRequest) (*operation.Operation, error) + // Updates access bindings for the specified cloud. + UpdateAccessBindings(context.Context, *access.UpdateAccessBindingsRequest) (*operation.Operation, error) +} + +func RegisterCloudServiceServer(s *grpc.Server, srv CloudServiceServer) { + s.RegisterService(&_CloudService_serviceDesc, srv) +} + +func _CloudService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCloudRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.CloudService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudServiceServer).Get(ctx, req.(*GetCloudRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCloudsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.CloudService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudServiceServer).List(ctx, req.(*ListCloudsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCloudOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.CloudService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudServiceServer).ListOperations(ctx, req.(*ListCloudOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudService_ListAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.ListAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudServiceServer).ListAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.CloudService/ListAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudServiceServer).ListAccessBindings(ctx, req.(*access.ListAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudService_SetAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.SetAccessBindingsRequest) + if err := dec(in); 
err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudServiceServer).SetAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.CloudService/SetAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudServiceServer).SetAccessBindings(ctx, req.(*access.SetAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudService_UpdateAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.UpdateAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudServiceServer).UpdateAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.CloudService/UpdateAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudServiceServer).UpdateAccessBindings(ctx, req.(*access.UpdateAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CloudService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.resourcemanager.v1.CloudService", + HandlerType: (*CloudServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _CloudService_Get_Handler, + }, + { + MethodName: "List", + Handler: _CloudService_List_Handler, + }, + { + MethodName: "ListOperations", + Handler: _CloudService_ListOperations_Handler, + }, + { + MethodName: "ListAccessBindings", + Handler: _CloudService_ListAccessBindings_Handler, + }, + { + MethodName: "SetAccessBindings", + Handler: _CloudService_SetAccessBindings_Handler, + }, + { + MethodName: "UpdateAccessBindings", + Handler: _CloudService_UpdateAccessBindings_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/resourcemanager/v1/cloud_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/resourcemanager/v1/cloud_service.proto", fileDescriptor_cloud_service_7c926146593289a6) +} + +var fileDescriptor_cloud_service_7c926146593289a6 = []byte{ + // 716 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xd6, 0x35, 0x25, 0x34, 0x47, 0xa1, 0xea, 0x09, 0xa4, 0xc8, 0xa5, 0x22, 0xb8, 0x52, 0x88, + 0x8a, 0x62, 0xc7, 0x8d, 0x18, 0x9a, 0xa6, 0x15, 0x4d, 0x41, 0x15, 0x12, 0x08, 0x94, 0xd0, 0x85, + 0xa5, 0xba, 0xc6, 0x57, 0x63, 0x91, 0xfa, 0x8c, 0xef, 0x12, 0xb5, 0x05, 0x16, 0x58, 0x20, 0x0b, + 0x03, 0x0c, 0xfc, 0x02, 0x7e, 0x00, 0x2b, 0x3b, 0xed, 0x0c, 0x7f, 0x81, 0x01, 0x89, 0x1f, 0x80, + 0xc4, 0x84, 0x7c, 0x67, 0xa7, 0x89, 0x9d, 0x36, 0x0e, 0x93, 0x25, 0xbf, 0xef, 0x7b, 0xef, 0x7b, + 0xdf, 0x7b, 0x77, 0x07, 0xcb, 0x07, 0xd8, 0x31, 0xc9, 0xbe, 0xde, 0x6c, 0xd1, 0xb6, 0xa9, 0x7b, + 0x84, 0xd1, 0xb6, 0xd7, 0x24, 0x7b, 0xd8, 0xc1, 0x16, 0xf1, 0xf4, 0x8e, 0x21, 0x03, 0xdb, 0x8c, + 0x78, 0x1d, 0xbb, 0x49, 0x34, 0xd7, 0xa3, 0x9c, 0xa2, 0x6b, 0x92, 0xa4, 0x89, 0x98, 0x16, 0x21, + 0x69, 0x1d, 0x43, 0xb9, 0x6a, 0x51, 0x6a, 0xb5, 0x88, 0x8e, 0x5d, 0x5b, 0xc7, 0x8e, 0x43, 0x39, + 0xe6, 0x36, 0x75, 0x98, 0xa4, 0x2b, 0x37, 0x13, 0xd5, 0x0c, 0xc0, 0x4a, 0x00, 0xf6, 0x53, 0x51, + 0x97, 0x78, 0x22, 0x53, 0x10, 0xcb, 0x0d, 0x24, 0xc2, 0xcd, 0x26, 0x61, 0x2c, 0xf8, 0x04, 0x88, + 0xfc, 0x00, 0xa2, 0xc7, 0x8f, 0x65, 0x9a, 0x1f, 0xc0, 0x75, 0x70, 
0xcb, 0x36, 0xfb, 0xc2, 0x6a, + 0x05, 0xce, 0x6c, 0x12, 0xbe, 0xe1, 0x07, 0xeb, 0xe4, 0x79, 0x9b, 0x30, 0x8e, 0x6e, 0xc0, 0x29, + 0x69, 0x8d, 0x6d, 0x66, 0x41, 0x0e, 0x14, 0x32, 0xb5, 0xe9, 0x5f, 0x47, 0x06, 0xe8, 0x1e, 0x1b, + 0x93, 0xd5, 0xd5, 0x5b, 0xa5, 0xfa, 0x79, 0x11, 0xbd, 0x67, 0xaa, 0x5d, 0x00, 0x67, 0xef, 0xdb, + 0x4c, 0xb2, 0xd9, 0x09, 0x3d, 0xe3, 0x62, 0x8b, 0x6c, 0x33, 0xfb, 0x90, 0x08, 0x7e, 0xaa, 0x06, + 0xff, 0x1e, 0x19, 0xe9, 0xea, 0xaa, 0x51, 0x2a, 0x95, 0xea, 0x53, 0x7e, 0xb0, 0x61, 0x1f, 0x12, + 0x54, 0x80, 0x50, 0x00, 0x39, 0x7d, 0x46, 0x9c, 0xec, 0x84, 0xa8, 0x94, 0xe9, 0x1e, 0x1b, 0xe7, + 0x04, 0xb2, 0x2e, 0xb2, 0x3c, 0xf6, 0x63, 0x48, 0x85, 0xe9, 0x5d, 0xbb, 0xc5, 0x89, 0x97, 0x4d, + 0x09, 0x14, 0xec, 0x1e, 0xf7, 0xf2, 0x05, 0x11, 0xf5, 0x25, 0x44, 0xfd, 0x5a, 0x98, 0x4b, 0x1d, + 0x46, 0xd0, 0x1a, 0x4c, 0x0b, 0xb5, 0x2c, 0x0b, 0x72, 0xa9, 0xc2, 0x85, 0xa5, 0xbc, 0x36, 0x62, + 0xc0, 0x9a, 0xb4, 0x22, 0x60, 0xa1, 0x3c, 0x9c, 0x71, 0xc8, 0x3e, 0xdf, 0x8e, 0x0a, 0xad, 0x5f, + 0xf4, 0x7f, 0x3f, 0x0a, 0x15, 0xaa, 0x9f, 0x00, 0x54, 0x7a, 0xe5, 0x1f, 0x86, 0x23, 0x60, 0xe3, + 0x5a, 0x3a, 0x68, 0xde, 0x44, 0x62, 0xf3, 0x52, 0xa7, 0x9b, 0xa7, 0xbe, 0x05, 0x70, 0x6e, 0xa8, + 0xb4, 0xc0, 0xa2, 0x75, 0x08, 0x7b, 0x3b, 0x13, 0xda, 0x74, 0x7d, 0xd0, 0xa6, 0x93, 0x9d, 0xea, + 0xf1, 0xeb, 0x7d, 0xa4, 0xa4, 0x2e, 0x2d, 0x7d, 0xce, 0xc0, 0x69, 0x21, 0xa3, 0x21, 0x0f, 0x1d, + 0x7a, 0x0f, 0x60, 0x6a, 0x93, 0x70, 0x54, 0x1a, 0x39, 0x96, 0xc8, 0x92, 0x2a, 0x09, 0x07, 0xa9, + 0x6a, 0xaf, 0x7f, 0xfc, 0xfc, 0x30, 0x51, 0x40, 0xf9, 0xde, 0x69, 0x2c, 0x46, 0x8f, 0x23, 0xd3, + 0x5f, 0x84, 0xc3, 0x79, 0x85, 0x3e, 0x02, 0x38, 0xe9, 0xbb, 0x85, 0x96, 0x46, 0x16, 0x88, 0xad, + 0xbe, 0x52, 0x1e, 0x8b, 0x23, 0xfd, 0x57, 0x17, 0x84, 0xc2, 0x79, 0x34, 0x77, 0x86, 0x42, 0xf4, + 0x0d, 0xc0, 0x4b, 0x3e, 0xf7, 0x64, 0x7e, 0x68, 0x25, 0x79, 0xb1, 0xd8, 0x42, 0x2a, 0xd5, 0xff, + 0x23, 0x07, 0x92, 0x97, 0x85, 0xe4, 0x32, 0x32, 0x92, 0x99, 0xaa, 0xf7, 0xad, 0xca, 0x57, 0x20, + 0xcf, 0xe9, 0xba, 0xb8, 0xcb, 0x6a, 0xb6, 0x63, 0xda, 0x8e, 0xc5, 0x90, 0x36, 0xa8, 0x27, 0xb8, + 0xe9, 0xe2, 0xc0, 0x50, 0xbf, 0x9e, 0x18, 0x1f, 0x48, 0xbe, 0x23, 0x24, 0xaf, 0xa1, 0xea, 0x99, + 0x92, 0xc3, 0x98, 0xaf, 0xba, 0xd2, 0x8a, 0xcb, 0xfc, 0x0d, 0xe0, 0x6c, 0x83, 0x44, 0xff, 0x16, + 0x87, 0x8a, 0x89, 0xe1, 0x42, 0xed, 0xa3, 0x0f, 0x97, 0xfa, 0x06, 0x7c, 0xf9, 0xbe, 0xb8, 0x0c, + 0x73, 0xa7, 0xa5, 0x7a, 0x40, 0x38, 0x36, 0x31, 0xc7, 0xe8, 0x8a, 0x7c, 0x8e, 0xe4, 0x65, 0xbe, + 0xd3, 0xde, 0xd5, 0xee, 0xee, 0xb9, 0xfc, 0x40, 0xf4, 0x7a, 0x5b, 0x5d, 0x49, 0xde, 0x2b, 0x8b, + 0x56, 0xa8, 0x80, 0x45, 0xf4, 0x07, 0xc0, 0xcb, 0x5b, 0xae, 0x89, 0x39, 0x89, 0x34, 0x5c, 0x1a, + 0xda, 0xf0, 0x30, 0xe8, 0x18, 0x3d, 0xbf, 0xf3, 0x7b, 0xae, 0xc2, 0x85, 0x33, 0xb2, 0x25, 0x69, + 0x7b, 0x43, 0x5d, 0x4b, 0xde, 0x76, 0x7b, 0x48, 0x91, 0x0a, 0x58, 0xac, 0x6d, 0x3d, 0x69, 0x58, + 0x36, 0x7f, 0xda, 0xde, 0xd1, 0x9a, 0x74, 0x4f, 0x97, 0xd2, 0x8b, 0xf2, 0x05, 0xb5, 0x68, 0xd1, + 0x22, 0x8e, 0x28, 0xac, 0x8f, 0x78, 0xed, 0x57, 0x22, 0xbf, 0x76, 0xd2, 0x82, 0x56, 0xfe, 0x17, + 0x00, 0x00, 0xff, 0xff, 0x6f, 0x0f, 0xcf, 0x65, 0x9b, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder.pb.go new file mode 100644 index 000000000..68a546aaf --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT.
+// source: yandex/cloud/resourcemanager/v1/folder.proto
+
+package resourcemanager // import "github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Folder_Status int32
+
+const (
+	Folder_STATUS_UNSPECIFIED Folder_Status = 0
+	// The folder is active.
+	Folder_ACTIVE Folder_Status = 1
+	// The folder is being deleted.
+	Folder_DELETING Folder_Status = 2
+)
+
+var Folder_Status_name = map[int32]string{
+	0: "STATUS_UNSPECIFIED",
+	1: "ACTIVE",
+	2: "DELETING",
+}
+var Folder_Status_value = map[string]int32{
+	"STATUS_UNSPECIFIED": 0,
+	"ACTIVE": 1,
+	"DELETING": 2,
+}
+
+func (x Folder_Status) String() string {
+	return proto.EnumName(Folder_Status_name, int32(x))
+}
+func (Folder_Status) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_folder_e30a59b36b19bdc0, []int{0, 0}
+}
+
+// A Folder resource. For more information, see [Folder](/docs/resource-manager/concepts/resources-hierarchy#folder).
+type Folder struct {
+	// ID of the folder.
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// ID of the cloud that the folder belongs to.
+	CloudId string `protobuf:"bytes,2,opt,name=cloud_id,json=cloudId,proto3" json:"cloud_id,omitempty"`
+	// Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+	CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+	// Name of the folder.
+	// The name is unique within the cloud. 3-63 characters long.
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	// Description of the folder. 0-256 characters long.
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	// Resource labels as `` key:value `` pairs. Maximum of 64 per resource.
+	Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Status of the folder.
+ Status Folder_Status `protobuf:"varint,7,opt,name=status,proto3,enum=yandex.cloud.resourcemanager.v1.Folder_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Folder) Reset() { *m = Folder{} } +func (m *Folder) String() string { return proto.CompactTextString(m) } +func (*Folder) ProtoMessage() {} +func (*Folder) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_e30a59b36b19bdc0, []int{0} +} +func (m *Folder) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Folder.Unmarshal(m, b) +} +func (m *Folder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Folder.Marshal(b, m, deterministic) +} +func (dst *Folder) XXX_Merge(src proto.Message) { + xxx_messageInfo_Folder.Merge(dst, src) +} +func (m *Folder) XXX_Size() int { + return xxx_messageInfo_Folder.Size(m) +} +func (m *Folder) XXX_DiscardUnknown() { + xxx_messageInfo_Folder.DiscardUnknown(m) +} + +var xxx_messageInfo_Folder proto.InternalMessageInfo + +func (m *Folder) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Folder) GetCloudId() string { + if m != nil { + return m.CloudId + } + return "" +} + +func (m *Folder) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Folder) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Folder) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Folder) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Folder) GetStatus() Folder_Status { + if m != nil { + return m.Status + } + return Folder_STATUS_UNSPECIFIED +} + +func init() { + proto.RegisterType((*Folder)(nil), "yandex.cloud.resourcemanager.v1.Folder") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.resourcemanager.v1.Folder.LabelsEntry") + proto.RegisterEnum("yandex.cloud.resourcemanager.v1.Folder_Status", Folder_Status_name, Folder_Status_value) +} + +func init() { + proto.RegisterFile("yandex/cloud/resourcemanager/v1/folder.proto", fileDescriptor_folder_e30a59b36b19bdc0) +} + +var fileDescriptor_folder_e30a59b36b19bdc0 = []byte{ + // 394 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x6e, 0x9b, 0x40, + 0x10, 0x86, 0x0b, 0x24, 0x24, 0x19, 0xaa, 0x08, 0xad, 0xaa, 0x8a, 0xfa, 0x12, 0x94, 0x13, 0x87, + 0x66, 0x57, 0x71, 0x2e, 0x4d, 0x7a, 0x72, 0x13, 0x5c, 0xa1, 0x46, 0x51, 0x05, 0xb8, 0x87, 0x5e, + 0xac, 0x85, 0x5d, 0x53, 0x54, 0x60, 0xad, 0x65, 0xb1, 0xea, 0xb7, 0xea, 0x23, 0x56, 0xdd, 0xc5, + 0x92, 0xe5, 0x8b, 0x73, 0x9b, 0x19, 0xfe, 0xff, 0x1b, 0x66, 0x76, 0xe0, 0xe3, 0x96, 0x76, 0x8c, + 0xff, 0x21, 0x65, 0x23, 0x06, 0x46, 0x24, 0xef, 0xc5, 0x20, 0x4b, 0xde, 0xd2, 0x8e, 0x56, 0x5c, + 0x92, 0xcd, 0x2d, 0x59, 0x89, 0x86, 0x71, 0x89, 0xd7, 0x52, 0x28, 0x81, 0xae, 0x8c, 0x1a, 0x6b, + 0x35, 0x3e, 0x50, 0xe3, 0xcd, 0xed, 0xe4, 0xaa, 0x12, 0xa2, 0x6a, 0x38, 0xd1, 0xf2, 0x62, 0x58, + 0x11, 0x55, 0xb7, 0xbc, 0x57, 0xb4, 0x5d, 0x1b, 0xc2, 0xf5, 0x5f, 0x07, 0xdc, 0xb9, 0x46, 0xa2, + 0x4b, 0xb0, 0x6b, 0x16, 0x58, 0xa1, 0x15, 0x5d, 0xa4, 0x76, 0xcd, 0xd0, 0x07, 0x38, 0xd7, 0xdc, + 0x65, 0xcd, 0x02, 0x5b, 0x57, 0xcf, 0x74, 0x9e, 0x30, 0x74, 0x0f, 0x50, 0x4a, 0x4e, 0x15, 0x67, + 0x4b, 0xaa, 0x02, 0x27, 0xb4, 0x22, 0x6f, 0x3a, 0xc1, 0xa6, 0x17, 0xde, 0xf5, 0xc2, 0xf9, 0xae, + 0x57, 0x7a, 0x31, 0xaa, 0x67, 
0x0a, 0x21, 0x38, 0xe9, 0x68, 0xcb, 0x83, 0x13, 0x4d, 0xd4, 0x31, + 0x0a, 0xc1, 0x63, 0xbc, 0x2f, 0x65, 0xbd, 0x56, 0xb5, 0xe8, 0x82, 0x53, 0xfd, 0x69, 0xbf, 0x84, + 0xbe, 0x81, 0xdb, 0xd0, 0x82, 0x37, 0x7d, 0xe0, 0x86, 0x4e, 0xe4, 0x4d, 0xef, 0xf0, 0x91, 0xc9, + 0xb1, 0x19, 0x0a, 0x3f, 0x6b, 0x57, 0xdc, 0x29, 0xb9, 0x4d, 0x47, 0x04, 0x9a, 0x83, 0xdb, 0x2b, + 0xaa, 0x86, 0x3e, 0x38, 0x0b, 0xad, 0xe8, 0x72, 0x8a, 0x5f, 0x0b, 0xcb, 0xb4, 0x2b, 0x1d, 0xdd, + 0x93, 0x7b, 0xf0, 0xf6, 0xf0, 0xc8, 0x07, 0xe7, 0x37, 0xdf, 0x8e, 0x0b, 0xfc, 0x1f, 0xa2, 0x77, + 0x70, 0xba, 0xa1, 0xcd, 0xc0, 0xc7, 0xf5, 0x99, 0xe4, 0xc1, 0xfe, 0x64, 0x5d, 0x3f, 0x80, 0x6b, + 0x60, 0xe8, 0x3d, 0xa0, 0x2c, 0x9f, 0xe5, 0x8b, 0x6c, 0xb9, 0x78, 0xc9, 0xbe, 0xc7, 0x8f, 0xc9, + 0x3c, 0x89, 0x9f, 0xfc, 0x37, 0x08, 0xc0, 0x9d, 0x3d, 0xe6, 0xc9, 0x8f, 0xd8, 0xb7, 0xd0, 0x5b, + 0x38, 0x7f, 0x8a, 0x9f, 0xe3, 0x3c, 0x79, 0xf9, 0xea, 0xdb, 0x5f, 0x16, 0x3f, 0xb3, 0xaa, 0x56, + 0xbf, 0x86, 0x02, 0x97, 0xa2, 0x25, 0xe6, 0xd7, 0x6f, 0xcc, 0xbd, 0x54, 0xe2, 0xa6, 0xe2, 0x9d, + 0x7e, 0x00, 0x72, 0xe4, 0x90, 0x3e, 0x1f, 0x94, 0x0a, 0x57, 0xdb, 0xee, 0xfe, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x91, 0xd1, 0x66, 0x6f, 0x82, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder_service.pb.go new file mode 100644 index 000000000..21b7c7331 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1/folder_service.pb.go @@ -0,0 +1,1071 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/resourcemanager/v1/folder_service.proto + +package resourcemanager // import "github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import access "github.com/yandex-cloud/go-genproto/yandex/cloud/access" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetFolderRequest struct { + // ID of the Folder resource to return. + // To get the folder ID, use a [FolderService.List] request. 
+ FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetFolderRequest) Reset() { *m = GetFolderRequest{} } +func (m *GetFolderRequest) String() string { return proto.CompactTextString(m) } +func (*GetFolderRequest) ProtoMessage() {} +func (*GetFolderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{0} +} +func (m *GetFolderRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetFolderRequest.Unmarshal(m, b) +} +func (m *GetFolderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetFolderRequest.Marshal(b, m, deterministic) +} +func (dst *GetFolderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetFolderRequest.Merge(dst, src) +} +func (m *GetFolderRequest) XXX_Size() int { + return xxx_messageInfo_GetFolderRequest.Size(m) +} +func (m *GetFolderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetFolderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetFolderRequest proto.InternalMessageInfo + +func (m *GetFolderRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +type ListFoldersRequest struct { + // ID of the cloud to list folders in. + // To get the cloud ID, use a [yandex.cloud.resourcemanager.v1.CloudService.List] request. + CloudId string `protobuf:"bytes,1,opt,name=cloud_id,json=cloudId,proto3" json:"cloud_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListFoldersResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. Set [page_token] + // to the [ListFoldersResponse.next_page_token] + // returned by a previous list request to get the next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [Folder.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]$`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListFoldersRequest) Reset() { *m = ListFoldersRequest{} } +func (m *ListFoldersRequest) String() string { return proto.CompactTextString(m) } +func (*ListFoldersRequest) ProtoMessage() {} +func (*ListFoldersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{1} +} +func (m *ListFoldersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListFoldersRequest.Unmarshal(m, b) +} +func (m *ListFoldersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListFoldersRequest.Marshal(b, m, deterministic) +} +func (dst *ListFoldersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListFoldersRequest.Merge(dst, src) +} +func (m *ListFoldersRequest) XXX_Size() int { + return xxx_messageInfo_ListFoldersRequest.Size(m) +} +func (m *ListFoldersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListFoldersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListFoldersRequest proto.InternalMessageInfo + +func (m *ListFoldersRequest) GetCloudId() string { + if m != nil { + return m.CloudId + } + return "" +} + +func (m *ListFoldersRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListFoldersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListFoldersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListFoldersResponse struct { + // List of Folder resources. + Folders []*Folder `protobuf:"bytes,1,rep,name=folders,proto3" json:"folders,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListFoldersRequest.page_size], use + // the [next_page_token] as the value + // for the [ListFoldersRequest.page_token] query parameter + // in the next list request. Each subsequent list request will have its own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListFoldersResponse) Reset() { *m = ListFoldersResponse{} } +func (m *ListFoldersResponse) String() string { return proto.CompactTextString(m) } +func (*ListFoldersResponse) ProtoMessage() {} +func (*ListFoldersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{2} +} +func (m *ListFoldersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListFoldersResponse.Unmarshal(m, b) +} +func (m *ListFoldersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListFoldersResponse.Marshal(b, m, deterministic) +} +func (dst *ListFoldersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListFoldersResponse.Merge(dst, src) +} +func (m *ListFoldersResponse) XXX_Size() int { + return xxx_messageInfo_ListFoldersResponse.Size(m) +} +func (m *ListFoldersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListFoldersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListFoldersResponse proto.InternalMessageInfo + +func (m *ListFoldersResponse) GetFolders() []*Folder { + if m != nil { + return m.Folders + } + return nil +} + +func (m *ListFoldersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateFolderRequest struct { + // ID of the cloud to create a folder in. + // To get the cloud ID, use a [yandex.cloud.resourcemanager.v1.CloudService.List] request. + CloudId string `protobuf:"bytes,1,opt,name=cloud_id,json=cloudId,proto3" json:"cloud_id,omitempty"` + // Name of the folder. + // The name must be unique within the cloud. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the folder. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. 
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateFolderRequest) Reset() { *m = CreateFolderRequest{} } +func (m *CreateFolderRequest) String() string { return proto.CompactTextString(m) } +func (*CreateFolderRequest) ProtoMessage() {} +func (*CreateFolderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{3} +} +func (m *CreateFolderRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateFolderRequest.Unmarshal(m, b) +} +func (m *CreateFolderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateFolderRequest.Marshal(b, m, deterministic) +} +func (dst *CreateFolderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateFolderRequest.Merge(dst, src) +} +func (m *CreateFolderRequest) XXX_Size() int { + return xxx_messageInfo_CreateFolderRequest.Size(m) +} +func (m *CreateFolderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateFolderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateFolderRequest proto.InternalMessageInfo + +func (m *CreateFolderRequest) GetCloudId() string { + if m != nil { + return m.CloudId + } + return "" +} + +func (m *CreateFolderRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateFolderRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateFolderRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type CreateFolderMetadata struct { + // ID of the folder that is being created. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateFolderMetadata) Reset() { *m = CreateFolderMetadata{} } +func (m *CreateFolderMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateFolderMetadata) ProtoMessage() {} +func (*CreateFolderMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{4} +} +func (m *CreateFolderMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateFolderMetadata.Unmarshal(m, b) +} +func (m *CreateFolderMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateFolderMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateFolderMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateFolderMetadata.Merge(dst, src) +} +func (m *CreateFolderMetadata) XXX_Size() int { + return xxx_messageInfo_CreateFolderMetadata.Size(m) +} +func (m *CreateFolderMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateFolderMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateFolderMetadata proto.InternalMessageInfo + +func (m *CreateFolderMetadata) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +type UpdateFolderRequest struct { + // ID of the Folder resource to update. + // To get the folder ID, use a [FolderService.List] request. 
+ FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Field mask that specifies which fields of the Folder resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the folder. + // The name must be unique within the cloud. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the folder. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateFolderRequest) Reset() { *m = UpdateFolderRequest{} } +func (m *UpdateFolderRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateFolderRequest) ProtoMessage() {} +func (*UpdateFolderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{5} +} +func (m *UpdateFolderRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateFolderRequest.Unmarshal(m, b) +} +func (m *UpdateFolderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateFolderRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateFolderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateFolderRequest.Merge(dst, src) +} +func (m *UpdateFolderRequest) XXX_Size() int { + return xxx_messageInfo_UpdateFolderRequest.Size(m) +} +func (m *UpdateFolderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateFolderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateFolderRequest proto.InternalMessageInfo + +func (m *UpdateFolderRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *UpdateFolderRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateFolderRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateFolderRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateFolderRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type UpdateFolderMetadata struct { + // ID of the Folder resource that is being updated. 
+ FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateFolderMetadata) Reset() { *m = UpdateFolderMetadata{} } +func (m *UpdateFolderMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateFolderMetadata) ProtoMessage() {} +func (*UpdateFolderMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{6} +} +func (m *UpdateFolderMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateFolderMetadata.Unmarshal(m, b) +} +func (m *UpdateFolderMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateFolderMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateFolderMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateFolderMetadata.Merge(dst, src) +} +func (m *UpdateFolderMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateFolderMetadata.Size(m) +} +func (m *UpdateFolderMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateFolderMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateFolderMetadata proto.InternalMessageInfo + +func (m *UpdateFolderMetadata) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +type DeleteFolderRequest struct { + // ID of the folder to delete. + // To get the folder ID, use a [FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteFolderRequest) Reset() { *m = DeleteFolderRequest{} } +func (m *DeleteFolderRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteFolderRequest) ProtoMessage() {} +func (*DeleteFolderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{7} +} +func (m *DeleteFolderRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteFolderRequest.Unmarshal(m, b) +} +func (m *DeleteFolderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteFolderRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteFolderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteFolderRequest.Merge(dst, src) +} +func (m *DeleteFolderRequest) XXX_Size() int { + return xxx_messageInfo_DeleteFolderRequest.Size(m) +} +func (m *DeleteFolderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteFolderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteFolderRequest proto.InternalMessageInfo + +func (m *DeleteFolderRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +type DeleteFolderMetadata struct { + // ID of the folder that is being deleted. 
+ FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteFolderMetadata) Reset() { *m = DeleteFolderMetadata{} } +func (m *DeleteFolderMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteFolderMetadata) ProtoMessage() {} +func (*DeleteFolderMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{8} +} +func (m *DeleteFolderMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteFolderMetadata.Unmarshal(m, b) +} +func (m *DeleteFolderMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteFolderMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteFolderMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteFolderMetadata.Merge(dst, src) +} +func (m *DeleteFolderMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteFolderMetadata.Size(m) +} +func (m *DeleteFolderMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteFolderMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteFolderMetadata proto.InternalMessageInfo + +func (m *DeleteFolderMetadata) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +type ListFolderOperationsRequest struct { + // ID of the Folder resource to list operations for. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], the service returns a [ListFolderOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. + // Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. Set [page_token] + // to the [ListFolderOperationsResponse.next_page_token] + // returned by a previous list request to get the next page of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListFolderOperationsRequest) Reset() { *m = ListFolderOperationsRequest{} } +func (m *ListFolderOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListFolderOperationsRequest) ProtoMessage() {} +func (*ListFolderOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{9} +} +func (m *ListFolderOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListFolderOperationsRequest.Unmarshal(m, b) +} +func (m *ListFolderOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListFolderOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListFolderOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListFolderOperationsRequest.Merge(dst, src) +} +func (m *ListFolderOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListFolderOperationsRequest.Size(m) +} +func (m *ListFolderOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListFolderOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListFolderOperationsRequest proto.InternalMessageInfo + +func (m *ListFolderOperationsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListFolderOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListFolderOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListFolderOperationsResponse struct { + // List of operations for the specified folder. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListFolderOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListFolderOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListFolderOperationsResponse) Reset() { *m = ListFolderOperationsResponse{} } +func (m *ListFolderOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListFolderOperationsResponse) ProtoMessage() {} +func (*ListFolderOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_folder_service_1b69e2b4ea1a512d, []int{10} +} +func (m *ListFolderOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListFolderOperationsResponse.Unmarshal(m, b) +} +func (m *ListFolderOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListFolderOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListFolderOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListFolderOperationsResponse.Merge(dst, src) +} +func (m *ListFolderOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListFolderOperationsResponse.Size(m) +} +func (m *ListFolderOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListFolderOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListFolderOperationsResponse proto.InternalMessageInfo + +func (m *ListFolderOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListFolderOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetFolderRequest)(nil), "yandex.cloud.resourcemanager.v1.GetFolderRequest") + proto.RegisterType((*ListFoldersRequest)(nil), "yandex.cloud.resourcemanager.v1.ListFoldersRequest") + proto.RegisterType((*ListFoldersResponse)(nil), "yandex.cloud.resourcemanager.v1.ListFoldersResponse") + proto.RegisterType((*CreateFolderRequest)(nil), "yandex.cloud.resourcemanager.v1.CreateFolderRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.resourcemanager.v1.CreateFolderRequest.LabelsEntry") + proto.RegisterType((*CreateFolderMetadata)(nil), "yandex.cloud.resourcemanager.v1.CreateFolderMetadata") + proto.RegisterType((*UpdateFolderRequest)(nil), "yandex.cloud.resourcemanager.v1.UpdateFolderRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.resourcemanager.v1.UpdateFolderRequest.LabelsEntry") + proto.RegisterType((*UpdateFolderMetadata)(nil), "yandex.cloud.resourcemanager.v1.UpdateFolderMetadata") + proto.RegisterType((*DeleteFolderRequest)(nil), "yandex.cloud.resourcemanager.v1.DeleteFolderRequest") + proto.RegisterType((*DeleteFolderMetadata)(nil), "yandex.cloud.resourcemanager.v1.DeleteFolderMetadata") + proto.RegisterType((*ListFolderOperationsRequest)(nil), "yandex.cloud.resourcemanager.v1.ListFolderOperationsRequest") + proto.RegisterType((*ListFolderOperationsResponse)(nil), "yandex.cloud.resourcemanager.v1.ListFolderOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// FolderServiceClient is the client API for FolderService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type FolderServiceClient interface { + // Returns the specified Folder resource. + // + // To get the list of available Folder resources, make a [List] request. + Get(ctx context.Context, in *GetFolderRequest, opts ...grpc.CallOption) (*Folder, error) + // Retrieves the list of Folder resources in the specified cloud. + List(ctx context.Context, in *ListFoldersRequest, opts ...grpc.CallOption) (*ListFoldersResponse, error) + // Creates a folder in the specified cloud. + Create(ctx context.Context, in *CreateFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified folder. + Update(ctx context.Context, in *UpdateFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified folder. + // + // The method is temporarily unavailable. + Delete(ctx context.Context, in *DeleteFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists operations for the specified folder. + ListOperations(ctx context.Context, in *ListFolderOperationsRequest, opts ...grpc.CallOption) (*ListFolderOperationsResponse, error) + // Lists access bindings for the specified folder. + ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) + // Sets access bindings for the specified folder. + SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates access bindings for the specified folder. + UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) +} + +type folderServiceClient struct { + cc *grpc.ClientConn +} + +func NewFolderServiceClient(cc *grpc.ClientConn) FolderServiceClient { + return &folderServiceClient{cc} +} + +func (c *folderServiceClient) Get(ctx context.Context, in *GetFolderRequest, opts ...grpc.CallOption) (*Folder, error) { + out := new(Folder) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) List(ctx context.Context, in *ListFoldersRequest, opts ...grpc.CallOption) (*ListFoldersResponse, error) { + out := new(ListFoldersResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) Create(ctx context.Context, in *CreateFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) Update(ctx context.Context, in *UpdateFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) Delete(ctx context.Context, in *DeleteFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) ListOperations(ctx context.Context, in *ListFolderOperationsRequest, opts ...grpc.CallOption) (*ListFolderOperationsResponse, error) { + out := new(ListFolderOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) { + out := new(access.ListAccessBindingsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/ListAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/SetAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *folderServiceClient) UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.resourcemanager.v1.FolderService/UpdateAccessBindings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FolderServiceServer is the server API for FolderService service. +type FolderServiceServer interface { + // Returns the specified Folder resource. + // + // To get the list of available Folder resources, make a [List] request. + Get(context.Context, *GetFolderRequest) (*Folder, error) + // Retrieves the list of Folder resources in the specified cloud. + List(context.Context, *ListFoldersRequest) (*ListFoldersResponse, error) + // Creates a folder in the specified cloud. + Create(context.Context, *CreateFolderRequest) (*operation.Operation, error) + // Updates the specified folder. + Update(context.Context, *UpdateFolderRequest) (*operation.Operation, error) + // Deletes the specified folder. + // + // The method is temporarily unavailable. + Delete(context.Context, *DeleteFolderRequest) (*operation.Operation, error) + // Lists operations for the specified folder. + ListOperations(context.Context, *ListFolderOperationsRequest) (*ListFolderOperationsResponse, error) + // Lists access bindings for the specified folder. + ListAccessBindings(context.Context, *access.ListAccessBindingsRequest) (*access.ListAccessBindingsResponse, error) + // Sets access bindings for the specified folder. + SetAccessBindings(context.Context, *access.SetAccessBindingsRequest) (*operation.Operation, error) + // Updates access bindings for the specified folder. 
+ UpdateAccessBindings(context.Context, *access.UpdateAccessBindingsRequest) (*operation.Operation, error) +} + +func RegisterFolderServiceServer(s *grpc.Server, srv FolderServiceServer) { + s.RegisterService(&_FolderService_serviceDesc, srv) +} + +func _FolderService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFolderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).Get(ctx, req.(*GetFolderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListFoldersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).List(ctx, req.(*ListFoldersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateFolderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).Create(ctx, req.(*CreateFolderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateFolderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).Update(ctx, req.(*UpdateFolderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteFolderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).Delete(ctx, req.(*DeleteFolderRequest)) + } + return interceptor(ctx, in, info, handler) 
+} + +func _FolderService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListFolderOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).ListOperations(ctx, req.(*ListFolderOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_ListAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.ListAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).ListAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/ListAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).ListAccessBindings(ctx, req.(*access.ListAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_SetAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.SetAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).SetAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/SetAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).SetAccessBindings(ctx, req.(*access.SetAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FolderService_UpdateAccessBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(access.UpdateAccessBindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FolderServiceServer).UpdateAccessBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.resourcemanager.v1.FolderService/UpdateAccessBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FolderServiceServer).UpdateAccessBindings(ctx, req.(*access.UpdateAccessBindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _FolderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.resourcemanager.v1.FolderService", + HandlerType: (*FolderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _FolderService_Get_Handler, + }, + { + MethodName: "List", + Handler: _FolderService_List_Handler, + }, + { + MethodName: "Create", + Handler: _FolderService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _FolderService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _FolderService_Delete_Handler, + }, + { + MethodName: "ListOperations", + Handler: 
_FolderService_ListOperations_Handler, + }, + { + MethodName: "ListAccessBindings", + Handler: _FolderService_ListAccessBindings_Handler, + }, + { + MethodName: "SetAccessBindings", + Handler: _FolderService_SetAccessBindings_Handler, + }, + { + MethodName: "UpdateAccessBindings", + Handler: _FolderService_UpdateAccessBindings_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/resourcemanager/v1/folder_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/resourcemanager/v1/folder_service.proto", fileDescriptor_folder_service_1b69e2b4ea1a512d) +} + +var fileDescriptor_folder_service_1b69e2b4ea1a512d = []byte{ + // 1110 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcf, 0x6f, 0xdb, 0x54, + 0x1c, 0xd7, 0x6b, 0xd2, 0xac, 0x79, 0xa1, 0x50, 0x5e, 0x87, 0x14, 0x79, 0xdd, 0xe8, 0x0c, 0xac, + 0x25, 0xcc, 0x76, 0x9c, 0xb4, 0x15, 0x69, 0x13, 0x75, 0x0d, 0xeb, 0xa6, 0x4a, 0x9b, 0x40, 0x2e, + 0xbd, 0x50, 0x4d, 0xd1, 0x6b, 0xfc, 0x1a, 0xac, 0x3a, 0x76, 0xf0, 0x73, 0xa2, 0xb5, 0x63, 0x12, + 0x9a, 0xb8, 0xd0, 0xeb, 0x0e, 0x08, 0xf1, 0x2f, 0x70, 0xaa, 0x38, 0x70, 0xd8, 0x11, 0xa9, 0x15, + 0xc7, 0xf2, 0x2f, 0x20, 0xc4, 0x81, 0xd3, 0x4e, 0x08, 0x09, 0x09, 0xf9, 0x3d, 0x3b, 0x75, 0x12, + 0xb7, 0x71, 0x0a, 0x87, 0x9d, 0xe2, 0xe7, 0xef, 0xaf, 0xcf, 0xf7, 0xc7, 0xfb, 0x7c, 0x1d, 0xb8, + 0xb0, 0x8f, 0x2d, 0x9d, 0x3c, 0x56, 0xea, 0xa6, 0xdd, 0xd6, 0x15, 0x87, 0x50, 0xbb, 0xed, 0xd4, + 0x49, 0x13, 0x5b, 0xb8, 0x41, 0x1c, 0xa5, 0xa3, 0x2a, 0xbb, 0xb6, 0xa9, 0x13, 0xa7, 0x46, 0x89, + 0xd3, 0x31, 0xea, 0x44, 0x6e, 0x39, 0xb6, 0x6b, 0xa3, 0xb7, 0xb9, 0x95, 0xcc, 0xac, 0xe4, 0x3e, + 0x2b, 0xb9, 0xa3, 0x0a, 0x33, 0x0d, 0xdb, 0x6e, 0x98, 0x44, 0xc1, 0x2d, 0x43, 0xc1, 0x96, 0x65, + 0xbb, 0xd8, 0x35, 0x6c, 0x8b, 0x72, 0x73, 0x61, 0xd6, 0x97, 0xb2, 0xd3, 0x4e, 0x7b, 0x57, 0xd9, + 0x35, 0x88, 0xa9, 0xd7, 0x9a, 0x98, 0xee, 0xf9, 0x1a, 0x82, 0x0f, 0xcb, 0xb3, 0xb7, 0x5b, 0xc4, + 0x61, 0xe6, 0xbe, 0xec, 0x76, 0x3c, 0xc8, 0x41, 0xac, 0x1e, 0x6d, 0x5c, 0xaf, 0x13, 0x4a, 0xfd, + 0x1f, 0x5f, 0xe3, 0x56, 0x8f, 0x46, 0x37, 0xda, 0x40, 0xdc, 0xeb, 0x3d, 0x7a, 0x1d, 0x6c, 0x1a, + 0x7a, 0x48, 0x2c, 0x56, 0xe0, 0xd4, 0x7d, 0xe2, 0xde, 0x63, 0xb1, 0x35, 0xf2, 0x45, 0x9b, 0x50, + 0x17, 0xbd, 0x0f, 0xd3, 0x7e, 0xfd, 0x0c, 0x3d, 0x0b, 0x66, 0xc1, 0x7c, 0xba, 0xfa, 0xda, 0x1f, + 0xc7, 0x2a, 0x38, 0x3c, 0x51, 0x93, 0xe5, 0xca, 0x62, 0x5e, 0x9b, 0xe0, 0xe2, 0x0d, 0x5d, 0xfc, + 0x11, 0x40, 0xf4, 0xc0, 0xa0, 0xbe, 0x03, 0x1a, 0x78, 0x98, 0x83, 0x13, 0x2c, 0xde, 0x79, 0x0e, + 0xae, 0x30, 0xe9, 0x86, 0x8e, 0xe6, 0x60, 0xba, 0x85, 0x1b, 0xa4, 0x46, 0x8d, 0x03, 0x92, 0x1d, + 0x9b, 0x05, 0xf3, 0x89, 0x2a, 0xfc, 0xfb, 0x58, 0x4d, 0x95, 0x2b, 0x6a, 0x3e, 0x9f, 0xd7, 0x26, + 0x3c, 0xe1, 0xa6, 0x71, 0x40, 0xd0, 0x3c, 0x84, 0x4c, 0xd1, 0xb5, 0xf7, 0x88, 0x95, 0x4d, 0x30, + 0x9f, 0xe9, 0xc3, 0x13, 0x75, 0x9c, 0x69, 0x6a, 0xcc, 0xcb, 0xa7, 0x9e, 0x0c, 0x89, 0x30, 0xb5, + 0x6b, 0x98, 0x2e, 0x71, 0xb2, 0x49, 0xa6, 0x05, 0x0f, 0x4f, 0xba, 0xfe, 0x7c, 0x89, 0xf8, 0x15, + 0x80, 0xd3, 0x3d, 0xb0, 0x69, 0xcb, 0xb6, 0x28, 0x41, 0x6b, 0xf0, 0x0a, 0x4f, 0x8d, 0x66, 0xc1, + 0x6c, 0x62, 0x3e, 0x53, 0x98, 0x93, 0x87, 0xcc, 0x8c, 0xec, 0x97, 0x2e, 0xb0, 0x43, 0xb7, 0xe0, + 0x1b, 0x16, 0x79, 0xec, 0xd6, 0x42, 0x68, 0xbd, 0xbc, 0xd2, 0xda, 0xa4, 0xf7, 0xfa, 0x93, 0x00, + 0xa6, 0xf8, 0xcf, 0x18, 0x9c, 0xfe, 0xc8, 0x21, 0xd8, 0x25, 0xbd, 0xc5, 0x8f, 0x5d, 0xba, 0x12, + 0x4c, 0x5a, 0xb8, 0xc9, 0xab, 0x96, 0xae, 0xbe, 0xe7, 0x29, 0xbd, 0x3c, 0x56, 0xaf, 0x7f, 0xb9, + 
0x8d, 0xa5, 0x83, 0x47, 0xdb, 0x12, 0x96, 0x0e, 0xf2, 0x52, 0xe9, 0xd1, 0x13, 0xf5, 0xf6, 0x92, + 0xfa, 0x74, 0xdb, 0x3f, 0x69, 0xcc, 0x04, 0x7d, 0x00, 0x33, 0x3a, 0xa1, 0x75, 0xc7, 0x68, 0x79, + 0x93, 0xd0, 0x5b, 0xcd, 0xc2, 0xe2, 0x92, 0x16, 0x96, 0xa2, 0xef, 0x00, 0x4c, 0x99, 0x78, 0x87, + 0x98, 0x34, 0x9b, 0x64, 0x35, 0xb9, 0x33, 0xb4, 0x26, 0x11, 0x79, 0xc9, 0x0f, 0x98, 0x8b, 0x75, + 0xcb, 0x75, 0xf6, 0xab, 0xab, 0x2f, 0x8f, 0xd5, 0xcc, 0xb6, 0x54, 0xcb, 0x4b, 0x25, 0x0f, 0x6c, + 0xee, 0x19, 0x4b, 0x6e, 0x69, 0x81, 0x27, 0xb9, 0x54, 0x3c, 0x3a, 0x51, 0x53, 0x42, 0x52, 0x95, + 0xd8, 0x13, 0x42, 0x53, 0x7e, 0x4a, 0x5d, 0x7d, 0xcd, 0x07, 0x24, 0x94, 0x60, 0x26, 0xe4, 0x17, + 0x4d, 0xc1, 0xc4, 0x1e, 0xd9, 0xe7, 0x65, 0xd3, 0xbc, 0x47, 0x74, 0x15, 0x8e, 0x77, 0xb0, 0xd9, + 0xf6, 0xab, 0xa4, 0xf1, 0xc3, 0xf2, 0xd8, 0x87, 0x40, 0x2c, 0xc2, 0xab, 0x61, 0x98, 0x0f, 0x89, + 0x8b, 0x75, 0xec, 0x62, 0x74, 0x6d, 0x60, 0xf8, 0x43, 0xe3, 0xfe, 0x22, 0x01, 0xa7, 0xb7, 0x5a, + 0xfa, 0x40, 0xd3, 0xe2, 0xdf, 0x18, 0xb4, 0x02, 0x33, 0x6d, 0xe6, 0x81, 0x11, 0x07, 0xc3, 0x95, + 0x29, 0x08, 0x32, 0xe7, 0x16, 0x39, 0xe0, 0x16, 0xf9, 0x9e, 0xc7, 0x2d, 0x0f, 0x31, 0xdd, 0xd3, + 0x20, 0x57, 0xf7, 0x9e, 0xbb, 0x3d, 0x4f, 0xfc, 0xe7, 0x9e, 0x27, 0xe3, 0xf6, 0x7c, 0x3c, 0x66, + 0xcf, 0x23, 0xca, 0xf2, 0x2a, 0xf6, 0x3c, 0x0c, 0x33, 0x5e, 0xcf, 0xef, 0xc0, 0xe9, 0xbb, 0xc4, + 0x24, 0x97, 0x6f, 0xb9, 0x17, 0x36, 0xec, 0x21, 0x5e, 0xd8, 0xef, 0x01, 0xbc, 0x76, 0x46, 0x51, + 0x1f, 0x07, 0xac, 0x4e, 0x2f, 0x31, 0x72, 0xff, 0x3f, 0xc9, 0x8a, 0xdf, 0x00, 0x38, 0x13, 0x8d, + 0xae, 0xcb, 0xa4, 0xb0, 0xbb, 0x89, 0x02, 0x32, 0xbd, 0xd9, 0x3b, 0x44, 0x67, 0x9b, 0xaa, 0x6b, + 0xaf, 0x85, 0x8c, 0xe2, 0x32, 0x69, 0xe1, 0xf7, 0x49, 0x38, 0xc9, 0x71, 0x6c, 0xf2, 0x75, 0x8f, + 0x9e, 0x03, 0x98, 0xb8, 0x4f, 0x5c, 0xa4, 0x0e, 0x9d, 0xda, 0xfe, 0xdd, 0x27, 0xc4, 0x25, 0x7c, + 0x31, 0xff, 0xec, 0xd7, 0xdf, 0x9e, 0x8f, 0xe5, 0xd0, 0x7c, 0x77, 0x97, 0x4b, 0x03, 0xcb, 0x9c, + 0x2a, 0x4f, 0xba, 0x3d, 0x7a, 0x8a, 0xbe, 0x05, 0x30, 0xe9, 0xd5, 0x0c, 0x15, 0x87, 0xc6, 0x18, + 0x5c, 0xa9, 0xc2, 0xc2, 0x68, 0x46, 0xbc, 0x0d, 0xe2, 0xbb, 0x0c, 0xe5, 0x0d, 0x34, 0x73, 0x11, + 0x4a, 0xf4, 0x03, 0x80, 0x29, 0x4e, 0x86, 0x68, 0xe1, 0x32, 0xe4, 0x2e, 0x0c, 0xef, 0xac, 0xb8, + 0x71, 0x74, 0x9a, 0xbb, 0x71, 0x0e, 0xe7, 0xa6, 0xf8, 0x99, 0x61, 0xbd, 0x29, 0x5e, 0x88, 0x75, + 0x19, 0xe4, 0xd0, 0x4f, 0x00, 0xa6, 0xf8, 0x3d, 0x8e, 0x01, 0x37, 0x82, 0x97, 0xe2, 0xc0, 0xdd, + 0xe2, 0x70, 0x23, 0xe9, 0x22, 0x0c, 0x57, 0x2a, 0xc4, 0x1e, 0x00, 0x0f, 0xfa, 0xcf, 0x00, 0xa6, + 0x38, 0x17, 0xc4, 0x80, 0x1e, 0x41, 0x3b, 0x71, 0xa0, 0xe3, 0xa3, 0xd3, 0x9c, 0x74, 0x0e, 0xe5, + 0xbc, 0xd5, 0xbf, 0x67, 0xd6, 0x9b, 0x2d, 0x77, 0x9f, 0x8f, 0x72, 0x2e, 0xfe, 0x28, 0xff, 0x02, + 0xe0, 0xeb, 0xde, 0xb8, 0x9d, 0x5d, 0x7c, 0x54, 0x1e, 0x61, 0x3e, 0x07, 0xd8, 0x4c, 0xa8, 0x5c, + 0xd2, 0xda, 0x1f, 0xf3, 0x15, 0x96, 0xc1, 0x22, 0x2a, 0xc6, 0xcd, 0x40, 0x09, 0xf1, 0xcc, 0x0b, + 0xff, 0x1b, 0x76, 0x8d, 0x7d, 0x5e, 0x57, 0x0d, 0x4b, 0x37, 0xac, 0x06, 0x45, 0x72, 0x2f, 0x24, + 0xff, 0xe3, 0x7b, 0x50, 0x31, 0x48, 0x41, 0x89, 0xad, 0xef, 0x83, 0x5e, 0x67, 0xa0, 0x57, 0x51, + 0xe5, 0x62, 0xd0, 0x81, 0x90, 0x8d, 0x90, 0x39, 0x88, 0xf3, 0x4f, 0x00, 0xdf, 0xdc, 0x24, 0xfd, + 0x6f, 0xa5, 0x48, 0x34, 0x03, 0x7a, 0x23, 0x8c, 0xd5, 0xd7, 0xe0, 0xe8, 0x34, 0x57, 0x82, 0xb3, + 0xe7, 0xb9, 0x8a, 0x33, 0x63, 0x6b, 0x62, 0x79, 0x84, 0x64, 0x69, 0x7f, 0x08, 0xef, 0x06, 0xfd, + 0x05, 0x82, 0x5b, 0xd9, 0x97, 0x71, 0x3e, 0x32, 0xe3, 0x28, 0xd5, 0x11, 0x92, 0x3e, 0xf4, 0x92, + 0x2e, 0xc3, 0x77, 0x2e, 
0xf0, 0x16, 0x27, 0xef, 0xbb, 0xe2, 0xea, 0x08, 0x79, 0xb7, 0x23, 0xa2, + 0x2c, 0x83, 0x5c, 0x75, 0xeb, 0xb3, 0xcd, 0x86, 0xe1, 0x7e, 0xde, 0xde, 0x91, 0xeb, 0x76, 0x53, + 0xe1, 0xd8, 0x25, 0xfe, 0xbf, 0xae, 0x61, 0x4b, 0x0d, 0x62, 0xb1, 0xc8, 0xca, 0x90, 0x3f, 0x9a, + 0x2b, 0x7d, 0xaf, 0x76, 0x52, 0xcc, 0xac, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xed, + 0x19, 0x34, 0x55, 0x0f, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/validation/validation.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/validation/validation.pb.go new file mode 100644 index 000000000..d2ed0dacf --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/validation/validation.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/validation.proto + +package validation // import "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type MapKeySpec struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Pattern string `protobuf:"bytes,2,opt,name=pattern,proto3" json:"pattern,omitempty"` + Length string `protobuf:"bytes,3,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MapKeySpec) Reset() { *m = MapKeySpec{} } +func (m *MapKeySpec) String() string { return proto.CompactTextString(m) } +func (*MapKeySpec) ProtoMessage() {} +func (*MapKeySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_validation_03a1dce1f846faa7, []int{0} +} +func (m *MapKeySpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MapKeySpec.Unmarshal(m, b) +} +func (m *MapKeySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MapKeySpec.Marshal(b, m, deterministic) +} +func (dst *MapKeySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MapKeySpec.Merge(dst, src) +} +func (m *MapKeySpec) XXX_Size() int { + return xxx_messageInfo_MapKeySpec.Size(m) +} +func (m *MapKeySpec) XXX_DiscardUnknown() { + xxx_messageInfo_MapKeySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MapKeySpec proto.InternalMessageInfo + +func (m *MapKeySpec) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *MapKeySpec) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *MapKeySpec) GetLength() string { + if m != nil { + return m.Length + } + return "" +} + +var E_ExactlyOne = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.OneofOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 101400, + Name: "yandex.cloud.exactly_one", + Tag: "varint,101400,opt,name=exactly_one,json=exactlyOne", + Filename: "yandex/cloud/validation.proto", +} + +var E_Required = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: 
(*bool)(nil), + Field: 101501, + Name: "yandex.cloud.required", + Tag: "varint,101501,opt,name=required", + Filename: "yandex/cloud/validation.proto", +} + +var E_Pattern = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 101502, + Name: "yandex.cloud.pattern", + Tag: "bytes,101502,opt,name=pattern", + Filename: "yandex/cloud/validation.proto", +} + +var E_Value = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 101503, + Name: "yandex.cloud.value", + Tag: "bytes,101503,opt,name=value", + Filename: "yandex/cloud/validation.proto", +} + +var E_Size = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 101504, + Name: "yandex.cloud.size", + Tag: "bytes,101504,opt,name=size", + Filename: "yandex/cloud/validation.proto", +} + +var E_Length = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 101505, + Name: "yandex.cloud.length", + Tag: "bytes,101505,opt,name=length", + Filename: "yandex/cloud/validation.proto", +} + +var E_MapKey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*MapKeySpec)(nil), + Field: 101510, + Name: "yandex.cloud.map_key", + Tag: "bytes,101510,opt,name=map_key,json=mapKey", + Filename: "yandex/cloud/validation.proto", +} + +func init() { + proto.RegisterType((*MapKeySpec)(nil), "yandex.cloud.MapKeySpec") + proto.RegisterExtension(E_ExactlyOne) + proto.RegisterExtension(E_Required) + proto.RegisterExtension(E_Pattern) + proto.RegisterExtension(E_Value) + proto.RegisterExtension(E_Size) + proto.RegisterExtension(E_Length) + proto.RegisterExtension(E_MapKey) +} + +func init() { + proto.RegisterFile("yandex/cloud/validation.proto", fileDescriptor_validation_03a1dce1f846faa7) +} + +var fileDescriptor_validation_03a1dce1f846faa7 = []byte{ + // 339 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0x4b, 0xfb, 0x30, + 0x18, 0xc6, 0xd9, 0xf7, 0xab, 0xdd, 0xcc, 0x3c, 0x15, 0x91, 0x22, 0x0c, 0x86, 0xa7, 0x5d, 0x96, + 0x82, 0x43, 0xc4, 0xee, 0x22, 0x1e, 0x86, 0x20, 0x32, 0xa8, 0x9e, 0xbc, 0x8c, 0xac, 0x7d, 0xd7, + 0x05, 0xb3, 0x24, 0x66, 0xe9, 0x58, 0x3d, 0xe9, 0xc5, 0xbf, 0x61, 0xfe, 0xb1, 0xfe, 0xc0, 0x24, + 0x5d, 0x3d, 0x28, 0xbd, 0xbd, 0x6f, 0x9e, 0xe7, 0x43, 0x9e, 0xbc, 0x79, 0x51, 0xa7, 0x20, 0x3c, + 0x85, 0x75, 0x98, 0x30, 0x91, 0xa7, 0xe1, 0x8a, 0x30, 0x9a, 0x12, 0x4d, 0x05, 0xc7, 0x52, 0x09, + 0x2d, 0xfc, 0x7d, 0x2b, 0x63, 0x23, 0x1f, 0x75, 0x33, 0x21, 0x32, 0x06, 0xa1, 0xd1, 0xa6, 0xf9, + 0x2c, 0x4c, 0x61, 0x99, 0x28, 0x2a, 0xb5, 0x50, 0xd6, 0x7f, 0x7c, 0x87, 0xd0, 0x0d, 0x91, 0xd7, + 0x50, 0xdc, 0x4a, 0x48, 0xfc, 0x03, 0xb4, 0xbb, 0x22, 0x2c, 0x87, 0xa0, 0xd1, 0x6d, 0xf4, 0xf6, + 0x62, 0xdb, 0xf8, 0x01, 0x6a, 0x4a, 0xa2, 0x35, 0x28, 0x1e, 0xfc, 0x33, 0xe7, 0x65, 0xeb, 0x1f, + 0x22, 0x8f, 0x01, 0xcf, 0xf4, 0x3c, 0xf8, 0x6f, 0x04, 0xd7, 0x45, 0x17, 0xa8, 0x0d, 0x6b, 0x92, + 0x68, 0x56, 0x4c, 0x04, 0x07, 0xbf, 0x83, 0x6d, 0x0e, 0x5c, 0xe6, 0xc0, 0x63, 0x0e, 0x62, 0x36, + 0x96, 0xdf, 0xc1, 0x97, 0xc1, 0x66, 0xe3, 0x75, 0x1b, 0xbd, 0x56, 0x8c, 0x1c, 0x33, 0xe6, 0x10, + 0x0d, 0x51, 0x4b, 0xc1, 0x63, 0x4e, 0x15, 0xa4, 0xbf, 0xe0, 0x23, 0x0a, 0x2c, 0x2d, 0xf1, 0x77, + 0x87, 0x6f, 0x81, 0xe8, 0x7c, 0x1b, 0xb8, 0x8e, 0xfd, 0x30, 0x6c, 0xf5, 0xa2, 0xe8, 0xd4, 0x4d, + 0xa0, 0x0e, 0xfc, 0x74, 0xa0, 0x75, 0x47, 0x03, 0xb4, 0xb3, 
0xa4, 0x4f, 0xb5, 0xd4, 0xf3, 0x9b, + 0xa5, 0x8c, 0x39, 0x3a, 0x2b, 0xa7, 0x57, 0x87, 0xbd, 0x38, 0xac, 0x1c, 0x6f, 0x8c, 0x9a, 0x0b, + 0x22, 0x27, 0x0f, 0x50, 0xd4, 0x91, 0xaf, 0x86, 0x6c, 0x9f, 0x04, 0xf8, 0xe7, 0x5e, 0xe0, 0xea, + 0xcb, 0x63, 0x6f, 0x61, 0xea, 0xcb, 0xab, 0xfb, 0x51, 0x46, 0xf5, 0x3c, 0x9f, 0xe2, 0x44, 0x2c, + 0x42, 0xeb, 0xee, 0xdb, 0x25, 0xcb, 0x44, 0x3f, 0x03, 0x6e, 0x2e, 0x08, 0xff, 0xd8, 0xbe, 0x61, + 0x55, 0x4e, 0x3d, 0xe3, 0x1c, 0x7c, 0x05, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x2a, 0xf0, 0x4e, 0xaa, + 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network.pb.go new file mode 100644 index 000000000..9fbff19ba --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network.pb.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/vpc/v1/network.proto + +package vpc // import "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Network resource. For more information, see [Networks](/docs/vpc/concepts/network). +type Network struct { + // ID of the network. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the network belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the network. + // The name is unique within the folder. 3-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Optional description of the network. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Мaximum of 64 per resource. 
+ Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Network) Reset() { *m = Network{} } +func (m *Network) String() string { return proto.CompactTextString(m) } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { + return fileDescriptor_network_b5763e43292627a3, []int{0} +} +func (m *Network) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Network.Unmarshal(m, b) +} +func (m *Network) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Network.Marshal(b, m, deterministic) +} +func (dst *Network) XXX_Merge(src proto.Message) { + xxx_messageInfo_Network.Merge(dst, src) +} +func (m *Network) XXX_Size() int { + return xxx_messageInfo_Network.Size(m) +} +func (m *Network) XXX_DiscardUnknown() { + xxx_messageInfo_Network.DiscardUnknown(m) +} + +var xxx_messageInfo_Network proto.InternalMessageInfo + +func (m *Network) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Network) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Network) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Network) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Network) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Network) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Network)(nil), "yandex.cloud.vpc.v1.Network") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.Network.LabelsEntry") +} + +func init() { + proto.RegisterFile("yandex/cloud/vpc/v1/network.proto", fileDescriptor_network_b5763e43292627a3) +} + +var fileDescriptor_network_b5763e43292627a3 = []byte{ + // 314 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xf3, 0x40, + 0x10, 0xc6, 0x49, 0xfa, 0xe7, 0x7d, 0x3b, 0x01, 0x91, 0xd5, 0x43, 0xa8, 0x07, 0xa3, 0xa7, 0x5c, + 0xba, 0x4b, 0xeb, 0xc5, 0x5a, 0x04, 0x15, 0x3c, 0x08, 0xe2, 0xa1, 0x78, 0xf2, 0x52, 0x36, 0xbb, + 0xd3, 0xb8, 0x34, 0xc9, 0x86, 0x74, 0xb3, 0xda, 0xef, 0xe3, 0x07, 0x15, 0x77, 0x53, 0xe8, 0xa1, + 0xb7, 0x99, 0x67, 0x9e, 0x99, 0x87, 0x1f, 0x03, 0x57, 0x3b, 0x5e, 0x49, 0xfc, 0x66, 0xa2, 0xd0, + 0xad, 0x64, 0xb6, 0x16, 0xcc, 0x4e, 0x59, 0x85, 0xe6, 0x4b, 0x37, 0x1b, 0x5a, 0x37, 0xda, 0x68, + 0x72, 0xe6, 0x2d, 0xd4, 0x59, 0xa8, 0xad, 0x05, 0xb5, 0xd3, 0xf1, 0x65, 0xae, 0x75, 0x5e, 0x20, + 0x73, 0x96, 0xac, 0x5d, 0x33, 0xa3, 0x4a, 0xdc, 0x1a, 0x5e, 0xd6, 0x7e, 0xeb, 0xfa, 0x27, 0x84, + 0x7f, 0x6f, 0xfe, 0x0e, 0x39, 0x81, 0x50, 0xc9, 0x38, 0x48, 0x82, 0x74, 0xb4, 0x0c, 0x95, 0x24, + 0x17, 0x30, 0x5a, 0xeb, 0x42, 0x62, 0xb3, 0x52, 0x32, 0x0e, 0x9d, 0xfc, 0xdf, 0x0b, 0x2f, 0x92, + 0xcc, 0x01, 0x44, 0x83, 0xdc, 0xa0, 0x5c, 0x71, 0x13, 0xf7, 0x92, 0x20, 0x8d, 0x66, 0x63, 0xea, + 0xe3, 0xe8, 0x3e, 0x8e, 0xbe, 0xef, 0xe3, 0x96, 0xa3, 0xce, 0xfd, 0x68, 0x08, 0x81, 0x7e, 0xc5, + 0x4b, 0x8c, 0xfb, 0xee, 0xa4, 0xab, 0x49, 0x02, 0x91, 0xc4, 0xad, 0x68, 0x54, 0x6d, 0x94, 0xae, + 0xe2, 0x81, 0x1b, 0x1d, 0x4a, 0xe4, 0x01, 0x86, 0x05, 0xcf, 0xb0, 0xd8, 0xc6, 0xc3, 0xa4, 0x97, + 0x46, 0xb3, 0x94, 0x1e, 0x01, 
0xa6, 0x1d, 0x0b, 0x7d, 0x75, 0xd6, 0xe7, 0xca, 0x34, 0xbb, 0x65, + 0xb7, 0x37, 0x9e, 0x43, 0x74, 0x20, 0x93, 0x53, 0xe8, 0x6d, 0x70, 0xd7, 0xf1, 0xfe, 0x95, 0xe4, + 0x1c, 0x06, 0x96, 0x17, 0x2d, 0x76, 0xb0, 0xbe, 0xb9, 0x0b, 0x6f, 0x83, 0xa7, 0xfb, 0x8f, 0x45, + 0xae, 0xcc, 0x67, 0x9b, 0x51, 0xa1, 0x4b, 0xe6, 0x83, 0x27, 0xfe, 0x19, 0xb9, 0x9e, 0xe4, 0x58, + 0x39, 0x62, 0x76, 0xe4, 0x4b, 0x0b, 0x5b, 0x8b, 0x6c, 0xe8, 0xc6, 0x37, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x16, 0x76, 0x18, 0x3b, 0xc7, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network_service.pb.go new file mode 100644 index 000000000..410493f2c --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/network_service.pb.go @@ -0,0 +1,1109 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/vpc/v1/network_service.proto + +package vpc // import "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetNetworkRequest struct { + // ID of the Network resource to return. + // To get the network ID, use a [NetworkService.List] request. 
+ NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNetworkRequest) Reset() { *m = GetNetworkRequest{} } +func (m *GetNetworkRequest) String() string { return proto.CompactTextString(m) } +func (*GetNetworkRequest) ProtoMessage() {} +func (*GetNetworkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{0} +} +func (m *GetNetworkRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNetworkRequest.Unmarshal(m, b) +} +func (m *GetNetworkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNetworkRequest.Marshal(b, m, deterministic) +} +func (dst *GetNetworkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNetworkRequest.Merge(dst, src) +} +func (m *GetNetworkRequest) XXX_Size() int { + return xxx_messageInfo_GetNetworkRequest.Size(m) +} +func (m *GetNetworkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNetworkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNetworkRequest proto.InternalMessageInfo + +func (m *GetNetworkRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type ListNetworksRequest struct { + // ID of the folder to list networks in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListNetworksResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListNetworksResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on the [Network.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]$`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNetworksRequest) Reset() { *m = ListNetworksRequest{} } +func (m *ListNetworksRequest) String() string { return proto.CompactTextString(m) } +func (*ListNetworksRequest) ProtoMessage() {} +func (*ListNetworksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{1} +} +func (m *ListNetworksRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNetworksRequest.Unmarshal(m, b) +} +func (m *ListNetworksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNetworksRequest.Marshal(b, m, deterministic) +} +func (dst *ListNetworksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNetworksRequest.Merge(dst, src) +} +func (m *ListNetworksRequest) XXX_Size() int { + return xxx_messageInfo_ListNetworksRequest.Size(m) +} +func (m *ListNetworksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNetworksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNetworksRequest proto.InternalMessageInfo + +func (m *ListNetworksRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListNetworksRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNetworksRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListNetworksRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListNetworksResponse struct { + // List of Network resources. + Networks []*Network `protobuf:"bytes,1,rep,name=networks,proto3" json:"networks,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListNetworksRequest.page_size], use + // the [next_page_token] as the value + // for the [ListNetworksRequest.page_token] query parameter + // in the next list request. Subsequent list requests will have their own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNetworksResponse) Reset() { *m = ListNetworksResponse{} } +func (m *ListNetworksResponse) String() string { return proto.CompactTextString(m) } +func (*ListNetworksResponse) ProtoMessage() {} +func (*ListNetworksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{2} +} +func (m *ListNetworksResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNetworksResponse.Unmarshal(m, b) +} +func (m *ListNetworksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNetworksResponse.Marshal(b, m, deterministic) +} +func (dst *ListNetworksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNetworksResponse.Merge(dst, src) +} +func (m *ListNetworksResponse) XXX_Size() int { + return xxx_messageInfo_ListNetworksResponse.Size(m) +} +func (m *ListNetworksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNetworksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNetworksResponse proto.InternalMessageInfo + +func (m *ListNetworksResponse) GetNetworks() []*Network { + if m != nil { + return m.Networks + } + return nil +} + +func (m *ListNetworksResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateNetworkRequest struct { + // ID of the folder for this request to create a network in. + // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the network. + // The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the network. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. 
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} } +func (m *CreateNetworkRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNetworkRequest) ProtoMessage() {} +func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{3} +} +func (m *CreateNetworkRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNetworkRequest.Unmarshal(m, b) +} +func (m *CreateNetworkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNetworkRequest.Marshal(b, m, deterministic) +} +func (dst *CreateNetworkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNetworkRequest.Merge(dst, src) +} +func (m *CreateNetworkRequest) XXX_Size() int { + return xxx_messageInfo_CreateNetworkRequest.Size(m) +} +func (m *CreateNetworkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNetworkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNetworkRequest proto.InternalMessageInfo + +func (m *CreateNetworkRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateNetworkRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateNetworkRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateNetworkRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type CreateNetworkMetadata struct { + // ID of the Network that is being created. + NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNetworkMetadata) Reset() { *m = CreateNetworkMetadata{} } +func (m *CreateNetworkMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateNetworkMetadata) ProtoMessage() {} +func (*CreateNetworkMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{4} +} +func (m *CreateNetworkMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNetworkMetadata.Unmarshal(m, b) +} +func (m *CreateNetworkMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNetworkMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateNetworkMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNetworkMetadata.Merge(dst, src) +} +func (m *CreateNetworkMetadata) XXX_Size() int { + return xxx_messageInfo_CreateNetworkMetadata.Size(m) +} +func (m *CreateNetworkMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNetworkMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNetworkMetadata proto.InternalMessageInfo + +func (m *CreateNetworkMetadata) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type UpdateNetworkRequest struct { + // ID of the Network resource to update. + // To get the network ID use a [NetworkService.List] request. 
+ NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Field mask that specifies which fields of the Network resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the network. + // The name must be unique within the folder. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the network. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateNetworkRequest) Reset() { *m = UpdateNetworkRequest{} } +func (m *UpdateNetworkRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNetworkRequest) ProtoMessage() {} +func (*UpdateNetworkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{5} +} +func (m *UpdateNetworkRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateNetworkRequest.Unmarshal(m, b) +} +func (m *UpdateNetworkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateNetworkRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateNetworkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateNetworkRequest.Merge(dst, src) +} +func (m *UpdateNetworkRequest) XXX_Size() int { + return xxx_messageInfo_UpdateNetworkRequest.Size(m) +} +func (m *UpdateNetworkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateNetworkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateNetworkRequest proto.InternalMessageInfo + +func (m *UpdateNetworkRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *UpdateNetworkRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateNetworkRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateNetworkRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateNetworkRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type UpdateNetworkMetadata struct { + // ID of the Network resource that is being updated. 
+ NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateNetworkMetadata) Reset() { *m = UpdateNetworkMetadata{} } +func (m *UpdateNetworkMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateNetworkMetadata) ProtoMessage() {} +func (*UpdateNetworkMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{6} +} +func (m *UpdateNetworkMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateNetworkMetadata.Unmarshal(m, b) +} +func (m *UpdateNetworkMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateNetworkMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateNetworkMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateNetworkMetadata.Merge(dst, src) +} +func (m *UpdateNetworkMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateNetworkMetadata.Size(m) +} +func (m *UpdateNetworkMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateNetworkMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateNetworkMetadata proto.InternalMessageInfo + +func (m *UpdateNetworkMetadata) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type DeleteNetworkRequest struct { + // ID of the Network resource to delete. + // To get the network ID, use a [NetworkService.List] request. + NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNetworkRequest) Reset() { *m = DeleteNetworkRequest{} } +func (m *DeleteNetworkRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNetworkRequest) ProtoMessage() {} +func (*DeleteNetworkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{7} +} +func (m *DeleteNetworkRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNetworkRequest.Unmarshal(m, b) +} +func (m *DeleteNetworkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNetworkRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteNetworkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNetworkRequest.Merge(dst, src) +} +func (m *DeleteNetworkRequest) XXX_Size() int { + return xxx_messageInfo_DeleteNetworkRequest.Size(m) +} +func (m *DeleteNetworkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNetworkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNetworkRequest proto.InternalMessageInfo + +func (m *DeleteNetworkRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type DeleteNetworkMetadata struct { + // ID of the network that is being deleted.
+ NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNetworkMetadata) Reset() { *m = DeleteNetworkMetadata{} } +func (m *DeleteNetworkMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteNetworkMetadata) ProtoMessage() {} +func (*DeleteNetworkMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{8} +} +func (m *DeleteNetworkMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNetworkMetadata.Unmarshal(m, b) +} +func (m *DeleteNetworkMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNetworkMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteNetworkMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNetworkMetadata.Merge(dst, src) +} +func (m *DeleteNetworkMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteNetworkMetadata.Size(m) +} +func (m *DeleteNetworkMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNetworkMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNetworkMetadata proto.InternalMessageInfo + +func (m *DeleteNetworkMetadata) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +type ListNetworkSubnetsRequest struct { + // ID of the Network resource to list subnets for. + NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // The maximum number of results per page that should be returned. If the number of available + // results is larger than [page_size], + // the service returns a [ListNetworkSubnetsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. Set [page_token] + // to the [ListNetworkSubnetsResponse.next_page_token] + // returned by a previous list request to get the next page of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNetworkSubnetsRequest) Reset() { *m = ListNetworkSubnetsRequest{} } +func (m *ListNetworkSubnetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNetworkSubnetsRequest) ProtoMessage() {} +func (*ListNetworkSubnetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{9} +} +func (m *ListNetworkSubnetsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNetworkSubnetsRequest.Unmarshal(m, b) +} +func (m *ListNetworkSubnetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNetworkSubnetsRequest.Marshal(b, m, deterministic) +} +func (dst *ListNetworkSubnetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNetworkSubnetsRequest.Merge(dst, src) +} +func (m *ListNetworkSubnetsRequest) XXX_Size() int { + return xxx_messageInfo_ListNetworkSubnetsRequest.Size(m) +} +func (m *ListNetworkSubnetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNetworkSubnetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNetworkSubnetsRequest proto.InternalMessageInfo + +func (m *ListNetworkSubnetsRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *ListNetworkSubnetsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNetworkSubnetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListNetworkSubnetsResponse struct { + // List of subnets that belong to the network which is specified in the request. + Subnets []*Subnet `protobuf:"bytes,1,rep,name=subnets,proto3" json:"subnets,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListNetworkSubnetsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListNetworkSubnetsRequest.page_token] query parameter + // in the next list request. Subsequent list requests will have their own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNetworkSubnetsResponse) Reset() { *m = ListNetworkSubnetsResponse{} } +func (m *ListNetworkSubnetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNetworkSubnetsResponse) ProtoMessage() {} +func (*ListNetworkSubnetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{10} +} +func (m *ListNetworkSubnetsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNetworkSubnetsResponse.Unmarshal(m, b) +} +func (m *ListNetworkSubnetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNetworkSubnetsResponse.Marshal(b, m, deterministic) +} +func (dst *ListNetworkSubnetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNetworkSubnetsResponse.Merge(dst, src) +} +func (m *ListNetworkSubnetsResponse) XXX_Size() int { + return xxx_messageInfo_ListNetworkSubnetsResponse.Size(m) +} +func (m *ListNetworkSubnetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNetworkSubnetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNetworkSubnetsResponse proto.InternalMessageInfo + +func (m *ListNetworkSubnetsResponse) GetSubnets() []*Subnet { + if m != nil { + return m.Subnets + } + return nil +} + +func (m *ListNetworkSubnetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type ListNetworkOperationsRequest struct { + // ID of the Network resource to list operations for. + NetworkId string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // The maximum number of results per page that should be returned. If the number of available + // results is larger than [page_size], the service returns a [ListNetworkOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListNetworkOperationsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNetworkOperationsRequest) Reset() { *m = ListNetworkOperationsRequest{} } +func (m *ListNetworkOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNetworkOperationsRequest) ProtoMessage() {} +func (*ListNetworkOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{11} +} +func (m *ListNetworkOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNetworkOperationsRequest.Unmarshal(m, b) +} +func (m *ListNetworkOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNetworkOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListNetworkOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNetworkOperationsRequest.Merge(dst, src) +} +func (m *ListNetworkOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListNetworkOperationsRequest.Size(m) +} +func (m *ListNetworkOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNetworkOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNetworkOperationsRequest proto.InternalMessageInfo + +func (m *ListNetworkOperationsRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *ListNetworkOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNetworkOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListNetworkOperationsResponse struct { + // List of operations for the specified network. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListNetworkOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListNetworkOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNetworkOperationsResponse) Reset() { *m = ListNetworkOperationsResponse{} } +func (m *ListNetworkOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNetworkOperationsResponse) ProtoMessage() {} +func (*ListNetworkOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_network_service_ee3168d16ae63f82, []int{12} +} +func (m *ListNetworkOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNetworkOperationsResponse.Unmarshal(m, b) +} +func (m *ListNetworkOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNetworkOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListNetworkOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNetworkOperationsResponse.Merge(dst, src) +} +func (m *ListNetworkOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListNetworkOperationsResponse.Size(m) +} +func (m *ListNetworkOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNetworkOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNetworkOperationsResponse proto.InternalMessageInfo + +func (m *ListNetworkOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListNetworkOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetNetworkRequest)(nil), "yandex.cloud.vpc.v1.GetNetworkRequest") + proto.RegisterType((*ListNetworksRequest)(nil), "yandex.cloud.vpc.v1.ListNetworksRequest") + proto.RegisterType((*ListNetworksResponse)(nil), "yandex.cloud.vpc.v1.ListNetworksResponse") + proto.RegisterType((*CreateNetworkRequest)(nil), "yandex.cloud.vpc.v1.CreateNetworkRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.CreateNetworkRequest.LabelsEntry") + proto.RegisterType((*CreateNetworkMetadata)(nil), "yandex.cloud.vpc.v1.CreateNetworkMetadata") + proto.RegisterType((*UpdateNetworkRequest)(nil), "yandex.cloud.vpc.v1.UpdateNetworkRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.UpdateNetworkRequest.LabelsEntry") + proto.RegisterType((*UpdateNetworkMetadata)(nil), "yandex.cloud.vpc.v1.UpdateNetworkMetadata") + proto.RegisterType((*DeleteNetworkRequest)(nil), "yandex.cloud.vpc.v1.DeleteNetworkRequest") + proto.RegisterType((*DeleteNetworkMetadata)(nil), "yandex.cloud.vpc.v1.DeleteNetworkMetadata") + proto.RegisterType((*ListNetworkSubnetsRequest)(nil), "yandex.cloud.vpc.v1.ListNetworkSubnetsRequest") + proto.RegisterType((*ListNetworkSubnetsResponse)(nil), "yandex.cloud.vpc.v1.ListNetworkSubnetsResponse") + proto.RegisterType((*ListNetworkOperationsRequest)(nil), "yandex.cloud.vpc.v1.ListNetworkOperationsRequest") + proto.RegisterType((*ListNetworkOperationsResponse)(nil), "yandex.cloud.vpc.v1.ListNetworkOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// NetworkServiceClient is the client API for NetworkService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NetworkServiceClient interface { + // Returns the specified Network resource. + // + // Get the list of available Network resources by making a [List] request. + Get(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*Network, error) + // Retrieves the list of Network resources in the specified folder. + List(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) + // Creates a network in the specified folder using the data specified in the request. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified network. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Update(ctx context.Context, in *UpdateNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified network. + Delete(ctx context.Context, in *DeleteNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Lists subnets from the specified network. + ListSubnets(ctx context.Context, in *ListNetworkSubnetsRequest, opts ...grpc.CallOption) (*ListNetworkSubnetsResponse, error) + // Lists operations for the specified network. + ListOperations(ctx context.Context, in *ListNetworkOperationsRequest, opts ...grpc.CallOption) (*ListNetworkOperationsResponse, error) +} + +type networkServiceClient struct { + cc *grpc.ClientConn +} + +func NewNetworkServiceClient(cc *grpc.ClientConn) NetworkServiceClient { + return &networkServiceClient{cc} +} + +func (c *networkServiceClient) Get(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*Network, error) { + out := new(Network) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *networkServiceClient) List(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { + out := new(ListNetworksResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *networkServiceClient) Create(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *networkServiceClient) Update(ctx context.Context, in *UpdateNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *networkServiceClient) Delete(ctx context.Context, in *DeleteNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/Delete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *networkServiceClient) ListSubnets(ctx context.Context, in *ListNetworkSubnetsRequest, opts ...grpc.CallOption) (*ListNetworkSubnetsResponse, error) { + out := new(ListNetworkSubnetsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/ListSubnets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *networkServiceClient) ListOperations(ctx context.Context, in *ListNetworkOperationsRequest, opts ...grpc.CallOption) (*ListNetworkOperationsResponse, error) { + out := new(ListNetworkOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.NetworkService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NetworkServiceServer is the server API for NetworkService service. +type NetworkServiceServer interface { + // Returns the specified Network resource. + // + // Get the list of available Network resources by making a [List] request. + Get(context.Context, *GetNetworkRequest) (*Network, error) + // Retrieves the list of Network resources in the specified folder. + List(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) + // Creates a network in the specified folder using the data specified in the request. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(context.Context, *CreateNetworkRequest) (*operation.Operation, error) + // Updates the specified network. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Update(context.Context, *UpdateNetworkRequest) (*operation.Operation, error) + // Deletes the specified network. + Delete(context.Context, *DeleteNetworkRequest) (*operation.Operation, error) + // Lists subnets from the specified network. + ListSubnets(context.Context, *ListNetworkSubnetsRequest) (*ListNetworkSubnetsResponse, error) + // Lists operations for the specified network. 
+ ListOperations(context.Context, *ListNetworkOperationsRequest) (*ListNetworkOperationsResponse, error) +} + +func RegisterNetworkServiceServer(s *grpc.Server, srv NetworkServiceServer) { + s.RegisterService(&_NetworkService_serviceDesc, srv) +} + +func _NetworkService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).Get(ctx, req.(*GetNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetworkService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).List(ctx, req.(*ListNetworksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetworkService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).Create(ctx, req.(*CreateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetworkService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).Update(ctx, req.(*UpdateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetworkService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).Delete(ctx, req.(*DeleteNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_NetworkService_ListSubnets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworkSubnetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).ListSubnets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/ListSubnets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).ListSubnets(ctx, req.(*ListNetworkSubnetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetworkService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworkOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetworkServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.NetworkService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetworkServiceServer).ListOperations(ctx, req.(*ListNetworkOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NetworkService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.vpc.v1.NetworkService", + HandlerType: (*NetworkServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _NetworkService_Get_Handler, + }, + { + MethodName: "List", + Handler: _NetworkService_List_Handler, + }, + { + MethodName: "Create", + Handler: _NetworkService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _NetworkService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _NetworkService_Delete_Handler, + }, + { + MethodName: "ListSubnets", + Handler: _NetworkService_ListSubnets_Handler, + }, + { + MethodName: "ListOperations", + Handler: _NetworkService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/vpc/v1/network_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/vpc/v1/network_service.proto", fileDescriptor_network_service_ee3168d16ae63f82) +} + +var fileDescriptor_network_service_ee3168d16ae63f82 = []byte{ + // 1041 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcf, 0x6f, 0xdb, 0x64, + 0x18, 0x96, 0x9b, 0x34, 0x6b, 0xde, 0xc0, 0x28, 0xdf, 0x52, 0x29, 0x98, 0x46, 0xa4, 0x96, 0xd6, + 0xa5, 0x19, 0xb6, 0xe3, 0x8c, 0x44, 0xeb, 0xba, 0x6a, 0x90, 0x31, 0xa6, 0x49, 0x1d, 0x20, 0x0f, + 0x84, 0x44, 0x35, 0x45, 0x4e, 0xfc, 0x35, 0x58, 0x71, 0x6d, 0x63, 0x3b, 0xa1, 0x3f, 0xd8, 0x65, + 0xc7, 0x1e, 0xb8, 0x70, 0xe0, 0x80, 0xe0, 0xc4, 0x85, 0x13, 0xa8, 0x17, 0x24, 0xfe, 0x81, 0xf6, + 0x3c, 0xfe, 0x05, 0x0e, 0x5c, 0xd9, 0x91, 0x13, 0xf2, 0xf7, 0x7d, 0x4e, 0xe3, 0xc6, 0x49, 0x5c, + 0xc6, 0x61, 0x37, 0xbb, 0xef, 0xf3, 0xbd, 0xef, 0xf3, 0x3d, 0xef, 0xdb, 0xe7, 0x75, 0x60, 0x6d, + 0x5f, 0xb3, 0x74, 0xbc, 0x27, 0x77, 0x4c, 0xbb, 0xaf, 0xcb, 0x03, 0xa7, 0x23, 0x0f, 0x14, 0xd9, + 0xc2, 0xfe, 0x57, 0xb6, 0xdb, 0x6b, 0x79, 0xd8, 0x1d, 0x18, 0x1d, 0x2c, 0x39, 0xae, 0xed, 0xdb, + 0xe8, 0x0a, 0x85, 0x4a, 0x04, 0x2a, 0x0d, 0x9c, 0x8e, 0x34, 0x50, 0xf8, 0xe5, 0xae, 0x6d, 0x77, + 0x4d, 0x2c, 0x6b, 0x8e, 0x21, 0x6b, 0x96, 0x65, 0xfb, 0x9a, 0x6f, 0xd8, 0x96, 0x47, 0x8f, 0xf0, + 
0x25, 0x16, 0x25, 0x6f, 0xed, 0xfe, 0x8e, 0xbc, 0x63, 0x60, 0x53, 0x6f, 0xed, 0x6a, 0x5e, 0x8f, + 0x21, 0x78, 0x56, 0x3f, 0x38, 0x6f, 0x3b, 0xd8, 0x25, 0xc7, 0x59, 0x6c, 0x65, 0x0a, 0xb7, 0xb0, + 0x40, 0x1c, 0xc4, 0xeb, 0xb7, 0x2d, 0xec, 0x33, 0xc4, 0x6a, 0x04, 0x31, 0x2c, 0x31, 0x56, 0xac, + 0x18, 0xcd, 0xa4, 0x99, 0x86, 0x3e, 0x12, 0x16, 0xde, 0x85, 0xd7, 0xef, 0x63, 0xff, 0x43, 0x5a, + 0x5c, 0xc5, 0x5f, 0xf6, 0xb1, 0xe7, 0xa3, 0xeb, 0x00, 0xa1, 0x54, 0x86, 0x5e, 0xe0, 0x4a, 0x5c, + 0x39, 0xdb, 0x7c, 0xe5, 0xaf, 0x13, 0x85, 0x3b, 0x3a, 0x55, 0xd2, 0xb7, 0x37, 0xeb, 0x55, 0x35, + 0xcb, 0xe2, 0x0f, 0x74, 0xe1, 0x37, 0x0e, 0xae, 0x6c, 0x19, 0x5e, 0x98, 0xc3, 0x0b, 0x93, 0xac, + 0x41, 0x76, 0xc7, 0x36, 0x75, 0xec, 0x4e, 0xca, 0xb1, 0x40, 0xc3, 0x0f, 0x74, 0x74, 0x0d, 0xb2, + 0x8e, 0xd6, 0xc5, 0x2d, 0xcf, 0x38, 0xc0, 0x85, 0xb9, 0x12, 0x57, 0x4e, 0x35, 0xe1, 0x9f, 0x13, + 0x25, 0x73, 0x7b, 0x53, 0xa9, 0x56, 0xab, 0xea, 0x42, 0x10, 0x7c, 0x64, 0x1c, 0x60, 0x54, 0x06, + 0x20, 0x40, 0xdf, 0xee, 0x61, 0xab, 0x90, 0x22, 0x49, 0xb3, 0x47, 0xa7, 0xca, 0x3c, 0x41, 0xaa, + 0x24, 0xcb, 0x27, 0x41, 0x0c, 0x09, 0x90, 0xd9, 0x31, 0x4c, 0x1f, 0xbb, 0x85, 0x34, 0x41, 0xc1, + 0xd1, 0xe9, 0x30, 0x1f, 0x8b, 0x08, 0x7b, 0x90, 0x8f, 0x12, 0xf7, 0x1c, 0xdb, 0xf2, 0x30, 0xba, + 0x09, 0x0b, 0xec, 0x7a, 0x5e, 0x81, 0x2b, 0xa5, 0xca, 0xb9, 0xda, 0xb2, 0x14, 0x33, 0x23, 0x52, + 0xa8, 0xda, 0x10, 0x8d, 0x56, 0xe1, 0x35, 0x0b, 0xef, 0xf9, 0xad, 0x11, 0x92, 0xc1, 0x75, 0xb2, + 0xea, 0xab, 0xc1, 0x9f, 0x3f, 0x0e, 0xd9, 0x09, 0x7f, 0xcf, 0x41, 0xfe, 0xae, 0x8b, 0x35, 0x1f, + 0x9f, 0x53, 0xfe, 0x02, 0xa2, 0xd5, 0x21, 0x6d, 0x69, 0xbb, 0x54, 0xaf, 0x6c, 0x73, 0xe5, 0xf9, + 0x89, 0x52, 0xfc, 0x7a, 0x5b, 0x13, 0x0f, 0x1e, 0x6f, 0x8b, 0x9a, 0x78, 0x50, 0x15, 0xd7, 0x1f, + 0x1f, 0x2a, 0x6f, 0x37, 0x94, 0x27, 0xdb, 0xec, 0x4d, 0x25, 0x70, 0x74, 0x1d, 0x72, 0x3a, 0xf6, + 0x3a, 0xae, 0xe1, 0x04, 0x53, 0x10, 0xd5, 0xb0, 0x56, 0x6f, 0xa8, 0xa3, 0x51, 0xf4, 0x0d, 0x07, + 0x19, 0x53, 0x6b, 0x63, 0xd3, 0x2b, 0xa4, 0x89, 0x10, 0xf5, 0x58, 0x21, 0xe2, 0xae, 0x22, 0x6d, + 0x91, 0x73, 0xf7, 0x2c, 0xdf, 0xdd, 0x6f, 0xde, 0x79, 0x7e, 0xa2, 0xe4, 0xb6, 0xc5, 0x56, 0x55, + 0x5c, 0x0f, 0x18, 0x56, 0x9e, 0x92, 0xeb, 0x34, 0xde, 0xa1, 0xd7, 0x6a, 0xdc, 0x38, 0x3e, 0x55, + 0x32, 0x7c, 0x5a, 0x11, 0xc9, 0x13, 0x42, 0x8b, 0xec, 0x1e, 0x43, 0xbc, 0xca, 0x58, 0xf0, 0xeb, + 0x90, 0x1b, 0xc9, 0x8b, 0x16, 0x21, 0xd5, 0xc3, 0xfb, 0x54, 0x28, 0x35, 0x78, 0x44, 0x79, 0x98, + 0x1f, 0x68, 0x66, 0x9f, 0xc9, 0xa2, 0xd2, 0x97, 0x5b, 0x73, 0x37, 0x39, 0xa1, 0x01, 0x4b, 0x11, + 0x9e, 0x0f, 0xb1, 0xaf, 0xe9, 0x9a, 0xaf, 0xa1, 0xe2, 0xf8, 0xb4, 0x8f, 0xce, 0xf7, 0xaf, 0x29, + 0xc8, 0x7f, 0xea, 0xe8, 0xe3, 0xbd, 0xba, 0xc8, 0x7f, 0x09, 0xda, 0x80, 0x5c, 0x9f, 0x24, 0x21, + 0x26, 0x41, 0xd8, 0xe5, 0x6a, 0xbc, 0x44, 0x7d, 0x44, 0x0a, 0x7d, 0x44, 0xfa, 0x20, 0xf0, 0x91, + 0x87, 0x9a, 0xd7, 0x53, 0x81, 0xc2, 0x83, 0xe7, 0x61, 0xab, 0x53, 0x2f, 0xd4, 0xea, 0x74, 0xd2, + 0x56, 0xcf, 0x4f, 0x69, 0x75, 0x9c, 0x12, 0x2f, 0x63, 0xab, 0x23, 0x3c, 0x93, 0xb6, 0xfa, 0x2e, + 0xe4, 0xdf, 0xc7, 0x26, 0x7e, 0xa1, 0x4e, 0x07, 0xc5, 0x23, 0x49, 0x92, 0x16, 0xff, 0x9e, 0x83, + 0x37, 0x46, 0xec, 0xe8, 0x11, 0x31, 0x7b, 0xef, 0x3f, 0x0d, 0xdb, 0xff, 0xef, 0xa7, 0xc2, 0x21, + 0xf0, 0x71, 0xe4, 0x98, 0x63, 0xd6, 0xe1, 0x12, 0x5d, 0x4e, 0xa1, 0x61, 0xbe, 0x19, 0x3b, 0x3c, + 0xf4, 0x98, 0x1a, 0x62, 0x13, 0xdb, 0xe5, 0x8f, 0x1c, 0x2c, 0x8f, 0x54, 0xff, 0x28, 0x5c, 0x71, + 0x2f, 0x8d, 0x3a, 0x47, 0x1c, 0x14, 0x27, 0x10, 0x64, 0x0a, 0xbd, 0x07, 0x30, 0xdc, 0xcc, 0xa1, + 0x48, 0x2b, 0x51, 0x91, 
0xce, 0x36, 0xf7, 0xf0, 0xbc, 0x3a, 0x72, 0x28, 0xa9, 0x5a, 0xb5, 0xdf, + 0x17, 0xe0, 0x72, 0xd8, 0x27, 0xfa, 0xa1, 0x83, 0x5c, 0x48, 0xdd, 0xc7, 0x3e, 0x5a, 0x8d, 0xed, + 0xca, 0xd8, 0xfe, 0xe7, 0xa7, 0xae, 0x3b, 0xe1, 0xea, 0xd3, 0x3f, 0xfe, 0xfc, 0x76, 0xee, 0x2d, + 0x54, 0x3c, 0xf7, 0xe9, 0xe2, 0xc9, 0x87, 0x67, 0x4d, 0x78, 0x82, 0xfa, 0x90, 0x0e, 0x24, 0x41, + 0xe5, 0xd8, 0x64, 0x31, 0x5f, 0x0c, 0xfc, 0x5a, 0x02, 0x24, 0x95, 0x53, 0x28, 0x10, 0x0e, 0x08, + 0x2d, 0x9e, 0xe7, 0x80, 0xbe, 0xe3, 0x20, 0x43, 0x7d, 0x1e, 0xad, 0x25, 0x5e, 0x56, 0xfc, 0xec, + 0x56, 0x08, 0x77, 0x8e, 0x9f, 0x55, 0x4a, 0x93, 0x76, 0xc8, 0x25, 0xf6, 0x07, 0x42, 0x6b, 0x49, + 0x18, 0xa3, 0x75, 0x8b, 0xab, 0xa0, 0x9f, 0x38, 0xc8, 0x50, 0x5b, 0x9a, 0xc0, 0x2c, 0xce, 0x5b, + 0x93, 0x30, 0xdb, 0xa2, 0xcc, 0xe2, 0x2d, 0x2f, 0xc2, 0x4c, 0xa8, 0x4d, 0x6f, 0x5a, 0x40, 0xf3, + 0x17, 0x0e, 0x32, 0xd4, 0xc0, 0x26, 0xd0, 0x8c, 0xb3, 0xc8, 0x24, 0x34, 0x3f, 0x3b, 0x7e, 0x56, + 0x91, 0x26, 0x99, 0xe3, 0xd2, 0xf9, 0x4d, 0x78, 0x6f, 0xd7, 0xf1, 0xf7, 0xe9, 0xa4, 0x55, 0x66, + 0x4c, 0xda, 0x0f, 0x1c, 0xe4, 0x82, 0x29, 0x61, 0xae, 0x84, 0xa4, 0x59, 0x73, 0x14, 0xf5, 0x56, + 0x5e, 0x4e, 0x8c, 0x67, 0xd3, 0x27, 0x12, 0x5e, 0xd7, 0xd0, 0xd5, 0xa9, 0xbc, 0xe4, 0xd0, 0xe6, + 0x7e, 0xe6, 0xe0, 0x72, 0x90, 0xed, 0xcc, 0x16, 0x90, 0x32, 0xab, 0xe4, 0x98, 0xc7, 0xf1, 0xb5, + 0x8b, 0x1c, 0x61, 0x44, 0xab, 0x84, 0x68, 0x05, 0x95, 0xa7, 0x13, 0x3d, 0x33, 0x99, 0xe6, 0xe6, + 0xe7, 0x1b, 0x5d, 0xc3, 0xff, 0xa2, 0xdf, 0x96, 0x3a, 0xf6, 0xae, 0x4c, 0x2b, 0x8a, 0xf4, 0xb7, + 0x43, 0xd7, 0x16, 0xbb, 0xd8, 0x22, 0x0d, 0x92, 0x63, 0x7e, 0x9e, 0x6c, 0x0c, 0x9c, 0x4e, 0x3b, + 0x43, 0xc2, 0x37, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xae, 0xe7, 0x44, 0x00, 0x7f, 0x0d, 0x00, + 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table.pb.go new file mode 100644 index 000000000..8296f0e09 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table.pb.go @@ -0,0 +1,328 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/vpc/v1/route_table.proto + +package vpc // import "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A RouteTable resource. For more information, see [RouteTables](/docs/vpc/concepts/route_tables). +type RouteTable struct { + // ID of the route table. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the route table belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the route table. 
The name is unique within the project. 3-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Optional description of the route table. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Мaximum of 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the network the route table belongs to. + NetworkId string `protobuf:"bytes,7,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // List of static routes. + StaticRoutes []*StaticRoute `protobuf:"bytes,8,rep,name=static_routes,json=staticRoutes,proto3" json:"static_routes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RouteTable) Reset() { *m = RouteTable{} } +func (m *RouteTable) String() string { return proto.CompactTextString(m) } +func (*RouteTable) ProtoMessage() {} +func (*RouteTable) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_5b58e1ab44cd12e3, []int{0} +} +func (m *RouteTable) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RouteTable.Unmarshal(m, b) +} +func (m *RouteTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RouteTable.Marshal(b, m, deterministic) +} +func (dst *RouteTable) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteTable.Merge(dst, src) +} +func (m *RouteTable) XXX_Size() int { + return xxx_messageInfo_RouteTable.Size(m) +} +func (m *RouteTable) XXX_DiscardUnknown() { + xxx_messageInfo_RouteTable.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteTable proto.InternalMessageInfo + +func (m *RouteTable) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *RouteTable) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *RouteTable) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *RouteTable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RouteTable) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *RouteTable) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *RouteTable) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *RouteTable) GetStaticRoutes() []*StaticRoute { + if m != nil { + return m.StaticRoutes + } + return nil +} + +// A StaticRoute resource. For more information, see [StaticRoutes](/docs/vpc/concepts/static_routes). 
+type StaticRoute struct { + // Types that are valid to be assigned to Destination: + // *StaticRoute_DestinationPrefix + Destination isStaticRoute_Destination `protobuf_oneof:"destination"` + // Types that are valid to be assigned to NextHop: + // *StaticRoute_NextHopAddress + NextHop isStaticRoute_NextHop `protobuf_oneof:"next_hop"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StaticRoute) Reset() { *m = StaticRoute{} } +func (m *StaticRoute) String() string { return proto.CompactTextString(m) } +func (*StaticRoute) ProtoMessage() {} +func (*StaticRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_5b58e1ab44cd12e3, []int{1} +} +func (m *StaticRoute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StaticRoute.Unmarshal(m, b) +} +func (m *StaticRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StaticRoute.Marshal(b, m, deterministic) +} +func (dst *StaticRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_StaticRoute.Merge(dst, src) +} +func (m *StaticRoute) XXX_Size() int { + return xxx_messageInfo_StaticRoute.Size(m) +} +func (m *StaticRoute) XXX_DiscardUnknown() { + xxx_messageInfo_StaticRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_StaticRoute proto.InternalMessageInfo + +type isStaticRoute_Destination interface { + isStaticRoute_Destination() +} + +type StaticRoute_DestinationPrefix struct { + DestinationPrefix string `protobuf:"bytes,1,opt,name=destination_prefix,json=destinationPrefix,proto3,oneof"` +} + +func (*StaticRoute_DestinationPrefix) isStaticRoute_Destination() {} + +func (m *StaticRoute) GetDestination() isStaticRoute_Destination { + if m != nil { + return m.Destination + } + return nil +} + +func (m *StaticRoute) GetDestinationPrefix() string { + if x, ok := m.GetDestination().(*StaticRoute_DestinationPrefix); ok { + return x.DestinationPrefix + } + return "" +} + +type isStaticRoute_NextHop interface { + isStaticRoute_NextHop() +} + +type StaticRoute_NextHopAddress struct { + NextHopAddress string `protobuf:"bytes,2,opt,name=next_hop_address,json=nextHopAddress,proto3,oneof"` +} + +func (*StaticRoute_NextHopAddress) isStaticRoute_NextHop() {} + +func (m *StaticRoute) GetNextHop() isStaticRoute_NextHop { + if m != nil { + return m.NextHop + } + return nil +} + +func (m *StaticRoute) GetNextHopAddress() string { + if x, ok := m.GetNextHop().(*StaticRoute_NextHopAddress); ok { + return x.NextHopAddress + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StaticRoute) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StaticRoute_OneofMarshaler, _StaticRoute_OneofUnmarshaler, _StaticRoute_OneofSizer, []interface{}{ + (*StaticRoute_DestinationPrefix)(nil), + (*StaticRoute_NextHopAddress)(nil), + } +} + +func _StaticRoute_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StaticRoute) + // destination + switch x := m.Destination.(type) { + case *StaticRoute_DestinationPrefix: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DestinationPrefix) + case nil: + default: + return fmt.Errorf("StaticRoute.Destination has unexpected type %T", x) + } + // next_hop + switch x := m.NextHop.(type) { + case *StaticRoute_NextHopAddress: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.NextHopAddress) + case nil: + default: + return fmt.Errorf("StaticRoute.NextHop has unexpected type %T", x) + } + return nil +} + +func _StaticRoute_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StaticRoute) + switch tag { + case 1: // destination.destination_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Destination = &StaticRoute_DestinationPrefix{x} + return true, err + case 2: // next_hop.next_hop_address + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.NextHop = &StaticRoute_NextHopAddress{x} + return true, err + default: + return false, nil + } +} + +func _StaticRoute_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StaticRoute) + // destination + switch x := m.Destination.(type) { + case *StaticRoute_DestinationPrefix: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DestinationPrefix))) + n += len(x.DestinationPrefix) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // next_hop + switch x := m.NextHop.(type) { + case *StaticRoute_NextHopAddress: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.NextHopAddress))) + n += len(x.NextHopAddress) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*RouteTable)(nil), "yandex.cloud.vpc.v1.RouteTable") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.RouteTable.LabelsEntry") + proto.RegisterType((*StaticRoute)(nil), "yandex.cloud.vpc.v1.StaticRoute") +} + +func init() { + proto.RegisterFile("yandex/cloud/vpc/v1/route_table.proto", fileDescriptor_route_table_5b58e1ab44cd12e3) +} + +var fileDescriptor_route_table_5b58e1ab44cd12e3 = []byte{ + // 436 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x41, 0x6b, 0xdb, 0x30, + 0x18, 0x9d, 0x93, 0x36, 0x4b, 0x3e, 0xaf, 0xa5, 0xd3, 0x76, 0x30, 0x19, 0x63, 0xa6, 0x30, 0x08, + 0x1b, 0x95, 0x69, 0x77, 0x59, 0x57, 0x76, 0x68, 0x46, 0x21, 0x85, 0x1d, 0x86, 0xd7, 0xd3, 0x2e, + 0x46, 0xb6, 0xbe, 0xb8, 0xa2, 0x8e, 0x25, 0x24, 0xd9, 0x4b, 0x7e, 0xc0, 0xee, 0xfb, 0xc9, 0xc5, + 0x92, 0x43, 0x72, 0xc8, 0x4d, 0x7a, 0xef, 0x7d, 0xef, 0xf1, 0x3d, 0x09, 0x3e, 0x6e, 0x58, 0xcd, + 0x71, 0x9d, 0x14, 0x95, 0x6c, 0x78, 0xd2, 0xaa, 0x22, 0x69, 0x2f, 0x13, 0x2d, 0x1b, 0x8b, 0x99, + 0x65, 0x79, 0x85, 0x54, 0x69, 0x69, 0x25, 0x79, 0xe3, 0x65, 0xd4, 0xc9, 0x68, 0xab, 0x0a, 
0xda, + 0x5e, 0x4e, 0x3f, 0x94, 0x52, 0x96, 0x15, 0x26, 0x4e, 0x92, 0x37, 0xcb, 0xc4, 0x8a, 0x15, 0x1a, + 0xcb, 0x56, 0xca, 0x4f, 0x9d, 0xff, 0x1f, 0x02, 0xa4, 0x9d, 0xd7, 0x43, 0x67, 0x45, 0x4e, 0x61, + 0x20, 0x78, 0x14, 0xc4, 0xc1, 0x6c, 0x92, 0x0e, 0x04, 0x27, 0xef, 0x60, 0xb2, 0x94, 0x15, 0x47, + 0x9d, 0x09, 0x1e, 0x0d, 0x1c, 0x3c, 0xf6, 0xc0, 0x3d, 0x27, 0xd7, 0x00, 0x85, 0x46, 0x66, 0x91, + 0x67, 0xcc, 0x46, 0xc3, 0x38, 0x98, 0x85, 0x57, 0x53, 0xea, 0x13, 0xe9, 0x36, 0x91, 0x3e, 0x6c, + 0x13, 0xd3, 0x49, 0xaf, 0xbe, 0xb5, 0x84, 0xc0, 0x51, 0xcd, 0x56, 0x18, 0x1d, 0x39, 0x4b, 0x77, + 0x26, 0x31, 0x84, 0x1c, 0x4d, 0xa1, 0x85, 0xb2, 0x42, 0xd6, 0xd1, 0xb1, 0xa3, 0xf6, 0x21, 0xf2, + 0x03, 0x46, 0x15, 0xcb, 0xb1, 0x32, 0xd1, 0x28, 0x1e, 0xce, 0xc2, 0xab, 0xcf, 0xf4, 0xc0, 0xce, + 0x74, 0xb7, 0x0e, 0xfd, 0xe9, 0xd4, 0x77, 0xb5, 0xd5, 0x9b, 0xb4, 0x1f, 0x25, 0xef, 0x01, 0x6a, + 0xb4, 0x7f, 0xa5, 0x7e, 0xea, 0x76, 0x7a, 0xe9, 0x52, 0x26, 0x3d, 0x72, 0xcf, 0xc9, 0x1d, 0x9c, + 0x18, 0xcb, 0xac, 0x28, 0x32, 0x57, 0xb1, 0x89, 0xc6, 0x2e, 0x2a, 0x3e, 0x18, 0xf5, 0xdb, 0x29, + 0x5d, 0x60, 0xfa, 0xca, 0xec, 0x2e, 0x66, 0x7a, 0x0d, 0xe1, 0x5e, 0x38, 0x39, 0x83, 0xe1, 0x13, + 0x6e, 0xfa, 0x62, 0xbb, 0x23, 0x79, 0x0b, 0xc7, 0x2d, 0xab, 0x1a, 0xec, 0x5b, 0xf5, 0x97, 0x6f, + 0x83, 0xaf, 0xc1, 0xf9, 0xbf, 0x00, 0xc2, 0x3d, 0x63, 0x92, 0x00, 0xe1, 0x68, 0xac, 0xa8, 0x59, + 0x57, 0x42, 0xa6, 0x34, 0x2e, 0xc5, 0xda, 0x5b, 0x2d, 0x5e, 0xa4, 0xaf, 0xf7, 0xb8, 0x5f, 0x8e, + 0x22, 0x9f, 0xe0, 0xac, 0xc6, 0xb5, 0xcd, 0x1e, 0xa5, 0xca, 0x18, 0xe7, 0x1a, 0x8d, 0xf1, 0x29, + 0x8b, 0x20, 0x3d, 0xed, 0x98, 0x85, 0x54, 0xb7, 0x1e, 0x9f, 0x9f, 0xb8, 0xd2, 0xb7, 0x06, 0x73, + 0x80, 0xf1, 0x76, 0x74, 0xfe, 0xfd, 0xcf, 0x4d, 0x29, 0xec, 0x63, 0x93, 0xd3, 0x42, 0xae, 0x12, + 0xbf, 0xfe, 0x85, 0xff, 0x84, 0xa5, 0xbc, 0x28, 0xb1, 0x76, 0x4f, 0x9c, 0x1c, 0xf8, 0x9d, 0x37, + 0xad, 0x2a, 0xf2, 0x91, 0xa3, 0xbf, 0x3c, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x9a, 0x2f, 0x68, + 0xbf, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table_service.pb.go new file mode 100644 index 000000000..d873aabf9 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/route_table_service.pb.go @@ -0,0 +1,982 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/vpc/v1/route_table_service.proto + +package vpc // import "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetRouteTableRequest struct { + // ID of the RouteTable resource to return. + // To get the route table ID use a [RouteTableService.List] request. + RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRouteTableRequest) Reset() { *m = GetRouteTableRequest{} } +func (m *GetRouteTableRequest) String() string { return proto.CompactTextString(m) } +func (*GetRouteTableRequest) ProtoMessage() {} +func (*GetRouteTableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{0} +} +func (m *GetRouteTableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRouteTableRequest.Unmarshal(m, b) +} +func (m *GetRouteTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRouteTableRequest.Marshal(b, m, deterministic) +} +func (dst *GetRouteTableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRouteTableRequest.Merge(dst, src) +} +func (m *GetRouteTableRequest) XXX_Size() int { + return xxx_messageInfo_GetRouteTableRequest.Size(m) +} +func (m *GetRouteTableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRouteTableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRouteTableRequest proto.InternalMessageInfo + +func (m *GetRouteTableRequest) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type ListRouteTablesRequest struct { + // ID of the folder that the route table belongs to. + // To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListRouteTablesResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListRouteTablesResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on [RouteTable.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]$`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListRouteTablesRequest) Reset() { *m = ListRouteTablesRequest{} } +func (m *ListRouteTablesRequest) String() string { return proto.CompactTextString(m) } +func (*ListRouteTablesRequest) ProtoMessage() {} +func (*ListRouteTablesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{1} +} +func (m *ListRouteTablesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListRouteTablesRequest.Unmarshal(m, b) +} +func (m *ListRouteTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListRouteTablesRequest.Marshal(b, m, deterministic) +} +func (dst *ListRouteTablesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListRouteTablesRequest.Merge(dst, src) +} +func (m *ListRouteTablesRequest) XXX_Size() int { + return xxx_messageInfo_ListRouteTablesRequest.Size(m) +} +func (m *ListRouteTablesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListRouteTablesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListRouteTablesRequest proto.InternalMessageInfo + +func (m *ListRouteTablesRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListRouteTablesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListRouteTablesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListRouteTablesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListRouteTablesResponse struct { + // List of RouteTable resources. + RouteTables []*RouteTable `protobuf:"bytes,1,rep,name=route_tables,json=routeTables,proto3" json:"route_tables,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListRouteTablesRequest.page_size], use + // the [next_page_token] as the value + // for the [ListRouteTablesRequest.page_token] query parameter + // in the next list request. Subsequent list requests will have their own + // [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListRouteTablesResponse) Reset() { *m = ListRouteTablesResponse{} } +func (m *ListRouteTablesResponse) String() string { return proto.CompactTextString(m) } +func (*ListRouteTablesResponse) ProtoMessage() {} +func (*ListRouteTablesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{2} +} +func (m *ListRouteTablesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListRouteTablesResponse.Unmarshal(m, b) +} +func (m *ListRouteTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListRouteTablesResponse.Marshal(b, m, deterministic) +} +func (dst *ListRouteTablesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListRouteTablesResponse.Merge(dst, src) +} +func (m *ListRouteTablesResponse) XXX_Size() int { + return xxx_messageInfo_ListRouteTablesResponse.Size(m) +} +func (m *ListRouteTablesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListRouteTablesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListRouteTablesResponse proto.InternalMessageInfo + +func (m *ListRouteTablesResponse) GetRouteTables() []*RouteTable { + if m != nil { + return m.RouteTables + } + return nil +} + +func (m *ListRouteTablesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateRouteTableRequest struct { + // ID of the folder that the route table belongs to. + // To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the route table. + // The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the route table. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels, `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the network the route table belongs to. + NetworkId string `protobuf:"bytes,5,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // List of static routes. 
+ StaticRoutes []*StaticRoute `protobuf:"bytes,6,rep,name=static_routes,json=staticRoutes,proto3" json:"static_routes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateRouteTableRequest) Reset() { *m = CreateRouteTableRequest{} } +func (m *CreateRouteTableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateRouteTableRequest) ProtoMessage() {} +func (*CreateRouteTableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{3} +} +func (m *CreateRouteTableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateRouteTableRequest.Unmarshal(m, b) +} +func (m *CreateRouteTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateRouteTableRequest.Marshal(b, m, deterministic) +} +func (dst *CreateRouteTableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateRouteTableRequest.Merge(dst, src) +} +func (m *CreateRouteTableRequest) XXX_Size() int { + return xxx_messageInfo_CreateRouteTableRequest.Size(m) +} +func (m *CreateRouteTableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateRouteTableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateRouteTableRequest proto.InternalMessageInfo + +func (m *CreateRouteTableRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateRouteTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateRouteTableRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateRouteTableRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateRouteTableRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *CreateRouteTableRequest) GetStaticRoutes() []*StaticRoute { + if m != nil { + return m.StaticRoutes + } + return nil +} + +type CreateRouteTableMetadata struct { + // ID of the route table that is being created. 
+ RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateRouteTableMetadata) Reset() { *m = CreateRouteTableMetadata{} } +func (m *CreateRouteTableMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateRouteTableMetadata) ProtoMessage() {} +func (*CreateRouteTableMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{4} +} +func (m *CreateRouteTableMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateRouteTableMetadata.Unmarshal(m, b) +} +func (m *CreateRouteTableMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateRouteTableMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateRouteTableMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateRouteTableMetadata.Merge(dst, src) +} +func (m *CreateRouteTableMetadata) XXX_Size() int { + return xxx_messageInfo_CreateRouteTableMetadata.Size(m) +} +func (m *CreateRouteTableMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateRouteTableMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateRouteTableMetadata proto.InternalMessageInfo + +func (m *CreateRouteTableMetadata) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type UpdateRouteTableRequest struct { + // ID of the RouteTable resource to update. + RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + // Field mask that specifies which fields of the RouteTable resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the route table. + // The name must be unique within the folder. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the route table. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // List of static routes. 
+ StaticRoutes []*StaticRoute `protobuf:"bytes,6,rep,name=static_routes,json=staticRoutes,proto3" json:"static_routes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRouteTableRequest) Reset() { *m = UpdateRouteTableRequest{} } +func (m *UpdateRouteTableRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateRouteTableRequest) ProtoMessage() {} +func (*UpdateRouteTableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{5} +} +func (m *UpdateRouteTableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateRouteTableRequest.Unmarshal(m, b) +} +func (m *UpdateRouteTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateRouteTableRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateRouteTableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRouteTableRequest.Merge(dst, src) +} +func (m *UpdateRouteTableRequest) XXX_Size() int { + return xxx_messageInfo_UpdateRouteTableRequest.Size(m) +} +func (m *UpdateRouteTableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRouteTableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRouteTableRequest proto.InternalMessageInfo + +func (m *UpdateRouteTableRequest) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +func (m *UpdateRouteTableRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateRouteTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateRouteTableRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateRouteTableRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateRouteTableRequest) GetStaticRoutes() []*StaticRoute { + if m != nil { + return m.StaticRoutes + } + return nil +} + +type UpdateRouteTableMetadata struct { + // ID of the RouteTable resource that is being updated. 
+ RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRouteTableMetadata) Reset() { *m = UpdateRouteTableMetadata{} } +func (m *UpdateRouteTableMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateRouteTableMetadata) ProtoMessage() {} +func (*UpdateRouteTableMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{6} +} +func (m *UpdateRouteTableMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateRouteTableMetadata.Unmarshal(m, b) +} +func (m *UpdateRouteTableMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateRouteTableMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateRouteTableMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRouteTableMetadata.Merge(dst, src) +} +func (m *UpdateRouteTableMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateRouteTableMetadata.Size(m) +} +func (m *UpdateRouteTableMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRouteTableMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRouteTableMetadata proto.InternalMessageInfo + +func (m *UpdateRouteTableMetadata) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type DeleteRouteTableRequest struct { + // ID of the route table to delete. + // To get the route table ID use a [RouteTableService.List] request. + RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRouteTableRequest) Reset() { *m = DeleteRouteTableRequest{} } +func (m *DeleteRouteTableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRouteTableRequest) ProtoMessage() {} +func (*DeleteRouteTableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{7} +} +func (m *DeleteRouteTableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRouteTableRequest.Unmarshal(m, b) +} +func (m *DeleteRouteTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRouteTableRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteRouteTableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRouteTableRequest.Merge(dst, src) +} +func (m *DeleteRouteTableRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRouteTableRequest.Size(m) +} +func (m *DeleteRouteTableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRouteTableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRouteTableRequest proto.InternalMessageInfo + +func (m *DeleteRouteTableRequest) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type DeleteRouteTableMetadata struct { + // ID of the RouteTable resource that is being deleted. 
+ RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRouteTableMetadata) Reset() { *m = DeleteRouteTableMetadata{} } +func (m *DeleteRouteTableMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteRouteTableMetadata) ProtoMessage() {} +func (*DeleteRouteTableMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{8} +} +func (m *DeleteRouteTableMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRouteTableMetadata.Unmarshal(m, b) +} +func (m *DeleteRouteTableMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRouteTableMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteRouteTableMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRouteTableMetadata.Merge(dst, src) +} +func (m *DeleteRouteTableMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteRouteTableMetadata.Size(m) +} +func (m *DeleteRouteTableMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRouteTableMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRouteTableMetadata proto.InternalMessageInfo + +func (m *DeleteRouteTableMetadata) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type ListRouteTableOperationsRequest struct { + // ID of the RouteTable resource to list operations for. + RouteTableId string `protobuf:"bytes,1,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + // The maximum number of results per page that should be returned. If the number of available + // results is larger than [page_size], the service returns a [ListRouteTableOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListRouteTableOperationsResponse.next_page_token] returned by a previous list request. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListRouteTableOperationsRequest) Reset() { *m = ListRouteTableOperationsRequest{} } +func (m *ListRouteTableOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListRouteTableOperationsRequest) ProtoMessage() {} +func (*ListRouteTableOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{9} +} +func (m *ListRouteTableOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListRouteTableOperationsRequest.Unmarshal(m, b) +} +func (m *ListRouteTableOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListRouteTableOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListRouteTableOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListRouteTableOperationsRequest.Merge(dst, src) +} +func (m *ListRouteTableOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListRouteTableOperationsRequest.Size(m) +} +func (m *ListRouteTableOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListRouteTableOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListRouteTableOperationsRequest proto.InternalMessageInfo + +func (m *ListRouteTableOperationsRequest) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +func (m *ListRouteTableOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListRouteTableOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListRouteTableOperationsResponse struct { + // List of operations for the specified RouteTable resource. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListRouteTableOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListRouteTableOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListRouteTableOperationsResponse) Reset() { *m = ListRouteTableOperationsResponse{} } +func (m *ListRouteTableOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListRouteTableOperationsResponse) ProtoMessage() {} +func (*ListRouteTableOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_route_table_service_18dbfac795d82b53, []int{10} +} +func (m *ListRouteTableOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListRouteTableOperationsResponse.Unmarshal(m, b) +} +func (m *ListRouteTableOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListRouteTableOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListRouteTableOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListRouteTableOperationsResponse.Merge(dst, src) +} +func (m *ListRouteTableOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListRouteTableOperationsResponse.Size(m) +} +func (m *ListRouteTableOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListRouteTableOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListRouteTableOperationsResponse proto.InternalMessageInfo + +func (m *ListRouteTableOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListRouteTableOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetRouteTableRequest)(nil), "yandex.cloud.vpc.v1.GetRouteTableRequest") + proto.RegisterType((*ListRouteTablesRequest)(nil), "yandex.cloud.vpc.v1.ListRouteTablesRequest") + proto.RegisterType((*ListRouteTablesResponse)(nil), "yandex.cloud.vpc.v1.ListRouteTablesResponse") + proto.RegisterType((*CreateRouteTableRequest)(nil), "yandex.cloud.vpc.v1.CreateRouteTableRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.CreateRouteTableRequest.LabelsEntry") + proto.RegisterType((*CreateRouteTableMetadata)(nil), "yandex.cloud.vpc.v1.CreateRouteTableMetadata") + proto.RegisterType((*UpdateRouteTableRequest)(nil), "yandex.cloud.vpc.v1.UpdateRouteTableRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.UpdateRouteTableRequest.LabelsEntry") + proto.RegisterType((*UpdateRouteTableMetadata)(nil), "yandex.cloud.vpc.v1.UpdateRouteTableMetadata") + proto.RegisterType((*DeleteRouteTableRequest)(nil), "yandex.cloud.vpc.v1.DeleteRouteTableRequest") + proto.RegisterType((*DeleteRouteTableMetadata)(nil), "yandex.cloud.vpc.v1.DeleteRouteTableMetadata") + proto.RegisterType((*ListRouteTableOperationsRequest)(nil), "yandex.cloud.vpc.v1.ListRouteTableOperationsRequest") + proto.RegisterType((*ListRouteTableOperationsResponse)(nil), "yandex.cloud.vpc.v1.ListRouteTableOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RouteTableServiceClient is the client API for RouteTableService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RouteTableServiceClient interface { + // Returns the specified RouteTable resource. + // + // To get the list of available RouteTable resources, make a [List] request. + Get(ctx context.Context, in *GetRouteTableRequest, opts ...grpc.CallOption) (*RouteTable, error) + // Retrieves the list of RouteTable resources in the specified folder. + List(ctx context.Context, in *ListRouteTablesRequest, opts ...grpc.CallOption) (*ListRouteTablesResponse, error) + // Creates a route table in the specified folder and network. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(ctx context.Context, in *CreateRouteTableRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified route table. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Update(ctx context.Context, in *UpdateRouteTableRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified route table. + Delete(ctx context.Context, in *DeleteRouteTableRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // List operations for the specified route table. + ListOperations(ctx context.Context, in *ListRouteTableOperationsRequest, opts ...grpc.CallOption) (*ListRouteTableOperationsResponse, error) +} + +type routeTableServiceClient struct { + cc *grpc.ClientConn +} + +func NewRouteTableServiceClient(cc *grpc.ClientConn) RouteTableServiceClient { + return &routeTableServiceClient{cc} +} + +func (c *routeTableServiceClient) Get(ctx context.Context, in *GetRouteTableRequest, opts ...grpc.CallOption) (*RouteTable, error) { + out := new(RouteTable) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.RouteTableService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeTableServiceClient) List(ctx context.Context, in *ListRouteTablesRequest, opts ...grpc.CallOption) (*ListRouteTablesResponse, error) { + out := new(ListRouteTablesResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.RouteTableService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeTableServiceClient) Create(ctx context.Context, in *CreateRouteTableRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.RouteTableService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeTableServiceClient) Update(ctx context.Context, in *UpdateRouteTableRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.RouteTableService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeTableServiceClient) Delete(ctx context.Context, in *DeleteRouteTableRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.RouteTableService/Delete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeTableServiceClient) ListOperations(ctx context.Context, in *ListRouteTableOperationsRequest, opts ...grpc.CallOption) (*ListRouteTableOperationsResponse, error) { + out := new(ListRouteTableOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.RouteTableService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RouteTableServiceServer is the server API for RouteTableService service. +type RouteTableServiceServer interface { + // Returns the specified RouteTable resource. + // + // To get the list of available RouteTable resources, make a [List] request. + Get(context.Context, *GetRouteTableRequest) (*RouteTable, error) + // Retrieves the list of RouteTable resources in the specified folder. + List(context.Context, *ListRouteTablesRequest) (*ListRouteTablesResponse, error) + // Creates a route table in the specified folder and network. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(context.Context, *CreateRouteTableRequest) (*operation.Operation, error) + // Updates the specified route table. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Update(context.Context, *UpdateRouteTableRequest) (*operation.Operation, error) + // Deletes the specified route table. + Delete(context.Context, *DeleteRouteTableRequest) (*operation.Operation, error) + // List operations for the specified route table. + ListOperations(context.Context, *ListRouteTableOperationsRequest) (*ListRouteTableOperationsResponse, error) +} + +func RegisterRouteTableServiceServer(s *grpc.Server, srv RouteTableServiceServer) { + s.RegisterService(&_RouteTableService_serviceDesc, srv) +} + +func _RouteTableService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRouteTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteTableServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.RouteTableService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteTableServiceServer).Get(ctx, req.(*GetRouteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteTableService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRouteTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteTableServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.RouteTableService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteTableServiceServer).List(ctx, req.(*ListRouteTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteTableService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRouteTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteTableServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/yandex.cloud.vpc.v1.RouteTableService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteTableServiceServer).Create(ctx, req.(*CreateRouteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteTableService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRouteTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteTableServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.RouteTableService/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteTableServiceServer).Update(ctx, req.(*UpdateRouteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteTableService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRouteTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteTableServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.RouteTableService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteTableServiceServer).Delete(ctx, req.(*DeleteRouteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteTableService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRouteTableOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteTableServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.RouteTableService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteTableServiceServer).ListOperations(ctx, req.(*ListRouteTableOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RouteTableService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.vpc.v1.RouteTableService", + HandlerType: (*RouteTableServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _RouteTableService_Get_Handler, + }, + { + MethodName: "List", + Handler: _RouteTableService_List_Handler, + }, + { + MethodName: "Create", + Handler: _RouteTableService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _RouteTableService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _RouteTableService_Delete_Handler, + }, + { + MethodName: "ListOperations", + Handler: _RouteTableService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/vpc/v1/route_table_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/vpc/v1/route_table_service.proto", fileDescriptor_route_table_service_18dbfac795d82b53) +} + +var fileDescriptor_route_table_service_18dbfac795d82b53 = []byte{ + // 1036 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xd7, 0xc6, 0x8e, 0xbf, 0xf5, 0x73, 0xda, 0x6f, 
0x99, 0xb6, 0xc4, 0x5a, 0xa8, 0xea, 0xae, + 0x4a, 0x48, 0x9c, 0xec, 0xae, 0xd7, 0x6d, 0x42, 0xd3, 0x34, 0x02, 0x0c, 0xa1, 0x04, 0x35, 0x02, + 0x6d, 0xca, 0x01, 0xa2, 0xca, 0x1a, 0x7b, 0x27, 0x66, 0xe5, 0xcd, 0xee, 0xb2, 0xb3, 0x36, 0x4d, + 0x4a, 0x41, 0xaa, 0xe0, 0x12, 0x89, 0x03, 0xe2, 0x4f, 0xe0, 0xc2, 0x39, 0x42, 0x1c, 0x10, 0xf7, + 0xe4, 0x5c, 0xfe, 0x05, 0x0e, 0x20, 0x6e, 0x3d, 0x72, 0x42, 0x33, 0xb3, 0xfe, 0x99, 0x75, 0xb2, + 0xa6, 0x45, 0xe2, 0xb6, 0xe3, 0xf7, 0x23, 0x9f, 0xf7, 0x99, 0xcf, 0x7b, 0x6f, 0x02, 0xea, 0x2e, + 0x76, 0x2d, 0xf2, 0x40, 0xaf, 0x3b, 0x5e, 0xcb, 0xd2, 0xdb, 0x7e, 0x5d, 0x6f, 0x1b, 0x7a, 0xe0, + 0xb5, 0x42, 0x52, 0x0d, 0x71, 0xcd, 0x21, 0x55, 0x4a, 0x82, 0xb6, 0x5d, 0x27, 0x9a, 0x1f, 0x78, + 0xa1, 0x87, 0x2e, 0x08, 0x77, 0x8d, 0xbb, 0x6b, 0x6d, 0xbf, 0xae, 0xb5, 0x0d, 0xf9, 0xe5, 0x86, + 0xe7, 0x35, 0x1c, 0xa2, 0x63, 0xdf, 0xd6, 0xb1, 0xeb, 0x7a, 0x21, 0x0e, 0x6d, 0xcf, 0xa5, 0x22, + 0x44, 0x2e, 0x44, 0x56, 0x7e, 0xaa, 0xb5, 0xb6, 0xf5, 0x6d, 0x9b, 0x38, 0x56, 0x75, 0x07, 0xd3, + 0x66, 0xe4, 0x21, 0x47, 0x18, 0x58, 0xbc, 0xe7, 0x93, 0x80, 0x87, 0x47, 0xb6, 0x57, 0x4e, 0xc1, + 0x17, 0xb9, 0xcd, 0x0c, 0xb8, 0x75, 0x93, 0x1c, 0x4b, 0x77, 0x79, 0x30, 0x1d, 0x76, 0x6c, 0xab, + 0xcf, 0xac, 0xbc, 0x07, 0x17, 0xef, 0x90, 0xd0, 0x64, 0xe9, 0xef, 0xb1, 0xec, 0x26, 0xf9, 0xb4, + 0x45, 0x68, 0x88, 0xca, 0x70, 0xae, 0x9f, 0x13, 0xdb, 0xca, 0x4b, 0x05, 0x69, 0x36, 0x5b, 0x99, + 0xfa, 0xfd, 0xd0, 0x90, 0xf6, 0x8f, 0x8c, 0xf4, 0xed, 0xd5, 0xc5, 0x92, 0x39, 0x15, 0x74, 0x03, + 0xd7, 0x2d, 0xe5, 0x67, 0x09, 0x5e, 0xbc, 0x6b, 0xd3, 0xbe, 0x6c, 0xb4, 0x93, 0x6e, 0x0e, 0xb2, + 0xdb, 0x9e, 0x63, 0x91, 0x60, 0x54, 0xa6, 0x33, 0xc2, 0xbc, 0x6e, 0xa1, 0x57, 0x21, 0xeb, 0xe3, + 0x06, 0xa9, 0x52, 0x7b, 0x8f, 0xe4, 0x27, 0x0a, 0xd2, 0x6c, 0xaa, 0x02, 0x7f, 0x1d, 0x1a, 0x99, + 0xdb, 0xab, 0x46, 0xa9, 0x54, 0x32, 0xcf, 0x30, 0xe3, 0xa6, 0xbd, 0x47, 0xd0, 0x2c, 0x00, 0x77, + 0x0c, 0xbd, 0x26, 0x71, 0xf3, 0x29, 0x9e, 0x34, 0xbb, 0x7f, 0x64, 0x4c, 0x72, 0x4f, 0x93, 0x67, + 0xb9, 0xc7, 0x6c, 0x48, 0x81, 0xcc, 0xb6, 0xed, 0x84, 0x24, 0xc8, 0xa7, 0xb9, 0x17, 0xec, 0x1f, + 0x75, 0xf3, 0x45, 0x16, 0xe5, 0x6b, 0x09, 0xa6, 0x8f, 0x81, 0xa7, 0xbe, 0xe7, 0x52, 0x82, 0x2a, + 0x30, 0xd5, 0x47, 0x06, 0xcd, 0x4b, 0x85, 0xd4, 0x6c, 0xae, 0x7c, 0x45, 0x8b, 0x91, 0x86, 0xd6, + 0x47, 0x65, 0xae, 0xc7, 0x0e, 0x45, 0x33, 0xf0, 0x7f, 0x97, 0x3c, 0x08, 0xab, 0x7d, 0x90, 0x59, + 0x71, 0x59, 0xf3, 0x2c, 0xfb, 0xf9, 0x83, 0x0e, 0x56, 0xe5, 0xcf, 0x14, 0x4c, 0xbf, 0x15, 0x10, + 0x1c, 0x92, 0xe3, 0x97, 0x32, 0x06, 0x8b, 0x8b, 0x90, 0x76, 0xf1, 0x8e, 0x20, 0x30, 0x5b, 0xb9, + 0xfa, 0xf4, 0xd0, 0xb8, 0xfc, 0xf9, 0x16, 0x56, 0xf7, 0xee, 0x6f, 0xa9, 0x58, 0xdd, 0x2b, 0xa9, + 0xcb, 0xf7, 0x1f, 0x1a, 0x0b, 0x4b, 0xc6, 0xa3, 0xad, 0xe8, 0x64, 0x72, 0x77, 0x34, 0x0f, 0x39, + 0x8b, 0xd0, 0x7a, 0x60, 0xfb, 0x4c, 0x23, 0x83, 0xa4, 0x96, 0x17, 0x97, 0xcc, 0x7e, 0x2b, 0xfa, + 0x56, 0x82, 0x8c, 0x83, 0x6b, 0xc4, 0xa1, 0xf9, 0x34, 0x67, 0xe4, 0x66, 0x2c, 0x23, 0x23, 0xaa, + 0xd1, 0xee, 0xf2, 0xd0, 0x35, 0x37, 0x0c, 0x76, 0x2b, 0xaf, 0x3f, 0x3d, 0x34, 0x72, 0x5b, 0x6a, + 0xb5, 0xa4, 0x2e, 0x33, 0x90, 0xc5, 0xc7, 0xbc, 0xa2, 0xa5, 0x1b, 0xa2, 0xb2, 0xa5, 0xeb, 0x07, + 0x47, 0x46, 0x46, 0x4e, 0x1b, 0x2a, 0xff, 0x42, 0xe8, 0x7c, 0x54, 0x4a, 0xd7, 0xdf, 0x8c, 0x80, + 0xa0, 0x79, 0x00, 0x97, 0x84, 0x9f, 0x79, 0x41, 0x93, 0x71, 0x34, 0x19, 0xc3, 0x51, 0x36, 0xb2, + 0xaf, 0x5b, 0x68, 0x0d, 0xce, 0x52, 0xd6, 0xba, 0xf5, 0x2a, 0xbf, 0x29, 0x9a, 0xcf, 0xf0, 0x32, + 0x0a, 0xb1, 0x65, 0x6c, 0x72, 0x4f, 0x5e, 0x86, 0x39, 0x45, 0x7b, 0x07, 0x2a, 
0x2f, 0x43, 0xae, + 0xaf, 0x16, 0x74, 0x1e, 0x52, 0x4d, 0xb2, 0x2b, 0xee, 0xc7, 0x64, 0x9f, 0xe8, 0x22, 0x4c, 0xb6, + 0xb1, 0xd3, 0x8a, 0x6e, 0xc3, 0x14, 0x87, 0x5b, 0x13, 0x37, 0x25, 0xe5, 0x0d, 0xc8, 0x0f, 0xd3, + 0xb3, 0x41, 0x42, 0x6c, 0xe1, 0x10, 0xa3, 0x6b, 0xf1, 0x2d, 0x38, 0xd4, 0x74, 0x5f, 0xa5, 0x61, + 0xfa, 0x43, 0xdf, 0x8a, 0xd5, 0xcb, 0x3f, 0x68, 0x62, 0xb4, 0x02, 0xb9, 0x16, 0x4f, 0xc7, 0xe7, + 0x15, 0x47, 0x9c, 0x2b, 0xcb, 0x9a, 0x18, 0x69, 0x5a, 0x67, 0xa4, 0x69, 0xef, 0xb0, 0x91, 0xb6, + 0x81, 0x69, 0xd3, 0x04, 0xe1, 0xce, 0xbe, 0xbb, 0xaa, 0x4b, 0x3d, 0x93, 0xea, 0xd2, 0x49, 0x55, + 0x37, 0x79, 0x82, 0xea, 0x46, 0x70, 0xf2, 0xef, 0xa8, 0xee, 0x3f, 0x21, 0xa4, 0xe1, 0x8a, 0xc7, + 0x14, 0xd2, 0x06, 0x4c, 0xbf, 0x4d, 0x1c, 0xf2, 0x9c, 0x74, 0xc4, 0x00, 0x0d, 0xa7, 0x1b, 0x13, + 0xd0, 0x0f, 0x12, 0x5c, 0x19, 0x9c, 0xc8, 0xef, 0x77, 0x76, 0x1b, 0x7d, 0x16, 0x85, 0x3f, 0xff, + 0x05, 0xa3, 0x7c, 0x23, 0x41, 0x61, 0x34, 0xd4, 0x68, 0x8b, 0xbc, 0x09, 0xd0, 0x5d, 0xce, 0x9d, + 0x1d, 0x72, 0x75, 0x50, 0x21, 0xbd, 0xe5, 0xdd, 0x8d, 0x37, 0xfb, 0x82, 0x92, 0x2e, 0x91, 0xf2, + 0x1f, 0xff, 0x83, 0x17, 0x7a, 0x58, 0x36, 0xc5, 0x83, 0x06, 0x7d, 0x09, 0xa9, 0x3b, 0x24, 0x44, + 0x73, 0xb1, 0xaa, 0x8c, 0x7b, 0x05, 0xc8, 0xa7, 0xad, 0x38, 0x65, 0xe1, 0xf1, 0xaf, 0xbf, 0x7d, + 0x37, 0x31, 0x83, 0xae, 0x0d, 0x3c, 0x54, 0xc4, 0xca, 0xd3, 0x1f, 0x0e, 0x5e, 0xcd, 0x23, 0xf4, + 0x05, 0xa4, 0x19, 0x4b, 0x68, 0x3e, 0x36, 0x6d, 0xfc, 0xd3, 0x41, 0x5e, 0x48, 0xe6, 0x2c, 0x48, + 0x56, 0x5e, 0xe2, 0x80, 0x2e, 0xa1, 0x0b, 0x31, 0x80, 0xd0, 0xf7, 0x12, 0x64, 0xc4, 0xb8, 0x45, + 0x0b, 0xe3, 0xac, 0x2a, 0xf9, 0xf4, 0x6b, 0x52, 0xde, 0x3d, 0x78, 0x52, 0x9c, 0x39, 0x61, 0x9a, + 0x43, 0xef, 0x37, 0x0e, 0x31, 0xaf, 0xc4, 0x41, 0xbc, 0x25, 0x15, 0xd1, 0x8f, 0x12, 0x64, 0x44, + 0x2f, 0x8f, 0x40, 0x39, 0x62, 0xb4, 0x25, 0x41, 0xf9, 0x91, 0x40, 0x39, 0x72, 0x54, 0x0c, 0xa3, + 0x9c, 0x2b, 0x27, 0xba, 0x59, 0x06, 0xfb, 0x17, 0x09, 0x32, 0xa2, 0xe3, 0x47, 0xc0, 0x1e, 0x31, + 0x5d, 0x92, 0xc0, 0xc6, 0x07, 0x4f, 0x8a, 0xc6, 0x09, 0x03, 0xe5, 0xd2, 0xf0, 0x7a, 0x5a, 0xdb, + 0xf1, 0xc3, 0x5d, 0xa1, 0xcd, 0x62, 0x32, 0x6d, 0xfe, 0x24, 0xc1, 0x39, 0x26, 0xaa, 0x5e, 0xe3, + 0xa2, 0x1b, 0x09, 0x94, 0x77, 0x6c, 0x24, 0xc9, 0x8b, 0x63, 0x46, 0x45, 0xc2, 0x7d, 0x8d, 0xa3, + 0x35, 0x90, 0x9e, 0x04, 0x6d, 0xef, 0x95, 0x4f, 0x2b, 0xab, 0x1f, 0xaf, 0x34, 0xec, 0xf0, 0x93, + 0x56, 0x4d, 0xab, 0x7b, 0x3b, 0xba, 0xf8, 0xdb, 0xaa, 0x78, 0xed, 0x37, 0x3c, 0xb5, 0x41, 0x5c, + 0x4e, 0x8a, 0x1e, 0xf3, 0x5f, 0xc5, 0x4a, 0xdb, 0xaf, 0xd7, 0x32, 0xdc, 0x7c, 0xfd, 0xef, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x80, 0x29, 0xd2, 0x08, 0x17, 0x0d, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet.pb.go new file mode 100644 index 000000000..47023bcb6 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet.pb.go @@ -0,0 +1,193 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/vpc/v1/subnet.proto + +package vpc // import "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Subnet resource. For more information, see [Subnets](/docs/vpc/concepts/subnets). +type Subnet struct { + // ID of the subnet. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the folder that the subnet belongs to. + FolderId string `protobuf:"bytes,2,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Name of the subnet. The name is unique within the project. 3-63 characters long. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Optional description of the subnet. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. Мaximum of 64 per resource. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the network the subnet belongs to. + NetworkId string `protobuf:"bytes,7,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // ID of the availability zone where the subnet resides. + ZoneId string `protobuf:"bytes,8,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // CIDR block. + // The range of internal addresses that are defined for this subnet. + // This field can be set only at Subnet resource creation time and cannot be changed. + // For example, 10.0.0.0/22 or 192.168.0.0/24. + // Minimum subnet size is /28, maximum subnet size is /16. + V4CidrBlocks []string `protobuf:"bytes,10,rep,name=v4_cidr_blocks,json=v4CidrBlocks,proto3" json:"v4_cidr_blocks,omitempty"` + // IPv6 not available yet. + V6CidrBlocks []string `protobuf:"bytes,11,rep,name=v6_cidr_blocks,json=v6CidrBlocks,proto3" json:"v6_cidr_blocks,omitempty"` + // ID of route table the subnet is linked to. 
+ RouteTableId string `protobuf:"bytes,12,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Subnet) Reset() { *m = Subnet{} } +func (m *Subnet) String() string { return proto.CompactTextString(m) } +func (*Subnet) ProtoMessage() {} +func (*Subnet) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_ffafe502c297ef9c, []int{0} +} +func (m *Subnet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Subnet.Unmarshal(m, b) +} +func (m *Subnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Subnet.Marshal(b, m, deterministic) +} +func (dst *Subnet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subnet.Merge(dst, src) +} +func (m *Subnet) XXX_Size() int { + return xxx_messageInfo_Subnet.Size(m) +} +func (m *Subnet) XXX_DiscardUnknown() { + xxx_messageInfo_Subnet.DiscardUnknown(m) +} + +var xxx_messageInfo_Subnet proto.InternalMessageInfo + +func (m *Subnet) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Subnet) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *Subnet) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Subnet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Subnet) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Subnet) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Subnet) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *Subnet) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *Subnet) GetV4CidrBlocks() []string { + if m != nil { + return m.V4CidrBlocks + } + return nil +} + +func (m *Subnet) GetV6CidrBlocks() []string { + if m != nil { + return m.V6CidrBlocks + } + return nil +} + +func (m *Subnet) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +func init() { + proto.RegisterType((*Subnet)(nil), "yandex.cloud.vpc.v1.Subnet") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.Subnet.LabelsEntry") +} + +func init() { + proto.RegisterFile("yandex/cloud/vpc/v1/subnet.proto", fileDescriptor_subnet_ffafe502c297ef9c) +} + +var fileDescriptor_subnet_ffafe502c297ef9c = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x51, 0x6b, 0xd4, 0x40, + 0x14, 0x85, 0xd9, 0x4d, 0x9b, 0x36, 0x37, 0x4b, 0x91, 0x51, 0x30, 0xac, 0x88, 0x41, 0x04, 0xf7, + 0xa5, 0x13, 0x5a, 0x4b, 0xb1, 0x16, 0x11, 0x2b, 0x3e, 0x2c, 0xf8, 0xb4, 0xf6, 0xc9, 0x97, 0x90, + 0xcc, 0xdc, 0xc6, 0x61, 0x93, 0x4c, 0x98, 0x4c, 0x46, 0xd7, 0xdf, 0xe8, 0x8f, 0x2a, 0xb9, 0x93, + 0x42, 0x0b, 0xfb, 0x36, 0x73, 0xce, 0x37, 0x73, 0x38, 0x73, 0x07, 0xd2, 0x5d, 0xd1, 0x4a, 0xfc, + 0x9b, 0x89, 0x5a, 0x0f, 0x32, 0x73, 0x9d, 0xc8, 0xdc, 0x59, 0xd6, 0x0f, 0x65, 0x8b, 0x96, 0x77, + 0x46, 0x5b, 0xcd, 0x9e, 0x7b, 0x82, 0x13, 0xc1, 0x5d, 0x27, 0xb8, 0x3b, 0x5b, 0xbe, 0xa9, 0xb4, + 0xae, 0x6a, 0xcc, 0x08, 0x29, 0x87, 0xbb, 0xcc, 0xaa, 0x06, 0x7b, 0x5b, 0x34, 0x9d, 0x3f, 0xf5, + 0xf6, 0x7f, 0x00, 0xe1, 0x4f, 0xba, 0x86, 0x9d, 0xc0, 0x5c, 0xc9, 0x64, 0x96, 0xce, 0x56, 0xd1, + 0x66, 0xae, 0x24, 0x7b, 0x05, 0xd1, 0x9d, 0xae, 0x25, 0x9a, 0x5c, 0xc9, 0x64, 0x4e, 0xf2, 
0xb1, + 0x17, 0xd6, 0x92, 0x5d, 0x01, 0x08, 0x83, 0x85, 0x45, 0x99, 0x17, 0x36, 0x09, 0xd2, 0xd9, 0x2a, + 0x3e, 0x5f, 0x72, 0x9f, 0xc6, 0x1f, 0xd2, 0xf8, 0xed, 0x43, 0xda, 0x26, 0x9a, 0xe8, 0xaf, 0x96, + 0x31, 0x38, 0x68, 0x8b, 0x06, 0x93, 0x03, 0xba, 0x92, 0xd6, 0x2c, 0x85, 0x58, 0x62, 0x2f, 0x8c, + 0xea, 0xac, 0xd2, 0x6d, 0x72, 0x48, 0xd6, 0x63, 0x89, 0x7d, 0x81, 0xb0, 0x2e, 0x4a, 0xac, 0xfb, + 0x24, 0x4c, 0x83, 0x55, 0x7c, 0xfe, 0x9e, 0xef, 0xe9, 0xcb, 0x7d, 0x15, 0xfe, 0x83, 0xc8, 0xef, + 0xad, 0x35, 0xbb, 0xcd, 0x74, 0x8c, 0xbd, 0x06, 0x68, 0xd1, 0xfe, 0xd1, 0x66, 0x3b, 0xf6, 0x39, + 0xa2, 0x84, 0x68, 0x52, 0xd6, 0x92, 0xbd, 0x84, 0xa3, 0x7f, 0xba, 0xc5, 0xd1, 0x3b, 0x26, 0x2f, + 0x1c, 0xb7, 0x6b, 0xc9, 0xde, 0xc1, 0x89, 0xbb, 0xc8, 0x85, 0x92, 0x26, 0x2f, 0x6b, 0x2d, 0xb6, + 0x7d, 0x02, 0x69, 0xb0, 0x8a, 0x36, 0x0b, 0x77, 0xf1, 0x4d, 0x49, 0x73, 0x43, 0x1a, 0x51, 0x97, + 0x4f, 0xa8, 0x78, 0xa2, 0x2e, 0x9f, 0x52, 0x46, 0x0f, 0x16, 0x73, 0x5b, 0x94, 0x35, 0x65, 0x2d, + 0x28, 0x6b, 0x41, 0xea, 0xed, 0x28, 0xae, 0xe5, 0xf2, 0x0a, 0xe2, 0x47, 0x05, 0xd8, 0x33, 0x08, + 0xb6, 0xb8, 0x9b, 0x06, 0x33, 0x2e, 0xd9, 0x0b, 0x38, 0x74, 0x45, 0x3d, 0xe0, 0x34, 0x15, 0xbf, + 0xf9, 0x34, 0xff, 0x38, 0xbb, 0xf9, 0xfc, 0xeb, 0xba, 0x52, 0xf6, 0xf7, 0x50, 0x72, 0xa1, 0x9b, + 0xcc, 0xbf, 0xd0, 0xa9, 0xff, 0x33, 0x95, 0x3e, 0xad, 0xb0, 0xa5, 0xd1, 0x64, 0x7b, 0x3e, 0xd3, + 0xb5, 0xeb, 0x44, 0x19, 0x92, 0xfd, 0xe1, 0x3e, 0x00, 0x00, 0xff, 0xff, 0x42, 0x30, 0x95, 0x96, + 0x6e, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet_service.pb.go b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet_service.pb.go new file mode 100644 index 000000000..4b6da76e1 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1/subnet_service.pb.go @@ -0,0 +1,1016 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: yandex/cloud/vpc/v1/subnet_service.proto + +package vpc // import "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/yandex-cloud/go-genproto/yandex/api" +import operation "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetSubnetRequest struct { + // ID of the Subnet resource to return. + // To get the subnet ID use a [SubnetService.List] request. 
+ SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSubnetRequest) Reset() { *m = GetSubnetRequest{} } +func (m *GetSubnetRequest) String() string { return proto.CompactTextString(m) } +func (*GetSubnetRequest) ProtoMessage() {} +func (*GetSubnetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{0} +} +func (m *GetSubnetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSubnetRequest.Unmarshal(m, b) +} +func (m *GetSubnetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSubnetRequest.Marshal(b, m, deterministic) +} +func (dst *GetSubnetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSubnetRequest.Merge(dst, src) +} +func (m *GetSubnetRequest) XXX_Size() int { + return xxx_messageInfo_GetSubnetRequest.Size(m) +} +func (m *GetSubnetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSubnetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSubnetRequest proto.InternalMessageInfo + +func (m *GetSubnetRequest) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +type ListSubnetsRequest struct { + // ID of the folder to list subnets in. + // To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // The maximum number of results per page to return. If the number of available + // results is larger than [page_size], + // the service returns a [ListSubnetsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. To get the next page of results, set [page_token] to the + // [ListSubnetsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // A filter expression that filters resources listed in the response. + // The expression must specify: + // 1. The field name. Currently you can use filtering only on [Subnet.name] field. + // 2. An operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + // 3. The value. Must be 3-63 characters long and match the regular expression `^[a-z][-a-z0-9]{1,61}[a-z0-9]$`. 
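+ // For example, a filter on the name field might look like `name = "my-subnet"`
+ // (an illustrative value; it must satisfy the length and pattern constraints above).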
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSubnetsRequest) Reset() { *m = ListSubnetsRequest{} } +func (m *ListSubnetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSubnetsRequest) ProtoMessage() {} +func (*ListSubnetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{1} +} +func (m *ListSubnetsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSubnetsRequest.Unmarshal(m, b) +} +func (m *ListSubnetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSubnetsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSubnetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSubnetsRequest.Merge(dst, src) +} +func (m *ListSubnetsRequest) XXX_Size() int { + return xxx_messageInfo_ListSubnetsRequest.Size(m) +} +func (m *ListSubnetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSubnetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSubnetsRequest proto.InternalMessageInfo + +func (m *ListSubnetsRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *ListSubnetsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSubnetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListSubnetsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type ListSubnetsResponse struct { + // List of Subnet resources. + Subnets []*Subnet `protobuf:"bytes,1,rep,name=subnets,proto3" json:"subnets,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListSubnetsRequest.page_size], use + // the [next_page_token] as the value + // for the [ListSubnetsRequest.page_token] query parameter + // in the next list request. Subsequent list requests will have their own + // [next_page_token] to continue paging through the results. 
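+ //
+ // A paging sketch (illustrative; it assumes `client` is a SubnetServiceClient,
+ // `ctx` and `folderID` are in scope, and an empty [next_page_token] marks the
+ // last page):
+ //
+ //	var subnets []*Subnet
+ //	req := &ListSubnetsRequest{FolderId: folderID, PageSize: 100}
+ //	for {
+ //		resp, err := client.List(ctx, req)
+ //		if err != nil {
+ //			return nil, err
+ //		}
+ //		subnets = append(subnets, resp.GetSubnets()...)
+ //		if resp.GetNextPageToken() == "" {
+ //			break
+ //		}
+ //		req.PageToken = resp.GetNextPageToken()
+ //	}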
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSubnetsResponse) Reset() { *m = ListSubnetsResponse{} } +func (m *ListSubnetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSubnetsResponse) ProtoMessage() {} +func (*ListSubnetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{2} +} +func (m *ListSubnetsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSubnetsResponse.Unmarshal(m, b) +} +func (m *ListSubnetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSubnetsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSubnetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSubnetsResponse.Merge(dst, src) +} +func (m *ListSubnetsResponse) XXX_Size() int { + return xxx_messageInfo_ListSubnetsResponse.Size(m) +} +func (m *ListSubnetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSubnetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSubnetsResponse proto.InternalMessageInfo + +func (m *ListSubnetsResponse) GetSubnets() []*Subnet { + if m != nil { + return m.Subnets + } + return nil +} + +func (m *ListSubnetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateSubnetRequest struct { + // ID of the folder to create a subnet in. + // To get folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + FolderId string `protobuf:"bytes,1,opt,name=folder_id,json=folderId,proto3" json:"folder_id,omitempty"` + // Name of the subnet. + // The name must be unique within the folder. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Description of the subnet. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels, `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of the network to create subnet in. + NetworkId string `protobuf:"bytes,5,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // ID of the availability zone where the subnet resides. + // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + ZoneId string `protobuf:"bytes,6,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + // CIDR block. + // The range of internal addresses that are defined for this subnet. + // This field can be set only at Subnet resource creation time and cannot be changed. + // For example, 10.0.0.0/22 or 192.168.0.0/24. + // Minimum subnet size is /28, maximum subnet size is /16. + V4CidrBlocks []string `protobuf:"bytes,7,rep,name=v4_cidr_blocks,json=v4CidrBlocks,proto3" json:"v4_cidr_blocks,omitempty"` + // IPv6 not available yet. + V6CidrBlocks []string `protobuf:"bytes,8,rep,name=v6_cidr_blocks,json=v6CidrBlocks,proto3" json:"v6_cidr_blocks,omitempty"` + // ID of route table the subnet is linked to. 
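+ //
+ // Putting the fields above together, a creation request might look like this
+ // (an illustrative sketch: the folder, network and zone IDs are placeholders, the
+ // CIDR value follows the format described for [v4_cidr_blocks], and the route
+ // table binding described below is left unset):
+ //
+ //	req := &CreateSubnetRequest{
+ //		FolderId:     "<folder ID>",
+ //		Name:         "my-subnet",
+ //		NetworkId:    "<network ID>",
+ //		ZoneId:       "<zone ID>",
+ //		V4CidrBlocks: []string{"10.0.0.0/22"},
+ //	}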
+ RouteTableId string `protobuf:"bytes,9,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSubnetRequest) Reset() { *m = CreateSubnetRequest{} } +func (m *CreateSubnetRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSubnetRequest) ProtoMessage() {} +func (*CreateSubnetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{3} +} +func (m *CreateSubnetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSubnetRequest.Unmarshal(m, b) +} +func (m *CreateSubnetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSubnetRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSubnetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSubnetRequest.Merge(dst, src) +} +func (m *CreateSubnetRequest) XXX_Size() int { + return xxx_messageInfo_CreateSubnetRequest.Size(m) +} +func (m *CreateSubnetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSubnetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSubnetRequest proto.InternalMessageInfo + +func (m *CreateSubnetRequest) GetFolderId() string { + if m != nil { + return m.FolderId + } + return "" +} + +func (m *CreateSubnetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSubnetRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *CreateSubnetRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateSubnetRequest) GetNetworkId() string { + if m != nil { + return m.NetworkId + } + return "" +} + +func (m *CreateSubnetRequest) GetZoneId() string { + if m != nil { + return m.ZoneId + } + return "" +} + +func (m *CreateSubnetRequest) GetV4CidrBlocks() []string { + if m != nil { + return m.V4CidrBlocks + } + return nil +} + +func (m *CreateSubnetRequest) GetV6CidrBlocks() []string { + if m != nil { + return m.V6CidrBlocks + } + return nil +} + +func (m *CreateSubnetRequest) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type CreateSubnetMetadata struct { + // ID of the subnet that is being created. 
+ SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSubnetMetadata) Reset() { *m = CreateSubnetMetadata{} } +func (m *CreateSubnetMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateSubnetMetadata) ProtoMessage() {} +func (*CreateSubnetMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{4} +} +func (m *CreateSubnetMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSubnetMetadata.Unmarshal(m, b) +} +func (m *CreateSubnetMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSubnetMetadata.Marshal(b, m, deterministic) +} +func (dst *CreateSubnetMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSubnetMetadata.Merge(dst, src) +} +func (m *CreateSubnetMetadata) XXX_Size() int { + return xxx_messageInfo_CreateSubnetMetadata.Size(m) +} +func (m *CreateSubnetMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSubnetMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSubnetMetadata proto.InternalMessageInfo + +func (m *CreateSubnetMetadata) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +type UpdateSubnetRequest struct { + // ID of the Subnet resource to update. + SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // Field mask that specifies which fields of the Subnet resource are going to be updated. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Name of the subnet. + // The name must be unique within the folder. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Description of the subnet. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Resource labels as `` key:value `` pairs. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of route table the subnet is linked to. 
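+ //
+ // A partial-update sketch (illustrative; only the name is changed, so [update_mask]
+ // lists just that field and all other fields are left as they are):
+ //
+ //	req := &UpdateSubnetRequest{
+ //		SubnetId:   "<subnet ID>",
+ //		UpdateMask: &field_mask.FieldMask{Paths: []string{"name"}},
+ //		Name:       "renamed-subnet",
+ //	}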
+ RouteTableId string `protobuf:"bytes,6,opt,name=route_table_id,json=routeTableId,proto3" json:"route_table_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateSubnetRequest) Reset() { *m = UpdateSubnetRequest{} } +func (m *UpdateSubnetRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateSubnetRequest) ProtoMessage() {} +func (*UpdateSubnetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{5} +} +func (m *UpdateSubnetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateSubnetRequest.Unmarshal(m, b) +} +func (m *UpdateSubnetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateSubnetRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateSubnetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateSubnetRequest.Merge(dst, src) +} +func (m *UpdateSubnetRequest) XXX_Size() int { + return xxx_messageInfo_UpdateSubnetRequest.Size(m) +} +func (m *UpdateSubnetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateSubnetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateSubnetRequest proto.InternalMessageInfo + +func (m *UpdateSubnetRequest) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *UpdateSubnetRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateSubnetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateSubnetRequest) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UpdateSubnetRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *UpdateSubnetRequest) GetRouteTableId() string { + if m != nil { + return m.RouteTableId + } + return "" +} + +type UpdateSubnetMetadata struct { + // ID of the Subnet resource that is being updated. + SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateSubnetMetadata) Reset() { *m = UpdateSubnetMetadata{} } +func (m *UpdateSubnetMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateSubnetMetadata) ProtoMessage() {} +func (*UpdateSubnetMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{6} +} +func (m *UpdateSubnetMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateSubnetMetadata.Unmarshal(m, b) +} +func (m *UpdateSubnetMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateSubnetMetadata.Marshal(b, m, deterministic) +} +func (dst *UpdateSubnetMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateSubnetMetadata.Merge(dst, src) +} +func (m *UpdateSubnetMetadata) XXX_Size() int { + return xxx_messageInfo_UpdateSubnetMetadata.Size(m) +} +func (m *UpdateSubnetMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateSubnetMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateSubnetMetadata proto.InternalMessageInfo + +func (m *UpdateSubnetMetadata) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +type DeleteSubnetRequest struct { + // ID of the subnet to delete. 
+ // To get the subnet ID use a [SubnetService.List] request. + SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSubnetRequest) Reset() { *m = DeleteSubnetRequest{} } +func (m *DeleteSubnetRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSubnetRequest) ProtoMessage() {} +func (*DeleteSubnetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{7} +} +func (m *DeleteSubnetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSubnetRequest.Unmarshal(m, b) +} +func (m *DeleteSubnetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSubnetRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteSubnetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSubnetRequest.Merge(dst, src) +} +func (m *DeleteSubnetRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSubnetRequest.Size(m) +} +func (m *DeleteSubnetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSubnetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSubnetRequest proto.InternalMessageInfo + +func (m *DeleteSubnetRequest) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +type DeleteSubnetMetadata struct { + // ID of the Subnet resource that is being deleted. + SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSubnetMetadata) Reset() { *m = DeleteSubnetMetadata{} } +func (m *DeleteSubnetMetadata) String() string { return proto.CompactTextString(m) } +func (*DeleteSubnetMetadata) ProtoMessage() {} +func (*DeleteSubnetMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{8} +} +func (m *DeleteSubnetMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSubnetMetadata.Unmarshal(m, b) +} +func (m *DeleteSubnetMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSubnetMetadata.Marshal(b, m, deterministic) +} +func (dst *DeleteSubnetMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSubnetMetadata.Merge(dst, src) +} +func (m *DeleteSubnetMetadata) XXX_Size() int { + return xxx_messageInfo_DeleteSubnetMetadata.Size(m) +} +func (m *DeleteSubnetMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSubnetMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSubnetMetadata proto.InternalMessageInfo + +func (m *DeleteSubnetMetadata) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +type ListSubnetOperationsRequest struct { + // ID of the Subnet resource to list operations for. + SubnetId string `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + // The maximum number of results per page that should be returned. If the number of available + // results is larger than [page_size], the service returns a [ListSubnetOperationsResponse.next_page_token] + // that can be used to get the next page of results in subsequent list requests. Default value: 100. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Page token. 
To get the next page of results, set [page_token] to the + // [ListSubnetOperationsResponse.next_page_token] returned by a previous list request. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSubnetOperationsRequest) Reset() { *m = ListSubnetOperationsRequest{} } +func (m *ListSubnetOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSubnetOperationsRequest) ProtoMessage() {} +func (*ListSubnetOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{9} +} +func (m *ListSubnetOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSubnetOperationsRequest.Unmarshal(m, b) +} +func (m *ListSubnetOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSubnetOperationsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSubnetOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSubnetOperationsRequest.Merge(dst, src) +} +func (m *ListSubnetOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListSubnetOperationsRequest.Size(m) +} +func (m *ListSubnetOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSubnetOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSubnetOperationsRequest proto.InternalMessageInfo + +func (m *ListSubnetOperationsRequest) GetSubnetId() string { + if m != nil { + return m.SubnetId + } + return "" +} + +func (m *ListSubnetOperationsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSubnetOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +type ListSubnetOperationsResponse struct { + // List of operations for the specified Subnet resource. + Operations []*operation.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // This token allows you to get the next page of results for list requests. If the number of results + // is larger than [ListSubnetOperationsRequest.page_size], use the [next_page_token] as the value + // for the [ListSubnetOperationsRequest.page_token] query parameter in the next list request. + // Each subsequent list request will have its own [next_page_token] to continue paging through the results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSubnetOperationsResponse) Reset() { *m = ListSubnetOperationsResponse{} } +func (m *ListSubnetOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSubnetOperationsResponse) ProtoMessage() {} +func (*ListSubnetOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_subnet_service_c165cb64a0d126f6, []int{10} +} +func (m *ListSubnetOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSubnetOperationsResponse.Unmarshal(m, b) +} +func (m *ListSubnetOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSubnetOperationsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSubnetOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSubnetOperationsResponse.Merge(dst, src) +} +func (m *ListSubnetOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListSubnetOperationsResponse.Size(m) +} +func (m *ListSubnetOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSubnetOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSubnetOperationsResponse proto.InternalMessageInfo + +func (m *ListSubnetOperationsResponse) GetOperations() []*operation.Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListSubnetOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetSubnetRequest)(nil), "yandex.cloud.vpc.v1.GetSubnetRequest") + proto.RegisterType((*ListSubnetsRequest)(nil), "yandex.cloud.vpc.v1.ListSubnetsRequest") + proto.RegisterType((*ListSubnetsResponse)(nil), "yandex.cloud.vpc.v1.ListSubnetsResponse") + proto.RegisterType((*CreateSubnetRequest)(nil), "yandex.cloud.vpc.v1.CreateSubnetRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.CreateSubnetRequest.LabelsEntry") + proto.RegisterType((*CreateSubnetMetadata)(nil), "yandex.cloud.vpc.v1.CreateSubnetMetadata") + proto.RegisterType((*UpdateSubnetRequest)(nil), "yandex.cloud.vpc.v1.UpdateSubnetRequest") + proto.RegisterMapType((map[string]string)(nil), "yandex.cloud.vpc.v1.UpdateSubnetRequest.LabelsEntry") + proto.RegisterType((*UpdateSubnetMetadata)(nil), "yandex.cloud.vpc.v1.UpdateSubnetMetadata") + proto.RegisterType((*DeleteSubnetRequest)(nil), "yandex.cloud.vpc.v1.DeleteSubnetRequest") + proto.RegisterType((*DeleteSubnetMetadata)(nil), "yandex.cloud.vpc.v1.DeleteSubnetMetadata") + proto.RegisterType((*ListSubnetOperationsRequest)(nil), "yandex.cloud.vpc.v1.ListSubnetOperationsRequest") + proto.RegisterType((*ListSubnetOperationsResponse)(nil), "yandex.cloud.vpc.v1.ListSubnetOperationsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SubnetServiceClient is the client API for SubnetService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
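+//
+// Create, Update and Delete return a yandex.cloud.operation.Operation that describes an
+// asynchronous operation. A polling sketch (illustrative; it assumes the OperationService
+// client generated alongside this package is available over the same `conn` for refreshing
+// the operation, and it omits timeouts, backoff and error handling):
+//
+//	op, err := client.Create(ctx, req)
+//	if err != nil {
+//		return err
+//	}
+//	opClient := operation.NewOperationServiceClient(conn)
+//	for !op.GetDone() {
+//		time.Sleep(time.Second)
+//		op, err = opClient.Get(ctx, &operation.GetOperationRequest{OperationId: op.GetId()})
+//		if err != nil {
+//			return err
+//		}
+//	}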
+type SubnetServiceClient interface { + // Returns the specified Subnet resource. + // + // To get the list of available Subnet resources, make a [List] request. + Get(ctx context.Context, in *GetSubnetRequest, opts ...grpc.CallOption) (*Subnet, error) + // Retrieves the list of Subnet resources in the specified folder. + List(ctx context.Context, in *ListSubnetsRequest, opts ...grpc.CallOption) (*ListSubnetsResponse, error) + // Creates a subnet in the specified folder and network. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(ctx context.Context, in *CreateSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Updates the specified subnet. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Update(ctx context.Context, in *UpdateSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // Deletes the specified subnet. + Delete(ctx context.Context, in *DeleteSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) + // List operations for the specified subnet. + ListOperations(ctx context.Context, in *ListSubnetOperationsRequest, opts ...grpc.CallOption) (*ListSubnetOperationsResponse, error) +} + +type subnetServiceClient struct { + cc *grpc.ClientConn +} + +func NewSubnetServiceClient(cc *grpc.ClientConn) SubnetServiceClient { + return &subnetServiceClient{cc} +} + +func (c *subnetServiceClient) Get(ctx context.Context, in *GetSubnetRequest, opts ...grpc.CallOption) (*Subnet, error) { + out := new(Subnet) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.SubnetService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subnetServiceClient) List(ctx context.Context, in *ListSubnetsRequest, opts ...grpc.CallOption) (*ListSubnetsResponse, error) { + out := new(ListSubnetsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.SubnetService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subnetServiceClient) Create(ctx context.Context, in *CreateSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.SubnetService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subnetServiceClient) Update(ctx context.Context, in *UpdateSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.SubnetService/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subnetServiceClient) Delete(ctx context.Context, in *DeleteSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + out := new(operation.Operation) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.SubnetService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subnetServiceClient) ListOperations(ctx context.Context, in *ListSubnetOperationsRequest, opts ...grpc.CallOption) (*ListSubnetOperationsResponse, error) { + out := new(ListSubnetOperationsResponse) + err := c.cc.Invoke(ctx, "/yandex.cloud.vpc.v1.SubnetService/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SubnetServiceServer is the server API for SubnetService service. +type SubnetServiceServer interface { + // Returns the specified Subnet resource. 
+ // + // To get the list of available Subnet resources, make a [List] request. + Get(context.Context, *GetSubnetRequest) (*Subnet, error) + // Retrieves the list of Subnet resources in the specified folder. + List(context.Context, *ListSubnetsRequest) (*ListSubnetsResponse, error) + // Creates a subnet in the specified folder and network. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Create(context.Context, *CreateSubnetRequest) (*operation.Operation, error) + // Updates the specified subnet. + // Method starts an asynchronous operation that can be cancelled while it is in progress. + Update(context.Context, *UpdateSubnetRequest) (*operation.Operation, error) + // Deletes the specified subnet. + Delete(context.Context, *DeleteSubnetRequest) (*operation.Operation, error) + // List operations for the specified subnet. + ListOperations(context.Context, *ListSubnetOperationsRequest) (*ListSubnetOperationsResponse, error) +} + +func RegisterSubnetServiceServer(s *grpc.Server, srv SubnetServiceServer) { + s.RegisterService(&_SubnetService_serviceDesc, srv) +} + +func _SubnetService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubnetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubnetServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.SubnetService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubnetServiceServer).Get(ctx, req.(*GetSubnetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SubnetService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSubnetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubnetServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.SubnetService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubnetServiceServer).List(ctx, req.(*ListSubnetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SubnetService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSubnetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubnetServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.SubnetService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubnetServiceServer).Create(ctx, req.(*CreateSubnetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SubnetService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSubnetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubnetServiceServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.SubnetService/Update", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(SubnetServiceServer).Update(ctx, req.(*UpdateSubnetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SubnetService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSubnetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubnetServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.SubnetService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubnetServiceServer).Delete(ctx, req.(*DeleteSubnetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SubnetService_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSubnetOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubnetServiceServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/yandex.cloud.vpc.v1.SubnetService/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubnetServiceServer).ListOperations(ctx, req.(*ListSubnetOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SubnetService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "yandex.cloud.vpc.v1.SubnetService", + HandlerType: (*SubnetServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _SubnetService_Get_Handler, + }, + { + MethodName: "List", + Handler: _SubnetService_List_Handler, + }, + { + MethodName: "Create", + Handler: _SubnetService_Create_Handler, + }, + { + MethodName: "Update", + Handler: _SubnetService_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _SubnetService_Delete_Handler, + }, + { + MethodName: "ListOperations", + Handler: _SubnetService_ListOperations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "yandex/cloud/vpc/v1/subnet_service.proto", +} + +func init() { + proto.RegisterFile("yandex/cloud/vpc/v1/subnet_service.proto", fileDescriptor_subnet_service_c165cb64a0d126f6) +} + +var fileDescriptor_subnet_service_c165cb64a0d126f6 = []byte{ + // 1063 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x73, 0xdb, 0x44, + 0x14, 0x1e, 0xc5, 0x8e, 0x12, 0x3f, 0xa7, 0x69, 0x58, 0x87, 0xc1, 0xa3, 0x34, 0x8c, 0x23, 0x20, + 0x35, 0x2e, 0x92, 0x2c, 0x27, 0xf1, 0x90, 0x26, 0x19, 0xc0, 0xa5, 0x14, 0xcf, 0xb4, 0x03, 0xa3, + 0x84, 0x0b, 0x99, 0x8e, 0x47, 0xb6, 0x36, 0x46, 0x63, 0x45, 0x12, 0xd2, 0xda, 0x34, 0x2e, 0x3d, + 0xd0, 0x1b, 0x99, 0xe1, 0xd4, 0x23, 0x07, 0x4e, 0x9c, 0xb8, 0xe5, 0xc0, 0x85, 0x1f, 0x90, 0x9c, + 0xcb, 0x95, 0x23, 0x07, 0xce, 0x3d, 0x72, 0x62, 0xb4, 0x2b, 0x3b, 0xb2, 0x2d, 0x3b, 0xa6, 0xd0, + 0x9b, 0x56, 0xef, 0x7b, 0x6f, 0xbf, 0x7d, 0xfb, 0xbd, 0xf7, 0x16, 0xf2, 0x27, 0xba, 0x6d, 0xe0, + 0x47, 0x4a, 0xc3, 0x72, 0xda, 0x86, 0xd2, 0x71, 0x1b, 0x4a, 0x47, 0x55, 0xfc, 0x76, 0xdd, 0xc6, + 0xa4, 0xe6, 0x63, 0xaf, 0x63, 0x36, 0xb0, 0xec, 0x7a, 0x0e, 0x71, 0x50, 0x86, 0x21, 0x65, 0x8a, + 0x94, 0x3b, 0x6e, 0x43, 0xee, 0xa8, 0xc2, 0x8d, 0xa6, 0xe3, 0x34, 0x2d, 0xac, 0xe8, 0xae, 0xa9, + 0xe8, 0xb6, 0xed, 0x10, 0x9d, 0x98, 0x8e, 0xed, 0x33, 0x17, 0x21, 0x17, 
0x5a, 0xe9, 0xaa, 0xde, + 0x3e, 0x52, 0x8e, 0x4c, 0x6c, 0x19, 0xb5, 0x63, 0xdd, 0x6f, 0x85, 0x08, 0x21, 0xdc, 0x3e, 0xf0, + 0x77, 0x5c, 0xec, 0x51, 0xf7, 0x9e, 0xf7, 0x78, 0x6a, 0x21, 0x62, 0x7d, 0x00, 0xd1, 0xf7, 0x1f, + 0x89, 0xb4, 0x3a, 0x18, 0x49, 0xb7, 0x4c, 0x23, 0x62, 0x16, 0xf7, 0x60, 0xe9, 0x1e, 0x26, 0xfb, + 0x34, 0xb2, 0x86, 0xbf, 0x6e, 0x63, 0x9f, 0xa0, 0x77, 0x21, 0x15, 0x66, 0xc1, 0x34, 0xb2, 0x5c, + 0x8e, 0xcb, 0xa7, 0x2a, 0x0b, 0x7f, 0x9d, 0xab, 0xdc, 0xe9, 0x85, 0x9a, 0xdc, 0xdd, 0xdb, 0x2a, + 0x6a, 0xf3, 0xcc, 0x5c, 0x35, 0xc4, 0x5f, 0x39, 0x40, 0xf7, 0x4d, 0x3f, 0x0c, 0xe0, 0x47, 0x22, + 0x1c, 0x39, 0x96, 0x81, 0xbd, 0xb1, 0x11, 0x98, 0xb9, 0x6a, 0xa0, 0x9b, 0x90, 0x72, 0xf5, 0x26, + 0xae, 0xf9, 0x66, 0x17, 0x67, 0x67, 0x72, 0x5c, 0x3e, 0x51, 0x81, 0xbf, 0xcf, 0x55, 0x7e, 0x77, + 0x4f, 0x2d, 0x16, 0x8b, 0xda, 0x7c, 0x60, 0xdc, 0x37, 0xbb, 0x18, 0xe5, 0x01, 0x28, 0x90, 0x38, + 0x2d, 0x6c, 0x67, 0x13, 0x34, 0x68, 0xea, 0xf4, 0x42, 0x9d, 0xa5, 0x48, 0x8d, 0x46, 0x39, 0x08, + 0x6c, 0x48, 0x04, 0xfe, 0xc8, 0xb4, 0x08, 0xf6, 0xb2, 0x49, 0x8a, 0x82, 0xd3, 0x8b, 0x7e, 0xbc, + 0xd0, 0x22, 0x12, 0xc8, 0x0c, 0xf0, 0xf6, 0x5d, 0xc7, 0xf6, 0x31, 0xda, 0x82, 0x39, 0x76, 0x36, + 0x3f, 0xcb, 0xe5, 0x12, 0xf9, 0x74, 0x69, 0x45, 0x8e, 0xb9, 0x7a, 0x39, 0xcc, 0x57, 0x0f, 0x8b, + 0xd6, 0xe1, 0xba, 0x8d, 0x1f, 0x91, 0x5a, 0x84, 0x60, 0x70, 0x94, 0x94, 0x76, 0x2d, 0xf8, 0xfd, + 0x79, 0x8f, 0x99, 0xf8, 0x5b, 0x12, 0x32, 0x77, 0x3c, 0xac, 0x13, 0x3c, 0x92, 0xf1, 0x69, 0xf3, + 0xb5, 0x05, 0x49, 0x5b, 0x3f, 0x66, 0xa9, 0x4a, 0x55, 0xd6, 0x5e, 0x9c, 0xab, 0xab, 0xdf, 0x1e, + 0xea, 0x52, 0xf7, 0xe1, 0xa1, 0xa4, 0x4b, 0xdd, 0xa2, 0xb4, 0xfd, 0xf0, 0xb1, 0xfa, 0x5e, 0x59, + 0x7d, 0x72, 0x18, 0xae, 0x34, 0x0a, 0x47, 0xb7, 0x20, 0x6d, 0x60, 0xbf, 0xe1, 0x99, 0x6e, 0x70, + 0xf9, 0x83, 0xe9, 0x2b, 0x6d, 0x95, 0xb5, 0xa8, 0x15, 0xfd, 0xc0, 0x01, 0x6f, 0xe9, 0x75, 0x6c, + 0xf9, 0xd9, 0x24, 0xcd, 0xc2, 0x66, 0x6c, 0x16, 0x62, 0x4e, 0x22, 0xdf, 0xa7, 0x6e, 0x77, 0x6d, + 0xe2, 0x9d, 0x54, 0x3e, 0x78, 0x71, 0xae, 0xa6, 0x0f, 0xa5, 0x5a, 0x51, 0xda, 0x0e, 0x08, 0x16, + 0x9e, 0xd2, 0xd3, 0x94, 0x37, 0xd9, 0xa9, 0xca, 0x1b, 0x67, 0x17, 0x2a, 0x2f, 0x24, 0x55, 0x89, + 0x7e, 0x21, 0xb4, 0x14, 0x1e, 0xa3, 0x8f, 0xd7, 0x42, 0x12, 0xe8, 0x16, 0x80, 0x8d, 0xc9, 0x37, + 0x8e, 0xd7, 0x0a, 0xf2, 0x33, 0x1b, 0x93, 0x9f, 0x54, 0x68, 0xaf, 0x1a, 0x68, 0x0d, 0xe6, 0xba, + 0x8e, 0x8d, 0x03, 0x24, 0x4f, 0x91, 0xf3, 0x7d, 0x14, 0x1f, 0x18, 0xaa, 0x06, 0x7a, 0x1b, 0x16, + 0x3b, 0x9b, 0xb5, 0x86, 0x69, 0x78, 0xb5, 0xba, 0xe5, 0x34, 0x5a, 0x7e, 0x76, 0x2e, 0x97, 0xc8, + 0xa7, 0xb4, 0x85, 0xce, 0xe6, 0x1d, 0xd3, 0xf0, 0x2a, 0xf4, 0x1f, 0x45, 0x95, 0x07, 0x50, 0xf3, + 0x21, 0xaa, 0x1c, 0x41, 0xc9, 0xb0, 0xe8, 0x39, 0x6d, 0x82, 0x6b, 0x44, 0xaf, 0x5b, 0x74, 0xd7, + 0xd4, 0xd0, 0xae, 0x0b, 0xd4, 0x7e, 0x10, 0x98, 0xab, 0x86, 0xb0, 0x0d, 0xe9, 0x48, 0x8e, 0xd0, + 0x12, 0x24, 0x5a, 0xf8, 0x84, 0xdd, 0xb9, 0x16, 0x7c, 0xa2, 0x65, 0x98, 0xed, 0xe8, 0x56, 0x3b, + 0xbc, 0x61, 0x8d, 0x2d, 0x6e, 0xcf, 0xbc, 0xcf, 0x89, 0x1b, 0xb0, 0x1c, 0x4d, 0xf9, 0x03, 0x4c, + 0x74, 0x43, 0x27, 0x3a, 0x5a, 0x19, 0xa9, 0xd7, 0x48, 0x85, 0xfe, 0x91, 0x80, 0xcc, 0x17, 0xae, + 0x11, 0x27, 0xb9, 0x29, 0x8b, 0x1c, 0xed, 0x40, 0xba, 0x4d, 0x23, 0xd0, 0xee, 0x45, 0x79, 0xa5, + 0x4b, 0x82, 0xcc, 0x1a, 0x9c, 0xdc, 0x6b, 0x70, 0xf2, 0x27, 0x41, 0x83, 0x7b, 0xa0, 0xfb, 0x2d, + 0x0d, 0x18, 0x3c, 0xf8, 0xee, 0xeb, 0x35, 0xf1, 0x9f, 0xf4, 0x9a, 0x9c, 0x56, 0xaf, 0xb3, 0x13, + 0xf4, 0x1a, 0x93, 0x86, 0x57, 0xa3, 0xd7, 0x51, 0x4d, 0xf0, 0xaf, 0x50, 0x13, 0xd1, 0x63, 0x4d, + 
0xa7, 0x89, 0x0f, 0x21, 0xf3, 0x31, 0xb6, 0xf0, 0xcb, 0x4b, 0x22, 0xd8, 0x36, 0x1a, 0x61, 0xba, + 0x6d, 0x7f, 0xe4, 0x60, 0xe5, 0xb2, 0xe9, 0x7e, 0xd6, 0x1b, 0x54, 0xfe, 0x4b, 0x48, 0xf2, 0xff, + 0x9f, 0x1a, 0xe2, 0xf7, 0x1c, 0xdc, 0x88, 0x67, 0x17, 0xce, 0x86, 0x8f, 0x00, 0xfa, 0xc3, 0xb5, + 0x37, 0x1e, 0xd6, 0x06, 0x85, 0x76, 0x39, 0x7c, 0xfb, 0xfe, 0x5a, 0xc4, 0x69, 0xda, 0x39, 0x51, + 0xfa, 0x6e, 0x0e, 0xae, 0x31, 0x1e, 0xfb, 0xec, 0x1d, 0x82, 0x6c, 0x48, 0xdc, 0xc3, 0x04, 0xbd, + 0x13, 0x2b, 0xec, 0xe1, 0x09, 0x2e, 0x4c, 0x9a, 0x5a, 0xe2, 0x5b, 0x4f, 0x7f, 0xff, 0xf3, 0xd9, + 0xcc, 0x2a, 0x5a, 0x19, 0x7c, 0x57, 0xf8, 0xca, 0xe3, 0x7e, 0xf6, 0x9f, 0x20, 0x0f, 0x92, 0x41, + 0x32, 0xd0, 0xcd, 0xd8, 0x48, 0xa3, 0x23, 0x5f, 0xc8, 0x5f, 0x0d, 0x64, 0x79, 0x14, 0xdf, 0xa0, + 0xfb, 0xbf, 0x86, 0xae, 0x0f, 0xed, 0x8f, 0x9e, 0x71, 0xc0, 0xb3, 0x06, 0x87, 0xf2, 0xd3, 0x0e, + 0x1c, 0xe1, 0xea, 0x1b, 0x10, 0x77, 0xcf, 0x9e, 0x17, 0xde, 0x1c, 0xd3, 0x3b, 0x79, 0xb6, 0xa6, + 0x94, 0x96, 0xc5, 0x61, 0x4a, 0xb7, 0xb9, 0x02, 0xfa, 0x89, 0x03, 0x9e, 0x95, 0xd8, 0x18, 0x56, + 0x31, 0x6d, 0x65, 0x1a, 0x56, 0x9f, 0x32, 0x56, 0xb1, 0xd5, 0x1b, 0x65, 0x95, 0x2b, 0x4d, 0xba, + 0xa8, 0x80, 0xe1, 0x2f, 0x1c, 0xf0, 0xac, 0x1a, 0xc7, 0x30, 0x8c, 0x29, 0xf6, 0x69, 0x18, 0x1e, + 0x9c, 0x3d, 0x2f, 0x48, 0x63, 0x0a, 0xfd, 0xf5, 0xe1, 0xee, 0x7f, 0xf7, 0xd8, 0x25, 0x27, 0x4c, + 0x59, 0x85, 0x89, 0xca, 0xfa, 0x99, 0x83, 0xc5, 0x40, 0x16, 0x97, 0x15, 0x86, 0x8a, 0x57, 0x68, + 0x67, 0xa4, 0x55, 0x08, 0xea, 0xbf, 0xf0, 0x08, 0x65, 0x27, 0x53, 0x72, 0x79, 0xb4, 0x3e, 0x81, + 0xdc, 0xe5, 0xeb, 0xd9, 0xaf, 0xec, 0x7d, 0xb9, 0xd3, 0x34, 0xc9, 0x57, 0xed, 0xba, 0xdc, 0x70, + 0x8e, 0x15, 0xb6, 0x9d, 0xc4, 0x5e, 0xd1, 0x4d, 0x47, 0x6a, 0x62, 0x9b, 0x1e, 0x5d, 0x89, 0x79, + 0xa8, 0xef, 0x74, 0xdc, 0x46, 0x9d, 0xa7, 0xe6, 0x8d, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x08, + 0x43, 0xd1, 0xd5, 0x65, 0x0c, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/AUTHORS b/vendor/github.com/yandex-cloud/go-sdk/AUTHORS new file mode 100644 index 000000000..382a1c868 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/AUTHORS @@ -0,0 +1,11 @@ +The following authors have created the source code of "Yandex.Cloud Go SDK" published and distributed by YANDEX LLC as the owner: + +Alexey Baranov +Andrey Kraynov +Dmitry Novikov +Gennady Lipenkov +Luba Grinkevich +Maxim Kolganov +Rurik Krylov +Vasilii Briginets <0x40@yandex-team.ru> +Vladimir Skipor \ No newline at end of file diff --git a/vendor/github.com/yandex-cloud/go-sdk/CONTRIBUTING.md b/vendor/github.com/yandex-cloud/go-sdk/CONTRIBUTING.md new file mode 100644 index 000000000..f2dccda23 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/CONTRIBUTING.md @@ -0,0 +1,35 @@ +# Notice to external contributors + + +## General info + +Hello! In order for us (YANDEX LLC) to accept patches and other contributions from you, you will have to adopt our Yandex Contributor License Agreement (the “**CLA**”). The current version of the CLA can be found here: +1) https://yandex.ru/legal/cla/?lang=en (in English) and +2) https://yandex.ru/legal/cla/?lang=ru (in Russian). 
+ +By adopting the CLA, you state the following: + +* You obviously wish and are willingly licensing your contributions to us for our open source projects under the terms of the CLA, +* You have read the terms and conditions of the CLA and agree with them in full, +* You are legally able to provide and license your contributions as stated, +* We may use your contributions for our open source projects and for any other our project too, +* We rely on your assurances concerning the rights of third parties in relation to your contributions. + +If you agree with these principles, please read and adopt our CLA. By providing us your contributions, you hereby declare that you have already read and adopt our CLA, and we may freely merge your contributions with our corresponding open source project and use it in further in accordance with terms and conditions of the CLA. + +## Provide contributions + +If you have already adopted terms and conditions of the CLA, you are able to provide your contributions. When you submit your pull request, please add the following information into it: + +``` +I hereby agree to the terms of the CLA available at: [link]. +``` + +Replace the bracketed text as follows: +* [link] is the link to the current version of the CLA: https://yandex.ru/legal/cla/?lang=en (in English) or https://yandex.ru/legal/cla/?lang=ru (in Russian). + +It is enough to provide us such notification once. + +## Other questions + +If you have any questions, please mail us at opensource@yandex-team.ru. \ No newline at end of file diff --git a/vendor/github.com/yandex-cloud/go-sdk/LICENSE b/vendor/github.com/yandex-cloud/go-sdk/LICENSE new file mode 100644 index 000000000..0cd74fabf --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 YANDEX LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/yandex-cloud/go-sdk/README.md b/vendor/github.com/yandex-cloud/go-sdk/README.md new file mode 100644 index 000000000..dbc4b1065 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/README.md @@ -0,0 +1,31 @@ +# Yandex.Cloud Go SDK + +[![GoDoc](https://godoc.org/github.com/yandex-cloud/go-sdk?status.svg)](https://godoc.org/github.com/yandex-cloud/go-sdk) + +Go SDK for Yandex.Cloud services. + +**NOTE:** SDK is under development, and may make +backwards-incompatible changes. 
+ +## Installation + +```bash +go get github.com/yandex-cloud/go-sdk +``` + +## Example usages + +### Initializing SDK + +```go +sdk, err := ycsdk.Build(ctx, ycsdk.Config{ + Credentials: ycsdk.OAuthToken(token), +}) +if err != nil { + log.Fatal(err) +} +``` + +### More examples + +More examples can be found in [examples dir](examples). diff --git a/vendor/github.com/yandex-cloud/go-sdk/credentials.go b/vendor/github.com/yandex-cloud/go-sdk/credentials.go new file mode 100644 index 000000000..237c3952f --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/credentials.go @@ -0,0 +1,142 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package ycsdk + +import ( + "crypto/rsa" + "errors" + "fmt" + "time" + + jwt "github.com/dgrijalva/jwt-go" + + iampb "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + "github.com/yandex-cloud/go-sdk/iamkey" + "github.com/yandex-cloud/go-sdk/pkg/sdkerrors" +) + +const ( + // iamTokenExpiration is the refreshAfter time of an IAM token. + // For now it is a constant, but in the near future the token expiration will be returned in the response. + // See https://cloud.yandex.ru/docs/iam/concepts/authorization/iam-token for details. + iamTokenExpiration = 12 * time.Hour +) + +// Credentials is an abstraction of API authorization credentials. +// See https://cloud.yandex.ru/docs/iam/concepts/authorization/authorization for details. +// Note that functions that return Credentials may return a different Credentials implementation +// in the next SDK version, and this is not considered a breaking change. +type Credentials interface { + // YandexCloudAPICredentials is a marker method. All compatible Credentials implementations have it. + YandexCloudAPICredentials() +} + +// ExchangeableCredentials can be exchanged for an IAM Token in the IAM Token Service, which can be used +// to authorize API calls. +// For now, this is the only option to authorize API calls, but this may change in the future. +// See https://cloud.yandex.ru/docs/iam/concepts/authorization/iam-token for details. +type ExchangeableCredentials interface { + Credentials + // IAMTokenRequest returns a request for a fresh IAM token, or an error. + IAMTokenRequest() (iamTokenReq *iampb.CreateIamTokenRequest, err error) +} + +// OAuthToken returns API credentials for a user Yandex Passport OAuth token, which can be obtained +// on the page https://oauth.yandex.ru/authorize?response_type=token&client_id=1a6990aa636648e9b2ef855fa7bec2fb +// See https://cloud.yandex.ru/docs/iam/concepts/authorization/oauth-token for details. +func OAuthToken(token string) Credentials { + return exchangeableCredentialsFunc(func() (*iampb.CreateIamTokenRequest, error) { + return &iampb.CreateIamTokenRequest{ + Identity: &iampb.CreateIamTokenRequest_YandexPassportOauthToken{ + YandexPassportOauthToken: token, + }, + }, nil + }) +} + +type exchangeableCredentialsFunc func() (iamTokenReq *iampb.CreateIamTokenRequest, err error) + +var _ ExchangeableCredentials = (exchangeableCredentialsFunc)(nil) + +func (exchangeableCredentialsFunc) YandexCloudAPICredentials() {} + +func (f exchangeableCredentialsFunc) IAMTokenRequest() (iamTokenReq *iampb.CreateIamTokenRequest, err error) { + return f() +} + +// ServiceAccountKey returns credentials for the given IAM Key. The key is used to sign JWT tokens. +// JWT tokens are exchanged for IAM Tokens used to authorize API calls. +// This authorization method is not supported for IAM Keys issued for User Accounts.
+func ServiceAccountKey(key *iamkey.Key) (Credentials, error) { + jwtBuilder, err := newServiceAccountJWTBuilder(key) + if err != nil { + return nil, err + } + return exchangeableCredentialsFunc(func() (*iampb.CreateIamTokenRequest, error) { + signedJWT, err := jwtBuilder.SignedToken() + if err != nil { + return nil, sdkerrors.WithMessage(err, "JWT sign failed") + } + return &iampb.CreateIamTokenRequest{ + Identity: &iampb.CreateIamTokenRequest_Jwt{ + Jwt: signedJWT, + }, + }, nil + }), nil +} + +func newServiceAccountJWTBuilder(key *iamkey.Key) (*serviceAccountJWTBuilder, error) { + err := validateServiceAccountKey(key) + if err != nil { + return nil, sdkerrors.WithMessage(err, "key validation failed") + } + rsaPrivateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(key.PrivateKey)) + if err != nil { + return nil, sdkerrors.WithMessage(err, "private key parsing failed") + } + return &serviceAccountJWTBuilder{ + key: key, + rsaPrivateKey: rsaPrivateKey, + }, nil +} + +func validateServiceAccountKey(key *iamkey.Key) error { + if key.Id == "" { + return errors.New("key id is missing") + } + if key.GetServiceAccountId() == "" { + return fmt.Errorf("key should de issued for service account, but subject is %#v", key.Subject) + } + return nil +} + +type serviceAccountJWTBuilder struct { + key *iamkey.Key + rsaPrivateKey *rsa.PrivateKey +} + +func (b *serviceAccountJWTBuilder) SignedToken() (string, error) { + return b.issueToken().SignedString(b.rsaPrivateKey) +} + +func (b *serviceAccountJWTBuilder) issueToken() *jwt.Token { + issuedAt := time.Now() + token := jwt.NewWithClaims(jwtSigningMethodPS256WithSaltLengthEqualsHash, jwt.StandardClaims{ + Issuer: b.key.GetServiceAccountId(), + IssuedAt: issuedAt.Unix(), + ExpiresAt: issuedAt.Add(time.Hour).Unix(), + Audience: "https://iam.api.cloud.yandex.net/iam/v1/tokens", + }) + token.Header["kid"] = b.key.Id + return token +} + +// NOTE(skipor): by default, Go RSA PSS uses PSSSaltLengthAuto, which is not accepted by jwt.io and some python libraries. +// Should be removed after https://github.com/dgrijalva/jwt-go/issues/285 fix. +var jwtSigningMethodPS256WithSaltLengthEqualsHash = &jwt.SigningMethodRSAPSS{ + SigningMethodRSA: jwt.SigningMethodPS256.SigningMethodRSA, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/dial/LICENSE b/vendor/github.com/yandex-cloud/go-sdk/dial/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/dial/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/yandex-cloud/go-sdk/dial/dialer.go b/vendor/github.com/yandex-cloud/go-sdk/dial/dialer.go new file mode 100644 index 000000000..de7c17ef4 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/dial/dialer.go @@ -0,0 +1,41 @@ +// Copyright (c) 2019 Yandex LLC. All rights reserved. 
+// Author: Vladimir Skipor + +package dial + +import ( + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +type DialFunc = func(context.Context, string) (net.Conn, error) + +func NewDialer() DialFunc { + return func(ctx context.Context, target string) (net.Conn, error) { + dialer := &net.Dialer{ + DualStack: true, + } + net, addr := parseDialTarget(target) + + deadline, ok := ctx.Deadline() + if ok { + grpclog.Infof("Dialing %s with timeout %s", target, time.Until(deadline)) + } else { + grpclog.Infof("Dialing %s without deadline", target) + } + + conn, err := dialer.DialContext(ctx, net, addr) + if err != nil { + grpclog.Warningf("Dial %s failed: %s", target, err) + return nil, err + } + grpclog.Warningf("Dial %s successfully connected to: %s", target, conn.RemoteAddr()) + return conn, nil + } +} + +const grpcUA = "grpc-go/" + grpc.Version diff --git a/vendor/github.com/yandex-cloud/go-sdk/dial/proxy.go b/vendor/github.com/yandex-cloud/go-sdk/dial/proxy.go new file mode 100644 index 000000000..b5a8d0588 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/dial/proxy.go @@ -0,0 +1,183 @@ +// NOTE(skipor): code taken from google.golang.org/grpc/proxy.go and google.golang.org/grpc/rpc_util.go +// Modifications: +// * rename newProxyDialer to NewProxyDialer +// * log when using a proxy + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dial + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +// NewProxyDialer returns a dialer that connects to a proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dials to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func NewProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var skipHandshake bool + newAddr, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + skipHandshake = true + newAddr = addr + } + + if addr != newAddr { + grpclog.Infof("Using proxy %s for dialing %s", newAddr, addr) + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if !skipHandshake { + conn, err = doHTTPConnectHandshake(ctx, conn, addr) + } + return + } +} + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests.
+ httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (string, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return "", err + } + if url == nil { + return "", errDisabled + } + return url.Host, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := (&http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: addr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + }) + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/errdetails.go b/vendor/github.com/yandex-cloud/go-sdk/errdetails.go new file mode 100644 index 000000000..bbc0bab19 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/errdetails.go @@ -0,0 +1,10 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package ycsdk + +import ( + // Import error details, so they could be decoded from error details Any's and appear in + // debug log. 
+ _ "google.golang.org/genproto/googleapis/rpc/errdetails" +) diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/apiendpoint.go b/vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/apiendpoint.go new file mode 100644 index 000000000..a24dde974 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/apiendpoint.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package endpoint + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint" +) + +// ApiEndpointServiceClient is a endpoint.ApiEndpointServiceClient with +// lazy GRPC connection initialization. +type ApiEndpointServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ endpoint.ApiEndpointServiceClient = &ApiEndpointServiceClient{} + +// Get implements endpoint.ApiEndpointServiceClient +func (c *ApiEndpointServiceClient) Get(ctx context.Context, in *endpoint.GetApiEndpointRequest, opts ...grpc.CallOption) (*endpoint.ApiEndpoint, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return endpoint.NewApiEndpointServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements endpoint.ApiEndpointServiceClient +func (c *ApiEndpointServiceClient) List(ctx context.Context, in *endpoint.ListApiEndpointsRequest, opts ...grpc.CallOption) (*endpoint.ListApiEndpointsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return endpoint.NewApiEndpointServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/endpoint_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/endpoint_group.go new file mode 100644 index 000000000..f4c3fffb8 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/apiendpoint/endpoint_group.go @@ -0,0 +1,24 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package endpoint + +import ( + "context" + + "google.golang.org/grpc" +) + +// APIEndpoint provides access to "endpoint" component of Yandex.Cloud +type APIEndpoint struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewAPIEndpoint creates instance of APIEndpoint +func NewAPIEndpoint(g func(ctx context.Context) (*grpc.ClientConn, error)) *APIEndpoint { + return &APIEndpoint{g} +} + +// ApiEndpoint gets ApiEndpointService client +func (a *APIEndpoint) ApiEndpoint() *ApiEndpointServiceClient { + return &ApiEndpointServiceClient{getConn: a.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/compute_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/compute_group.go new file mode 100644 index 000000000..ee4d17bff --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/compute_group.go @@ -0,0 +1,49 @@ +// Code generated by sdkgen. DO NOT EDIT. 
+ +package compute + +import ( + "context" + + "google.golang.org/grpc" +) + +// Compute provides access to "compute" component of Yandex.Cloud +type Compute struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewCompute creates instance of Compute +func NewCompute(g func(ctx context.Context) (*grpc.ClientConn, error)) *Compute { + return &Compute{g} +} + +// Disk gets DiskService client +func (c *Compute) Disk() *DiskServiceClient { + return &DiskServiceClient{getConn: c.getConn} +} + +// DiskType gets DiskTypeService client +func (c *Compute) DiskType() *DiskTypeServiceClient { + return &DiskTypeServiceClient{getConn: c.getConn} +} + +// Image gets ImageService client +func (c *Compute) Image() *ImageServiceClient { + return &ImageServiceClient{getConn: c.getConn} +} + +// Instance gets InstanceService client +func (c *Compute) Instance() *InstanceServiceClient { + return &InstanceServiceClient{getConn: c.getConn} +} + +// Snapshot gets SnapshotService client +func (c *Compute) Snapshot() *SnapshotServiceClient { + return &SnapshotServiceClient{getConn: c.getConn} +} + +// Zone gets ZoneService client +func (c *Compute) Zone() *ZoneServiceClient { + return &ZoneServiceClient{getConn: c.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/disk.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/disk.go new file mode 100644 index 000000000..2519ac40d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/disk.go @@ -0,0 +1,75 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package compute + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// DiskServiceClient is a compute.DiskServiceClient with +// lazy GRPC connection initialization. +type DiskServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ compute.DiskServiceClient = &DiskServiceClient{} + +// Create implements compute.DiskServiceClient +func (c *DiskServiceClient) Create(ctx context.Context, in *compute.CreateDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements compute.DiskServiceClient +func (c *DiskServiceClient) Delete(ctx context.Context, in *compute.DeleteDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements compute.DiskServiceClient +func (c *DiskServiceClient) Get(ctx context.Context, in *compute.GetDiskRequest, opts ...grpc.CallOption) (*compute.Disk, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements compute.DiskServiceClient +func (c *DiskServiceClient) List(ctx context.Context, in *compute.ListDisksRequest, opts ...grpc.CallOption) (*compute.ListDisksResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskServiceClient(conn).List(ctx, in, opts...) 
+} + +// ListOperations implements compute.DiskServiceClient +func (c *DiskServiceClient) ListOperations(ctx context.Context, in *compute.ListDiskOperationsRequest, opts ...grpc.CallOption) (*compute.ListDiskOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Update implements compute.DiskServiceClient +func (c *DiskServiceClient) Update(ctx context.Context, in *compute.UpdateDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/disktype.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/disktype.go new file mode 100644 index 000000000..454e5ea88 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/disktype.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package compute + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" +) + +// DiskTypeServiceClient is a compute.DiskTypeServiceClient with +// lazy GRPC connection initialization. +type DiskTypeServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ compute.DiskTypeServiceClient = &DiskTypeServiceClient{} + +// Get implements compute.DiskTypeServiceClient +func (c *DiskTypeServiceClient) Get(ctx context.Context, in *compute.GetDiskTypeRequest, opts ...grpc.CallOption) (*compute.DiskType, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskTypeServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements compute.DiskTypeServiceClient +func (c *DiskTypeServiceClient) List(ctx context.Context, in *compute.ListDiskTypesRequest, opts ...grpc.CallOption) (*compute.ListDiskTypesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewDiskTypeServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/image.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/image.go new file mode 100644 index 000000000..94f42a713 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/image.go @@ -0,0 +1,84 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package compute + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// ImageServiceClient is a compute.ImageServiceClient with +// lazy GRPC connection initialization. +type ImageServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ compute.ImageServiceClient = &ImageServiceClient{} + +// Create implements compute.ImageServiceClient +func (c *ImageServiceClient) Create(ctx context.Context, in *compute.CreateImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).Create(ctx, in, opts...) 
+} + +// Delete implements compute.ImageServiceClient +func (c *ImageServiceClient) Delete(ctx context.Context, in *compute.DeleteImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements compute.ImageServiceClient +func (c *ImageServiceClient) Get(ctx context.Context, in *compute.GetImageRequest, opts ...grpc.CallOption) (*compute.Image, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).Get(ctx, in, opts...) +} + +// GetLatestByFamily implements compute.ImageServiceClient +func (c *ImageServiceClient) GetLatestByFamily(ctx context.Context, in *compute.GetImageLatestByFamilyRequest, opts ...grpc.CallOption) (*compute.Image, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).GetLatestByFamily(ctx, in, opts...) +} + +// List implements compute.ImageServiceClient +func (c *ImageServiceClient) List(ctx context.Context, in *compute.ListImagesRequest, opts ...grpc.CallOption) (*compute.ListImagesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).List(ctx, in, opts...) +} + +// ListOperations implements compute.ImageServiceClient +func (c *ImageServiceClient) ListOperations(ctx context.Context, in *compute.ListImageOperationsRequest, opts ...grpc.CallOption) (*compute.ListImageOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Update implements compute.ImageServiceClient +func (c *ImageServiceClient) Update(ctx context.Context, in *compute.UpdateImageRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewImageServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/instance.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/instance.go new file mode 100644 index 000000000..7eb2678fc --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/instance.go @@ -0,0 +1,138 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package compute + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// InstanceServiceClient is a compute.InstanceServiceClient with +// lazy GRPC connection initialization. +type InstanceServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ compute.InstanceServiceClient = &InstanceServiceClient{} + +// AttachDisk implements compute.InstanceServiceClient +func (c *InstanceServiceClient) AttachDisk(ctx context.Context, in *compute.AttachInstanceDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).AttachDisk(ctx, in, opts...) 
+} + +// Create implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Create(ctx context.Context, in *compute.CreateInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Delete(ctx context.Context, in *compute.DeleteInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Delete(ctx, in, opts...) +} + +// DetachDisk implements compute.InstanceServiceClient +func (c *InstanceServiceClient) DetachDisk(ctx context.Context, in *compute.DetachInstanceDiskRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).DetachDisk(ctx, in, opts...) +} + +// Get implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Get(ctx context.Context, in *compute.GetInstanceRequest, opts ...grpc.CallOption) (*compute.Instance, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Get(ctx, in, opts...) +} + +// GetSerialPortOutput implements compute.InstanceServiceClient +func (c *InstanceServiceClient) GetSerialPortOutput(ctx context.Context, in *compute.GetInstanceSerialPortOutputRequest, opts ...grpc.CallOption) (*compute.GetInstanceSerialPortOutputResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).GetSerialPortOutput(ctx, in, opts...) +} + +// List implements compute.InstanceServiceClient +func (c *InstanceServiceClient) List(ctx context.Context, in *compute.ListInstancesRequest, opts ...grpc.CallOption) (*compute.ListInstancesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).List(ctx, in, opts...) +} + +// ListOperations implements compute.InstanceServiceClient +func (c *InstanceServiceClient) ListOperations(ctx context.Context, in *compute.ListInstanceOperationsRequest, opts ...grpc.CallOption) (*compute.ListInstanceOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Restart implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Restart(ctx context.Context, in *compute.RestartInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Restart(ctx, in, opts...) +} + +// Start implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Start(ctx context.Context, in *compute.StartInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Start(ctx, in, opts...) 
+} + +// Stop implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Stop(ctx context.Context, in *compute.StopInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Stop(ctx, in, opts...) +} + +// Update implements compute.InstanceServiceClient +func (c *InstanceServiceClient) Update(ctx context.Context, in *compute.UpdateInstanceRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).Update(ctx, in, opts...) +} + +// UpdateMetadata implements compute.InstanceServiceClient +func (c *InstanceServiceClient) UpdateMetadata(ctx context.Context, in *compute.UpdateInstanceMetadataRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewInstanceServiceClient(conn).UpdateMetadata(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/snapshot.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/snapshot.go new file mode 100644 index 000000000..cf71d55ea --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/snapshot.go @@ -0,0 +1,75 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package compute + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// SnapshotServiceClient is a compute.SnapshotServiceClient with +// lazy GRPC connection initialization. +type SnapshotServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ compute.SnapshotServiceClient = &SnapshotServiceClient{} + +// Create implements compute.SnapshotServiceClient +func (c *SnapshotServiceClient) Create(ctx context.Context, in *compute.CreateSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewSnapshotServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements compute.SnapshotServiceClient +func (c *SnapshotServiceClient) Delete(ctx context.Context, in *compute.DeleteSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewSnapshotServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements compute.SnapshotServiceClient +func (c *SnapshotServiceClient) Get(ctx context.Context, in *compute.GetSnapshotRequest, opts ...grpc.CallOption) (*compute.Snapshot, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewSnapshotServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements compute.SnapshotServiceClient +func (c *SnapshotServiceClient) List(ctx context.Context, in *compute.ListSnapshotsRequest, opts ...grpc.CallOption) (*compute.ListSnapshotsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewSnapshotServiceClient(conn).List(ctx, in, opts...) 
+} + +// ListOperations implements compute.SnapshotServiceClient +func (c *SnapshotServiceClient) ListOperations(ctx context.Context, in *compute.ListSnapshotOperationsRequest, opts ...grpc.CallOption) (*compute.ListSnapshotOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewSnapshotServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Update implements compute.SnapshotServiceClient +func (c *SnapshotServiceClient) Update(ctx context.Context, in *compute.UpdateSnapshotRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewSnapshotServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/compute/zone.go b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/zone.go new file mode 100644 index 000000000..0bba1f182 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/compute/zone.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package compute + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1" +) + +// ZoneServiceClient is a compute.ZoneServiceClient with +// lazy GRPC connection initialization. +type ZoneServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ compute.ZoneServiceClient = &ZoneServiceClient{} + +// Get implements compute.ZoneServiceClient +func (c *ZoneServiceClient) Get(ctx context.Context, in *compute.GetZoneRequest, opts ...grpc.CallOption) (*compute.Zone, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewZoneServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements compute.ZoneServiceClient +func (c *ZoneServiceClient) List(ctx context.Context, in *compute.ListZonesRequest, opts ...grpc.CallOption) (*compute.ListZonesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return compute.NewZoneServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/accesskey.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/accesskey.go new file mode 100644 index 000000000..8c4609677 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/accesskey.go @@ -0,0 +1,57 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package awscompatibility + +import ( + "context" + + "github.com/golang/protobuf/ptypes/empty" + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility" +) + +// AccessKeyServiceClient is a awscompatibility.AccessKeyServiceClient with +// lazy GRPC connection initialization. +type AccessKeyServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ awscompatibility.AccessKeyServiceClient = &AccessKeyServiceClient{} + +// Create implements awscompatibility.AccessKeyServiceClient +func (c *AccessKeyServiceClient) Create(ctx context.Context, in *awscompatibility.CreateAccessKeyRequest, opts ...grpc.CallOption) (*awscompatibility.CreateAccessKeyResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return awscompatibility.NewAccessKeyServiceClient(conn).Create(ctx, in, opts...) 
+} + +// Delete implements awscompatibility.AccessKeyServiceClient +func (c *AccessKeyServiceClient) Delete(ctx context.Context, in *awscompatibility.DeleteAccessKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return awscompatibility.NewAccessKeyServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements awscompatibility.AccessKeyServiceClient +func (c *AccessKeyServiceClient) Get(ctx context.Context, in *awscompatibility.GetAccessKeyRequest, opts ...grpc.CallOption) (*awscompatibility.AccessKey, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return awscompatibility.NewAccessKeyServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements awscompatibility.AccessKeyServiceClient +func (c *AccessKeyServiceClient) List(ctx context.Context, in *awscompatibility.ListAccessKeysRequest, opts ...grpc.CallOption) (*awscompatibility.ListAccessKeysResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return awscompatibility.NewAccessKeyServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/awscompatibility_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/awscompatibility_group.go new file mode 100644 index 000000000..7bb4bd627 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility/awscompatibility_group.go @@ -0,0 +1,24 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package awscompatibility + +import ( + "context" + + "google.golang.org/grpc" +) + +// AWSCompatibility provides access to "awscompatibility" component of Yandex.Cloud +type AWSCompatibility struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewAWSCompatibility creates instance of AWSCompatibility +func NewAWSCompatibility(g func(ctx context.Context) (*grpc.ClientConn, error)) *AWSCompatibility { + return &AWSCompatibility{g} +} + +// AccessKey gets AccessKeyService client +func (a *AWSCompatibility) AccessKey() *AccessKeyServiceClient { + return &AccessKeyServiceClient{getConn: a.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_compatibility_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_compatibility_group.go new file mode 100644 index 000000000..0da6a0ba4 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_compatibility_group.go @@ -0,0 +1,12 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package iam + +import ( + "github.com/yandex-cloud/go-sdk/gen/iam/awscompatibility" +) + +func (i *IAM) AWSCompatibility() *awscompatibility.AWSCompatibility { + return awscompatibility.NewAWSCompatibility(i.getConn) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_group.go new file mode 100644 index 000000000..318ac4b21 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iam_group.go @@ -0,0 +1,49 @@ +// Code generated by sdkgen. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "google.golang.org/grpc" +) + +// IAM provides access to "iam" component of Yandex.Cloud +type IAM struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewIAM creates instance of IAM +func NewIAM(g func(ctx context.Context) (*grpc.ClientConn, error)) *IAM { + return &IAM{g} +} + +// IamToken gets IamTokenService client +func (i *IAM) IamToken() *IamTokenServiceClient { + return &IamTokenServiceClient{getConn: i.getConn} +} + +// Role gets RoleService client +func (i *IAM) Role() *RoleServiceClient { + return &RoleServiceClient{getConn: i.getConn} +} + +// ServiceAccount gets ServiceAccountService client +func (i *IAM) ServiceAccount() *ServiceAccountServiceClient { + return &ServiceAccountServiceClient{getConn: i.getConn} +} + +// UserAccount gets UserAccountService client +func (i *IAM) UserAccount() *UserAccountServiceClient { + return &UserAccountServiceClient{getConn: i.getConn} +} + +// YandexPassportUserAccount gets YandexPassportUserAccountService client +func (i *IAM) YandexPassportUserAccount() *YandexPassportUserAccountServiceClient { + return &YandexPassportUserAccountServiceClient{getConn: i.getConn} +} + +// Key gets KeyService client +func (i *IAM) Key() *KeyServiceClient { + return &KeyServiceClient{getConn: i.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iamtoken.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iamtoken.go new file mode 100644 index 000000000..dd139606d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/iamtoken.go @@ -0,0 +1,29 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package iam + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" +) + +// IamTokenServiceClient is a iam.IamTokenServiceClient with +// lazy GRPC connection initialization. +type IamTokenServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ iam.IamTokenServiceClient = &IamTokenServiceClient{} + +// Create implements iam.IamTokenServiceClient +func (c *IamTokenServiceClient) Create(ctx context.Context, in *iam.CreateIamTokenRequest, opts ...grpc.CallOption) (*iam.CreateIamTokenResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewIamTokenServiceClient(conn).Create(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/key.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/key.go new file mode 100644 index 000000000..10b3cbef7 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/key.go @@ -0,0 +1,57 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package iam + +import ( + "context" + + "github.com/golang/protobuf/ptypes/empty" + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" +) + +// KeyServiceClient is a iam.KeyServiceClient with +// lazy GRPC connection initialization. +type KeyServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ iam.KeyServiceClient = &KeyServiceClient{} + +// Create implements iam.KeyServiceClient +func (c *KeyServiceClient) Create(ctx context.Context, in *iam.CreateKeyRequest, opts ...grpc.CallOption) (*iam.CreateKeyResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewKeyServiceClient(conn).Create(ctx, in, opts...) 
+} + +// Delete implements iam.KeyServiceClient +func (c *KeyServiceClient) Delete(ctx context.Context, in *iam.DeleteKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewKeyServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements iam.KeyServiceClient +func (c *KeyServiceClient) Get(ctx context.Context, in *iam.GetKeyRequest, opts ...grpc.CallOption) (*iam.Key, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewKeyServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements iam.KeyServiceClient +func (c *KeyServiceClient) List(ctx context.Context, in *iam.ListKeysRequest, opts ...grpc.CallOption) (*iam.ListKeysResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewKeyServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/role.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/role.go new file mode 100644 index 000000000..6fdfd033c --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/role.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package iam + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" +) + +// RoleServiceClient is a iam.RoleServiceClient with +// lazy GRPC connection initialization. +type RoleServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ iam.RoleServiceClient = &RoleServiceClient{} + +// Get implements iam.RoleServiceClient +func (c *RoleServiceClient) Get(ctx context.Context, in *iam.GetRoleRequest, opts ...grpc.CallOption) (*iam.Role, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewRoleServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements iam.RoleServiceClient +func (c *RoleServiceClient) List(ctx context.Context, in *iam.ListRolesRequest, opts ...grpc.CallOption) (*iam.ListRolesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewRoleServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/serviceaccount.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/serviceaccount.go new file mode 100644 index 000000000..67e00318f --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/serviceaccount.go @@ -0,0 +1,103 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package iam + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/access" + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// ServiceAccountServiceClient is a iam.ServiceAccountServiceClient with +// lazy GRPC connection initialization. +type ServiceAccountServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ iam.ServiceAccountServiceClient = &ServiceAccountServiceClient{} + +// Create implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) Create(ctx context.Context, in *iam.CreateServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).Create(ctx, in, opts...) 
+} + +// Delete implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) Delete(ctx context.Context, in *iam.DeleteServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) Get(ctx context.Context, in *iam.GetServiceAccountRequest, opts ...grpc.CallOption) (*iam.ServiceAccount, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) List(ctx context.Context, in *iam.ListServiceAccountsRequest, opts ...grpc.CallOption) (*iam.ListServiceAccountsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).List(ctx, in, opts...) +} + +// ListAccessBindings implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).ListAccessBindings(ctx, in, opts...) +} + +// ListOperations implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) ListOperations(ctx context.Context, in *iam.ListServiceAccountOperationsRequest, opts ...grpc.CallOption) (*iam.ListServiceAccountOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// SetAccessBindings implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).SetAccessBindings(ctx, in, opts...) +} + +// Update implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) Update(ctx context.Context, in *iam.UpdateServiceAccountRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).Update(ctx, in, opts...) +} + +// UpdateAccessBindings implements iam.ServiceAccountServiceClient +func (c *ServiceAccountServiceClient) UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewServiceAccountServiceClient(conn).UpdateAccessBindings(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/useraccount.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/useraccount.go new file mode 100644 index 000000000..022768510 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/useraccount.go @@ -0,0 +1,29 @@ +// Code generated by sdkgen. DO NOT EDIT. 
+ +// nolint +package iam + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" +) + +// UserAccountServiceClient is a iam.UserAccountServiceClient with +// lazy GRPC connection initialization. +type UserAccountServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ iam.UserAccountServiceClient = &UserAccountServiceClient{} + +// Get implements iam.UserAccountServiceClient +func (c *UserAccountServiceClient) Get(ctx context.Context, in *iam.GetUserAccountRequest, opts ...grpc.CallOption) (*iam.UserAccount, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewUserAccountServiceClient(conn).Get(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/iam/yandexpassportuseraccount.go b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/yandexpassportuseraccount.go new file mode 100644 index 000000000..e59922e83 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/iam/yandexpassportuseraccount.go @@ -0,0 +1,29 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package iam + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" +) + +// YandexPassportUserAccountServiceClient is a iam.YandexPassportUserAccountServiceClient with +// lazy GRPC connection initialization. +type YandexPassportUserAccountServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ iam.YandexPassportUserAccountServiceClient = &YandexPassportUserAccountServiceClient{} + +// GetByLogin implements iam.YandexPassportUserAccountServiceClient +func (c *YandexPassportUserAccountServiceClient) GetByLogin(ctx context.Context, in *iam.GetUserAccountByLoginRequest, opts ...grpc.CallOption) (*iam.UserAccount, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return iam.NewYandexPassportUserAccountServiceClient(conn).GetByLogin(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/backup.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/backup.go new file mode 100644 index 000000000..1bc92e733 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/backup.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package clickhouse + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" +) + +// BackupServiceClient is a clickhouse.BackupServiceClient with +// lazy GRPC connection initialization. +type BackupServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ clickhouse.BackupServiceClient = &BackupServiceClient{} + +// Get implements clickhouse.BackupServiceClient +func (c *BackupServiceClient) Get(ctx context.Context, in *clickhouse.GetBackupRequest, opts ...grpc.CallOption) (*clickhouse.Backup, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewBackupServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements clickhouse.BackupServiceClient +func (c *BackupServiceClient) List(ctx context.Context, in *clickhouse.ListBackupsRequest, opts ...grpc.CallOption) (*clickhouse.ListBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewBackupServiceClient(conn).List(ctx, in, opts...) 
+} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/clickhouse_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/clickhouse_group.go new file mode 100644 index 000000000..5b37a3565 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/clickhouse_group.go @@ -0,0 +1,44 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package clickhouse + +import ( + "context" + + "google.golang.org/grpc" +) + +// Clickhouse provides access to "clickhouse" component of Yandex.Cloud +type Clickhouse struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewClickhouse creates instance of Clickhouse +func NewClickhouse(g func(ctx context.Context) (*grpc.ClientConn, error)) *Clickhouse { + return &Clickhouse{g} +} + +// Backup gets BackupService client +func (c *Clickhouse) Backup() *BackupServiceClient { + return &BackupServiceClient{getConn: c.getConn} +} + +// Cluster gets ClusterService client +func (c *Clickhouse) Cluster() *ClusterServiceClient { + return &ClusterServiceClient{getConn: c.getConn} +} + +// Database gets DatabaseService client +func (c *Clickhouse) Database() *DatabaseServiceClient { + return &DatabaseServiceClient{getConn: c.getConn} +} + +// ResourcePreset gets ResourcePresetService client +func (c *Clickhouse) ResourcePreset() *ResourcePresetServiceClient { + return &ResourcePresetServiceClient{getConn: c.getConn} +} + +// User gets UserService client +func (c *Clickhouse) User() *UserServiceClient { + return &UserServiceClient{getConn: c.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/cluster.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/cluster.go new file mode 100644 index 000000000..338feb742 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/cluster.go @@ -0,0 +1,201 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package clickhouse + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// ClusterServiceClient is a clickhouse.ClusterServiceClient with +// lazy GRPC connection initialization. +type ClusterServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ clickhouse.ClusterServiceClient = &ClusterServiceClient{} + +// AddHosts implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) AddHosts(ctx context.Context, in *clickhouse.AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).AddHosts(ctx, in, opts...) +} + +// AddShard implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) AddShard(ctx context.Context, in *clickhouse.AddClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).AddShard(ctx, in, opts...) +} + +// Backup implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Backup(ctx context.Context, in *clickhouse.BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Backup(ctx, in, opts...) 
+} + +// Create implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Create(ctx context.Context, in *clickhouse.CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Delete(ctx context.Context, in *clickhouse.DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Delete(ctx, in, opts...) +} + +// DeleteHosts implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) DeleteHosts(ctx context.Context, in *clickhouse.DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).DeleteHosts(ctx, in, opts...) +} + +// DeleteShard implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) DeleteShard(ctx context.Context, in *clickhouse.DeleteClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).DeleteShard(ctx, in, opts...) +} + +// Get implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Get(ctx context.Context, in *clickhouse.GetClusterRequest, opts ...grpc.CallOption) (*clickhouse.Cluster, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Get(ctx, in, opts...) +} + +// GetShard implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) GetShard(ctx context.Context, in *clickhouse.GetClusterShardRequest, opts ...grpc.CallOption) (*clickhouse.Shard, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).GetShard(ctx, in, opts...) +} + +// List implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) List(ctx context.Context, in *clickhouse.ListClustersRequest, opts ...grpc.CallOption) (*clickhouse.ListClustersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).List(ctx, in, opts...) +} + +// ListBackups implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) ListBackups(ctx context.Context, in *clickhouse.ListClusterBackupsRequest, opts ...grpc.CallOption) (*clickhouse.ListClusterBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).ListBackups(ctx, in, opts...) +} + +// ListHosts implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) ListHosts(ctx context.Context, in *clickhouse.ListClusterHostsRequest, opts ...grpc.CallOption) (*clickhouse.ListClusterHostsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).ListHosts(ctx, in, opts...) 
+} + +// ListLogs implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) ListLogs(ctx context.Context, in *clickhouse.ListClusterLogsRequest, opts ...grpc.CallOption) (*clickhouse.ListClusterLogsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).ListLogs(ctx, in, opts...) +} + +// ListOperations implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) ListOperations(ctx context.Context, in *clickhouse.ListClusterOperationsRequest, opts ...grpc.CallOption) (*clickhouse.ListClusterOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// ListShards implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) ListShards(ctx context.Context, in *clickhouse.ListClusterShardsRequest, opts ...grpc.CallOption) (*clickhouse.ListClusterShardsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).ListShards(ctx, in, opts...) +} + +// Restore implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Restore(ctx context.Context, in *clickhouse.RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Restore(ctx, in, opts...) +} + +// Start implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Start(ctx context.Context, in *clickhouse.StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Start(ctx, in, opts...) +} + +// Stop implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Stop(ctx context.Context, in *clickhouse.StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Stop(ctx, in, opts...) +} + +// Update implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) Update(ctx context.Context, in *clickhouse.UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).Update(ctx, in, opts...) +} + +// UpdateShard implements clickhouse.ClusterServiceClient +func (c *ClusterServiceClient) UpdateShard(ctx context.Context, in *clickhouse.UpdateClusterShardRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewClusterServiceClient(conn).UpdateShard(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/database.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/database.go new file mode 100644 index 000000000..61912bfbf --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/database.go @@ -0,0 +1,57 @@ +// Code generated by sdkgen. DO NOT EDIT. 
+ +// nolint +package clickhouse + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// DatabaseServiceClient is a clickhouse.DatabaseServiceClient with +// lazy GRPC connection initialization. +type DatabaseServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ clickhouse.DatabaseServiceClient = &DatabaseServiceClient{} + +// Create implements clickhouse.DatabaseServiceClient +func (c *DatabaseServiceClient) Create(ctx context.Context, in *clickhouse.CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewDatabaseServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements clickhouse.DatabaseServiceClient +func (c *DatabaseServiceClient) Delete(ctx context.Context, in *clickhouse.DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewDatabaseServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements clickhouse.DatabaseServiceClient +func (c *DatabaseServiceClient) Get(ctx context.Context, in *clickhouse.GetDatabaseRequest, opts ...grpc.CallOption) (*clickhouse.Database, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewDatabaseServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements clickhouse.DatabaseServiceClient +func (c *DatabaseServiceClient) List(ctx context.Context, in *clickhouse.ListDatabasesRequest, opts ...grpc.CallOption) (*clickhouse.ListDatabasesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewDatabaseServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/resourcepreset.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/resourcepreset.go new file mode 100644 index 000000000..228845a83 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/resourcepreset.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package clickhouse + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" +) + +// ResourcePresetServiceClient is a clickhouse.ResourcePresetServiceClient with +// lazy GRPC connection initialization. +type ResourcePresetServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ clickhouse.ResourcePresetServiceClient = &ResourcePresetServiceClient{} + +// Get implements clickhouse.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) Get(ctx context.Context, in *clickhouse.GetResourcePresetRequest, opts ...grpc.CallOption) (*clickhouse.ResourcePreset, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewResourcePresetServiceClient(conn).Get(ctx, in, opts...) 
+} + +// List implements clickhouse.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) List(ctx context.Context, in *clickhouse.ListResourcePresetsRequest, opts ...grpc.CallOption) (*clickhouse.ListResourcePresetsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewResourcePresetServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/user.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/user.go new file mode 100644 index 000000000..b6bbeeb4b --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse/user.go @@ -0,0 +1,84 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package clickhouse + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// UserServiceClient is a clickhouse.UserServiceClient with +// lazy GRPC connection initialization. +type UserServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ clickhouse.UserServiceClient = &UserServiceClient{} + +// Create implements clickhouse.UserServiceClient +func (c *UserServiceClient) Create(ctx context.Context, in *clickhouse.CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements clickhouse.UserServiceClient +func (c *UserServiceClient) Delete(ctx context.Context, in *clickhouse.DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements clickhouse.UserServiceClient +func (c *UserServiceClient) Get(ctx context.Context, in *clickhouse.GetUserRequest, opts ...grpc.CallOption) (*clickhouse.User, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).Get(ctx, in, opts...) +} + +// GrantPermission implements clickhouse.UserServiceClient +func (c *UserServiceClient) GrantPermission(ctx context.Context, in *clickhouse.GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).GrantPermission(ctx, in, opts...) +} + +// List implements clickhouse.UserServiceClient +func (c *UserServiceClient) List(ctx context.Context, in *clickhouse.ListUsersRequest, opts ...grpc.CallOption) (*clickhouse.ListUsersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).List(ctx, in, opts...) +} + +// RevokePermission implements clickhouse.UserServiceClient +func (c *UserServiceClient) RevokePermission(ctx context.Context, in *clickhouse.RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).RevokePermission(ctx, in, opts...) 
+} + +// Update implements clickhouse.UserServiceClient +func (c *UserServiceClient) Update(ctx context.Context, in *clickhouse.UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return clickhouse.NewUserServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/backup.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/backup.go new file mode 100644 index 000000000..2baa46215 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/backup.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package mongodb + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" +) + +// BackupServiceClient is a mongodb.BackupServiceClient with +// lazy GRPC connection initialization. +type BackupServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ mongodb.BackupServiceClient = &BackupServiceClient{} + +// Get implements mongodb.BackupServiceClient +func (c *BackupServiceClient) Get(ctx context.Context, in *mongodb.GetBackupRequest, opts ...grpc.CallOption) (*mongodb.Backup, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewBackupServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements mongodb.BackupServiceClient +func (c *BackupServiceClient) List(ctx context.Context, in *mongodb.ListBackupsRequest, opts ...grpc.CallOption) (*mongodb.ListBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewBackupServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/cluster.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/cluster.go new file mode 100644 index 000000000..347a0db50 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/cluster.go @@ -0,0 +1,156 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package mongodb + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// ClusterServiceClient is a mongodb.ClusterServiceClient with +// lazy GRPC connection initialization. +type ClusterServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ mongodb.ClusterServiceClient = &ClusterServiceClient{} + +// AddHosts implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) AddHosts(ctx context.Context, in *mongodb.AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).AddHosts(ctx, in, opts...) +} + +// Backup implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Backup(ctx context.Context, in *mongodb.BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Backup(ctx, in, opts...) 
+} + +// Create implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Create(ctx context.Context, in *mongodb.CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Delete(ctx context.Context, in *mongodb.DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Delete(ctx, in, opts...) +} + +// DeleteHosts implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) DeleteHosts(ctx context.Context, in *mongodb.DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).DeleteHosts(ctx, in, opts...) +} + +// Get implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Get(ctx context.Context, in *mongodb.GetClusterRequest, opts ...grpc.CallOption) (*mongodb.Cluster, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) List(ctx context.Context, in *mongodb.ListClustersRequest, opts ...grpc.CallOption) (*mongodb.ListClustersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).List(ctx, in, opts...) +} + +// ListBackups implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) ListBackups(ctx context.Context, in *mongodb.ListClusterBackupsRequest, opts ...grpc.CallOption) (*mongodb.ListClusterBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).ListBackups(ctx, in, opts...) +} + +// ListHosts implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) ListHosts(ctx context.Context, in *mongodb.ListClusterHostsRequest, opts ...grpc.CallOption) (*mongodb.ListClusterHostsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).ListHosts(ctx, in, opts...) +} + +// ListLogs implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) ListLogs(ctx context.Context, in *mongodb.ListClusterLogsRequest, opts ...grpc.CallOption) (*mongodb.ListClusterLogsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).ListLogs(ctx, in, opts...) +} + +// ListOperations implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) ListOperations(ctx context.Context, in *mongodb.ListClusterOperationsRequest, opts ...grpc.CallOption) (*mongodb.ListClusterOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).ListOperations(ctx, in, opts...) 
+} + +// Restore implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Restore(ctx context.Context, in *mongodb.RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Restore(ctx, in, opts...) +} + +// Start implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Start(ctx context.Context, in *mongodb.StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Start(ctx, in, opts...) +} + +// Stop implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Stop(ctx context.Context, in *mongodb.StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Stop(ctx, in, opts...) +} + +// Update implements mongodb.ClusterServiceClient +func (c *ClusterServiceClient) Update(ctx context.Context, in *mongodb.UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewClusterServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/database.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/database.go new file mode 100644 index 000000000..c10308331 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/database.go @@ -0,0 +1,57 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package mongodb + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// DatabaseServiceClient is a mongodb.DatabaseServiceClient with +// lazy GRPC connection initialization. +type DatabaseServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ mongodb.DatabaseServiceClient = &DatabaseServiceClient{} + +// Create implements mongodb.DatabaseServiceClient +func (c *DatabaseServiceClient) Create(ctx context.Context, in *mongodb.CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewDatabaseServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements mongodb.DatabaseServiceClient +func (c *DatabaseServiceClient) Delete(ctx context.Context, in *mongodb.DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewDatabaseServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements mongodb.DatabaseServiceClient +func (c *DatabaseServiceClient) Get(ctx context.Context, in *mongodb.GetDatabaseRequest, opts ...grpc.CallOption) (*mongodb.Database, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewDatabaseServiceClient(conn).Get(ctx, in, opts...) 
+} + +// List implements mongodb.DatabaseServiceClient +func (c *DatabaseServiceClient) List(ctx context.Context, in *mongodb.ListDatabasesRequest, opts ...grpc.CallOption) (*mongodb.ListDatabasesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewDatabaseServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/mongodb_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/mongodb_group.go new file mode 100644 index 000000000..6ec7836e1 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/mongodb_group.go @@ -0,0 +1,44 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package mongodb + +import ( + "context" + + "google.golang.org/grpc" +) + +// MongoDB provides access to "mongodb" component of Yandex.Cloud +type MongoDB struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewMongoDB creates instance of MongoDB +func NewMongoDB(g func(ctx context.Context) (*grpc.ClientConn, error)) *MongoDB { + return &MongoDB{g} +} + +// Backup gets BackupService client +func (m *MongoDB) Backup() *BackupServiceClient { + return &BackupServiceClient{getConn: m.getConn} +} + +// Cluster gets ClusterService client +func (m *MongoDB) Cluster() *ClusterServiceClient { + return &ClusterServiceClient{getConn: m.getConn} +} + +// Database gets DatabaseService client +func (m *MongoDB) Database() *DatabaseServiceClient { + return &DatabaseServiceClient{getConn: m.getConn} +} + +// ResourcePreset gets ResourcePresetService client +func (m *MongoDB) ResourcePreset() *ResourcePresetServiceClient { + return &ResourcePresetServiceClient{getConn: m.getConn} +} + +// User gets UserService client +func (m *MongoDB) User() *UserServiceClient { + return &UserServiceClient{getConn: m.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/resourcepreset.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/resourcepreset.go new file mode 100644 index 000000000..ead938159 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/resourcepreset.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package mongodb + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" +) + +// ResourcePresetServiceClient is a mongodb.ResourcePresetServiceClient with +// lazy GRPC connection initialization. +type ResourcePresetServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ mongodb.ResourcePresetServiceClient = &ResourcePresetServiceClient{} + +// Get implements mongodb.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) Get(ctx context.Context, in *mongodb.GetResourcePresetRequest, opts ...grpc.CallOption) (*mongodb.ResourcePreset, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewResourcePresetServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements mongodb.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) List(ctx context.Context, in *mongodb.ListResourcePresetsRequest, opts ...grpc.CallOption) (*mongodb.ListResourcePresetsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewResourcePresetServiceClient(conn).List(ctx, in, opts...) 
+} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/user.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/user.go new file mode 100644 index 000000000..1ce2a5e22 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/mongodb/user.go @@ -0,0 +1,84 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package mongodb + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// UserServiceClient is a mongodb.UserServiceClient with +// lazy GRPC connection initialization. +type UserServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ mongodb.UserServiceClient = &UserServiceClient{} + +// Create implements mongodb.UserServiceClient +func (c *UserServiceClient) Create(ctx context.Context, in *mongodb.CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements mongodb.UserServiceClient +func (c *UserServiceClient) Delete(ctx context.Context, in *mongodb.DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements mongodb.UserServiceClient +func (c *UserServiceClient) Get(ctx context.Context, in *mongodb.GetUserRequest, opts ...grpc.CallOption) (*mongodb.User, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).Get(ctx, in, opts...) +} + +// GrantPermission implements mongodb.UserServiceClient +func (c *UserServiceClient) GrantPermission(ctx context.Context, in *mongodb.GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).GrantPermission(ctx, in, opts...) +} + +// List implements mongodb.UserServiceClient +func (c *UserServiceClient) List(ctx context.Context, in *mongodb.ListUsersRequest, opts ...grpc.CallOption) (*mongodb.ListUsersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).List(ctx, in, opts...) +} + +// RevokePermission implements mongodb.UserServiceClient +func (c *UserServiceClient) RevokePermission(ctx context.Context, in *mongodb.RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).RevokePermission(ctx, in, opts...) +} + +// Update implements mongodb.UserServiceClient +func (c *UserServiceClient) Update(ctx context.Context, in *mongodb.UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return mongodb.NewUserServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/backup.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/backup.go new file mode 100644 index 000000000..37637b247 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/backup.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. 
DO NOT EDIT. + +// nolint +package postgresql + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" +) + +// BackupServiceClient is a postgresql.BackupServiceClient with +// lazy GRPC connection initialization. +type BackupServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ postgresql.BackupServiceClient = &BackupServiceClient{} + +// Get implements postgresql.BackupServiceClient +func (c *BackupServiceClient) Get(ctx context.Context, in *postgresql.GetBackupRequest, opts ...grpc.CallOption) (*postgresql.Backup, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewBackupServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements postgresql.BackupServiceClient +func (c *BackupServiceClient) List(ctx context.Context, in *postgresql.ListBackupsRequest, opts ...grpc.CallOption) (*postgresql.ListBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewBackupServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/cluster.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/cluster.go new file mode 100644 index 000000000..8fc65c75e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/cluster.go @@ -0,0 +1,165 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package postgresql + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// ClusterServiceClient is a postgresql.ClusterServiceClient with +// lazy GRPC connection initialization. +type ClusterServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ postgresql.ClusterServiceClient = &ClusterServiceClient{} + +// AddHosts implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) AddHosts(ctx context.Context, in *postgresql.AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).AddHosts(ctx, in, opts...) +} + +// Backup implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Backup(ctx context.Context, in *postgresql.BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Backup(ctx, in, opts...) +} + +// Create implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Create(ctx context.Context, in *postgresql.CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Delete(ctx context.Context, in *postgresql.DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Delete(ctx, in, opts...) 
+} + +// DeleteHosts implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) DeleteHosts(ctx context.Context, in *postgresql.DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).DeleteHosts(ctx, in, opts...) +} + +// Get implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Get(ctx context.Context, in *postgresql.GetClusterRequest, opts ...grpc.CallOption) (*postgresql.Cluster, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) List(ctx context.Context, in *postgresql.ListClustersRequest, opts ...grpc.CallOption) (*postgresql.ListClustersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).List(ctx, in, opts...) +} + +// ListBackups implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) ListBackups(ctx context.Context, in *postgresql.ListClusterBackupsRequest, opts ...grpc.CallOption) (*postgresql.ListClusterBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).ListBackups(ctx, in, opts...) +} + +// ListHosts implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) ListHosts(ctx context.Context, in *postgresql.ListClusterHostsRequest, opts ...grpc.CallOption) (*postgresql.ListClusterHostsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).ListHosts(ctx, in, opts...) +} + +// ListLogs implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) ListLogs(ctx context.Context, in *postgresql.ListClusterLogsRequest, opts ...grpc.CallOption) (*postgresql.ListClusterLogsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).ListLogs(ctx, in, opts...) +} + +// ListOperations implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) ListOperations(ctx context.Context, in *postgresql.ListClusterOperationsRequest, opts ...grpc.CallOption) (*postgresql.ListClusterOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Restore implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Restore(ctx context.Context, in *postgresql.RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Restore(ctx, in, opts...) +} + +// Start implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Start(ctx context.Context, in *postgresql.StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Start(ctx, in, opts...) 
+} + +// Stop implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Stop(ctx context.Context, in *postgresql.StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Stop(ctx, in, opts...) +} + +// Update implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) Update(ctx context.Context, in *postgresql.UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).Update(ctx, in, opts...) +} + +// UpdateHosts implements postgresql.ClusterServiceClient +func (c *ClusterServiceClient) UpdateHosts(ctx context.Context, in *postgresql.UpdateClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewClusterServiceClient(conn).UpdateHosts(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/database.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/database.go new file mode 100644 index 000000000..bb87ab013 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/database.go @@ -0,0 +1,66 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package postgresql + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// DatabaseServiceClient is a postgresql.DatabaseServiceClient with +// lazy GRPC connection initialization. +type DatabaseServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ postgresql.DatabaseServiceClient = &DatabaseServiceClient{} + +// Create implements postgresql.DatabaseServiceClient +func (c *DatabaseServiceClient) Create(ctx context.Context, in *postgresql.CreateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewDatabaseServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements postgresql.DatabaseServiceClient +func (c *DatabaseServiceClient) Delete(ctx context.Context, in *postgresql.DeleteDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewDatabaseServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements postgresql.DatabaseServiceClient +func (c *DatabaseServiceClient) Get(ctx context.Context, in *postgresql.GetDatabaseRequest, opts ...grpc.CallOption) (*postgresql.Database, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewDatabaseServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements postgresql.DatabaseServiceClient +func (c *DatabaseServiceClient) List(ctx context.Context, in *postgresql.ListDatabasesRequest, opts ...grpc.CallOption) (*postgresql.ListDatabasesResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewDatabaseServiceClient(conn).List(ctx, in, opts...) 
+} + +// Update implements postgresql.DatabaseServiceClient +func (c *DatabaseServiceClient) Update(ctx context.Context, in *postgresql.UpdateDatabaseRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewDatabaseServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/postgresql_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/postgresql_group.go new file mode 100644 index 000000000..ca14ba2fb --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/postgresql_group.go @@ -0,0 +1,44 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package postgresql + +import ( + "context" + + "google.golang.org/grpc" +) + +// PostgreSQL provides access to "postgresql" component of Yandex.Cloud +type PostgreSQL struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewPostgreSQL creates instance of PostgreSQL +func NewPostgreSQL(g func(ctx context.Context) (*grpc.ClientConn, error)) *PostgreSQL { + return &PostgreSQL{g} +} + +// Backup gets BackupService client +func (p *PostgreSQL) Backup() *BackupServiceClient { + return &BackupServiceClient{getConn: p.getConn} +} + +// Cluster gets ClusterService client +func (p *PostgreSQL) Cluster() *ClusterServiceClient { + return &ClusterServiceClient{getConn: p.getConn} +} + +// Database gets DatabaseService client +func (p *PostgreSQL) Database() *DatabaseServiceClient { + return &DatabaseServiceClient{getConn: p.getConn} +} + +// ResourcePreset gets ResourcePresetService client +func (p *PostgreSQL) ResourcePreset() *ResourcePresetServiceClient { + return &ResourcePresetServiceClient{getConn: p.getConn} +} + +// User gets UserService client +func (p *PostgreSQL) User() *UserServiceClient { + return &UserServiceClient{getConn: p.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/resourcepreset.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/resourcepreset.go new file mode 100644 index 000000000..ca7fbb48e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/resourcepreset.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package postgresql + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" +) + +// ResourcePresetServiceClient is a postgresql.ResourcePresetServiceClient with +// lazy GRPC connection initialization. +type ResourcePresetServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ postgresql.ResourcePresetServiceClient = &ResourcePresetServiceClient{} + +// Get implements postgresql.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) Get(ctx context.Context, in *postgresql.GetResourcePresetRequest, opts ...grpc.CallOption) (*postgresql.ResourcePreset, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewResourcePresetServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements postgresql.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) List(ctx context.Context, in *postgresql.ListResourcePresetsRequest, opts ...grpc.CallOption) (*postgresql.ListResourcePresetsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewResourcePresetServiceClient(conn).List(ctx, in, opts...) 
+} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/user.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/user.go new file mode 100644 index 000000000..1b9e1f555 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/postgresql/user.go @@ -0,0 +1,84 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package postgresql + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/postgresql/v1" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// UserServiceClient is a postgresql.UserServiceClient with +// lazy GRPC connection initialization. +type UserServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ postgresql.UserServiceClient = &UserServiceClient{} + +// Create implements postgresql.UserServiceClient +func (c *UserServiceClient) Create(ctx context.Context, in *postgresql.CreateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements postgresql.UserServiceClient +func (c *UserServiceClient) Delete(ctx context.Context, in *postgresql.DeleteUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements postgresql.UserServiceClient +func (c *UserServiceClient) Get(ctx context.Context, in *postgresql.GetUserRequest, opts ...grpc.CallOption) (*postgresql.User, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).Get(ctx, in, opts...) +} + +// GrantPermission implements postgresql.UserServiceClient +func (c *UserServiceClient) GrantPermission(ctx context.Context, in *postgresql.GrantUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).GrantPermission(ctx, in, opts...) +} + +// List implements postgresql.UserServiceClient +func (c *UserServiceClient) List(ctx context.Context, in *postgresql.ListUsersRequest, opts ...grpc.CallOption) (*postgresql.ListUsersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).List(ctx, in, opts...) +} + +// RevokePermission implements postgresql.UserServiceClient +func (c *UserServiceClient) RevokePermission(ctx context.Context, in *postgresql.RevokeUserPermissionRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).RevokePermission(ctx, in, opts...) +} + +// Update implements postgresql.UserServiceClient +func (c *UserServiceClient) Update(ctx context.Context, in *postgresql.UpdateUserRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return postgresql.NewUserServiceClient(conn).Update(ctx, in, opts...) 
+} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/backup.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/backup.go new file mode 100644 index 000000000..3846c684a --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/backup.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package redis + +import ( + "context" + + "google.golang.org/grpc" + + redis "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" +) + +// BackupServiceClient is a redis.BackupServiceClient with +// lazy GRPC connection initialization. +type BackupServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ redis.BackupServiceClient = &BackupServiceClient{} + +// Get implements redis.BackupServiceClient +func (c *BackupServiceClient) Get(ctx context.Context, in *redis.GetBackupRequest, opts ...grpc.CallOption) (*redis.Backup, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewBackupServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements redis.BackupServiceClient +func (c *BackupServiceClient) List(ctx context.Context, in *redis.ListBackupsRequest, opts ...grpc.CallOption) (*redis.ListBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewBackupServiceClient(conn).List(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/cluster.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/cluster.go new file mode 100644 index 000000000..c92cf8d46 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/cluster.go @@ -0,0 +1,156 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package redis + +import ( + "context" + + "google.golang.org/grpc" + + redis "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// ClusterServiceClient is a redis.ClusterServiceClient with +// lazy GRPC connection initialization. +type ClusterServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ redis.ClusterServiceClient = &ClusterServiceClient{} + +// AddHosts implements redis.ClusterServiceClient +func (c *ClusterServiceClient) AddHosts(ctx context.Context, in *redis.AddClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).AddHosts(ctx, in, opts...) +} + +// Backup implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Backup(ctx context.Context, in *redis.BackupClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Backup(ctx, in, opts...) +} + +// Create implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Create(ctx context.Context, in *redis.CreateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Create(ctx, in, opts...) 
+} + +// Delete implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Delete(ctx context.Context, in *redis.DeleteClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Delete(ctx, in, opts...) +} + +// DeleteHosts implements redis.ClusterServiceClient +func (c *ClusterServiceClient) DeleteHosts(ctx context.Context, in *redis.DeleteClusterHostsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).DeleteHosts(ctx, in, opts...) +} + +// Get implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Get(ctx context.Context, in *redis.GetClusterRequest, opts ...grpc.CallOption) (*redis.Cluster, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements redis.ClusterServiceClient +func (c *ClusterServiceClient) List(ctx context.Context, in *redis.ListClustersRequest, opts ...grpc.CallOption) (*redis.ListClustersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).List(ctx, in, opts...) +} + +// ListBackups implements redis.ClusterServiceClient +func (c *ClusterServiceClient) ListBackups(ctx context.Context, in *redis.ListClusterBackupsRequest, opts ...grpc.CallOption) (*redis.ListClusterBackupsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).ListBackups(ctx, in, opts...) +} + +// ListHosts implements redis.ClusterServiceClient +func (c *ClusterServiceClient) ListHosts(ctx context.Context, in *redis.ListClusterHostsRequest, opts ...grpc.CallOption) (*redis.ListClusterHostsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).ListHosts(ctx, in, opts...) +} + +// ListLogs implements redis.ClusterServiceClient +func (c *ClusterServiceClient) ListLogs(ctx context.Context, in *redis.ListClusterLogsRequest, opts ...grpc.CallOption) (*redis.ListClusterLogsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).ListLogs(ctx, in, opts...) +} + +// ListOperations implements redis.ClusterServiceClient +func (c *ClusterServiceClient) ListOperations(ctx context.Context, in *redis.ListClusterOperationsRequest, opts ...grpc.CallOption) (*redis.ListClusterOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Restore implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Restore(ctx context.Context, in *redis.RestoreClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Restore(ctx, in, opts...) +} + +// Start implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Start(ctx context.Context, in *redis.StartClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Start(ctx, in, opts...) 
+} + +// Stop implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Stop(ctx context.Context, in *redis.StopClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Stop(ctx, in, opts...) +} + +// Update implements redis.ClusterServiceClient +func (c *ClusterServiceClient) Update(ctx context.Context, in *redis.UpdateClusterRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewClusterServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/redis_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/redis_group.go new file mode 100644 index 000000000..bb8b66b31 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/redis_group.go @@ -0,0 +1,34 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package redis + +import ( + "context" + + "google.golang.org/grpc" +) + +// Redis provides access to "redis" component of Yandex.Cloud +type Redis struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewRedis creates instance of Redis +func NewRedis(g func(ctx context.Context) (*grpc.ClientConn, error)) *Redis { + return &Redis{g} +} + +// Cluster gets ClusterService client +func (r *Redis) Cluster() *ClusterServiceClient { + return &ClusterServiceClient{getConn: r.getConn} +} + +// ResourcePreset gets ResourcePresetService client +func (r *Redis) ResourcePreset() *ResourcePresetServiceClient { + return &ResourcePresetServiceClient{getConn: r.getConn} +} + +// Backup gets BackupService client +func (r *Redis) Backup() *BackupServiceClient { + return &BackupServiceClient{getConn: r.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/resourcepreset.go b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/resourcepreset.go new file mode 100644 index 000000000..fe021c05a --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/mdb/redis/resourcepreset.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package redis + +import ( + "context" + + "google.golang.org/grpc" + + redis "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1alpha" +) + +// ResourcePresetServiceClient is a redis.ResourcePresetServiceClient with +// lazy GRPC connection initialization. +type ResourcePresetServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ redis.ResourcePresetServiceClient = &ResourcePresetServiceClient{} + +// Get implements redis.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) Get(ctx context.Context, in *redis.GetResourcePresetRequest, opts ...grpc.CallOption) (*redis.ResourcePreset, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewResourcePresetServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements redis.ResourcePresetServiceClient +func (c *ResourcePresetServiceClient) List(ctx context.Context, in *redis.ListResourcePresetsRequest, opts ...grpc.CallOption) (*redis.ListResourcePresetsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return redis.NewResourcePresetServiceClient(conn).List(ctx, in, opts...) 
+} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation.go b/vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation.go new file mode 100644 index 000000000..46c1f3711 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation.go @@ -0,0 +1,38 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package operation + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" +) + +// OperationServiceClient is a operation.OperationServiceClient with +// lazy GRPC connection initialization. +type OperationServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ operation.OperationServiceClient = &OperationServiceClient{} + +// Cancel implements operation.OperationServiceClient +func (c *OperationServiceClient) Cancel(ctx context.Context, in *operation.CancelOperationRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return operation.NewOperationServiceClient(conn).Cancel(ctx, in, opts...) +} + +// Get implements operation.OperationServiceClient +func (c *OperationServiceClient) Get(ctx context.Context, in *operation.GetOperationRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return operation.NewOperationServiceClient(conn).Get(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation_group.go new file mode 100644 index 000000000..b0c6da5d0 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/operation/operation_group.go @@ -0,0 +1,24 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package operation + +import ( + "context" + + "google.golang.org/grpc" +) + +// Operation provides access to "operation" component of Yandex.Cloud +type Operation struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewOperation creates instance of Operation +func NewOperation(g func(ctx context.Context) (*grpc.ClientConn, error)) *Operation { + return &Operation{g} +} + +// Operation gets OperationService client +func (o *Operation) Operation() *OperationServiceClient { + return &OperationServiceClient{getConn: o.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/cloud.go b/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/cloud.go new file mode 100644 index 000000000..40d082805 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/cloud.go @@ -0,0 +1,76 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package resourcemanager + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/access" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + "github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1" +) + +// CloudServiceClient is a resourcemanager.CloudServiceClient with +// lazy GRPC connection initialization. 
+type CloudServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ resourcemanager.CloudServiceClient = &CloudServiceClient{} + +// Get implements resourcemanager.CloudServiceClient +func (c *CloudServiceClient) Get(ctx context.Context, in *resourcemanager.GetCloudRequest, opts ...grpc.CallOption) (*resourcemanager.Cloud, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewCloudServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements resourcemanager.CloudServiceClient +func (c *CloudServiceClient) List(ctx context.Context, in *resourcemanager.ListCloudsRequest, opts ...grpc.CallOption) (*resourcemanager.ListCloudsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewCloudServiceClient(conn).List(ctx, in, opts...) +} + +// ListAccessBindings implements resourcemanager.CloudServiceClient +func (c *CloudServiceClient) ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewCloudServiceClient(conn).ListAccessBindings(ctx, in, opts...) +} + +// ListOperations implements resourcemanager.CloudServiceClient +func (c *CloudServiceClient) ListOperations(ctx context.Context, in *resourcemanager.ListCloudOperationsRequest, opts ...grpc.CallOption) (*resourcemanager.ListCloudOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewCloudServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// SetAccessBindings implements resourcemanager.CloudServiceClient +func (c *CloudServiceClient) SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewCloudServiceClient(conn).SetAccessBindings(ctx, in, opts...) +} + +// UpdateAccessBindings implements resourcemanager.CloudServiceClient +func (c *CloudServiceClient) UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewCloudServiceClient(conn).UpdateAccessBindings(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/folder.go b/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/folder.go new file mode 100644 index 000000000..2146c8aac --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/folder.go @@ -0,0 +1,103 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package resourcemanager + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/access" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + "github.com/yandex-cloud/go-genproto/yandex/cloud/resourcemanager/v1" +) + +// FolderServiceClient is a resourcemanager.FolderServiceClient with +// lazy GRPC connection initialization. 
+type FolderServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ resourcemanager.FolderServiceClient = &FolderServiceClient{} + +// Create implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) Create(ctx context.Context, in *resourcemanager.CreateFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) Delete(ctx context.Context, in *resourcemanager.DeleteFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) Get(ctx context.Context, in *resourcemanager.GetFolderRequest, opts ...grpc.CallOption) (*resourcemanager.Folder, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) List(ctx context.Context, in *resourcemanager.ListFoldersRequest, opts ...grpc.CallOption) (*resourcemanager.ListFoldersResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).List(ctx, in, opts...) +} + +// ListAccessBindings implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) ListAccessBindings(ctx context.Context, in *access.ListAccessBindingsRequest, opts ...grpc.CallOption) (*access.ListAccessBindingsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).ListAccessBindings(ctx, in, opts...) +} + +// ListOperations implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) ListOperations(ctx context.Context, in *resourcemanager.ListFolderOperationsRequest, opts ...grpc.CallOption) (*resourcemanager.ListFolderOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// SetAccessBindings implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) SetAccessBindings(ctx context.Context, in *access.SetAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).SetAccessBindings(ctx, in, opts...) +} + +// Update implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) Update(ctx context.Context, in *resourcemanager.UpdateFolderRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).Update(ctx, in, opts...) 
+} + +// UpdateAccessBindings implements resourcemanager.FolderServiceClient +func (c *FolderServiceClient) UpdateAccessBindings(ctx context.Context, in *access.UpdateAccessBindingsRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return resourcemanager.NewFolderServiceClient(conn).UpdateAccessBindings(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/resourcemanager_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/resourcemanager_group.go new file mode 100644 index 000000000..e8a152841 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/resourcemanager/resourcemanager_group.go @@ -0,0 +1,29 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package resourcemanager + +import ( + "context" + + "google.golang.org/grpc" +) + +// ResourceManager provides access to "resourcemanager" component of Yandex.Cloud +type ResourceManager struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewResourceManager creates instance of ResourceManager +func NewResourceManager(g func(ctx context.Context) (*grpc.ClientConn, error)) *ResourceManager { + return &ResourceManager{g} +} + +// Folder gets FolderService client +func (r *ResourceManager) Folder() *FolderServiceClient { + return &FolderServiceClient{getConn: r.getConn} +} + +// Cloud gets CloudService client +func (r *ResourceManager) Cloud() *CloudServiceClient { + return &CloudServiceClient{getConn: r.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/network.go b/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/network.go new file mode 100644 index 000000000..5b78ab156 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/network.go @@ -0,0 +1,84 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package vpc + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" +) + +// NetworkServiceClient is a vpc.NetworkServiceClient with +// lazy GRPC connection initialization. +type NetworkServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ vpc.NetworkServiceClient = &NetworkServiceClient{} + +// Create implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) Create(ctx context.Context, in *vpc.CreateNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) Delete(ctx context.Context, in *vpc.DeleteNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) Get(ctx context.Context, in *vpc.GetNetworkRequest, opts ...grpc.CallOption) (*vpc.Network, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).Get(ctx, in, opts...) 
+} + +// List implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) List(ctx context.Context, in *vpc.ListNetworksRequest, opts ...grpc.CallOption) (*vpc.ListNetworksResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).List(ctx, in, opts...) +} + +// ListOperations implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) ListOperations(ctx context.Context, in *vpc.ListNetworkOperationsRequest, opts ...grpc.CallOption) (*vpc.ListNetworkOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// ListSubnets implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) ListSubnets(ctx context.Context, in *vpc.ListNetworkSubnetsRequest, opts ...grpc.CallOption) (*vpc.ListNetworkSubnetsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).ListSubnets(ctx, in, opts...) +} + +// Update implements vpc.NetworkServiceClient +func (c *NetworkServiceClient) Update(ctx context.Context, in *vpc.UpdateNetworkRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewNetworkServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/subnet.go b/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/subnet.go new file mode 100644 index 000000000..3eb39a3cb --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/subnet.go @@ -0,0 +1,75 @@ +// Code generated by sdkgen. DO NOT EDIT. + +// nolint +package vpc + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + "github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1" +) + +// SubnetServiceClient is a vpc.SubnetServiceClient with +// lazy GRPC connection initialization. +type SubnetServiceClient struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +var _ vpc.SubnetServiceClient = &SubnetServiceClient{} + +// Create implements vpc.SubnetServiceClient +func (c *SubnetServiceClient) Create(ctx context.Context, in *vpc.CreateSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewSubnetServiceClient(conn).Create(ctx, in, opts...) +} + +// Delete implements vpc.SubnetServiceClient +func (c *SubnetServiceClient) Delete(ctx context.Context, in *vpc.DeleteSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewSubnetServiceClient(conn).Delete(ctx, in, opts...) +} + +// Get implements vpc.SubnetServiceClient +func (c *SubnetServiceClient) Get(ctx context.Context, in *vpc.GetSubnetRequest, opts ...grpc.CallOption) (*vpc.Subnet, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewSubnetServiceClient(conn).Get(ctx, in, opts...) +} + +// List implements vpc.SubnetServiceClient +func (c *SubnetServiceClient) List(ctx context.Context, in *vpc.ListSubnetsRequest, opts ...grpc.CallOption) (*vpc.ListSubnetsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewSubnetServiceClient(conn).List(ctx, in, opts...) 
+} + +// ListOperations implements vpc.SubnetServiceClient +func (c *SubnetServiceClient) ListOperations(ctx context.Context, in *vpc.ListSubnetOperationsRequest, opts ...grpc.CallOption) (*vpc.ListSubnetOperationsResponse, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewSubnetServiceClient(conn).ListOperations(ctx, in, opts...) +} + +// Update implements vpc.SubnetServiceClient +func (c *SubnetServiceClient) Update(ctx context.Context, in *vpc.UpdateSubnetRequest, opts ...grpc.CallOption) (*operation.Operation, error) { + conn, err := c.getConn(ctx) + if err != nil { + return nil, err + } + return vpc.NewSubnetServiceClient(conn).Update(ctx, in, opts...) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/vpc_group.go b/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/vpc_group.go new file mode 100644 index 000000000..b77bbe9c1 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/gen/vpc/vpc_group.go @@ -0,0 +1,29 @@ +// Code generated by sdkgen. DO NOT EDIT. + +package vpc + +import ( + "context" + + "google.golang.org/grpc" +) + +// VPC provides access to "vpc" component of Yandex.Cloud +type VPC struct { + getConn func(ctx context.Context) (*grpc.ClientConn, error) +} + +// NewVPC creates instance of VPC +func NewVPC(g func(ctx context.Context) (*grpc.ClientConn, error)) *VPC { + return &VPC{g} +} + +// Network gets NetworkService client +func (v *VPC) Network() *NetworkServiceClient { + return &NetworkServiceClient{getConn: v.getConn} +} + +// Subnet gets SubnetService client +func (v *VPC) Subnet() *SubnetServiceClient { + return &SubnetServiceClient{getConn: v.getConn} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/iamkey/generate_proto.sh b/vendor/github.com/yandex-cloud/go-sdk/iamkey/generate_proto.sh new file mode 100755 index 000000000..b78c9081d --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/iamkey/generate_proto.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd "$(dirname "${BASH_SOURCE[0]}")" +protoc \ + --proto_path ../../vendor/bb.yandex-team.ru/cloud/public-api/ \ + --proto_path . \ + --go_out=Myandex/cloud/iam/v1/key.proto=bb.yandex-team.ru/cloud/cloud-go/genproto/publicapi/yandex/cloud/iam/v1:$GOPATH/src *.proto + diff --git a/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.go b/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.go new file mode 100644 index 000000000..e9d705786 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.go @@ -0,0 +1,133 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package iamkey + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + + yaml2json "github.com/ghodss/yaml" + "github.com/golang/protobuf/jsonpb" + yaml "gopkg.in/yaml.v2" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + "github.com/yandex-cloud/go-sdk/pkg/sdkerrors" +) + +var ( + _ json.Marshaler = &Key{} + _ json.Unmarshaler = &Key{} + _ yaml.Marshaler = &Key{} + _ yaml.Unmarshaler = &Key{} +) + +// New creates new Key from IAM Key Service Create response. 
+func New(created *iam.CreateKeyResponse) *Key { + if created == nil { + panic("nil key") + } + public := created.GetKey() + key := &Key{ + Id: public.GetId(), + Subject: nil, + CreatedAt: public.GetCreatedAt(), + Description: public.GetDescription(), + KeyAlgorithm: public.GetKeyAlgorithm(), + PublicKey: public.GetPublicKey(), + PrivateKey: created.GetPrivateKey(), + } + switch subj := public.GetSubject().(type) { + case *iam.Key_ServiceAccountId: + key.Subject = &Key_ServiceAccountId{ + ServiceAccountId: subj.ServiceAccountId, + } + case *iam.Key_UserAccountId: + key.Subject = &Key_UserAccountId{ + UserAccountId: subj.UserAccountId, + } + case nil: + // Do nothing. + default: + panic(fmt.Sprintf("unexpected key subject: %#v", subj)) + } + return key +} + +// UnmarshalJSON unmarshals IAM Key JSON data. +// Both snake_case (gRPC API) and camelCase (REST API) fields are accepted. +func (m *Key) UnmarshalJSON(data []byte) error { + return jsonpb.Unmarshal(bytes.NewReader(data), m) +} + +func (m *Key) MarshalJSON() ([]byte, error) { + marshaller := &jsonpb.Marshaler{OrigName: true} + buf := &bytes.Buffer{} + err := marshaller.Marshal(buf, m) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalYAML unmarshals IAM Key YAML data. +// Both snake_case (gRPC API) and camelCase (REST API) fields are accepted. +func (m *Key) UnmarshalYAML(unmarshal func(interface{}) error) error { + var obj yaml.MapSlice + err := unmarshal(&obj) + if err != nil { + return err + } + yamlData, err := yaml.Marshal(obj) + if err != nil { + return err + } + jsonData, err := yaml2json.YAMLToJSON(yamlData) + if err != nil { + return err + } + return m.UnmarshalJSON(jsonData) +} + +func (m *Key) MarshalYAML() (interface{}, error) { + jsonData, err := m.MarshalJSON() + if err != nil { + return nil, err + } + var obj yaml.MapSlice + err = yaml.Unmarshal(jsonData, &obj) + if err != nil { + return nil, err + } + return obj, nil +} + +// ReadFromJSONFile reads IAM Key from JSON file. +func ReadFromJSONFile(path string) (*Key, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, sdkerrors.WithMessagef(err, "key file '%s' read fail", path) + } + key := &Key{} + err = json.Unmarshal(data, key) + if err != nil { + return nil, sdkerrors.WithMessage(err, "key unmarshal fail") + } + return key, nil +} + +// WriteToJSONFile writes key to file in JSON format. +// File permissions will be 0600, because private key part is sensitive data. +func WriteToJSONFile(path string, key *Key) error { + data, err := json.MarshalIndent(key, "", " ") + if err != nil { + return sdkerrors.WithMessage(err, "key marshal fail") + } + err = ioutil.WriteFile(path, data, 0600) + if err != nil { + return sdkerrors.WithMessagef(err, "file '%s' write fail", path) + } + return nil +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.pb.go b/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.pb.go new file mode 100644 index 000000000..87596d9e2 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.pb.go @@ -0,0 +1,245 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: key.proto + +package iamkey // import "github.com/yandex-cloud/go-sdk/iamkey" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import v1 "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Key is resource managed by IAM Key Service. +// Can be issued for User or Service Account, but key authorization is supported only for Service Accounts. +// Issued key contains private part that is not saved on server side, and should be saved by client. +type Key struct { + // ID of the Key resource. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to Subject: + // *Key_UserAccountId + // *Key_ServiceAccountId + Subject isKey_Subject `protobuf_oneof:"subject"` + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreatedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Description of the Key resource. 0-256 characters long. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // An algorithm used to generate a key pair of the Key resource. + KeyAlgorithm v1.Key_Algorithm `protobuf:"varint,6,opt,name=key_algorithm,json=keyAlgorithm,proto3,enum=yandex.cloud.iam.v1.Key_Algorithm" json:"key_algorithm,omitempty"` + // A public key of the Key resource. + PublicKey string `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // A public key of the Key resource. + PrivateKey string `protobuf:"bytes,8,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { + return fileDescriptor_key_fecd1f348d833dbd, []int{0} +} +func (m *Key) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Key.Unmarshal(m, b) +} +func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Key.Marshal(b, m, deterministic) +} +func (dst *Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_Key.Merge(dst, src) +} +func (m *Key) XXX_Size() int { + return xxx_messageInfo_Key.Size(m) +} +func (m *Key) XXX_DiscardUnknown() { + xxx_messageInfo_Key.DiscardUnknown(m) +} + +var xxx_messageInfo_Key proto.InternalMessageInfo + +type isKey_Subject interface { + isKey_Subject() +} + +type Key_UserAccountId struct { + UserAccountId string `protobuf:"bytes,2,opt,name=user_account_id,json=userAccountId,proto3,oneof"` +} +type Key_ServiceAccountId struct { + ServiceAccountId string `protobuf:"bytes,3,opt,name=service_account_id,json=serviceAccountId,proto3,oneof"` +} + +func (*Key_UserAccountId) isKey_Subject() {} +func (*Key_ServiceAccountId) isKey_Subject() {} + +func (m *Key) GetSubject() isKey_Subject { + if m != nil { + return m.Subject + } + return nil +} + +func (m *Key) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Key) GetUserAccountId() string { + if x, ok := m.GetSubject().(*Key_UserAccountId); ok { + return x.UserAccountId + } + return "" +} + +func (m *Key) GetServiceAccountId() string { + if x, ok := 
m.GetSubject().(*Key_ServiceAccountId); ok { + return x.ServiceAccountId + } + return "" +} + +func (m *Key) GetCreatedAt() *timestamp.Timestamp { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *Key) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Key) GetKeyAlgorithm() v1.Key_Algorithm { + if m != nil { + return m.KeyAlgorithm + } + return v1.Key_ALGORITHM_UNSPECIFIED +} + +func (m *Key) GetPublicKey() string { + if m != nil { + return m.PublicKey + } + return "" +} + +func (m *Key) GetPrivateKey() string { + if m != nil { + return m.PrivateKey + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Key) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_OneofMarshaler, _Key_OneofUnmarshaler, _Key_OneofSizer, []interface{}{ + (*Key_UserAccountId)(nil), + (*Key_ServiceAccountId)(nil), + } +} + +func _Key_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key) + // subject + switch x := m.Subject.(type) { + case *Key_UserAccountId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.UserAccountId) + case *Key_ServiceAccountId: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ServiceAccountId) + case nil: + default: + return fmt.Errorf("Key.Subject has unexpected type %T", x) + } + return nil +} + +func _Key_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key) + switch tag { + case 2: // subject.user_account_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Subject = &Key_UserAccountId{x} + return true, err + case 3: // subject.service_account_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Subject = &Key_ServiceAccountId{x} + return true, err + default: + return false, nil + } +} + +func _Key_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key) + // subject + switch x := m.Subject.(type) { + case *Key_UserAccountId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.UserAccountId))) + n += len(x.UserAccountId) + case *Key_ServiceAccountId: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ServiceAccountId))) + n += len(x.ServiceAccountId) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Key)(nil), "yandex.cloud.sdk.v1.Key") +} + +func init() { proto.RegisterFile("key.proto", fileDescriptor_key_fecd1f348d833dbd) } + +var fileDescriptor_key_fecd1f348d833dbd = []byte{ + // 349 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x41, 0x6f, 0xa3, 0x30, + 0x10, 0x85, 0x17, 0xb2, 0x9b, 0x2c, 0xce, 0x26, 0xbb, 0xf2, 0x5e, 0x50, 0xa4, 0x28, 0x28, 0x27, + 0xa4, 0x2a, 0x46, 0x49, 0x4f, 0x3d, 0x26, 0x97, 0xb6, 0xca, 0x0d, 0xf5, 0xd4, 0x0b, 0x32, 0xf6, + 0x94, 0xba, 0x40, 0x8c, 0x8c, 0x41, 0xf5, 0xbf, 0xed, 0x4f, 0xa9, 0x30, 0x24, 0x4d, 0x2f, 0x96, + 0xfc, 0xde, 0x37, 0xf6, 0xcc, 0x1b, 0xe4, 0xe5, 0x60, 0x48, 0xa5, 0xa4, 0x96, 0xf8, 0xbf, 0xa1, + 0x27, 0x0e, 0xef, 0x84, 0x15, 0xb2, 0xe1, 0xa4, 0xe6, 0x39, 0x69, 0xb7, 0x8b, 0x55, 0x26, 0x65, + 0x56, 0x40, 0x64, 0x91, 0xb4, 0x79, 0x89, 0xb4, 0x28, 0xa1, 0xd6, 0xb4, 0xac, 0xfa, 0xaa, 
0xc5, + 0xb2, 0xaf, 0x8a, 0x6c, 0x55, 0x24, 0x68, 0x19, 0xb5, 0xdb, 0xe8, 0xf2, 0xe8, 0xfa, 0xc3, 0x45, + 0xa3, 0x23, 0x18, 0x3c, 0x47, 0xae, 0xe0, 0xbe, 0x13, 0x38, 0xa1, 0x17, 0xbb, 0x82, 0xe3, 0x10, + 0xfd, 0x6d, 0x6a, 0x50, 0x09, 0x65, 0x4c, 0x36, 0x27, 0x9d, 0x08, 0xee, 0xbb, 0x9d, 0xf9, 0xf0, + 0x23, 0x9e, 0x75, 0xc6, 0xbe, 0xd7, 0x1f, 0x39, 0x26, 0x08, 0xd7, 0xa0, 0x5a, 0xc1, 0xe0, 0x1a, + 0x1e, 0x0d, 0xf0, 0xbf, 0xc1, 0xfb, 0xe2, 0xef, 0x10, 0x62, 0x0a, 0xa8, 0x06, 0x9e, 0x50, 0xed, + 0xff, 0x0c, 0x9c, 0x70, 0xba, 0x5b, 0x90, 0x7e, 0x0c, 0x72, 0x1e, 0x83, 0x3c, 0x9d, 0xc7, 0x88, + 0xbd, 0x81, 0xde, 0x6b, 0x1c, 0xa0, 0x29, 0x87, 0x9a, 0x29, 0x51, 0x69, 0x21, 0x4f, 0xfe, 0x2f, + 0xdb, 0xed, 0xb5, 0x84, 0xef, 0xd1, 0x2c, 0x07, 0x93, 0xd0, 0x22, 0x93, 0x4a, 0xe8, 0xd7, 0xd2, + 0x1f, 0x07, 0x4e, 0x38, 0xdf, 0xad, 0xc9, 0xb7, 0xec, 0x04, 0x2d, 0x49, 0xbb, 0x25, 0x47, 0x30, + 0x64, 0x7f, 0x26, 0xe3, 0x3f, 0x39, 0x98, 0xcb, 0x0d, 0x2f, 0x11, 0xaa, 0x9a, 0xb4, 0x10, 0x2c, + 0xc9, 0xc1, 0xf8, 0x13, 0xfb, 0x93, 0xd7, 0x2b, 0x5d, 0x5c, 0x2b, 0x34, 0xad, 0x94, 0x68, 0xa9, + 0x06, 0xeb, 0xff, 0xb6, 0x3e, 0x1a, 0xa4, 0x23, 0x98, 0x83, 0x87, 0x26, 0x75, 0x93, 0xbe, 0x01, + 0xd3, 0x87, 0xcd, 0xf3, 0x4d, 0x9a, 0x0e, 0x0d, 0x6c, 0x34, 0xd0, 0x92, 0xa8, 0x66, 0x58, 0x87, + 0x3d, 0x37, 0x99, 0x8c, 0x6a, 0x9e, 0x77, 0xbb, 0xc9, 0xc1, 0xa4, 0x63, 0x9b, 0xc1, 0xed, 0x67, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0xc2, 0x3d, 0x22, 0xfa, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.proto b/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.proto new file mode 100644 index 000000000..25b021e08 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/iamkey/key.proto @@ -0,0 +1,42 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +syntax = "proto3"; + +package yandex.cloud.sdk.v1; + +import "google/protobuf/timestamp.proto"; +import "yandex/cloud/iam/v1/key.proto"; + +option go_package = "bb.yandex-team.ru/cloud/cloud-go/sdk/iamkey"; + +// Key is resource managed by IAM Key Service. +// Can be issued for User or Service Account, but key authorization is supported only for Service Accounts. +// Issued key contains private part that is not saved on server side, and should be saved by client. +message Key { + // ID of the Key resource. + string id = 1; + + oneof subject { + // ID of the user account that the Key resource belongs to. + string user_account_id = 2; + + // ID of the service account that the Key resource belongs to. + string service_account_id = 3; + } + + // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + google.protobuf.Timestamp created_at = 4; + + // Description of the Key resource. 0-256 characters long. + string description = 5; + + // An algorithm used to generate a key pair of the Key resource. + iam.v1.Key.Algorithm key_algorithm = 6; + + // A public key of the Key resource. + string public_key = 7; + + // A public key of the Key resource. + string private_key = 8; +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/mdb.go b/vendor/github.com/yandex-cloud/go-sdk/mdb.go new file mode 100644 index 000000000..2706c45a6 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/mdb.go @@ -0,0 +1,38 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. 
+// Author: Dmitry Novikov + +package ycsdk + +import ( + "github.com/yandex-cloud/go-sdk/gen/mdb/clickhouse" + "github.com/yandex-cloud/go-sdk/gen/mdb/mongodb" + "github.com/yandex-cloud/go-sdk/gen/mdb/postgresql" + "github.com/yandex-cloud/go-sdk/gen/mdb/redis" +) + +const ( + MDBMongoDBServiceID Endpoint = "managed-mongodb" + MDBClickhouseServiceID Endpoint = "managed-clickhouse" + MDBPostgreSQLServiceID Endpoint = "managed-postgresql" + MDBRedisServiceID Endpoint = "managed-redis" +) + +type MDB struct { + sdk *SDK +} + +func (m *MDB) PostgreSQL() *postgresql.PostgreSQL { + return postgresql.NewPostgreSQL(m.sdk.getConn(MDBPostgreSQLServiceID)) +} + +func (m *MDB) MongoDB() *mongodb.MongoDB { + return mongodb.NewMongoDB(m.sdk.getConn(MDBMongoDBServiceID)) +} + +func (m *MDB) Clickhouse() *clickhouse.Clickhouse { + return clickhouse.NewClickhouse(m.sdk.getConn(MDBClickhouseServiceID)) +} + +func (m *MDB) Redis() *redis.Redis { + return redis.NewRedis(m.sdk.getConn(MDBRedisServiceID)) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/operation/operation.go b/vendor/github.com/yandex-cloud/go-sdk/operation/operation.go new file mode 100644 index 000000000..a8c91eeb1 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/operation/operation.go @@ -0,0 +1,187 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package operation + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + "github.com/yandex-cloud/go-sdk/pkg/sdkerrors" +) + +type Client = operation.OperationServiceClient +type Proto = operation.Operation + +func New(client Client, proto *Proto) *Operation { + if proto == nil { + panic("nil operation") + } + return &Operation{proto: proto, client: client, newTimer: defaultTimer} +} + +func defaultTimer(d time.Duration) (func() <-chan time.Time, func() bool) { + timer := time.NewTimer(d) + return func() <-chan time.Time { + return timer.C + }, timer.Stop +} + +type Operation struct { + proto *Proto + client Client + newTimer func(time.Duration) (func() <-chan time.Time, func() bool) +} + +func (o *Operation) Proto() *Proto { return o.proto } +func (o *Operation) Client() Client { return o.client } + +//revive:disable:var-naming +func (o *Operation) Id() string { return o.proto.Id } + +//revive:enable:var-naming +func (o *Operation) Description() string { return o.proto.Description } +func (o *Operation) CreatedBy() string { return o.proto.CreatedBy } + +func (o *Operation) CreatedAt() time.Time { + ts, err := ptypes.Timestamp(o.proto.CreatedAt) + if err != nil { + panic(fmt.Sprintf("invalid created at: %v", err)) + } + return ts +} + +func (o *Operation) Metadata() (proto.Message, error) { + return UnmarshalAny(o.RawMetadata()) +} + +func (o *Operation) RawMetadata() *any.Any { return o.proto.Metadata } + +func (o *Operation) Error() error { + st := o.ErrorStatus() + if st == nil { + return nil + } + return st.Err() +} + +func (o *Operation) ErrorStatus() *status.Status { + proto := o.proto.GetError() + if proto == nil { + return nil + } + return status.FromProto(proto) +} + +func (o *Operation) Response() (proto.Message, error) { + resp := o.RawResponse() + if resp == nil { + return nil, nil + } + return UnmarshalAny(resp) +} + 
+func (o *Operation) RawResponse() *any.Any { + return o.proto.GetResponse() +} + +func (o *Operation) Done() bool { return o.proto.Done } +func (o *Operation) Ok() bool { return o.Done() && o.proto.GetResponse() != nil } +func (o *Operation) Failed() bool { return o.Done() && o.proto.GetError() != nil } + +// Poll gets new state of operation from operation client. On success the operation state is updated. +// Returns error if update request failed. +func (o *Operation) Poll(ctx context.Context, opts ...grpc.CallOption) error { + req := &operation.GetOperationRequest{OperationId: o.Id()} + state, err := o.Client().Get(ctx, req, opts...) + if err != nil { + return err + } + o.proto = state + return nil +} + +// Cancel requests operation cancel. On success operation state is updated. +// Returns error if cancel failed. +func (o *Operation) Cancel(ctx context.Context, opts ...grpc.CallOption) error { + req := &operation.CancelOperationRequest{OperationId: o.Id()} + state, err := o.Client().Cancel(ctx, req, opts...) + if err != nil { + return err + } + o.proto = state + return nil +} + +const DefaultPollInterval = time.Second + +func (o *Operation) Wait(ctx context.Context, opts ...grpc.CallOption) error { + return o.WaitInterval(ctx, DefaultPollInterval, opts...) +} + +func (o *Operation) WaitInterval(ctx context.Context, pollInterval time.Duration, opts ...grpc.CallOption) error { + return o.waitInterval(ctx, pollInterval, opts...) +} + +const ( + pollIntervalMetadataKey = "x-operation-poll-interval" +) + +func (o *Operation) waitInterval(ctx context.Context, pollInterval time.Duration, opts ...grpc.CallOption) error { + var headers metadata.MD + opts = append(opts, grpc.Header(&headers)) + + // Sometimes, the returned operation is not on all replicas yet, + // so we need to ignore first couple of NotFound errors. + const maxNotFoundRetry = 3 + notFoundCount := 0 + for !o.Done() { + headers = metadata.MD{} + err := o.Poll(ctx, opts...) + if err != nil { + if notFoundCount < maxNotFoundRetry && shoudRetry(err) { + notFoundCount++ + } else { + // Message needed to distinguish poll fail and operation error, which are both gRPC status. + return sdkerrors.WithMessage(err, "poll fail") + } + } + if o.Done() { + break + } + interval := pollInterval + if vals := headers.Get(pollIntervalMetadataKey); len(vals) > 0 { + i, err := strconv.Atoi(vals[0]) + if err == nil { + interval = time.Duration(i) * time.Second + } + } + if interval <= 0 { + continue + } + wait, stop := o.newTimer(interval) + select { + case <-wait(): + case <-ctx.Done(): + stop() + return ctx.Err() + } + } + return o.Error() +} + +func shoudRetry(err error) bool { + status, ok := status.FromError(err) + return ok && status.Code() == codes.NotFound +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/operation/utils.go b/vendor/github.com/yandex-cloud/go-sdk/operation/utils.go new file mode 100644 index 000000000..f1d04ddde --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/operation/utils.go @@ -0,0 +1,23 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. 
+// Author: Vladimir Skipor + +package operation + +import ( + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" +) + +// Copy from bb.yandex-team.ru/cloud/cloud-go/pkg/protoutil/any.go +func UnmarshalAny(msg *any.Any) (proto.Message, error) { + if msg == nil { + return nil, nil + } + box := &ptypes.DynamicAny{} + err := ptypes.UnmarshalAny(msg, box) + if err != nil { + return nil, err + } + return box.Message, nil +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/pkg/grpcclient/conn_context.go b/vendor/github.com/yandex-cloud/go-sdk/pkg/grpcclient/conn_context.go new file mode 100644 index 000000000..831701fe2 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/pkg/grpcclient/conn_context.go @@ -0,0 +1,169 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Alexey Baranov + +package grpcclient + +import ( + "context" + "errors" + "sync" + + multierror "github.com/hashicorp/go-multierror" + "google.golang.org/grpc" + + "github.com/yandex-cloud/go-sdk/pkg/singleflight" +) + +var ErrConnContextClosed = errors.New("grpcutil: client connection context closed") + +type DialError struct { + Err error + Add string +} + +func (d *DialError) Error() string { + return `error dialing endpoint "` + d.Add + `": ` + d.Err.Error() +} + +//go:generate mockery -name=ConnContext + +type ConnContext interface { + GetConn(ctx context.Context, addr string) (*grpc.ClientConn, error) + CallOptions() []grpc.CallOption + Shutdown(context.Context) error +} + +type LazyConnContextOption func(*lazyConnContextOptions) + +type lazyConnContextOptions struct { + dialOpts []grpc.DialOption + callOpts []grpc.CallOption +} + +func DialOptions(dopts ...grpc.DialOption) LazyConnContextOption { + return func(o *lazyConnContextOptions) { + o.dialOpts = append(o.dialOpts, dopts...) + } +} + +func CallOptions(copts ...grpc.CallOption) LazyConnContextOption { + return func(o *lazyConnContextOptions) { + o.callOpts = append(o.callOpts, copts...) + } +} + +type lazyConnContext struct { + opts *lazyConnContextOptions + + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex + conns map[string]*grpc.ClientConn + closed bool + closing bool + + dial singleflight.Group + shutdown singleflight.Call +} + +func NewLazyConnContext(opt ...LazyConnContextOption) ConnContext { + opts := &lazyConnContextOptions{} + for _, o := range opt { + o(opts) + } + ctx, cancel := context.WithCancel(context.Background()) + return &lazyConnContext{ + opts: opts, + ctx: ctx, + cancel: cancel, + conns: map[string]*grpc.ClientConn{}, + } +} + +func (cc *lazyConnContext) GetConn(ctx context.Context, addr string) (*grpc.ClientConn, error) { + cc.mu.Lock() + if cc.closed || cc.closing { + cc.mu.Unlock() + return nil, ErrConnContextClosed + } + if conn, ok := cc.conns[addr]; ok { + cc.mu.Unlock() + return conn, nil + } + cc.mu.Unlock() + + result := cc.dial.Do(addr, func() interface{} { + conn, err := grpc.DialContext(cc.ctx, addr, cc.opts.dialOpts...) 
+ if err != nil { + if err == cc.ctx.Err() { + err = ErrConnContextClosed + } else { + err = &DialError{err, addr} + } + return connAndErr{err: err} + } + cc.mu.Lock() + if cc.closed || cc.closing { + cc.mu.Unlock() + // we swallow error here, since the client doesn't care about it + _ = conn.Close() + return connAndErr{conn: nil, err: ErrConnContextClosed} + } + cc.conns[addr] = conn + cc.mu.Unlock() + return connAndErr{conn: conn} + }) + ce := result.(connAndErr) + return ce.conn, ce.err +} + +func (cc *lazyConnContext) CallOptions() []grpc.CallOption { + callOpts := make([]grpc.CallOption, len(cc.opts.callOpts)) + copy(callOpts, cc.opts.callOpts) + return callOpts +} + +func (cc *lazyConnContext) Shutdown(ctx context.Context) error { + cc.mu.Lock() + if cc.closed { + cc.mu.Unlock() + return nil + } + cc.closing = true + cc.mu.Unlock() + + result := cc.shutdown.Do(func() interface{} { + cc.mu.Lock() + cc.cancel() + conns := make([]*grpc.ClientConn, 0, len(cc.conns)) + for _, conn := range cc.conns { + conns = append(conns, conn) + } + cc.mu.Unlock() + + var errs error + for _, conn := range conns { + err := conn.Close() + if err != nil { + errs = multierror.Append(errs, err) + } + } + + cc.mu.Lock() + cc.closed = true + cc.closing = false + cc.mu.Unlock() + return errs + }) + + if result == nil { + return nil + } + return result.(error) +} + +type connAndErr struct { + conn *grpc.ClientConn + err error +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/pkg/requestid/interceptor.go b/vendor/github.com/yandex-cloud/go-sdk/pkg/requestid/interceptor.go new file mode 100644 index 000000000..c0c962b8c --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/pkg/requestid/interceptor.go @@ -0,0 +1,108 @@ +package requestid + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + clientTraceIDHeader = "x-client-trace-id" + clientRequestIDHeader = "x-client-request-id" + serverRequestIDHeader = "x-request-id" + serverTraceIDHeader = "x-server-trace-id" +) + +func Interceptor() func(ctx context.Context, method string, req interface{}, reply interface{}, conn *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req interface{}, reply interface{}, conn *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + clientTraceID := uuid.New().String() + clientRequestID := uuid.New().String() + var responseHeader metadata.MD + opts = append(opts, grpc.Header(&responseHeader)) + ctx = withClientRequestIDs(ctx, clientTraceID, clientRequestID) + err := invoker(ctx, method, req, reply, conn, opts...) 
+ return wrapError(err, clientTraceID, clientRequestID, responseHeader) + } +} + +type RequestIDs struct { + ClientTraceID string + ClientRequestID string + ServerRequestID string + ServerTraceID string +} + +type errorWithRequestIDs struct { + origErr error + ids RequestIDs +} + +func (e *errorWithRequestIDs) Error() string { + switch { + case e.ids.ServerRequestID != "": + return fmt.Sprintf("request-id = %s %s", e.ids.ServerRequestID, e.origErr.Error()) + case e.ids.ClientRequestID != "": + return fmt.Sprintf("client-request-id = %s %s", e.ids.ClientRequestID, e.origErr.Error()) + default: + return e.origErr.Error() + } +} + +func (e errorWithRequestIDs) GRPCStatus() *status.Status { + return status.Convert(e.origErr) +} + +func RequestIDsFromError(err error) (*RequestIDs, bool) { + if withID, ok := err.(*errorWithRequestIDs); ok { + return &withID.ids, ok + } + return nil, false +} + +func wrapError(err error, clientTraceID, clientRequestID string, responseHeader metadata.MD) error { + if err == nil { + return nil + } + + if _, ok := err.(*errorWithRequestIDs); ok { + return err + } + + serverRequestID := getServerHeader(responseHeader, serverRequestIDHeader) + serverTraceID := getServerHeader(responseHeader, serverTraceIDHeader) + + return &errorWithRequestIDs{ + err, + RequestIDs{ + ClientTraceID: clientTraceID, + ClientRequestID: clientRequestID, + ServerRequestID: serverRequestID, + ServerTraceID: serverTraceID, + }, + } +} + +func getServerHeader(responseHeader metadata.MD, key string) string { + serverHeaderIDRaw := responseHeader.Get(key) + if len(serverHeaderIDRaw) == 0 { + return "" + } + + return serverHeaderIDRaw[0] +} + +func withClientRequestIDs(ctx context.Context, clientTraceID, clientRequestID string) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } else { + md = md.Copy() + } + md.Set(clientRequestIDHeader, clientRequestID) + md.Set(clientTraceIDHeader, clientTraceID) + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/message.go b/vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/message.go new file mode 100644 index 000000000..1657c8e2c --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/message.go @@ -0,0 +1,35 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Dmitry Novikov + +package sdkerrors + +import ( + "fmt" +) + +type errWithMessage struct { + err error + message string +} + +func (e *errWithMessage) Error() string { + return e.message + ": " + e.err.Error() +} + +func (e *errWithMessage) Cause() error { + return e.err +} + +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &errWithMessage{err, message} +} + +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &errWithMessage{err, fmt.Sprintf(format, args...)} +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/multierr.go b/vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/multierr.go new file mode 100644 index 000000000..0f22d3f68 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/pkg/sdkerrors/multierr.go @@ -0,0 +1,79 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. 
+// Author: Dmitry Novikov + +package sdkerrors + +import ( + "fmt" + "strings" +) + +type multerr struct { + errs []error +} + +func (e *multerr) Errors() []error { + return e.errs +} + +func (e *multerr) Error() string { + lines := make([]string, len(e.errs)) + for k, v := range e.errs { + lines[k] = v.Error() + } + return strings.Join(lines, "\n") +} + +func Errors(err error) []error { + if err == nil { + return nil + } + switch err := err.(type) { + case interface { + Errors() []error + }: + // go.uber.org/multierr + return err.Errors() + case interface { + WrappedErrors() []error + }: + // github.com/hashicorp/go-multierror + return err.WrappedErrors() + default: + } + return []error{err} +} + +func Append(lhs, rhs error) error { + if lhs == nil { + return rhs + } else if rhs == nil { + return lhs + } + var result []error + result = append(result, Errors(lhs)...) + result = append(result, Errors(rhs)...) + return &multerr{result} +} + +func CombineGoroutines(funcs ...func() error) error { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { + var err error + defer func() { + if r := recover(); r != nil { + errChan <- fmt.Errorf("Panic recovered: %v", r) + } else { + errChan <- err + } + }() + err = f() + }(f) + } + var errs error + for i := 0; i < cap(errChan); i++ { + errs = Append(errs, <-errChan) + } + return errs +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/LICENSE b/vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/singleflight.go b/vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/singleflight.go new file mode 100644 index 000000000..ee125856e --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/pkg/singleflight/singleflight.go @@ -0,0 +1,101 @@ +// Package singleflight based on github.com/golang/groupcache/singleflight +package singleflight + +import "sync" + +// call is an in-flight or completed Do call +type call struct { + wg sync.WaitGroup + val interface{} +} + +// Group represents a class of work and forms a namespace in which +// units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[interface{}]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making sure that +// only one execution is in-flight for a given key at a time. If a duplicate +// comes in, the duplicate caller waits for the original to complete and +// receives the same results. 
+func (g *Group) Do(key interface{}, fn func() interface{}) interface{} { + g.mu.Lock() + if g.m == nil { + g.m = make(map[interface{}]*call) + } + if c, ok := g.m[key]; ok { + g.mu.Unlock() + c.wg.Wait() + return c.val + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + c.val = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() + + return c.val +} + +// DoAsync used instead of Do, when there is not need to wait for result. It +// behaves like go { Group.Do(key, fn) }(), but doesn't create goroutine when +// there is another execution for given key in-flight. +func (g *Group) DoAsync(key interface{}, fn func() interface{}) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[interface{}]*call) + } + if _, ok := g.m[key]; ok { + g.mu.Unlock() + return + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + go func() { + c.val = fn() + c.wg.Done() + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() + }() +} + +// Call represents single deduplicated function call. +type Call struct { + wg sync.WaitGroup + mu sync.Mutex + calling bool + val interface{} +} + +func (c *Call) Do(fn func() interface{}) interface{} { + c.mu.Lock() + if c.calling { + c.mu.Unlock() + c.wg.Wait() + return c.val + } + + c.calling = true + c.wg.Add(1) + c.mu.Unlock() + + c.val = fn() + + c.mu.Lock() + c.calling = false + c.mu.Unlock() + c.wg.Done() + + return c.val +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/resolver.go b/vendor/github.com/yandex-cloud/go-sdk/resolver.go new file mode 100644 index 000000000..b8fd7add6 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/resolver.go @@ -0,0 +1,17 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Dmitry Novikov + +package ycsdk + +import ( + "context" + + "google.golang.org/grpc" +) + +type Resolver interface { + ID() string + Err() error + + Run(context.Context, *SDK, ...grpc.CallOption) error +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/rpc_credentials.go b/vendor/github.com/yandex-cloud/go-sdk/rpc_credentials.go new file mode 100644 index 000000000..8c2fbcfae --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/rpc_credentials.go @@ -0,0 +1,121 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Author: Maxim Kolganov + +package ycsdk + +import ( + "context" + "net/url" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1" + "github.com/yandex-cloud/go-sdk/pkg/sdkerrors" +) + +type rpcCredentials struct { + creds ExchangeableCredentials + plaintext bool + + // getConn set in Init. 
+ getConn lazyConn + // now may be replaced in tests + now func() time.Time + + // mutex guards conn and currentState, and excludes multiple simultaneous token updates + mutex sync.RWMutex + conn *grpc.ClientConn // initialized lazily from getConn + currentState rpcCredentialsState +} + +var _ credentials.PerRPCCredentials = &rpcCredentials{} + +type rpcCredentialsState struct { + token string + refreshAfter time.Time + version int64 +} + +func newRPCCredentials(creds ExchangeableCredentials, plaintext bool) *rpcCredentials { + return &rpcCredentials{ + creds: creds, + plaintext: plaintext, + now: time.Now, + } +} + +func (c *rpcCredentials) Init(lazyConn lazyConn) { + c.getConn = lazyConn +} + +func (c *rpcCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + audienceURL, err := url.Parse(uri[0]) + if err != nil { + return nil, err + } + if audienceURL.Path == "/yandex.cloud.iam.v1.IamTokenService" || + audienceURL.Path == "/yandex.cloud.endpoint.ApiEndpointService" { + return nil, nil + } + + c.mutex.RLock() + state := c.currentState + c.mutex.RUnlock() + + token := state.token + outdated := state.refreshAfter.Before(c.now()) + if outdated { + token, err = c.updateToken(ctx, state) + if err != nil { + return nil, err + } + } + + return map[string]string{ + "authorization": "Bearer " + token, + }, nil +} + +func (c *rpcCredentials) RequireTransportSecurity() bool { + return !c.plaintext +} + +func (c *rpcCredentials) updateToken(ctx context.Context, currentState rpcCredentialsState) (string, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + if c.currentState.version != currentState.version { + // someone have already updated it + return c.currentState.token, nil + } + + if c.conn == nil { + conn, err := c.getConn(ctx) + if err != nil { + return "", err + } + c.conn = conn + } + tokenClient := iam.NewIamTokenServiceClient(c.conn) + + tokenReq, err := c.creds.IAMTokenRequest() + if err != nil { + return "", sdkerrors.WithMessage(err, "failed to create IAM token request from credentials") + } + + resp, err := tokenClient.Create(ctx, tokenReq) + if err != nil { + return "", err + } + + c.currentState = rpcCredentialsState{ + token: resp.IamToken, + refreshAfter: c.now().Add(iamTokenExpiration), + version: currentState.version + 1, + } + + return c.currentState.token, nil +} diff --git a/vendor/github.com/yandex-cloud/go-sdk/sdk.go b/vendor/github.com/yandex-cloud/go-sdk/sdk.go new file mode 100644 index 000000000..452814da7 --- /dev/null +++ b/vendor/github.com/yandex-cloud/go-sdk/sdk.go @@ -0,0 +1,276 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. 
+// Author: Alexey Baranov + +package ycsdk + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sort" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint" + "github.com/yandex-cloud/go-genproto/yandex/cloud/operation" + "github.com/yandex-cloud/go-sdk/dial" + apiendpoint "github.com/yandex-cloud/go-sdk/gen/apiendpoint" + "github.com/yandex-cloud/go-sdk/gen/compute" + "github.com/yandex-cloud/go-sdk/gen/iam" + gen_operation "github.com/yandex-cloud/go-sdk/gen/operation" + "github.com/yandex-cloud/go-sdk/gen/resourcemanager" + "github.com/yandex-cloud/go-sdk/gen/vpc" + sdk_operation "github.com/yandex-cloud/go-sdk/operation" + "github.com/yandex-cloud/go-sdk/pkg/grpcclient" + "github.com/yandex-cloud/go-sdk/pkg/sdkerrors" + "github.com/yandex-cloud/go-sdk/pkg/singleflight" +) + +type Endpoint string + +const ( + ComputeServiceID Endpoint = "compute" + IAMServiceID Endpoint = "iam" + OperationServiceID Endpoint = "operation" + ResourceManagementServiceID Endpoint = "resource-manager" + // revive:disable:var-naming + ApiEndpointServiceID Endpoint = "endpoint" + // revive:enable:var-naming + VpcServiceID Endpoint = "vpc" +) + +// Config is a config that is used to create SDK instance. +type Config struct { + // Credentials are used to authenticate the client. See Credentials for more info. + Credentials Credentials + // DialContextTimeout specifies timeout of dial on API endpoint that + // is used when building an SDK instance. + DialContextTimeout time.Duration + // TLSConfig is optional tls.Config that one can use in order to tune TLS options. + TLSConfig *tls.Config + + // Endpoint is an API endpoint of Yandex.Cloud against which the SDK is used. + // Most users won't need to explicitly set it. 
+ Endpoint string + Plaintext bool +} + +// SDK is a Yandex.Cloud SDK +type SDK struct { + conf Config + cc grpcclient.ConnContext + endpoints struct { + initDone bool + mu sync.Mutex + ep map[Endpoint]*endpoint.ApiEndpoint + } + + initErr error + initCall singleflight.Call + muErr sync.Mutex +} + +// Build creates an SDK instance +func Build(ctx context.Context, conf Config, customOpts ...grpc.DialOption) (*SDK, error) { + if conf.Credentials == nil { + return nil, errors.New("credentials required") + } + + const defaultEndpoint = "api.cloud.yandex.net:443" + if conf.Endpoint == "" { + conf.Endpoint = defaultEndpoint + } + const DefaultTimeout = 10 * time.Second + if conf.DialContextTimeout == 0 { + conf.DialContextTimeout = DefaultTimeout + } + + creds, ok := conf.Credentials.(ExchangeableCredentials) + if !ok { + return nil, fmt.Errorf("unsupported credentials type %T", conf.Credentials) + } + var dialOpts []grpc.DialOption + + dialOpts = append(dialOpts, grpc.WithDialer( + func(target string, timeout time.Duration) (conn net.Conn, e error) { + // Remove extra wrapper when grpc.withContextDialer become exported in https://github.com/grpc/grpc-go/issues/1786 + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + dialer := dial.NewProxyDialer(dial.NewDialer()) + return dialer(ctx, target) + })) + + rpcCreds := newRPCCredentials(creds, conf.Plaintext) + dialOpts = append(dialOpts, grpc.WithPerRPCCredentials(rpcCreds)) + if conf.DialContextTimeout > 0 { + dialOpts = append(dialOpts, grpc.WithBlock(), grpc.WithTimeout(conf.DialContextTimeout)) // nolint + } + if conf.Plaintext { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + tlsConfig := conf.TLSConfig + if tlsConfig == nil { + tlsConfig = &tls.Config{} + } + creds := credentials.NewTLS(tlsConfig) + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + // Append custom options after default, to allow to customize dialer and etc. + dialOpts = append(dialOpts, customOpts...) + + cc := grpcclient.NewLazyConnContext(grpcclient.DialOptions(dialOpts...)) + sdk := &SDK{ + cc: cc, + conf: conf, + } + rpcCreds.Init(sdk.getConn(IAMServiceID)) + return sdk, nil +} + +// Shutdown shutdowns SDK and closes all open connections. 
+func (sdk *SDK) Shutdown(ctx context.Context) error { + return sdk.cc.Shutdown(ctx) +} + +// WrapOperation wraps operation proto message to +func (sdk *SDK) WrapOperation(op *operation.Operation, err error) (*sdk_operation.Operation, error) { + if err != nil { + return nil, err + } + return sdk_operation.New(sdk.Operation(), op), nil +} + +// IAM returns IAM object that is used to operate on Yandex Cloud Identity and Access Manager +func (sdk *SDK) IAM() *iam.IAM { + return iam.NewIAM(sdk.getConn(IAMServiceID)) +} + +// Compute returns Compute object that is used to operate on Yandex Compute Cloud +func (sdk *SDK) Compute() *compute.Compute { + return compute.NewCompute(sdk.getConn(ComputeServiceID)) +} + +// VPC returns VPC object that is used to operate on Yandex Virtual Private Cloud +func (sdk *SDK) VPC() *vpc.VPC { + return vpc.NewVPC(sdk.getConn(VpcServiceID)) +} + +// MDB returns MDB object that is used to operate on Yandex Managed Databases +func (sdk *SDK) MDB() *MDB { + return &MDB{sdk: sdk} +} + +// Operation gets OperationService client +func (sdk *SDK) Operation() *gen_operation.OperationServiceClient { + group := gen_operation.NewOperation(sdk.getConn(OperationServiceID)) + return group.Operation() +} + +// ResourceManager returns ResourceManager object that is used to operate on Folders and Clouds +func (sdk *SDK) ResourceManager() *resourcemanager.ResourceManager { + return resourcemanager.NewResourceManager(sdk.getConn(ResourceManagementServiceID)) +} + +// revive:disable:var-naming + +// ApiEndpoint gets ApiEndpointService client +func (sdk *SDK) ApiEndpoint() *apiendpoint.APIEndpoint { + return apiendpoint.NewAPIEndpoint(sdk.getConn(ApiEndpointServiceID)) +} + +// revive:enable:var-naming + +func (sdk *SDK) Resolve(ctx context.Context, r ...Resolver) error { + args := make([]func() error, len(r)) + for k, v := range r { + resolver := v + args[k] = func() error { + return resolver.Run(ctx, sdk) + } + } + return sdkerrors.CombineGoroutines(args...) +} + +type lazyConn func(ctx context.Context) (*grpc.ClientConn, error) + +func (sdk *SDK) getConn(serviceID Endpoint) func(ctx context.Context) (*grpc.ClientConn, error) { + return func(ctx context.Context) (*grpc.ClientConn, error) { + if !sdk.initDone() { + sdk.initCall.Do(func() interface{} { + sdk.muErr.Lock() + sdk.initErr = sdk.initConns(ctx) + sdk.muErr.Unlock() + return nil + }) + if err := sdk.InitErr(); err != nil { + return nil, err + } + } + endpoint, endpointExist := sdk.Endpoint(serviceID) + if !endpointExist { + return nil, fmt.Errorf("server doesn't know service \"%v\". 
Known services: %v", + serviceID, + sdk.KnownServices()) + } + return sdk.cc.GetConn(ctx, endpoint.Address) + } +} + +func (sdk *SDK) initDone() (b bool) { + sdk.endpoints.mu.Lock() + b = sdk.endpoints.initDone + sdk.endpoints.mu.Unlock() + return +} + +func (sdk *SDK) KnownServices() []string { + sdk.endpoints.mu.Lock() + result := make([]string, 0, len(sdk.endpoints.ep)) + for k := range sdk.endpoints.ep { + result = append(result, string(k)) + } + sdk.endpoints.mu.Unlock() + sort.Strings(result) + return result +} + +func (sdk *SDK) Endpoint(endpointName Endpoint) (ep *endpoint.ApiEndpoint, exist bool) { + sdk.endpoints.mu.Lock() + ep, exist = sdk.endpoints.ep[endpointName] + sdk.endpoints.mu.Unlock() + return +} + +func (sdk *SDK) InitErr() error { + sdk.muErr.Lock() + defer sdk.muErr.Unlock() + return sdk.initErr +} + +func (sdk *SDK) initConns(ctx context.Context) error { + discoveryConn, err := sdk.cc.GetConn(ctx, sdk.conf.Endpoint) + if err != nil { + return err + } + ec := endpoint.NewApiEndpointServiceClient(discoveryConn) + const defaultEndpointPageSize = 100 + listResponse, err := ec.List(ctx, &endpoint.ListApiEndpointsRequest{ + PageSize: defaultEndpointPageSize, + }) + if err != nil { + return err + } + sdk.endpoints.mu.Lock() + sdk.endpoints.ep = make(map[Endpoint]*endpoint.ApiEndpoint, len(listResponse.Endpoints)) + for _, e := range listResponse.Endpoints { + sdk.endpoints.ep[Endpoint(e.Id)] = e + } + sdk.endpoints.initDone = true + sdk.endpoints.mu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 000000000..3dd632886 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,765 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/error_details.proto + +package errdetails // import "google.golang.org/genproto/googleapis/rpc/errdetails" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retires have been reached or a maximum retry delay cap has been +// reached. +type RetryInfo struct { + // Clients should wait at least this long between retrying the same request. 
+ RetryDelay *duration.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RetryInfo) Reset() { *m = RetryInfo{} } +func (m *RetryInfo) String() string { return proto.CompactTextString(m) } +func (*RetryInfo) ProtoMessage() {} +func (*RetryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{0} +} +func (m *RetryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RetryInfo.Unmarshal(m, b) +} +func (m *RetryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RetryInfo.Marshal(b, m, deterministic) +} +func (dst *RetryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryInfo.Merge(dst, src) +} +func (m *RetryInfo) XXX_Size() int { + return xxx_messageInfo_RetryInfo.Size(m) +} +func (m *RetryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RetryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryInfo proto.InternalMessageInfo + +func (m *RetryInfo) GetRetryDelay() *duration.Duration { + if m != nil { + return m.RetryDelay + } + return nil +} + +// Describes additional debugging info. +type DebugInfo struct { + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DebugInfo) Reset() { *m = DebugInfo{} } +func (m *DebugInfo) String() string { return proto.CompactTextString(m) } +func (*DebugInfo) ProtoMessage() {} +func (*DebugInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{1} +} +func (m *DebugInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DebugInfo.Unmarshal(m, b) +} +func (m *DebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DebugInfo.Marshal(b, m, deterministic) +} +func (dst *DebugInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DebugInfo.Merge(dst, src) +} +func (m *DebugInfo) XXX_Size() int { + return xxx_messageInfo_DebugInfo.Size(m) +} +func (m *DebugInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DebugInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DebugInfo proto.InternalMessageInfo + +func (m *DebugInfo) GetStackEntries() []string { + if m != nil { + return m.StackEntries + } + return nil +} + +func (m *DebugInfo) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryDetail and Help types for other details about handling a +// quota failure. +type QuotaFailure struct { + // Describes all quota violations. 
+ Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QuotaFailure) Reset() { *m = QuotaFailure{} } +func (m *QuotaFailure) String() string { return proto.CompactTextString(m) } +func (*QuotaFailure) ProtoMessage() {} +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{2} +} +func (m *QuotaFailure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QuotaFailure.Unmarshal(m, b) +} +func (m *QuotaFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QuotaFailure.Marshal(b, m, deterministic) +} +func (dst *QuotaFailure) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuotaFailure.Merge(dst, src) +} +func (m *QuotaFailure) XXX_Size() int { + return xxx_messageInfo_QuotaFailure.Size(m) +} +func (m *QuotaFailure) XXX_DiscardUnknown() { + xxx_messageInfo_QuotaFailure.DiscardUnknown(m) +} + +var xxx_messageInfo_QuotaFailure proto.InternalMessageInfo + +func (m *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if m != nil { + return m.Violations + } + return nil +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". 
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QuotaFailure_Violation) Reset() { *m = QuotaFailure_Violation{} } +func (m *QuotaFailure_Violation) String() string { return proto.CompactTextString(m) } +func (*QuotaFailure_Violation) ProtoMessage() {} +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{2, 0} +} +func (m *QuotaFailure_Violation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QuotaFailure_Violation.Unmarshal(m, b) +} +func (m *QuotaFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QuotaFailure_Violation.Marshal(b, m, deterministic) +} +func (dst *QuotaFailure_Violation) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuotaFailure_Violation.Merge(dst, src) +} +func (m *QuotaFailure_Violation) XXX_Size() int { + return xxx_messageInfo_QuotaFailure_Violation.Size(m) +} +func (m *QuotaFailure_Violation) XXX_DiscardUnknown() { + xxx_messageInfo_QuotaFailure_Violation.DiscardUnknown(m) +} + +var xxx_messageInfo_QuotaFailure_Violation proto.InternalMessageInfo + +func (m *QuotaFailure_Violation) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *QuotaFailure_Violation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Describes what preconditions have failed. +// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreconditionFailure) Reset() { *m = PreconditionFailure{} } +func (m *PreconditionFailure) String() string { return proto.CompactTextString(m) } +func (*PreconditionFailure) ProtoMessage() {} +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{3} +} +func (m *PreconditionFailure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PreconditionFailure.Unmarshal(m, b) +} +func (m *PreconditionFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PreconditionFailure.Marshal(b, m, deterministic) +} +func (dst *PreconditionFailure) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreconditionFailure.Merge(dst, src) +} +func (m *PreconditionFailure) XXX_Size() int { + return xxx_messageInfo_PreconditionFailure.Size(m) +} +func (m *PreconditionFailure) XXX_DiscardUnknown() { + xxx_messageInfo_PreconditionFailure.DiscardUnknown(m) +} + +var xxx_messageInfo_PreconditionFailure proto.InternalMessageInfo + +func (m *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if m != nil { + return m.Violations + } + return nil +} + +// A message type used to describe a single precondition failure. +type PreconditionFailure_Violation struct { + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation types. 
For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would + // indicate which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreconditionFailure_Violation) Reset() { *m = PreconditionFailure_Violation{} } +func (m *PreconditionFailure_Violation) String() string { return proto.CompactTextString(m) } +func (*PreconditionFailure_Violation) ProtoMessage() {} +func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{3, 0} +} +func (m *PreconditionFailure_Violation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PreconditionFailure_Violation.Unmarshal(m, b) +} +func (m *PreconditionFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PreconditionFailure_Violation.Marshal(b, m, deterministic) +} +func (dst *PreconditionFailure_Violation) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreconditionFailure_Violation.Merge(dst, src) +} +func (m *PreconditionFailure_Violation) XXX_Size() int { + return xxx_messageInfo_PreconditionFailure_Violation.Size(m) +} +func (m *PreconditionFailure_Violation) XXX_DiscardUnknown() { + xxx_messageInfo_PreconditionFailure_Violation.DiscardUnknown(m) +} + +var xxx_messageInfo_PreconditionFailure_Violation proto.InternalMessageInfo + +func (m *PreconditionFailure_Violation) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PreconditionFailure_Violation) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *PreconditionFailure_Violation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + // Describes all violations in a client request. 
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BadRequest) Reset() { *m = BadRequest{} } +func (m *BadRequest) String() string { return proto.CompactTextString(m) } +func (*BadRequest) ProtoMessage() {} +func (*BadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{4} +} +func (m *BadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BadRequest.Unmarshal(m, b) +} +func (m *BadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BadRequest.Marshal(b, m, deterministic) +} +func (dst *BadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BadRequest.Merge(dst, src) +} +func (m *BadRequest) XXX_Size() int { + return xxx_messageInfo_BadRequest.Size(m) +} +func (m *BadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BadRequest proto.InternalMessageInfo + +func (m *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if m != nil { + return m.FieldViolations + } + return nil +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + // A path leading to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. E.g., "field_violations.field" would identify this field. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BadRequest_FieldViolation) Reset() { *m = BadRequest_FieldViolation{} } +func (m *BadRequest_FieldViolation) String() string { return proto.CompactTextString(m) } +func (*BadRequest_FieldViolation) ProtoMessage() {} +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{4, 0} +} +func (m *BadRequest_FieldViolation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BadRequest_FieldViolation.Unmarshal(m, b) +} +func (m *BadRequest_FieldViolation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BadRequest_FieldViolation.Marshal(b, m, deterministic) +} +func (dst *BadRequest_FieldViolation) XXX_Merge(src proto.Message) { + xxx_messageInfo_BadRequest_FieldViolation.Merge(dst, src) +} +func (m *BadRequest_FieldViolation) XXX_Size() int { + return xxx_messageInfo_BadRequest_FieldViolation.Size(m) +} +func (m *BadRequest_FieldViolation) XXX_DiscardUnknown() { + xxx_messageInfo_BadRequest_FieldViolation.DiscardUnknown(m) +} + +var xxx_messageInfo_BadRequest_FieldViolation proto.InternalMessageInfo + +func (m *BadRequest_FieldViolation) GetField() string { + if m != nil { + return m.Field + } + return "" +} + +func (m *BadRequest_FieldViolation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. 
+type RequestInfo struct { + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestInfo) Reset() { *m = RequestInfo{} } +func (m *RequestInfo) String() string { return proto.CompactTextString(m) } +func (*RequestInfo) ProtoMessage() {} +func (*RequestInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{5} +} +func (m *RequestInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequestInfo.Unmarshal(m, b) +} +func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) +} +func (dst *RequestInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInfo.Merge(dst, src) +} +func (m *RequestInfo) XXX_Size() int { + return xxx_messageInfo_RequestInfo.Size(m) +} +func (m *RequestInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInfo proto.InternalMessageInfo + +func (m *RequestInfo) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +func (m *RequestInfo) GetServingData() string { + if m != nil { + return m.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". + ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. 
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceInfo) Reset() { *m = ResourceInfo{} } +func (m *ResourceInfo) String() string { return proto.CompactTextString(m) } +func (*ResourceInfo) ProtoMessage() {} +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{6} +} +func (m *ResourceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceInfo.Unmarshal(m, b) +} +func (m *ResourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceInfo.Marshal(b, m, deterministic) +} +func (dst *ResourceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceInfo.Merge(dst, src) +} +func (m *ResourceInfo) XXX_Size() int { + return xxx_messageInfo_ResourceInfo.Size(m) +} +func (m *ResourceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceInfo proto.InternalMessageInfo + +func (m *ResourceInfo) GetResourceType() string { + if m != nil { + return m.ResourceType + } + return "" +} + +func (m *ResourceInfo) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ResourceInfo) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *ResourceInfo) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Help) Reset() { *m = Help{} } +func (m *Help) String() string { return proto.CompactTextString(m) } +func (*Help) ProtoMessage() {} +func (*Help) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{7} +} +func (m *Help) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Help.Unmarshal(m, b) +} +func (m *Help) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Help.Marshal(b, m, deterministic) +} +func (dst *Help) XXX_Merge(src proto.Message) { + xxx_messageInfo_Help.Merge(dst, src) +} +func (m *Help) XXX_Size() int { + return xxx_messageInfo_Help.Size(m) +} +func (m *Help) XXX_DiscardUnknown() { + xxx_messageInfo_Help.DiscardUnknown(m) +} + +var xxx_messageInfo_Help proto.InternalMessageInfo + +func (m *Help) GetLinks() []*Help_Link { + if m != nil { + return m.Links + } + return nil +} + +// Describes a URL link. +type Help_Link struct { + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Help_Link) Reset() { *m = Help_Link{} } +func (m *Help_Link) String() string { return proto.CompactTextString(m) } +func (*Help_Link) ProtoMessage() {} +func (*Help_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{7, 0} +} +func (m *Help_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Help_Link.Unmarshal(m, b) +} +func (m *Help_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Help_Link.Marshal(b, m, deterministic) +} +func (dst *Help_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Help_Link.Merge(dst, src) +} +func (m *Help_Link) XXX_Size() int { + return xxx_messageInfo_Help_Link.Size(m) +} +func (m *Help_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Help_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Help_Link proto.InternalMessageInfo + +func (m *Help_Link) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Help_Link) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + // The locale used following the specification defined at + // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LocalizedMessage) Reset() { *m = LocalizedMessage{} } +func (m *LocalizedMessage) String() string { return proto.CompactTextString(m) } +func (*LocalizedMessage) ProtoMessage() {} +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_error_details_0786ccff29c8b842, []int{8} +} +func (m *LocalizedMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LocalizedMessage.Unmarshal(m, b) +} +func (m *LocalizedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LocalizedMessage.Marshal(b, m, deterministic) +} +func (dst *LocalizedMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalizedMessage.Merge(dst, src) +} +func (m *LocalizedMessage) XXX_Size() int { + return xxx_messageInfo_LocalizedMessage.Size(m) +} +func (m *LocalizedMessage) XXX_DiscardUnknown() { + xxx_messageInfo_LocalizedMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalizedMessage proto.InternalMessageInfo + +func (m *LocalizedMessage) GetLocale() string { + if m != nil { + return m.Locale + } + return "" +} + +func (m *LocalizedMessage) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterType((*RetryInfo)(nil), "google.rpc.RetryInfo") + proto.RegisterType((*DebugInfo)(nil), "google.rpc.DebugInfo") + proto.RegisterType((*QuotaFailure)(nil), "google.rpc.QuotaFailure") + proto.RegisterType((*QuotaFailure_Violation)(nil), "google.rpc.QuotaFailure.Violation") + proto.RegisterType((*PreconditionFailure)(nil), "google.rpc.PreconditionFailure") + 
proto.RegisterType((*PreconditionFailure_Violation)(nil), "google.rpc.PreconditionFailure.Violation") + proto.RegisterType((*BadRequest)(nil), "google.rpc.BadRequest") + proto.RegisterType((*BadRequest_FieldViolation)(nil), "google.rpc.BadRequest.FieldViolation") + proto.RegisterType((*RequestInfo)(nil), "google.rpc.RequestInfo") + proto.RegisterType((*ResourceInfo)(nil), "google.rpc.ResourceInfo") + proto.RegisterType((*Help)(nil), "google.rpc.Help") + proto.RegisterType((*Help_Link)(nil), "google.rpc.Help.Link") + proto.RegisterType((*LocalizedMessage)(nil), "google.rpc.LocalizedMessage") +} + +func init() { + proto.RegisterFile("google/rpc/error_details.proto", fileDescriptor_error_details_0786ccff29c8b842) +} + +var fileDescriptor_error_details_0786ccff29c8b842 = []byte{ + // 595 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0x95, 0x9b, 0xb4, 0x9f, 0x7c, 0x93, 0xaf, 0x14, 0xf3, 0xa3, 0x10, 0x09, 0x14, 0x8c, 0x90, + 0x8a, 0x90, 0x1c, 0xa9, 0xec, 0xca, 0x02, 0x29, 0xb8, 0x7f, 0x52, 0x81, 0x60, 0x21, 0x16, 0xb0, + 0xb0, 0x26, 0xf6, 0x8d, 0x35, 0x74, 0xe2, 0x31, 0x33, 0xe3, 0xa2, 0xf0, 0x14, 0xec, 0xd9, 0xb1, + 0xe2, 0x25, 0x78, 0x37, 0x34, 0x9e, 0x99, 0xc6, 0x6d, 0x0a, 0x62, 0x37, 0xe7, 0xcc, 0x99, 0xe3, + 0x73, 0xaf, 0xae, 0x2f, 0x3c, 0x28, 0x38, 0x2f, 0x18, 0x8e, 0x45, 0x95, 0x8d, 0x51, 0x08, 0x2e, + 0xd2, 0x1c, 0x15, 0xa1, 0x4c, 0x46, 0x95, 0xe0, 0x8a, 0x07, 0x60, 0xee, 0x23, 0x51, 0x65, 0x43, + 0xa7, 0x6d, 0x6e, 0x66, 0xf5, 0x7c, 0x9c, 0xd7, 0x82, 0x28, 0xca, 0x4b, 0xa3, 0x0d, 0x8f, 0xc0, + 0x4f, 0x50, 0x89, 0xe5, 0x49, 0x39, 0xe7, 0xc1, 0x3e, 0xf4, 0x84, 0x06, 0x69, 0x8e, 0x8c, 0x2c, + 0x07, 0xde, 0xc8, 0xdb, 0xed, 0xed, 0xdd, 0x8b, 0xac, 0x9d, 0xb3, 0x88, 0x62, 0x6b, 0x91, 0x40, + 0xa3, 0x8e, 0xb5, 0x38, 0x3c, 0x06, 0x3f, 0xc6, 0x59, 0x5d, 0x34, 0x46, 0x8f, 0xe0, 0x7f, 0xa9, + 0x48, 0x76, 0x96, 0x62, 0xa9, 0x04, 0x45, 0x39, 0xf0, 0x46, 0x9d, 0x5d, 0x3f, 0xe9, 0x37, 0xe4, + 0x81, 0xe1, 0x82, 0xbb, 0xb0, 0x65, 0x72, 0x0f, 0x36, 0x46, 0xde, 0xae, 0x9f, 0x58, 0x14, 0x7e, + 0xf7, 0xa0, 0xff, 0xb6, 0xe6, 0x8a, 0x1c, 0x12, 0xca, 0x6a, 0x81, 0xc1, 0x04, 0xe0, 0x9c, 0x72, + 0xd6, 0x7c, 0xd3, 0x58, 0xf5, 0xf6, 0xc2, 0x68, 0x55, 0x64, 0xd4, 0x56, 0x47, 0xef, 0x9d, 0x34, + 0x69, 0xbd, 0x1a, 0x1e, 0x81, 0x7f, 0x71, 0x11, 0x0c, 0xe0, 0x3f, 0x59, 0xcf, 0x3e, 0x61, 0xa6, + 0x9a, 0x1a, 0xfd, 0xc4, 0xc1, 0x60, 0x04, 0xbd, 0x1c, 0x65, 0x26, 0x68, 0xa5, 0x85, 0x36, 0x58, + 0x9b, 0x0a, 0x7f, 0x79, 0x70, 0x6b, 0x2a, 0x30, 0xe3, 0x65, 0x4e, 0x35, 0xe1, 0x42, 0x9e, 0x5c, + 0x13, 0xf2, 0x49, 0x3b, 0xe4, 0x35, 0x8f, 0xfe, 0x90, 0xf5, 0x63, 0x3b, 0x6b, 0x00, 0x5d, 0xb5, + 0xac, 0xd0, 0x06, 0x6d, 0xce, 0xed, 0xfc, 0x1b, 0x7f, 0xcd, 0xdf, 0x59, 0xcf, 0xff, 0xd3, 0x03, + 0x98, 0x90, 0x3c, 0xc1, 0xcf, 0x35, 0x4a, 0x15, 0x4c, 0x61, 0x67, 0x4e, 0x91, 0xe5, 0xe9, 0x5a, + 0xf8, 0xc7, 0xed, 0xf0, 0xab, 0x17, 0xd1, 0xa1, 0x96, 0xaf, 0x82, 0xdf, 0x98, 0x5f, 0xc2, 0x72, + 0x78, 0x0c, 0xdb, 0x97, 0x25, 0xc1, 0x6d, 0xd8, 0x6c, 0x44, 0xb6, 0x06, 0x03, 0xfe, 0xa1, 0xd5, + 0x6f, 0xa0, 0x67, 0x3f, 0xda, 0x0c, 0xd5, 0x7d, 0x00, 0x61, 0x60, 0x4a, 0x9d, 0x97, 0x6f, 0x99, + 0x93, 0x3c, 0x78, 0x08, 0x7d, 0x89, 0xe2, 0x9c, 0x96, 0x45, 0x9a, 0x13, 0x45, 0x9c, 0xa1, 0xe5, + 0x62, 0xa2, 0x48, 0xf8, 0xcd, 0x83, 0x7e, 0x82, 0x92, 0xd7, 0x22, 0x43, 0x37, 0xa7, 0xc2, 0xe2, + 0xb4, 0xd5, 0xe5, 0xbe, 0x23, 0xdf, 0xe9, 0x6e, 0xb7, 0x45, 0x25, 0x59, 0xa0, 0x75, 0xbe, 0x10, + 0xbd, 0x26, 0x0b, 0xd4, 0x35, 0xf2, 0x2f, 0x25, 0x0a, 0xdb, 0x72, 0x03, 
0xae, 0xd6, 0xd8, 0x5d, + 0xaf, 0x91, 0x43, 0xf7, 0x18, 0x59, 0x15, 0x3c, 0x85, 0x4d, 0x46, 0xcb, 0x33, 0xd7, 0xfc, 0x3b, + 0xed, 0xe6, 0x6b, 0x41, 0x74, 0x4a, 0xcb, 0xb3, 0xc4, 0x68, 0x86, 0xfb, 0xd0, 0xd5, 0xf0, 0xaa, + 0xbd, 0xb7, 0x66, 0x1f, 0xec, 0x40, 0xa7, 0x16, 0xee, 0x07, 0xd3, 0xc7, 0x30, 0x86, 0x9d, 0x53, + 0x9e, 0x11, 0x46, 0xbf, 0x62, 0xfe, 0x0a, 0xa5, 0x24, 0x05, 0xea, 0x3f, 0x91, 0x69, 0xce, 0xd5, + 0x6f, 0x91, 0x9e, 0xb3, 0x85, 0x91, 0xb8, 0x39, 0xb3, 0x70, 0xc2, 0x60, 0x3b, 0xe3, 0x8b, 0x56, + 0xc8, 0xc9, 0xcd, 0x03, 0xbd, 0x89, 0x62, 0xb3, 0x88, 0xa6, 0x7a, 0x55, 0x4c, 0xbd, 0x0f, 0x2f, + 0xac, 0xa0, 0xe0, 0x8c, 0x94, 0x45, 0xc4, 0x45, 0x31, 0x2e, 0xb0, 0x6c, 0x16, 0xc9, 0xd8, 0x5c, + 0x91, 0x8a, 0x4a, 0xb7, 0xc8, 0xec, 0x16, 0x7b, 0xbe, 0x3a, 0xfe, 0xd8, 0xe8, 0x24, 0xd3, 0x97, + 0xb3, 0xad, 0xe6, 0xc5, 0xb3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x15, 0x46, 0x2d, 0xf9, + 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go new file mode 100644 index 000000000..86886693f --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go @@ -0,0 +1,280 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package field_mask // import "google.golang.org/genproto/protobuf/field_mask" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. 
In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. 
+// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_field_mask_02a8b0c0831edcce, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldMask.Unmarshal(m, b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) +} +func (dst *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(dst, src) +} +func (m *FieldMask) XXX_Size() int { + return xxx_messageInfo_FieldMask.Size(m) +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { + proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_02a8b0c0831edcce) +} + +var fileDescriptor_field_mask_02a8b0c0831edcce = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c, + 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, + 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, + 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, + 0xec, 0x1e, 0xe0, 0xb4, 
0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00, +}
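
The field_mask.pb.go file above documents how a `FieldMask` selects the fields touched by a get or update call. The following standalone sketch (not part of the patch; it only assumes the vendored package is importable at the path named in its own `// import` comment) shows the Profile example from the doc comment expressed with the generated type. The JSON case conversion mentioned in the comment is not reimplemented here; the sketch just joins the proto-style paths.

package main

import (
	"fmt"
	"strings"

	field_mask "google.golang.org/genproto/protobuf/field_mask"
)

func main() {
	// Mask taken from the Profile example in the doc comment:
	// update only user.display_name and photo, leave everything else untouched.
	mask := &field_mask.FieldMask{
		Paths: []string{"user.display_name", "photo"},
	}

	// GetPaths is nil-safe, like the other generated getters.
	for _, p := range mask.GetPaths() {
		fmt.Println("path:", p)
	}

	// The JSON wire form described in the doc comment is a single
	// comma-separated, lower-camel-case string ("user.displayName,photo");
	// this sketch only joins the proto paths without the case conversion.
	fmt.Println(strings.Join(mask.GetPaths(), ","))
}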
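
The error_details.pb.go bindings vendored earlier in this patch (BadRequest, RequestInfo, ResourceInfo, Help, LocalizedMessage) are plain message types that services attach to failed RPCs. A minimal sketch of building and reading two of them is below; the import path is an assumption (the conventional genproto location for google/rpc/error_details.proto), since the patch header for that file is not shown in this excerpt.

package main

import (
	"fmt"

	// Assumed import path for the generated google/rpc/error_details.proto bindings.
	"google.golang.org/genproto/googleapis/rpc/errdetails"
)

func main() {
	// Describe a request that failed validation on two fields.
	br := &errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "name must not be empty"},
			{Field: "zone_id", Description: "unknown zone"},
		},
	}

	// A user-facing message in a specific locale, per LocalizedMessage.
	msg := &errdetails.LocalizedMessage{
		Locale:  "en-US",
		Message: "The request contains invalid fields.",
	}

	// The generated getters are nil-safe, so callers can use them without checks.
	for _, v := range br.GetFieldViolations() {
		fmt.Printf("%s: %s\n", v.GetField(), v.GetDescription())
	}
	fmt.Println(msg.GetLocale(), msg.GetMessage())
}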