diff --git a/endpoints.md b/endpoints.md
index eae71ad..280d56b 100755
--- a/endpoints.md
+++ b/endpoints.md
@@ -3,6 +3,8 @@
 * api.calculator.billing-data-plane.api.nebius.cloud:443
   * [nebius.billing.v1alpha1.CalculatorService](nebius/billing/v1alpha1/calculator_service.proto)
 * apps.msp.api.nebius.cloud:443
+  * [nebius.ai.v1.EndpointService](nebius/ai/v1/endpoint_service.proto)
+  * [nebius.ai.v1.JobService](nebius/ai/v1/job_service.proto)
   * [nebius.common.v1.OperationService](nebius/common/v1/operation_service.proto)
   * [nebius.msp.serverless.v1alpha1.EndpointService](nebius/msp/serverless/v1alpha1/endpoint_service.proto)
   * [nebius.msp.serverless.v1alpha1.JobService](nebius/msp/serverless/v1alpha1/job_service.proto)
diff --git a/nebius/ai/v1/endpoint.proto b/nebius/ai/v1/endpoint.proto
new file mode 100644
index 0000000..e82a1e3
--- /dev/null
+++ b/nebius/ai/v1/endpoint.proto
@@ -0,0 +1,292 @@
+syntax = "proto3";
+
+package nebius.ai.v1;
+
+import "buf/validate/validate.proto";
+import "nebius/annotations.proto";
+import "nebius/common/v1/metadata.proto";
+import "nebius/compute/v1/disk.proto";
+import "nebius/compute/v1/instance.proto";
+
+option go_package = "github.com/nebius/gosdk/proto/nebius/ai/v1";
+option java_multiple_files = true;
+option java_outer_classname = "EndpointProto";
+option java_package = "ai.nebius.pub.ai.v1";
+
+// Represents an endpoint with a specified workload.
+message Endpoint {
+  common.v1.ResourceMetadata metadata = 1 [(buf.validate.field).required = true];
+
+  EndpointSpec spec = 2 [(buf.validate.field).required = true];
+
+  EndpointStatus status = 3 [(field_behavior) = OUTPUT_ONLY];
+}
+
+// EndpointSpec defines an endpoint that will be run.
+message EndpointSpec {
+  // The Docker image to use for the endpoint's container.
+  string image = 1 [(buf.validate.field).required = true];
+
+  // Specifies the environment variables for the endpoint's container.
+  repeated EnvironmentVariable environment_variables = 2;
+
+  // Specifies the ports that the endpoint exposes.
+  repeated Port ports = 3;
+
+  // The entrypoint command for the endpoint's container.
+  string container_command = 4;
+
+  // The arguments to pass to the entrypoint command.
+  string args = 5;
+
+  // The working directory for the endpoint's container.
+  string working_dir = 6 [(buf.validate.field) = {
+    string: {max_len: 128}
+  }];
+
+  // Volumes to be mounted into the endpoint's container.
+  repeated VolumeMount volumes = 7;
+
+  // Registry credentials for private Docker registry.
+  RegistryCredentials registry_credentials = 10;
+
+  // Compute platform that the endpoint will be run on.
+  string platform = 20 [(buf.validate.field).required = true];
+
+  // Compute preset that the endpoint will be run on.
+  string preset = 21 [(buf.validate.field).required = true];
+
+  // Shared memory size in bytes for the endpoint's container.
+  int64 shm_size_bytes = 22 [(buf.validate.field) = {
+    int64: {gte: 0}
+  }];
+
+  // Disk spec for the main disk of the endpoint.
+  DiskSpec disk = 23 [(buf.validate.field).required = true];
+
+  // Subnet ID where the endpoint will be deployed.
+  string subnet_id = 24 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcsubnet"]
+    }
+  ];
+
+  // Whether to assign a public IP to the endpoint.
+  bool public_ip = 25;
+
+  // Public keys to be authorized for SSH access to the endpoint.
+  repeated string ssh_authorized_keys = 26;
+
+  // Authentication token needed to access the endpoint.
+  //
+  // Authentication can only be enabled if the endpoint exposes one and only one HTTP port.
+  //
+  // If not provided, authentication will be disabled.
+  string auth_token = 30 [(sensitive) = true];
+
+  // EnvironmentVariable defines an environment variable for the endpoint's container.
+  message EnvironmentVariable {
+    // The name of the environment variable.
+    string name = 1 [(buf.validate.field).required = true];
+
+    // Environment variable value.
+    string value = 2 [(sensitive) = true];
+  }
+
+  message Port {
+    // Container port.
+    int32 container_port = 1 [(buf.validate.field) = {
+      int32: {
+        lte: 65535
+        gte: 1
+      }
+    }];
+
+    // Host port.
+    //
+    // If not specified, it will be the same as container_port.
+    int32 host_port = 2 [(buf.validate.field) = {
+      int32: {
+        lte: 65535
+        gte: 0
+      }
+    }];
+
+    // Port's protocol.
+    Protocol protocol = 3 [(buf.validate.field).required = true];
+
+    // Represents the protocol of the endpoint's port which will be exposed.
+    enum Protocol {
+      PROTOCOL_UNSPECIFIED = 0;
+
+      // HTTP protocol.
+      HTTP = 1;
+
+      // TCP protocol.
+      TCP = 2;
+
+      // UDP protocol.
+      UDP = 3;
+    }
+  }
+
+  // VolumeMount represents a volume mount for the endpoint's container.
+  message VolumeMount {
+    // Source of the volume mount.
+    //
+    // Can be a name or an ID of a Nebius Storage bucket or filesystem.
+    string source = 1 [(nid) = {
+      resource: [
+        "computefilesystem",
+        "storagebucket"
+      ]
+    }];
+
+    // Path inside the source volume.
+    //
+    // Optional.
+    string source_path = 2;
+
+    // Path inside the endpoint's container where the volume is mounted.
+    //
+    // Must be an absolute path.
+    string container_path = 3;
+
+    // Mount mode.
+    Mode mode = 4 [(buf.validate.field).required = true];
+
+    // Mode that will be used to mount the volume.
+    enum Mode {
+      MODE_UNSPECIFIED = 0;
+
+      // Read-only mode.
+      READ_ONLY = 1;
+
+      // Read-write mode.
+      READ_WRITE = 2;
+    }
+  }
+
+  message DiskSpec {
+    // Disk type.
+    nebius.compute.v1.DiskSpec.DiskType type = 1 [(buf.validate.field).required = true];
+
+    // Disk size in bytes.
+    int64 size_bytes = 2;
+  }
+
+  message RegistryCredentials {
+    // Registry username for private Docker registry.
+    string username = 1;
+
+    // Registry password for private Docker registry.
+    string password = 2 [(sensitive) = true];
+
+    // Secret version storing the registry credentials.
+    // Must have keys "REGISTRY_USERNAME" and "REGISTRY_PASSWORD".
+    string mysterybox_secret_version = 3;
+  }
+}
+
+// EndpointStatus represents the status of an endpoint.
+message EndpointStatus {
+  // Private endpoints to access the workload.
+  repeated string private_endpoints = 1;
+
+  // Public endpoints to access the workload.
+  repeated string public_endpoints = 2;
+
+  // Status of individual endpoint instances.
+  repeated EndpointInstanceStatus instances = 10;
+
+  // State of the endpoint.
+  State state = 20;
+
+  // Details of the endpoint's state.
+  EndpointStateDetails state_details = 21;
+
+  // Endpoint state.
+  enum State {
+    STATE_UNSPECIFIED = 0;
+
+    // The endpoint is creating resources.
+    PROVISIONING = 1;
+
+    // The endpoint is being started.
+    STARTING = 2;
+
+    // The endpoint is running.
+    RUNNING = 3;
+
+    // The endpoint is being stopped.
+    STOPPING = 4;
+
+    // The endpoint is being deleted.
+    DELETING = 5;
+
+    // The endpoint has been stopped.
+    STOPPED = 6;
+
+    // The endpoint encountered an error.
+    ERROR = 8;
+  }
+}
+
+// Endpoint state details.
+message EndpointStateDetails {
+  // Short state description.
+  string code = 1 [(buf.validate.field).required = true];
+
+  // Detailed human-readable description.
+  string message = 2;
+}
+
+// EndpointInstanceStatus represents the status of an endpoint instance.
+message EndpointInstanceStatus {
+  // The current state of the endpoint's workload.
+  State state = 1 [(buf.validate.field).required = true];
+
+  // ID of the compute instance running the endpoint.
+  string compute_instance_id = 10 [(nid) = {
+    resource: ["computeinstance"]
+  }];
+
+  // The current state of the compute instance.
+  nebius.compute.v1.InstanceStatus.InstanceState compute_instance_state = 11;
+
+  // Private IP address of the instance.
+  string private_ip = 12;
+
+  // Public IP address of the instance.
+  string public_ip = 13;
+
+  // Endpoint instance state.
+  enum State {
+    STATE_UNSPECIFIED = 0;
+
+    // The endpoint is creating resources.
+    PROVISIONING = 1;
+
+    // The endpoint is being started.
+    STARTING = 2;
+
+    // The endpoint is running.
+    RUNNING = 3;
+
+    // The endpoint is being stopped.
+    STOPPING = 4;
+
+    // The endpoint is being deleted.
+    DELETING = 5;
+
+    // The endpoint has been stopped.
+    STOPPED = 6;
+
+    // The endpoint has failed.
+    FAILED = 7;
+
+    // The endpoint encountered an error.
+    ERROR = 8;
+  }
+}
diff --git a/nebius/ai/v1/endpoint_service.proto b/nebius/ai/v1/endpoint_service.proto
new file mode 100644
index 0000000..030f0e4
--- /dev/null
+++ b/nebius/ai/v1/endpoint_service.proto
@@ -0,0 +1,114 @@
+syntax = "proto3";
+
+package nebius.ai.v1;
+
+import "buf/validate/validate.proto";
+import "nebius/ai/v1/endpoint.proto";
+import "nebius/annotations.proto";
+import "nebius/common/v1/metadata.proto";
+import "nebius/common/v1/operation.proto";
+
+option go_package = "github.com/nebius/gosdk/proto/nebius/ai/v1";
+option java_multiple_files = true;
+option java_outer_classname = "EndpointServiceProto";
+option java_package = "ai.nebius.pub.ai.v1";
+
+// Service to create/manage endpoints.
+service EndpointService {
+  option (api_service_name) = "apps.msp";
+
+  // Returns the specified endpoint.
+  rpc Get(GetEndpointRequest) returns (Endpoint);
+
+  // Returns the specified endpoint by name.
+  rpc GetByName(GetEndpointByNameRequest) returns (Endpoint);
+
+  // Retrieves a list of endpoints.
+  rpc List(ListEndpointsRequest) returns (ListEndpointsResponse);
+
+  // Creates an endpoint.
+  rpc Create(CreateEndpointRequest) returns (common.v1.Operation);
+
+  // Deletes an endpoint.
+  rpc Delete(DeleteEndpointRequest) returns (common.v1.Operation);
+
+  // Updates an endpoint.
+  rpc Update(UpdateEndpointRequest) returns (common.v1.Operation);
+
+  // Starts an endpoint.
+  rpc Start(StartEndpointRequest) returns (common.v1.Operation);
+
+  // Stops an endpoint.
+  rpc Stop(StopEndpointRequest) returns (common.v1.Operation);
+}
+
+message GetEndpointRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aiendpoint"]
+    }
+  ];
+}
+
+message GetEndpointByNameRequest {
+  string parent_id = 1 [(buf.validate.field).required = true];
+
+  string name = 2 [(buf.validate.field).required = true];
+}
+
+message ListEndpointsRequest {
+  string parent_id = 1 [(buf.validate.field).required = true];
+
+  int64 page_size = 2;
+
+  string page_token = 3;
+}
+
+message CreateEndpointRequest {
+  common.v1.ResourceMetadata metadata = 1 [(buf.validate.field).required = true];
+
+  EndpointSpec spec = 2 [(buf.validate.field).required = true];
+
+  // Dry run creation of the endpoint.
+  bool dry_run = 3;
+}
+
+message DeleteEndpointRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aiendpoint"]
+    }
+  ];
+}
+
+message UpdateEndpointRequest {
+  common.v1.ResourceMetadata metadata = 1 [(buf.validate.field).required = true];
+
+  EndpointSpec spec = 2 [(buf.validate.field).required = true];
+}
+
+message StartEndpointRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aiendpoint"]
+    }
+  ];
+}
+
+message StopEndpointRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aiendpoint"]
+    }
+  ];
+}
+
+message ListEndpointsResponse {
+  repeated Endpoint items = 1;
+
+  string next_page_token = 2;
+}
diff --git a/nebius/ai/v1/job.proto b/nebius/ai/v1/job.proto
new file mode 100644
index 0000000..58b00eb
--- /dev/null
+++ b/nebius/ai/v1/job.proto
@@ -0,0 +1,306 @@
+syntax = "proto3";
+
+package nebius.ai.v1;
+
+import "buf/validate/validate.proto";
+import "google/protobuf/duration.proto";
+import "nebius/annotations.proto";
+import "nebius/common/v1/metadata.proto";
+import "nebius/compute/v1/disk.proto";
+import "nebius/compute/v1/instance.proto";
+
+option go_package = "github.com/nebius/gosdk/proto/nebius/ai/v1";
+option java_multiple_files = true;
+option java_outer_classname = "JobProto";
+option java_package = "ai.nebius.pub.ai.v1";
+
+// Represents a job with a specified workload.
+message Job {
+  common.v1.ResourceMetadata metadata = 1 [(buf.validate.field).required = true];
+
+  JobSpec spec = 2 [(buf.validate.field).required = true];
+
+  JobStatus status = 3 [(field_behavior) = OUTPUT_ONLY];
+}
+
+// JobSpec defines a job that will be run.
+message JobSpec {
+  // The Docker image to use for the job's container.
+  string image = 1 [(buf.validate.field).required = true];
+
+  // Specifies the environment variables for the job's container.
+  repeated EnvironmentVariable environment_variables = 2;
+
+  // Specifies the ports that the job exposes.
+  repeated Port ports = 3;
+
+  // The entrypoint command for the job's container.
+  string container_command = 4;
+
+  // The arguments to pass to the entrypoint command.
+  string args = 5;
+
+  // The working directory for the job's container.
+  string working_dir = 6 [(buf.validate.field) = {
+    string: {max_len: 128}
+  }];
+
+  // Volumes to be mounted into the job's container.
+  repeated VolumeMount volumes = 7;
+
+  // Registry credentials for private Docker registry.
+  RegistryCredentials registry_credentials = 10;
+
+  // Compute platform that the job will be run on.
+  string platform = 20 [(buf.validate.field).required = true];
+
+  // Compute preset that the job will be run on.
+  string preset = 21 [(buf.validate.field).required = true];
+
+  // Shared memory size in bytes for the job's container.
+  int64 shm_size_bytes = 22 [(buf.validate.field) = {
+    int64: {gte: 0}
+  }];
+
+  // Disk spec for the main disk of the job.
+  DiskSpec disk = 23 [(buf.validate.field).required = true];
+
+  // Subnet ID where the job will be deployed.
+  string subnet_id = 24 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcsubnet"]
+    }
+  ];
+
+  // Whether to assign a public IP to the job.
+  bool public_ip = 25;
+
+  // Public keys to be authorized for SSH access to the job.
+  repeated string ssh_authorized_keys = 26;
+
+  // Restart attempts for the job.
+  int64 restart_attempts = 30 [(buf.validate.field) = {
+    int64: {gte: 0}
+  }];
+
+  // Job timeout.
+  google.protobuf.Duration timeout = 31;
+
+  // EnvironmentVariable defines an environment variable for the job's container.
+  message EnvironmentVariable {
+    // The name of the environment variable.
+    string name = 1 [(buf.validate.field).required = true];
+
+    // Environment variable value.
+    string value = 2 [(sensitive) = true];
+  }
+
+  message Port {
+    // Container port.
+    int32 container_port = 1 [(buf.validate.field) = {
+      int32: {
+        lte: 65535
+        gte: 1
+      }
+    }];
+
+    // Host port.
+    //
+    // If not specified, it will be the same as container_port.
+    int32 host_port = 2 [(buf.validate.field) = {
+      int32: {
+        lte: 65535
+        gte: 0
+      }
+    }];
+
+    // Port's protocol.
+    Protocol protocol = 3 [(buf.validate.field).required = true];
+
+    // Represents the protocol of the job's port which will be exposed.
+    enum Protocol {
+      PROTOCOL_UNSPECIFIED = 0;
+
+      // HTTP protocol.
+      HTTP = 1;
+
+      // TCP protocol.
+      TCP = 2;
+
+      // UDP protocol.
+      UDP = 3;
+    }
+  }
+
+  // VolumeMount represents a volume mount for the job's container.
+  message VolumeMount {
+    // Source of the volume mount.
+    //
+    // Can be a name or an ID of a Nebius Storage bucket or filesystem.
+    string source = 1 [(nid) = {
+      resource: [
+        "computefilesystem",
+        "storagebucket"
+      ]
+    }];
+
+    // Path inside the source volume.
+    //
+    // Optional.
+    string source_path = 2;
+
+    // Path inside the job's container where the volume is mounted.
+    //
+    // Must be an absolute path.
+    string container_path = 3;
+
+    // Mount mode.
+    Mode mode = 4 [(buf.validate.field).required = true];
+
+    // Mode that will be used to mount the volume.
+    enum Mode {
+      MODE_UNSPECIFIED = 0;
+
+      // Read-write mode.
+      READ_WRITE = 1;
+
+      // Read-only mode.
+      READ_ONLY = 2;
+    }
+  }
+
+  message DiskSpec {
+    // Disk type.
+    nebius.compute.v1.DiskSpec.DiskType type = 1 [(buf.validate.field).required = true];
+
+    // Disk size in bytes.
+    int64 size_bytes = 2;
+  }
+
+  message RegistryCredentials {
+    // Registry username for private Docker registry.
+    string username = 1;
+
+    // Registry password for private Docker registry.
+    string password = 2 [(sensitive) = true];
+
+    // Secret version storing the registry credentials.
+    // Must have keys "REGISTRY_USERNAME" and "REGISTRY_PASSWORD".
+    string mysterybox_secret_version = 3;
+  }
+}
+
+// JobStatus represents the status of a job.
+message JobStatus {
+  // Private endpoints to access the workload.
+  repeated string private_endpoints = 1;
+
+  // Public endpoints to access the workload.
+  repeated string public_endpoints = 2;
+
+  // Status of individual job instances.
+  repeated JobInstanceStatus instances = 10;
+
+  // State of the job.
+  State state = 20;
+
+  // Details of the job's state.
+  JobStateDetails state_details = 21;
+
+  // Job state.
+  enum State {
+    STATE_UNSPECIFIED = 0;
+
+    // The job is creating resources.
+    PROVISIONING = 1;
+
+    // The job is being started.
+    STARTING = 2;
+
+    // The job is running.
+    RUNNING = 3;
+
+    // The job is being cancelled.
+    CANCELLING = 4;
+
+    // The job is being deleted.
+    DELETING = 5;
+
+    // The job has successfully completed.
+    COMPLETED = 6;
+
+    // The job has failed.
+    FAILED = 7;
+
+    // The job has been cancelled.
+    CANCELLED = 8;
+
+    // The job encountered an internal error.
+    ERROR = 9;
+  }
+}
+
+// Job state details.
+message JobStateDetails {
+  // Short state description.
+  string code = 1 [(buf.validate.field).required = true];
+
+  // Detailed human-readable description.
+  string message = 2;
+}
+
+// JobInstanceStatus represents the status of a job instance.
+message JobInstanceStatus {
+  // The current state of the job's workload.
+  State state = 1 [(buf.validate.field).required = true];
+
+  // ID of the compute instance running the job.
+  string compute_instance_id = 10 [(nid) = {
+    resource: ["computeinstance"]
+  }];
+
+  // The current state of the compute instance.
+  nebius.compute.v1.InstanceStatus.InstanceState compute_instance_state = 11 [(buf.validate.field).required = true];
+
+  // Private IP address of the instance.
+  string private_ip = 12;
+
+  // Public IP address of the instance.
+  string public_ip = 13;
+
+  // Job instance state.
+  enum State {
+    STATE_UNSPECIFIED = 0;
+
+    // The job is creating resources.
+    PROVISIONING = 1;
+
+    // The job is being started.
+    STARTING = 2;
+
+    // The job is running.
+    RUNNING = 3;
+
+    // The job is completing.
+    COMPLETING = 4;
+
+    // The job is being cancelled.
+    CANCELLING = 5;
+
+    // The job is being deleted.
+    DELETING = 6;
+
+    // The job has successfully completed.
+    COMPLETED = 7;
+
+    // The job has failed.
+    FAILED = 8;
+
+    // The job has been cancelled.
+    CANCELLED = 9;
+
+    // The job encountered an internal error.
+    ERROR = 10;
+  }
+}
diff --git a/nebius/ai/v1/job_service.proto b/nebius/ai/v1/job_service.proto
new file mode 100644
index 0000000..45576cb
--- /dev/null
+++ b/nebius/ai/v1/job_service.proto
@@ -0,0 +1,93 @@
+syntax = "proto3";
+
+package nebius.ai.v1;
+
+import "buf/validate/validate.proto";
+import "nebius/ai/v1/job.proto";
+import "nebius/annotations.proto";
+import "nebius/common/v1/metadata.proto";
+import "nebius/common/v1/operation.proto";
+
+option go_package = "github.com/nebius/gosdk/proto/nebius/ai/v1";
+option java_multiple_files = true;
+option java_outer_classname = "JobServiceProto";
+option java_package = "ai.nebius.pub.ai.v1";
+
+// Service to create/manage jobs.
+service JobService {
+  option (api_service_name) = "apps.msp";
+
+  // Returns the specified job.
+  rpc Get(GetJobRequest) returns (Job);
+
+  // Returns the specified job by name.
+  rpc GetByName(GetJobByNameRequest) returns (Job);
+
+  // Retrieves a list of jobs.
+  rpc List(ListJobsRequest) returns (ListJobsResponse);
+
+  // Creates a job.
+  rpc Create(CreateJobRequest) returns (common.v1.Operation);
+
+  // Deletes a job.
+  rpc Delete(DeleteJobRequest) returns (common.v1.Operation);
+
+  // Cancels a job.
+  rpc Cancel(CancelJobRequest) returns (common.v1.Operation);
+}
+
+message GetJobRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aijob"]
+    }
+  ];
+}
+
+message GetJobByNameRequest {
+  string parent_id = 1 [(buf.validate.field).required = true];
+
+  string name = 2 [(buf.validate.field).required = true];
+}
+
+message ListJobsRequest {
+  string parent_id = 1 [(buf.validate.field).required = true];
+
+  int64 page_size = 2;
+
+  string page_token = 3;
+}
+
+message CreateJobRequest {
+  common.v1.ResourceMetadata metadata = 1 [(buf.validate.field).required = true];
+
+  JobSpec spec = 2 [(buf.validate.field).required = true];
+
+  // Dry run creation of the job.
+  bool dry_run = 3;
+}
+
+message DeleteJobRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aijob"]
+    }
+  ];
+}
+
+message CancelJobRequest {
+  string id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["aijob"]
+    }
+  ];
+}
+
+message ListJobsResponse {
+  repeated Job items = 1;
+
+  string next_page_token = 2;
+}
diff --git a/nebius/iam/v1/federation_certificate_service.proto b/nebius/iam/v1/federation_certificate_service.proto
index c03d051..89b081c 100644
--- a/nebius/iam/v1/federation_certificate_service.proto
+++ b/nebius/iam/v1/federation_certificate_service.proto
@@ -41,7 +41,9 @@ message GetFederationCertificateRequest {
 
 message ListFederationCertificateByFederationRequest {
   // Represents the parent federation ID. Corresponds to the parent_id value.
-  string federation_id = 1;
+  string federation_id = 1 [(nid) = {
+    resource: ["federation"]
+  }];
 
   // Specifies the maximum number of items to return in the response.
   // Default value: 10
diff --git a/nebius/storage/v1/bucket.proto b/nebius/storage/v1/bucket.proto
index 3a27957..dab7ec0 100644
--- a/nebius/storage/v1/bucket.proto
+++ b/nebius/storage/v1/bucket.proto
@@ -8,6 +8,7 @@ import "nebius/annotations.proto";
 import "nebius/common/v1/metadata.proto";
 import "nebius/storage/v1/base.proto";
 import "nebius/storage/v1/bucket_counters.proto";
+import "nebius/storage/v1/bucket_policy.proto";
 import "nebius/storage/v1/lifecycle.proto";
 
 option go_package = "github.com/nebius/gosdk/proto/nebius/storage/v1";
@@ -73,6 +74,9 @@ message BucketSpec {
     // Logging enabled for all requests.
     ALL = 3;
   }
+
+  // Bucket policy specifies granular permissions for a bucket.
+  BucketPolicy bucket_policy = 13;
 }
 
 message BucketStatus {
diff --git a/nebius/storage/v1/bucket_policy.proto b/nebius/storage/v1/bucket_policy.proto
new file mode 100644
index 0000000..4ceb5e0
--- /dev/null
+++ b/nebius/storage/v1/bucket_policy.proto
@@ -0,0 +1,45 @@
+syntax = "proto3";
+
+package nebius.storage.v1;
+
+import "buf/validate/validate.proto";
+import "nebius/annotations.proto";
+
+option go_package = "github.com/nebius/gosdk/proto/nebius/storage/v1";
+option java_multiple_files = true;
+option java_outer_classname = "BucketPolicyProto";
+option java_package = "ai.nebius.pub.storage.v1";
+
+// Bucket policy specifies granular permissions for a bucket.
+message BucketPolicy {
+  // Rule specifies which role must be given to a subject to access a set of objects with given
+  // prefixes or a whole bucket.
+  repeated Rule rules = 1 [(buf.validate.field) = {
+    repeated: {max_items: 10}
+  }];
+
+  message Rule {
+    // A list of paths, each of which is either a full object key or a prefix ending with a
+    // single "*" wildcard character. A rule is only applied to objects matching any of the paths.
+    // If there is a path equal to "*", the rule applies to the whole bucket.
+    repeated string paths = 1 [(buf.validate.field) = {
+      repeated: {max_items: 10}
+    }];
+
+    // A set of roles which a subject will have. All `storage.*` roles are supported.
+    repeated string roles = 2;
+
+    reserved 3;
+
+    // A subject of a rule.
+    oneof subject {
+      // Group ID to grant access to.
+      string group_id = 4;
+
+      // Enable anonymous access. Only read-only roles are allowed in anonymous mode.
+      AnonymousAccess anonymous = 5 [(field_behavior) = MEANINGFUL_EMPTY_VALUE];
+    }
+
+    message AnonymousAccess {}
+  }
+}
diff --git a/nebius/vpc/v1/allocation_service.proto b/nebius/vpc/v1/allocation_service.proto
index b5e0211..c533e22 100644
--- a/nebius/vpc/v1/allocation_service.proto
+++ b/nebius/vpc/v1/allocation_service.proto
@@ -54,7 +54,12 @@ message ListAllocationsRequest {
 }
 
 message ListAllocationsByPoolRequest {
-  string pool_id = 1 [(buf.validate.field).required = true];
+  string pool_id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcpool"]
+    }
+  ];
 
   int64 page_size = 2;
 
@@ -62,7 +67,12 @@ message ListAllocationsBySubnetRequest {
-  string subnet_id = 1 [(buf.validate.field).required = true];
+  string subnet_id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcsubnet"]
+    }
+  ];
 
   int64 page_size = 2;
diff --git a/nebius/vpc/v1/pool_service.proto b/nebius/vpc/v1/pool_service.proto
index 84809c7..1bbed80 100644
--- a/nebius/vpc/v1/pool_service.proto
+++ b/nebius/vpc/v1/pool_service.proto
@@ -52,7 +52,12 @@ message ListPoolsRequest {
 }
 
 message ListPoolsBySourcePoolRequest {
-  string pool_id = 1 [(buf.validate.field).required = true];
+  string pool_id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcpool"]
+    }
+  ];
 
   int64 page_size = 2;
diff --git a/nebius/vpc/v1/route_table_service.proto b/nebius/vpc/v1/route_table_service.proto
index ea74b9a..7d3fc1d 100644
--- a/nebius/vpc/v1/route_table_service.proto
+++ b/nebius/vpc/v1/route_table_service.proto
@@ -52,7 +52,12 @@ message ListRouteTablesRequest {
 }
 
 message ListRouteTablesByNetworkRequest {
-  string network_id = 1 [(buf.validate.field).required = true];
+  string network_id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcnetwork"]
+    }
+  ];
 
   int64 page_size = 2;
diff --git a/nebius/vpc/v1/subnet_service.proto b/nebius/vpc/v1/subnet_service.proto
index cc1f453..071d17b 100644
--- a/nebius/vpc/v1/subnet_service.proto
+++ b/nebius/vpc/v1/subnet_service.proto
@@ -52,7 +52,12 @@ message ListSubnetsRequest {
 }
 
 message ListSubnetsByNetworkRequest {
-  string network_id = 1 [(buf.validate.field).required = true];
+  string network_id = 1 [
+    (buf.validate.field).required = true,
+    (nid) = {
+      resource: ["vpcnetwork"]
+    }
+  ];
 
   int64 page_size = 2;
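
Editorial note (not part of the patch): the prototext sketch below illustrates what a CreateJobRequest for the new nebius.ai.v1.JobService could look like, using only fields defined in job.proto and job_service.proto above. All concrete values (parent project ID, image, platform and preset names, subnet ID) are placeholders, and the NETWORK_SSD disk type is assumed to exist in nebius.compute.v1.DiskSpec.DiskType; check the compute API for the actual enum values.

# CreateJobRequest in protobuf text format (hypothetical values).
metadata {
  parent_id: "project-e00example"             # placeholder parent project ID
  name: "example-training-job"                # placeholder job name
}
spec {
  image: "cr.example.com/team/trainer:latest" # placeholder image reference
  container_command: "python"
  args: "train.py --epochs 1"
  platform: "gpu-h100-sxm"                    # placeholder platform name
  preset: "1gpu-16vcpu-200gb"                 # placeholder preset name
  disk {
    type: NETWORK_SSD                         # assumed DiskType value from nebius.compute.v1
    size_bytes: 107374182400                  # 100 GiB
  }
  subnet_id: "vpcsubnet-e00example"           # placeholder subnet ID
  restart_attempts: 1
  timeout { seconds: 3600 }                   # 1 hour
}
dry_run: true                                 # dry-run the creation, per CreateJobRequest.dry_run

Note that JobService.Create returns a common.v1.Operation, so a caller would track that operation to completion rather than the Job resource itself.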