diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 2c8faf1a87..9c54ee4c48 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -617,6 +617,7 @@ from .types.machine_resources import BatchDedicatedResources from .types.machine_resources import DedicatedResources from .types.machine_resources import DiskSpec +from .types.machine_resources import LustreMount from .types.machine_resources import MachineSpec from .types.machine_resources import NfsMount from .types.machine_resources import PersistentDiskSpec @@ -1775,6 +1776,7 @@ def _get_version(dependency_name): "LlmUtilityServiceClient", "LogprobsResult", "LookupStudyRequest", + "LustreMount", "MachineSpec", "ManualBatchTuningParameters", "MatchServiceClient", diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 8755475c2d..e626e873a0 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -271,40 +271,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return 
"projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index a52a91a2a7..0bfadf9248 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -630,6 +630,7 @@ BatchDedicatedResources, DedicatedResources, DiskSpec, + LustreMount, MachineSpec, NfsMount, PersistentDiskSpec, @@ -1718,6 +1719,7 @@ "BatchDedicatedResources", "DedicatedResources", "DiskSpec", + "LustreMount", "MachineSpec", "NfsMount", "PersistentDiskSpec", diff --git a/google/cloud/aiplatform_v1/types/content.py b/google/cloud/aiplatform_v1/types/content.py index 430a81f954..8cb833aa6f 100644 --- a/google/cloud/aiplatform_v1/types/content.py +++ b/google/cloud/aiplatform_v1/types/content.py @@ -220,8 +220,56 @@ class Part(proto.Message): or file_data. This field is a member of `oneof`_ ``metadata``. + media_resolution (google.cloud.aiplatform_v1.types.Part.MediaResolution): + per part media resolution. + Media resolution for the input media. """ + class MediaResolution(proto.Message): + r"""per part media resolution. + Media resolution for the input media. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + level (google.cloud.aiplatform_v1.types.Part.MediaResolution.Level): + The tokenization quality used for given + media. + + This field is a member of `oneof`_ ``value``. 
+ """ + + class Level(proto.Enum): + r"""The media resolution level. + + Values: + MEDIA_RESOLUTION_UNSPECIFIED (0): + Media resolution has not been set. + MEDIA_RESOLUTION_LOW (1): + Media resolution set to low. + MEDIA_RESOLUTION_MEDIUM (2): + Media resolution set to medium. + MEDIA_RESOLUTION_HIGH (3): + Media resolution set to high. + MEDIA_RESOLUTION_ULTRA_HIGH (4): + Media resolution set to ultra high. This is + for image only. + """ + + MEDIA_RESOLUTION_UNSPECIFIED = 0 + MEDIA_RESOLUTION_LOW = 1 + MEDIA_RESOLUTION_MEDIUM = 2 + MEDIA_RESOLUTION_HIGH = 3 + MEDIA_RESOLUTION_ULTRA_HIGH = 4 + + level: "Part.MediaResolution.Level" = proto.Field( + proto.ENUM, + number=1, + oneof="value", + enum="Part.MediaResolution.Level", + ) + text: str = proto.Field( proto.STRING, number=1, @@ -277,6 +325,11 @@ class Part(proto.Message): oneof="metadata", message="VideoMetadata", ) + media_resolution: MediaResolution = proto.Field( + proto.MESSAGE, + number=12, + message=MediaResolution, + ) class Blob(proto.Message): @@ -333,6 +386,10 @@ class VideoMetadata(proto.Message): Optional. The start offset of the video. end_offset (google.protobuf.duration_pb2.Duration): Optional. The end offset of the video. + fps (float): + Optional. The frame rate of the video sent to the model. If + not specified, the default value is 1.0. The valid range is + (0.0, 24.0]. """ start_offset: duration_pb2.Duration = proto.Field( @@ -345,6 +402,10 @@ class VideoMetadata(proto.Message): number=2, message=duration_pb2.Duration, ) + fps: float = proto.Field( + proto.DOUBLE, + number=3, + ) class PrebuiltVoiceConfig(proto.Message): @@ -505,6 +566,11 @@ class ImageConfig(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + image_output_options (google.cloud.aiplatform_v1.types.ImageConfig.ImageOutputOptions): + Optional. The image output format for + generated images. 
+ + This field is a member of `oneof`_ ``_image_output_options``. aspect_ratio (str): Optional. The desired aspect ratio for the generated images. The following aspect ratios @@ -518,13 +584,94 @@ class ImageConfig(proto.Message): "21:9". This field is a member of `oneof`_ ``_aspect_ratio``. + person_generation (google.cloud.aiplatform_v1.types.ImageConfig.PersonGeneration): + Optional. Controls whether the model can + generate people. + + This field is a member of `oneof`_ ``_person_generation``. + image_size (str): + Optional. Specifies the size of generated images. Supported + values are ``1K``, ``2K``, ``4K``. If not specified, the + model will use default value ``1K``. + + This field is a member of `oneof`_ ``_image_size``. """ + class PersonGeneration(proto.Enum): + r"""Enum for controlling the generation of people in images. + + Values: + PERSON_GENERATION_UNSPECIFIED (0): + The default behavior is unspecified. The + model will decide whether to generate images of + people. + ALLOW_ALL (1): + Allows the model to generate images of + people, including adults and children. + ALLOW_ADULT (2): + Allows the model to generate images of + adults, but not children. + ALLOW_NONE (3): + Prevents the model from generating images of + people. + """ + + PERSON_GENERATION_UNSPECIFIED = 0 + ALLOW_ALL = 1 + ALLOW_ADULT = 2 + ALLOW_NONE = 3 + + class ImageOutputOptions(proto.Message): + r"""The image output format for generated images. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + mime_type (str): + Optional. The image format that the output + should be saved as. + + This field is a member of `oneof`_ ``_mime_type``. + compression_quality (int): + Optional. The compression quality of the + output image. + + This field is a member of `oneof`_ ``_compression_quality``. 
+ """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + compression_quality: int = proto.Field( + proto.INT32, + number=2, + optional=True, + ) + + image_output_options: ImageOutputOptions = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message=ImageOutputOptions, + ) aspect_ratio: str = proto.Field( proto.STRING, number=2, optional=True, ) + person_generation: PersonGeneration = proto.Field( + proto.ENUM, + number=3, + optional=True, + enum=PersonGeneration, + ) + image_size: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) class GenerationConfig(proto.Message): @@ -647,6 +794,31 @@ class GenerationConfig(proto.Message): Optional. Routing configuration. This field is a member of `oneof`_ ``_routing_config``. + audio_timestamp (bool): + Optional. If enabled, audio timestamps will + be included in the request to the model. This + can be useful for synchronizing audio with other + modalities in the response. + + This field is a member of `oneof`_ ``_audio_timestamp``. + response_modalities (MutableSequence[google.cloud.aiplatform_v1.types.GenerationConfig.Modality]): + Optional. The modalities of the response. The model will + generate a response that includes all the specified + modalities. For example, if this is set to + ``[TEXT, IMAGE]``, the response will include both text and + an image. + media_resolution (google.cloud.aiplatform_v1.types.GenerationConfig.MediaResolution): + Optional. The token resolution at which input + media content is sampled. This is used to + control the trade-off between the quality of the + response and the number of tokens used to + represent the media. A higher resolution allows + the model to perceive more detail, which can + lead to a more nuanced response, but it will + also use more tokens. This does not affect the + image dimensions sent to the model. + + This field is a member of `oneof`_ ``_media_resolution``. 
speech_config (google.cloud.aiplatform_v1.types.SpeechConfig): Optional. The speech generation config. @@ -662,6 +834,46 @@ class GenerationConfig(proto.Message): This field is a member of `oneof`_ ``_image_config``. """ + class Modality(proto.Enum): + r"""The modalities of the response. + + Values: + MODALITY_UNSPECIFIED (0): + Unspecified modality. Will be processed as + text. + TEXT (1): + Text modality. + IMAGE (2): + Image modality. + AUDIO (3): + Audio modality. + """ + + MODALITY_UNSPECIFIED = 0 + TEXT = 1 + IMAGE = 2 + AUDIO = 3 + + class MediaResolution(proto.Enum): + r"""Media resolution for the input media. + + Values: + MEDIA_RESOLUTION_UNSPECIFIED (0): + Media resolution has not been set. + MEDIA_RESOLUTION_LOW (1): + Media resolution set to low (64 tokens). + MEDIA_RESOLUTION_MEDIUM (2): + Media resolution set to medium (256 tokens). + MEDIA_RESOLUTION_HIGH (3): + Media resolution set to high (zoomed + reframing with 256 tokens). + """ + + MEDIA_RESOLUTION_UNSPECIFIED = 0 + MEDIA_RESOLUTION_LOW = 1 + MEDIA_RESOLUTION_MEDIUM = 2 + MEDIA_RESOLUTION_HIGH = 3 + class RoutingConfig(proto.Message): r"""The configuration for routing the request to a specific model. @@ -778,8 +990,35 @@ class ThinkingConfig(proto.Message): only applied when enable_thinking is true. This field is a member of `oneof`_ ``_thinking_budget``. + thinking_level (google.cloud.aiplatform_v1.types.GenerationConfig.ThinkingConfig.ThinkingLevel): + Optional. The number of thoughts tokens that + the model should generate. + + This field is a member of `oneof`_ ``_thinking_level``. """ + class ThinkingLevel(proto.Enum): + r"""The thinking level for the model. + + Values: + THINKING_LEVEL_UNSPECIFIED (0): + Unspecified thinking level. + LOW (1): + Low thinking level. + MEDIUM (2): + Medium thinking level. + HIGH (3): + High thinking level. + MINIMAL (4): + MINIMAL thinking level. 
+ """ + + THINKING_LEVEL_UNSPECIFIED = 0 + LOW = 1 + MEDIUM = 2 + HIGH = 3 + MINIMAL = 4 + include_thoughts: bool = proto.Field( proto.BOOL, number=1, @@ -790,6 +1029,12 @@ class ThinkingConfig(proto.Message): number=3, optional=True, ) + thinking_level: "GenerationConfig.ThinkingConfig.ThinkingLevel" = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum="GenerationConfig.ThinkingConfig.ThinkingLevel", + ) temperature: float = proto.Field( proto.FLOAT, @@ -867,6 +1112,22 @@ class ThinkingConfig(proto.Message): optional=True, message=RoutingConfig, ) + audio_timestamp: bool = proto.Field( + proto.BOOL, + number=20, + optional=True, + ) + response_modalities: MutableSequence[Modality] = proto.RepeatedField( + proto.ENUM, + number=21, + enum=Modality, + ) + media_resolution: MediaResolution = proto.Field( + proto.ENUM, + number=22, + optional=True, + enum=MediaResolution, + ) speech_config: "SpeechConfig" = proto.Field( proto.MESSAGE, number=23, diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index cb70c2ded7..828af16172 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -420,6 +420,8 @@ class WorkerPoolSpec(proto.Message): use for this worker pool. nfs_mounts (MutableSequence[google.cloud.aiplatform_v1.types.NfsMount]): Optional. List of NFS mount spec. + lustre_mounts (MutableSequence[google.cloud.aiplatform_v1.types.LustreMount]): + Optional. List of Lustre mounts. disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): Disk spec. 
""" @@ -450,6 +452,11 @@ class WorkerPoolSpec(proto.Message): number=4, message=machine_resources.NfsMount, ) + lustre_mounts: MutableSequence[machine_resources.LustreMount] = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=machine_resources.LustreMount, + ) disk_spec: machine_resources.DiskSpec = proto.Field( proto.MESSAGE, number=5, diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index 470b974e10..de5671cff3 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -36,6 +36,7 @@ "DiskSpec", "PersistentDiskSpec", "NfsMount", + "LustreMount", "AutoscalingMetricSpec", "ShieldedVmConfig", }, @@ -125,33 +126,32 @@ class MachineSpec(proto.Message): class DedicatedResources(proto.Message): r"""A description of resources that are dedicated to a - DeployedModel, and that need a higher degree of manual - configuration. + DeployedModel or DeployedIndex, and that need a higher degree of + manual configuration. Attributes: machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): Required. Immutable. The specification of a - single machine used by the prediction. + single machine being used. min_replica_count (int): Required. Immutable. The minimum number of - machine replicas this DeployedModel will be - always deployed on. This value must be greater - than or equal to 1. - - If traffic against the DeployedModel increases, - it may dynamically be deployed onto more - replicas, and as traffic decreases, some of - these extra replicas may be freed. + machine replicas that will be always deployed + on. This value must be greater than or equal to + 1. + + If traffic increases, it may dynamically be + deployed onto more replicas, and as traffic + decreases, some of these extra replicas may be + freed. max_replica_count (int): - Immutable. 
The maximum number of replicas this DeployedModel - may be deployed on when the traffic against it increases. If - the requested value is too large, the deployment will error, - but if deployment succeeds then the ability to scale the - model to that many replicas is guaranteed (barring service - outages). If traffic against the DeployedModel increases - beyond what its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is not provided, - will use + Immutable. The maximum number of replicas that may be + deployed on when the traffic against it increases. If the + requested value is too large, the deployment will error, but + if deployment succeeds then the ability to scale to that + many replicas is guaranteed (barring service outages). If + traffic increases beyond what its replicas at maximum may + handle, a portion of the traffic will be dropped. If this + value is not provided, will use [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count] as the default value. @@ -163,8 +163,8 @@ class DedicatedResources(proto.Message): required_replica_count (int): Optional. Number of required available replicas for the deployment to succeed. This field is only needed when - partial model deployment/mutation is desired. If set, the - model deploy/mutate operation will succeed once + partial deployment/mutation is desired. If set, the + deploy/mutate operation will succeed once available_replica_count reaches required_replica_count, and the rest of the replicas will be retried. If not set, the default required_replica_count will be min_replica_count. @@ -239,28 +239,27 @@ class AutomaticResources(proto.Message): Attributes: min_replica_count (int): - Immutable. The minimum number of replicas this DeployedModel - will be always deployed on. If traffic against it increases, - it may dynamically be deployed onto more replicas up to + Immutable. The minimum number of replicas that will be + always deployed on. 
If traffic against it increases, it may + dynamically be deployed onto more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. max_replica_count (int): Immutable. The maximum number of replicas - this DeployedModel may be deployed on when the - traffic against it increases. If the requested - value is too large, the deployment will error, - but if deployment succeeds then the ability to - scale the model to that many replicas is - guaranteed (barring service outages). If traffic - against the DeployedModel increases beyond what - its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is - not provided, a no upper bound for scaling under - heavy traffic will be assume, though Vertex AI - may be unable to scale beyond certain replica - number. + that may be deployed on when the traffic against + it increases. If the requested value is too + large, the deployment will error, but if + deployment succeeds then the ability to scale to + that many replicas is guaranteed (barring + service outages). If traffic increases beyond + what its replicas at maximum may handle, a + portion of the traffic will be dropped. If this + value is not provided, a no upper bound for + scaling under heavy traffic will be assume, + though Vertex AI may be unable to scale beyond + certain replica number. """ min_replica_count: int = proto.Field( @@ -331,10 +330,12 @@ class DiskSpec(proto.Message): Attributes: boot_disk_type (str): - Type of the boot disk (default is "pd-ssd"). - Valid values: "pd-ssd" (Persistent Disk Solid - State Drive) or "pd-standard" (Persistent Disk - Hard Disk Drive). + Type of the boot disk. For non-A3U machines, + the default value is "pd-ssd", for A3U machines, + the default value is "hyperdisk-balanced". 
Valid + values: "pd-ssd" (Persistent Disk Solid State + Drive), "pd-standard" (Persistent Disk Hard Disk + Drive) or "hyperdisk-balanced". boot_disk_size_gb (int): Size in GB of the boot disk (default is 100GB). @@ -407,6 +408,40 @@ class NfsMount(proto.Message): ) +class LustreMount(proto.Message): + r"""Represents a mount configuration for Lustre file system. + + Attributes: + instance_ip (str): + Required. IP address of the Lustre instance. + volume_handle (str): + Required. The unique identifier of the Lustre + volume. + filesystem (str): + Required. The name of the Lustre filesystem. + mount_point (str): + Required. Destination mount path. The Lustre file system + will be mounted for the user under /mnt/lustre/ + """ + + instance_ip: str = proto.Field( + proto.STRING, + number=1, + ) + volume_handle: str = proto.Field( + proto.STRING, + number=2, + ) + filesystem: str = proto.Field( + proto.STRING, + number=3, + ) + mount_point: str = proto.Field( + proto.STRING, + number=4, + ) + + class AutoscalingMetricSpec(proto.Message): r"""The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so @@ -419,6 +454,7 @@ class AutoscalingMetricSpec(proto.Message): - For Online Prediction: - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + - ``aiplatform.googleapis.com/prediction/online/request_count`` target (int): The target resource utilization in percentage (1% - 100%) for the given metric; once the real diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 9bd968d6da..759601dc07 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -1206,6 +1206,7 @@ from .types.tool import FunctionResponsePart from .types.tool import GoogleMaps from .types.tool import GoogleSearchRetrieval +from .types.tool import PartialArg 
from .types.tool import RagRetrievalConfig from .types.tool import Retrieval from .types.tool import RetrievalConfig @@ -2237,6 +2238,7 @@ def _get_version(dependency_name): "PairwiseSummarizationQualityResult", "PairwiseSummarizationQualitySpec", "Part", + "PartialArg", "PartnerModelTuningSpec", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 0063a8d7b1..299cc957ec 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -1337,6 +1337,7 @@ FunctionResponsePart, GoogleMaps, GoogleSearchRetrieval, + PartialArg, RagRetrievalConfig, Retrieval, RetrievalConfig, @@ -2548,6 +2549,7 @@ "FunctionResponsePart", "GoogleMaps", "GoogleSearchRetrieval", + "PartialArg", "RagRetrievalConfig", "Retrieval", "RetrievalConfig", diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index bc9378c69f..88a3a55bae 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -32,6 +32,7 @@ "ToolUseExample", "FunctionDeclaration", "FunctionCall", + "PartialArg", "FunctionResponsePart", "FunctionResponseBlob", "FunctionResponseFileData", @@ -484,12 +485,22 @@ class FunctionCall(proto.Message): the client to execute the ``function_call`` and return the response with the matching ``id``. name (str): - Required. The name of the function to call. Matches + Optional. The name of the function to call. Matches [FunctionDeclaration.name]. args (google.protobuf.struct_pb2.Struct): - Optional. Required. The function parameters and values in - JSON object format. See [FunctionDeclaration.parameters] for - parameter details. + Optional. The function parameters and values in JSON object + format. See [FunctionDeclaration.parameters] for parameter + details. 
+ partial_args (MutableSequence[google.cloud.aiplatform_v1beta1.types.PartialArg]): + Optional. The partial argument value of the + function call. If provided, represents the + arguments/fields that are streamed + incrementally. + will_continue (bool): + Optional. Whether this is the last part of + the FunctionCall. If true, another partial + message for the current FunctionCall is expected + to follow. """ id: str = proto.Field( @@ -505,6 +516,83 @@ class FunctionCall(proto.Message): number=2, message=struct_pb2.Struct, ) + partial_args: MutableSequence["PartialArg"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="PartialArg", + ) + will_continue: bool = proto.Field( + proto.BOOL, + number=5, + ) + + +class PartialArg(proto.Message): + r"""Partial argument value of the function call. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + null_value (google.protobuf.struct_pb2.NullValue): + Optional. Represents a null value. + + This field is a member of `oneof`_ ``delta``. + number_value (float): + Optional. Represents a double value. + + This field is a member of `oneof`_ ``delta``. + string_value (str): + Optional. Represents a string value. + + This field is a member of `oneof`_ ``delta``. + bool_value (bool): + Optional. Represents a boolean value. + + This field is a member of `oneof`_ ``delta``. + json_path (str): + Required. A JSON Path (RFC 9535) to the argument being + streamed. https://datatracker.ietf.org/doc/html/rfc9535. + e.g. "$.foo.bar[0].data". + will_continue (bool): + Optional. Whether this is not the last part of the same + json_path. If true, another PartialArg message for the + current json_path is expected to follow. 
+ """ + + null_value: struct_pb2.NullValue = proto.Field( + proto.ENUM, + number=2, + oneof="delta", + enum=struct_pb2.NullValue, + ) + number_value: float = proto.Field( + proto.DOUBLE, + number=3, + oneof="delta", + ) + string_value: str = proto.Field( + proto.STRING, + number=4, + oneof="delta", + ) + bool_value: bool = proto.Field( + proto.BOOL, + number=5, + oneof="delta", + ) + json_path: str = proto.Field( + proto.STRING, + number=1, + ) + will_continue: bool = proto.Field( + proto.BOOL, + number=6, + ) class FunctionResponsePart(proto.Message): @@ -1115,6 +1203,11 @@ class FunctionCallingConfig(proto.Message): ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + stream_function_call_arguments (bool): + Optional. When set to true, arguments of a single function + call will be streamed out in multiple + parts/contents/responses. Partial parameter results will be + returned in the [FunctionCall.partial_args] field. """ class Mode(proto.Enum): @@ -1165,6 +1258,10 @@ class Mode(proto.Enum): proto.STRING, number=2, ) + stream_function_call_arguments: bool = proto.Field( + proto.BOOL, + number=4, + ) class RetrievalConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py index f04874e191..da33450b39 100644 --- a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py @@ -167,6 +167,11 @@ class DeleteRagCorpusRequest(proto.Message): this RagCorpus will also be deleted. Otherwise, the request will only work if the RagCorpus has no RagFiles. + force_delete (bool): + Optional. If set to true, any errors + generated by external vector database during the + deletion will be ignored. The default value is + false. 
""" name: str = proto.Field( @@ -177,6 +182,10 @@ class DeleteRagCorpusRequest(proto.Message): proto.BOOL, number=2, ) + force_delete: bool = proto.Field( + proto.BOOL, + number=3, + ) class UploadRagFileRequest(proto.Message): diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 4110f20070..35d3487b2b 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.135.0" + "version": "0.0.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index f0e08e964b..2a9e00ef91 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.135.0" + "version": "0.0.0" }, "snippets": [ { diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py index 6b16001a4b..93b8dff680 100644 --- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py @@ -4848,7 +4848,12 @@ def test_create_cached_content_rest_call_success(request_type): "code_execution_result": {"outcome": 1, "output": "output_value"}, "thought": True, "thought_signature": b"thought_signature_blob", - "video_metadata": {"start_offset": {}, "end_offset": {}}, + "video_metadata": { + "start_offset": {}, + "end_offset": {}, + "fps": 0.329, + }, + "media_resolution": {"level": 1}, } ], }, @@ -5393,7 +5398,12 @@ def 
test_update_cached_content_rest_call_success(request_type): "code_execution_result": {"outcome": 1, "output": "output_value"}, "thought": True, "thought_signature": b"thought_signature_blob", - "video_metadata": {"start_offset": {}, "end_offset": {}}, + "video_metadata": { + "start_offset": {}, + "end_offset": {}, + "fps": 0.329, + }, + "media_resolution": {"level": 1}, } ], }, @@ -6800,7 +6810,12 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "code_execution_result": {"outcome": 1, "output": "output_value"}, "thought": True, "thought_signature": b"thought_signature_blob", - "video_metadata": {"start_offset": {}, "end_offset": {}}, + "video_metadata": { + "start_offset": {}, + "end_offset": {}, + "fps": 0.329, + }, + "media_resolution": {"level": 1}, } ], }, @@ -7381,7 +7396,12 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "code_execution_result": {"outcome": 1, "output": "output_value"}, "thought": True, "thought_signature": b"thought_signature_blob", - "video_metadata": {"start_offset": {}, "end_offset": {}}, + "video_metadata": { + "start_offset": {}, + "end_offset": {}, + "fps": 0.329, + }, + "media_resolution": {"level": 1}, } ], }, diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py index ead47ee805..154d31774d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py @@ -4854,7 +4854,9 @@ def test_create_tuning_job_rest_call_success(request_type): "video_metadata": { "start_offset": {"seconds": 751, "nanos": 543}, "end_offset": {}, + "fps": 0.329, }, + "media_resolution": {"level": 1}, } ], } @@ -6485,7 +6487,9 @@ async def test_create_tuning_job_rest_asyncio_call_success(request_type): "video_metadata": { "start_offset": {"seconds": 751, "nanos": 543}, "end_offset": {}, + "fps": 0.329, }, + "media_resolution": {"level": 1}, 
} ], } diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 88d9de6c10..25c241ef4d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -24609,6 +24609,14 @@ def test_create_custom_job_rest_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -26268,6 +26276,14 @@ def test_create_hyperparameter_tuning_job_rest_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -27155,6 +27171,14 @@ def test_create_nas_job_rest_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -32047,6 +32071,14 @@ async def test_create_custom_job_rest_asyncio_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -33871,6 +33903,14 @@ async def test_create_hyperparameter_tuning_job_rest_asyncio_call_success(reques 
"mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -34856,6 +34896,14 @@ async def test_create_nas_job_rest_asyncio_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index a783bb76ea..aefcb219d7 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -5596,22 +5596,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -5621,19 +5618,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = 
"abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py index 4ab43d718d..e901f5fc61 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -4815,6 +4815,17 @@ def test_create_cached_content_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -4968,6 +4979,7 @@ def test_create_cached_content_rest_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, @@ -5353,6 +5365,17 @@ def test_update_cached_content_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -5506,6 +5529,7 @@ def 
test_update_cached_content_rest_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, @@ -6753,6 +6777,17 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -6906,6 +6941,7 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, @@ -7327,6 +7363,17 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -7480,6 +7527,7 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py index fa42930e05..620aeaa91f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py +++ 
b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py @@ -4911,6 +4911,17 @@ def test_create_tuning_job_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -6676,6 +6687,17 @@ async def test_create_tuning_job_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py index 4bcbe9a0f1..aa91168a9c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py @@ -6978,6 +6978,17 @@ def test_append_event_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -9189,6 +9200,17 @@ async def test_append_event_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": 
True, }, "function_response": { "id": "id_value", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py index c41d7da44c..88efef8423 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py @@ -6882,7 +6882,12 @@ def test_delete_rag_corpus_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).delete_rag_corpus._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("force",)) + assert not set(unset_fields) - set( + ( + "force", + "force_delete", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -6934,7 +6939,15 @@ def test_delete_rag_corpus_rest_unset_required_fields(): ) unset_fields = transport.delete_rag_corpus._get_unset_required_fields({}) - assert set(unset_fields) == (set(("force",)) & set(("name",))) + assert set(unset_fields) == ( + set( + ( + "force", + "forceDelete", + ) + ) + & set(("name",)) + ) def test_delete_rag_corpus_rest_flattened():