diff --git a/sdk/batch/azure-batch/CHANGELOG.md b/sdk/batch/azure-batch/CHANGELOG.md index df4de1b9f955..c188dce064dc 100644 --- a/sdk/batch/azure-batch/CHANGELOG.md +++ b/sdk/batch/azure-batch/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 15.1.0b3 (2026-02-05) + +### Other Changes + +- Minor parameter renaming: `read_io_gi_b` to `read_io_gib`, `write_io_gi_b` to `write_io_gib`, and `v_tpm_enabled` to `vtpm_enabled`. + ## 15.1.0b2 (2025-11-20) ### Features Added diff --git a/sdk/batch/azure-batch/assets.json b/sdk/batch/azure-batch/assets.json index 60f11b1a0e8f..2196e76361e1 100644 --- a/sdk/batch/azure-batch/assets.json +++ b/sdk/batch/azure-batch/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/batch/azure-batch", - "Tag": "python/batch/azure-batch_3e56fd21d3" + "Tag": "python/batch/azure-batch_2f5749e81e" } diff --git a/sdk/batch/azure-batch/azure/batch/_serialization.py b/sdk/batch/azure-batch/azure/batch/_serialization.py index 7a0232de5ddc..38cab8750dc3 100644 --- a/sdk/batch/azure-batch/azure/batch/_serialization.py +++ b/sdk/batch/azure-batch/azure/batch/_serialization.py @@ -808,7 +808,7 @@ def serialize_data(self, data, data_type, **kwargs): # If dependencies is empty, try with current data class # It has to be a subclass of Enum anyway enum_type = self.dependencies.get(data_type, data.__class__) - if issubclass(enum_type, Enum): + if issubclass(enum_type, Enum): # type: ignore[arg-type] return Serializer.serialize_enum(data, enum_obj=enum_type) iter_type = data_type[0] + data_type[-1] diff --git a/sdk/batch/azure-batch/azure/batch/_utils/model_base.py b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py index 12926fa98dcf..5e1a2d0fa5ec 100644 --- a/sdk/batch/azure-batch/azure/batch/_utils/model_base.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py @@ -37,6 +37,7 @@ TZ_UTC = timezone.utc _T = typing.TypeVar("_T") +_NONE_TYPE = type(None) def 
_timedelta_as_isostr(td: timedelta) -> str: @@ -171,6 +172,21 @@ def default(self, o): # pylint: disable=too-many-return-statements r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" ) +_ARRAY_ENCODE_MAPPING = { + "pipeDelimited": "|", + "spaceDelimited": " ", + "commaDelimited": ",", + "newlineDelimited": "\n", +} + + +def _deserialize_array_encoded(delimit: str, attr): + if isinstance(attr, str): + if attr == "": + return [] + return attr.split(delimit) + return attr + def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: """Deserialize ISO-8601 formatted string into Datetime object. @@ -202,7 +218,7 @@ def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: test_utc = date_obj.utctimetuple() if test_utc.tm_year > 9999 or test_utc.tm_year < 1: raise OverflowError("Hit max or min date") - return date_obj + return date_obj # type: ignore[no-any-return] def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: @@ -256,7 +272,7 @@ def _deserialize_time(attr: typing.Union[str, time]) -> time: """ if isinstance(attr, time): return attr - return isodate.parse_time(attr) + return isodate.parse_time(attr) # type: ignore[no-any-return] def _deserialize_bytes(attr): @@ -315,6 +331,8 @@ def _deserialize_int_as_str(attr): def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): if annotation is int and rf and rf._format == "str": return _deserialize_int_as_str + if annotation is str and rf and rf._format in _ARRAY_ENCODE_MAPPING: + return functools.partial(_deserialize_array_encoded, _ARRAY_ENCODE_MAPPING[rf._format]) if rf and rf._format: return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore @@ -353,9 +371,39 @@ def __contains__(self, key: typing.Any) -> bool: return key in self._data def __getitem__(self, key: str) -> typing.Any: + # If this key has been deserialized (for mutable types), 
we need to handle serialization + if hasattr(self, "_attr_to_rest_field"): + cache_attr = f"_deserialized_{key}" + if hasattr(self, cache_attr): + rf = _get_rest_field(getattr(self, "_attr_to_rest_field"), key) + if rf: + value = self._data.get(key) + if isinstance(value, (dict, list, set)): + # For mutable types, serialize and return + # But also update _data with serialized form and clear flag + # so mutations via this returned value affect _data + serialized = _serialize(value, rf._format) + # If serialized form is same type (no transformation needed), + # return _data directly so mutations work + if isinstance(serialized, type(value)) and serialized == value: + return self._data.get(key) + # Otherwise return serialized copy and clear flag + try: + object.__delattr__(self, cache_attr) + except AttributeError: + pass + # Store serialized form back + self._data[key] = serialized + return serialized return self._data.__getitem__(key) def __setitem__(self, key: str, value: typing.Any) -> None: + # Clear any cached deserialized value when setting through dictionary access + cache_attr = f"_deserialized_{key}" + try: + object.__delattr__(self, cache_attr) + except AttributeError: + pass self._data.__setitem__(key, value) def __delitem__(self, key: str) -> None: @@ -483,6 +531,8 @@ def _is_model(obj: typing.Any) -> bool: def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements if isinstance(o, list): + if format in _ARRAY_ENCODE_MAPPING and all(isinstance(x, str) for x in o): + return _ARRAY_ENCODE_MAPPING[format].join(o) return [_serialize(x, format) for x in o] if isinstance(o, dict): return {k: _serialize(v, format) for k, v in o.items()} @@ -638,6 +688,10 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: if not rf._rest_name_input: rf._rest_name_input = attr cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._backcompat_attr_to_rest_field: dict[str, _RestField] = { 
+ Model._get_backcompat_attribute_name(cls._attr_to_rest_field, attr): rf + for attr, rf in cls._attr_to_rest_field.items() + } cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") return super().__new__(cls) @@ -647,6 +701,16 @@ def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: if hasattr(base, "__mapping__"): base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + @classmethod + def _get_backcompat_attribute_name(cls, attr_to_rest_field: dict[str, "_RestField"], attr_name: str) -> str: + rest_field_obj = attr_to_rest_field.get(attr_name) # pylint: disable=protected-access + if rest_field_obj is None: + return attr_name + original_tsp_name = getattr(rest_field_obj, "_original_tsp_name", None) # pylint: disable=protected-access + if original_tsp_name: + return original_tsp_name + return attr_name + @classmethod def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: for v in cls.__dict__.values(): @@ -758,6 +822,14 @@ def _deserialize_multiple_sequence( return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) +def _is_array_encoded_deserializer(deserializer: functools.partial) -> bool: + return ( + isinstance(deserializer, functools.partial) + and isinstance(deserializer.args[0], functools.partial) + and deserializer.args[0].func == _deserialize_array_encoded # pylint: disable=comparison-with-callable + ) + + def _deserialize_sequence( deserializer: typing.Optional[typing.Callable], module: typing.Optional[str], @@ -767,6 +839,19 @@ def _deserialize_sequence( return obj if isinstance(obj, ET.Element): obj = list(obj) + + # encoded string may be deserialized to sequence + if isinstance(obj, str) and isinstance(deserializer, functools.partial): + # for list[str] + if _is_array_encoded_deserializer(deserializer): + return deserializer(obj) + + # for list[Union[...]] + if isinstance(deserializer.args[0], list): + for 
sub_deserializer in deserializer.args[0]: + if _is_array_encoded_deserializer(sub_deserializer): + return sub_deserializer(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) @@ -817,16 +902,16 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur # is it optional? try: - if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if any(a is _NONE_TYPE for a in annotation.__args__): # pyright: ignore if len(annotation.__args__) <= 2: # pyright: ignore if_obj_deserializer = _get_deserialize_callable_from_annotation( - next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf # pyright: ignore ) return functools.partial(_deserialize_with_optional, if_obj_deserializer) # the type is Optional[Union[...]], we need to remove the None type from the Union annotation_copy = copy.copy(annotation) - annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a is not _NONE_TYPE] # pyright: ignore return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) except AttributeError: pass @@ -972,6 +1057,7 @@ def _failsafe_deserialize_xml( return None +# pylint: disable=too-many-instance-attributes class _RestField: def __init__( self, @@ -984,6 +1070,7 @@ def __init__( format: typing.Optional[str] = None, is_multipart_file_input: bool = False, xml: typing.Optional[dict[str, typing.Any]] = None, + original_tsp_name: typing.Optional[str] = None, ): self._type = type self._rest_name_input = name @@ -995,10 +1082,15 @@ def __init__( self._format = format self._is_multipart_file_input = is_multipart_file_input self._xml = xml if xml is not None else {} + self._original_tsp_name = original_tsp_name @property def _class_type(self) -> typing.Any: - return getattr(self._type, "args", 
[None])[0] + result = getattr(self._type, "args", [None])[0] + # type may be wrapped by nested functools.partial so we need to check for that + if isinstance(result, functools.partial): + return getattr(result, "args", [None])[0] + return result @property def _rest_name(self) -> str: @@ -1009,14 +1101,37 @@ def _rest_name(self) -> str: def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin # by this point, type and rest_name will have a value bc we default # them in __new__ of the Model class - item = obj.get(self._rest_name) + # Use _data.get() directly to avoid triggering __getitem__ which clears the cache + item = obj._data.get(self._rest_name) if item is None: return item if self._is_model: return item - return _deserialize(self._type, _serialize(item, self._format), rf=self) + + # For mutable types, we want mutations to directly affect _data + # Check if we've already deserialized this value + cache_attr = f"_deserialized_{self._rest_name}" + if hasattr(obj, cache_attr): + # Return the value from _data directly (it's been deserialized in place) + return obj._data.get(self._rest_name) + + deserialized = _deserialize(self._type, _serialize(item, self._format), rf=self) + + # For mutable types, store the deserialized value back in _data + # so mutations directly affect _data + if isinstance(deserialized, (dict, list, set)): + obj._data[self._rest_name] = deserialized + object.__setattr__(obj, cache_attr, True) # Mark as deserialized + return deserialized + + return deserialized def __set__(self, obj: Model, value) -> None: + # Clear the cached deserialized object when setting a new value + cache_attr = f"_deserialized_{self._rest_name}" + if hasattr(obj, cache_attr): + object.__delattr__(obj, cache_attr) + if value is None: # we want to wipe out entries if users set attr to None try: @@ -1046,6 +1161,7 @@ def rest_field( format: typing.Optional[str] = None, is_multipart_file_input: bool = False, xml: typing.Optional[dict[str, typing.Any]] = 
None, + original_tsp_name: typing.Optional[str] = None, ) -> typing.Any: return _RestField( name=name, @@ -1055,6 +1171,7 @@ def rest_field( format=format, is_multipart_file_input=is_multipart_file_input, xml=xml, + original_tsp_name=original_tsp_name, ) @@ -1184,7 +1301,7 @@ def _get_wrapped_element( _get_element(v, exclude_readonly, meta, wrapped_element) else: wrapped_element.text = _get_primitive_type_value(v) - return wrapped_element + return wrapped_element # type: ignore[no-any-return] def _get_primitive_type_value(v) -> str: @@ -1197,7 +1314,9 @@ def _get_primitive_type_value(v) -> str: return str(v) -def _create_xml_element(tag, prefix=None, ns=None): +def _create_xml_element( + tag: typing.Any, prefix: typing.Optional[str] = None, ns: typing.Optional[str] = None +) -> ET.Element: if prefix and ns: ET.register_namespace(prefix, ns) if ns: diff --git a/sdk/batch/azure-batch/azure/batch/_utils/serialization.py b/sdk/batch/azure-batch/azure/batch/_utils/serialization.py index 45a3e44e45cb..81ec1de5922b 100644 --- a/sdk/batch/azure-batch/azure/batch/_utils/serialization.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/serialization.py @@ -821,13 +821,20 @@ def serialize_basic(cls, data, data_type, **kwargs): :param str data_type: Type of object in the iterable. :rtype: str, int, float, bool :return: serialized object + :raises TypeError: raise if data_type is not one of str, int, float, bool. 
""" custom_serializer = cls._get_custom_serializers(data_type, **kwargs) if custom_serializer: return custom_serializer(data) if data_type == "str": return cls.serialize_unicode(data) - return eval(data_type)(data) # nosec # pylint: disable=eval-used + if data_type == "int": + return int(data) + if data_type == "float": + return float(data) + if data_type == "bool": + return bool(data) + raise TypeError("Unknown basic data type: {}".format(data_type)) @classmethod def serialize_unicode(cls, data): @@ -1757,7 +1764,7 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return :param str data_type: deserialization data type. :return: Deserialized basic type. :rtype: str, int, float or bool - :raises TypeError: if string format is not valid. + :raises TypeError: if string format is not valid or data_type is not one of str, int, float, bool. """ # If we're here, data is supposed to be a basic type. # If it's still an XML node, take the text @@ -1783,7 +1790,11 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return if data_type == "str": return self.deserialize_unicode(attr) - return eval(data_type)(attr) # nosec # pylint: disable=eval-used + if data_type == "int": + return int(attr) + if data_type == "float": + return float(attr) + raise TypeError("Unknown basic data type: {}".format(data_type)) @staticmethod def deserialize_unicode(data): diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index fbf68d95da92..9040f382ff9c 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "15.1.0b2" +VERSION = "15.1.0b3" diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index 9c94471fb1b3..fc08c261b83f 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -185,7 +185,7 @@ class AutoScaleRunError(_Model): """A message describing the autoscale error, intended to be suitable for display in a user interface.""" values_property: Optional[list["_models.NameValuePair"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"] + name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" ) """A list of additional error details related to the autoscale error.""" @@ -640,7 +640,7 @@ class BatchCreateTaskCollectionResult(_Model): """ values_property: Optional[list["_models.BatchTaskCreateResult"]] = rest_field( - name="value", visibility=["read", "create", "update", "delete", "query"] + name="value", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" ) """The results of the create Task collection operation.""" @@ -733,7 +733,7 @@ class BatchError(_Model): ) """A message describing the error, intended to be suitable for display in a user interface.""" values_property: Optional[list["_models.BatchErrorDetail"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"] + name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" ) """A collection of key-value pairs containing additional details about the error.""" @@ -2929,12 +2929,12 @@ class BatchJobScheduleStatistics(_Model): :ivar write_iops: The total number of disk write operations made by all Tasks in all Jobs created under the schedule. Required. 
:vartype write_iops: int - :ivar read_io_gi_b: The total gibibytes read from disk by all Tasks in all Jobs created under + :ivar read_io_gib: The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. Required. - :vartype read_io_gi_b: float - :ivar write_io_gi_b: The total gibibytes written to disk by all Tasks in all Jobs created under + :vartype read_io_gib: float + :ivar write_io_gib: The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. Required. - :vartype write_io_gi_b: float + :vartype write_io_gib: float :ivar succeeded_tasks_count: The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. Required. @@ -2993,10 +2993,10 @@ class BatchJobScheduleStatistics(_Model): ) """The total number of disk write operations made by all Tasks in all Jobs created under the schedule. Required.""" - read_io_gi_b: float = rest_field(name="readIOGiB", visibility=["read", "create", "update", "delete", "query"]) + read_io_gib: float = rest_field(name="readIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. Required.""" - write_io_gi_b: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) + write_io_gib: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. 
Required.""" succeeded_tasks_count: int = rest_field( @@ -3036,8 +3036,8 @@ def __init__( wall_clock_time: datetime.timedelta, read_iops: int, write_iops: int, - read_io_gi_b: float, - write_io_gi_b: float, + read_io_gib: float, + write_io_gib: float, succeeded_tasks_count: int, failed_tasks_count: int, task_retries_count: int, @@ -3400,12 +3400,12 @@ class BatchJobStatistics(_Model): :ivar write_iops: The total number of disk write operations made by all Tasks in the Job. Required. :vartype write_iops: int - :ivar read_io_gi_b: The total amount of data in GiB read from disk by all Tasks in the Job. + :ivar read_io_gib: The total amount of data in GiB read from disk by all Tasks in the Job. Required. - :vartype read_io_gi_b: float - :ivar write_io_gi_b: The total amount of data in GiB written to disk by all Tasks in the Job. + :vartype read_io_gib: float + :ivar write_io_gib: The total amount of data in GiB written to disk by all Tasks in the Job. Required. - :vartype write_io_gi_b: float + :vartype write_io_gib: float :ivar succeeded_tasks_count: The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. Required. :vartype succeeded_tasks_count: int @@ -3460,9 +3460,9 @@ class BatchJobStatistics(_Model): name="writeIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations made by all Tasks in the Job. Required.""" - read_io_gi_b: float = rest_field(name="readIOGiB", visibility=["read", "create", "update", "delete", "query"]) + read_io_gib: float = rest_field(name="readIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total amount of data in GiB read from disk by all Tasks in the Job. 
Required.""" - write_io_gi_b: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) + write_io_gib: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total amount of data in GiB written to disk by all Tasks in the Job. Required.""" succeeded_tasks_count: int = rest_field( name="numSucceededTasks", visibility=["read", "create", "update", "delete", "query"], format="str" @@ -3499,8 +3499,8 @@ def __init__( wall_clock_time: datetime.timedelta, read_iops: int, write_iops: int, - read_io_gi_b: float, - write_io_gi_b: float, + read_io_gib: float, + write_io_gib: float, succeeded_tasks_count: int, failed_tasks_count: int, task_retries_count: int, @@ -3787,7 +3787,7 @@ class BatchNode(_Model): :ivar endpoint_configuration: The endpoint configuration for the Compute Node. :vartype endpoint_configuration: ~azure.batch.models.BatchNodeEndpointConfiguration :ivar node_agent_info: Information about the Compute Node agent version and the time the - Compute Node upgraded to a new version. Required. + Compute Node upgraded to a new version. :vartype node_agent_info: ~azure.batch.models.BatchNodeAgentInfo :ivar virtual_machine_info: Info about the current state of the virtual machine. Required. :vartype virtual_machine_info: ~azure.batch.models.VirtualMachineInfo @@ -3871,9 +3871,9 @@ class BatchNode(_Model): name="endpointConfiguration", visibility=["read"] ) """The endpoint configuration for the Compute Node.""" - node_agent_info: "_models.BatchNodeAgentInfo" = rest_field(name="nodeAgentInfo", visibility=["read"]) + node_agent_info: Optional["_models.BatchNodeAgentInfo"] = rest_field(name="nodeAgentInfo", visibility=["read"]) """Information about the Compute Node agent version and the time the Compute Node upgraded to a - new version. 
Required.""" + new version.""" virtual_machine_info: "_models.VirtualMachineInfo" = rest_field(name="virtualMachineInfo", visibility=["read"]) """Info about the current state of the virtual machine. Required.""" @@ -8079,7 +8079,7 @@ class BatchTaskGroup(_Model): """ values_property: list["_models.BatchTaskCreateOptions"] = rest_field( - name="value", visibility=["read", "create", "update", "delete", "query"] + name="value", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" ) """The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has @@ -8311,10 +8311,10 @@ class BatchTaskStatistics(_Model): :vartype read_iops: int :ivar write_iops: The total number of disk write operations made by the Task. Required. :vartype write_iops: int - :ivar read_io_gi_b: The total gibibytes read from disk by the Task. Required. - :vartype read_io_gi_b: float - :ivar write_io_gi_b: The total gibibytes written to disk by the Task. Required. - :vartype write_io_gi_b: float + :ivar read_io_gib: The total gibibytes read from disk by the Task. Required. + :vartype read_io_gib: float + :ivar write_io_gib: The total gibibytes written to disk by the Task. Required. + :vartype write_io_gib: float :ivar wait_time: The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). @@ -8358,9 +8358,9 @@ class BatchTaskStatistics(_Model): name="writeIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations made by the Task. 
Required.""" - read_io_gi_b: float = rest_field(name="readIOGiB", visibility=["read", "create", "update", "delete", "query"]) + read_io_gib: float = rest_field(name="readIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total gibibytes read from disk by the Task. Required.""" - write_io_gi_b: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) + write_io_gib: float = rest_field(name="writeIOGiB", visibility=["read", "create", "update", "delete", "query"]) """The total gibibytes written to disk by the Task. Required.""" wait_time: datetime.timedelta = rest_field( name="waitTime", visibility=["read", "create", "update", "delete", "query"] @@ -8381,8 +8381,8 @@ def __init__( wall_clock_time: datetime.timedelta, read_iops: int, write_iops: int, - read_io_gi_b: float, - write_io_gi_b: float, + read_io_gib: float, + write_io_gib: float, wait_time: datetime.timedelta, ) -> None: ... @@ -8404,15 +8404,15 @@ class BatchUefiSettings(_Model): :ivar secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual machine. :vartype secure_boot_enabled: bool - :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. - :vartype v_tpm_enabled: bool + :ivar vtpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. 
+ :vartype vtpm_enabled: bool """ secure_boot_enabled: Optional[bool] = rest_field( name="secureBootEnabled", visibility=["read", "create", "update", "delete", "query"] ) """Specifies whether secure boot should be enabled on the virtual machine.""" - v_tpm_enabled: Optional[bool] = rest_field( + vtpm_enabled: Optional[bool] = rest_field( name="vTpmEnabled", visibility=["read", "create", "update", "delete", "query"] ) """Specifies whether vTPM should be enabled on the virtual machine.""" @@ -8422,7 +8422,7 @@ def __init__( self, *, secure_boot_enabled: Optional[bool] = None, - v_tpm_enabled: Optional[bool] = None, + vtpm_enabled: Optional[bool] = None, ) -> None: ... @overload @@ -8792,10 +8792,6 @@ class DataDisk(_Model): :vartype disk_size_gb: int :ivar managed_disk: The managed disk parameters. :vartype managed_disk: ~azure.batch.models.ManagedDisk - :ivar storage_account_type: The storage Account type to be used for the data disk. If omitted, - the default is "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - :vartype storage_account_type: str or ~azure.batch.models.StorageAccountType """ logical_unit_number: int = rest_field(name="lun", visibility=["read", "create", "update", "delete", "query"]) @@ -8816,11 +8812,6 @@ class DataDisk(_Model): name="managedDisk", visibility=["read", "create", "update", "delete", "query"] ) """The managed disk parameters.""" - storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = rest_field( - name="storageAccountType", visibility=["read", "create", "update", "delete", "query"] - ) - """The storage Account type to be used for the data disk. If omitted, the default is - \"standard_lrs\". 
Known values are: \"standard_lrs\", \"premium_lrs\", and \"standardssd_lrs\".""" @overload def __init__( @@ -8830,7 +8821,6 @@ def __init__( disk_size_gb: int, caching: Optional[Union[str, "_models.CachingType"]] = None, managed_disk: Optional["_models.ManagedDisk"] = None, - storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = None, ) -> None: ... @overload @@ -9945,6 +9935,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +# TODO: Open Sphinx issue that needs to be addressed through TypeSpec. +# See: https://github.com/microsoft/typespec/issues/8654 +# Revert any changes made here with `*` and `\`. class OutputFile(_Model): """On every file uploads, Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a @@ -10284,7 +10277,7 @@ class ResizeError(_Model): """A message describing the Pool resize error, intended to be suitable for display in a user interface.""" values_property: Optional[list["_models.NameValuePair"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"] + name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" ) """A list of additional error details related to the Pool resize error.""" @@ -10951,9 +10944,8 @@ class VirtualMachineConfiguration(_Model): `_. :vartype data_disks: list[~azure.batch.models.DataDisk] :ivar license_type: This only applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for the Compute - Nodes which will be deployed. If omitted, no on-premises licensing discount is - applied. Values are: + should only be used when you hold valid on-premises licenses for the Compute Nodes which will + be deployed. If omitted, no on-premises licensing discount is applied. 
Values are: Windows_Server - The on-premises license is for Windows Server. Windows_Client - The on-premises license is for Windows Client. :vartype license_type: str @@ -11020,12 +11012,11 @@ class VirtualMachineConfiguration(_Model): license_type: Optional[str] = rest_field( name="licenseType", visibility=["read", "create", "update", "delete", "query"] ) - """This only applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for the Compute - Nodes which will be deployed. If omitted, no on-premises licensing discount is - applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client.""" + """This only applies to Images that contain the Windows operating system, and should only be + used when you hold valid on-premises licenses for the Compute Nodes which will be deployed. If + omitted, no on-premises licensing discount is applied. Values are: Windows_Server - The + on-premises license is for Windows Server. 
Windows_Client - The on-premises license is for + Windows Client.""" container_configuration: Optional["_models.BatchContainerConfiguration"] = rest_field( name="containerConfiguration", visibility=["read", "create", "update", "delete", "query"] ) diff --git a/sdk/batch/azure-batch/pyproject.toml b/sdk/batch/azure-batch/pyproject.toml index 17ec8407e78b..1444efcc0a69 100644 --- a/sdk/batch/azure-batch/pyproject.toml +++ b/sdk/batch/azure-batch/pyproject.toml @@ -32,7 +32,7 @@ keywords = ["azure", "azure sdk"] dependencies = [ "isodate>=0.6.1", - "azure-core>=1.35.0", + "azure-core>=1.37.0", "typing-extensions>=4.6.0", ] dynamic = [ diff --git a/sdk/batch/azure-batch/tests/batch_preparers.py b/sdk/batch/azure-batch/tests/batch_preparers.py index ac756aa20aca..7358a6a9bdda 100644 --- a/sdk/batch/azure-batch/tests/batch_preparers.py +++ b/sdk/batch/azure-batch/tests/batch_preparers.py @@ -92,12 +92,10 @@ def create_resource(self, name, **kwargs): self.client = self.create_mgmt_client(azure.mgmt.batch.BatchManagementClient, base_url=AZURE_ARM_ENDPOINT) if self.existing_account_name: - self.resource = self.client.batch_account.get( - self.existing_resource_group, self.existing_account_name - ) - #keys = self.client.batch_account.get_keys( + self.resource = self.client.batch_account.get(self.existing_resource_group, self.existing_account_name) + # keys = self.client.batch_account.get_keys( # self.existing_resource_group, self.existing_account_name - #) + # ) # credentials = AzureNamedKeyCredential(keys.account_name, keys.primary) return {self.parameter_name: self.resource} @@ -195,7 +193,7 @@ def _get_batch_account(self, **kwargs): def create_resource(self, name, **kwargs): if self.pool_name: - name = self.pool_name + name = self.pool_name if self.is_live: diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py index 7827aee94d67..02f3f7f920eb 100644 --- a/sdk/batch/azure-batch/tests/test_batch.py +++ 
b/sdk/batch/azure-batch/tests/test_batch.py @@ -62,6 +62,7 @@ def get_redacted_key(key): redacted_value += six.ensure_str(binascii.hexlify(digest))[:6] return redacted_value + # # Define these environment variables. They should point to a Mistral Large model # hosted on MaaS, or any other MaaS model that supports chat completions with tools. @@ -74,7 +75,7 @@ def get_redacted_key(key): batch_diskencryptionset_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/batch-rg1/providers/Microsoft.Compute/diskEncryptionSets/diskEncryption", batch_usersub_account_name="batch-usersub-account-name", batch_resource_group="batch-rg", - ) +) class TestBatch(AzureMgmtRecordedTestCase): @@ -319,7 +320,7 @@ async def test_batch_create_pool_with_osdisksecurityprofile(self, client: BatchC ), uefi_settings=models.BatchUefiSettings( secure_boot_enabled=True, - v_tpm_enabled=True, + vtpm_enabled=True, ), ), image_reference=models.BatchVmImageReference( @@ -356,7 +357,6 @@ async def test_batch_create_pool_with_osdisksecurityprofile(self, client: BatchC is models.SecurityEncryptionTypes.VM_GUEST_STATE_ONLY ) - # to run this test you must first create the arm resources in test-resources.json. 
To create them, run the following command: # # azure-sdk-for-python\sdk\batch\azure-batch>..\..\..\eng\common\TestResources\New-TestResources.ps1 batch @@ -367,13 +367,25 @@ async def test_batch_create_pool_with_osdisksecurityprofile(self, client: BatchC # # now when you run this test in VSCode, it will have access to the environment variables @BatchEnviromentVariableLoader() - #@CachedResourceGroupPreparer(location=AZURE_LOCATION) - @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT, existing_account_name=os.environ.get("BATCH_USERSUB_ACCOUNT_NAME","batch-usersub-account-name"), existing_resource_group=os.environ.get("BATCH_RESOURCE_GROUP","batch-rg")) + # @CachedResourceGroupPreparer(location=AZURE_LOCATION) + @AccountPreparer( + location=AZURE_LOCATION, + batch_environment=BATCH_ENVIRONMENT, + existing_account_name=os.environ.get("BATCH_USERSUB_ACCOUNT_NAME", "batch-usersub-account-name"), + existing_resource_group=os.environ.get("BATCH_RESOURCE_GROUP", "batch-rg"), + ) @pytest.mark.parametrize("BatchClient", [SyncBatchClient, AsyncBatchClient], ids=["sync", "async"]) @client_setup @recorded_by_proxy_async - async def test_batch_create_pool_with_osdiskdiskencryption(self, client: BatchClient, batch_diskencryptionset_id, batch_resource_group, batch_usersub_account_name, **kwargs): - + async def test_batch_create_pool_with_osdiskdiskencryption( + self, + client: BatchClient, + batch_diskencryptionset_id, + batch_resource_group, + batch_usersub_account_name, + **kwargs + ): + test_iaas_pool = models.BatchPoolCreateOptions( id="batch_iass_", vm_size=DEFAULT_VM_SIZE, @@ -386,7 +398,7 @@ async def test_batch_create_pool_with_osdiskdiskencryption(self, client: BatchCl ), uefi_settings=models.BatchUefiSettings( secure_boot_enabled=True, - v_tpm_enabled=True, + vtpm_enabled=True, ), ), image_reference=models.BatchVmImageReference( @@ -401,7 +413,7 @@ async def test_batch_create_pool_with_osdiskdiskencryption(self, client: BatchCl 
security_profile=models.BatchVmDiskSecurityProfile( security_encryption_type=models.SecurityEncryptionTypes.VM_GUEST_STATE_ONLY ), - disk_encryption_set=models.DiskEncryptionSetParameters(id=batch_diskencryptionset_id) + disk_encryption_set=models.DiskEncryptionSetParameters(id=batch_diskencryptionset_id), ), ), data_disks=[ @@ -422,7 +434,7 @@ async def test_batch_create_pool_with_osdiskdiskencryption(self, client: BatchCl response = await wrap_result(client.create_pool(test_iaas_pool)) assert response is None - pool = await wrap_result(client.get_pool(test_iaas_pool.id)) + pool = await wrap_result(client.get_pool(test_iaas_pool.id)) assert pool.virtual_machine_configuration.security_profile.security_type is models.SecurityTypes.CONFIDENTIAL_VM assert pool.virtual_machine_configuration.security_profile.proxy_agent_settings.enabled is False @@ -434,7 +446,8 @@ async def test_batch_create_pool_with_osdiskdiskencryption(self, client: BatchCl pool.virtual_machine_configuration.os_disk.managed_disk.disk_encryption_set.id == batch_diskencryptionset_id ) assert ( - pool.virtual_machine_configuration.data_disks[0].managed_disk.disk_encryption_set.id == batch_diskencryptionset_id + pool.virtual_machine_configuration.data_disks[0].managed_disk.disk_encryption_set.id + == batch_diskencryptionset_id ) poller = await wrap_result(client.begin_delete_pool(pool_id=pool.id, polling_interval=5)) diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml index bf953fd8bbe2..c5c2f791eedd 100644 --- a/sdk/batch/azure-batch/tsp-location.yaml +++ b/sdk/batch/azure-batch/tsp-location.yaml @@ -1,4 +1,4 @@ -directory: specification/batch/Azure.Batch -commit: a569cbee809be8da35805444742938d7c9ab97b6 +directory: specification/batch/data-plane/Batch +commit: 93edc09ba3d879875fe0fcca6db36a61ff27b2d6 repo: Azure/azure-rest-api-specs additionalDirectories: