
Commit af62ef0

Properties added (Azure#30252)
1 parent f5c2129 commit af62ef0

File tree: 2 files changed, +4 −0 lines

sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/parameterized_spark.py

Lines changed: 1 addition & 0 deletions

@@ -103,6 +103,7 @@ class ParameterizedSparkSchema(PathAwareSchema):
     files = fields.List(fields.Str(required=True))
     archives = fields.List(fields.Str(required=True))
     conf = NestedField(SparkConfSchema, unknown=INCLUDE)
+    properties = fields.Dict(keys=fields.Str(), values=fields.Raw())
     environment = UnionField(
         [
             NestedField(AnonymousEnvironmentSchema),
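
The new properties field is a marshmallow Dict with string keys and Raw values, so arbitrary key/value pairs pass through schema loading unvalidated. A minimal standalone sketch of that behavior (not from the commit; the schema name and sample data below are hypothetical):

from marshmallow import Schema, fields

class PropertiesOnlySchema(Schema):
    # Mirrors the added field: string keys, values accepted as-is via fields.Raw.
    properties = fields.Dict(keys=fields.Str(), values=fields.Raw())

loaded = PropertiesOnlySchema().load(
    {"properties": {"spark.yarn.queue": "default", "maxRetries": 3}}
)
print(loaded["properties"])  # {'spark.yarn.queue': 'default', 'maxRetries': 3}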

sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/spark_job.py

Lines changed: 3 additions & 0 deletions

@@ -139,6 +139,7 @@ def __init__(
 
         super().__init__(**kwargs)
         self.conf = self.conf or {}
+        self.properties = self.properties or {}
         self.driver_cores = driver_cores
         self.driver_memory = driver_memory
         self.executor_cores = executor_cores

@@ -231,6 +232,7 @@ def _to_rest_object(self) -> JobBase:
             archives=self.archives,
             identity=self.identity._to_job_rest_object() if self.identity else None,
             conf=conf,
+            properties=self.properties,
             environment_id=self.environment,
             inputs=to_rest_dataset_literal_inputs(self.inputs, job_type=self.type),
             outputs=to_rest_data_outputs(self.outputs),

@@ -326,6 +328,7 @@ def _to_component(self, context: Optional[Dict] = None, **kwargs):
             dynamic_allocation_min_executors=self.dynamic_allocation_min_executors,
             dynamic_allocation_max_executors=self.dynamic_allocation_max_executors,
             conf=self.conf,
+            properties=self.properties,
             environment=self.environment,
             inputs=self._to_inputs(inputs=self.inputs, pipeline_job_dict=pipeline_job_dict),
             outputs=self._to_outputs(outputs=self.outputs, pipeline_job_dict=pipeline_job_dict),
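
With these three additions, SparkJob defaults properties to an empty dict at construction time and forwards it both to the REST payload (_to_rest_object) and to the generated component (_to_component). A hedged usage sketch; the exact SparkJob constructor arguments, the code path, the entry shape, and the property key shown here are assumptions, not taken from the commit:

from azure.ai.ml.entities import SparkJob

job = SparkJob(
    code="./src",                       # assumed local path to Spark code
    entry={"file": "main.py"},          # assumed entry-point shape
    driver_cores=1,
    driver_memory="2g",
    executor_cores=2,
    executor_memory="2g",
    executor_instances=2,
    properties={"example.key": "example-value"},  # new pass-through dict
)

print(job.properties)  # {'example.key': 'example-value'}
# The properties dict is now carried into the job's REST representation.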
