Skip to content

Commit 20bf5d1

Browse files
lllilithyang and chenyangliao
authored
Check 1p component support register NodeOutput (Azure#28807)
* add name and version for output * add asset_name, asset_version, merge 2022-12-01 pr * resolve comments * add test and modify related codes * refine code * fix failed test * fix comments * fix condition._port_name * fix test and some comments * revert the modification of compute * fix comments * fix pylint error, add tests for CommandJob, Spark, Parallel, Sweep * delete one test, will add in another pr * modify error message * add e2e test * update recordings * update recording test_case2 * test reuse when don't write version for output * tmp commit * revert test_register_output_sdk_succeed, add modification in another pr * add new line * run black with 22.3.0 --------- Co-authored-by: chenyangliao <chenyangliao@microsoft.com>
1 parent e4ec7e6 commit 20bf5d1

9 files changed

+7115
-0
lines changed

sdk/ml/azure-ai-ml/tests/internal/e2etests/test_pipeline_job.py

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,21 @@
2727

2828
_dependent_datasets = {}
2929

30+
# Per-component-type output registration cases:
# (output_port_name, registered_output_name, registered_output_version).
# Order must match PARAMETERS_TO_TEST[1:] below — the two lists are zipped.
PARAMETERS_TO_TEST_WITH_OUTPUT_OUTPUT_RELATED = [
    ("output_path", "Distributed_output", "1"),
    ("scored_dataset", "Parallel_output", "1"),
    ("SSPath", "Scope_output", "1"),
    ("output_path", "HDInsight_output", "1"),
    ("output1", "Hemera_output", "1"),
    ("destination_data", "DataTransfer_output", "1"),
    ("CosmosPath", "Starlite_output", "1"),
    ("outputfolderEnc", "Ae365expool_output", "1"),
]

# don't use PARAMETERS_TO_TEST[0] because this component doesn't have output
PARAMETERS_TO_TEST_WITH_OUTPUT = [
    ori_tuple + output_tuple
    for ori_tuple, output_tuple in zip(
        PARAMETERS_TO_TEST[1:], PARAMETERS_TO_TEST_WITH_OUTPUT_OUTPUT_RELATED
    )
]
44+
3045

3146
@pytest.fixture
3247
def create_internal_sample_dependent_datasets(client: MLClient):
@@ -97,6 +112,47 @@ def pipeline_func():
97112
json.dumps(node_rest_dict, indent=2), json.dumps(mismatched_runsettings, indent=2)
98113
)
99114

115+
@pytest.mark.parametrize(
    "test_case",
    PARAMETERS_TO_TEST_WITH_OUTPUT,
)
@pytest.mark.disable_mock_code_hash
def test_register_output_for_anonymous_internal_component(
    self,
    client: MLClient,
    test_case: tuple,
):
    """Register a NodeOutput of an anonymous internal component under an
    explicit name/version and check the settings survive job submission.
    """
    # First four fields mirror PARAMETERS_TO_TEST; the last three are the
    # output-registration triple appended at module level.
    yaml_path, inputs, runsettings_dict, pipeline_runsettings_dict = test_case[:4]
    port, registered_name, registered_version = test_case[4:]

    component_func: InternalComponent = load_component(yaml_path)

    @pipeline()
    def pipeline_func():
        node = component_func(**inputs)
        node.outputs[port].type = "uri_file"  # use this, in case that the type is path
        node.outputs[port].name = registered_name
        node.outputs[port].version = registered_version
        set_run_settings(node, runsettings_dict)

    dsl_pipeline: PipelineJob = pipeline_func()
    set_run_settings(dsl_pipeline.settings, pipeline_runsettings_dict)
    dsl_pipeline.settings.default_compute = "cpu-cluster"

    # Client-side validation must pass before the job is submitted.
    validation = dsl_pipeline._validate()
    assert validation._to_dict() == {"result": "Succeeded"}

    # Submit and immediately cancel; we only need the service-side echo.
    pipeline_job = assert_job_cancel(dsl_pipeline, client)

    # The registered name/version should round-trip on the returned job.
    returned_output = pipeline_job.jobs["node"].outputs[port]
    assert returned_output.name == registered_name
    assert returned_output.version == registered_version
155+
100156
@pytest.mark.parametrize(
101157
"test_case_i,test_case_name",
102158
TEST_CASE_NAME_ENUMERATE,

0 commit comments

Comments
 (0)