From 3cfbd57fa12a4fb5d57c93f62a24fb5c8a004aa2 Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Fri, 21 Nov 2025 14:17:42 +0530 Subject: [PATCH 1/9] Added function to calculate the remote host disk space --- lib/python/space_precheck.py | 54 +++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/lib/python/space_precheck.py b/lib/python/space_precheck.py index 3183384..b5f9bfa 100644 --- a/lib/python/space_precheck.py +++ b/lib/python/space_precheck.py @@ -50,6 +50,34 @@ def get_local_free_space_mb(self, directory_path: str) -> float: total, used, free = shutil.disk_usage(directory_path) return free / (1024 ** 2) + def get_remote_free_space_mb(self, hostname: str, remote_path: str = "/tmp") -> float: + """ + Check available free disk space on a remote host for a given directory. + + Args: + hostname (str): Remote host name. + remote_path (str): Remote path to check for free space. + + Returns: + MB as float. + + Uses: df -k | awk 'NR==2 {print $4}' + awk 'NR==2 {print $4} --> Extracts value of the fourth column and second row, i.e, available free space. + """ + cmd = f"df -k {remote_path} 2>/dev/null | awk 'NR==2 {{print $4}}'" + result = self.ssh.execute_ssh_command(hostname, cmd) #Returns in KB + + result_out = result.stdout.strip() + result_err = result.stderr.strip() + + if result.returncode != 0 or not result_out: + print(f"Error in checking available space on a remote host {hostname}: {result_err}") + return 0.0 + + free_kb = int(result_out) + return free_kb / 1024.0 + + def retrieve_remote_env_var_path(self, hostname: str, env_var_name: str) -> str: """ Resolve a remote environment variable to an absolute path. 
@@ -104,7 +132,7 @@ def run(self): """ # Gather all hosts from the infra JSON hosts = self.loader.get_machine_hostnames() - total_size_mb = 0.0 + total_archive_size_mb = 0.0 host_statuses = [] # collect per-host status # Prepare the local “out” folder path for free‐space checks @@ -115,7 +143,7 @@ def run(self): # Loop each host and measure remote directories for host in hosts: print(f"\n----- {host} -----") - host_size_mb = 0.0 # reset per‐host accumulator + host_archive_size_mb = 0.0 # reset per‐host accumulator max_host_archive_mb = 0.0 # Standard WebLogic env vars @@ -145,8 +173,8 @@ def run(self): size_bytes = self.get_remote_directory_size_bytes(host, path) size_mb = size_bytes / (1024 ** 2) # accumulate both per‐host and grand total - host_size_mb += size_mb - total_size_mb += size_mb + host_archive_size_mb += size_mb + total_archive_size_mb += size_mb max_host_archive_mb = max(max_host_archive_mb, size_mb) # Any extra paths defined in infra under “ExtraOSPaths” @@ -154,24 +182,24 @@ def run(self): for extra in extra_paths: size_bytes = self.get_remote_directory_size_bytes(host, extra) size_mb = size_bytes / (1024 ** 2) - host_size_mb += size_mb - total_size_mb += size_mb + host_archive_size_mb += size_mb + total_archive_size_mb += size_mb max_host_archive_mb = max(max_host_archive_mb, size_mb) # Report per‐host archive total and local free space - print(f"Total remote archive size for {host}: {host_size_mb:.2f} MB") + print(f"Total remote archive size for {host}: {host_archive_size_mb:.2f} MB") # Report the largest size of archive in the host print(f"Largest size of remote archive size for {host}: {max_host_archive_mb:.2f} MB") # show available local space per host - available_space_mb = self.get_local_free_space_mb(output_dir) - print(f"Available local disk space on {host}: {available_space_mb:.2f} MB") + available_host_space_mb = self.get_remote_free_space_mb(host) + print(f"Available disk space on {host}: {available_host_space_mb:.2f} MB") max_archive_mb 
= max(max_host_archive_mb, max_archive_mb) - # determine per-host status (0=sufficient,1=insufficient) - status = 0 if available_space_mb >= max_host_archive_mb * 1.2 else 1 + # determine if the host have sufficient space to store its largest archive. (0=sufficient,1=insufficient) + status = 0 if available_host_space_mb >= max_host_archive_mb * 1.2 else 1 host_statuses.append([host, status]) available_space_mb = self.get_local_free_space_mb(output_dir) @@ -179,11 +207,11 @@ def run(self): print(f"\n-----------------------------------------------") # Summary report print(f"\nLargest archive size among all the hosts: {max_archive_mb:.2f} MB") - print(f"\nTotal remote archive size combined: {total_size_mb:.2f} MB") + print(f"\nTotal remote archive size combined: {total_archive_size_mb:.2f} MB") print(f"Available local disk space on admin VM: {available_space_mb:.2f} MB") # Decision based on 20% safety buffer for combined size - overall_status = 0 if (available_space_mb >= total_size_mb * 1.2) else 1 + overall_status = 0 if (available_space_mb >= total_archive_size_mb * 1.2) else 1 if overall_status == 0: print("Sufficient space is available to store all nodes archives on the admin VM.") else: From cdfd51604a7349d66686bbd8b2319b7cc3de4ac5 Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Fri, 21 Nov 2025 14:24:10 +0530 Subject: [PATCH 2/9] Minor fixes in description --- lib/python/space_precheck.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/python/space_precheck.py b/lib/python/space_precheck.py index b5f9bfa..1c33564 100644 --- a/lib/python/space_precheck.py +++ b/lib/python/space_precheck.py @@ -62,7 +62,7 @@ def get_remote_free_space_mb(self, hostname: str, remote_path: str = "/tmp") -> MB as float. Uses: df -k | awk 'NR==2 {print $4}' - awk 'NR==2 {print $4} --> Extracts value of the fourth column and second row, i.e, available free space. + Extracts available free space (column 4, second row). 
""" cmd = f"df -k {remote_path} 2>/dev/null | awk 'NR==2 {{print $4}}'" result = self.ssh.execute_ssh_command(hostname, cmd) #Returns in KB @@ -198,7 +198,7 @@ def run(self): max_archive_mb = max(max_host_archive_mb, max_archive_mb) - # determine if the host have sufficient space to store its largest archive. (0=sufficient,1=insufficient) + # determine if the host have sufficient space to store its largest archive with 20% buffer. (0=sufficient,1=insufficient) status = 0 if available_host_space_mb >= max_host_archive_mb * 1.2 else 1 host_statuses.append([host, status]) From 796599d1b942f6318abb3424709b88935d2a2d2a Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Wed, 26 Nov 2025 15:59:02 +0530 Subject: [PATCH 3/9] Added modularity in cases --- lib/python/archive_infra.py | 247 ++++++++++++++---- .../migrate/data/wls_migration_archive.py | 6 +- lib/python/space_precheck.py | 22 +- 3 files changed, 211 insertions(+), 64 deletions(-) diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py index ff9afd2..bd07e9f 100644 --- a/lib/python/archive_infra.py +++ b/lib/python/archive_infra.py @@ -464,6 +464,73 @@ def cleanup_archives(file_path, wls_domain_name): __logger.warning('WLSDPLY-05027', msg, class_name=_class_name, method_name=_method_name) +def process_archives(nodes, model, model_context, machine_nodes, base_location, init_argument_map, on_prem_values, + space_status, log_file, wls_domain_name, per_host_space_key, archive_types, do_upload): + + _method_name = 'process_archives' + global __logger + + # Define the archive file patterns + archive_patterns = ( + "*-%s-weblogic_home.tar.gz" % wls_domain_name, + "*-%s-java_home.tar.gz" % wls_domain_name, + "*-%s-domain_home.tar.gz" % wls_domain_name, + "*-%s-custom_dirs.tar.gz" % wls_domain_name + ) + + for machine in nodes: + node_details = OrderedDict() + listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) + + 
init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address + is_encryption_supported = EncryptionUtils.isEncryptionSupported() + + if is_encryption_supported: + __logger.info('WLSDPLY-20044', init_argument_map, class_name=_class_name, method_name=_method_name) + else: + __logger.info('WLSDPLY-20045', init_argument_map, class_name=_class_name, method_name=_method_name) + + per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) + archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model) + + # Check space policy for host + host_space_info = space_status.get( + listen_address, {"largest_archive": 1, "full_archives": 1} + ) + + if host_space_info.get(per_host_space_key, 1) == 1: + archiver.print_per_host_todo_commands() + __logger.warning('WLSDPLY-05027', + 'Not enough space on %s to create the archives. ' + 'Please run the commands manually mentioned in the TODO ' + 'to create the archive, scp to the admin host ' + 'and upload to bucket.' 
% machine, + class_name=_class_name, method_name=_method_name) + continue + + # Run archive(s) + for archive_type in archive_types: + result = archiver.archive(archive_type) + if not infra_constants.SUCCESS == result: + ex = exception_helper.create_cla_exception( + ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") + __logger.throwing(ex, class_name=_class_name, + method_name=_method_name) + raise ex + + # Upload/delete if enabled + if do_upload: + node_dir = per_machine_model_context.get_local_output_dir() + + for fname in os.listdir(node_dir): + for pattern in archive_patterns: + if fnmatch.fnmatch(fname, "*%s" % pattern): + path = os.path.join(node_dir, fname) + upload_to_bucket(path, log_file,on_prem_values, wls_domain_name) + delete_local(path) + delete_remote_archives(per_machine_model_context, fname) + + def __archive_directories(model, model_context, helper): global init_argument_map """ @@ -500,14 +567,6 @@ def __archive_directories(model, model_context, helper): env_file = os.path.abspath(os.path.join(base_dir,'..', 'config', 'on-prem.env')) log_file = os.path.abspath(os.path.join(base_dir,'..', 'logs', 'upload_to_oci_archive.log')) - # Define the archive file patterns - archive_patterns = ( - "*-%s-weblogic_home.tar.gz" % wls_domain_name, - "*-%s-java_home.tar.gz" % wls_domain_name, - "*-%s-domain_home.tar.gz" % wls_domain_name, - "*-%s-custom_dirs.tar.gz" % wls_domain_name - ) - # Load the on-prem.env file on_prem_values = load_env_file(env_file) @@ -541,48 +600,50 @@ def __archive_directories(model, model_context, helper): __logger.throwing(ex, class_name=_class_name, method_name=_method_name) raise ex - # Case 1: Admin has enough space-just create all the archives, and if skip-transfer is true then don't upload or delete, else upload and delete - if space_admin_rc == 0: - if admin_machine in nodes: - #Do local Discovery. It should include any managed server registered. 
- archive_result=WLSMigrationArchiver(admin_machine,model_context, OrderedDict(), base_location, model).archive("all_archives") - if not infra_constants.SUCCESS == archive_result: - ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Admin archive failed") - __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - raise ex - - for machine in nodes: - if not machine == admin_machine: - node_details = OrderedDict() - listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) - init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address - is_encryption_supported = EncryptionUtils.isEncryptionSupported() - if is_encryption_supported: - __logger.info('WLSDPLY-20044', - init_argument_map, class_name=_class_name, method_name=_method_name) - else: - __logger.info('WLSDPLY-20045', - init_argument_map, class_name=_class_name, method_name=_method_name) - per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) - host_result = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model).archive("all_archives") - if not infra_constants.SUCCESS == host_result: - ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") - __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - raise ex - - if not skip_transfer: - admin_out = model_context.get_local_output_dir() - for fname in os.listdir(admin_out): - for pattern in archive_patterns: - if fnmatch.fnmatch(fname, pattern): - upload_to_bucket(os.path.join(admin_out, fname), log_file, on_prem_values, wls_domain_name) - delete_local(os.path.join(admin_out, fname)) - # remote cleanup on per-host model context - if per_machine_model_context: - delete_remote_archives(per_machine_model_context, fname) - - # Case 2: Admin has NO space for all the archives together and skip_transfer = false (Selective remote per 
archive + upload + delete) - elif space_per_archive_rc == 0 and not skip_transfer: + # # Case 1: Admin has enough space-just create all the archives, and if skip-transfer is true then don't upload or delete, else upload and delete + # if space_admin_rc == 0: + # if admin_machine in nodes: + # #Do local Discovery. It should include any managed server registered. + # archive_result=WLSMigrationArchiver(admin_machine,model_context, OrderedDict(), base_location, model).archive("all_archives") + # if not infra_constants.SUCCESS == archive_result: + # ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Admin archive failed") + # __logger.throwing(ex, class_name=_class_name, method_name=_method_name) + # raise ex + # + # for machine in nodes: + # if not machine == admin_machine: + # node_details = OrderedDict() + # listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) + # init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address + # is_encryption_supported = EncryptionUtils.isEncryptionSupported() + # if is_encryption_supported: + # __logger.info('WLSDPLY-20044', + # init_argument_map, class_name=_class_name, method_name=_method_name) + # else: + # __logger.info('WLSDPLY-20045', + # init_argument_map, class_name=_class_name, method_name=_method_name) + # per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) + # host_result = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model).archive("all_archives") + # if not infra_constants.SUCCESS == host_result: + # ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") + # __logger.throwing(ex, class_name=_class_name, method_name=_method_name) + # raise ex + # + # if not skip_transfer: + # admin_out = model_context.get_local_output_dir() + # for fname in os.listdir(admin_out): + # for pattern in archive_patterns: + # 
if fnmatch.fnmatch(fname, pattern): + # upload_to_bucket(os.path.join(admin_out, fname), log_file, on_prem_values, wls_domain_name) + # delete_local(os.path.join(admin_out, fname)) + # # remote cleanup on per-host model context + # if per_machine_model_context: + # delete_remote_archives(per_machine_model_context, fname) + +# Case 1: Admin has space to store the largest archive among all the hosts and skip_transfer = false (Selective remote per archive + upload + delete) + # a. If Managed host has space to hold its largest archive, then perform per node per archive upload + delete. + # b. For the Managed host which does not have sufficient space to hold its largest archive, it prints TODO messages for the host. + if space_per_archive_rc == 0 and not skip_transfer: for machine in nodes: node_details = OrderedDict() listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) @@ -597,8 +658,9 @@ def __archive_directories(model, model_context, helper): per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model) - # checking per node space - if space_status.get(listen_address, 1) == 1: + # Checks if the host has enough space to hold the largest archive, if not then prints TODO messages. + host_space_info = space_status.get(listen_address, {"largest_archive": 1, "full_archives": 1}) + if host_space_info.get("largest_archive", 1) == 1: archiver.print_per_host_todo_commands() __logger.warning('WLSDPLY-05027', 'Not enough space on %s to create the archives. 
Please run the commands manually mentioned in the TODO to create the archive, ' @@ -625,12 +687,83 @@ def __archive_directories(model, model_context, helper): if per_machine_model_context: delete_remote_archives(per_machine_model_context, fname) - # Case 3: Admin has NO space or skip_transfer = true (Manual steps only) +# Case 2: Admin does not have space to store the largest archive among all the hosts and skip_transfer = false + # a. Managed hosts have enough space to hold all its archives then store it there. + # b. For Managed nodes doesn’t have space to hold all the archives, print TODO messages. + elif space_per_archive_rc == 1 and not skip_transfer or skip_transfer and admin_space_full_archives == 0: + for machine in nodes: + node_details = OrderedDict() + listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) + init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address + is_encryption_supported = EncryptionUtils.isEncryptionSupported() + if is_encryption_supported: + __logger.info('WLSDPLY-20044', + init_argument_map, class_name=_class_name, method_name=_method_name) + else: + __logger.info('WLSDPLY-20045', + init_argument_map, class_name=_class_name, method_name=_method_name) + per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) + archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model, transfer_to_admin=False) + + # Checks if the host has enough space to hold the all its archives, if not then prints TODO messages. + host_space_info = space_status.get(listen_address, {"largest_archive": 1, "full_archives": 1}) + if host_space_info.get("full_archives", 1) == 1: + archiver.print_per_host_todo_commands() + __logger.warning('WLSDPLY-05027', + 'Not enough space on %s to create the archives. 
Please run the commands manually mentioned in the TODO to create the archive, ' + 'scp to the admin host and upload to bucket.' % machine, + class_name=_class_name, method_name=_method_name) + continue + + result = archiver.archive("all_archives") + if not infra_constants.SUCCESS == result: + ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") + __logger.throwing(ex, class_name=_class_name, method_name=_method_name) + raise ex + +# Case 3: skip_transfer = true + # a. Admin have enough space to store all the archives, then all the archives are stored in the admin. (covered) + # b. Admin doesn’t have enough space to store all the archives but its own archives, then all nodes stores their respective archives including the admin. (Not covered) + # c. Print TODO messages for any node which doesn’t have enough space to create it’s archive. (Not covered) else : - __logger.warning('WLSDPLY-05027', - 'Admin VM has insufficient space and skip_transfer = true.\n', - class_name=_class_name, method_name=_method_name) - return + + if space_admin_rc == 0: + for machine in nodes: + node_details = OrderedDict() + listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) + init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address + is_encryption_supported = EncryptionUtils.isEncryptionSupported() + if is_encryption_supported: + __logger.info('WLSDPLY-20044', + init_argument_map, class_name=_class_name, method_name=_method_name) + else: + __logger.info('WLSDPLY-20045', + init_argument_map, class_name=_class_name, method_name=_method_name) + per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) + archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model) + + # Checks if the host has enough space to hold the all its archives, if not then prints TODO messages. 
+ host_space_info = space_status.get(listen_address, {"largest_archive": 1, "full_archives": 1}) + if host_space_info.get("largest_archive", 1) == 1: + archiver.print_per_host_todo_commands() + __logger.warning('WLSDPLY-05027', + 'Not enough space on %s to create the archives. Please run the commands manually mentioned in the TODO to create the archive, ' + 'scp to the admin host and upload to bucket.' % machine, + class_name=_class_name, method_name=_method_name) + continue + + for archive_type in ("oracle_home", "weblogic_home", "java_home", "custom_dirs"): + result = archiver.archive(archive_type) + if not infra_constants.SUCCESS == result: + ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") + __logger.throwing(ex, class_name=_class_name, method_name=_method_name) + raise ex + + # __logger.warning('WLSDPLY-05027', + # 'Admin VM has insufficient space and skip_transfer = true.\n', + # class_name=_class_name, method_name=_method_name) + # return + if len(hosts_details) == 0: return diff --git a/lib/python/migrate/data/wls_migration_archive.py b/lib/python/migrate/data/wls_migration_archive.py index f27d017..59fc849 100644 --- a/lib/python/migrate/data/wls_migration_archive.py +++ b/lib/python/migrate/data/wls_migration_archive.py @@ -113,7 +113,7 @@ class WLSMigrationArchiver(object): - def __init__(self, machine, model_context, dictionary, base_location, model, wlst_mode=None, aliases=None, credential_injector=None): + def __init__(self, machine, model_context, dictionary, base_location, model, wlst_mode=None, aliases=None, credential_injector=None, transfer_to_admin=True): """ :param model_context: context about the model for this instance of discoverDomain :param base_location: to look for common WebLogic resources. 
By default, this is the global path or '/' @@ -139,6 +139,8 @@ def __init__(self, machine, model_context, dictionary, base_location, model, wls self._weblogic_helper = model_context.get_weblogic_helper() self._wlst_helper = WlstHelper(ExceptionType.DISCOVER) + self._transfer_to_admin = transfer_to_admin + # self._wls_version = model_context.get_effective_wls_version() self.path_helper = path_helper.get_path_helper() @@ -270,7 +272,7 @@ def __archive_directory(self, dir_to_compress, archive_file_name, ssh_download_d is_dry_run = self._model_context.is_skip_archive() response=self._cmd_helper.compress_archive(archive_file_name, dir_to_compress, is_dry_run) if not is_dry_run : - if self._model_context.is_ssh(): + if self._model_context.is_ssh() and self._transfer_to_admin: entry_path = self._cmd_helper.download_file_from_remote_server(self._model_context,archive_file_name, self._model_context.get_local_output_dir(), "") diff --git a/lib/python/space_precheck.py b/lib/python/space_precheck.py index 1c33564..6e9eff2 100644 --- a/lib/python/space_precheck.py +++ b/lib/python/space_precheck.py @@ -196,11 +196,20 @@ def run(self): available_host_space_mb = self.get_remote_free_space_mb(host) print(f"Available disk space on {host}: {available_host_space_mb:.2f} MB") + #Updating the largest archive size among all the hosts. max_archive_mb = max(max_host_archive_mb, max_archive_mb) - # determine if the host have sufficient space to store its largest archive with 20% buffer. (0=sufficient,1=insufficient) - status = 0 if available_host_space_mb >= max_host_archive_mb * 1.2 else 1 - host_statuses.append([host, status]) + # determine if the host has sufficient space to store its largest archive with 20% buffer. (0=sufficient,1=insufficient) + largest_archive_status = 0 if available_host_space_mb >= max_host_archive_mb * 1.2 else 1 + + #determine if the host has sufficient space to store all its archives with 20% buffer. 
(0=sufficient,1=insufficient) + full_archive_status = 0 if available_host_space_mb >= host_archive_size_mb * 1.2 else 1 + + host_statuses.append([host, { + "largest_archive": largest_archive_status, + "full_archives": full_archive_status + }]) + available_space_mb = self.get_local_free_space_mb(output_dir) @@ -210,7 +219,7 @@ def run(self): print(f"\nTotal remote archive size combined: {total_archive_size_mb:.2f} MB") print(f"Available local disk space on admin VM: {available_space_mb:.2f} MB") - # Decision based on 20% safety buffer for combined size + # Detemine if the admin have space to store all the archives. Decision based on 20% safety buffer for combined size overall_status = 0 if (available_space_mb >= total_archive_size_mb * 1.2) else 1 if overall_status == 0: print("Sufficient space is available to store all nodes archives on the admin VM.") @@ -225,7 +234,10 @@ def run(self): print(f"\n-----------------------------------------------") # Convert host_statuses to a dictionary - host_status_dict = {host: status for host, status in host_statuses} + host_status_dict = {} + for host, status_dict in host_statuses: + host_status_dict[host] = status_dict + return host_status_dict, per_archive_status, overall_status # return host_statuses , per archive status and overall status code From 60c13a9600f8f4b87eae3c1ea2649b94f3ebfd67 Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Wed, 26 Nov 2025 16:08:37 +0530 Subject: [PATCH 4/9] Minor fix --- lib/python/archive_infra.py | 175 ++++++------------------------------ 1 file changed, 26 insertions(+), 149 deletions(-) diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py index bd07e9f..9ed87fa 100644 --- a/lib/python/archive_infra.py +++ b/lib/python/archive_infra.py @@ -600,169 +600,46 @@ def __archive_directories(model, model_context, helper): __logger.throwing(ex, class_name=_class_name, method_name=_method_name) raise ex - # # Case 1: Admin has enough space-just create all the archives, and if 
skip-transfer is true then don't upload or delete, else upload and delete - # if space_admin_rc == 0: - # if admin_machine in nodes: - # #Do local Discovery. It should include any managed server registered. - # archive_result=WLSMigrationArchiver(admin_machine,model_context, OrderedDict(), base_location, model).archive("all_archives") - # if not infra_constants.SUCCESS == archive_result: - # ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Admin archive failed") - # __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - # raise ex - # - # for machine in nodes: - # if not machine == admin_machine: - # node_details = OrderedDict() - # listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) - # init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address - # is_encryption_supported = EncryptionUtils.isEncryptionSupported() - # if is_encryption_supported: - # __logger.info('WLSDPLY-20044', - # init_argument_map, class_name=_class_name, method_name=_method_name) - # else: - # __logger.info('WLSDPLY-20045', - # init_argument_map, class_name=_class_name, method_name=_method_name) - # per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) - # host_result = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model).archive("all_archives") - # if not infra_constants.SUCCESS == host_result: - # ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") - # __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - # raise ex - # - # if not skip_transfer: - # admin_out = model_context.get_local_output_dir() - # for fname in os.listdir(admin_out): - # for pattern in archive_patterns: - # if fnmatch.fnmatch(fname, pattern): - # upload_to_bucket(os.path.join(admin_out, fname), log_file, on_prem_values, wls_domain_name) - # 
delete_local(os.path.join(admin_out, fname)) - # # remote cleanup on per-host model context - # if per_machine_model_context: - # delete_remote_archives(per_machine_model_context, fname) - # Case 1: Admin has space to store the largest archive among all the hosts and skip_transfer = false (Selective remote per archive + upload + delete) # a. If Managed host has space to hold its largest archive, then perform per node per archive upload + delete. # b. For the Managed host which does not have sufficient space to hold its largest archive, it prints TODO messages for the host. if space_per_archive_rc == 0 and not skip_transfer: - for machine in nodes: - node_details = OrderedDict() - listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) - init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address - is_encryption_supported = EncryptionUtils.isEncryptionSupported() - if is_encryption_supported: - __logger.info('WLSDPLY-20044', - init_argument_map, class_name=_class_name, method_name=_method_name) - else: - __logger.info('WLSDPLY-20045', - init_argument_map, class_name=_class_name, method_name=_method_name) - per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) - archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model) - - # Checks if the host has enough space to hold the largest archive, if not then prints TODO messages. - host_space_info = space_status.get(listen_address, {"largest_archive": 1, "full_archives": 1}) - if host_space_info.get("largest_archive", 1) == 1: - archiver.print_per_host_todo_commands() - __logger.warning('WLSDPLY-05027', - 'Not enough space on %s to create the archives. Please run the commands manually mentioned in the TODO to create the archive, ' - 'scp to the admin host and upload to bucket.' 
% machine, - class_name=_class_name, method_name=_method_name) - continue - - for archive_type in ("oracle_home", "weblogic_home", "java_home", "custom_dirs"): - result = archiver.archive(archive_type) - if not infra_constants.SUCCESS == result: - ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") - __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - raise ex - - # Upload and delete - node_dir = per_machine_model_context.get_local_output_dir() - for fname in os.listdir(node_dir): - for pattern in archive_patterns: - if fnmatch.fnmatch(fname, "*%s" % pattern): - path = os.path.join(node_dir, fname) - upload_to_bucket(path,log_file,on_prem_values) - delete_local(path) - # remote cleanup on per-host model context - if per_machine_model_context: - delete_remote_archives(per_machine_model_context, fname) - + process_archives( + nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, + per_host_space_key="largest_archive", + archive_types=("oracle_home", "weblogic_home","java_home", "custom_dirs"), + do_upload=True + ) # Case 2: Admin does not have space to store the largest archive among all the hosts and skip_transfer = false # a. Managed hosts have enough space to hold all its archives then store it there. # b. For Managed nodes doesn’t have space to hold all the archives, print TODO messages. 
- elif space_per_archive_rc == 1 and not skip_transfer or skip_transfer and admin_space_full_archives == 0: - for machine in nodes: - node_details = OrderedDict() - listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) - init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address - is_encryption_supported = EncryptionUtils.isEncryptionSupported() - if is_encryption_supported: - __logger.info('WLSDPLY-20044', - init_argument_map, class_name=_class_name, method_name=_method_name) - else: - __logger.info('WLSDPLY-20045', - init_argument_map, class_name=_class_name, method_name=_method_name) - per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) - archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model, transfer_to_admin=False) - - # Checks if the host has enough space to hold the all its archives, if not then prints TODO messages. - host_space_info = space_status.get(listen_address, {"largest_archive": 1, "full_archives": 1}) - if host_space_info.get("full_archives", 1) == 1: - archiver.print_per_host_todo_commands() - __logger.warning('WLSDPLY-05027', - 'Not enough space on %s to create the archives. Please run the commands manually mentioned in the TODO to create the archive, ' - 'scp to the admin host and upload to bucket.' 
% machine, - class_name=_class_name, method_name=_method_name) - continue - - result = archiver.archive("all_archives") - if not infra_constants.SUCCESS == result: - ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") - __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - raise ex + elif space_per_archive_rc == 1 and not skip_transfer: + process_archives( + nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, + per_host_space_key="full_archives", + archive_types=("all_archives",), + do_upload=False + ) # Case 3: skip_transfer = true # a. Admin have enough space to store all the archives, then all the archives are stored in the admin. (covered) # b. Admin doesn’t have enough space to store all the archives but its own archives, then all nodes stores their respective archives including the admin. (Not covered) # c. Print TODO messages for any node which doesn’t have enough space to create it’s archive. (Not covered) else : - if space_admin_rc == 0: - for machine in nodes: - node_details = OrderedDict() - listen_address = common.traverse(machine_nodes, machine, model_constants.NODE_MANAGER, model_constants.LISTEN_ADDRESS) - init_argument_map[CommandLineArgUtil.SSH_HOST_SWITCH] = listen_address - is_encryption_supported = EncryptionUtils.isEncryptionSupported() - if is_encryption_supported: - __logger.info('WLSDPLY-20044', - init_argument_map, class_name=_class_name, method_name=_method_name) - else: - __logger.info('WLSDPLY-20045', - init_argument_map, class_name=_class_name, method_name=_method_name) - per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) - archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model) - - # Checks if the host has enough space to hold the all its archives, if not then prints TODO messages. 
- host_space_info = space_status.get(listen_address, {"largest_archive": 1, "full_archives": 1}) - if host_space_info.get("largest_archive", 1) == 1: - archiver.print_per_host_todo_commands() - __logger.warning('WLSDPLY-05027', - 'Not enough space on %s to create the archives. Please run the commands manually mentioned in the TODO to create the archive, ' - 'scp to the admin host and upload to bucket.' % machine, - class_name=_class_name, method_name=_method_name) - continue - - for archive_type in ("oracle_home", "weblogic_home", "java_home", "custom_dirs"): - result = archiver.archive(archive_type) - if not infra_constants.SUCCESS == result: - ex = exception_helper.create_cla_exception(ExitCode.ERROR, 'WLSDPLY-32902', "Node archive failed") - __logger.throwing(ex, class_name=_class_name, method_name=_method_name) - raise ex - - # __logger.warning('WLSDPLY-05027', - # 'Admin VM has insufficient space and skip_transfer = true.\n', - # class_name=_class_name, method_name=_method_name) - # return + process_archives( + nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, + per_host_space_key="largest_archive", + archive_types=("oracle_home", "weblogic_home","java_home", "custom_dirs"), + do_upload=False + ) + else : + process_archives( + nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, + per_host_space_key="full_archives", + archive_types=("all_archives",), + do_upload=False + ) if len(hosts_details) == 0: From b81d5e7e0f3a38679e0fdd337416d450f1c1b5c6 Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Wed, 26 Nov 2025 17:21:21 +0530 Subject: [PATCH 5/9] Added clear log messages to track the Case. 
--- lib/python/archive_infra.py | 49 ++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py index 9ed87fa..bbb1aa3 100644 --- a/lib/python/archive_infra.py +++ b/lib/python/archive_infra.py @@ -465,10 +465,9 @@ def cleanup_archives(file_path, wls_domain_name): def process_archives(nodes, model, model_context, machine_nodes, base_location, init_argument_map, on_prem_values, - space_status, log_file, wls_domain_name, per_host_space_key, archive_types, do_upload): + space_status, log_file, wls_domain_name, per_host_space_key, archive_types, transfer_to_admin, do_upload): _method_name = 'process_archives' - global __logger # Define the archive file patterns archive_patterns = ( @@ -491,7 +490,7 @@ def process_archives(nodes, model, model_context, machine_nodes, base_location, __logger.info('WLSDPLY-20045', init_argument_map, class_name=_class_name, method_name=_method_name) per_machine_model_context = __process_args(init_argument_map, is_encryption_supported) - archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model) + archiver = WLSMigrationArchiver(machine, per_machine_model_context, node_details, base_location, model, transfer_to_admin=transfer_to_admin) # Check space policy for host host_space_info = space_status.get( @@ -604,20 +603,45 @@ def __archive_directories(model, model_context, helper): # a. If Managed host has space to hold its largest archive, then perform per node per archive upload + delete. # b. For the Managed host which does not have sufficient space to hold its largest archive, it prints TODO messages for the host. if space_per_archive_rc == 0 and not skip_transfer: + __logger.info( + 'WLSDPLY-05027', + 'Admin has space for the largest archive. 
Managed hosts must have space for largest archive to upload to bucket.', + class_name=_class_name, method_name=_method_name + ) + + __logger.info( + 'WLSDPLY-05027', + 'Processing per-host per-archive generation AND upload for archive types: oracle_home, weblogic_home, java_home, custom_dirs', + class_name=_class_name, method_name=_method_name + ) process_archives( nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, per_host_space_key="largest_archive", archive_types=("oracle_home", "weblogic_home","java_home", "custom_dirs"), + transfer_to_admin=True, do_upload=True ) + # Case 2: Admin does not have space to store the largest archive among all the hosts and skip_transfer = false # a. Managed hosts have enough space to hold all its archives then store it there. - # b. For Managed nodes doesn’t have space to hold all the archives, print TODO messages. + # b. For Managed nodes which don’t have space to hold all the archives, print TODO messages. elif space_per_archive_rc == 1 and not skip_transfer: + __logger.info( + 'WLSDPLY-05027', + 'Admin does NOT have space for the largest archive. Each host will store its own full archive locally. NO transfer to admin.', + class_name=_class_name, method_name=_method_name + ) + + __logger.info( + 'WLSDPLY-05027', + 'Processing full archive ("all_archives") on each host with NO upload to bucket', + class_name=_class_name, method_name=_method_name + ) process_archives( nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, per_host_space_key="full_archives", archive_types=("all_archives",), + transfer_to_admin=False, do_upload=False ) @@ -626,18 +650,35 @@ def __archive_directories(model, model_context, helper): # b. Admin doesn’t have enough space to store all the archives but its own archives, then all nodes stores their respective archives including the admin. 
 (Not covered)
     # c. Print TODO messages for any node which doesn’t have enough space to create it’s archive. (Not covered)
     else :
+        __logger.info(
+            'WLSDPLY-05027',
+            'skip_transfer=true. Archives will NOT be uploaded to the bucket.',
+            class_name=_class_name, method_name=_method_name
+        )
         if space_admin_rc == 0:
+            __logger.info(
+                'WLSDPLY-05027',
+                'Admin have enough space to store all the archives. Managed hosts must have space for largest archive to transfer to admin.',
+                class_name=_class_name, method_name=_method_name
+            )
             process_archives(
                 nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name,
                 per_host_space_key="largest_archive",
                 archive_types=("oracle_home", "weblogic_home","java_home", "custom_dirs"),
+                transfer_to_admin=True,
                 do_upload=False
             )
         else :
+            __logger.info(
+                'WLSDPLY-05027',
+                'Admin does not have enough space to store all the archives. Each node stores its own full archive locally.',
+                class_name=_class_name, method_name=_method_name
+            )
             process_archives(
                 nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name,
                 per_host_space_key="full_archives",
                 archive_types=("all_archives",),
+                transfer_to_admin=False,
                 do_upload=False
             )

From 855350504d51f9b86215c576e1b1f52cbd846a8b Mon Sep 17 00:00:00 2001
From: Siddharth Sahu
Date: Mon, 1 Dec 2025 17:45:53 +0530
Subject: [PATCH 6/9] Minor bug fix and improved logs

---
 lib/python/archive_infra.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py
index bbb1aa3..f681c05 100644
--- a/lib/python/archive_infra.py
+++ b/lib/python/archive_infra.py
@@ -248,7 +248,9 @@ def ensure_bucket(oci_bucket_name, oci_compartment_id, log_file):
     # 1) Try to 'get' the bucket; redirect stdout+stderr to our log_file
     get_cmd = "oci os bucket get --bucket-name %s >> %s 2>&1"
% (oci_bucket_name, log_file) result = os.system(get_cmd) - if result != 0: + if result == 0: + return result + else: # bucket is not there, so try to create it __logger.info('WLSDPLY-05027', 'Bucket does not exist. Attempting to create bucket...', class_name=_class_name, method_name=_method_name) @@ -269,6 +271,7 @@ def ensure_bucket(oci_bucket_name, oci_compartment_id, log_file): # success __logger.info('WLSDPLY-05027',"Bucket created.", class_name=_class_name, method_name=_method_name) + return result2 def upload_to_bucket(file_path, log_file, on_prem_values, wls_domain_name): @@ -385,7 +388,7 @@ def delete_remote_archives(model_context, file_pattern): "rm -f %s/%s" % (remote_dir, file_pattern) ] - __logger.info('WLSDPLY-05027', 'Running remote cleanup: %s' % (" ".join(cmd_array)), + __logger.info('WLSDPLY-05027', 'Running remote cleanup: %s, filepattern %s' % (" ".join(cmd_array), file_pattern), class_name=_class_name, method_name=_method_name) runtime = Runtime.getRuntime() @@ -518,16 +521,16 @@ def process_archives(nodes, model, model_context, machine_nodes, base_location, raise ex # Upload/delete if enabled - if do_upload: - node_dir = per_machine_model_context.get_local_output_dir() + if do_upload: + node_dir = per_machine_model_context.get_local_output_dir() - for fname in os.listdir(node_dir): - for pattern in archive_patterns: - if fnmatch.fnmatch(fname, "*%s" % pattern): - path = os.path.join(node_dir, fname) - upload_to_bucket(path, log_file,on_prem_values, wls_domain_name) - delete_local(path) - delete_remote_archives(per_machine_model_context, fname) + for fname in os.listdir(node_dir): + for pattern in archive_patterns: + if fnmatch.fnmatch(fname, "*%s" % pattern): + path = os.path.join(node_dir, fname) + upload_to_bucket(path, log_file,on_prem_values, wls_domain_name) + delete_local(path) + delete_remote_archives(per_machine_model_context, fname) def __archive_directories(model, model_context, helper): @@ -617,7 +620,7 @@ def 
__archive_directories(model, model_context, helper): process_archives( nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, per_host_space_key="largest_archive", - archive_types=("oracle_home", "weblogic_home","java_home", "custom_dirs"), + archive_types=("domain_home","weblogic_home","java_home","custom_dirs"), transfer_to_admin=True, do_upload=True ) @@ -664,7 +667,7 @@ def __archive_directories(model, model_context, helper): process_archives( nodes, model, model_context, machine_nodes, base_location,init_argument_map, on_prem_values, space_status,log_file, wls_domain_name, per_host_space_key="largest_archive", - archive_types=("oracle_home", "weblogic_home","java_home", "custom_dirs"), + archive_types=("domain_home", "weblogic_home","java_home", "custom_dirs"), transfer_to_admin=True, do_upload=False ) From bae24f57615759722d0cb222c9ebc455d1d332a6 Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Tue, 2 Dec 2025 18:31:51 +0530 Subject: [PATCH 7/9] Minor fix in logs --- lib/python/archive_infra.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py index f681c05..88fd96c 100644 --- a/lib/python/archive_infra.py +++ b/lib/python/archive_infra.py @@ -649,9 +649,9 @@ def __archive_directories(model, model_context, helper): ) # Case 3: skip_transfer = true - # a. Admin have enough space to store all the archives, then all the archives are stored in the admin. (covered) - # b. Admin doesn’t have enough space to store all the archives but its own archives, then all nodes stores their respective archives including the admin. (Not covered) - # c. Print TODO messages for any node which doesn’t have enough space to create it’s archive. (Not covered) + # a. Admin have enough space to store all the archives, then all the archives are stored in the admin. + # b. 
Admin doesn’t have enough space to store all the archives but its own archives, then all nodes stores their respective archives including the admin. + # c. Print TODO messages for any node which doesn’t have enough space to create it’s archive. else : __logger.info( 'WLSDPLY-05027', From 16a6566582a42079c25fb3b1ad8949456406eef6 Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Wed, 3 Dec 2025 18:26:30 +0530 Subject: [PATCH 8/9] Added function description and small fixes. --- lib/python/archive_infra.py | 41 ++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py index 88fd96c..93fb4c5 100644 --- a/lib/python/archive_infra.py +++ b/lib/python/archive_infra.py @@ -388,7 +388,7 @@ def delete_remote_archives(model_context, file_pattern): "rm -f %s/%s" % (remote_dir, file_pattern) ] - __logger.info('WLSDPLY-05027', 'Running remote cleanup: %s, filepattern %s' % (" ".join(cmd_array), file_pattern), + __logger.info('WLSDPLY-05027', 'Running remote cleanup: %s, file pattern %s' % (" ".join(cmd_array), file_pattern), class_name=_class_name, method_name=_method_name) runtime = Runtime.getRuntime() @@ -469,6 +469,41 @@ def cleanup_archives(file_path, wls_domain_name): def process_archives(nodes, model, model_context, machine_nodes, base_location, init_argument_map, on_prem_values, space_status, log_file, wls_domain_name, per_host_space_key, archive_types, transfer_to_admin, do_upload): + """ + Execute archive generation and optional transfer/upload for each host. + + This function performs the following operations for each WebLogic machine node: + 1. Resolve SSH connection details to the host from the model context. + 2. Validate available disk space on the target host using space_status. + 3. Run WebLogic migration archiving for the selected archive types + (e.g. weblogic_home, java_home, custom_dirs, all_archives). + 4. 
Print TODO messages for any node which doesn't have enough space.
+    5. If enabled, upload each generated archive to OCI Object Storage.
+    6. If upload is enabled and succeeds, cleanup the local and remote archive files.
+
+    The behavior varies based on the arguments passed from __archive_directories():
+    - per_host_space_key controls whether we validate largest_archive or full_archives.
+    - transfer_to_admin determines if archives should be staged on admin.
+    - do_upload triggers OCI bucket upload and later the cleanup workflow.
+
+    :param nodes: dictionary of machine nodes from the model topology
+    :param model: WLSDeploy Model object
+    :param model_context: context containing CLI arguments and SSH settings
+    :param machine_nodes: dictionary from topology containing NODE_MANAGER infos
+    :param base_location: WDT LocationContext used for archive discovery
+    :param init_argument_map: processed CLI argument map used to configure SSH
+    :param on_prem_values: dictionary of on-prem.env configuration values
+    :param space_status: dictionary with disk space flags per host:
+        { "hostname": {"largest_archive": 0/1, "full_archives": 0/1}, ... }
+    :param log_file: path to output file where OCI CLI logs will be appended
+    :param wls_domain_name: domain name used to match archive filenames
+    :param per_host_space_key: "largest_archive" or "full_archives"
+        used to check if the host has enough available space
+    :param archive_types: tuple of archive identifiers to generate,
+        e.g. ("oracle_home", "weblogic_home", "java_home", "custom_dirs")
+    :param transfer_to_admin: if True, archives are first stored on the admin node before upload
+    :param do_upload: if True, upload archives to OCI and delete after success
+    """

     _method_name = 'process_archives'

@@ -649,7 +684,7 @@ def __archive_directories(model, model_context, helper):
     )

     # Case 3: skip_transfer = true
-    # a. 
Admin has enough space to store all the archives, then all the archives are stored in the admin. # b. Admin doesn’t have enough space to store all the archives but its own archives, then all nodes stores their respective archives including the admin. # c. Print TODO messages for any node which doesn’t have enough space to create it’s archive. else : @@ -661,7 +696,7 @@ def __archive_directories(model, model_context, helper): if space_admin_rc == 0: __logger.info( 'WLSDPLY-05027', - 'Admin have enough space to store all the archives. Managed hosts must have space for largest archive to transfer to admin.', + 'Admin has enough space to store all the archives. Managed hosts must have space for largest archive to transfer to admin.', class_name=_class_name, method_name=_method_name ) process_archives( From 1a09e9008b6519f2bef9816557e64af2f528182d Mon Sep 17 00:00:00 2001 From: Siddharth Sahu Date: Wed, 3 Dec 2025 18:34:24 +0530 Subject: [PATCH 9/9] minor changes --- lib/python/archive_infra.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/python/archive_infra.py b/lib/python/archive_infra.py index 93fb4c5..a03077a 100644 --- a/lib/python/archive_infra.py +++ b/lib/python/archive_infra.py @@ -481,11 +481,6 @@ def process_archives(nodes, model, model_context, machine_nodes, base_location, 5. If enabled, upload each generated archive to OCI Object Storage. 6. If upload is enabled and succeeds, cleanup the local and remote archive files. - The behavior varies based on the arguments passed from __archive_directories(): - - per_host_space_key controls whether we validate largest_archive or full_archives. - - transfer_to_admin determines if archives should be staged on admin. - - do_upload triggers OCI bucket upload and later the cleanup workflow. - :param nodes: dictionary of machine nodes from the model topology :param model: WLSDeploy Model object :param model_context: context containing CLI arguments and SSH settings