diff --git a/migration-tools/tts-backup-python/README.md b/migration-tools/tts-backup-python/README.md
new file mode 100644
index 0000000..1969d60
--- /dev/null
+++ b/migration-tools/tts-backup-python/README.md
@@ -0,0 +1,179 @@
+# Transportable Tablespaces Using Backup
+
+Transportable Tablespaces can be used to move tablespaces from a customer
+on-premises environment or another database cloud service into ADB-S.
+Tablespaces can be transported when creating a new database in ADB-S or
+as a modify operation on an existing database.
+
+## Step-by-step guide
+
+Transporting tablespaces involves the following steps.
+
+1. Create Object Storage Buckets
+
+2. Create Dynamic Group and Policy
+
+3. Backup Tablespaces on Source Database
+
+4. Create or Modify database in ADB-S by specifying intent to transport
+   tablespaces using a tag
+
+### Create Object Storage Buckets
+
+Transportable Tablespaces requires two object storage buckets - one for
+backups and another for metadata. Create the buckets in your Oracle
+Storage Cloud Service account. Note the URLs of the buckets, as they are
+needed as inputs for the operation. Use the [Oracle Cloud Infrastructure Object Storage Native URI Format](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/file-uri-formats.html) for the URLs.
+
+Example:
+`https://objectstorage.<region>.oraclecloud.com/n/<namespace-string>/b/<bucket>`
+
+To make the Object Storage URIs work, please [generate an API signing key](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/apisigningkey.htm).
+Download the private key .pem file and the API signing key config file
+with the contents below to the source database host.
+
+    [DEFAULT]
+    user=ocid1.user.oc1..xxxxx
+    fingerprint=f6:d6:e5:xxxxx
+    tenancy=ocid1.tenancy.oc1..xxxxx
+    region=us-ashburn-1
+    key_file=<path to the downloaded private key .pem file>
+
+Note - the user must have read and write access to both object storage buckets.
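+
+To sanity-check the config file and bucket access before running the
+backup, the OCI Python SDK can be used for a quick probe; this mirrors
+the bucket validation the TTS backup tool performs internally. A minimal
+sketch - the config path and bucket URL below are placeholders for your
+own values:
+
+    import oci
+    from urllib.parse import urlparse
+
+    def check_bucket(config_file, url):
+        # Native URI format:
+        # https://objectstorage.<region>.oraclecloud.com/n/<namespace>/b/<bucket>
+        parts = urlparse(url).path.split('/')
+        namespace, bucket = parts[2], parts[4]
+        config = oci.config.from_file(config_file, "DEFAULT")
+        client = oci.object_storage.ObjectStorageClient(config)
+        # get_bucket() raises if the bucket is missing or the credentials are wrong
+        client.get_bucket(namespace_name=namespace, bucket_name=bucket)
+        print(f"Bucket {bucket} is reachable.")
+
+    check_bucket("/home/oracle/oci_config",
+                 "https://objectstorage.us-ashburn-1.oraclecloud.com/n/mynamespace/b/tts-backup")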
+### Create Dynamic Group and Policy
+
+The Transportable Tablespaces functionality downloads metadata from the
+metadata bucket using an [OCI Resource Principal](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/resource-principal.html).
+
+Create a Dynamic Group and Policy to allow access to the metadata bucket
+using the resource principal:
+
+1. Create a Dynamic Group **TTSDynamicGroup** with the matching rule:\
+   ALL {resource.type = 'autonomousdatabase', resource.compartment.id = '<your_compartment_OCID>'}
+
+2. Create a Policy using the dynamic group with the policy statements:\
+   Allow dynamic-group **TTSDynamicGroup** to manage buckets in tenancy\
+   Allow dynamic-group **TTSDynamicGroup** to manage objects in tenancy
+
+   Prepend the identity domain name to the dynamic group name if needed, as below:\
+   Allow dynamic-group \<identity_domain_name\>/**TTSDynamicGroup** to manage buckets in tenancy\
+   Allow dynamic-group \<identity_domain_name\>/**TTSDynamicGroup** to manage objects in tenancy
+
+### Backup Tablespaces on Source Database
+
+#### Pre-requisites
+
+- Create a Project Directory that will be used as the staging location on the host running the source database.
+- Download the [Oracle Database Backup Cloud Module](https://www.oracle.com/database/technologies/oracle-cloud-backup-downloads.html) to the Project Directory. Unzip the downloaded opc_installer.zip in the project directory.
+- Download [tts-backup-python.zip](file:////confluence/download/attachments/11465427748/tts-backup-python.zip%3fversion=33&modificationDate=1764740426000&api=v2) to the project directory. Unzip tts-backup-python.zip in the project directory.
+- Provide inputs for the backup in the **tts-backup-env.txt** file.
+
+#### TTS Backup Tool inputs
+
+Open the tts-backup-env.txt file downloaded to the project directory and
+provide the following inputs in the file.
+
+##### Project and Tablespace inputs
+
+***PROJECT_NAME*** : (REQUIRED INPUT) Name for the transport tablespace project.\
+***DATABASE_NAME*** : (REQUIRED INPUT) Name of the database containing the tablespaces.\
+***TABLESPACES*** : (OPTIONAL INPUT) Comma-separated list of tablespaces to transport. If empty, all user tablespaces are added.\
+***SCHEMAS*** : (OPTIONAL INPUT) Comma-separated list of schemas to transport. If empty, all required users are added. None of the schemas may be a common user.
+
+##### Database connection inputs
+
+***HOSTNAME*** : (REQUIRED INPUT) Host where the database is running, used for connecting to the database.\
+***LSNR_PORT*** : (REQUIRED INPUT) Listener port, used for connecting to the database.\
+***DB_SVC_NAME*** : (REQUIRED INPUT) Database service name, used for connecting to the database.\
+***ORAHOME*** : (REQUIRED INPUT) Database home, $ORACLE_HOME.\
+***DBUSER*** : (REQUIRED INPUT) Username for connecting to the database. The user must have SYSDBA privileges.\
+***DBPASSWORD*** : (REQUIRED INPUT) Password for connecting to the database. (Provided at runtime: entered interactively or piped via standard input.)\
+***DB_VERSION*** : (REQUIRED INPUT) Database version; supported values are 11g, 12c, 19c, 23ai.
+
+##### Object Storage Service (OSS) inputs (required if using OSS for transport; leave them empty if using FSS)
+
+***TTS_BACKUP_URL*** : (REQUIRED INPUT) Object storage bucket URI for backup files.\
+***TTS_BUNDLE_URL*** : (REQUIRED INPUT) Object storage bucket URI for the transportable tablespace bundle.\
+***OCI_INSTALLER_PATH*** : (REQUIRED INPUT) Path to oci_install.jar.\
+***CONFIG_FILE*** : (REQUIRED INPUT) Path to the downloaded API keys config file. Make sure to update the key_file parameter in the config file with the path to your private key.\
+***COMPARTMENT_OCID*** : (REQUIRED INPUT) Compartment OCID of the bucket (TTS_BACKUP_URL) that stores the backup files.\
+***OCI_PROXY_HOST*** : (OPTIONAL INPUT) HTTP proxy server.\
+***OCI_PROXY_PORT*** : (OPTIONAL INPUT) HTTP proxy server connection port.
+
+##### File Storage Service (FSS) inputs (required if using FSS for transport; leave them empty if using OSS)
+
+***TTS_FSS_CONFIG*** : (REQUIRED INPUT) FSS configuration, given in the format FSS:\<...\>:\<...\>:\<...\>.\
+***TTS_FSS_MOUNT_DIR*** : (REQUIRED INPUT) Absolute path where the file system is mounted on the source database host.
+
+Refer to the ADB-S documentation for details on how the file system should be configured for use by ADB-S.
+
+##### TDE keys inputs
+
+***TDE_WALLET_STORE_PASSWORD*** : (REQUIRED only if any of the tablespaces are TDE encrypted) Required to export the TDE keys. (Provided at runtime: entered interactively or piped via standard input.)
+
+##### Final backup inputs
+
+***FINAL_BACKUP*** : (REQUIRED INPUT) The final backup is taken only if FINAL_BACKUP=TRUE or true. Accepted values are TRUE, true, FALSE, false. Used to indicate an incremental operation: specify TRUE for a non-incremental operation and FALSE for incremental backups. The last operation must be run with TRUE for the schema metadata to be exported.
+
+##### Performance inputs
+
+***PARALLELISM*** : (OPTIONAL INPUT) Number of channels to be used for backup; parallelism = cpu_count * instances.\
+***CPU_COUNT*** : (OPTIONAL INPUT) Number of CPUs to be used from an instance (used if PARALLELISM is not given).
+
+Leave these blank unless really needed.
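+
+For reference, a filled-in tts-backup-env.txt might look like the
+following (all values are illustrative placeholders):
+
+    [DEFAULT]
+    PROJECT_NAME=SALES_TTS
+    DATABASE_NAME=ORCL
+    TABLESPACES=SALES_DATA,SALES_IDX
+    SCHEMAS=SALES
+    HOSTNAME=srcdb.example.com
+    LSNR_PORT=1521
+    DB_SVC_NAME=orclpdb.example.com
+    ORAHOME=/u01/app/oracle/product/19.0.0/dbhome_1
+    DBUSER=sys
+    DB_VERSION=19c
+    TTS_BACKUP_URL=https://objectstorage.us-ashburn-1.oraclecloud.com/n/mynamespace/b/tts-backup
+    TTS_BUNDLE_URL=https://objectstorage.us-ashburn-1.oraclecloud.com/n/mynamespace/b/tts-bundle
+    OCI_INSTALLER_PATH=/home/oracle/tts/oci_install.jar
+    CONFIG_FILE=/home/oracle/tts/oci_config
+    COMPARTMENT_OCID=ocid1.compartment.oc1..xxxxx
+    FINAL_BACKUP=FALSE
+
+The tool is then run from the project directory. Passwords are not
+stored in the file; they are prompted for at runtime or can be piped in
+via standard input, as described in the script header:
+
+    python3 tts-backup.py
+    # or, non-interactively:
+    echo -e "<dbpassword>\n<tde_wallet_store_password>" | python3 tts-backup.py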
+### Create or Modify ADB-S database with TTS tag
+
+#### Create ADB-S database to transport tablespaces
+
+Use the steps below to transport tablespaces while creating an ADB-S database.
+
+1. Go to OCI Console → Oracle Database → Autonomous Database.
+2. Click ***Create Autonomous Database***.
+3. Provide all the necessary inputs.
+4. Select a database version that is equal to or greater than the source database version.
+5. Specify ***Storage (TB)*** in the ***Configure the database*** section to match the size of the tablespace(s) being transported.
+6. Click ***Show advanced options*** at the bottom of the page, then click the ***Tags*** tab in that section.
+7. Select ***Tag namespace*** as ***None (add a free-form tag)***, ***Tag key*** as ***ADB$TTS_BUNDLE_URL***, and ***Tag value*** as the metadata bundle URL given by the TTS backup tool.
+8. Click ***Add Tag***.
+9. Submit ***Create Autonomous Database***.
+
+The operation will first create the database and then trigger the transport tablespaces job.
+
+#### Modify ADB-S database to transport tablespaces
+
+Use the steps below to transport tablespaces to an existing database.
+
+1. Go to OCI Console → Oracle Database → Autonomous Database.
+2. Select and click on the database for transporting tablespaces.
+3. Verify ***Storage*** in the ***Resource allocation*** section. Use ***Manage resource allocation*** to increase storage if needed.
+4. If this is the first time the **ADB$TTS_BUNDLE_URL** tag is specified on the database:
+    a. Go to the ***More Actions → Tags*** menu item on the Autonomous Database Details page.
+    b. Select ***Tag namespace*** as ***None (add a free-form tag)***, ***Tag key*** as ***ADB$TTS_BUNDLE_URL***, and ***Tag value*** as the metadata bundle URL given by the TTS backup tool.
+    c. Click ***Add Tag***.
+    d. Submit ***Add Tags***.
+5. If the **ADB$TTS_BUNDLE_URL** tag was already specified during create or a previous update of the database:
+    a. Click the **Tags** tab on the Autonomous Database Details page.
+    b. Click the **Free-form tags** tab and edit the **ADB$TTS_BUNDLE_URL** tag.
+    c. Specify the new URL and submit the **Save** action.
+
+The operation will trigger the transport tablespaces job on the database.
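+
+The tag edits above can also be scripted. A minimal sketch using the OCI
+Python SDK - the database OCID and bundle URL are placeholders, and note
+that free-form tags supplied this way replace the database's existing
+free-form tag set, so include any tags you want to keep:
+
+    import oci
+
+    config = oci.config.from_file()  # defaults to ~/.oci/config
+    db_client = oci.database.DatabaseClient(config)
+
+    details = oci.database.models.UpdateAutonomousDatabaseDetails(
+        freeform_tags={
+            "ADB$TTS_BUNDLE_URL": "https://objectstorage.us-ashburn-1.oraclecloud.com"
+                                  "/n/mynamespace/b/tts-bundle/o/SALES_TTS_LEVEL_0.tgz"
+        }
+    )
+    db_client.update_autonomous_database(
+        "ocid1.autonomousdatabase.oc1..xxxxx", details)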
+#### To transport tablespaces using incremental database backups
+
+Create a database or update an existing database by specifying the **ADB$TTS_BUNDLE_URL** of the level 0 backup.\
+For each incremental and final backup, edit the tag with the URL corresponding to that backup, as described in **Step 5** above.\
+Before taking the final backup, alter all tablespaces being transported to read-only and specify FINAL_BACKUP as TRUE in tts-backup-env.txt.\
+Datafiles with incremental changes are restored to ADB-S from level 0 through the final step. Metadata is imported at the final step.
+
+#### To transport tablespaces using non-incremental database backups
+
+Non-incremental transport is a one-time operation where datafiles are restored and metadata is imported to ADB-S.\
+Alter all tablespaces being transported to read-only and specify FINAL_BACKUP as TRUE in tts-backup-env.txt.\
+Create a database or update an existing database by specifying the **ADB$TTS_BUNDLE_URL** of the one-time backup.
diff --git a/migration-tools/tts-backup-python/tts-backup-env.txt b/migration-tools/tts-backup-python/tts-backup-env.txt
new file mode 100755
index 0000000..e4ff8a1
--- /dev/null
+++ b/migration-tools/tts-backup-python/tts-backup-env.txt
@@ -0,0 +1,87 @@
+#
+# $Header: pdbcs/no_ship_src/service/scripts/tts-backup-env.txt /main/15 2025/08/13 07:58:35 hkarniya Exp $
+#
+# tts-backup-env.txt
+#
+# Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+#
+# NAME
+#   tts-backup-env.txt -
+#
+# DESCRIPTION
+#
+#
+# NOTES
+#
+#
+# MODIFIED   (MM/DD/YY)
+#   hkarniya 08/04/25 - Bug 38274082: Option to exclude statistics
+#   hkarniya 07/11/25 - Bug 38179045: Option to exclude tables
+#   hkarniya 07/01/25 - Bug 37848678: Provide dry-run option
+#   hkarniya 06/15/25 - Bug 38076817: support section size backup for tts
+#   hkarniya 05/20/25 - Bug 37973559: Code refactor
+#   hkarniya 05/15/25 - Bug 37819695: Take passwords as cli arguments or runtime inputs
+#   hkarniya 04/23/25 - Bug 37866112: Add to option USE_ALL_RAC_INSTANCES
+#   cchowdar 04/07/25 - Bug 37803647: Added FSS support
+#   hkarniya 04/05/25 - Bug 37793929: Migrate to OCI installer
+#   hkarniya 03/24/25 - Bug 37746561: Validate plsql objects and
+#                       add opc proxy host and port
+#   hkarniya 03/24/25 - Bug 37746539: Support 11g source database
+#   hkarniya 02/13/25 - Bug 37588601: support full transport of DB,
+#                       TABLESPACES is optional input
+#   hkarniya 01/30/25 - Bug 37515587: Update schemas as optional input
+#   hkarniya 10/03/24 - Creation
+#
+[DEFAULT]
+### Project and Tablespace inputs
+PROJECT_NAME=
+DATABASE_NAME=
+TABLESPACES=
+SCHEMAS=
+
+
+#### Database connection inputs
+HOSTNAME=
+LSNR_PORT=
+DB_SVC_NAME=
+ORAHOME=
+DBUSER=
+DB_VERSION=
+
+
+#### Object Storage Service (OSS) inputs
+#### OCI Installer inputs
+TTS_BACKUP_URL=
+TTS_BUNDLE_URL=
+OCI_INSTALLER_PATH=
+CONFIG_FILE=
+COMPARTMENT_OCID=
+OCI_PROXY_HOST=
+OCI_PROXY_PORT=
+
+
+### File Storage Service (FSS) inputs
+TTS_FSS_CONFIG=
+TTS_FSS_MOUNT_DIR=
+
+
+### Final backup inputs
+FINAL_BACKUP=
+
+
+### Performance inputs
+PARALLELISM=
+USE_ALL_RAC_INSTANCES=
+
+
+### Dry run inputs
+DRY_RUN=
+
+
+### Exclude tables input (case-sensitive)
+EXCLUDE_TABLES=
+
+
+## Exclude statistics input (TRUE/FALSE)
+## Default to FALSE
+EXCLUDE_STATISTICS=
\ No newline at end of file
diff --git a/migration-tools/tts-backup-python/tts-backup.py b/migration-tools/tts-backup-python/tts-backup.py
new file mode 100755
index 0000000..cbc18a3
--- /dev/null
+++ b/migration-tools/tts-backup-python/tts-backup.py
@@ -0,0 +1,2051 @@
+#!/usr/bin/python3
+#
+# $Header: pdbcs/no_ship_src/service/scripts/tts-backup.py /main/28 2025/11/23 15:55:21 hkarniya Exp $
+#
+# tts-backup.py
+#
+# Copyright (c) 2024, 2025, Oracle and/or its affiliates.
+# +# NAME +# tts-backup.py - +# +# DESCRIPTION +# Usage : echo -e "\n" | python3 tts-backup.py [OPTIONS] +# Options : --IGNORE_ERRORS= : Optional, provide true to ignore schema bould validation errors +# : --JDK8_PATH= : Optional, provide jdk8path to download wallet for objstore bucket +# +# NOTES +# +# +# MODIFIED (MM/DD/YY) +# hkarniya 11/18/25 - Bug 38667283: DRY_RUN enhancements (added expdp validation) +# hkarniya 11/18/25 - Bug 38667283: Update param-value to TRUE/FALSE for +# consistency +# sovaraka 10/07/25 - Bug 37924415, 37860857, 37925905: Added checks +# for redaction, ols, dvrealm policies +# hkarniya 09/11/25 - Bug 37893757: Add compatible validation +# sovaraka 08/15/25 - Bug 38150434: Added changes to use template +# hkarniya 08/04/25 - Bug 38274082: Fix imp errors +# hkarniya 07/30/25 - Bug 38258577: use scn for 11g incremental backup +# hkarniya 07/11/25 - Bug 38179045: Option to exclude tables +# hkarniya 07/01/25 - Bug 37848678: Provide dry-run option +# hkarniya 06/24/25 - Bug 38112133: Fix object validations and remove +# user input dependency +# hkarniya 06/15/25 - Bug 38076817: support section size backup for tts +# hkarniya 05/20/25 - Bug 37973559: Code refactor +# hkarniya 05/15/25 - Bug 37819695: Take passwords as cli arguments or runtime inputs +# hkarniya 05/15/25 - Bug 37860680: Limit project_name to 128 chars +# hkarniya 05/05/25 - Bug 37906886: Allow higher number of tablespaces +# for DRCC regions +# hkarniya 04/24/25 - Bug 37483994: allow higher db_files during tts +# hkarniya 04/23/25 - Bug 37866112: Add to option USE_ALL_RAC_INSTANCES, +# Fail tbs validation if DBTIME is different and contains TSLTZ. +# cchowdar 04/07/25 - Bug 37803647: Added FSS support +# hkarniya 04/05/25 - Bug 37793929: Migrate to OCI installer +# hkarniya 03/24/25 - Bug 37769080: Moved failed project dir to +# project_dir_failed to allow retry +# hkarniya 03/24/25 - Bug 37682974: Introduce additional storage +# for small file move operation +# hkarniya 03/24/25 - Bug 37746561: Validate plsql objects and +# add opc proxy host and port +# hkarniya 03/24/25 - Bug 37746539: Support 11g source database +# hkarniya 03/24/25 - Bug 37728223: Utilize multiple RAC instances +# hkarniya 03/13/25 - Bug 37584519: update complete ts_list to +# transport_set_check() to allow foreign key +# constraints +# hkarniya 02/13/25 - Bug 37588601: support full transport of DB, +# TABLESPACES is optional input +# hkarniya 02/11/25 - Bug 37549117: Validate SYS owned datatype columns +# hkarniya 01/31/25 - Bug 37544070: Add a check before export TDE Keys +# hkarniya 01/30/25 - Bug 37484020: Validate XMLType columns and +# XMLTYPE tables +# hkarniya 01/30/25 - Bug 37407438: Fix Bundle name with TimeDate +# hkarniya 01/30/25 - Bug 37515587: Update Schemas to optional and +# not a common user +# hkarniya 10/03/24 - Creation +# +import os +import sys +import json +try: + from configparser import ConfigParser +except ImportError: + from ConfigParser import ConfigParser +import string +import glob +import subprocess +import requests +from requests.auth import HTTPBasicAuth +import socket +import tarfile +import shutil +import oci +from urllib.parse import urlparse +import math +import getpass +import functools +from datetime import datetime + + +print = functools.partial(print, flush=True) +print_stderr = functools.partial(print, file=sys.stderr, flush=True) + +def secure_input(prompt): + # Interactive shell (e.g., manual input) + if sys.stdin.isatty(): + return getpass.getpass(prompt) + # Non-interactive (e.g., 
piped from echo or file) + else: + return sys.stdin.readline().strip() + +# Check if the current Python version meets the minimum requirement. +def check_python_version(min_version): + if sys.version_info < min_version: + raise EnvironmentError(f"This script requires Python version 3.6 or higher.") + +# Return the absolute path of the directory containing the script. +def scriptpath(): + return os.path.dirname(os.path.realpath(__file__)) + +# Return the single quotes comma-seperated item_list as string +def split_into_lines(items, to_upper=True, chunk_size=10): + """ + Join list of strings into comma-separated lines with line breaks every chunk_size items. + Optionally convert items to uppercase. + + Returns: + str: Joined string with commas and newlines. + """ + if to_upper: + item_array = [item.strip().upper() for item in items.split(',') if item.strip()] + else: + item_array = [item.strip() for item in items.split(',') if item.strip()] + chunk_size = 10 + chunks = [] + for i in range(0, len(item_array), chunk_size): + chunk = item_array[i:i+chunk_size] + chunks.append("'" + "','".join(chunk) + "'") + item_list = ",\n".join(chunks) + return item_list + + +class Environment: + """ + A class to load environment variables from a config file (tts-backup-env.txt) and expose them as attributes. + """ + + def __init__(self, args, env_file: str): + self.env_file = env_file + self._env = ConfigParser() + self._defaults = {} + + # Validate and load environment file + self._validate_env_file() + self._load_arg_variables(args) + self._load_env_variables() + self._preprocess() + + def __str__(self): + """Return a string representation of the loaded environment variables.""" + return "\n".join([f"{key}: {value}" for key, value in self.__dict__.items()]) + + def __getattr__(self,item): + """Handle missing attributes dynamically.""" + if item in self.__dict__: + return self.__dict__[item] + else: + raise AttributeError(f"{item} is not a valid attribute in the environment configuration.") + + def _validate_env_file(self): + """Validate the presence of the environment file.""" + if not os.path.isfile(self.env_file): + raise FileNotFoundError(f"Environment file '{self.env_file}' not found.") + + def usage(self): + print_stderr("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=") + print_stderr("optional arguments allowed : --IGNORE_ERRORS, --JDK8_PATH") + print_stderr("runtime required inputs : DBPASSWORD") + print_stderr("runtime optional inputs : TDE_WALLET_STORE_PASSWORD") + print_stderr("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=") + print_stderr("Provide inputs during runtime manually - ") + print_stderr(" Usage: python3 " + sys.argv[0] + " [OPTIONS]") + print_stderr(" Example: python3 " + sys.argv[0] + " --IGNORE_ERRORS= --JDK8_PATH= ") + print_stderr("") + print_stderr("Or provide inputs via standard input (e.g., for automation):") + print_stderr(" Usage: echo -e \"\\n\" | python3 " + sys.argv[0] + " [OPTIONS]") + print_stderr(" (If TDE not applicable) Usage: echo -e \"\" | python3 " + sys.argv[0] + " [OPTIONS]") + print_stderr("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=") + print_stderr(" ARGUMENTS DESCRIPTION ") + print_stderr("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=") + print_stderr("--IGNORE_ERRORS=") + print_stderr("--JDK8_PATH=") + print_stderr("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=") + sys.exit(1) + + def _load_arg_variables(self, args): + """Load environment variables from the arguments provided and + set them as class attributes.""" + arg_dict = {} + arg_vars = ['IGNORE_ERRORS', 'JDK8_PATH'] + + # Populate arg_dict from 
given args + for arg in args: + if arg.startswith('--') and '=' in arg: + key, value = arg[2:].split('=', 1) + key = key.strip() + value = value.strip() + if key not in arg_vars: + print(f"Invalid argument key: {key}") + self.usage() + arg_dict[key] = value + else: + print(f"Invalid argument format: {arg}") + self.usage() + + for key in arg_vars: + value = arg_dict.get(key) + setattr(self, key, value) + + + def _load_env_variables(self): + """Load environment variables from the config file and set them as class attributes.""" + self._env.read(self.env_file) + self._defaults = self._env['DEFAULT'] + + # Intialise Runtime Input vars... + runtime_vars = ['DBPASSWORD', 'TDE_WALLET_STORE_PASSWORD'] + for key in runtime_vars: + if key == "DBPASSWORD": + value = secure_input(f"Enter value for required variable {key}: ").strip() + if not value: + print(f"Missing required variable: {key}") + self.usage() + else: + value = secure_input( + f"Enter value for optional variable {key} \n" + f"Required only if any of the tablespaces are TDE encrypted " + f"(leave empty and press Enter if not applicable): " + ).strip() + setattr(self, key, value) + + # Define and load required variables + value_fss = self._defaults.get('TTS_FSS_CONFIG', '').strip() + value_objs = self._defaults.get('TTS_BACKUP_URL', '').strip() + if value_fss: + required_vars = [ + 'PROJECT_NAME', 'DATABASE_NAME', + 'HOSTNAME', 'LSNR_PORT', 'DB_SVC_NAME', 'ORAHOME', + 'DBUSER', 'TTS_FSS_CONFIG', 'TTS_FSS_MOUNT_DIR', + 'FINAL_BACKUP', 'DB_VERSION' + ] + elif value_objs: + required_vars = [ + 'PROJECT_NAME', 'DATABASE_NAME', + 'HOSTNAME', 'LSNR_PORT', 'DB_SVC_NAME', 'ORAHOME', + 'DBUSER', 'TTS_BACKUP_URL', 'TTS_BUNDLE_URL', + 'FINAL_BACKUP', 'DB_VERSION', + 'CONFIG_FILE', 'COMPARTMENT_OCID' + ] + else: + raise ValueError(f"Missing required environment variables related to file storage service or object storage service.") + + for var in required_vars: + value = self._defaults.get(var, '').strip() + if not value: + raise ValueError(f"Missing required environment variable: {var}") + setattr(self, var, value) + + # Limit project name to 128 characters + if len(self.PROJECT_NAME) > 128: + print_stderr("PROJECT_NAME exceeds the 128-character limit.") + exit(1) + + # Set one varable which defines customer choosen storage type disk ot objs + if value_fss: + setattr(self, 'STORAGE_TYPE', 'FSS') + elif value_objs: + setattr(self, 'STORAGE_TYPE', 'OBJECT_STORAGE') + + if value_objs: + if not os.path.isfile(self.CONFIG_FILE): + raise FileNotFoundError(f"CONFIG_FILE path '{self.CONFIG_FILE}' does not exist or is not a file.") + + # Read the OCI config file and set fingerprint, user, tenancy, region, and key_file attributes. 
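+            # Note: CONFIG_FILE must be in the standard OCI SDK profile format
+            # (see the README): a [DEFAULT] section with user, fingerprint,
+            # tenancy, region, and key_file entries.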
+ oci_config = ConfigParser() + oci_config.read(self.CONFIG_FILE) + if 'DEFAULT' not in oci_config: + raise ValueError(f"OCI config file {self.CONFIG_FILE} missing [DEFAULT] section.") + default_config = oci_config['DEFAULT'] + required_oci_keys = ['user', 'fingerprint', 'tenancy', 'region', 'key_file'] + for key in required_oci_keys: + value = default_config.get(key, '').strip() + if not value: + raise ValueError(f"Missing required key '{key}' in OCI config file {self.CONFIG_FILE}.") + # Set as USER, FINGERPRINT, TENANCY, REGION, KEY_FILE + setattr(self, key.upper(), value) + + # Load optional variables; set to empty if not found + optional_vars = ['SCHEMAS', 'TABLESPACES', 'OCI_INSTALLER_PATH', + 'OCI_PROXY_HOST', 'OCI_PROXY_PORT', 'USE_ALL_RAC_INSTANCES', + 'DRCC_REGION', 'DRY_RUN', 'EXCLUDE_TABLES', 'EXCLUDE_STATISTICS', + 'TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES', 'TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES', 'TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT','DVREALM_USER', 'DVREALM_PASSWORD'] + for var in optional_vars: + value = self._defaults.get(var, '').strip() + if value and (var == 'TABLESPACES' or var == 'SCHEMAS'): + value = value.replace(" ", "") + setattr(self, var, value) + + # Load optional int variables; set to 0 if not found + numeric_vars = ['PARALLELISM'] + for var in numeric_vars: + try: + value = self._defaults.get(var, '').strip() + if not value: + raise ValueError(f"Missing required environment variable: {var}") + else: + setattr(self, var, int(value)) + except ValueError as e: + print(f"Value Error for {var}: {e}. Expected an integer.") + + # Initialise CPU_COUNT + setattr(self, 'CPU_COUNT', 0) + + def _preprocess(self): + """Perform preprocessing tasks, removing files and setting default values.""" + project_dir = getattr(self, 'PROJECT_NAME') + project_dir_path = os.path.join(scriptpath(), project_dir) + setattr(self, 'PROJECT_DIR_PATH', project_dir_path) + + # Set default values + setattr(self, 'BACKUP_LEVEL', 0) + setattr(self, 'INCR_SCN', 0) + setattr(self, 'FINAL_BACKUP', getattr(self, 'FINAL_BACKUP', 'FALSE') or 'FALSE') + setattr(self, 'TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES', getattr(self, 'TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES', 'FALSE') or 'FALSE') + setattr(self, 'TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES', getattr(self, 'TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES', 'FALSE') or 'FALSE') + setattr(self, 'TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT', getattr(self, 'TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT', 'FALSE') or 'FALSE') + + final_backup = getattr(self, 'FINAL_BACKUP').strip().upper() + if final_backup not in ['TRUE', 'FALSE']: + raise ValueError(f"FINAL_BACKUP value should be one of ['TRUE' , 'FALSE' , 'true' , 'false'] but value is {final_backup}.") + + #todo for redaction and ols + redaction_policies = getattr(self, 'TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES').strip().upper() + if redaction_policies not in ['TRUE', 'FALSE']: + raise ValueError(f"TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES value should be one of ['TRUE' , 'FALSE' , 'true' , 'false'] but value is {redaction_policies}.") + + ols_policies = getattr(self, 'TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES').strip().upper() + if ols_policies not in ['TRUE', 'FALSE']: + raise ValueError(f"TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES value should be one of ['TRUE' , 'FALSE' , 'true' , 'false'] but value is {ols_policies}.") + + dvops_protection = getattr(self, 'TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT').strip().upper() + if dvops_protection not in ['TRUE', 
'FALSE']: + raise ValueError(f"TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT value should be one of ['TRUE' , 'FALSE' , 'true' , 'false'] but value is {dvops_protection}.") + + db_version = getattr(self, 'DB_VERSION').strip().lower() + if db_version not in ['11g', '12c', '19c', '23ai']: + raise ValueError(f"DB_VERSION value should be one of ['11g', '12c', '19c', '23ai'] but value is {db_version}.") + + setattr(self, 'USE_ALL_RAC_INSTANCES', getattr(self, 'USE_ALL_RAC_INSTANCES', 'TRUE') or 'TRUE') + use_all_rac_instances = getattr(self, 'USE_ALL_RAC_INSTANCES').strip().upper() + if use_all_rac_instances not in ['TRUE', 'FALSE']: + raise ValueError(f"USE_ALL_RAC_INSTANCES value should be one of ['TRUE' , 'FALSE' , 'true' , 'false'] but value is {use_all_rac_instances}.") + + # Set optional var's + opt_vars = ['DRCC_REGION', 'DRY_RUN', 'EXCLUDE_STATISTICS'] + for var in opt_vars: + setattr(self, var, getattr(self, var, 'FALSE') or 'FALSE') + value = getattr(self, var).strip().upper() + if value not in ['TRUE', 'FALSE']: + raise ValueError(f"{var} value should be one of ['TRUE' , 'FALSE' , 'true' , 'false'] but value is {value}.") + + # Other default settings + setattr(self, 'OCI_INSTALLER_PATH', getattr(self, 'OCI_INSTALLER_PATH', '') or scriptpath()) + setattr(self, 'TTS_WALLET_CRED_ALIAS', '') + setattr(self, 'TTS_DIR_NAME', 'TTS_DUMP_DIR') + setattr(self, 'TDE_KEYS_FILE', "tde_keys.exp") + setattr(self, 'NEXT_SCN', 0) + setattr(self, 'MAX_CHANNELS', 200) + setattr(self, 'RMAN_LOGFILES', []) + + # Create project manifest JSON file + self._create_manifest_file() + + # Create directories and bundle files + self._setup_backup_directories() + + #create directories needed if the storage type is fss + if getattr(self, 'STORAGE_TYPE') == "FSS": + path = os.path.join(getattr(self, 'TTS_FSS_MOUNT_DIR'), getattr(self,'PROJECT_NAME')) + os.makedirs(path, exist_ok=True) + os.makedirs(f"{path}/datafile", exist_ok=True) + os.makedirs(f"{path}/metadata", exist_ok=True) + + def _create_manifest_file(self): + """Create or load the project manifest file.""" + + # Retrieve project directory path + project_dir_path = getattr(self, 'PROJECT_DIR_PATH') + if project_dir_path and not os.path.exists(project_dir_path): + os.makedirs(project_dir_path) + print(f"Created project directory: {project_dir_path} \n") + + # No need for manifest json in case of dry run + if getattr(self, 'DRY_RUN').strip().upper() == "TRUE": + print(f"DRY_RUN : Skipping Create/Load of project manifest file...\n") + return + + # Load project manifest JSON file and process it if already exists + tts_project_file = os.path.join(project_dir_path, f"{getattr(self, 'PROJECT_NAME')}.json") + setattr(self, 'TTS_PROJECT_FILE', tts_project_file) + + # Create a new manifest file if it doesn't exist + if not os.path.isfile(tts_project_file): + project_data = { + "project_name": getattr(self, 'PROJECT_NAME'), + "backup_level": getattr(self, 'BACKUP_LEVEL'), + "incr_scn": getattr(self, 'INCR_SCN') + } + with open(tts_project_file, 'w') as f: + json.dump(project_data, f, indent=2) + print(f"Created project manifest file: {tts_project_file} \n") + else: + # Load existing backup level and incr_scn from the manifest file + with open(tts_project_file, 'r') as f: + project_data = json.load(f) + setattr(self, 'BACKUP_LEVEL', project_data.get('backup_level')) + setattr(self, 'INCR_SCN', project_data.get('incr_scn')) + print((f"Loaded existing project manifest file: {tts_project_file} \n")) + + def _setup_backup_directories(self): + """Set up backup directories and 
bundle files.""" + + # Create project directory based on backup level + project_dir_path = getattr(self, 'PROJECT_DIR_PATH') + + if getattr(self, 'DRY_RUN').strip().upper() == "TRUE": + tts_dir_path = os.path.join(project_dir_path, f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}_DRY_RUN") + else: + tts_dir_path = os.path.join(project_dir_path, f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}") + + # Check if the directory exists + if os.path.exists(tts_dir_path): + failed_dir = f"{tts_dir_path}_FAILED" + # Ensure the failed directory name is unique to avoid overwriting old failures + counter = 1 + while os.path.exists(failed_dir): + failed_dir = f"{tts_dir_path}_FAILED_{counter}" + counter += 1 + # Move the directory + shutil.move(tts_dir_path, failed_dir) + print(f"Moved existing failed directory '{tts_dir_path}' to '{failed_dir}'") + + # Create bundle file for transport of backup files + if getattr(self, 'DRY_RUN').strip().upper() == "TRUE": + bundle_file_name = f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}_DRY_RUN.tgz" + else: + bundle_file_name = f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}.tgz" + tts_bundle_file = os.path.join(project_dir_path, bundle_file_name) + if os.path.isfile(tts_bundle_file): + now = subprocess.check_output("date +%d-%b-%Y_%H_%M_%S", shell=True).decode().strip() + if getattr(self, 'DRY_RUN').strip().upper() == "TRUE": + bundle_file_name = f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}_DRY_RUN_{now}.tgz" + else: + bundle_file_name = f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}_{now}.tgz" + tts_bundle_file = os.path.join(os.path.dirname(tts_dir_path), bundle_file_name) + if getattr(self, 'DRY_RUN').strip().upper() == "TRUE": + tts_dir_path = os.path.join(project_dir_path, f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}_DRY_RUN_{now}") + else: + tts_dir_path = os.path.join(project_dir_path, f"{getattr(self, 'PROJECT_NAME')}_LEVEL_{getattr(self, 'BACKUP_LEVEL')}_{now}") + + os.makedirs(tts_dir_path, exist_ok=True) + setattr(self, 'TTS_DIR_PATH', tts_dir_path) + print(f"Created project directory with backup level {getattr(self, 'BACKUP_LEVEL')} : {tts_dir_path}.") + + setattr(self, 'BUNDLE_FILE_NAME', bundle_file_name) + setattr(self, 'TTS_BUNDLE_FILE', tts_bundle_file) + + +class SqlPlus: + """ + A class to run SQL commands using Oracle's SQL*Plus command-line tool. 
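+
+    Illustrative use (connection details are placeholders):
+        sp = SqlPlus("sys", dbpassword, "dbhost", 1521, "orclpdb",
+                     "/u01/app/oracle/product/19.0.0/dbhome_1")
+        ok = sp.run_sql("select name from v$database;", log_file="/tmp/query.log")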
+ """ + def __init__(self, dbuser, dbpassword, hostname, port, service_name, orahome): + """Initialize with database connection details.""" + self.dbuser = dbuser + self.dbpassword = dbpassword + self.hostname = hostname + self.port = port + self.service_name = service_name + self.orahome = orahome + + if not os.path.isdir(self.orahome): + raise ValueError(f"Invalid ORAHOME/ORACLE_HOME directory: {self.orahome}") + self.sqlplus_path = os.path.join(self.orahome, "bin", "sqlplus") + + if not os.path.isfile(self.sqlplus_path): + raise FileNotFoundError(f"SQL*Plus not found at {self.sqlplus_path}") + + def run_sql(self, sql_script, log_file=None, dv_user=False): + """Build the SQL*Plus command string.""" + if dv_user: + conn_string = f"{self.dbuser}/{self.dbpassword}@{self.hostname}:{self.port}/{self.service_name}" + else: + conn_string = f"{self.dbuser}/{self.dbpassword}@{self.hostname}:{self.port}/{self.service_name} as SYSDBA" + + + command = f'{self.orahome}/bin/sqlplus -s "{conn_string}" << EOF\n' + command += "whenever sqlerror exit 1;\n" + command += "set heading off\nset feedback off\nset pagesize 0\nset serveroutput on\n" + + if log_file: + command += f"spool {log_file};\n" + + if not sql_script.strip(): + raise ValueError("Empty SQL query provided.") + command += sql_script + + if log_file: + command += "\nspool off;" + command += "\nEOF" + + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + stdout, stderr = process.communicate() + + # Check for ORA- errors even if returncode is 0 + failed = False + for line in stdout.splitlines(): + if "ORA-" in line or "SP2-" in line: + print(f"[SQL*Plus ERROR] {line.strip()}") + failed = True + + if process.returncode != 0 or failed: + print("SQL execution failed with the following details:\n") + if stdout.strip(): + print(f"STDOUT (SQL*Plus Output):\n {stdout.strip()}\n") + if stderr.strip(): + print(f"STDERR (Additional Errors):\n {stderr.strip()}\n") + return False + + return True + + +class TTS_SRC_RUN_VALIDATIONS: + """ + A class to run schema and tablespace validations for transportable tablespaces. + """ + def __init__(self, env): + self._env = env + self._sqlplus = SqlPlus( + dbuser=self._env.DBUSER, + dbpassword=self._env.DBPASSWORD, + hostname=self._env.HOSTNAME, + port=self._env.LSNR_PORT, + service_name=self._env.DB_SVC_NAME, + orahome=self._env.ORAHOME + ) + self.log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_validate.log") + + def _get_tablespaces(self, template): + """Return all tablespaces if not given""" + if self._env.TABLESPACES: + return self._env.TABLESPACES + + print(f"Tablespaces not provided. 
Fetching all tablespaces...") + try: + _log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_get_tablespaces.log") + if not self._sqlplus.run_sql(template.get('get_tablespaces'), _log_file): + raise ValueError(f"Failed to fetch user tablespaces.") + + # Read the log file to get the SQL*Plus output + with open(_log_file, "r") as file: + lines = file.readlines() + tbs = [line.strip().upper() for line in lines if line.strip()] + + self._env.TABLESPACES = ",".join(tbs) + return self._env.TABLESPACES + + except Exception as e: + print(f"Error while fetching tablespaces : {e}") + raise + + def _get_schemas(self, template, user_type=None): + """Return Local/Common users""" + if user_type == "local": + if self._env.DB_VERSION == '11g': + sql_script = template.get('get_local_schemas_dbversion_11g') + else: + sql_script = template.get('get_local_schemas') + elif user_type == "common": + if self._env.DB_VERSION == '11g': + sql_script = template.get('get_common_schemas_dbversion_11g') + else: + sql_script = template.get('get_common_schemas') + elif user_type == "required": + ts_list = split_into_lines(self._env.TABLESPACES) + Configuration.substitutions = { + 'ts_list': ts_list.upper(), + } + sql_script = template.get('owners_in_tablespaces') + else: + if not self._env.SCHEMAS: + print("No schemas provided. Returning a list of required users.") + self._env.SCHEMAS = self._get_schemas(template, "required") + return self._env.SCHEMAS + + print(f"Fetching {user_type} users...") + try: + _log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_get_{user_type}_schemas.log") + if not self._sqlplus.run_sql(sql_script, _log_file): + raise ValueError(f"Failed to fetch {user_type} users.") + + # Read the log file to get the SQL*Plus output + with open(_log_file, "r") as file: + lines = file.readlines() + users = [line.strip().upper() for line in lines if line.strip()] + + return ",".join(users) + except Exception as e: + print(f"Error while fetching {user_type} users: {e}") + raise + + def _validate_schemas(self, template): + """Validate schemas""" + print("Validating schemas...") + # Validate schemas for common users + common_users = self._get_schemas(template, "common") + required_schemas = self._get_schemas(template, "required") + + sc_list = split_into_lines(self._env.SCHEMAS) + + exc_tbl_list = split_into_lines(self._env.EXCLUDE_TABLES, False) + + exc_tbl_filter = f"and table_name not in ({exc_tbl_list})" if exc_tbl_list.strip() else "" + + _log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_schema_validations.log") + for schema in self._env.SCHEMAS.split(','): + Configuration.substitutions = { + 'schema': schema.upper(), + 'exc_tbl_filter': exc_tbl_filter, + 'dry_run': self._env.DRY_RUN.strip().upper(), + } + if schema.upper() in common_users.split(','): + err_msg = f"Schema validation failed: {schema} is a common user. Common users are not allowed to transport." + if self._env.DRY_RUN.strip().upper() == "TRUE": + print(err_msg) + else: + raise ValueError(err_msg) + if not self._sqlplus.run_sql(template.get('validate_schemas'), _log_file): + print(f"Schema {schema.upper()} validations failed. \n") + self._print_log_and_exit(_log_file) + + if os.path.isfile(_log_file) and os.path.getsize(_log_file) > 0: + self._print_log_and_exit(_log_file, 0) + + for schema in required_schemas.split(','): + if schema.upper() in common_users.split(','): + err_msg = f"Schema validation failed: {schema} is a required schema to transport and is a common user. 
Common users are not allowed to transport. Please update TABLESPACES list in the env." + if self._env.DRY_RUN.strip().upper() == "TRUE": + print(err_msg) + else: + raise ValueError(err_msg) + if schema.upper() not in sc_list: + err_msg = f"Schema validation failed: {schema} is a required schema to transport." + if self._env.DRY_RUN.strip().upper() == "TRUE": + print(err_msg) + else: + raise ValueError(err_msg) + + def _validate_tablespaces(self, template): + """Tablespace validation""" + print("Validating tablespaces...") + ts_array = self._env.TABLESPACES.split(',') + ts_list = split_into_lines(self._env.TABLESPACES) + ts_count = len(ts_array) + + sc_list = split_into_lines(self._env.SCHEMAS) + + exc_tbl_list = split_into_lines(self._env.EXCLUDE_TABLES, False) + + self._validate_tablespace_count(ts_list, ts_count) + + print("Finding plsql objects that are not transported due to owner not in transport list") + self._run_object_validation(template, ts_list, sc_list) + + Configuration.substitutions = { + 'ts_list': ts_list.upper(), + 'final_backup': self._env.FINAL_BACKUP.upper(), + } + + if not self._sqlplus.run_sql(template.get('purge_dba_recyclebin')): + print(f"Validation failed: Found BIN$ objects in one or more tablespaces from tbs list..\n") + print(f"'Please purge dba_recyclebin to avoid move failures at ADBS'") + + for tablespace in ts_array: + tablespace = tablespace.strip().upper() + print(f"Validate if tablespace {tablespace} is ready for transport...") + self._run_tablespace_validation_script(template, tablespace, sc_list, exc_tbl_list) + if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) > 0: + print(f"Tablespace validations failed.") + self._print_log_and_exit(self.log_file, 0) + + if self._env.DB_VERSION == '11g': + ts_list = "'{}'".format(",".join(self._env.TABLESPACES.split(','))) + Configuration.substitutions = { + 'ts_list': ts_list + } + if not self._sqlplus.run_sql(template.get('validate_tablespaces_dbversion_11g'), f"{self.log_file} append"): + print(f"Tablespace validations failed. Please review {self.log_file} for details\n") + self._print_log_and_exit(self.log_file) + if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) > 0: + print(f"Tablespace validations failed.") + self._print_log_and_exit(self.log_file, 0) + + def _run_object_validation(self, template, ts_list, sc_list): + """Run SQL object validation for all tablespaces.""" + Configuration.substitutions = { + 'sc_list': sc_list.upper(), + 'ts_list': ts_list.upper(), + } + + if self._env.DRY_RUN.strip().upper() == "TRUE": + _log_file = os.path.join(self._env.PROJECT_DIR_PATH, f"{self._env.PROJECT_NAME}_LEVEL_{self._env.BACKUP_LEVEL}_dry_run_object_validations.log") + else: + _log_file = os.path.join(self._env.PROJECT_DIR_PATH, f"{self._env.PROJECT_NAME}_LEVEL_{self._env.BACKUP_LEVEL}_object_validations.log") + + if not self._sqlplus.run_sql(template.get('object_validation'), _log_file): + print(f"\n Object validations failed. 
Please review:") + print(f"{_log_file}\n") + raise RuntimeError(f"Failed to run object validations on tablespaces.") + + print(f"Object validations complete.\n") + + if os.path.isfile(_log_file) and os.path.getsize(_log_file) > 0: + print(f" Please review: {_log_file}\n") + print("This file contains a list of database objects that will NOT be transported.\n") + if self._env.IGNORE_ERRORS and self._env.IGNORE_ERRORS.upper() == 'TRUE': + print("Errors Ignored proceeding...\n") + else: + print("Please run with option --IGNORE_ERRORS=TRUE to ignore errors...\n") + print("IGNORE_ERRORS option not provided, exiting...") + print(f" Review: {_log_file}\n") + if self._env.DRY_RUN.strip().upper() == "FALSE": + sys.exit(1) + + def _run_tablespace_validation_script(self, template, tablespace, sc_list, exc_tbl_list): + """Run SQL validation for a single tablespace.""" + exc_tbl_filter = f"and t.table_name not in ({exc_tbl_list})" if exc_tbl_list.strip() else "" + exc_tbl_filter_seg = f"and c.table_name not in ({exc_tbl_list})" if exc_tbl_list.strip() else "" + exc_tbl_filter_dt = f"and atc.table_name not in ({exc_tbl_list})" if exc_tbl_list.strip() else "" + + Configuration.substitutions = { + 'tablespace': tablespace.upper(), + 'final_backup': self._env.FINAL_BACKUP.upper(), + 'sc_list': sc_list.upper(), + 'exc_tbl_filter': exc_tbl_filter, + 'exc_tbl_filter_seg': exc_tbl_filter_seg, + 'exc_tbl_filter_dt': exc_tbl_filter_dt, + 'dry_run': self._env.DRY_RUN.strip().upper(), + } + + if not self._sqlplus.run_sql(template.get('validate_tablespaces'), self.log_file): + print(f"Tablespace {tablespace} validations failed. \n") + self._print_log_and_exit(self.log_file, 0 if self._env.DRY_RUN.strip().upper() == "TRUE" else 1) + + def _validate_tablespace_count(self, ts_list, ts_count): + """Validate that the number of tablespaces does not exceed limits.""" + print("Validating tablespaces count...") + # check if tablespaces count will exceed 30 on ADWCS + # ADWCS will have 5 necessary tablespaces (SYSTEM, SYSAUX, UNDO, TEMP, DATA) + # ADWCS might have 2 more tablespace (SAMPLESCHEMA, DBFS_DATA) + # ADWCS will create one datafile for each of the transported tablespaces + # Maximum number of tablespaces that can be transported is (30 - 7 = 23) + if self._env.DRCC_REGION.upper() == 'TRUE': + # DRCC regions we allow tablespace limit upto 100 + # 100 - 7 (default) = 93 + ts_limit = 93 + else: + # NON DRCC regions we allow tablespace limit upto 30 + # 30 - 7 (default) = 23 + ts_limit = 23 + if ts_count > ts_limit: + print(f"Tablespaces count validation failed. \n") + print(f"ERROR : Total number of specified tablespaces are : {ts_count}. Max allowed tablespace count of 30 will be exceeded in ADWCS.") + if self._env.DRY_RUN.strip().upper() == "FALSE": + exit(1) + + def _validate_redaction_policies(self, template): + """Redaction Policy validation""" + print("Validating Redaction policies...") + sc_list = split_into_lines(self._env.SCHEMAS) + + Configuration.substitutions = { + 'sc_list': sc_list.upper(), + } + + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_redaction.log") + if not self._sqlplus.run_sql(template.get('validate_redaction_policies'), log_file): + print("Redaction Policies validation failed. \n") + self._print_log_and_exit(log_file) + + with open(log_file, 'r') as log: + redaction_pls = log.read().strip() + + if redaction_pls and self._env.TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES.upper() == "FALSE": + print("Redaction Policies found in the database. 
You have to create the redaction policies in ADB-S database. Redacted data will be unprotected in ADB-S database otherwise. \n") + print(f"[ERROR] Please provide consent to transport tables protected by redaction policies by specifying TRANSPORT_TABLES_PROTECTED_BY_REDACTION_POLICIES=TRUE. Redaction policies found are : {redaction_pls}") + if self._env.DRY_RUN.strip().upper() == "FALSE": + exit(1) + return redaction_pls + + def _validate_ols_policies(self, template): + """OLS Policies validation""" + print("Validating OLS policies...") + sc_list = split_into_lines(self._env.SCHEMAS) + + Configuration.substitutions = { + 'sc_list': sc_list.upper(), + } + + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_ols.log") + if not self._sqlplus.run_sql(template.get('validate_ols_policies'), log_file): + print("OLS Policies validation failed. \n") + self._print_log_and_exit(log_file) + + with open(log_file, 'r') as log: + ols_pls = log.read().strip() + + if ols_pls and self._env.TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES.upper() == "FALSE": + print("OLS Policies found in the database. You have to create the OLS policies in ADB-S database. Data protected by OLS will be unprotected in ADB-S otherwise.\n") + print(f"[ERROR] Please provide consent to transport tables protected by OLS policies by specifying TRANSPORT_TABLES_PROTECTED_BY_OLS_POLICIES=TRUE. OLS policies found are : {ols_pls}") + if self._env.DRY_RUN.strip().upper() == "FALSE": + exit(1) + + return ols_pls + + def _validate_dvrealm(self, template): + """DVREALM validation""" + print("Checking Database Vault protection...") + + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_dvops.log") + if not self._sqlplus.run_sql(template.get('validate_dvops_protection'), log_file): + print("Database Vault protection check failed. \n") + self._print_log_and_exit(log_file) + + with open(log_file, 'r') as log: + dvops_cnt = log.read().strip() + + if int(dvops_cnt) > 0 and self._env.TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT.upper() == "FALSE": + print("Database Vault protection is enabled in the database. You have to re-enable database vault protection on the ADB-S database. Transported data will be unprotected otherwise \n") + print("[ERROR] Please provide consent to transport Database Vault protected database by specifying TRANSPORT_DB_PROTECTED_BY_DATABASE_VAULT=TRUE.") + if self._env.DRY_RUN.strip().upper() == "FALSE": + exit(1) + + print("Checking Database Vault realms...") + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_dvrealm.log") + if not self._sqlplus.run_sql(template.get('validate_dvrealm_policies'), log_file): + print("Unable to check Database Vault realms. \n") + self._print_log_and_exit(log_file) + + with open(log_file, 'r') as log: + dvrealm_output = log.read().strip() + dvrealm_array = dvrealm_output.split(',') + + if int(dvrealm_array[0]) > 0 and int(dvrealm_array[1]) > 0 and int(dvrealm_array[2]) > 0: + if not self._env.DVREALM_USER.strip() or not self._env.DVREALM_PASSWORD.strip(): + print("[ERROR] Database Vault is enabled in the database. 
Please provide inputs for DVREALM_USER and DVREALM_PASSWORD.") + if self._env.DRY_RUN.strip().upper() == "FALSE": + exit(1) + + print("Validating of schemas protected by Database Vault realms...") + sqlplus = SqlPlus( + self._env.DVREALM_USER, + self._env.DVREALM_PASSWORD, + self._env.HOSTNAME, + self._env.LSNR_PORT, + self._env.DB_SVC_NAME, + self._env.ORAHOME + ) + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_dvschemas.log") + if not sqlplus.run_sql(template.get('get_dv_protected_schemas'), log_file, True): + print("Unable to check schemas protected by Database Vault realms \n") + self._print_log_and_exit(log_file) + + with open(log_file, 'r') as log: + dvschemas_output = log.read().strip() + + print(dvschemas_output) + if int(dvschemas_output) == 0: + print("[ERROR]Please authorize sys as an Data Pump user to transport the Database Vault protected schemas and retry the export\n") + if self._env.DRY_RUN.strip().upper() == "FALSE": + exit(1) + + def _print_log_and_exit(self, _log_file, _exit=1): + """Print log contents and exit.""" + with open(_log_file, 'r') as log: + print(log.read()) + if _exit == 1: + exit(1) + + +class TTS_SRC_CHECK_STORAGE_BUCKETS: + """ + Class to check if storage bucket exists + """ + def __init__(self, env): + self._env = env + self.tts_src_check_storage_buckets() + + def _validate_bucket(self, url): + """ + Helper function to check if a storage bucket exists. + """ + # Parse the URL + parsed_url = urlparse(url) + # Extract the region from the netloc (example: objectstorage.us-ashburn-1.oraclecloud) + region = parsed_url.netloc.split('.')[1] + # Extract the namespace from the path (after "/n/") + namespace = parsed_url.path.split('/')[2] + # Extract the bucket name from the path (after "/b/") + bucket_name = parsed_url.path.split('/')[4] + + config = oci.config.from_file(self._env.CONFIG_FILE, "DEFAULT") + config['region'] = region + try: + print(f"Validating storage bucket at URL: {url}...") + + object_storage_client = oci.object_storage.ObjectStorageClient(config=config, + service_endpoint=url.split('/n')[0]) + if self._env.OCI_PROXY_HOST and self._env.OCI_PROXY_PORT: + proxy_url = f"{self._env.OCI_PROXY_HOST}:{self._env.OCI_PROXY_PORT}" + object_storage_client.base_client.session.proxies = {'https': proxy_url} + + response = object_storage_client.get_bucket(namespace_name=namespace, bucket_name=bucket_name) + if response.status != 200: + raise ValueError(f"Failed to validate URI {url}. HTTP Status Code: {response.status} \n") + print(f"Successfully validated URI {url}.") + except Exception as e: + print(f"Error occurred while checking the bucket {url}: {str(e)}\n") + print("Check if the storage bucket exists and credentials are correct. \n") + sys.exit(1) + + def tts_src_check_storage_buckets(self): + """Function to check both backup and bundle storage buckets.""" + print("** Checking backup storage bucket... **") + self._validate_bucket(self._env.TTS_BACKUP_URL) + + print("** Checking bundle storage bucket... **") + self._validate_bucket(self._env.TTS_BUNDLE_URL) + + +class TTS_SRC_CREATE_WALLET: + """ + Class to check if storage bucket exists + """ + def __init__(self, env): + self._env = env + + def tts_src_create_backup_wallet(self): + # Check if the wallet file already exists + wallet_file_path = os.path.join(self._env.TTS_DIR_PATH, 'cwallet.sso') + if os.path.isfile(wallet_file_path): + print("Wallet file already exists in project directory. \n") + print("Please choose a different project name. 
\n") + exit(1) + + # Set JAVA_HOME and update PATH + os.environ['JAVA_HOME'] = os.path.join(self._env.ORAHOME, 'jdk') + os.environ['PATH'] = f"{os.environ['JAVA_HOME']}/bin:{os.environ['PATH']}" + + java_path = os.path.join(self._env.ORAHOME, 'jdk', 'bin', 'java') + # First attempt to run the Java command + if not self.run_java_oci_installer(java_path): + # If the first attempt fails, run with user provided jdk8 path and try again + if self._env.JDK8_PATH: + print("Running with provided JDK8_PATH...") + jdk8_path = self._env.JDK8_PATH + else: + print("\n Please run with option --JDK8_PATH=/path-to-jdk8 to retry with jdk8...\n") + print("JDK8_PATH option not provided, exiting...") + sys.exit(1) + + java_path = os.path.join(jdk8_path, 'bin', 'java') + + # Retry the Java command with JDK 8 + if not self.run_java_oci_installer(java_path): + print("Failed to install Oracle Database Cloud Backup Module using JDK 8 also. \n") + exit(1) + + print("Oracle Database Cloud Backup Module installed successfully. \n") + + oci_config_file = os.path.join(self._env.ORAHOME, 'dbs', f'opc{os.environ["ORACLE_SID"]}.ora') + try: + with open(oci_config_file, 'r') as file: + for line in file: + if "OPC_WALLET" in line: + self._env.TTS_WALLET_CRED_ALIAS = line.split("CREDENTIAL_ALIAS=")[1].strip().strip("'") + return self._env.TTS_WALLET_CRED_ALIAS + except Exception as e: + print(f"Error reading OPC config file: {e} \n") + return None + + def run_java_oci_installer(self, java_path): + command = [ + java_path, '-jar', self._env.OCI_INSTALLER_PATH, + '-host', self._env.TTS_BACKUP_URL.split('/n')[0], + '-pvtKeyFile', self._env.KEY_FILE, + '-pubFingerPrint', self._env.FINGERPRINT, + '-tOCID', self._env.TENANCY, + '-uOCID', self._env.USER, + '-cOCID', self._env.COMPARTMENT_OCID, + '-bucket', self._env.TTS_BACKUP_URL.split('/')[-1], + '-walletDir', self._env.TTS_DIR_PATH, + '-libDir', self._env.PROJECT_DIR_PATH, + '-import-all-trustcerts' + ] + if self._env.OCI_PROXY_HOST and self._env.OCI_PROXY_PORT: + command.extend(['-proxyHost', self._env.OCI_PROXY_HOST]) + command.extend(['-proxyPort', self._env.OCI_PROXY_PORT]) + + try: + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + bufsize=1 + ) + # Stream output line by line in real time + for line in process.stdout: + print(line.strip()) + + process.wait() + + if process.returncode != 0: + print(f"Failed to install Oracle Database Cloud Backup Module\n") + return False + print("OCI Backup Module installed successfully.") + return True + except Exception as e: + print(f"An error occurred while running the OCI installer: {e}") + return False + +class TTS_SRC_GATHER_DATA: + """ + Class to gather source database information + """ + def __init__(self, env): + self._env = env + + def tts_src_gather_data(self, template): + """Function to gather database properties and store them in the db_props_array""" + try: + sqlplus = SqlPlus( + self._env.DBUSER, + self._env.DBPASSWORD, + self._env.HOSTNAME, + self._env.LSNR_PORT, + self._env.DB_SVC_NAME, + self._env.ORAHOME + ) + + if self._env.DB_VERSION == '11g': + l_pdb_table = "database" + l_pdb_type = "dbid" + l_version_type = "version" + l_common_clause = "" + else: + l_pdb_table = "pdbs" + l_pdb_type = "guid" + l_version_type = "version_full" + l_common_clause = "where common='NO'" + + ts_list = split_into_lines(self._env.TABLESPACES) + sc_list = split_into_lines(self._env.SCHEMAS) + exc_tbl_list = split_into_lines(self._env.EXCLUDE_TABLES, False) + 
exc_tbl_filter = f"and t.table_name not in ({exc_tbl_list})" if exc_tbl_list.strip() else "" + + Configuration.substitutions = { + 'l_pdb_table': l_pdb_table, + 'l_pdb_type': l_pdb_type, + 'l_version_type': l_version_type, + 'ts_list': ts_list.upper(), + 'sc_list': sc_list.upper(), + 'database_name': self._env.DATABASE_NAME, + 'exc_tbl_filter': exc_tbl_filter, + 'l_common_clause': l_common_clause, + } + + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_data.log") + print(f"Executing SQL script to gather data into log file: {log_file}...") + + if not sqlplus.run_sql(template.get('tts_src_gather_data'), log_file): + print("Data gathering failed. Check the log file for more details. \n") + with open(log_file, 'r') as log: + print(log.read()) + return None + + # Read log and store output into an array + with open(log_file, 'r') as log: + db_properties = log.read().strip() + + # Split the output into an array + db_props_array = [prop.replace('\n', '').strip() for prop in db_properties.split(',')] + print("Data gathering completed successfully.") + return db_props_array + + except Exception as e: + print(f"EXCEPTION : An error occurred during data gathering: {str(e)} \n") + return None + + +class TTS_SRC_DIRECTORY_MANAGER: + """ + Class to create or drop a directory object in the database. + """ + def __init__(self, env, template): + self._env = env + self.tts_src_create_directory(template) + + def tts_src_create_directory(self, template): + """Function to create directory object in the database""" + sqlplus = SqlPlus(self._env.DBUSER, self._env.DBPASSWORD, + self._env.HOSTNAME, self._env.LSNR_PORT, + self._env.DB_SVC_NAME, self._env.ORAHOME) + + Configuration.substitutions = { + 'tts_dir_name': self._env.TTS_DIR_NAME, + 'tts_dir_path': self._env.TTS_DIR_PATH, + 'db_user': self._env.DBUSER, + } + + log_file = os.path.join(self._env.TTS_DIR_PATH, 'create_dir.log') + if not sqlplus.run_sql(template.get('tts_src_create_directory'), log_file): + print(f"Failed to create directory {self._env.TTS_DIR_NAME} \n") + self._log_error(log_file) + raise RuntimeError(f"Directory creation failed: {self._env.TTS_DIR_NAME}\n") + + print(f"Directory {self._env.TTS_DIR_NAME} created successfully.") + + def tts_src_drop_directory(self, template): + """Function to drop directory object in the database""" + sqlplus = SqlPlus(self._env.DBUSER, self._env.DBPASSWORD, + self._env.HOSTNAME, self._env.LSNR_PORT, + self._env.DB_SVC_NAME, self._env.ORAHOME) + + Configuration.substitutions = { + 'tts_dir_name': self._env.TTS_DIR_NAME, + } + if not sqlplus.run_sql(template.get('tts_src_drop_directory')): + print(f"Failed to drop directory {self._env.TTS_DIR_NAME} \n") + raise RuntimeError(f"Directory drop failed: {self._env.TTS_DIR_NAME}\n") + print(f"Directory {self._env.TTS_DIR_NAME} dropped successfully.") + + def _log_error(self, log_file): + """Helper function to log the error details from the log file.""" + if os.path.exists(log_file): + with open(log_file, 'r') as log: + error_details = log.read() + print(f"Error details:\n{error_details} \n") + else: + print(f"Log file {log_file} does not exist. \n") + + +class TTS_SRC_TDE_KEY_EXPORTER: + """ + A class to export Transparent Data Encryption (TDE) keys. 
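+
+    Keys are exported to tde_keys.exp in the project backup directory;
+    TDE_WALLET_STORE_PASSWORD must be supplied at runtime when any
+    transported tablespace is TDE encrypted.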
+ """ + def __init__(self, env, template): + self._env = env + self.tts_src_export_tde_keys(template) + + def tts_src_export_tde_keys(self, template): + """Export TDE keys to the specified path.""" + # Check if TDE_WALLET_STORE_PASSWORD is empty + if not self._env.TDE_WALLET_STORE_PASSWORD.strip(): + print("[ERROR] Please provide input for TDE_WALLET_STORE_PASSWORD. Encrypted tablespaces exists.") + self._env.usage() + exit(1) + + tde_keys_path = os.path.join(self._env.TTS_DIR_PATH, self._env.TDE_KEYS_FILE) + + if os.path.exists(tde_keys_path): + os.remove(tde_keys_path) + + sqlplus = SqlPlus(self._env.DBUSER, self._env.DBPASSWORD, + self._env.HOSTNAME, self._env.LSNR_PORT, + self._env.DB_SVC_NAME, self._env.ORAHOME) + + Configuration.substitutions = { + 'secret_code': self._env.DB_PROPS_ARRAY[0], + 'tde_keys_path': tde_keys_path, + 'tde_wallet_password': self._env.TDE_WALLET_STORE_PASSWORD + } + + try: + if not sqlplus.run_sql(template.get('tts_src_export_tde_keys')) or not os.path.exists(tde_keys_path): + print("Export TDE Keys failed. \n") + raise RuntimeError("TDE key export command failed.") + print(f"TDE keys exported successfully to {tde_keys_path}.") + except Exception as e: + print(f"An error occurred while exporting TDE keys: {e}. \n") + exit(1) + + def tts_src_export_tde_current_key(self, template): + """Export the current TDE key.""" + tde_keys_path = os.path.join(self._env.TTS_DIR_PATH, self._env.TDE_KEYS_FILE) + + if os.path.exists(tde_keys_path): + os.remove(tde_keys_path) + + sqlplus = SqlPlus(self._env.DBUSER, self._env.DBPASSWORD, + self._env.HOSTNAME, self._env.LSNR_PORT, + self._env.DB_SVC_NAME, self._env.ORAHOME) + + Configuration.substitutions = { + 'secret_code': self._env.DB_PROPS_ARRAY[0], + 'tde_keys_path': tde_keys_path, + 'tde_wallet_password': self._env.TDE_WALLET_STORE_PASSWORD, + 'dst_name': self._env.DB_PROPS_ARRAY[1] + } + + try: + if not sqlplus.run_sql(template.get('tts_src_export_tde_current_keys')): + print("Export TDE Current Key failed. \n") + raise RuntimeError("Current TDE key export command failed.") + print(f"Current TDE key exported successfully to {tde_keys_path}.") + except Exception as e: + print(f"An error occurred while exporting the current TDE key: {e} \n") + exit(1) + + +class TTS_SRC_WALLET_COPIER: + """ + A class to copy Oracle wallet files to specified hosts. 
+ """ + def __init__(self, env): + self._env = env + self.hosts = self.parse_hosts(self._env.DB_PROPS_ARRAY[13]) + self.tts_src_copy_wallet() + + def parse_hosts(self, host_string): + """Parse host string into a list of hostnames.""" + print(f"Parsing host string: {host_string}.") + return [host.strip() for host in host_string.split(';') if host.strip()] + + def tts_src_copy_wallet(self): + """Copy the wallet to the specified hosts.""" + current_host = socket.gethostname() + print(f"Copy wallet into the host list : {self.hosts}.") + + for host in self.hosts: + host_name = host.split(':')[0] # Extract hostname + if current_host == host_name: + print(f"Skipping wallet copy to current host: {current_host}.") + continue + + if self._env.USE_ALL_RAC_INSTANCES.upper() == 'TRUE': + self.copy_to_host(host_name) + + def copy_to_host(self, host_name): + """Copy wallet to the specified host using SSH and SCP.""" + try: + # Create the remote directory + cmd_mkdir = f"ssh -oStrictHostKeyChecking=no {host_name} 'mkdir -p {self._env.TTS_DIR_PATH}'" + print(f"Creating remote directory on {host_name}...") + subprocess.run(cmd_mkdir, shell=True, check=True) + + # Use SCP to copy the wallet file to other instance + local_wallet_path = os.path.join(self._env.TTS_DIR_PATH, "cwallet.sso") + remote_wallet_path = os.path.join(self._env.TTS_DIR_PATH, "cwallet.sso") + cmd_scp = f"scp -oStrictHostKeyChecking=no {local_wallet_path} {host_name}:{remote_wallet_path}" + print(f"Copying {local_wallet_path} to {host_name}:{remote_wallet_path}...") + subprocess.run(cmd_scp, shell=True, check=True) + print(f"Successfully copied wallet to {host_name}.") + + # Use SCP to copy the libopc.so file to other instance + local_lipopc_path = os.path.join(self._env.PROJECT_DIR_PATH, "libopc.so") + remote_lipopc_path = os.path.join(self._env.PROJECT_DIR_PATH, "libopc.so") + cmd_scp = f"scp -oStrictHostKeyChecking=no {local_lipopc_path} {host_name}:{remote_lipopc_path}" + print(f"Copying {local_lipopc_path} to {host_name}:{remote_lipopc_path}...") + subprocess.run(cmd_scp, shell=True, check=True) + print(f"Successfully copied libopc.so to {host_name}.") + except subprocess.CalledProcessError as e: + print(f"Failed to copy wallet to {host_name}: {e} \n") + except Exception as e: + print(f"An unexpected error occurred while copying wallet to {host_name}: {e} \n") + + +class TTS_SRC_RMAN_BACKUP: + """ + Class to perform RMAN Backup, export schema and tablespaces, and create manifest + """ + def __init__(self, env): + self._env = env + self.SCNS_ARRAY = [] + self.host_array = [] + + def get_cpu_count(self): + """Get the number of processors available.""" + try: + process = subprocess.run("nproc", shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + cpu_count = int(process.stdout.decode().strip()) + print(f"CPU Count: {cpu_count}") + return cpu_count + except subprocess.CalledProcessError as e: + print(f"Failed to get CPU count: {e.stderr.decode()}\n") + return 1 # Fallback to 1 CPU if unable to determine + + def _execute_command(self, command, command_type): + """Executes the given shell command and handles errors.""" + try: + process = subprocess.Popen(command, shell=True, stdout=sys.stdout, stderr=sys.stderr, universal_newlines=True) + stdout, stderr = process.communicate() + + if process.returncode == 0 or process.returncode == 5: + print(f"{command_type} executed successfully.") + return 0 + else: + print(f"{command_type} failed with return code {process.returncode} \n") + return 1 + except Exception as e: + 
print(f"{command_type} execution encountered an error: {str(e)} \n") + return 1 + + def _append_log_to_backup(self, log_filename): + """Appends the specified log file to backup.log.""" + log_path = os.path.join(self._env.TTS_DIR_PATH, log_filename) + backup_log_path = os.path.join(self._env.TTS_DIR_PATH, 'backup.log') + + if os.path.exists(log_path): + with open(log_path, 'r') as log_file: + log_content = log_file.read() + with open(backup_log_path, 'a') as backup_file: + backup_file.write(log_content) + + def tts_src_get_scn(self, template): + """Get the scn for the next backup""" + ts_list = split_into_lines(self._env.TABLESPACES) + + sqlplus = SqlPlus(self._env.DBUSER, self._env.DBPASSWORD, + self._env.HOSTNAME, self._env.LSNR_PORT, + self._env.DB_SVC_NAME, self._env.ORAHOME) + Configuration.substitutions = { + 'ts_list': ts_list.upper(), + 'incr_scn': self._env.INCR_SCN, + } + + log_file = os.path.join(self._env.TTS_DIR_PATH, f"{self._env.PROJECT_NAME}_scn.log") + if not sqlplus.run_sql(template.get('tts_src_get_scn'), log_file): + print("SCN gathering failed. \n") + with open(log_file, 'r') as log: + print(log.read()) + exit(1) + + # Read log and store output into an array + with open(log_file, 'r') as log: + scn_output = log.read().strip() + + # Split the output into an array + self.SCNS_ARRAY = scn_output.split(',') + print(f"SCNs gathered: {self.SCNS_ARRAY}.") + + return True + + def tts_src_get_channel(self): + """Construct the Channel string""" + self.host_array = self._env.DB_PROPS_ARRAY[13].split(';') + self._env.CPU_COUNT = self.calculate_cpu_count() + + if not self.host_array or self._env.CPU_COUNT <= 0: + print("No instances or CPUs to construct channel string. Exiting...") + return False + + rman_parms = "" + if self._env.STORAGE_TYPE == "FSS": + rman_parms = f""" + parms='SBT_LIBRARY=oracle.disksbt, + ENV=(BACKUP_DIR={self._env.TTS_FSS_MOUNT_DIR}/{self._env.PROJECT_NAME}/datafile)'; + """ + else: + rman_parms = f""" + parms='SBT_LIBRARY={self._env.PROJECT_DIR_PATH}/libopc.so, + ENV=(OPC_WALLET="LOCATION=file:{self._env.TTS_DIR_PATH} CREDENTIAL_ALIAS={self._env.TTS_WALLET_CRED_ALIAS}", + OPC_HOST={self._env.TTS_BACKUP_URL.split('/b/')[0]}, + OPC_CONTAINER={self._env.TTS_BACKUP_URL.split('/b/')[1]}, + OPC_AUTH_SCHEME=BMC, + OPC_CHUNK_SIZE=524288000, + _OPC_BUFFER_WRITE=TRUE, + _OPC_BUFFER_READ=TRUE, + _OPC_TAG_METERING=FALSE)'; + """ + + count = 0 + current_host = socket.gethostname() + for c in range(1, self._env.CPU_COUNT + 1): + for ele in self.host_array: + if count >= self._env.MAX_CHANNELS: + break + host_name, instance_name = ele.split(':') + # Skip allocating channel in other racn instances + # if USE_ALL_RAC_INSTANCES is set to FALSE + if self._env.USE_ALL_RAC_INSTANCES.upper() == 'FALSE': + if host_name != current_host: + continue + + count += 1 + + if self._env.DB_VERSION == '11g': + l_conn_str = f"{host_name}:{self._env.LSNR_PORT}/{self._env.DB_SVC_NAME}" + else: + l_conn_str = f"{host_name}:{self._env.LSNR_PORT}/{self._env.DB_SVC_NAME} as sysdba" + + self._env.CHANNEL_STRING += f""" + allocate channel c_{instance_name}_{c} device type sbt + connect '{self._env.DBUSER}/{self._env.DBPASSWORD}@{l_conn_str}' + {rman_parms} + """ + + print(f"Channel string constructed with {count} channels.") + return True + + def calculate_cpu_count(self): + """Calculate the CPU count based on the environment configuration.""" + if self._env.PARALLELISM == 0: + cpu_count = self._env.CPU_COUNT + if cpu_count <= 0: + cpu_count = self.get_cpu_count() + # We use 25% of cpu processors 
with a cap of 4 + cpu_count = (cpu_count + 4 - 1) // 4 + return min(cpu_count, 4) + else: + # We adjust parallelism = instances * cpu_count + if self._env.USE_ALL_RAC_INSTANCES.upper() == 'FALSE': + return self._env.PARALLELISM + else: + return (self._env.PARALLELISM + len(self.host_array) - 1) // len(self.host_array) + + def check_rman_log_for_errors(self, log_file): + """Checks the RMAN log file for errors and raises an exception if any are found.""" + try: + with open(log_file, 'r') as log: + log_contents = log.read() + + for line in log_contents.splitlines(): + if "RMAN-" in line and "WARNING" not in line: + print(f"RMAN errors found. {line}") + print(f"Please check logs at {log_file}.") + return False + return True + except Exception as e: + print(f"Error reading RMAN log file {log_file}: {str(e)}") + return False + + def tts_src_backup_tablespaces(self, backup_type, template): + """Perform RMAN BACKUP""" + if not self.host_array: + print("No host array available for backup. \n") + return False + + ele = self.host_array[0] + host_name, instance_name = ele.split(':') + compressed = "" + allow_inconsistent = "" + tablespace_dump_clause = "" + backup_string = "" + encryption_on_clause = "set encryption on;" + backup_set_transport = "" + encryption_off_clause = "" + tablespace_clause = self._env.encrypted_tablespaces + + if backup_type == "unencrypted": + encryption_on_clause = f'set encryption on identified by "{self._env.DB_PROPS_ARRAY[0]}" only;' + tablespace_clause = self._env.unencrypted_tablespaces + + if self._env.FINAL_BACKUP.upper() == "TRUE": + if self._env.DB_VERSION != '11g': + tablespace_dump_clause = ( + f"datapump format '{self._env.PROJECT_NAME}_DATAPUMP_%d_%U' " + f"dump file 'tablespace.dmp' " + f"destination '{self._env.TTS_DIR_PATH}'" + ) + else: + compressed = "compressed" + if self._env.DB_VERSION != '11g': + allow_inconsistent = "allow inconsistent" + + if self._env.DB_VERSION != '11g': + backup_set_transport = "for transport " + + # TODO + # Generate dynamic section size based on parallelism + # section_size = size_of_largest_tbs // parallelism + # It generates # of backuppieces = parallelism + # Round off to next hundred + # retrieve the largest tbs size (datafile size) + # largest_tbs_size = max(float(self._env.DB_PROPS_ARRAY[19]),float(self._env.DB_PROPS_ARRAY[20])) + # calculate section size + # section_size = largest_tbs_size // self._env.PARALLELISM + # round off the section_size to next 100 + # section_size = math.ceil(section_size / 100) * 100 + # 200 <= section_size <= 500 + # section_size = max(section_size, 200) + # section_size = min(section_size, 500) + # section_size = int(section_size) + + # Use level backup for 11G database to utilise section size + if self._env.DB_VERSION == '11g': + if self._env.BACKUP_LEVEL == 0: + incremental_clause = "incremental level 0" + else: + incremental_clause = "incremental level 1" + else: + incremental_clause = f"incremental from scn {self._env.INCR_SCN}" + + backup_string += ( + f"backup as {compressed} backupset {backup_set_transport}" + f"{allow_inconsistent} {incremental_clause} " + f"section size 200G " + f"tablespace {tablespace_clause.upper()} " + f"format '{self._env.PROJECT_NAME}_%d_%U' " + f"{tablespace_dump_clause};" + ) + + if self._env.DB_VERSION == '11g': + l_conn_str = f"{host_name}:{self._env.LSNR_PORT}/{self._env.DB_SVC_NAME}" + else: + l_conn_str = f"{host_name}:{self._env.LSNR_PORT}/{self._env.DB_SVC_NAME} as sysdba" + + if self._env.STORAGE_TYPE == "FSS": + encryption_off_clause = "set encryption 
off;" + encryption_on_clause = "" + + Configuration.substitutions = { + 'oracle_home': self._env.ORAHOME, + 'tts_dir_name': self._env.TTS_DIR_PATH, + 'backup_type': backup_type, + 'db_user': self._env.DBUSER, + 'db_password': self._env.DBPASSWORD, + 'l_conn_str': l_conn_str, + 'project_name': self._env.PROJECT_NAME, + 'encryption_on_clause': encryption_on_clause, + 'encryption_off_clause': encryption_off_clause, + 'backup_string': backup_string, + 'channel_string': self._env.CHANNEL_STRING, + } + + return_val = self._execute_command(template.get('tts_src_backup_tablespaces'), "RMAN") + + # Check the log file for errors after execution + log_file = f"{self._env.TTS_DIR_PATH}/backup_{backup_type}.log" + + if not self.check_rman_log_for_errors(log_file): + print(f"Error found in RMAN log file: {log_file}. Backup failed.") + return 1 + + if backup_type == "encrypted": + self._env.RMAN_LOGFILES.append("backup_encrypted.log") + + if backup_type == "unencrypted": + self._env.RMAN_LOGFILES.append("backup_unencrypted.log") + + return return_val + + def tts_src_export_schema(self, template): + """Export schemas""" + schema_dump_path = os.path.join(self._env.TTS_DIR_PATH, 'schema.dmp') + if os.path.exists(schema_dump_path): + os.remove(schema_dump_path) + + l_conn_str = f"{self._env.HOSTNAME}:{self._env.LSNR_PORT}/{self._env.DB_SVC_NAME} AS SYSDBA" + + Configuration.substitutions = { + 'oracle_home': self._env.ORAHOME, + 'db_user': self._env.DBUSER, + 'db_password': self._env.DBPASSWORD, + 'l_conn_str': l_conn_str, + 'schemas': self._env.SCHEMAS.upper(), + 'tts_dir_name': self._env.TTS_DIR_NAME, + } + expdp_command = ' '.join(line.strip() for line in template.get("tts_src_export_schema").splitlines() if line.strip()) + + return_val = self._execute_command(expdp_command, "EXPDP") + self._append_log_to_backup('export.log') + return return_val + + + def tts_src_export_tablespaces(self, template, _validate=False): + """Export tablespaces""" + if _validate: + tablespace_dump_path = os.path.join(self._env.TTS_DIR_PATH, 'validate_tablespace.dmp') + else: + tablespace_dump_path = os.path.join(self._env.TTS_DIR_PATH, 'tablespace.dmp') + if os.path.exists(tablespace_dump_path): + os.remove(tablespace_dump_path) + + l_conn_str = f"{self._env.HOSTNAME}:{self._env.LSNR_PORT}/{self._env.DB_SVC_NAME} AS SYSDBA" + + exclude_table_list = [tbl.strip() for tbl in self._env.EXCLUDE_TABLES.split(',') if tbl.strip()] + quoted_tables = ",".join(f"\\\'{tbl}\\\'" for tbl in exclude_table_list) + + xml_table_exclude_clause = "" + if quoted_tables: + xml_table_exclude_clause = f'TABLE:\\\"IN \({quoted_tables}\)\\\"' + + exclude_clause = "" + if self._env.EXCLUDE_STATISTICS.strip().upper() == "TRUE": + if xml_table_exclude_clause: + exclude_clause = f'EXCLUDE=STATISTICS,INDEX_STATISTICS,TABLE_STATISTICS,{xml_table_exclude_clause}' + else: + exclude_clause = 'EXCLUDE=STATISTICS,INDEX_STATISTICS,TABLE_STATISTICS' + elif xml_table_exclude_clause: + exclude_clause = f'EXCLUDE={xml_table_exclude_clause}' + + expdp_log_file = 'validate_export_tablespace.log' if _validate else 'export_tablespace.log' + Configuration.substitutions = { + 'oracle_home': self._env.ORAHOME, + 'db_user': self._env.DBUSER, + 'db_password': self._env.DBPASSWORD, + 'l_conn_str': l_conn_str, + 'tablespaces': self._env.TABLESPACES.upper(), + 'tts_dir_name': self._env.TTS_DIR_NAME, + 'exclude_clause': exclude_clause, + 'dump_file': 'validate_tablespace.dmp' if _validate else 'tablespace.dmp', + 'expdp_log_file': expdp_log_file, + 'tts_closure_check': 
'TTS_CLOSURE_CHECK=TEST_MODE' if _validate else '', + } + + expdp_command = ' '.join(line.strip() for line in template.get("tts_src_export_tablespaces").splitlines() if line.strip()) + print(expdp_command) + return_val = self._execute_command(expdp_command, "EXPDP") + self._append_log_to_backup(expdp_log_file) + return return_val + + def tts_src_create_manifest(self): + """Create manifest""" + ts_list = self._env.TABLESPACES.split(',') + schema_list = self._env.SCHEMAS.split(',') + + bf_ts_list = self._env.bigfile_tablespaces.split(',') + sf_ts_list = self._env.smallfile_tablespaces.split(',') + en_ts_list = self._env.encrypted_tablespaces.split(',') + ue_ts_list = self._env.unencrypted_tablespaces.split(',') + redaction_list = self._env.redaction_policies_list.split(',') + ols_list = self._env.ols_policies_list.split(',') + role_list = self._env.role_list.split(',') + mview_schemas = self._env.mview_schemas.split(',') + sched_cred_list = self._env.sched_cred_list.split(',') + + exclude_table_list = self._env.EXCLUDE_TABLES.split(',') + + new_backup_level = self._env.BACKUP_LEVEL + 1 + + if self._env.TBS_READ_ONLY == "true": + next_scn = self.SCNS_ARRAY[1] + else: + next_scn = self.SCNS_ARRAY[0] + + manifest_data = { + "epic_name": self._env.PROJECT_NAME, + "pdbname": self._env.DATABASE_NAME, + "pdb_guid": self._env.DB_PROPS_ARRAY[0], + "schemas": [sc.upper() for sc in schema_list], + "tablespaces": [ts.upper() for ts in ts_list], + "tde_keys_file": self._env.TDE_KEYS_FILE, + "uri": self._env.TTS_BACKUP_URL if self._env.STORAGE_TYPE == "OBJECT_STORAGE" else None, + "backupdir": self._env.TTS_FSS_MOUNT_DIR if self._env.STORAGE_TYPE == "FSS" else None, + "cred_alias": self._env.TTS_WALLET_CRED_ALIAS, + "dst_version": int(self._env.DB_PROPS_ARRAY[1]), + "platform": self._env.DB_PROPS_ARRAY[2], + "platform_id": int(self._env.DB_PROPS_ARRAY[3]), + "incr_scn": str(next_scn), + "nls_charset": self._env.DB_PROPS_ARRAY[5], + "nls_ncharset": self._env.DB_PROPS_ARRAY[6], + "db_edition": self._env.DB_PROPS_ARRAY[7], + "db_version": self._env.DB_PROPS_ARRAY[8], + "db_version_full": self._env.DB_PROPS_ARRAY[9], + "table_with_xml_type": self._env.DB_PROPS_ARRAY[10], + "dbtimezone": self._env.DB_PROPS_ARRAY[11], + "storage_size": self._env.DB_PROPS_ARRAY[12], + "sf_additional_size": self._env.DB_PROPS_ARRAY[19], + "bf_additional_size": self._env.DB_PROPS_ARRAY[20], + "backup_log": [], + "restore_log": {}, + "parallelism": self._env.PARALLELISM, + "tbs_read_only": self._env.TBS_READ_ONLY, + "final_backup": self._env.FINAL_BACKUP.upper(), + "backup_level": self._env.BACKUP_LEVEL, + "backup_logfile": self._env.RMAN_LOGFILES, + "bigfile_tablespaces": [ts.upper() for ts in bf_ts_list], + "smallfile_tablespaces": [ts.upper() for ts in sf_ts_list], + "encrypted_tablespaces": [ts.upper() for ts in en_ts_list], + "unencrypted_tablespaces": [ts.upper() for ts in ue_ts_list], + "role_list": [rl for rl in role_list], + "exclude_tables_list": [tbl for tbl in exclude_table_list], + "mview_schemas": [mvs for mvs in mview_schemas], + "sched_cred_list": [cred for cred in sched_cred_list], + "src_compatible": self._env.DB_PROPS_ARRAY[24], + "redaction_policies": [ps.upper() for ps in redaction_list], + "ols_policies": [ps.upper() for ps in ols_list] + } + + manifest_path = os.path.join(self._env.TTS_DIR_PATH, 'manifest.log') + with open(manifest_path, 'w') as manifest_file: + json.dump(manifest_data, manifest_file, indent=2) + + print("Manifest creation successful.") + + # Write project file + project_data = { + 
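+ # State carried between incremental runs: the backup level to use next
+ # and the SCN the next incremental backup starts from.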
"project_name": self._env.PROJECT_NAME, + "backup_level": new_backup_level, + "incr_scn": next_scn + } + + project_file_path = self._env.TTS_PROJECT_FILE + with open(project_file_path, 'w') as project_file: + json.dump(project_data, project_file, indent=2) + + print(f"Updated {self._env.TTS_PROJECT_FILE} Successful.") + + + +class TTS_SRC_BUNDLE_MANAGER: + """ + A class to manage creating and uploading bundles for transport. + """ + def __init__(self, env): + self._env = env + self.tts_src_create_bundle() + + def tts_src_create_bundle(self): + """Create a bundle file for transport.""" + current_dir = os.getcwd() + print(f"Creating bundle file: {self._env.TTS_BUNDLE_FILE}.") + + # Change to project directory + os.chdir(self._env.PROJECT_DIR_PATH) + bundle_dir = os.path.basename(self._env.TTS_DIR_PATH) + + schema_dump = "" + tablespace_dump = "" + + # Determine if final backup is requested + if self._env.FINAL_BACKUP.upper() == "TRUE": + tablespace_dump = os.path.join(bundle_dir, "tablespace.dmp") + schema_dump = os.path.join(bundle_dir, "schema.dmp") + + tde_path = "" + + # Check if there are encrypted tablespaces + if len([ts for ts in self._env.encrypted_tablespaces.split(',') if ts.strip()]) > 0: + tde_path = os.path.join(bundle_dir, self._env.TDE_KEYS_FILE) + + # Create the tar.gz bundle + try: + with tarfile.open(self._env.TTS_BUNDLE_FILE, "w:gz") as tar: + tar.add(os.path.join(bundle_dir, "manifest.log"), arcname=f"{os.path.basename(bundle_dir)}/manifest.log") + for log_file in os.listdir(bundle_dir): + if "encrypted.log" in log_file: + tar.add(os.path.join(bundle_dir, log_file), arcname=f"{os.path.basename(bundle_dir)}/{log_file}") + if self._env.STORAGE_TYPE == "OBJECT_STORAGE": + tar.add(os.path.join(bundle_dir, "cwallet.sso"), arcname=f"{os.path.basename(bundle_dir)}/cwallet.sso") + if schema_dump: + tar.add(schema_dump, arcname=f"{os.path.basename(bundle_dir)}/schema.dmp") + if tablespace_dump: + tar.add(tablespace_dump, arcname=f"{os.path.basename(bundle_dir)}/tablespace.dmp") + if tde_path: + tar.add(tde_path, arcname=f"{os.path.basename(bundle_dir)}/{self._env.TDE_KEYS_FILE}") + print(f"Bundle created successfully: {self._env.TTS_BUNDLE_FILE}.") + except Exception as e: + print(f"An error occurred while creating the bundle: {e} \n") + exit(1) + + + # Clean up the bundle directory + try: + os.system(f"rm -rf {bundle_dir}") + print(f"Cleaned up the bundle directory: {bundle_dir}.") + except Exception as e: + print(f"Failed to clean up the bundle directory: {e} \n") + + # Change back to the original working directory + os.chdir(current_dir) + + # Cleanup libopc.so downloaded by OCI Installer + if self._env.STORAGE_TYPE == "OBJECT_STORAGE": + os.remove(os.path.join(self._env.PROJECT_DIR_PATH, 'libopc.so')) + + # Remove remote directory created on other rac instances + self.hosts = [host.strip() for host in self._env.DB_PROPS_ARRAY[13].split(';') if host.strip()] + current_host = socket.gethostname() + + for host in self.hosts: + host_name = host.split(':')[0] + if current_host == host_name: + continue + if self._env.USE_ALL_RAC_INSTANCES.upper() == 'TRUE': + rm_tts_dir = f"ssh -oStrictHostKeyChecking=no {host_name} 'rm -rf {self._env.PROJECT_DIR_PATH}'" + print(f"Removing remote directory on {host_name}...") + subprocess.run(rm_tts_dir, shell=True, check=True) + + def tts_src_upload_bundle(self): + """Upload the created bundle to object storage or fss.""" + try: + if self._env.STORAGE_TYPE == "OBJECT_STORAGE": + url = f"{self._env.TTS_BUNDLE_URL}/o/{self._env.BUNDLE_FILE_NAME}" + 
# Parse the URL + parsed_url = urlparse(url) + # Extract the region from the netloc (example: objectstorage.us-ashburn-1.oraclecloud) + region = parsed_url.netloc.split('.')[1] + # Extract the namespace from the path (after "/n/") + namespace = parsed_url.path.split('/')[2] + # Extract the bucket name from the path (after "/b/") + bucket_name = parsed_url.path.split('/')[4] + + config = oci.config.from_file(self._env.CONFIG_FILE, "DEFAULT") + config['region'] = region + + object_storage_client = oci.object_storage.ObjectStorageClient(config=config, + service_endpoint=url.split('/n')[0]) + if self._env.OCI_PROXY_HOST and self._env.OCI_PROXY_PORT: + proxy_url = f"{self._env.OCI_PROXY_HOST}:{self._env.OCI_PROXY_PORT}" + object_storage_client.base_client.session.proxies = {'https': proxy_url} + + with open(self._env.TTS_BUNDLE_FILE, 'rb') as file: + response = object_storage_client.put_object( + namespace_name=namespace, + bucket_name=bucket_name, + object_name=self._env.BUNDLE_FILE_NAME, + put_object_body=file + ) + + # Check HTTP response code + if response.status != 200: + raise ValueError(f"Uploading transport bundle to object storage failed with HTTP status: {response.status}...") + print(f"Bundle uploaded successfully to {url}.") + self.display_success_message(url) + elif self._env.STORAGE_TYPE == "FSS": + url = f"{self._env.TTS_FSS_MOUNT_DIR}/{self._env.PROJECT_NAME}/metadata/" + os.system(f"cp {self._env.TTS_BUNDLE_FILE} {url}") + print(f"Bundle copied successfully to {url}.") + self.display_success_message(url) + except Exception as e: + print(f"Error occurred during the upload_bundle: {e} \n") + sys.exit(1) + + def display_success_message(self, url): + """Display a message upon successful upload.""" + print("---------") + min_storage_size = float(self._env.DB_PROPS_ARRAY[12]) + max(float(self._env.DB_PROPS_ARRAY[19]), + float(self._env.DB_PROPS_ARRAY[20])) + print(f"Create a database in ADB-S cloud with minimum storage size of {min_storage_size}GB") + print("Specify tag name/value as") + if self._env.STORAGE_TYPE == "OBJECT_STORAGE": + print(f"ADB$TTS_BUNDLE_URL: {url}") + print("---------") + +class Template(object): + def __init__(self, filename): + self._configpath = os.path.join(scriptpath(), filename) + if not os.path.isfile(self._configpath): + raise Exception( + 'TemplateFileNotFound', + "Template file %s not found" % self._configpath) + self._config = ConfigParser(interpolation=None) + self._config.read(self._configpath) + + def get(self, entry): + template_str = self._config.get('template', entry) + s = string.Template(template_str) + return s.safe_substitute(Configuration.substitutions) + +class Configuration: + substitutions = {} + + + +""" +Main function +""" +def main(args): + """ + Main entry function for the program + """ + try: + # Check if python version is >= 3 + check_python_version((3,6)) + + # Template for sql scripts + template = Template(os.path.join(scriptpath(), 'ttsTemplate.txt')) + + # Configure or Load the environment + _env = Environment(args, os.path.join(scriptpath(), 'tts-backup-env.txt')) + + ### STEP 1: RUN tablespace validations ### + print("\n* Run tablespace validations...\n") + start_time = datetime.utcnow() + print(f"Start Time (UTC): {start_time}") + run_validations = TTS_SRC_RUN_VALIDATIONS(_env) + _env.TABLESPACES = run_validations._get_tablespaces(template) + _env.SCHEMAS = run_validations._get_schemas(template) + _env.ols_policies_list = run_validations._validate_ols_policies(template) + if _env.DB_VERSION != '11g': + 
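+ # Redaction policy views are not assumed to exist on 11g, so the
+ # check is skipped for 11g sources.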
_env.redaction_policies_list = run_validations._validate_redaction_policies(template) + else: + _env.redaction_policies_list = '' + if _env.DB_VERSION != '11g': + run_validations._validate_dvrealm(template) + run_validations._validate_schemas(template) + run_validations._validate_tablespaces(template) + end_time = datetime.utcnow() + elapsed = end_time - start_time + print(f"Complete Time (UTC): {end_time}") + print(f"Elapsed Time: {elapsed}") + + if _env.STORAGE_TYPE == 'OBJECT_STORAGE': + ### STEP 2: Check storage buckets ### + print("\n* Check backup and bundle storage buckets...\n") + storage_bucket_check = TTS_SRC_CHECK_STORAGE_BUCKETS(_env) + + ### STEP 3: Create wallet to store backup credentials ### + print("\n* Add credential alias to backup wallet...\n") + create_wallet = TTS_SRC_CREATE_WALLET(_env) + _env.TTS_WALLET_CRED_ALIAS = create_wallet.tts_src_create_backup_wallet() + if _env.TTS_WALLET_CRED_ALIAS is None: + print("Failed to create wallet.\n") + exit(1) + + ### STEP 4: Gather data ### + print("\n* Gather database, pdb and tablespace properties...\n") + gather_data = TTS_SRC_GATHER_DATA(_env) + _env.DB_PROPS_ARRAY = gather_data.tts_src_gather_data(template) + if _env.DB_PROPS_ARRAY is None: + print("Failed to gather data.\n") + exit(1) + + _env.bigfile_tablespaces = _env.DB_PROPS_ARRAY[15].replace(';', ',') + _env.smallfile_tablespaces = _env.DB_PROPS_ARRAY[16].replace(';', ',') + _env.encrypted_tablespaces = _env.DB_PROPS_ARRAY[17].replace(';', ',') + _env.unencrypted_tablespaces = _env.DB_PROPS_ARRAY[18].replace(';', ',') + _env.role_list = _env.DB_PROPS_ARRAY[21].replace(';', ',') + _env.mview_schemas = _env.DB_PROPS_ARRAY[22].replace(';', ',') + _env.sched_cred_list = _env.DB_PROPS_ARRAY[23].replace(';', ',') + + _env.TBS_READ_ONLY = _env.DB_PROPS_ARRAY[14] + _env.TBS_READ_ONLY = _env.TBS_READ_ONLY.rstrip() # Remove trailing whitespace + + ### STEP 5a: Create directory object ### + print("\n* Create directory object...\n") + directory_manager = TTS_SRC_DIRECTORY_MANAGER(_env, template) + + if _env.DRY_RUN.strip().upper() != "TRUE": + ### STEP 5b: Export TDE keys ### + if _env.encrypted_tablespaces.strip(): + print("\n* Export TDE keys...\n") + tde_keys_exporter = TTS_SRC_TDE_KEY_EXPORTER(_env, template) + + if _env.STORAGE_TYPE == 'OBJECT_STORAGE': + ### Step 6: Copy wallet into all the hosts ### + print("\n* Copy wallet into the host list...\n") + wallet_copier = TTS_SRC_WALLET_COPIER(_env) + + ### Step 7a: Get SCN for next incremental backup just before the RMAN execution ### + print("\n* Get SCNS for next incremental backup...\n") + _env.CHANNEL_STRING = '' + rman_backup = TTS_SRC_RMAN_BACKUP(_env) + rman_backup.tts_src_get_scn(template) + + ### Step 7b: Build channel string for RMAN ### + print("\n* Construct channel string...\n") + rman_backup.tts_src_get_channel() + + ### Step 7c: Perform RMAN Backup for Tablespace### + rman_log_pattern = os.path.join(_env.TTS_DIR_PATH, 'rman_*.log') + os.system(f'rm -rf {rman_log_pattern}') + print(f"\n* Perform RMAN backup of {_env.TABLESPACES} tablespace datafiles and schema...\n") + + start_time = datetime.utcnow() + print(f"Start Time (UTC): {start_time}") + + if len([ts for ts in _env.encrypted_tablespaces.split(',') if ts.strip()]) > 0: + print("Backup for encrypted tablespaces started\n") + if rman_backup.tts_src_backup_tablespaces("encrypted", template) == 1: + print("Backup for encrypted tablespaces Failed.\n") + exit(1) + print("Backup for encrypted tablespaces completed successfully\n") + + if len([ts for ts in 
_env.unencrypted_tablespaces.split(',') if ts.strip()]) > 0:
+ print("Backup for unencrypted tablespaces started\n")
+ if rman_backup.tts_src_backup_tablespaces("unencrypted", template) == 1:
+ print("Backup for unencrypted tablespaces failed.\n")
+ sys.exit(1)
+ print("Backup for unencrypted tablespaces completed successfully\n")
+
+ end_time = datetime.utcnow()
+ elapsed = end_time - start_time
+ print(f"Complete Time (UTC): {end_time}")
+ print(f"Elapsed Time: {elapsed}")
+
+
+ ### STEP 7d: Export Schema ###
+ if _env.FINAL_BACKUP.upper() == "TRUE":
+ print(f"\n* Export {_env.SCHEMAS.upper()} schema using data pump...\n")
+ if rman_backup.tts_src_export_schema(template) == 1:
+ print("Export schema failed.\n")
+ sys.exit(1)
+
+ print(f"\n* Export {_env.TABLESPACES.upper()} tablespace metadata using data pump...\n")
+ if rman_backup.tts_src_export_tablespaces(template) == 1:
+ print("Export tablespace metadata failed.\n")
+ sys.exit(1)
+
+ ### STEP 8: Create manifest ###
+ print("\n* Create manifest...\n")
+ rman_backup.tts_src_create_manifest()
+
+ ### STEP 9: Create transport bundle ###
+ print("\n* Create transport bundle...\n")
+ bundle_manager = TTS_SRC_BUNDLE_MANAGER(_env)
+
+ ### STEP 10: Cleanup ###
+ if _env.FINAL_BACKUP.upper() == "TRUE":
+ print("\n* Final backup, dropping the project state json file...\n")
+ os.remove(_env.TTS_PROJECT_FILE)
+ print(f"\n* Dropping the {_env.TTS_DIR_NAME} directory object...\n")
+ directory_manager.tts_src_drop_directory(template)
+
+ ### STEP 11: Upload bundle to object store ###
+ print("\n* Upload transport bundle to object storage...\n")
+ bundle_manager.tts_src_upload_bundle()
+
+ if _env.STORAGE_TYPE == "FSS":
+ print(f"ADB$TTS_BUNDLE_URL : {_env.TTS_FSS_CONFIG}/{_env.BUNDLE_FILE_NAME}")
+ print("---------")
+ else:
+ if _env.DB_VERSION != '11g':
+ rman_backup = TTS_SRC_RMAN_BACKUP(_env)
+ print("\n* Validation export of tablespace metadata using data pump...\n")
+ if rman_backup.tts_src_export_tablespaces(template, True) == 1:
+ print("Export tablespace metadata failed.\n")
+ print("TTS BACKUP TOOL : Dry Run Completed Successfully...")
+
+ except FileNotFoundError as err_msg:
+ print(f"File Not Found Error: {err_msg}")
+ except EnvironmentError as err_msg:
+ print(f"Environment Error: {err_msg}")
+ except ValueError as err_msg:
+ print(f"Value Error: {err_msg}")
+ except AttributeError as err_msg:
+ print(f"Attribute Error: {err_msg}")
+ except RuntimeError as err_msg:
+ print(f"Runtime Error: {err_msg}")
+ except Exception as err_msg:
+ print(f"Error: {err_msg}")
+
+if __name__ == '__main__':
+ # Run the main function for direct invocation of the script
+ main(sys.argv[1:])
diff --git a/migration-tools/tts-backup-python/ttsTemplate.txt b/migration-tools/tts-backup-python/ttsTemplate.txt
new file mode 100755
index 0000000..6a0df14
--- /dev/null
+++ b/migration-tools/tts-backup-python/ttsTemplate.txt
@@ -0,0 +1,642 @@
+#
+#
+# TtsTemplate.txt
+#
+# Copyright (c) 2019, 2025, Oracle and/or its affiliates.
+# +# NAME +# TtsTemplate.txt - sql scripts for tts-backup.py +# +# DESCRIPTION +# +# +# NOTES +# +# +# MODIFIED (MM/DD/YY) +# hkarniya 11/18/25 - Bug 38667283: DRY_RUN enhancements +# sovaraka 10/07/25 - Bug 37924415,37860857,37925905: Added checks +# for redaction, ols, dvrealm policies +# hkarniya 09/11/25 - Bug 37893757: Add compatible validation +# sovaraka 07/03/23 - Bug 38150434: Added sql scripts for tts-backup +# sovaraka 07/01/25 - Creation +# +# + +[template] + +get_tablespaces: + select tablespace_name from dba_tablespaces where contents = 'PERMANENT' and + tablespace_name not in ('SYSTEM', 'SYSAUX'); + +validate_schemas: + declare + l_cnt number := 0; + l_msg varchar2(30000); + begin + select count(*) into l_cnt from dba_users where username = '${schema}'; + if l_cnt = 0 then + l_msg := 'ERROR : Schema ${schema} not found in the database.'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + -- check if there are any XMLSCHEMA tables + select count(*) into l_cnt from dba_xml_tables + where owner = '${schema}' + ${exc_tbl_filter}; + + if (l_cnt > 0) then + l_msg := 'ERROR : Validation failed for schema ${schema}: The schema contains XMLType tables linked to registered XMLSchemas. ' || + 'Autonomous Database does not support object-relational XMLType tables. ' || + 'Please use exclude_tables option to exclude these tables.. '; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + end; + / + +tts_src_get_scn: + declare + l_current_scn v\$database.current_scn%TYPE; + l_current_ckp v\$datafile_header.checkpoint_change#%TYPE; + begin + select current_scn into l_current_scn from v\$database; + select min(checkpoint_change#) into l_current_ckp from v\$datafile_header where tablespace_name in (${ts_list}) and checkpoint_change# >= ${incr_scn}; + dbms_output.put_line(l_current_scn || ',' || l_current_ckp); + end; + / + +validate_tablespaces: + declare + l_platform_name v\$database.platform_name%TYPE; + l_ts_block_size dba_tablespaces.block_size%TYPE; + l_ts_status dba_tablespaces.status%TYPE; + l_ts_bigfile dba_tablespaces.bigfile%TYPE; + l_ts_encrypted dba_tablespaces.encrypted%TYPE; + l_default_ts_name user_users.default_tablespace%TYPE; + l_cnt number; + l_nls_charset database_properties.property_value%TYPE; + l_nls_ncharset database_properties.property_value%TYPE; + l_timezone varchar2(32); + l_tts_charset_status boolean; + l_error_msg varchar2(1024); + l_msg varchar2(30000); + begin + if ('${tablespace}' = 'SYSTEM' or '${tablespace}' = 'SYSAUX') then + l_msg := 'ERROR : Administrative tablespaces SYSTEM, SYSAUX cannot be transported'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + -- Check if platform is supported for transport + begin + select d.platform_name into l_platform_name + from v\$database d, v\$transportable_platform t + where d.platform_id = t.platform_id; + exception + when no_data_found then + l_msg := 'ERROR : Source database platform ' || l_platform_name || ' is not supported for transportable tablespace.'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end; + + -- Check tablespace properties + select block_size, bigfile, encrypted, status + into l_ts_block_size, l_ts_bigfile, l_ts_encrypted, l_ts_status + from dba_tablespaces where 
tablespace_name = '${tablespace}'; + + if (l_ts_block_size != 8192) then + l_msg := 'ERROR : Tablespace ${tablespace} has block size ' || l_ts_block_size || '. Only 8K block size is supported.'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + if ('${final_backup}' = 'TRUE' and l_ts_status != 'READ ONLY') then + l_msg := 'ERROR : Tablespace ${tablespace} is not in read only mode in final backup'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + -- check if the tablespace is default tablespace of current user + -- check if current user's default tablespace is in read write mode + select u.default_tablespace, t.status into l_default_ts_name, l_ts_status + from user_users u, dba_tablespaces t where u.default_tablespace = t.tablespace_name; + if (l_default_ts_name = '${tablespace}') then + l_msg := 'ERROR : Tablespace ${tablespace} can not be default tablespace of user running transport.'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + if (l_ts_status != 'ONLINE') then + l_msg := 'ERROR : Default tablespace ' || l_default_ts_name || ' of current user is not in READ WRITE mode'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + -- check if xml type is used by the tablespace and XML DB is not supported by target + -- check if xml type col's exists for any table or table is xml type. + select count(distinct t.table_name) into l_cnt from dba_tables t + join dba_tab_columns c on t.table_name = c.table_name and t.owner = c.owner + where t.tablespace_name = '${tablespace}' + ${exc_tbl_filter} + and (t.table_name in (select table_name from dba_xml_tables) or c.data_type = 'XMLTYPE'); + if (l_cnt > 0) then + l_msg := 'ERROR : Tablespace ${tablespace} has tables containing XMLType data.'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + -- check if xml type is used by the tablespace from dba_segments + select count(distinct c.table_name) into l_cnt from dba_segments s + join dba_tab_columns c on s.owner = c.owner and s.segment_name = c.table_name + where s.tablespace_name = '${tablespace}' + ${exc_tbl_filter_seg} + and c.data_type = 'XMLTYPE'; + if (l_cnt > 0) then + l_msg := 'ERROR : Tablespace ${tablespace} has tables containing XMLType data.'; + if ('${dry_run}' = 'TRUE') then + dbms_output.put_line(l_msg); + else + raise_application_error(-20001, l_msg); + end if; + end if; + + -- check if any of the table column data type owners are not in schema list + select count(*) into l_cnt from all_tab_columns atc + JOIN all_tables at ON atc.table_name = at.table_name and atc.owner = at.owner + and at.tablespace_name = '${tablespace}' and atc.data_type_owner NOT IN (${sc_list}) + ${exc_tbl_filter_dt}; + if (l_cnt > 0) then + l_msg := + 'ERROR : Validation failed for tablespace "${tablespace}": Some columns reference data types owned by schemas that are not in the allowed transport list. ' || + 'Ensure all referenced data types belong to the transported schemas. 
' ||
+ 'Tablespace: "${tablespace}", Affected Columns: ' || l_cnt || '.';
+ if ('${dry_run}' = 'TRUE') then
+ dbms_output.put_line(l_msg);
+ else
+ raise_application_error(-20001, l_msg);
+ end if;
+ end if;
+
+ -- check for TIMESTAMP WITH LOCAL TIME ZONE (TSLTZ) columns when the
+ -- database time zone is not UTC
+ select dbtimezone into l_timezone from dual;
+ if (l_timezone != '+00:00') then
+ select count(*) into l_cnt from all_tab_columns atc
+ join all_tables at on atc.table_name = at.table_name
+ where atc.data_type = 'TIMESTAMP WITH LOCAL TIME ZONE' and
+ at.tablespace_name = '${tablespace}';
+ if (l_cnt > 0) then
+ l_msg := 'ERROR : Source database is using local time zone (TSLTZ) : ' || l_timezone ||
+ '. Tablespace "${tablespace}" contains TIMESTAMP WITH LOCAL TIME ZONE data.';
+ if ('${dry_run}' = 'TRUE') then
+ dbms_output.put_line(l_msg);
+ else
+ raise_application_error(-20001, l_msg);
+ end if;
+ end if;
+ end if;
+
+ -- run transport charset check
+ l_tts_charset_status := dbms_tts.transport_char_set_check(ts_list => '${tablespace}',
+ target_db_char_set_name => 'AL32UTF8', target_db_nchar_set_name => 'AL16UTF16', err_msg => l_error_msg);
+ if (l_tts_charset_status = false) then
+ select property_value into l_nls_charset from database_properties where property_name = 'NLS_CHARACTERSET';
+ select property_value into l_nls_ncharset from database_properties where property_name = 'NLS_NCHAR_CHARACTERSET';
+ l_msg := 'Transport tablespace charset check failed for tablespace ${tablespace} ' ||
+ '(NLS_CHARACTERSET=' || l_nls_charset || ', NLS_NCHAR_CHARACTERSET=' || l_nls_ncharset || '): ' || l_error_msg;
+ if ('${dry_run}' = 'TRUE') then
+ dbms_output.put_line(l_msg);
+ else
+ raise_application_error(-20001, l_msg);
+ end if;
+ end if;
+ exception
+ when others then
+ if ('${dry_run}' = 'TRUE') then
+ dbms_output.put_line('ERROR : ' || SQLERRM);
+ else
+ raise_application_error(-20001, 'ERROR : ' || SQLERRM);
+ end if;
+ end;
+ /
+
+validate_tablespaces_dbversion_11g:
+ -- Perform TTS Check
+ begin
+ dbms_tts.transport_set_check(ts_list => ${ts_list}, incl_constraints => TRUE, full_check => TRUE);
+ end;
+ /
+ select 'VIOLATION: ' || violations FROM transport_set_violations;
+ declare
+ l_violations_cnt number;
+ l_msg varchar2(30000);
+ begin
+ select count(*) into l_violations_cnt from transport_set_violations;
+ if l_violations_cnt > 0 then
+ l_msg := 'Transport set check failed for tablespace list.';
+ if ('${dry_run}' = 'TRUE') then
+ dbms_output.put_line(l_msg);
+ else
+ raise_application_error(-20001, l_msg);
+ end if;
+ end if;
+ end;
+ /
+
+get_common_schemas_dbversion_11g:
+ select username from dba_users where username in ('SYS','SYSTEM');
+
+get_common_schemas:
+ select username from dba_users where common = 'YES';
+
+get_local_schemas_dbversion_11g:
+ select username from dba_users where username not in ('SYS','SYSTEM');
+
+get_local_schemas:
+ select username from dba_users where common = 'NO';
+
+owners_in_tablespaces:
+ select distinct owner from dba_tables where tablespace_name in (${ts_list});
+
+object_validation:
+ -- SEGMENTS : Physical Segments (tables, indexes, lobs, partitions etc.)
+ SELECT 'SEGMENT,' || owner || '.' || segment_name
+ FROM dba_segments
+ WHERE tablespace_name IN (${ts_list})
+ AND owner NOT IN (${sc_list});
+
+ -- Indexes : already covered under dba_segments
+
+ -- LOB_Segments : already covered under dba_segments
+
+ -- Partitions : already covered under dba_segments
+
+ -- Triggers : Logical triggers (not covered by dba_segments)
+ SELECT 'TRIGGER,' || tr.owner || '.'
|| tr.trigger_name + FROM dba_triggers tr + JOIN dba_tables ta ON tr.table_name = ta.table_name AND tr.owner = ta.owner + WHERE ta.tablespace_name IN (${ts_list}) + AND tr.owner NOT IN (${sc_list}); + + -- Materialized View Logs : Materialized view logs + SELECT 'MVIEW_LOG,' || l.log_owner || '.' || l.log_table + FROM dba_mview_logs l + JOIN dba_segments s ON l.log_owner = s.owner AND l.log_table = s.segment_name + WHERE s.tablespace_name IN (${ts_list}) + AND l.log_owner NOT IN (${sc_list}); + + -- Materialized Views + SELECT 'MVIEW,' || m.owner || '.' || m.mview_name + FROM dba_mviews m + JOIN dba_segments s ON m.owner = s.owner AND m.mview_name = s.segment_name + WHERE s.tablespace_name IN (${ts_list}) + AND m.owner NOT IN (${sc_list}); + + -- Synonyms + SELECT 'SYNONYM,' || sy.owner || '.' || sy.synonym_name + FROM dba_synonyms sy + JOIN dba_tables ta ON sy.table_name = ta.table_name AND sy.owner = ta.owner + WHERE ta.tablespace_name IN (${ts_list}) + AND sy.owner NOT IN (${sc_list}); + + -- Constraints : Select Constraints belongs to ts_list and owner doesn't belong to sc_list + -- Not required, Table validation is enough + + -- Procedure, Functions, Packages, Views, Sequences (schema bound objects) + SELECT 'OBJECT,' || owner || '.' || object_name || ',' || object_type AS OBJECT_NAME + FROM dba_objects + WHERE object_type IN ('PROCEDURE', 'FUNCTION', 'PACKAGE', 'PACKAGE BODY', 'VIEW', 'SEQUENCE') + AND owner NOT IN (${sc_list}) + AND object_name IN ( + SELECT segment_name FROM dba_segments + WHERE tablespace_name IN (${ts_list}) + ); + +tts_src_create_directory: + set feedback off + create or replace directory ${tts_dir_name} as '${tts_dir_path}'; + GRANT READ, WRITE ON DIRECTORY ${tts_dir_name} TO ${db_user}; + +tts_src_drop_directory: + set feedback off + drop directory ${tts_dir_name}; + +tts_src_export_tde_keys: + set feedback off + administer key management export encryption keys with secret "${secret_code}" + to '${tde_keys_path}' force keystore identified by "${tde_wallet_password}"; + +tts_src_export_tde_current_keys: + set feedback off + administer key management export encryption keys with secret "${secret_code}" + to '${tde_keys_path}' force keystore identified by "${tde_wallet_password}" + with identifier in (select key_id from v\\$encryption_keys k, v\\$pdbs p + where p.con_id = k.con_id and p.name = '${dst_name}' and backed_up = 'NO'); + +tts_src_gather_data: + declare + l_platform_name v\$database.platform_name%TYPE; + l_platform_id v\$database.platform_id%TYPE; + l_current_scn v\$database.current_scn%TYPE; + l_product product_component_version.product%TYPE; + l_version product_component_version.version%TYPE; + l_version_full VARCHAR2(160); + l_dst_version database_properties.property_value%TYPE; + l_nls_charset database_properties.property_value%TYPE; + l_nls_ncharset database_properties.property_value%TYPE; + l_pdb_guid v\$$${l_pdb_table}.${l_pdb_type}%TYPE; + l_xmltype_exists varchar2(10); + l_timezone varchar2(32); + l_storage_size dba_data_files.bytes%TYPE; + l_sf_additional_size dba_data_files.bytes%TYPE; + l_bf_additional_size dba_data_files.bytes%TYPE; + l_host_list varchar2(4000) := ''; + l_tbs_read_only varchar2(10); + l_bigfile_list varchar2(30000) := ''; + l_smallfile_list varchar2(30000) := ''; + l_encrypted_list varchar2(30000) := ''; + l_unencrypted_list varchar2(30000) := ''; + l_role_list clob := empty_clob(); + l_mview_schemas clob := empty_clob(); + l_sched_cred_list clob := empty_clob(); + l_src_compatible varchar2(160); + begin + select 
${l_pdb_type} into l_pdb_guid from v\$$${l_pdb_table}; + select property_value into l_dst_version from database_properties where property_name = 'DST_PRIMARY_TT_VERSION'; + select platform_name, platform_id, current_scn into l_platform_name, l_platform_id, l_current_scn from v\$database; + select property_value into l_nls_charset from database_properties where property_name = 'NLS_CHARACTERSET'; + select property_value into l_nls_ncharset from database_properties where property_name = 'NLS_NCHAR_CHARACTERSET'; + select product, version into l_product, l_version from product_component_version where product like 'Oracle%Database%'; + BEGIN + EXECUTE IMMEDIATE 'SELECT ${l_version_type} FROM product_component_version where product like ''Oracle%Database%''' INTO l_version_full; + EXCEPTION + WHEN OTHERS THEN + l_version_full := l_version; + END; + select decode(count(*), 0, 'false', 'true') into l_xmltype_exists from dba_tables t + join dba_tab_columns c on t.table_name = c.table_name and t.owner = c.owner + where t.tablespace_name in (${ts_list}) + ${exc_tbl_filter} + and (t.table_name in (select table_name from dba_xml_tables) or c.data_type = 'XMLTYPE'); + + select dbtimezone into l_timezone from dual; + select (sum(bytes) / 1024 / 1024 / 1024) into l_storage_size from dba_data_files where tablespace_name in (${ts_list}); + + -- smallfile tablespaces move require additional size + select nvl(max(sum(df.bytes)/1024/1024/1024),0) into l_sf_additional_size + from dba_data_files df, dba_tablespaces dt + where df.tablespace_name = dt.tablespace_name and + df.tablespace_name in (${ts_list}) and dt.bigfile = 'NO' + group by df.tablespace_name; + + -- bigfile tablespaces move require additional size + select nvl(max(sum(df.bytes)/1024/1024/1024),0) into l_bf_additional_size + from dba_data_files df, dba_tablespaces dt + where df.tablespace_name = dt.tablespace_name and + df.tablespace_name in (${ts_list}) and dt.bigfile = 'YES' + group by df.tablespace_name; + + FOR rec IN (SELECT host_name, instance_name FROM gv\$$${l_pdb_table} pdb, gv\$instance inst + WHERE pdb.name = UPPER('${database_name}') + AND pdb.open_mode = 'READ WRITE' + AND pdb.inst_id = inst.inst_id) LOOP + l_host_list := l_host_list || ';' || rec.host_name || ':' || rec.instance_name; + END LOOP; + l_host_list := SUBSTR(l_host_list, 2); + select decode(count(*), 0, 'true', 'false') into l_tbs_read_only from dba_tablespaces where status != 'READ ONLY' and tablespace_name in (${ts_list}); + + FOR rec IN (SELECT tablespace_name, bigfile, encrypted FROM dba_tablespaces where tablespace_name IN (${ts_list})) LOOP + IF rec.bigfile = 'YES' THEN + l_bigfile_list := l_bigfile_list || ';' || rec.tablespace_name; + ELSE + l_smallfile_list := l_smallfile_list || ';' || rec.tablespace_name; + END IF; + + IF rec.encrypted = 'YES' THEN + l_encrypted_list := l_encrypted_list || ';' || rec.tablespace_name; + ELSE + l_unencrypted_list := l_unencrypted_list || ';' || rec.tablespace_name; + END IF; + END LOOP; + + IF LENGTH(l_bigfile_list) > 0 THEN + l_bigfile_list := SUBSTR(l_bigfile_list, 2); + END IF; + IF LENGTH(l_smallfile_list) > 0 THEN + l_smallfile_list := SUBSTR(l_smallfile_list, 2); + END IF; + IF LENGTH(l_encrypted_list) > 0 THEN + l_encrypted_list := SUBSTR(l_encrypted_list, 2); + END IF; + IF LENGTH(l_unencrypted_list) > 0 THEN + l_unencrypted_list := SUBSTR(l_unencrypted_list, 2); + END IF; + + FOR rec IN (SELECT distinct grantee from dba_tab_privs where owner in (${sc_list}) and + grantee in (select role from dba_roles 
${l_common_clause})) loop + l_role_list := l_role_list || ';' || rec.grantee; + END LOOP; + IF DBMS_LOB.getlength(l_role_list) > 0 THEN + l_role_list := DBMS_LOB.SUBSTR(l_role_list, DBMS_LOB.getlength(l_role_list) - 1, 2); + END IF; + + FOR rec IN (SELECT distinct owner from dba_mviews where owner in (${sc_list})) loop + l_mview_schemas := l_mview_schemas || ';' || rec.owner; + END LOOP; + IF DBMS_LOB.getlength(l_mview_schemas) > 0 THEN + l_mview_schemas := DBMS_LOB.SUBSTR(l_mview_schemas, DBMS_LOB.getlength(l_mview_schemas) - 1, 2); + END IF; + + FOR rec IN (SELECT credential_name from dba_scheduler_credentials where owner in (${sc_list})) loop + l_sched_cred_list := l_sched_cred_list || ';' || rec.credential_name; + END LOOP; + IF DBMS_LOB.getlength(l_sched_cred_list) > 0 THEN + l_sched_cred_list := DBMS_LOB.SUBSTR(l_sched_cred_list, DBMS_LOB.getlength(l_sched_cred_list) - 1, 2); + END IF; + + select value into l_src_compatible from v\$parameter where name = 'compatible'; + + dbms_output.put_line(l_pdb_guid || ',' || + l_dst_version || ',' || + l_platform_name || ',' || + l_platform_id || ',' || + l_current_scn || ',' || + l_nls_charset || ',' || + l_nls_ncharset || ',' || + l_product || ',' || + l_version || ',' || + l_version_full || ',' || + l_xmltype_exists || ',' || + l_timezone || ',' || + l_storage_size || ',' || + l_host_list || ',' || + l_tbs_read_only || ',' || + l_bigfile_list || ',' || + l_smallfile_list || ',' || + l_encrypted_list || ',' || + l_unencrypted_list || ',' || + l_sf_additional_size || ',' || + l_bf_additional_size || ',' || + l_role_list || ',' || + l_mview_schemas || ',' || + l_sched_cred_list || ',' || + l_src_compatible); + end; + / + +tts_src_export_schema: + ${oracle_home}/bin/expdp + \"${db_user}/${db_password}@${l_conn_str}\" + SCHEMAS=${schemas} + DIRECTORY=${tts_dir_name} + DUMPFILE=schema.dmp + CONTENT=METADATA_ONLY + CLUSTER=NO + EXCLUDE=TABLE,INDEX + LOGFILE=export.log + +tts_src_export_tablespaces: + ${oracle_home}/bin/expdp + \"${db_user}/${db_password}@ + ${l_conn_str}\" + DIRECTORY=${tts_dir_name} + DUMPFILE=${dump_file} + TRANSPORT_TABLESPACES=${tablespaces} + ${exclude_clause} + CLUSTER=NO + LOGFILE=${expdp_log_file} + ${tts_closure_check} + +tts_src_backup_tablespaces: + ${oracle_home}/bin/rman << EOF | tee ${tts_dir_name}/backup_${backup_type}.log + set echo on; + connect target '${db_user}/${db_password}@${l_conn_str}'; + set command id to '${project_name}_${backup_type}'; + ${encryption_off_clause} + set nocfau; + ${encryption_on_clause} + run { + ${channel_string} + ${backup_string} + } + EOF + +purge_dba_recyclebin: + declare + l_cnt number; + begin + if '${final_backup}' = 'TRUE' then + select count(*) into l_cnt from dba_recyclebin + where ts_name in (${ts_list}); + + if l_cnt > 0 then + raise_application_error(-20001, + 'Validation failed: Found BIN$ objects in one or more tablespaces from tbs list. ' || + 'Please purge dba_recyclebin to avoid move failures at ADBS.' 
+ ); + end if; + end if; + end; + / + +validate_ols_policies: + declare + l_ols_enable number := 0; + l_ols_schema number := 0; + l_ols_policy_list varchar2(30000) := ''; + begin + select count(*) into l_ols_enable from v\$$option where parameter = 'Oracle Label Security' and value = 'TRUE'; + select count(*) into l_ols_schema from dba_users where username = 'LBACSYS'; + IF l_ols_enable > 0 and l_ols_schema > 0 THEN + FOR rec IN (SELECT policy_name FROM all_sa_table_policies where SCHEMA_NAME in (${sc_list})) LOOP + l_ols_policy_list := l_ols_policy_list || ';' || rec.policy_name; + END LOOP; + IF LENGTH(l_ols_policy_list) > 0 THEN + l_ols_policy_list := SUBSTR(l_ols_policy_list, 2); + END IF; + END IF; + dbms_output.put_line(l_ols_policy_list); + end; + / + +validate_redaction_policies: + declare + l_redaction_policy_list varchar2(30000) := ''; + begin + FOR rec IN (SELECT policy_name FROM redaction_policies where object_owner IN (${sc_list})) LOOP + l_redaction_policy_list := l_redaction_policy_list || ';' || rec.policy_name; + END LOOP; + + IF LENGTH(l_redaction_policy_list) > 0 THEN + l_redaction_policy_list := SUBSTR(l_redaction_policy_list, 2); + END IF; + dbms_output.put_line(l_redaction_policy_list); + end; + / + +validate_dvops_protection: + declare + l_dvrealmapp_cnt number := 0; + begin + select count(*) into l_dvrealmapp_cnt from dba_dv_status WHERE name = 'DV_APP_PROTECTION' and status = 'ENABLED'; + dbms_output.put_line(l_dvrealmapp_cnt); + end; + / + +validate_dvrealm_policies: + declare + l_dvconf_cnt number := 0; + l_dvenabled_cnt number := 0; + l_dv_roles_granted number := 0; + begin + select count(*) into l_dvconf_cnt from dba_dv_status WHERE name = 'DV_CONFIGURE_STATUS' and status = 'TRUE'; + select count(*) into l_dvenabled_cnt from dba_dv_status WHERE name = 'DV_ENABLE_STATUS' and status = 'TRUE'; + select + case + when count(distinct granted_role) = 2 then 1 else 0 + end as dv_roles_granted + into l_dv_roles_granted + from dba_role_privs + where granted_role in ('DV_OWNER','DV_ACCTMGR'); + dbms_output.put_line(l_dvconf_cnt || ',' || l_dvenabled_cnt || ',' || l_dv_roles_granted); + end; + / + +get_dv_protected_schemas: + declare + l_dv_schemas number := 0; + l_datapump_user number := 1; + begin + select count(*) into l_dv_schemas from DBA_DV_REALM_object where owner in (${sc_list}); + IF l_dv_schemas > 0 THEN + select count(*) into l_datapump_user from DBA_DV_DATAPUMP_AUTH where grantee = 'SYS'; + END IF; + dbms_output.put_line(l_datapump_user); + end; + /