diff --git a/README.md b/README.md
index 8543059..614daab 100644
--- a/README.md
+++ b/README.md
@@ -77,6 +77,7 @@ ADDITIONAL_TF_OVERRIDE_LOCATIONS=/path/to/module1,path/to/module2 tflocal plan
 ## Change Log
 
+* v0.23.0: Add support for `terraform_remote_state` with `s3` backend to read the state stored in the local S3 backend; fix S3 backend config detection with multiple Terraform blocks
 * v0.22.0: Fix S3 backend forcing DynamoDB State Lock to be enabled by default
 * v0.21.0: Add ability to drop an override file in additional locations
 * v0.20.1: Fix list config rendering
diff --git a/bin/tflocal b/bin/tflocal
index 61bacda..ac4a386 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -18,7 +18,7 @@ import textwrap
 from packaging import version
 from urllib.parse import urlparse
-from typing import Iterable, Optional
+from typing import Iterable, Optional, Dict, Tuple
 
 PARENT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
 if os.path.isdir(os.path.join(PARENT_FOLDER, ".venv")):
@@ -75,6 +75,14 @@ terraform {
   }
 }
 """
+TF_REMOTE_STATE_CONFIG = """
+data "terraform_remote_state" "<data_name>" {
+    backend = "s3"
+    <workspace>
+    config = {<config>
+    }
+}
+"""
 PROCESS = None
 # some services have aliases which are mutually exclusive to each other
 # see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/custom-service-endpoints#available-endpoint-customizations
@@ -215,6 +223,9 @@ def create_provider_config_file(provider_file_path: str, provider_aliases=None)
     # create s3 backend config
     tf_config += generate_s3_backend_config()
 
+    # create remote state config
+    tf_config += generate_remote_state_config()
+
     # write temporary config file
     write_provider_config_file(provider_file_path, tf_config)
@@ -261,22 +272,81 @@ def determine_provider_aliases() -> list:
 
 def generate_s3_backend_config() -> str:
     """Generate an S3 `backend {..}` block with local endpoints, if configured"""
-    is_tf_legacy = TF_VERSION < version.Version("1.6")
-    backend_config = None
+    s3_backend_config = {}
     tf_files = parse_tf_files()
     for filename, obj in tf_files.items():
         if LS_PROVIDERS_FILE == filename:
             continue
         tf_configs = ensure_list(obj.get("terraform", []))
         for tf_config in tf_configs:
-            tmp_backend_config = ensure_list(tf_config.get("backend"))
-            if tmp_backend_config[0]:
-                backend_config = tmp_backend_config[0]
-                break
-    backend_config = backend_config and backend_config.get("s3")
-    if not backend_config:
+            if tf_config.get("backend"):
+                backend_config = ensure_list(tf_config.get("backend"))[0]
+                if backend_config.get("s3"):
+                    s3_backend_config = backend_config["s3"]
+                    break
+
+    if not s3_backend_config:
         return ""
 
+    config_values, config_string = _generate_s3_backend_config(s3_backend_config)
+    if not DRY_RUN:
+        get_or_create_bucket(config_values["bucket"])
+        if "dynamodb_table" in config_values:
+            get_or_create_ddb_table(
+                config_values["dynamodb_table"],
+                region=config_values["region"],
+            )
+
+    result = TF_S3_BACKEND_CONFIG.replace("<configs>", config_string)
+    return result
+
+
+def generate_remote_state_config() -> str:
+    """
+    Generate configuration for terraform_remote_state data sources to use LocalStack endpoints.
+    Similar to generate_s3_backend_config but for terraform_remote_state blocks.
+ """ + + tf_files = parse_tf_files() + result = "" + for filename, obj in tf_files.items(): + if LS_PROVIDERS_FILE == filename: + continue + data_blocks = ensure_list(obj.get("data", [])) + for data_block in data_blocks: + terraform_remote_state = data_block.get("terraform_remote_state") + if not terraform_remote_state: + continue + for data_name, data_config in terraform_remote_state.items(): + if data_config.get("backend") != "s3": + continue + # Create override for S3 remote state + backend_config = data_config.get("config", {}) + if not backend_config: + continue + workspace = data_config.get("workspace", "") + if workspace: + if workspace[0] == "$": + workspace = workspace.lstrip('${').rstrip('}') + else: + workspace = f'"{workspace}"' + workspace = f"workspace = {workspace}" + + _, config_str = _generate_s3_backend_config(backend_config) + + # Create the final config + remote_state_config = TF_REMOTE_STATE_CONFIG.replace( + "", data_name + ) \ + .replace("", config_str) \ + .replace("", workspace) + result += remote_state_config + + return result + + +def _generate_s3_backend_config(backend_config: Dict) -> Tuple[Dict, str]: + is_tf_legacy = TF_VERSION < version.Version("1.6") legacy_endpoint_mappings = { "endpoint": "s3", "iam_endpoint": "iam", @@ -284,8 +354,8 @@ def generate_s3_backend_config() -> str: "dynamodb_endpoint": "dynamodb", } - configs = { - # note: default values, updated by `backend_config` further below... + # Set up default config + default_config = { "bucket": "tf-test-state", "key": "terraform.tfstate", "region": get_region(), @@ -300,6 +370,7 @@ def generate_s3_backend_config() -> str: "dynamodb": get_service_endpoint("dynamodb"), }, } + # Merge in legacy endpoint configs if not existing already if is_tf_legacy and backend_config.get("endpoints"): print( @@ -308,15 +379,15 @@ def generate_s3_backend_config() -> str: exit(1) for legacy_endpoint, endpoint in legacy_endpoint_mappings.items(): if ( - legacy_endpoint in backend_config - and backend_config.get("endpoints") - and endpoint in backend_config["endpoints"] + legacy_endpoint in backend_config + and backend_config.get("endpoints") + and endpoint in backend_config["endpoints"] ): del backend_config[legacy_endpoint] continue if legacy_endpoint in backend_config and ( - not backend_config.get("endpoints") - or endpoint not in backend_config["endpoints"] + not backend_config.get("endpoints") + or endpoint not in backend_config["endpoints"] ): if not backend_config.get("endpoints"): backend_config["endpoints"] = {} @@ -324,40 +395,37 @@ def generate_s3_backend_config() -> str: {endpoint: backend_config[legacy_endpoint]} ) del backend_config[legacy_endpoint] + # Add any missing default endpoints if backend_config.get("endpoints"): backend_config["endpoints"] = { k: backend_config["endpoints"].get(k) or v - for k, v in configs["endpoints"].items() + for k, v in default_config["endpoints"].items() } + backend_config["access_key"] = ( get_access_key(backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY ) - configs.update(backend_config) - if not DRY_RUN: - get_or_create_bucket(configs["bucket"]) - if "dynamodb_table" in configs: - get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"]) - result = TF_S3_BACKEND_CONFIG - config_options = "" - for key, value in sorted(configs.items()): + + # Update with user-provided configs + default_config.update(backend_config) + # Generate config string + config_string = "" + for key, value in sorted(default_config.items()): if isinstance(value, bool): 
             value = str(value).lower()
         elif isinstance(value, dict):
             if key == "endpoints" and is_tf_legacy:
                 for legacy_endpoint, endpoint in legacy_endpoint_mappings.items():
-                    config_options += (
-                        f'\n    {legacy_endpoint} = "{configs[key][endpoint]}"'
-                    )
+                    config_string += f'\n    {legacy_endpoint} = "{default_config[key][endpoint]}"'
                 continue
             else:
+                joined_values = "\n".join([f'    {k} = "{v}"' for k, v in value.items()])
                 value = textwrap.indent(
-                    text=f"{key} = {{\n"
-                    + "\n".join([f'    {k} = "{v}"' for k, v in value.items()])
-                    + "\n}",
+                    text=f"{key} = {{\n{joined_values}\n}}",
                     prefix=" " * 4,
                 )
-                config_options += f"\n{value}"
+                config_string += f"\n{value}"
                 continue
         elif isinstance(value, list):
             # TODO this will break if it's a list of dicts or other complex object
@@ -366,13 +434,13 @@ def generate_s3_backend_config() -> str:
             value = f"[{', '.join(as_string)}]"
         else:
             value = f'"{str(value)}"'
-        config_options += f"\n    {key} = {value}"
-    result = result.replace("<configs>", config_options)
-    return result
+        config_string += f"\n    {key} = {value}"
+
+    return default_config, config_string
 
 
 def check_override_file(providers_file: str) -> None:
-    """Checks override file existance"""
+    """Checks override file existence"""
     if os.path.exists(providers_file):
         msg = f"Providers override file {providers_file} already exists"
         err_msg = msg + " - please delete it first, exiting..."
diff --git a/setup.cfg b/setup.cfg
index a568975..dad1bb1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = terraform-local
-version = 0.22.0
+version = 0.23.0
 url = https://github.com/localstack/terraform-local
 author = LocalStack Team
 author_email = info@localstack.cloud
diff --git a/tests/conftest.py b/tests/conftest.py
index 977eb81..5b3eea6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -5,6 +5,7 @@
 @pytest.fixture(scope="session", autouse=True)
 def start_localstack():
     subprocess.check_output(["localstack", "start", "-d"])
+    subprocess.check_output(["localstack", "wait"])
     yield
diff --git a/tests/test_apply.py b/tests/test_apply.py
index 0dc2865..a37584a 100644
--- a/tests/test_apply.py
+++ b/tests/test_apply.py
@@ -205,6 +205,47 @@ def test_s3_backend():
     assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
 
 
+def test_s3_backend_with_multiple_backends_in_files():
+    state_bucket = f"tf-state-{short_uid()}"
+    bucket_name = f"bucket-{short_uid()}"
+    config_main = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key = "terraform.tfstate"
+        region = "us-east-2"
+        skip_credentials_validation = true
+      }
+    }
+    resource "aws_s3_bucket" "test-bucket" {
+      bucket = "%s"
+    }
+    """ % (state_bucket, bucket_name)
+
+    config_other_file = """
+    terraform {}
+    """
+
+    # we need to manually create separate .tf files to verify the fix
+    with tempfile.TemporaryDirectory(delete=True) as temp_dir:
+        with (
+            open(os.path.join(temp_dir, "test.tf"), "w") as f,
+            open(os.path.join(temp_dir, "test_2.tf"), "w") as f2,
+        ):
+            f.write(config_main)
+            f2.write(config_other_file)
+
+        kwargs = {"cwd": temp_dir, "env": dict(os.environ)}
+        run([TFLOCAL_BIN, "init"], **kwargs)
+        run([TFLOCAL_BIN, "apply", "-auto-approve"], **kwargs)
+
+        # assert that bucket with state file exists
+        s3 = client("s3", region_name="us-east-2")
+        result = s3.list_objects(Bucket=state_bucket)
+        keys = [obj["Key"] for obj in result["Contents"]]
+        assert "terraform.tfstate" in keys
+
+
 def test_s3_backend_state_lock_default():
     state_bucket = f"tf-state-{short_uid()}"
     bucket_name = f"bucket-{short_uid()}"
@@ -242,6 +283,113 @@ def test_s3_backend_state_lock_default():
     assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
 
 
+def test_s3_remote_data_source():
+    state_bucket = f"tf-data-source-{short_uid()}"
+    bucket_name = f"bucket-{short_uid()}"
+    object_key = f"obj-{short_uid()}"
+    config = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key = "terraform.tfstate"
+        region = "us-east-1"
+        skip_credentials_validation = true
+      }
+    }
+
+    resource "aws_s3_bucket" "test_bucket" {
+      bucket = "%s"
+    }
+
+    output "bucket_name" {
+      value = aws_s3_bucket.test_bucket.bucket
+    }
+    """ % (state_bucket, bucket_name)
+    deploy_tf_script(config)
+
+    # assert that bucket with state file exists
+    s3 = client("s3", region_name="us-east-2")
+    result = s3.list_objects(Bucket=state_bucket)
+    keys = [obj["Key"] for obj in result["Contents"]]
+    assert "terraform.tfstate" in keys
+
+    # assert that S3 resource has been created
+    s3 = client("s3")
+    result = s3.head_bucket(Bucket=bucket_name)
+    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
+
+    # now deploy the part that needs the S3 remote state
+    config += """
+    data "terraform_remote_state" "remote_data" {
+      backend = "s3"
+
+      config = {
+        bucket = "%s"
+        key = "terraform.tfstate"
+        region = "us-east-1"
+      }
+    }
+
+    resource "aws_s3_object" "foo" {
+      bucket = data.terraform_remote_state.remote_data.outputs.bucket_name
+      key = "%s"
+      content = "test"
+    }
+
+    """ % (state_bucket, object_key)
+    deploy_tf_script(config)
+
+    get_obj = s3.get_object(Bucket=bucket_name, Key=object_key)
+    assert get_obj["Body"].read() == b"test"
+
+
+def test_s3_remote_data_source_with_workspace(monkeypatch):
+    monkeypatch.setenv("DRY_RUN", "1")
+    state_bucket = f"tf-data-source-{short_uid()}"
+    config = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key = "terraform.tfstate"
+        region = "us-east-1"
+        skip_credentials_validation = true
+      }
+    }
+
+    data "terraform_remote_state" "terraform_infra" {
+      backend = "s3"
+      workspace = terraform.workspace
+
+      config = {
+        bucket = "<bucket_name>"
+        workspace_key_prefix = "terraform-infrastructure/place"
+        key = "terraform.tfstate"
+      }
+    }
+
+    data "terraform_remote_state" "build_infra" {
+      backend = "s3"
+      workspace = "build"
+
+      config = {
+        bucket = "<bucket_name>"
+        workspace_key_prefix = "terraform-infrastructure"
+        key = "terraform.tfstate"
+      }
+    }
+
+    """.replace("<bucket_name>", state_bucket)
+
+    temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")
+    override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
+    assert check_override_file_exists(override_file)
+
+    with open(override_file, "r") as fp:
+        result = hcl2.load(fp)
+        assert result["data"][0]["terraform_remote_state"]["terraform_infra"]["workspace"] == "${terraform.workspace}"
+        assert result["data"][1]["terraform_remote_state"]["build_infra"]["workspace"] == "build"
+
+
 def test_dry_run(monkeypatch):
     monkeypatch.setenv("DRY_RUN", "1")
     state_bucket = "tf-state-dry-run"
@@ -269,7 +417,7 @@ def test_dry_run(monkeypatch):
     override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
     assert check_override_file_exists(override_file)
 
-    assert check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf)
+    check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf)
 
     # assert that bucket with state file exists
     s3 = client("s3", region_name="us-east-2")
@@ -400,7 +548,7 @@ def test_s3_backend_endpoints_merge(monkeypatch, endpoints: str):
 
     temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")
     override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
"localstack_providers_override.tf") assert check_override_file_exists(override_file) - assert check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf) + check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf) rmtree(temp_dir) @@ -409,19 +557,19 @@ def check_override_file_exists(override_file): def check_override_file_backend_endpoints_content(override_file, is_legacy: bool = False): - legacy_options = ( + legacy_options = { "endpoint", "iam_endpoint", "dynamodb_endpoint", "sts_endpoint", - ) - new_options = ( + } + new_options = { "iam", "dynamodb", "s3", "sso", "sts", - ) + } try: with open(override_file, "r") as fp: result = hcl2.load(fp) @@ -429,14 +577,14 @@ def check_override_file_backend_endpoints_content(override_file, is_legacy: bool except Exception as e: print(f'Unable to parse "{override_file}" as HCL file: {e}') - new_options_check = "endpoints" in result and all(map(lambda x: x in result.get("endpoints"), new_options)) - if is_legacy: - legacy_options_check = all(map(lambda x: x in result, legacy_options)) - return not new_options_check and legacy_options_check + assert "endpoints" not in result + assert legacy_options <= set(result) - legacy_options_check = any(map(lambda x: x in result, legacy_options)) - return new_options_check and not legacy_options_check + else: + assert "endpoints" in result + assert new_options <= set(result["endpoints"]) + assert not legacy_options & set(result) def test_provider_aliases_ignored(monkeypatch):