From c65904cff8a595420596ac54b8beb1dbbd9b893e Mon Sep 17 00:00:00 2001
From: Jessie Moss
Date: Thu, 15 May 2025 19:22:36 -0700
Subject: [PATCH 01/16] Add support for use_lockfile for s3 backend

---
 bin/tflocal | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 84db893..9642634 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -55,7 +55,8 @@ LOCALSTACK_HOSTNAME = (
     or os.environ.get("LOCALSTACK_HOSTNAME")
     or "localhost"
 )
-EDGE_PORT = int(urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566)
+EDGE_PORT = int(
+    urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566)
 TF_VERSION: Optional[version.Version] = None
 TF_PROVIDER_CONFIG = """
 provider "aws" {
@@ -186,7 +187,8 @@ def create_provider_config_file(provider_file_path: str, provider_aliases=None)
     for provider in provider_aliases:
         provider_config = TF_PROVIDER_CONFIG.replace(
             "<access_key>",
-            get_access_key(provider) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY,
+            get_access_key(
+                provider) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY,
         )
         endpoints = "\n".join(
             [f'    {s} = "{get_service_endpoint(s)}"' for s in services]
@@ -288,7 +290,7 @@ def generate_s3_backend_config() -> str:
         # note: default values, updated by `backend_config` further below...
         "bucket": "tf-test-state",
         "key": "terraform.tfstate",
-        "dynamodb_table": "tf-test-state",
+        "use_lockfile": True,
         "region": get_region(),
         "skip_credentials_validation": True,
         "skip_metadata_api_check": True,
@@ -332,12 +334,16 @@ def generate_s3_backend_config() -> str:
             for k, v in configs["endpoints"].items()
         }
     backend_config["access_key"] = (
-        get_access_key(backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
+        get_access_key(
+            backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
     )
     configs.update(backend_config)
     if not DRY_RUN:
         get_or_create_bucket(configs["bucket"])
-        get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"])
+        if "dynamodb_table" in configs:
+            del configs["use_lockfile"]
+            get_or_create_ddb_table(
+                configs["dynamodb_table"], region=configs["region"])
     result = TF_S3_BACKEND_CONFIG
     config_options = ""
     for key, value in sorted(configs.items()):
@@ -491,7 +497,8 @@ def get_or_create_bucket(bucket_name: str):
     region = s3_client.meta.region_name
     kwargs = {}
     if region != "us-east-1":
-        kwargs = {"CreateBucketConfiguration": {"LocationConstraint": region}}
+        kwargs = {"CreateBucketConfiguration": {
+            "LocationConstraint": region}}
     return s3_client.create_bucket(Bucket=bucket_name, **kwargs)
 
 
@@ -505,7 +512,8 @@ def get_or_create_ddb_table(table_name: str, region: str = None):
         TableName=table_name,
         BillingMode="PAY_PER_REQUEST",
         KeySchema=[{"AttributeName": "LockID", "KeyType": "HASH"}],
-        AttributeDefinitions=[{"AttributeName": "LockID", "AttributeType": "S"}],
+        AttributeDefinitions=[
+            {"AttributeName": "LockID", "AttributeType": "S"}],
     )
 
 
@@ -615,7 +623,8 @@ def main():
         if not TF_VERSION:
             raise ValueError
     except (FileNotFoundError, ValueError) as e:
-        print(f"Unable to determine version. See error message for details: {e}")
+        print(
+            f"Unable to determine version. See error message for details: {e}")
         exit(1)
 
     config_override_files = []

From c6b256c53995a5bf31f3ec7f6ae8d184ab87d1ff Mon Sep 17 00:00:00 2001
From: Jessie Moss
Date: Thu, 15 May 2025 19:28:42 -0700
Subject: [PATCH 02/16] Fix overzealous linter

---
 bin/tflocal | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 9642634..28becd0 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -55,8 +55,7 @@ LOCALSTACK_HOSTNAME = (
     or os.environ.get("LOCALSTACK_HOSTNAME")
     or "localhost"
 )
-EDGE_PORT = int(
-    urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566)
+EDGE_PORT = int(urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566)
 TF_VERSION: Optional[version.Version] = None
 TF_PROVIDER_CONFIG = """
 provider "aws" {
@@ -187,8 +186,7 @@ def create_provider_config_file(provider_file_path: str, provider_aliases=None)
     for provider in provider_aliases:
         provider_config = TF_PROVIDER_CONFIG.replace(
             "<access_key>",
-            get_access_key(
-                provider) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY,
+            get_access_key(provider) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY,
         )
         endpoints = "\n".join(
             [f'    {s} = "{get_service_endpoint(s)}"' for s in services]
@@ -334,8 +332,7 @@ def generate_s3_backend_config() -> str:
             for k, v in configs["endpoints"].items()
         }
     backend_config["access_key"] = (
-        get_access_key(
-            backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
+        get_access_key(backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
     )
     configs.update(backend_config)
     if not DRY_RUN:
@@ -497,8 +494,7 @@ def get_or_create_bucket(bucket_name: str):
     region = s3_client.meta.region_name
     kwargs = {}
     if region != "us-east-1":
-        kwargs = {"CreateBucketConfiguration": {
-            "LocationConstraint": region}}
+        kwargs = {"CreateBucketConfiguration": {"LocationConstraint": region}}
     return s3_client.create_bucket(Bucket=bucket_name, **kwargs)
 
 
@@ -512,8 +508,7 @@ def get_or_create_ddb_table(table_name: str, region: str = None):
         TableName=table_name,
         BillingMode="PAY_PER_REQUEST",
         KeySchema=[{"AttributeName": "LockID", "KeyType": "HASH"}],
-        AttributeDefinitions=[
-            {"AttributeName": "LockID", "AttributeType": "S"}],
+        AttributeDefinitions=[{"AttributeName": "LockID", "AttributeType": "S"}],
     )
 
 
@@ -623,8 +618,7 @@ def main():
         if not TF_VERSION:
             raise ValueError
     except (FileNotFoundError, ValueError) as e:
-        print(
-            f"Unable to determine version. See error message for details: {e}")
+        print(f"Unable to determine version. See error message for details: {e}")
         exit(1)
 
     config_override_files = []
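Note, for context on patches 01-03: `use_lockfile` is the S3 backend's native state locking (available since Terraform 1.10), which takes an S3 conditional-write lock on a `.tflock` object next to the state instead of requiring a DynamoDB table. A minimal sketch of a backend block using it — bucket name illustrative:

    terraform {
      backend "s3" {
        bucket       = "my-state-bucket"
        key          = "terraform.tfstate"
        region       = "us-east-1"
        use_lockfile = true
      }
    }

Because no lock table is involved, tflocal only needs to create a DynamoDB table when the user explicitly sets `dynamodb_table`, which is what the `if "dynamodb_table" in configs` guard above implements.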
"bucket": "tf-test-state", "key": "terraform.tfstate", - "use_lockfile": True, "region": get_region(), "skip_credentials_validation": True, "skip_metadata_api_check": True, @@ -338,9 +337,7 @@ def generate_s3_backend_config() -> str: if not DRY_RUN: get_or_create_bucket(configs["bucket"]) if "dynamodb_table" in configs: - del configs["use_lockfile"] - get_or_create_ddb_table( - configs["dynamodb_table"], region=configs["region"]) + get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"]) result = TF_S3_BACKEND_CONFIG config_options = "" for key, value in sorted(configs.items()): From 1da51437101b706cf090e60b122b912f164177bd Mon Sep 17 00:00:00 2001 From: Benjamin Simon Date: Fri, 16 May 2025 17:53:59 +0200 Subject: [PATCH 04/16] bump version --- README.md | 1 + setup.cfg | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 30efc33..8543059 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,7 @@ ADDITIONAL_TF_OVERRIDE_LOCATIONS=/path/to/module1,path/to/module2 tflocal plan ## Change Log +* v0.22.0: Fix S3 backend forcing DynamoDB State Lock to be enabled by default * v0.21.0: Add ability to drop an override file in additional locations * v0.20.1: Fix list config rendering * v0.20.0: Fix S3 backend option merging diff --git a/setup.cfg b/setup.cfg index 03d4c11..a568975 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = terraform-local -version = 0.21.0 +version = 0.22.0 url = https://github.com/localstack/terraform-local author = LocalStack Team author_email = info@localstack.cloud From 8626b2666ed1dad6a9897eb9837c88b037b6ffde Mon Sep 17 00:00:00 2001 From: Benjamin Simon Date: Fri, 16 May 2025 18:04:44 +0200 Subject: [PATCH 05/16] add test for no state lock --- tests/test_apply.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/test_apply.py b/tests/test_apply.py index afe292b..0dc2865 100644 --- a/tests/test_apply.py +++ b/tests/test_apply.py @@ -205,6 +205,43 @@ def test_s3_backend(): assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 +def test_s3_backend_state_lock_default(): + state_bucket = f"tf-state-{short_uid()}" + bucket_name = f"bucket-{short_uid()}" + state_region = "us-west-1" + dynamodb_client = client("dynamodb", region_name=state_region) + table_amount = len(dynamodb_client.list_tables()["TableNames"]) + config = """ + terraform { + backend "s3" { + bucket = "%s" + key = "terraform.tfstate" + region = "%s" + skip_credentials_validation = true + } + } + resource "aws_s3_bucket" "test-bucket" { + bucket = "%s" + } + """ % (state_bucket, state_region, bucket_name) + deploy_tf_script(config) + + # assert that bucket with state file exists + s3 = client("s3", region_name=state_region) + result = s3.list_objects(Bucket=state_bucket) + keys = [obj["Key"] for obj in result["Contents"]] + assert "terraform.tfstate" in keys + + # assert that DynamoDB table with state file locks has not been created by default + result = dynamodb_client.list_tables() + assert len(result["TableNames"]) == table_amount + + # assert that S3 resource has been created + s3 = client("s3") + result = s3.head_bucket(Bucket=bucket_name) + assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 + + def test_dry_run(monkeypatch): monkeypatch.setenv("DRY_RUN", "1") state_bucket = "tf-state-dry-run" From e6b43e1a8e10d8022ad37df63f54476ec3a6e467 Mon Sep 17 00:00:00 2001 From: hubcaps Date: Sun, 18 May 2025 08:51:05 -0700 Subject: [PATCH 06/16] First iteration of remote state 
endpoint injection --- bin/tflocal | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/bin/tflocal b/bin/tflocal index 588b463..68ad61f 100755 --- a/bin/tflocal +++ b/bin/tflocal @@ -75,6 +75,13 @@ terraform { } } """ +TF_REMOTE_STATE_CONFIG = """ +data "terraform_remote_state" "" { + backend = "s3" + config = { + } +} +""" PROCESS = None # some services have aliases which are mutually exclusive to each other # see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/custom-service-endpoints#available-endpoint-customizations @@ -215,6 +222,9 @@ def create_provider_config_file(provider_file_path: str, provider_aliases=None) # create s3 backend config tf_config += generate_s3_backend_config() + # create remote state config + tf_config += generate_remote_state_config() + # write temporary config file write_provider_config_file(provider_file_path, tf_config) @@ -370,6 +380,61 @@ def generate_s3_backend_config() -> str: result = result.replace("", config_options) return result +def generate_remote_state_config() -> str: + """ + Generate configuration for terraform_remote_state data sources to use LocalStack endpoints. + Similar to generate_s3_backend_config but for terraform_remote_state blocks. + """ + tf_files = parse_tf_files() + if not tf_files: + return "" + + result = "" + for filename, obj in tf_files.items(): + if LS_PROVIDERS_FILE == filename: + continue + + data_blocks = ensure_list(obj.get("data", [])) + for data_block in data_blocks: + terraform_remote_state = data_block.get("terraform_remote_state") + if not terraform_remote_state: + continue + + for data_name, data_config in terraform_remote_state.items(): + if data_config.get("backend") != "s3": + continue + + # Create override for S3 remote state + config_attrs = data_config.get("config", {}) + if not config_attrs: + continue + + # Set up default configs + configs = { + "bucket": config_attrs.get("bucket", "tf-test-state"), + "key": config_attrs.get("key", "terraform.tfstate"), + "region": config_attrs.get("region", get_region()), + "endpoint": get_service_endpoint("s3"), + } + + # Update with user-provided configs + configs.update(config_attrs) + + # Generate config string + config_options = "" + for key, value in sorted(configs.items()): + if isinstance(value, bool): + value = str(value).lower() + elif isinstance(value, (str, int, float)): + value = f'"{value}"' + config_options += f'\n {key} = {value}' + + # Create the final config + remote_state_config = TF_REMOTE_STATE_CONFIG.replace("", data_name) + remote_state_config = remote_state_config.replace("", config_options) + result += remote_state_config + + return result def check_override_file(providers_file: str) -> None: """Checks override file existance""" From 15a6029ce9c95436be122113dfee3814cef935c2 Mon Sep 17 00:00:00 2001 From: Jessie Moss Date: Thu, 15 May 2025 19:22:36 -0700 Subject: [PATCH 07/16] Add support for use_lockfile for s3 backend --- bin/tflocal | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/bin/tflocal b/bin/tflocal index 68ad61f..33ff936 100755 --- a/bin/tflocal +++ b/bin/tflocal @@ -55,7 +55,8 @@ LOCALSTACK_HOSTNAME = ( or os.environ.get("LOCALSTACK_HOSTNAME") or "localhost" ) -EDGE_PORT = int(urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566) +EDGE_PORT = int( + urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566) TF_VERSION: Optional[version.Version] = None TF_PROVIDER_CONFIG = """ provider "aws" { 
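Note, for reference: this is the data source shape targeted by patch 06, with all names illustrative. tflocal copies the user's `config` map into a generated override block and points the S3 endpoint at LocalStack, so the state lookup never reaches real AWS:

    data "terraform_remote_state" "networking" {
      backend = "s3"
      config = {
        bucket = "shared-infra-state"
        key    = "terraform.tfstate"
        region = "us-east-1"
      }
    }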
From 15a6029ce9c95436be122113dfee3814cef935c2 Mon Sep 17 00:00:00 2001
From: Jessie Moss
Date: Thu, 15 May 2025 19:22:36 -0700
Subject: [PATCH 07/16] Add support for use_lockfile for s3 backend

---
 bin/tflocal | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 68ad61f..33ff936 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -55,7 +55,8 @@ LOCALSTACK_HOSTNAME = (
     or os.environ.get("LOCALSTACK_HOSTNAME")
     or "localhost"
 )
-EDGE_PORT = int(urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566)
+EDGE_PORT = int(
+    urlparse(AWS_ENDPOINT_URL).port or os.environ.get("EDGE_PORT") or 4566)
 TF_VERSION: Optional[version.Version] = None
 TF_PROVIDER_CONFIG = """
 provider "aws" {
@@ -193,7 +194,8 @@ def create_provider_config_file(provider_file_path: str, provider_aliases=None)
     for provider in provider_aliases:
         provider_config = TF_PROVIDER_CONFIG.replace(
             "<access_key>",
-            get_access_key(provider) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY,
+            get_access_key(
+                provider) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY,
         )
         endpoints = "\n".join(
             [f'    {s} = "{get_service_endpoint(s)}"' for s in services]
@@ -341,13 +343,15 @@ def generate_s3_backend_config() -> str:
             for k, v in configs["endpoints"].items()
         }
     backend_config["access_key"] = (
-        get_access_key(backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
+        get_access_key(
+            backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
     )
     configs.update(backend_config)
     if not DRY_RUN:
         get_or_create_bucket(configs["bucket"])
         if "dynamodb_table" in configs:
-            get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"])
+            get_or_create_ddb_table(
+                configs["dynamodb_table"], region=configs["region"])
     result = TF_S3_BACKEND_CONFIG
     config_options = ""
     for key, value in sorted(configs.items()):
@@ -556,7 +560,8 @@ def get_or_create_bucket(bucket_name: str):
     region = s3_client.meta.region_name
     kwargs = {}
     if region != "us-east-1":
-        kwargs = {"CreateBucketConfiguration": {"LocationConstraint": region}}
+        kwargs = {"CreateBucketConfiguration": {
+            "LocationConstraint": region}}
     return s3_client.create_bucket(Bucket=bucket_name, **kwargs)
 
 
@@ -570,7 +575,8 @@ def get_or_create_ddb_table(table_name: str, region: str = None):
         TableName=table_name,
         BillingMode="PAY_PER_REQUEST",
         KeySchema=[{"AttributeName": "LockID", "KeyType": "HASH"}],
-        AttributeDefinitions=[{"AttributeName": "LockID", "AttributeType": "S"}],
+        AttributeDefinitions=[
+            {"AttributeName": "LockID", "AttributeType": "S"}],
     )
 
 
@@ -680,7 +686,8 @@ def main():
         if not TF_VERSION:
             raise ValueError
     except (FileNotFoundError, ValueError) as e:
-        print(f"Unable to determine version. See error message for details: {e}")
+        print(
+            f"Unable to determine version. See error message for details: {e}")
         exit(1)
 
     config_override_files = []

From fbb5dc670a8dec37d9f62d5203e7fe0acc264173 Mon Sep 17 00:00:00 2001
From: Jessie Moss
Date: Fri, 16 May 2025 08:03:58 -0700
Subject: [PATCH 08/16] Removing default locking config

---
 bin/tflocal | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 33ff936..4f6256e 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -350,8 +350,7 @@ def generate_s3_backend_config() -> str:
     if not DRY_RUN:
         get_or_create_bucket(configs["bucket"])
         if "dynamodb_table" in configs:
-            get_or_create_ddb_table(
-                configs["dynamodb_table"], region=configs["region"])
+            get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"])
     result = TF_S3_BACKEND_CONFIG
     config_options = ""
     for key, value in sorted(configs.items()):
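Note, as background for the version handling in the next patch: the S3 backend's endpoint options changed in Terraform 1.6. Roughly:

    # Terraform < 1.6: top-level legacy options
    backend "s3" {
      endpoint          = "http://localhost:4566"
      iam_endpoint      = "http://localhost:4566"
      sts_endpoint      = "http://localhost:4566"
      dynamodb_endpoint = "http://localhost:4566"
    }

    # Terraform >= 1.6: a single endpoints map
    backend "s3" {
      endpoints = {
        s3       = "http://localhost:4566"
        iam      = "http://localhost:4566"
        sts      = "http://localhost:4566"
        dynamodb = "http://localhost:4566"
      }
    }

The `legacy_endpoint_mappings` table introduced below normalizes user-supplied legacy keys into the `endpoints` map, and re-emits legacy keys when an old Terraform binary is detected.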
""" + + is_tf_legacy = TF_VERSION < version.Version("1.6") tf_files = parse_tf_files() - if not tf_files: - return "" + + legacy_endpoint_mappings = { + "endpoint": "s3", + "iam_endpoint": "iam", + "sts_endpoint": "sts", + "dynamodb_endpoint": "dynamodb", + } result = "" for filename, obj in tf_files.items(): if LS_PROVIDERS_FILE == filename: continue - data_blocks = ensure_list(obj.get("data", [])) for data_block in data_blocks: terraform_remote_state = data_block.get("terraform_remote_state") if not terraform_remote_state: continue - for data_name, data_config in terraform_remote_state.items(): if data_config.get("backend") != "s3": continue - # Create override for S3 remote state config_attrs = data_config.get("config", {}) if not config_attrs: continue - + # Merge in legacy endpoint configs if not existing already + if is_tf_legacy and config_attrs.get("endpoints"): + print( + "Warning: Unsupported backend option(s) detected (`endpoints`). Please make sure you always use the corresponding options to your Terraform version." + ) + exit(1) + for legacy_endpoint, endpoint in legacy_endpoint_mappings.items(): + if ( + legacy_endpoint in config_attrs + and config_attrs.get("endpoints") + and endpoint in config_attrs["endpoints"] + ): + del config_attrs[legacy_endpoint] + continue + if legacy_endpoint in config_attrs and ( + not config_attrs.get("endpoints") + or endpoint not in config_attrs["endpoints"] + ): + if not config_attrs.get("endpoints"): + config_attrs["endpoints"] = {} + config_attrs["endpoints"].update( + {endpoint: config_attrs[legacy_endpoint]} + ) + del config_attrs[legacy_endpoint] + # Set up default configs configs = { "bucket": config_attrs.get("bucket", "tf-test-state"), "key": config_attrs.get("key", "terraform.tfstate"), "region": config_attrs.get("region", get_region()), - "endpoint": get_service_endpoint("s3"), + "endpoints": { + "s3": get_service_endpoint("s3"), + "iam": get_service_endpoint("iam"), + "sso": get_service_endpoint("sso"), + "sts": get_service_endpoint("sts"), + }, } - + + # Add any missing default endpoints + if config_attrs.get("endpoints"): + config_attrs["endpoints"] = { + k: config_attrs["endpoints"].get(k) or v + for k, v in configs["endpoints"].items() + } + # Update with user-provided configs configs.update(config_attrs) - + # Generate config string config_options = "" + # for key, value in sorted(configs.items()): + # if isinstance(value, bool): + # value = str(value).lower() + # elif isinstance(value, (str, int, float)): + # value = f'"{value}"' + # config_options += f"\n {key} = {value}" for key, value in sorted(configs.items()): if isinstance(value, bool): value = str(value).lower() - elif isinstance(value, (str, int, float)): - value = f'"{value}"' - config_options += f'\n {key} = {value}' - + elif isinstance(value, dict): + if key == "endpoints" and is_tf_legacy: + for ( + legacy_endpoint, + endpoint, + ) in legacy_endpoint_mappings.items(): + config_options += f'\n {legacy_endpoint} = "{configs[key][endpoint]}"' + continue + else: + value = textwrap.indent( + text=f"{key} = {{\n" + + "\n".join( + [f' {k} = "{v}"' for k, v in value.items()] + ) + + "\n}", + prefix=" " * 4, + ) + config_options += f"\n{value}" + continue + elif isinstance(value, list): + # TODO this will break if it's a list of dicts or other complex object + # this serialization logic should probably be moved to a separate recursive function + as_string = [f'"{item}"' for item in value] + value = f"[{', '.join(as_string)}]" + else: + value = f'"{str(value)}"' + 
config_options += f"\n {key} = {value}" + # Create the final config - remote_state_config = TF_REMOTE_STATE_CONFIG.replace("", data_name) - remote_state_config = remote_state_config.replace("", config_options) + remote_state_config = TF_REMOTE_STATE_CONFIG.replace( + "", data_name + ) + remote_state_config = remote_state_config.replace( + "", config_options + ) result += remote_state_config - + return result + def check_override_file(providers_file: str) -> None: """Checks override file existance""" if os.path.exists(providers_file): @@ -559,8 +632,7 @@ def get_or_create_bucket(bucket_name: str): region = s3_client.meta.region_name kwargs = {} if region != "us-east-1": - kwargs = {"CreateBucketConfiguration": { - "LocationConstraint": region}} + kwargs = {"CreateBucketConfiguration": {"LocationConstraint": region}} return s3_client.create_bucket(Bucket=bucket_name, **kwargs) @@ -574,8 +646,7 @@ def get_or_create_ddb_table(table_name: str, region: str = None): TableName=table_name, BillingMode="PAY_PER_REQUEST", KeySchema=[{"AttributeName": "LockID", "KeyType": "HASH"}], - AttributeDefinitions=[ - {"AttributeName": "LockID", "AttributeType": "S"}], + AttributeDefinitions=[{"AttributeName": "LockID", "AttributeType": "S"}], ) @@ -685,8 +756,7 @@ def main(): if not TF_VERSION: raise ValueError except (FileNotFoundError, ValueError) as e: - print( - f"Unable to determine version. See error message for details: {e}") + print(f"Unable to determine version. See error message for details: {e}") exit(1) config_override_files = [] From 3e5c4d188074d9b64e2ab87388e9c1f0e5788c7e Mon Sep 17 00:00:00 2001 From: Jessie Moss Date: Tue, 20 May 2025 10:44:17 -0700 Subject: [PATCH 10/16] Removing commented logic --- bin/tflocal | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bin/tflocal b/bin/tflocal index 4e9dbb1..c0acd14 100755 --- a/bin/tflocal +++ b/bin/tflocal @@ -463,12 +463,6 @@ def generate_remote_state_config() -> str: # Generate config string config_options = "" - # for key, value in sorted(configs.items()): - # if isinstance(value, bool): - # value = str(value).lower() - # elif isinstance(value, (str, int, float)): - # value = f'"{value}"' - # config_options += f"\n {key} = {value}" for key, value in sorted(configs.items()): if isinstance(value, bool): value = str(value).lower() From a2adb2834c83d98b466ecfedc962a58c9a59e27d Mon Sep 17 00:00:00 2001 From: Benjamin Simon Date: Thu, 22 May 2025 00:25:50 +0200 Subject: [PATCH 11/16] refactor code + add tests + fix multiple terraform block for S3 backends --- bin/tflocal | 269 +++++++++++++++++--------------------------- tests/conftest.py | 1 + tests/test_apply.py | 101 +++++++++++++++++ 3 files changed, 205 insertions(+), 166 deletions(-) diff --git a/bin/tflocal b/bin/tflocal index c0acd14..3a792dd 100755 --- a/bin/tflocal +++ b/bin/tflocal @@ -18,7 +18,7 @@ import textwrap from packaging import version from urllib.parse import urlparse -from typing import Iterable, Optional +from typing import Iterable, Optional, Dict PARENT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(__file__), "..")) if os.path.isdir(os.path.join(PARENT_FOLDER, ".venv")): @@ -271,30 +271,23 @@ def determine_provider_aliases() -> list: def generate_s3_backend_config() -> str: """Generate an S3 `backend {..}` block with local endpoints, if configured""" - is_tf_legacy = TF_VERSION < version.Version("1.6") - backend_config = None + s3_backend_config = {} tf_files = parse_tf_files() for filename, obj in tf_files.items(): if LS_PROVIDERS_FILE == filename: continue 
From a2adb2834c83d98b466ecfedc962a58c9a59e27d Mon Sep 17 00:00:00 2001
From: Benjamin Simon
Date: Thu, 22 May 2025 00:25:50 +0200
Subject: [PATCH 11/16] refactor code + add tests + fix multiple terraform
 blocks for S3 backends

---
 bin/tflocal         | 269 +++++++++++++++++---------------------------
 tests/conftest.py   |   1 +
 tests/test_apply.py | 101 +++++++++++++++++
 3 files changed, 205 insertions(+), 166 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index c0acd14..3a792dd 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -18,7 +18,7 @@ import textwrap
 
 from packaging import version
 from urllib.parse import urlparse
-from typing import Iterable, Optional
+from typing import Iterable, Optional, Dict
 
 PARENT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
 if os.path.isdir(os.path.join(PARENT_FOLDER, ".venv")):
@@ -271,30 +271,23 @@ def determine_provider_aliases() -> list:
 
 def generate_s3_backend_config() -> str:
     """Generate an S3 `backend {..}` block with local endpoints, if configured"""
-    is_tf_legacy = TF_VERSION < version.Version("1.6")
-    backend_config = None
+    s3_backend_config = {}
     tf_files = parse_tf_files()
     for filename, obj in tf_files.items():
         if LS_PROVIDERS_FILE == filename:
             continue
         tf_configs = ensure_list(obj.get("terraform", []))
         for tf_config in tf_configs:
-            backend_config = ensure_list(tf_config.get("backend"))
-            if backend_config:
-                backend_config = backend_config[0]
-                break
-    backend_config = backend_config and backend_config.get("s3")
-    if not backend_config:
-        return ""
+            if tf_config.get("backend"):
+                backend_config = ensure_list(tf_config.get("backend"))[0]
+                if backend_config.get("s3"):
+                    s3_backend_config = backend_config["s3"]
+                    break
 
-    legacy_endpoint_mappings = {
-        "endpoint": "s3",
-        "iam_endpoint": "iam",
-        "sts_endpoint": "sts",
-        "dynamodb_endpoint": "dynamodb",
-    }
+    if not s3_backend_config:
+        return ""
 
-    configs = {
+    backend_default_config = {
         # note: default values, updated by `backend_config` further below...
         "bucket": "tf-test-state",
         "key": "terraform.tfstate",
@@ -301,190 +294,134 @@ def generate_s3_backend_config() -> str:
         "region": get_region(),
         "skip_credentials_validation": True,
         "skip_metadata_api_check": True,
         "secret_key": "test",
         "endpoints": {
             "s3": get_service_endpoint("s3"),
             "iam": get_service_endpoint("iam"),
             "sso": get_service_endpoint("sso"),
             "sts": get_service_endpoint("sts"),
             "dynamodb": get_service_endpoint("dynamodb"),
         },
     }
+
+    config_options = _generate_s3_backend_config(s3_backend_config, backend_default_config)
+    if not DRY_RUN:
+        get_or_create_bucket(backend_default_config["bucket"])
+        if "dynamodb_table" in backend_default_config:
+            get_or_create_ddb_table(
+                backend_default_config["dynamodb_table"],
+                region=backend_default_config["region"],
+            )
+
+    result = TF_S3_BACKEND_CONFIG.replace("<config_options>", config_options)
+    return result
+
+
+def generate_remote_state_config() -> str:
+    """
+    Generate configuration for terraform_remote_state data sources to use LocalStack endpoints.
+    Similar to generate_s3_backend_config but for terraform_remote_state blocks.
+    """
+
+    tf_files = parse_tf_files()
+
+    result = ""
+    for filename, obj in tf_files.items():
+        if LS_PROVIDERS_FILE == filename:
+            continue
+        data_blocks = ensure_list(obj.get("data", []))
+        for data_block in data_blocks:
+            terraform_remote_state = data_block.get("terraform_remote_state")
+            if not terraform_remote_state:
+                continue
+            for data_name, data_config in terraform_remote_state.items():
+                if data_config.get("backend") != "s3":
+                    continue
+                # Create override for S3 remote state
+                backend_config = data_config.get("config", {})
+                if not backend_config:
+                    continue
+
+                # Set up default configs
+                remote_state_default_config = {
+                    "bucket": "tf-test-state",
+                    "key": "terraform.tfstate",
+                    "region": get_region(),
+                    "skip_credentials_validation": True,
+                    "skip_metadata_api_check": True,
+                    "secret_key": "test",
+                    "endpoints": {
+                        "s3": get_service_endpoint("s3"),
+                        "iam": get_service_endpoint("iam"),
+                        "sso": get_service_endpoint("sso"),
+                        "sts": get_service_endpoint("sts"),
+                    },
+                }
+
+                config_options = _generate_s3_backend_config(backend_config, remote_state_default_config)
+
+                # Create the final config
+                remote_state_config = TF_REMOTE_STATE_CONFIG.replace(
+                    "<data_name>", data_name
+                ).replace("<config_options>", config_options)
+                result += remote_state_config
+
+    return result
+
+
+def _generate_s3_backend_config(backend_config: Dict, default_config: Dict) -> str:
+    is_tf_legacy = TF_VERSION < version.Version("1.6")
+    legacy_endpoint_mappings = {
+        "endpoint": "s3",
+        "iam_endpoint": "iam",
+        "sts_endpoint": "sts",
+        "dynamodb_endpoint": "dynamodb",
+    }
+
     # Merge in legacy endpoint configs if not existing already
     if is_tf_legacy and backend_config.get("endpoints"):
         print(
             "Warning: Unsupported backend option(s) detected (`endpoints`). Please make sure you always use the corresponding options to your Terraform version."
         )
         exit(1)
     for legacy_endpoint, endpoint in legacy_endpoint_mappings.items():
         if (
-            legacy_endpoint in backend_config 
-            and backend_config.get("endpoints")
-            and endpoint in backend_config["endpoints"]
+            legacy_endpoint in backend_config
+            and backend_config.get("endpoints")
+            and endpoint in backend_config["endpoints"]
         ):
             del backend_config[legacy_endpoint]
             continue
         if legacy_endpoint in backend_config and (
-            not backend_config.get("endpoints")
-            or endpoint not in backend_config["endpoints"]
+            not backend_config.get("endpoints")
+            or endpoint not in backend_config["endpoints"]
         ):
             if not backend_config.get("endpoints"):
                 backend_config["endpoints"] = {}
             backend_config["endpoints"].update(
                 {endpoint: backend_config[legacy_endpoint]}
             )
             del backend_config[legacy_endpoint]
+
     # Add any missing default endpoints
     if backend_config.get("endpoints"):
-        backend_config["endpoints"] = {
-            k: backend_config["endpoints"].get(k) or v
-            for k, v in configs["endpoints"].items()
-        }
+        default_config["endpoints"].update(backend_config["endpoints"])
+
     backend_config["access_key"] = (
         get_access_key(backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
     )
-    configs.update(backend_config)
-    if not DRY_RUN:
-        get_or_create_bucket(configs["bucket"])
-        if "dynamodb_table" in configs:
-            get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"])
-    result = TF_S3_BACKEND_CONFIG
+
+    # Update with user-provided configs
+    default_config.update(backend_config)
+
+    # Generate config string
     config_options = ""
-    for key, value in sorted(configs.items()):
+    for key, value in sorted(default_config.items()):
         if isinstance(value, bool):
             value = str(value).lower()
         elif isinstance(value, dict):
             if key == "endpoints" and is_tf_legacy:
                 for legacy_endpoint, endpoint in legacy_endpoint_mappings.items():
-                    config_options += (
-                        f'\n    {legacy_endpoint} = "{configs[key][endpoint]}"'
-                    )
+                    config_options += f'\n    {legacy_endpoint} = "{default_config[key][endpoint]}"'
                 continue
             else:
+                joined_values = "\n".join([f'  {k} = "{v}"' for k, v in value.items()])
                 value = textwrap.indent(
-                    text=f"{key} = {{\n"
-                    + "\n".join([f'  {k} = "{v}"' for k, v in value.items()])
-                    + "\n}",
+                    text=f"{key} = {{\n{joined_values}\n}}",
                     prefix=" " * 4,
                 )
                 config_options += f"\n{value}"
                 continue
         elif isinstance(value, list):
             # TODO this will break if it's a list of dicts or other complex object
             # this serialization logic should probably be moved to a separate recursive function
             as_string = [f'"{item}"' for item in value]
             value = f"[{', '.join(as_string)}]"
         else:
             value = f'"{str(value)}"'
         config_options += f"\n    {key} = {value}"
-    result = result.replace("<config_options>", config_options)
-    return result
-
-
-def generate_remote_state_config() -> str:
-    """
-    Generate configuration for terraform_remote_state data sources to use LocalStack endpoints.
-    Similar to generate_s3_backend_config but for terraform_remote_state blocks.
-    """
-
-    is_tf_legacy = TF_VERSION < version.Version("1.6")
-    tf_files = parse_tf_files()
-
-    legacy_endpoint_mappings = {
-        "endpoint": "s3",
-        "iam_endpoint": "iam",
-        "sts_endpoint": "sts",
-        "dynamodb_endpoint": "dynamodb",
-    }
-
-    result = ""
-    for filename, obj in tf_files.items():
-        if LS_PROVIDERS_FILE == filename:
-            continue
-        data_blocks = ensure_list(obj.get("data", []))
-        for data_block in data_blocks:
-            terraform_remote_state = data_block.get("terraform_remote_state")
-            if not terraform_remote_state:
-                continue
-            for data_name, data_config in terraform_remote_state.items():
-                if data_config.get("backend") != "s3":
-                    continue
-                # Create override for S3 remote state
-                config_attrs = data_config.get("config", {})
-                if not config_attrs:
-                    continue
-                # Merge in legacy endpoint configs if not existing already
-                if is_tf_legacy and config_attrs.get("endpoints"):
-                    print(
-                        "Warning: Unsupported backend option(s) detected (`endpoints`). Please make sure you always use the corresponding options to your Terraform version."
-                    )
-                    exit(1)
-                for legacy_endpoint, endpoint in legacy_endpoint_mappings.items():
-                    if (
-                        legacy_endpoint in config_attrs
-                        and config_attrs.get("endpoints")
-                        and endpoint in config_attrs["endpoints"]
-                    ):
-                        del config_attrs[legacy_endpoint]
-                        continue
-                    if legacy_endpoint in config_attrs and (
-                        not config_attrs.get("endpoints")
-                        or endpoint not in config_attrs["endpoints"]
-                    ):
-                        if not config_attrs.get("endpoints"):
-                            config_attrs["endpoints"] = {}
-                        config_attrs["endpoints"].update(
-                            {endpoint: config_attrs[legacy_endpoint]}
-                        )
-                        del config_attrs[legacy_endpoint]
-
-                # Set up default configs
-                configs = {
-                    "bucket": config_attrs.get("bucket", "tf-test-state"),
-                    "key": config_attrs.get("key", "terraform.tfstate"),
-                    "region": config_attrs.get("region", get_region()),
-                    "endpoints": {
-                        "s3": get_service_endpoint("s3"),
-                        "iam": get_service_endpoint("iam"),
-                        "sso": get_service_endpoint("sso"),
-                        "sts": get_service_endpoint("sts"),
-                    },
-                }
-
-                # Add any missing default endpoints
-                if config_attrs.get("endpoints"):
-                    config_attrs["endpoints"] = {
-                        k: config_attrs["endpoints"].get(k) or v
-                        for k, v in configs["endpoints"].items()
-                    }
-
-                # Update with user-provided configs
-                configs.update(config_attrs)
-
-                # Generate config string
-                config_options = ""
-                for key, value in sorted(configs.items()):
-                    if isinstance(value, bool):
-                        value = str(value).lower()
-                    elif isinstance(value, dict):
-                        if key == "endpoints" and is_tf_legacy:
-                            for (
-                                legacy_endpoint,
-                                endpoint,
-                            ) in legacy_endpoint_mappings.items():
-                                config_options += f'\n    {legacy_endpoint} = "{configs[key][endpoint]}"'
-                            continue
-                        else:
-                            value = textwrap.indent(
-                                text=f"{key} = {{\n"
-                                + "\n".join(
-                                    [f'  {k} = "{v}"' for k, v in value.items()]
-                                )
-                                + "\n}",
-                                prefix=" " * 4,
-                            )
-                            config_options += f"\n{value}"
-                            continue
-                    elif isinstance(value, list):
-                        # TODO this will break if it's a list of dicts or other complex object
-                        # this serialization logic should probably be moved to a separate recursive function
-                        as_string = [f'"{item}"' for item in value]
-                        value = f"[{', '.join(as_string)}]"
-                    else:
-                        value = f'"{str(value)}"'
-                    config_options += f"\n    {key} = {value}"
-
-                # Create the final config
-                remote_state_config = TF_REMOTE_STATE_CONFIG.replace(
-                    "<data_name>", data_name
-                )
-                remote_state_config = remote_state_config.replace(
-                    "<config_options>", config_options
-                )
-                result += remote_state_config
-
-    return result
+    return config_options
 
 
 def check_override_file(providers_file: str) -> None:
-    """Checks override file existance"""
+    """Checks override file existence"""
     if os.path.exists(providers_file):
         msg = f"Providers override file {providers_file} already exists"
         err_msg = msg + " - please delete it first, exiting..."
diff --git a/tests/conftest.py b/tests/conftest.py
index 977eb81..5b3eea6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -5,6 +5,7 @@
 @pytest.fixture(scope="session", autouse=True)
 def start_localstack():
     subprocess.check_output(["localstack", "start", "-d"])
+    subprocess.check_output(["localstack", "wait"])
     yield
 
 
diff --git a/tests/test_apply.py b/tests/test_apply.py
index 0dc2865..b5558d5 100644
--- a/tests/test_apply.py
+++ b/tests/test_apply.py
@@ -205,6 +205,47 @@ def test_s3_backend():
     assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
 
 
+def test_s3_backend_with_multiple_backends_in_files():
+    state_bucket = f"tf-state-{short_uid()}"
+    bucket_name = f"bucket-{short_uid()}"
+    config_main = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key    = "terraform.tfstate"
+        region = "us-east-2"
+        skip_credentials_validation = true
+      }
+    }
+    resource "aws_s3_bucket" "test-bucket" {
+      bucket = "%s"
+    }
+    """ % (state_bucket, bucket_name)
+
+    config_other_file = """
+    terraform {}
+    """
+
+    # we manually need to add different files and create them to verify the fix
+    with tempfile.TemporaryDirectory(delete=True) as temp_dir:
+        with (
+            open(os.path.join(temp_dir, "test.tf"), "w") as f,
+            open(os.path.join(temp_dir, "test_2.tf"), "w") as f2,
+        ):
+            f.write(config_main)
+            f2.write(config_other_file)
+
+        kwargs = {"cwd": temp_dir, "env": dict(os.environ)}
+        run([TFLOCAL_BIN, "init"], **kwargs)
+        run([TFLOCAL_BIN, "apply", "-auto-approve"], **kwargs)
+
+        # assert that bucket with state file exists
+        s3 = client("s3", region_name="us-east-2")
+        result = s3.list_objects(Bucket=state_bucket)
+        keys = [obj["Key"] for obj in result["Contents"]]
+        assert "terraform.tfstate" in keys
+
+
 def test_s3_backend_state_lock_default():
     state_bucket = f"tf-state-{short_uid()}"
     bucket_name = f"bucket-{short_uid()}"
@@ -242,6 +283,66 @@ def test_s3_backend_state_lock_default():
     assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
 
 
+def test_s3_remote_data_source():
+    state_bucket = f"tf-data-source-{short_uid()}"
+    bucket_name = f"bucket-{short_uid()}"
+    object_key = f"obj-{short_uid()}"
+    config = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key    = "terraform.tfstate"
+        region = "us-east-1"
+        skip_credentials_validation = true
+      }
+    }
+
+    resource "aws_s3_bucket" "test_bucket" {
+      bucket = "%s"
+    }
+
+    output "bucket_name" {
+      value = aws_s3_bucket.test_bucket.bucket
+    }
+    """ % (state_bucket, bucket_name)
+    deploy_tf_script(config)
+
+    # assert that bucket with state file exists
+    s3 = client("s3", region_name="us-east-2")
+    result = s3.list_objects(Bucket=state_bucket)
+    keys = [obj["Key"] for obj in result["Contents"]]
+    assert "terraform.tfstate" in keys
+
+    # assert that S3 resource has been created
+    s3 = client("s3")
+    result = s3.head_bucket(Bucket=bucket_name)
+    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
+
+    # now deploy the part that needs the S3 remote state
+    config += """
+    data "terraform_remote_state" "remote_data" {
+      backend = "s3"
+
+      config = {
+        bucket = "%s"
+        key    = "terraform.tfstate"
+        region = "us-east-1"
+      }
+    }
+
+    resource "aws_s3_object" "foo" {
+      bucket  = data.terraform_remote_state.remote_data.outputs.bucket_name
+      key     = "%s"
+      content = "test"
+    }
+
+    """ % (state_bucket, object_key)
+    deploy_tf_script(config)
+
+    get_obj = s3.get_object(Bucket=bucket_name, Key=object_key)
+    assert get_obj["Body"].read() == b"test"
+
+
 def test_dry_run(monkeypatch):
     monkeypatch.setenv("DRY_RUN", "1")
     state_bucket = "tf-state-dry-run"
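Note, on the merge bug fixed in the next patch: the refactor above merges user endpoints into the defaults with `default_config["endpoints"].update(backend_config["endpoints"])`, but the later `default_config.update(backend_config)` then overwrites the whole `endpoints` key with the user's partial map, silently dropping the LocalStack defaults for any unlisted service. Writing the merge result back onto `backend_config["endpoints"]`, as the fix does, preserves the full map. A minimal sketch of the fixed semantics, with illustrative values:

    user = {"endpoints": {"s3": "http://my-host:4566"}}
    defaults = {"s3": "d1", "iam": "d2", "sts": "d3"}
    user["endpoints"] = {k: user["endpoints"].get(k) or v for k, v in defaults.items()}
    # -> s3 comes from the user; iam/sts fall back to the LocalStack defaults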
From 83087dc9f6db36c4690bed95bd2f1616cb6736c4 Mon Sep 17 00:00:00 2001
From: Benjamin Simon
Date: Thu, 22 May 2025 00:48:10 +0200
Subject: [PATCH 12/16] fix bad change in endpoints merge logic and refactor
 tests to be more readable when failing

---
 bin/tflocal         |  5 ++++-
 tests/test_apply.py | 24 ++++++++++++------------
 2 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 3a792dd..6b06d93 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -405,7 +405,10 @@ def _generate_s3_backend_config(backend_config: Dict, default_config: Dict) -> s
 
     # Add any missing default endpoints
     if backend_config.get("endpoints"):
-        default_config["endpoints"].update(backend_config["endpoints"])
+        backend_config["endpoints"] = {
+            k: backend_config["endpoints"].get(k) or v
+            for k, v in default_config["endpoints"].items()
+        }
 
     backend_config["access_key"] = (
         get_access_key(backend_config) if CUSTOMIZE_ACCESS_KEY else DEFAULT_ACCESS_KEY
diff --git a/tests/test_apply.py b/tests/test_apply.py
index b5558d5..31e4772 100644
--- a/tests/test_apply.py
+++ b/tests/test_apply.py
@@ -370,7 +370,7 @@ def test_dry_run(monkeypatch):
 
     override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
     assert check_override_file_exists(override_file)
-    assert check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf)
+    check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf)
 
     # assert that bucket with state file exists
     s3 = client("s3", region_name="us-east-2")
@@ -501,7 +501,7 @@ def test_s3_backend_endpoints_merge(monkeypatch, endpoints: str):
     temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")
     override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
     assert check_override_file_exists(override_file)
-    assert check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf)
+    check_override_file_backend_endpoints_content(override_file, is_legacy=is_legacy_tf)
     rmtree(temp_dir)
 
 
@@ -510,19 +510,19 @@ def check_override_file_exists(override_file):
 
 
 def check_override_file_backend_endpoints_content(override_file, is_legacy: bool = False):
-    legacy_options = (
+    legacy_options = {
         "endpoint",
         "iam_endpoint",
         "dynamodb_endpoint",
         "sts_endpoint",
-    )
-    new_options = (
+    }
+    new_options = {
         "iam",
         "dynamodb",
         "s3",
         "sso",
         "sts",
-    )
+    }
     try:
         with open(override_file, "r") as fp:
             result = hcl2.load(fp)
@@ -530,14 +530,14 @@ def check_override_file_backend_endpoints_content(override_file, is_legacy: bool
     except Exception as e:
         print(f'Unable to parse "{override_file}" as HCL file: {e}')
 
-    new_options_check = "endpoints" in result and all(map(lambda x: x in result.get("endpoints"), new_options))
-
     if is_legacy:
-        legacy_options_check = all(map(lambda x: x in result, legacy_options))
-        return not new_options_check and legacy_options_check
+        assert "endpoints" not in result
+        assert legacy_options <= set(result)
 
-    legacy_options_check = any(map(lambda x: x in result, legacy_options))
-    return new_options_check and not legacy_options_check
+    else:
+        assert "endpoints" in result
+        assert new_options <= set(result["endpoints"])
+        assert not legacy_options & set(result)
 
 
 def test_provider_aliases_ignored(monkeypatch):

From e849f6ce34b6feedcadb622ef53cd3896ae3093e Mon Sep 17 00:00:00 2001
From: Benjamin Simon
Date: Thu, 22 May 2025 01:30:50 +0200
Subject: [PATCH 13/16] add support for workspace argument of
 terraform_remote_state

---
 bin/tflocal         | 13 +++++++++++--
 tests/test_apply.py | 47 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 6b06d93..4613233 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -78,6 +78,7 @@ terraform {
 TF_REMOTE_STATE_CONFIG = """
 data "terraform_remote_state" "<data_name>" {
   backend = "s3"
+  <workspace>
   config = {<config_options>
   }
 }
@@ -324,7 +325,6 @@ def generate_remote_state_config() -> str:
     """
 
     tf_files = parse_tf_files()
-
     result = ""
     for filename, obj in tf_files.items():
         if LS_PROVIDERS_FILE == filename:
@@ -341,6 +341,13 @@ def generate_remote_state_config() -> str:
                 backend_config = data_config.get("config", {})
                 if not backend_config:
                     continue
+                workspace = data_config.get("workspace", "")
+                if workspace:
+                    if workspace[0] == "$":
+                        workspace = workspace.lstrip('${').rstrip('}')
+                    else:
+                        workspace = f'"{workspace}"'
+                    workspace = f"workspace = {workspace}"
 
                 # Set up default configs
                 remote_state_default_config = {
@@ -363,7 +370,9 @@ def generate_remote_state_config() -> str:
                 # Create the final config
                 remote_state_config = TF_REMOTE_STATE_CONFIG.replace(
                     "<data_name>", data_name
-                ).replace("<config_options>", config_options)
+                ) \
+                    .replace("<config_options>", config_options) \
+                    .replace("<workspace>", workspace)
                 result += remote_state_config
 
     return result
diff --git a/tests/test_apply.py b/tests/test_apply.py
index 31e4772..a37584a 100644
--- a/tests/test_apply.py
+++ b/tests/test_apply.py
@@ -343,6 +343,53 @@ def test_s3_remote_data_source():
     assert get_obj["Body"].read() == b"test"
 
 
+def test_s3_remote_data_source_with_workspace(monkeypatch):
+    monkeypatch.setenv("DRY_RUN", "1")
+    state_bucket = f"tf-data-source-{short_uid()}"
+    config = """
+    terraform {
+      backend "s3" {
+        bucket = "%s"
+        key    = "terraform.tfstate"
+        region = "us-east-1"
+        skip_credentials_validation = true
+      }
+    }
+
+    data "terraform_remote_state" "terraform_infra" {
+      backend   = "s3"
+      workspace = terraform.workspace
+
+      config = {
+        bucket               = "<bucket>"
+        workspace_key_prefix = "terraform-infrastructure/place"
+        key                  = "terraform.tfstate"
+      }
+    }
+
+    data "terraform_remote_state" "build_infra" {
+      backend   = "s3"
+      workspace = "build"
+
+      config = {
+        bucket               = "<bucket>"
+        workspace_key_prefix = "terraform-infrastructure"
+        key                  = "terraform.tfstate"
+      }
+    }
+
+    """.replace("<bucket>", state_bucket)
+
+    temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")
+    override_file = os.path.join(temp_dir, "localstack_providers_override.tf")
+    assert check_override_file_exists(override_file)
+
+    with open(override_file, "r") as fp:
+        result = hcl2.load(fp)
+    assert result["data"][0]["terraform_remote_state"]["terraform_infra"]["workspace"] == "${terraform.workspace}"
+    assert result["data"][1]["terraform_remote_state"]["build_infra"]["workspace"] == "build"
+
+
 def test_dry_run(monkeypatch):
     monkeypatch.setenv("DRY_RUN", "1")
     state_bucket = "tf-state-dry-run"
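Note, on the `workspace` handling added in the previous patch: python-hcl2 surfaces an HCL expression such as `terraform.workspace` as the string `"${terraform.workspace}"`, while a literal stays a plain string. The `lstrip('${')`/`rstrip('}')` pair strips those characters from the ends (a character-set strip, not a prefix strip, which is sufficient for these two shapes). A quick sketch:

    "${terraform.workspace}".lstrip('${').rstrip('}')  # -> 'terraform.workspace', re-emitted unquoted
    "build"                                            # literal, re-emitted as workspace = "build"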
From 36c9d6f288d281875a0e343128410a36ca253a46 Mon Sep 17 00:00:00 2001
From: Benjamin Simon
Date: Thu, 22 May 2025 01:43:38 +0200
Subject: [PATCH 14/16] refactor S3 generation to support legacy dynamodb for
 remote state

---
 bin/tflocal | 70 +++++++++++++++++++++--------------------------------
 1 file changed, 27 insertions(+), 43 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index 4613233..fbe1c97 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -18,7 +18,7 @@ import textwrap
 
 from packaging import version
 from urllib.parse import urlparse
-from typing import Iterable, Optional, Dict
+from typing import Iterable, Optional, Dict, Tuple
 
 PARENT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
 if os.path.isdir(os.path.join(PARENT_FOLDER, ".venv")):
@@ -288,33 +288,16 @@ def generate_s3_backend_config() -> str:
     if not s3_backend_config:
         return ""
 
-    backend_default_config = {
-        # note: default values, updated by `backend_config` further below...
-        "bucket": "tf-test-state",
-        "key": "terraform.tfstate",
-        "region": get_region(),
-        "skip_credentials_validation": True,
-        "skip_metadata_api_check": True,
-        "secret_key": "test",
-        "endpoints": {
-            "s3": get_service_endpoint("s3"),
-            "iam": get_service_endpoint("iam"),
-            "sso": get_service_endpoint("sso"),
-            "sts": get_service_endpoint("sts"),
-            "dynamodb": get_service_endpoint("dynamodb"),
-        },
-    }
-
-    config_options = _generate_s3_backend_config(s3_backend_config, backend_default_config)
+    config_values, config_string = _generate_s3_backend_config(s3_backend_config)
     if not DRY_RUN:
-        get_or_create_bucket(backend_default_config["bucket"])
-        if "dynamodb_table" in backend_default_config:
+        get_or_create_bucket(config_values["bucket"])
+        if "dynamodb_table" in config_values:
             get_or_create_ddb_table(
-                backend_default_config["dynamodb_table"],
-                region=backend_default_config["region"],
+                config_values["dynamodb_table"],
+                region=config_values["region"],
             )
 
-    result = TF_S3_BACKEND_CONFIG.replace("<config_options>", config_options)
+    result = TF_S3_BACKEND_CONFIG.replace("<config_options>", config_string)
     return result
 
@@ -349,36 +332,20 @@ def generate_remote_state_config() -> str:
                         workspace = f'"{workspace}"'
                     workspace = f"workspace = {workspace}"
 
-                # Set up default configs
-                remote_state_default_config = {
-                    "bucket": "tf-test-state",
-                    "key": "terraform.tfstate",
-                    "region": get_region(),
-                    "skip_credentials_validation": True,
-                    "skip_metadata_api_check": True,
-                    "secret_key": "test",
-                    "endpoints": {
-                        "s3": get_service_endpoint("s3"),
-                        "iam": get_service_endpoint("iam"),
-                        "sso": get_service_endpoint("sso"),
-                        "sts": get_service_endpoint("sts"),
-                    },
-                }
-
-                config_options = _generate_s3_backend_config(backend_config, remote_state_default_config)
+                _, config_str = _generate_s3_backend_config(backend_config)
 
                 # Create the final config
                 remote_state_config = TF_REMOTE_STATE_CONFIG.replace(
                     "<data_name>", data_name
                 ) \
-                    .replace("<config_options>", config_options) \
+                    .replace("<config_options>", config_str) \
                     .replace("<workspace>", workspace)
                 result += remote_state_config
 
     return result
 
 
-def _generate_s3_backend_config(backend_config: Dict, default_config: Dict) -> str:
+def _generate_s3_backend_config(backend_config: Dict) -> Tuple[Dict, str]:
     is_tf_legacy = TF_VERSION < version.Version("1.6")
     legacy_endpoint_mappings = {
         "endpoint": "s3",
@@ -387,6 +354,23 @@ def _generate_s3_backend_config(backend_config: Dict) -> Tuple[Dict, str]:
         "dynamodb_endpoint": "dynamodb",
     }
 
+    # Set up default config
+    default_config = {
+        "bucket": "tf-test-state",
+        "key": "terraform.tfstate",
+        "region": get_region(),
+        "skip_credentials_validation": True,
+        "skip_metadata_api_check": True,
+        "secret_key": "test",
+        "endpoints": {
+            "s3": get_service_endpoint("s3"),
+            "iam": get_service_endpoint("iam"),
+            "sso": get_service_endpoint("sso"),
+            "sts": get_service_endpoint("sts"),
+            "dynamodb": get_service_endpoint("dynamodb"),
+        },
+    }
+
     # Merge in legacy endpoint configs if not existing already
     if is_tf_legacy and backend_config.get("endpoints"):
         print(

From cc583b62803e8a7b81c560529bab00584729cbad Mon Sep 17 00:00:00 2001
From: Benjamin Simon
Date: Thu, 22 May 2025 01:49:54 +0200
Subject: [PATCH 15/16] bump

---
 README.md | 1 +
 setup.cfg | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 8543059..614daab 100644
--- a/README.md
+++ b/README.md
@@ -77,6 +77,7 @@ ADDITIONAL_TF_OVERRIDE_LOCATIONS=/path/to/module1,path/to/module2 tflocal plan
 
 ## Change Log
 
+* v0.23.0: Add support for `terraform_remote_state` with `s3` backend to read the state stored in local S3 backend; fix S3 backend config detection with multiple Terraform blocks
 * v0.22.0: Fix S3 backend forcing DynamoDB State Lock to be enabled by default
 * v0.21.0: Add ability to drop an override file in additional locations
 * v0.20.1: Fix list config rendering
diff --git a/setup.cfg b/setup.cfg
index a568975..dad1bb1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = terraform-local
-version = 0.22.0
+version = 0.23.0
 url = https://github.com/localstack/terraform-local
 author = LocalStack Team
 author_email = info@localstack.cloud

From b5280f7d69fbdbd83c8be0929a98f05fc16b3c83 Mon Sep 17 00:00:00 2001
From: Benjamin Simon
Date: Thu, 22 May 2025 02:12:09 +0200
Subject: [PATCH 16/16] bump + fix return values

---
 bin/tflocal | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/bin/tflocal b/bin/tflocal
index fbe1c97..ac4a386 100755
--- a/bin/tflocal
+++ b/bin/tflocal
@@ -410,14 +410,14 @@ def _generate_s3_backend_config(backend_config: Dict) -> Tuple[Dict, str]:
     # Update with user-provided configs
     default_config.update(backend_config)
 
     # Generate config string
-    config_options = ""
+    config_string = ""
     for key, value in sorted(default_config.items()):
         if isinstance(value, bool):
             value = str(value).lower()
         elif isinstance(value, dict):
             if key == "endpoints" and is_tf_legacy:
                 for legacy_endpoint, endpoint in legacy_endpoint_mappings.items():
-                    config_options += f'\n    {legacy_endpoint} = "{default_config[key][endpoint]}"'
+                    config_string += f'\n    {legacy_endpoint} = "{default_config[key][endpoint]}"'
                 continue
             else:
                 joined_values = "\n".join([f'  {k} = "{v}"' for k, v in value.items()])
                 value = textwrap.indent(
                     text=f"{key} = {{\n{joined_values}\n}}",
                     prefix=" " * 4,
                 )
-                config_options += f"\n{value}"
+                config_string += f"\n{value}"
                 continue
         elif isinstance(value, list):
             # TODO this will break if it's a list of dicts or other complex object
             # this serialization logic should probably be moved to a separate recursive function
             as_string = [f'"{item}"' for item in value]
             value = f"[{', '.join(as_string)}]"
         else:
             value = f'"{str(value)}"'
-        config_options += f"\n    {key} = {value}"
+        config_string += f"\n    {key} = {value}"
 
-    return config_options
+    return default_config, config_string
 
 
 def check_override_file(providers_file: str) -> None:
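Note, as a hedged end-to-end sketch of what this series ships, with illustrative names: a producer stack stores its state in the local S3 backend, and a consumer stack reads the outputs back through `terraform_remote_state`:

    data "terraform_remote_state" "producer" {
      backend   = "s3"
      workspace = "default"
      config = {
        bucket = "tf-test-state"
        key    = "terraform.tfstate"
        region = "us-east-1"
      }
    }

Running `tflocal init` and `tflocal apply` in each stack generates `localstack_providers_override.tf` alongside the user's files, pointing the provider, the `s3` backend, and every matching `terraform_remote_state` data source at the LocalStack edge endpoint (http://localhost:4566 by default); `check_override_file` aborts if such an override file already exists.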