
Commit c4179d3

change to datadog_managed tag and don't include tags
1 parent 9c67779 commit c4179d3
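Every file in this commit makes the same two edits to its datadog_monitor resources: include_tags flips from true to false, and the datadog_critical tag is replaced by datadog_managed in each query's by {...} grouping. A condensed sketch of the resulting resource shape, assembled from the aws/alb hunks below (attributes the commit does not touch are elided):

resource "datadog_monitor" "http_5xx_responses" {
  # ... unchanged attributes elided ...
  include_tags = false # was true before this commit

  # grouping now uses datadog_managed instead of datadog_critical
  query = <<END
  min(${var.http_5xx_responses_evaluation_window}):
    default(avg:aws.applicationelb.httpcode_elb_5xx${local.query_filter} by {aws_account,env,datadog_managed,loadbalancer,region}.as_rate(), 0) / (
    default(avg:aws.applicationelb.request_count${local.query_filter} by {aws_account,env,datadog_managed,loadbalancer,region}.as_rate(), 1)
  ) * 100 > ${var.http_5xx_responses_threshold_critical}
END
}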

File tree

14 files changed: +125 −125 lines

aws/alb/main.tf

Lines changed: 12 additions & 12 deletions
@@ -12,7 +12,7 @@ resource "datadog_monitor" "http_5xx_responses" {
   count = var.http_5xx_responses_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ALB 5xx Responses - {{loadbalancer.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -27,8 +27,8 @@ resource "datadog_monitor" "http_5xx_responses" {
 
   query = <<END
   min(${var.http_5xx_responses_evaluation_window}):
-    default(avg:aws.applicationelb.httpcode_elb_5xx${local.query_filter} by {aws_account,env,datadog_critical,loadbalancer,region}.as_rate(), 0) / (
-    default(avg:aws.applicationelb.request_count${local.query_filter} by {aws_account,env,datadog_critical,loadbalancer,region}.as_rate(), 1)
+    default(avg:aws.applicationelb.httpcode_elb_5xx${local.query_filter} by {aws_account,env,datadog_managed,loadbalancer,region}.as_rate(), 0) / (
+    default(avg:aws.applicationelb.request_count${local.query_filter} by {aws_account,env,datadog_managed,loadbalancer,region}.as_rate(), 1)
   ) * 100 > ${var.http_5xx_responses_threshold_critical}
 END
 
@@ -42,7 +42,7 @@ resource "datadog_monitor" "http_5xx_tg_responses" {
   count = var.http_5xx_tg_responses_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ALB Target Group 5xx Responses - {{loadbalancer.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -57,8 +57,8 @@ resource "datadog_monitor" "http_5xx_tg_responses" {
 
   query = <<END
   min(${var.http_5xx_tg_responses_evaluation_window}):
-    default(avg:aws.applicationelb.httpcode_elb_5xx${local.query_filter} by {loadbalancer,region,aws_account,targetgroup,env,datadog_critical}.as_rate(), 0) / (
-    default(avg:aws.applicationelb.request_count${local.query_filter} by {loadbalancer,region,aws_account,targetgroup,env,datadog_critical}.as_rate(), 1)
+    default(avg:aws.applicationelb.httpcode_elb_5xx${local.query_filter} by {loadbalancer,region,aws_account,targetgroup,env,datadog_managed}.as_rate(), 0) / (
+    default(avg:aws.applicationelb.request_count${local.query_filter} by {loadbalancer,region,aws_account,targetgroup,env,datadog_managed}.as_rate(), 1)
   ) * 100 > ${var.http_5xx_tg_responses_threshold_critical}
 END
 
@@ -73,7 +73,7 @@ resource "datadog_monitor" "latency" {
   count = var.latency_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ALB latency - {{loadbalancer.name}} {{value}}s ", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -88,7 +88,7 @@ resource "datadog_monitor" "latency" {
 
   query = <<END
   avg(${var.latency_evaluation_window}):
-    default(avg:aws.applicationelb.target_response_time.average${local.query_filter} by {aws_account,env,datadog_critical,loadbalancer,region}, 0
+    default(avg:aws.applicationelb.target_response_time.average${local.query_filter} by {aws_account,env,datadog_managed,loadbalancer,region}, 0
   ) > ${var.latency_threshold_critical}
 END
 
@@ -102,7 +102,7 @@ resource "datadog_monitor" "no_healthy_instances" {
   count = var.no_healthy_instances_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ALB available healthy instances - {{loadbalancer.name}} {{value}}%", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -117,9 +117,9 @@ resource "datadog_monitor" "no_healthy_instances" {
 
   query = <<END
   min(${var.no_healthy_instances_evaluation_window}): (
-    sum:aws.applicationelb.healthy_host_count.minimum${local.query_filter} by {aws_account,env,datadog_critical,region,loadbalancer} / (
-    sum:aws.applicationelb.healthy_host_count.minimum${local.query_filter} by {aws_account,env,datadog_critical,region,loadbalancer} +
-    sum:aws.applicationelb.un_healthy_host_count.maximum${local.query_filter} by {aws_account,env,datadog_critical,region,loadbalancer} )
+    sum:aws.applicationelb.healthy_host_count.minimum${local.query_filter} by {aws_account,env,datadog_managed,region,loadbalancer} / (
+    sum:aws.applicationelb.healthy_host_count.minimum${local.query_filter} by {aws_account,env,datadog_managed,region,loadbalancer} +
+    sum:aws.applicationelb.un_healthy_host_count.maximum${local.query_filter} by {aws_account,env,datadog_managed,region,loadbalancer} )
   ) * 100 <= ${var.no_healthy_instances_threshold_critical}
 END

aws/apigateway/main.tf

Lines changed: 5 additions & 5 deletions
@@ -12,7 +12,7 @@ resource "datadog_monitor" "http_5xx_responses" {
   count = var.http_5xx_responses_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "API Gateway 5xx Responses - {{apiname.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -27,8 +27,8 @@ resource "datadog_monitor" "http_5xx_responses" {
 
   query = <<END
   min(${var.http_5xx_responses_evaluation_window}):
-    default(avg:aws.apigateway.5xxerror{${local.query_filter}} by {stage,apiname,region,aws_account,env,datadog_critical}.as_rate(), 0) / (
-    default(avg:aws.apigateway.count{${local.query_filter}} by {stage,apiname,region,aws_account,env,datadog_critical}.as_rate(), 1)
+    default(avg:aws.apigateway.5xxerror{${local.query_filter}} by {stage,apiname,region,aws_account,env,datadog_managed}.as_rate(), 0) / (
+    default(avg:aws.apigateway.count{${local.query_filter}} by {stage,apiname,region,aws_account,env,datadog_managed}.as_rate(), 1)
   ) * 100 > ${var.http_5xx_responses_threshold_critical}
 END
 
@@ -42,7 +42,7 @@ resource "datadog_monitor" "latency" {
   count = var.latency_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "API Gateway latency - {{apiname.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -57,7 +57,7 @@ resource "datadog_monitor" "latency" {
 
   query = <<END
   avg(${var.latency_evaluation_window}):
-    default(avg:aws.apigateway.latency{${local.query_filter}} by {stage,apiname,region,aws_account,env,datadog_critical}, 0)
+    default(avg:aws.apigateway.latency{${local.query_filter}} by {stage,apiname,region,aws_account,env,datadog_managed}, 0)
   ) > ${var.latency_threshold_critical}
 END

aws/beanstalk/main.tf

Lines changed: 9 additions & 9 deletions
@@ -25,7 +25,7 @@ resource "datadog_monitor" "health" {
   count = var.health_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "Beanstalk Health Events - {{environmentname.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "metric alert"
@@ -40,7 +40,7 @@ resource "datadog_monitor" "health" {
 
   query = <<END
   min(${var.health_evaluation_window}):
-    min:aws.elasticbeanstalk.environment_health${local.query_filter} by {environmentname,region,aws_account,env,datadog_critical}
+    min:aws.elasticbeanstalk.environment_health${local.query_filter} by {environmentname,region,aws_account,env,datadog_managed}
   >= ${var.health_threshold_critical}
 END
 
@@ -54,7 +54,7 @@ resource "datadog_monitor" "http_5xx_responses" {
   count = var.http_5xx_responses_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ALB 5xx Responses - {{environmentname.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -69,8 +69,8 @@ resource "datadog_monitor" "http_5xx_responses" {
 
   query = <<END
   min(${var.http_5xx_responses_evaluation_window}):(
-    default(sum:aws.elasticbeanstalk.application_requests_5xx${local.query_filter} by {environmentname,region,aws_account,env,datadog_critical}.as_rate(), 0) /
-    default(sum:aws.elasticbeanstalk.application_requests_total${local.query_filter} by {environmentname,region,aws_account,env,datadog_critical}.as_rate(), 1)
+    default(sum:aws.elasticbeanstalk.application_requests_5xx${local.query_filter} by {environmentname,region,aws_account,env,datadog_managed}.as_rate(), 0) /
+    default(sum:aws.elasticbeanstalk.application_requests_total${local.query_filter} by {environmentname,region,aws_account,env,datadog_managed}.as_rate(), 1)
   ) * 100 > ${var.http_5xx_responses_threshold_critical}
 END
 
@@ -84,7 +84,7 @@ resource "datadog_monitor" "latency" {
   count = var.latency_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "Beanstalk Latency - {{environmentname.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -98,7 +98,7 @@ resource "datadog_monitor" "latency" {
   timeout_h = var.timeout_h
 
   query = <<END
-  min:${var.latency_evaluation_window}):min:aws.elasticbeanstalk.${local.latency_metric}${local.query_filter} by {environmentname,region,aws_account,env,datadog_critical}
+  min:${var.latency_evaluation_window}):min:aws.elasticbeanstalk.${local.latency_metric}${local.query_filter} by {environmentname,region,aws_account,env,datadog_managed}
   >= ${var.latency_threshold_critical}
 END
 
@@ -112,7 +112,7 @@ resource "datadog_monitor" "root_disk_usage" {
   count = var.root_disk_usage_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "Beanstalk Instance Root Disk Usage - {{environmentname.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -127,7 +127,7 @@ resource "datadog_monitor" "root_disk_usage" {
 
   query = <<END
   max:${var.latency_evaluation_window}):
-    min:aws.elasticbeanstalk.root_filesystem_util${local.query_filter} by {host,environmentname,region,aws_account,env,datadog_critical}
+    min:aws.elasticbeanstalk.root_filesystem_util${local.query_filter} by {host,environmentname,region,aws_account,env,datadog_managed}
   >= ${var.root_disk_usage_threshold_critical}
 END

aws/ec2/main.tf

Lines changed: 8 additions & 8 deletions
@@ -12,7 +12,7 @@ resource "datadog_monitor" "status_failed_check" {
   count = var.status_failed_check_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "EC2 instance status - status check failure - {{name.name}}({{instance_id.name}})", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -26,7 +26,7 @@ resource "datadog_monitor" "status_failed_check" {
 
   query = <<END
   max(${var.status_failed_check_evaluation_window}):
-    max:aws.ec2.status_check_failed${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_critical}
+    max:aws.ec2.status_check_failed${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_managed}
   >= 1
 END
 
@@ -39,7 +39,7 @@ resource "datadog_monitor" "status_failed_instance" {
   count = var.status_failed_instance_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "EC2 instance status - instance failure - {{name.name}}({{instance_id.name}})", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -53,7 +53,7 @@ resource "datadog_monitor" "status_failed_instance" {
 
   query = <<END
   max(${var.status_failed_instance_evaluation_window}):
-    max:aws.ec2.status_check_failed_instance${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_critical}
+    max:aws.ec2.status_check_failed_instance${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_managed}
   >= 1
 END
 
@@ -66,7 +66,7 @@ resource "datadog_monitor" "status_failed_system" {
   count = var.status_failed_system_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "EC2 instance status - host failure - {{name.name}}({{instance_id.name}})", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -80,7 +80,7 @@ resource "datadog_monitor" "status_failed_system" {
 
   query = <<END
   max(${var.status_failed_system_evaluation_window}):
-    max:aws.ec2.status_check_failed_system${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_critical}
+    max:aws.ec2.status_check_failed_system${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_managed}
   >= 1
 END
 
@@ -93,7 +93,7 @@ resource "datadog_monitor" "status_failed_volume" {
   count = var.status_failed_volume_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "EC2 instance status - volume failure - {{name.name}}({{instance_id.name}})", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -107,7 +107,7 @@ resource "datadog_monitor" "status_failed_volume" {
 
   query = <<END
   max(${var.status_failed_volume_evaluation_window}):
-    max:aws.ec2.status_check_failed_attached_ebs${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_critical}
+    max:aws.ec2.status_check_failed_attached_ebs${local.query_filter} by {aws_account,env,instance_id,name,region,env,datadog_managed}
   >= 1
 END

aws/ecs-cluster/main.tf

Lines changed: 8 additions & 8 deletions
@@ -13,7 +13,7 @@ resource "datadog_monitor" "agent_status" {
   count = var.agent_status_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ECS Agent disconnected - {{clustername.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "service check"
@@ -27,7 +27,7 @@ resource "datadog_monitor" "agent_status" {
   timeout_h = var.timeout_h
 
   query = <<EOQ
-  "aws.ecs.agent_connected"${local.service_filter}.by("clustername","instance_id",env,datadog_critical).last(6).count_by_status()
+  "aws.ecs.agent_connected"${local.service_filter}.by("clustername","instance_id",env,datadog_managed).last(6).count_by_status()
 EOQ
 
   monitor_thresholds {
@@ -40,7 +40,7 @@ resource "datadog_monitor" "cpu_utilization" {
   count = var.cpu_utilization_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ECS Cluster CPU Utilization - {{clustername.name}} - {{value}}%", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -55,7 +55,7 @@ resource "datadog_monitor" "cpu_utilization" {
 
   query = <<END
   min(${var.cpu_utilization_evaluation_window}):
-    avg:aws.ecs.cluster.cpuutilization${local.query_filter} by {clustername,region,aws_account,env,datadog_critical}
+    avg:aws.ecs.cluster.cpuutilization${local.query_filter} by {clustername,region,aws_account,env,datadog_managed}
   > ${var.cpu_utilization_threshold_critical}
 END
 
@@ -69,7 +69,7 @@ resource "datadog_monitor" "cpu_utilization_anomaly" {
   count = var.cpu_utilization_anomaly_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ECS cluster CPU utilization anomalous activity - {{clustername.name}}", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -84,7 +84,7 @@ resource "datadog_monitor" "cpu_utilization_anomaly" {
 
   query = <<END
   avg(${var.cpu_utilization_anomaly_evaluation_window}):anomalies(
-    avg:aws.ecs.cluster.cpuutilization${local.query_filter} by {clustername,region,aws_account,env,datadog_critical}, 'agile', ${var.cpu_utilization_anomaly_deviations},
+    avg:aws.ecs.cluster.cpuutilization${local.query_filter} by {clustername,region,aws_account,env,datadog_managed}, 'agile', ${var.cpu_utilization_anomaly_deviations},
   direction='above', count_default_zero='true', interval=${var.cpu_utilization_anomaly_rollup},
   seasonality='${var.cpu_utilization_anomaly_seasonality}'
   ) >= ${var.cpu_utilization_anomaly_threshold_critical}
@@ -105,7 +105,7 @@ resource "datadog_monitor" "memory_reservation" {
   count = var.memory_reservation_enabled ? 1 : 0
 
   name = join("", [local.title_prefix, "ECS Cluster Memory Reservation High - {{clustername.name}} - {{value}}%", local.title_suffix])
-  include_tags = true
+  include_tags = false
   message = local.query_alert_base_message
   tags = concat(local.common_tags, var.base_tags, var.additional_tags)
   type = "query alert"
@@ -120,7 +120,7 @@ resource "datadog_monitor" "memory_reservation" {
 
   query = <<END
   min(${var.memory_reservation_evaluation_window}):
-    avg:aws.ecs.cluster.memory_reservation${local.query_filter} by {clustername,region,aws_account,env,datadog_critical}
+    avg:aws.ecs.cluster.memory_reservation${local.query_filter} by {clustername,region,aws_account,env,datadog_managed}
   > ${var.memory_reservation_threshold_critical}
 END
