diff --git a/.github/workflows/self-hosted.yml b/.github/workflows/self-hosted.yml
index 24a34b6383678f..90e45d10929bd2 100644
--- a/.github/workflows/self-hosted.yml
+++ b/.github/workflows/self-hosted.yml
@@ -5,6 +5,7 @@ on:
       - master
       - releases/**
   pull_request:
+  workflow_dispatch:
 
 # Cancel in progress workflows on pull_requests.
 # https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
@@ -83,5 +84,12 @@ jobs:
             --tag ghcr.io/getsentry/sentry-self-hosted:${{ github.sha }} \
             --file self-hosted/Dockerfile \
             --build-arg SOURCE_COMMIt=${{ github.sha }} \
-            "${args[@]}" \
+            --output type=docker,dest=self-hosted-docker-image \
             .
+
+      - name: Upload docker image
+        uses: actions/upload-artifact@v4
+        with:
+          retention-days: 1
+          name: self-hosted-docker-image
+          path: "self-hosted-docker-image"
diff --git a/bin/send_metrics.py b/bin/send_metrics.py
index d01d0ccc55ebab..830b0de18158af 100644
--- a/bin/send_metrics.py
+++ b/bin/send_metrics.py
@@ -30,7 +30,7 @@ def make_counter_payload(use_case, org_id, rand_str):
         "type": "c",
         "value": 1,
         "org_id": org_id,
-        "retention_days": 90,
+        "retention_days": 730,
         "project_id": 3,
     }
 
@@ -60,7 +60,7 @@ def make_dist_payload(use_case, org_id, rand_str, value_len, b64_encode):
             }
         ),
         "org_id": org_id,
-        "retention_days": 90,
+        "retention_days": 730,
         "project_id": 3,
     }
 
@@ -93,7 +93,7 @@ def make_set_payload(use_case, org_id, rand_str, value_len, b64_encode):
             }
         ),
         "org_id": org_id,
-        "retention_days": 90,
+        "retention_days": 730,
         "project_id": 3,
     }
 
@@ -116,7 +116,7 @@ def make_gauge_payload(use_case, org_id, rand_str):
             "last": 1,
         },
         "org_id": org_id,
-        "retention_days": 90,
+        "retention_days": 730,
         "project_id": 3,
     }
diff --git a/src/sentry/api/endpoints/organization_releases.py b/src/sentry/api/endpoints/organization_releases.py
index b46c29f0049897..e8cee937bb1e6d 100644
--- a/src/sentry/api/endpoints/organization_releases.py
+++ b/src/sentry/api/endpoints/organization_releases.py
@@ -353,7 +353,7 @@ def qs_load_func(queryset, total_offset, qs_offset, limit):
                 release_versions,
                 filter_params["start"]
                 if filter_params["start"]
-                else datetime.utcnow() - timedelta(days=90),
+                else datetime.utcnow() - timedelta(days=730),
                 filter_params["end"] if filter_params["end"] else datetime.utcnow(),
             )
             valid_versions = [
diff --git a/src/sentry/api/endpoints/organization_transaction_anomaly_detection.py b/src/sentry/api/endpoints/organization_transaction_anomaly_detection.py
index d1e5cc2048087d..3d73d142f47cfc 100644
--- a/src/sentry/api/endpoints/organization_transaction_anomaly_detection.py
+++ b/src/sentry/api/endpoints/organization_transaction_anomaly_detection.py
@@ -54,7 +54,7 @@ def get_time_params(start: datetime, end: datetime) -> MappedParams:
     anomaly_detection_range = end - start
 
     if anomaly_detection_range > timedelta(days=14):
-        snuba_range = timedelta(days=90)
+        snuba_range = timedelta(days=730)
         granularity = 3600
 
     elif anomaly_detection_range > timedelta(days=1):
@@ -67,7 +67,7 @@ def get_time_params(start: datetime, end: datetime) -> MappedParams:
     additional_time_needed = snuba_range - anomaly_detection_range
     now = datetime.now(timezone.utc)
-    start_limit = now - timedelta(days=90)
+    start_limit = now - timedelta(days=730)
     end_limit = now
     start = max(start, start_limit)
     end = min(end, end_limit)
@@ -77,7 +77,7 @@ def get_time_params(start: datetime, end: datetime) -> MappedParams:
     # If window will go back farther than 90 days, use today - 90 as start
     if start - window_increase < start_limit:
-        query_start = now - timedelta(days=90)
+        query_start = now - timedelta(days=730)
         additional_time_needed -= start - query_start
         window_increase = additional_time_needed
     # If window extends beyond today, use today as end
diff --git a/src/sentry/api/endpoints/project_details.py b/src/sentry/api/endpoints/project_details.py
index b8c5318152eef1..76181bcaa26220 100644
--- a/src/sentry/api/endpoints/project_details.py
+++ b/src/sentry/api/endpoints/project_details.py
@@ -376,7 +376,7 @@ def validate_secondaryGroupingExpiry(self, value):
                 "Grouping expiry must be sometime within the next 90 days and not in the past. Perhaps you specified the timestamp not in seconds?"
             )
 
-        max_expiry_date = now + (91 * 24 * 3600)
+        max_expiry_date = now + (731 * 24 * 3600)
         if value > max_expiry_date:
             value = max_expiry_date
 
diff --git a/src/sentry/api/endpoints/team_groups_old.py b/src/sentry/api/endpoints/team_groups_old.py
index 529f87a5a275c8..e8f6645ecfc1f2 100644
--- a/src/sentry/api/endpoints/team_groups_old.py
+++ b/src/sentry/api/endpoints/team_groups_old.py
@@ -35,7 +35,7 @@ def get(self, request: Request, team) -> Response:
             .filter(
                 group_environment_filter,
                 status=GroupStatus.UNRESOLVED,
-                last_seen__gt=datetime.now(UTC) - timedelta(days=90),
+                last_seen__gt=datetime.now(UTC) - timedelta(days=730),
             )
             .order_by("first_seen")[:limit]
         )
diff --git a/src/sentry/api/endpoints/team_unresolved_issue_age.py b/src/sentry/api/endpoints/team_unresolved_issue_age.py
index 82bd68afca803a..d7c29e1f28c659 100644
--- a/src/sentry/api/endpoints/team_unresolved_issue_age.py
+++ b/src/sentry/api/endpoints/team_unresolved_issue_age.py
@@ -52,7 +52,7 @@ def get(self, request: Request, team: Team) -> Response:
            .filter(
                 group_environment_filter,
                 status=GroupStatus.UNRESOLVED,
-                last_seen__gt=datetime.now(UTC) - timedelta(days=90),
+                last_seen__gt=datetime.now(UTC) - timedelta(days=730),
             )
             .annotate(
                 bucket=Case(
diff --git a/src/sentry/api/serializers/models/group.py b/src/sentry/api/serializers/models/group.py
index 90caf296a36ba8..fa584ccc69dd08 100644
--- a/src/sentry/api/serializers/models/group.py
+++ b/src/sentry/api/serializers/models/group.py
@@ -548,7 +548,7 @@ def _get_start_from_seen_stats(seen_stats: Mapping[Group, SeenStats] | None):
 
         return max(
             min(last_seen - timedelta(days=1), datetime.now(timezone.utc) - timedelta(days=14)),
-            datetime.now(timezone.utc) - timedelta(days=90),
+            datetime.now(timezone.utc) - timedelta(days=730),
         )
 
     @staticmethod
diff --git a/src/sentry/api/utils.py b/src/sentry/api/utils.py
index 72d5b274187d4b..029b0f920a70d8 100644
--- a/src/sentry/api/utils.py
+++ b/src/sentry/api/utils.py
@@ -63,7 +63,7 @@
 logger = logging.getLogger(__name__)
 
-MAX_STATS_PERIOD = timedelta(days=90)
+MAX_STATS_PERIOD = timedelta(days=730)
 
 
 def get_datetime_from_stats_period(
diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py
index b1545d48ec3e57..cfa04ac7b79420 100644
--- a/src/sentry/conf/server.py
+++ b/src/sentry/conf/server.py
@@ -1720,7 +1720,7 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
     # (time in seconds, samples to keep)
     (10, 360),  # 60 minutes at 10 seconds
     (3600, 24 * 7),  # 7 days at 1 hour
-    (3600 * 24, 90),  # 90 days at 1 day
+    (3600 * 24, 730),  # 730 days at 1 day
 )
 
 # Internal metrics
diff --git a/src/sentry/eventstore/snuba/backend.py b/src/sentry/eventstore/snuba/backend.py
index 6b994253430ee6..587479d14d2da4 100644
--- a/src/sentry/eventstore/snuba/backend.py
+++ b/src/sentry/eventstore/snuba/backend.py
@@ -428,7 +428,7 @@ def get_adjacent_event_ids(self, event, filter):
         prev_filter.conditions.extend(get_before_event_condition(event))
         # We only store 90 days of data, add a few extra days just in case
-        prev_filter.start = event.datetime - timedelta(days=100)
+        prev_filter.start = event.datetime - timedelta(days=740)
         # the previous event can have the same timestamp, add 1 second
         # to the end condition since it uses a less than condition
         prev_filter.end = event.datetime + timedelta(seconds=1)
diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py
index bd6fa7d9549c95..437db6ac61518d 100644
--- a/src/sentry/incidents/logic.py
+++ b/src/sentry/incidents/logic.py
@@ -393,7 +393,7 @@ def calculate_incident_time_range(incident, start=None, end=None, windowed_stats
         start = end - timedelta(seconds=time_window * WINDOWED_STATS_DATA_POINTS)
 
-    retention = quotas.get_event_retention(organization=incident.organization) or 90
+    retention = quotas.get_event_retention(organization=incident.organization) or 730
     start = max(
         start.replace(tzinfo=timezone.utc),
         datetime.now(timezone.utc) - timedelta(days=retention),
diff --git a/src/sentry/ingest/transaction_clusterer/rules.py b/src/sentry/ingest/transaction_clusterer/rules.py
index 3b6f84b79f02e4..39f3908d54956b 100644
--- a/src/sentry/ingest/transaction_clusterer/rules.py
+++ b/src/sentry/ingest/transaction_clusterer/rules.py
@@ -16,7 +16,7 @@
 RuleSet = Mapping[ReplacementRule, int]
 
 #: How long a transaction name rule lasts, in seconds.
-TRANSACTION_NAME_RULE_TTL_SECS = 90 * 24 * 60 * 60  # 90 days
+TRANSACTION_NAME_RULE_TTL_SECS = 730 * 24 * 60 * 60  # 730 days
 
 
 class RuleStore(Protocol):
diff --git a/src/sentry/integrations/slack/unfurl/discover.py b/src/sentry/integrations/slack/unfurl/discover.py
index f4fe6d71edfe2f..41f4d63a414f21 100644
--- a/src/sentry/integrations/slack/unfurl/discover.py
+++ b/src/sentry/integrations/slack/unfurl/discover.py
@@ -200,7 +200,7 @@ def unfurl_discover(
     else:
         interval = saved_query.get("interval")
         validated_interval = None
-        delta = timedelta(days=90)
+        delta = timedelta(days=730)
         if "statsPeriod" in params:
             if (parsed_period := parse_stats_period(params["statsPeriod"])) is not None:
                 delta = parsed_period
diff --git a/src/sentry/integrations/time_utils.py b/src/sentry/integrations/time_utils.py
index 39ef6fadfe73f7..373f2416864ef8 100644
--- a/src/sentry/integrations/time_utils.py
+++ b/src/sentry/integrations/time_utils.py
@@ -40,7 +40,7 @@ def time_since(value: datetime):
 
 
 def get_relative_time(
-    anchor: int, relative_days: int, retention_days: int = 90
+    anchor: int, relative_days: int, retention_days: int = 730
 ) -> Mapping[str, datetime]:
     max_time = time.time()
     min_time = max_time - retention_days * DAY_IN_SEC
diff --git a/src/sentry/issues/endpoints/group_events.py b/src/sentry/issues/endpoints/group_events.py
index ba559d1ea1b9c7..40c4db33526451 100644
--- a/src/sentry/issues/endpoints/group_events.py
+++ b/src/sentry/issues/endpoints/group_events.py
@@ -91,7 +91,7 @@ def _get_events_snuba(
         end: datetime | None,
     ) -> Response:
         default_end = timezone.now()
-        default_start = default_end - timedelta(days=90)
+        default_start = default_end - timedelta(days=730)
         params: ParamsType = {
             "project_id": [group.project_id],
             "organization_id": group.project.organization_id,
diff --git a/src/sentry/issues/issue_velocity.py b/src/sentry/issues/issue_velocity.py
index 015cb332e2d00f..eb7e6f0c7ec3b5 100644
--- a/src/sentry/issues/issue_velocity.py
+++ b/src/sentry/issues/issue_velocity.py
@@ -58,7 +58,7 @@ def calculate_threshold(project: Project) -> float | None:
     now = datetime.now()
     one_hour_ago = now - timedelta(hours=1)
     one_week_ago = now - timedelta(days=7)
-    ninety_days_ago = now - timedelta(days=90)
+    ninety_days_ago = now - timedelta(days=730)
 
     subquery = Query(
         match=Entity(EntityKey.Events.value),
diff --git a/src/sentry/profiles/task.py b/src/sentry/profiles/task.py
index 3ab17e66c4e00e..a8f544a3f42048 100644
--- a/src/sentry/profiles/task.py
+++ b/src/sentry/profiles/task.py
@@ -345,7 +345,7 @@ def _normalize_profile(profile: Profile, organization: Organization, project: Pr
 
 @metrics.wraps("process_profile.normalize")
 def _normalize(profile: Profile, organization: Organization) -> None:
-    profile["retention_days"] = quotas.backend.get_event_retention(organization=organization) or 90
+    profile["retention_days"] = quotas.backend.get_event_retention(organization=organization) or 730
 
     platform = profile["platform"]
     version = profile.get("version")
diff --git a/src/sentry/release_health/base.py b/src/sentry/release_health/base.py
index d9fd7530fb122b..f686c4ec67c595 100644
--- a/src/sentry/release_health/base.py
+++ b/src/sentry/release_health/base.py
@@ -109,6 +109,7 @@ class SessionsQueryResult(TypedDict):
     "14d",
     "30d",
     "90d",
+    "730d",
 ]
 
 OverviewStat = Literal["users", "sessions"]
diff --git a/src/sentry/release_health/metrics.py b/src/sentry/release_health/metrics.py
index 0c3b0e7e67e7a3..7895aff0477ee6 100644
--- a/src/sentry/release_health/metrics.py
+++ b/src/sentry/release_health/metrics.py
@@ -538,7 +538,7 @@ def check_has_health_data(
         if now is None:
             now = datetime.now(timezone.utc)
-        start = now - timedelta(days=90)
+        start = now - timedelta(days=730)
 
         projects_list = list(projects_list)
 
@@ -1264,7 +1264,7 @@ def get_oldest_health_data_for_releases(
         now = datetime.now(timezone.utc)
 
         # TODO: assumption about retention?
-        start = now - timedelta(days=90)
+        start = now - timedelta(days=730)
 
         project_ids = [proj_id for proj_id, _release in project_releases]
         projects, org_id = self._get_projects_and_org_id(project_ids)
diff --git a/src/sentry/replays/lib/event_linking.py b/src/sentry/replays/lib/event_linking.py
index 3f361458602a70..bb9b0f20f1c3b5 100644
--- a/src/sentry/replays/lib/event_linking.py
+++ b/src/sentry/replays/lib/event_linking.py
@@ -130,7 +130,7 @@ def _make_json_binary_payload() -> PayloadUnionType:
         "replay_id": replay_id,
         "project_id": event.project.id,
         "segment_id": None,
-        "retention_days": 90,
+        "retention_days": 730,
         "payload": _make_json_binary_payload(),
     }
 
diff --git a/src/sentry/replays/usecases/events.py b/src/sentry/replays/usecases/events.py
index a670bb3c5eb9c2..a76dc1754a94e3 100644
--- a/src/sentry/replays/usecases/events.py
+++ b/src/sentry/replays/usecases/events.py
@@ -49,7 +49,7 @@ def _replay_event(project_id: int, replay_id: str, event: dict[str, Any]) -> str
             "replay_id": replay_id,
             "project_id": project_id,
             "segment_id": None,
-            "retention_days": 90,
+            "retention_days": 730,
             "payload": list(json.dumps(event).encode()),
         }
     )
diff --git a/src/sentry/replays/usecases/reader.py b/src/sentry/replays/usecases/reader.py
index 507dbd7583c8e6..01a20489a2e786 100644
--- a/src/sentry/replays/usecases/reader.py
+++ b/src/sentry/replays/usecases/reader.py
@@ -184,7 +184,7 @@ def has_archived_segment(project_id: int, replay_id: str) -> bool:
                 # We request the full 90 day range. This is effectively an unbounded timestamp
                 # range.
                 Condition(Column("timestamp"), Op.LT, datetime.now()),
-                Condition(Column("timestamp"), Op.GTE, datetime.now() - timedelta(days=90)),
+                Condition(Column("timestamp"), Op.GTE, datetime.now() - timedelta(days=730)),
             ],
             granularity=Granularity(3600),
         ),
@@ -218,7 +218,7 @@ def _fetch_segments_from_snuba(
                 # We request the full 90 day range. This is effectively an unbounded timestamp
                 # range.
                 Condition(Column("timestamp"), Op.LT, datetime.now()),
-                Condition(Column("timestamp"), Op.GTE, datetime.now() - timedelta(days=90)),
+                Condition(Column("timestamp"), Op.GTE, datetime.now() - timedelta(days=730)),
                 # Used to dynamically pass the "segment_id" condition for details requests.
                 *conditions,
             ],
diff --git a/src/sentry/search/events/builder/discover.py b/src/sentry/search/events/builder/discover.py
index 6738ca457f26f3..9283a8a1b25c18 100644
--- a/src/sentry/search/events/builder/discover.py
+++ b/src/sentry/search/events/builder/discover.py
@@ -1003,7 +1003,7 @@ def custom_measurement_map(self) -> list[MetricMeta]:
             result: list[MetricMeta] = get_custom_measurements(
                 project_ids=self.params.project_ids,
                 organization_id=self.organization_id,
-                start=datetime.today() - timedelta(days=90),
+                start=datetime.today() - timedelta(days=730),
                 end=datetime.today(),
             )
         # Don't fully fail if we can't get the CM, but still capture the exception
diff --git a/src/sentry/search/snuba/executors.py b/src/sentry/search/snuba/executors.py
index a7951de7a7d4f0..cfb76462e1eb06 100644
--- a/src/sentry/search/snuba/executors.py
+++ b/src/sentry/search/snuba/executors.py
@@ -812,7 +812,7 @@ def query(
         # retention date, which may be closer than 90 days in the past, but
         # apparently `retention_window_start` can be None(?), so we need a
         # fallback.
-        retention_date = max(_f for _f in [retention_window_start, now - timedelta(days=90)] if _f)
+        retention_date = max(_f for _f in [retention_window_start, now - timedelta(days=730)] if _f)
         start_params = [
             date_from,
             retention_date,
@@ -1565,7 +1565,7 @@ def calculate_start_end(
     if not end:
         end = now + ALLOWED_FUTURE_DELTA
 
-    retention_date = max(_f for _f in [retention_window_start, now - timedelta(days=90)] if _f)
+    retention_date = max(_f for _f in [retention_window_start, now - timedelta(days=730)] if _f)
     start_params = [date_from, retention_date, get_search_filter(search_filters, "date", ">")]
     start = max(_f for _f in start_params if _f)
     end = max([retention_date, end])
diff --git a/src/sentry/sentry_metrics/aggregation_option_registry.py b/src/sentry/sentry_metrics/aggregation_option_registry.py
index d302036f945dc2..cc02538b7f307b 100644
--- a/src/sentry/sentry_metrics/aggregation_option_registry.py
+++ b/src/sentry/sentry_metrics/aggregation_option_registry.py
@@ -16,11 +16,12 @@ class TimeWindow(Enum):
     FOURTEEN_DAYS = "14d"
     THIRTY_DAYS = "30d"
     NINETY_DAYS = "90d"
+    TWO_YEARS = "730d"
 
 
 METRIC_ID_AGG_OPTION = {
-    "d:transactions/measurements.fcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
-    "d:transactions/measurements.lcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
+    "d:transactions/measurements.fcp@millisecond": {AggregationOption.HIST: TimeWindow.TWO_YEARS},
+    "d:transactions/measurements.lcp@millisecond": {AggregationOption.HIST: TimeWindow.TWO_YEARS},
     "d:spans/webvital.inp@millisecond": None,
 }
 
@@ -37,7 +38,7 @@ def get_aggregation_options(mri: str) -> dict[AggregationOption, TimeWindow] | N
         return METRIC_ID_AGG_OPTION[mri]
     # Then move to use case-level disabled percentiles
     elif use_case_id.value in options.get("sentry-metrics.drop-percentiles.per-use-case"):
-        return {AggregationOption.DISABLE_PERCENTILES: TimeWindow.NINETY_DAYS}
+        return {AggregationOption.DISABLE_PERCENTILES: TimeWindow.TWO_YEARS}
     # And finally 10s granularity if none of the above apply for custom
     elif (use_case_id == UseCaseID.CUSTOM) and options.get("sentry-metrics.10s-granularity"):
         return {AggregationOption.TEN_SECOND: TimeWindow.SEVEN_DAYS}
diff --git a/src/sentry/sentry_metrics/client/kafka.py b/src/sentry/sentry_metrics/client/kafka.py
index 9e9a445f1262ad..0279eb77ca207f 100644
--- a/src/sentry/sentry_metrics/client/kafka.py
+++ b/src/sentry/sentry_metrics/client/kafka.py
@@ -33,7 +33,7 @@ def get_retention_from_org_id(org_id: int) -> int:
     else:
         # the default in Snuba is 90 days, and if there is no
         # org-configured retention stored, we use that default
-        retention = quotas.backend.get_event_retention(organization=org_id) or 90
+        retention = quotas.backend.get_event_retention(organization=org_id) or 730
         cache.set(cache_key, retention)
 
         return retention
diff --git a/src/sentry/sentry_metrics/client/snuba.py b/src/sentry/sentry_metrics/client/snuba.py
index 7b4780e1268560..b9026098621296 100644
--- a/src/sentry/sentry_metrics/client/snuba.py
+++ b/src/sentry/sentry_metrics/client/snuba.py
@@ -26,7 +26,7 @@ def get_retention_from_org_id(org_id: int) -> int:
     else:
         # the default in Snuba is 90 days, and if there is no
         # org-configured retention stored, we use that default
-        retention = quotas.backend.get_event_retention(organization=org_id) or 90
+        retention = quotas.backend.get_event_retention(organization=org_id) or 730
         cache.set(cache_key, retention)
 
         return retention
diff --git a/src/sentry/sentry_metrics/consumers/indexer/batch.py b/src/sentry/sentry_metrics/consumers/indexer/batch.py
index 7dcb54b97300c3..02b5819b4d8aee 100644
--- a/src/sentry/sentry_metrics/consumers/indexer/batch.py
+++ b/src/sentry/sentry_metrics/consumers/indexer/batch.py
@@ -457,7 +457,7 @@ def reconstruct_messages(
                 new_payload_v1: Metric = {
                     "tags": cast(dict[str, int], new_tags),
                     # XXX: relay actually sends this value unconditionally
-                    "retention_days": old_payload_value.get("retention_days", 90),
+                    "retention_days": old_payload_value.get("retention_days", 730),
                     "mapping_meta": output_message_meta,
                     "use_case_id": old_payload_value["use_case_id"].value,
                     "metric_id": numeric_metric_id,
@@ -477,7 +477,7 @@ def reconstruct_messages(
                 new_payload_v2: GenericMetric = {
                     "tags": cast(dict[str, str], new_tags),
                     "version": 2,
-                    "retention_days": old_payload_value.get("retention_days", 90),
+                    "retention_days": old_payload_value.get("retention_days", 730),
                     "mapping_meta": output_message_meta,
                     "use_case_id": old_payload_value["use_case_id"].value,
                     "metric_id": numeric_metric_id,
diff --git a/src/sentry/sentry_metrics/indexer/postgres/models.py b/src/sentry/sentry_metrics/indexer/postgres/models.py
index 7575951f74c3a5..298869731e070b 100644
--- a/src/sentry/sentry_metrics/indexer/postgres/models.py
+++ b/src/sentry/sentry_metrics/indexer/postgres/models.py
@@ -50,7 +50,7 @@ class BaseIndexer(Model):
     organization_id = BoundedBigIntegerField()
     date_added = models.DateTimeField(default=timezone.now)
     last_seen = models.DateTimeField(default=timezone.now, db_index=True)
-    retention_days = models.IntegerField(default=90)
+    retention_days = models.IntegerField(default=730)
 
     objects: ClassVar[BaseManager[Self]] = BaseManager(
         cache_fields=("pk",), cache_ttl=settings.SENTRY_METRICS_INDEXER_CACHE_TTL
diff --git a/src/sentry/snuba/metrics_layer/query.py b/src/sentry/snuba/metrics_layer/query.py
index 72fd7df6ac732c..038cdc64616567 100644
--- a/src/sentry/snuba/metrics_layer/query.py
+++ b/src/sentry/snuba/metrics_layer/query.py
@@ -586,7 +586,7 @@ def _query_meta_table(
         Condition(Column("org_id"), Op.EQ, org_id),
         Condition(Column("project_id"), Op.IN, project_ids),
         Condition(Column("use_case_id"), Op.EQ, use_case_id.value),
-        Condition(Column("timestamp"), Op.GTE, datetime.now(UTC) - timedelta(days=90)),
+        Condition(Column("timestamp"), Op.GTE, datetime.now(UTC) - timedelta(days=730)),
         Condition(Column("timestamp"), Op.LT, datetime.now(UTC) + timedelta(days=1)),
     ]
     if extra_condition:
@@ -674,7 +674,7 @@ def fetch_metric_tag_values(
         Condition(Column("project_id"), Op.IN, project_ids),
         Condition(Column("metric_id"), Op.EQ, metric_id),
         Condition(Column("tag_key"), Op.EQ, tag_key_id),
-        Condition(Column("timestamp"), Op.GTE, datetime.now(UTC) - timedelta(days=90)),
+        Condition(Column("timestamp"), Op.GTE, datetime.now(UTC) - timedelta(days=730)),
         Condition(Column("timestamp"), Op.LT, datetime.now(UTC) + timedelta(days=1)),
     ]
 
diff --git a/src/sentry/snuba/sessions.py b/src/sentry/snuba/sessions.py
index eeaf7bd4912b7f..1d6ad84aabd204 100644
--- a/src/sentry/snuba/sessions.py
+++ b/src/sentry/snuba/sessions.py
@@ -22,6 +22,8 @@ def _make_stats(start, rollup, buckets, default=0):
     "14d": (86400, 14),
     "30d": (86400, 30),
     "90d": (259200, 30),
+    "365d": (86400, 365),
+    "730d": (86400, 730),
 }
 
diff --git a/src/sentry/tasks/collect_project_platforms.py b/src/sentry/tasks/collect_project_platforms.py
index 71dde99188f5f2..6e0c274cce788d 100644
--- a/src/sentry/tasks/collect_project_platforms.py
+++ b/src/sentry/tasks/collect_project_platforms.py
@@ -52,4 +52,4 @@ def collect_project_platforms(paginate=1000, **kwargs):
         )
 
     # remove (likely) unused platform associations
-    ProjectPlatform.objects.filter(last_seen__lte=now - timedelta(days=90)).delete()
+    ProjectPlatform.objects.filter(last_seen__lte=now - timedelta(days=730)).delete()
diff --git a/src/sentry/tasks/embeddings_grouping/utils.py b/src/sentry/tasks/embeddings_grouping/utils.py
index c1e895a517947c..728f3af29b0e0a 100644
--- a/src/sentry/tasks/embeddings_grouping/utils.py
+++ b/src/sentry/tasks/embeddings_grouping/utils.py
@@ -119,7 +119,7 @@ def get_current_batch_groups_from_postgres(project, last_processed_index, batch_
             project_id=project.id,
            type=ErrorGroupType.type_id,
             times_seen__gt=1,
-            last_seen__gt=(datetime.now(UTC) - timedelta(days=90)),
+            last_seen__gt=(datetime.now(UTC) - timedelta(days=730)),
         )
         .exclude(status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS])
         .values_list("id", "data")
diff --git a/src/sentry/tasks/integrations/github/open_pr_comment.py b/src/sentry/tasks/integrations/github/open_pr_comment.py
index 0291aad5045037..1b21743ec4fb12 100644
--- a/src/sentry/tasks/integrations/github/open_pr_comment.py
+++ b/src/sentry/tasks/integrations/github/open_pr_comment.py
@@ -289,7 +289,7 @@ def get_top_5_issues_by_count_for_file(
     group_ids = list(
         Group.objects.filter(
-            first_seen__gte=datetime.now(UTC) - timedelta(days=90),
+            first_seen__gte=datetime.now(UTC) - timedelta(days=730),
             last_seen__gte=datetime.now(UTC) - timedelta(days=14),
             status=GroupStatus.UNRESOLVED,
             project__in=projects,
diff --git a/src/sentry/testutils/cases.py b/src/sentry/testutils/cases.py
index 7cb53dbc6ab218..36adfbfbb170ad 100644
--- a/src/sentry/testutils/cases.py
+++ b/src/sentry/testutils/cases.py
@@ -1315,7 +1315,7 @@ def build_session(self, **kwargs):
             "distinct_id": str(uuid4()),
             "status": "ok",
             "seq": 0,
-            "retention_days": 90,
+            "retention_days": 730,
             "duration": 60.0,
             "errors": 0,
             "started": time.time() // 60 * 60,
@@ -1518,7 +1518,7 @@ def store_segment(
             "received": timezone.now().timestamp(),
             "start_timestamp_ms": int(timestamp.timestamp() * 1000),
             "sentry_tags": {"transaction": transaction or "/hello"},
-            "retention_days": 90,
+            "retention_days": 730,
         }
 
         if tags:
@@ -1586,7 +1586,7 @@ def store_indexed_span(
                 "op": op or "http",
                 "group": group,
             },
-            "retention_days": 90,
+            "retention_days": 730,
         }
 
         if tags:
@@ -1764,7 +1764,7 @@ def tag_value(name):
             "tags": {tag_key(key): tag_value(value) for key, value in tags.items()},
             "type": {"counter": "c", "set": "s", "distribution": "d", "gauge": "g"}[type],
             "value": value,
-            "retention_days": 90,
+            "retention_days": 730,
             "use_case_id": use_case_id.value,
             # making up a sentry_received_timestamp, but it should be sometime
             # after the timestamp of the event
@@ -2376,7 +2376,7 @@ def store_functions(
             "profile_id": profile_id,
             "project_id": project.id,
             "received": int(timezone.now().timestamp()),
-            "retention_days": 90,
+            "retention_days": 730,
             "timestamp": int(timestamp),
             "transaction_name": transaction["transaction"],
         }
@@ -3226,7 +3226,7 @@ class SpanTestCase(BaseTestCase):
     # Some base data for create_span
     base_span: dict[str, Any] = {
         "is_segment": False,
-        "retention_days": 90,
+        "retention_days": 730,
         "tags": {},
         "sentry_tags": {},
         "measurements": {},
diff --git a/src/sentry/testutils/metrics_backend.py b/src/sentry/testutils/metrics_backend.py
index dc0b1a12aeec18..8d4ca817a31153 100644
--- a/src/sentry/testutils/metrics_backend.py
+++ b/src/sentry/testutils/metrics_backend.py
@@ -11,7 +11,7 @@ class GenericMetricsTestMixIn:
     counter_value = 5
     dist_values = [5, 3]
     metrics_tags = {"a": "b"}
-    retention_days = 90
+    retention_days = 730
     unit = "millisecond"
 
     def get_mri(self, metric_name: str, metric_type: str, use_case_id: UseCaseID, unit: str | None):
diff --git a/static/app/components/charts/intervalSelector.tsx b/static/app/components/charts/intervalSelector.tsx
index 7a39386164b68e..9d76562a7c2e6c 100644
--- a/static/app/components/charts/intervalSelector.tsx
+++ b/static/app/components/charts/intervalSelector.tsx
@@ -68,7 +68,7 @@ type IntervalOption = {
 
 const INTERVAL_OPTIONS: IntervalOption[] = [
   {
-    rangeStart: 90 * 24,
+    rangeStart: 730 * 24,
     min: 1,
     default: '4h',
     options: ['1h', '4h', '1d', '5d'],
diff --git a/static/app/components/timeRangeSelector/index.tsx b/static/app/components/timeRangeSelector/index.tsx
index db42ee6156815d..b82f8db4a89437 100644
--- a/static/app/components/timeRangeSelector/index.tsx
+++ b/static/app/components/timeRangeSelector/index.tsx
@@ -150,7 +150,7 @@ export function TimeRangeSelector({
   showRelative = true,
   defaultAbsolute,
   defaultPeriod = DEFAULT_STATS_PERIOD,
-  maxPickableDays = 90,
+  maxPickableDays = 730,
   maxDateRange,
   disallowArbitraryRelativeRanges = false,
   trigger,
diff --git a/static/app/constants/index.tsx b/static/app/constants/index.tsx
index e77d0aaf9ce79a..65150c9213fac8 100644
--- a/static/app/constants/index.tsx
+++ b/static/app/constants/index.tsx
@@ -218,7 +218,7 @@
 export const MENU_CLOSE_DELAY = 200;
 
 export const SLOW_TOOLTIP_DELAY = 1000;
 
-export const MAX_PICKABLE_DAYS = 90;
+export const MAX_PICKABLE_DAYS = 730;
 
 export const DEFAULT_STATS_PERIOD = '14d';
 
@@ -234,6 +234,7 @@ export const DEFAULT_RELATIVE_PERIODS = {
   '14d': t('Last 14 days'),
   '30d': t('Last 30 days'),
   '90d': t('Last 90 days'),
+  '730d': t('Last 730 days'),
 };
 
 export const DEFAULT_RELATIVE_PERIODS_PAGE_FILTER = {
diff --git a/static/app/utils/profiling/hooks/useRelativeDateTime.tsx b/static/app/utils/profiling/hooks/useRelativeDateTime.tsx
index c4b894a342f596..43631338b1f61e 100644
--- a/static/app/utils/profiling/hooks/useRelativeDateTime.tsx
+++ b/static/app/utils/profiling/hooks/useRelativeDateTime.tsx
@@ -21,7 +21,7 @@ export function useRelativeDateTime({
   // Make sure to memo this. Otherwise, each re-render will have
   // a different min/max date time, causing the query to refetch.
   const maxDateTime = useMemo(() => Date.now(), []);
-  const minDateTime = maxDateTime - (retentionDays ?? 90) * DAY;
+  const minDateTime = maxDateTime - (retentionDays ?? 730) * DAY;
 
   const beforeTime = anchorTime - relativeDays * DAY;
   const beforeDateTime =
diff --git a/tests/js/fixtures/checkinProcessingError.ts b/tests/js/fixtures/checkinProcessingError.ts
index a33b3c805c58c1..98b114be549691 100644
--- a/tests/js/fixtures/checkinProcessingError.ts
+++ b/tests/js/fixtures/checkinProcessingError.ts
@@ -10,7 +10,7 @@ export function CheckinProcessingErrorFixture(
       message_type: 'check_in',
       payload: '',
       project_id: 1,
-      retention_days: 90,
+      retention_days: 730,
      sdk: '',
       start_time: 171659668,
       type: 'check_in',
diff --git a/tests/sentry/api/endpoints/test_team_groups_old.py b/tests/sentry/api/endpoints/test_team_groups_old.py
index e750dea275cf4a..a47f0704c342e6 100644
--- a/tests/sentry/api/endpoints/test_team_groups_old.py
+++ b/tests/sentry/api/endpoints/test_team_groups_old.py
@@ -43,7 +43,7 @@ def test_simple(self):
         last_seen_too_old_group = self.create_group(
             project=project1,
             first_seen=datetime(2018, 1, 12, 3, 8, 25, tzinfo=UTC),
-            last_seen=datetime.now(UTC) - timedelta(days=91),
+            last_seen=datetime.now(UTC) - timedelta(days=731),
         )
         GroupAssignee.objects.assign(last_seen_too_old_group, self.user)
 
diff --git a/tests/sentry/api/endpoints/test_team_unresolved_issue_age.py b/tests/sentry/api/endpoints/test_team_unresolved_issue_age.py
index 401d50d570db36..573f1d3b35d0f6 100644
--- a/tests/sentry/api/endpoints/test_team_unresolved_issue_age.py
+++ b/tests/sentry/api/endpoints/test_team_unresolved_issue_age.py
@@ -33,7 +33,7 @@ def test_simple(self):
             project=project2, status=GroupStatus.RESOLVED, first_seen=before_now(weeks=60)
         )
         # This group shouldn't be counted since it hasn't been seen for more than 90 days
-        last_seen_too_old_group = self.create_group(project=project1, last_seen=before_now(days=91))
+        last_seen_too_old_group = self.create_group(project=project1, last_seen=before_now(days=731))
         GroupAssignee.objects.assign(group1, self.user)
         GroupAssignee.objects.assign(group2, self.user)
         GroupAssignee.objects.assign(group3, self.team)
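
Note: the new 730-day fallback appears as a literal in many call sites throughout this change (for example `quotas.backend.get_event_retention(organization=...) or 730`). Below is a minimal sketch of centralizing that default in one place so a future retention change touches a single constant; the module location, constant name, and helper name are assumptions for illustration and are not part of this diff.

```python
# Hypothetical shared default; names and placement are illustrative only.
from datetime import timedelta

DEFAULT_EVENT_RETENTION_DAYS = 730  # matches the fallback used throughout this change
MAX_STATS_PERIOD = timedelta(days=DEFAULT_EVENT_RETENTION_DAYS)


def get_event_retention_days(organization) -> int:
    """Return the org-configured retention, falling back to the shared default."""
    # Imported lazily to mirror how call sites in this diff reach the quotas backend.
    from sentry import quotas

    return (
        quotas.backend.get_event_retention(organization=organization)
        or DEFAULT_EVENT_RETENTION_DAYS
    )
```

Call sites such as `_normalize` in `src/sentry/profiles/task.py` could then read `profile["retention_days"] = get_event_retention_days(organization)`, keeping the magic number out of the ingestion, search, and UI paths touched above.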