diff --git a/cmd/elasticsearch/list_snapshots_test.go b/cmd/elasticsearch/list_snapshots_test.go
index 1766370..50269c3 100644
--- a/cmd/elasticsearch/list_snapshots_test.go
+++ b/cmd/elasticsearch/list_snapshots_test.go
@@ -69,6 +69,26 @@ victoriaMetrics:
         requests:
           cpu: "500m"
           memory: "1Gi"
+settings:
+  bucket: sts-settings-backup
+  s3Prefix: ""
+  restore:
+    scaleDownLabelSelector: "app=settings"
+    loggingConfigConfigMap: logging-config
+    baseUrl: "http://server:7070"
+    receiverBaseUrl: "http://receiver:7077"
+    platformVersion: "5.2.0"
+    zookeeperQuorum: "zookeeper:2181"
+    job:
+      image: settings-backup:latest
+      waitImage: wait:latest
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "500m"
+          memory: "1Gi"
 `
 
 // mockESClient is a simple mock for testing commands
diff --git a/cmd/root.go b/cmd/root.go
index daf729b..394da47 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -5,6 +5,7 @@ import (
 
     "github.com/spf13/cobra"
     "github.com/stackvista/stackstate-backup-cli/cmd/elasticsearch"
+    "github.com/stackvista/stackstate-backup-cli/cmd/settings"
     "github.com/stackvista/stackstate-backup-cli/cmd/stackgraph"
     "github.com/stackvista/stackstate-backup-cli/cmd/version"
     "github.com/stackvista/stackstate-backup-cli/cmd/victoriametrics"
@@ -40,6 +41,10 @@ func init() {
     addBackupConfigFlags(stackgraphCmd)
     rootCmd.AddCommand(stackgraphCmd)
 
+    settingsCmd := settings.Cmd(flags)
+    addBackupConfigFlags(settingsCmd)
+    rootCmd.AddCommand(settingsCmd)
+
     victoriaMetricsCmd := victoriametrics.Cmd(flags)
     addBackupConfigFlags(victoriaMetricsCmd)
     rootCmd.AddCommand(victoriaMetricsCmd)
diff --git a/cmd/settings/check_and_finalize.go b/cmd/settings/check_and_finalize.go
new file mode 100644
index 0000000..4317693
--- /dev/null
+++ b/cmd/settings/check_and_finalize.go
@@ -0,0 +1,65 @@
+package settings
+
+import (
+    "fmt"
+    "os"
+
+    "github.com/spf13/cobra"
+    "github.com/stackvista/stackstate-backup-cli/internal/app"
+    "github.com/stackvista/stackstate-backup-cli/internal/foundation/config"
+    "github.com/stackvista/stackstate-backup-cli/internal/orchestration/restore"
+)
+
+// Check and finalize command flags
+var (
+    checkJobName string
+    waitForJob   bool
+)
+
+func checkAndFinalizeCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "check-and-finalize",
+        Short: "Check and finalize a Settings restore job",
+        Long: `Check the status of a background Settings restore job and clean up resources.
+
+This command is useful when a restore job was started with --background flag or was interrupted (Ctrl+C).
+It will check the job status, print logs if it failed, and clean up the job and PVC resources.
+
+Examples:
+  # Check job status without waiting
+  sts-backup settings check-and-finalize --job settings-restore-20250128t143000 -n my-namespace
+
+  # Wait for job completion and cleanup
+  sts-backup settings check-and-finalize --job settings-restore-20250128t143000 --wait -n my-namespace`,
+        Run: func(_ *cobra.Command, _ []string) {
+            appCtx, err := app.NewContext(globalFlags)
+            if err != nil {
+                _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err)
+                os.Exit(1)
+            }
+            if err := runCheckAndFinalize(appCtx); err != nil {
+                _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err)
+                os.Exit(1)
+            }
+        },
+    }
+
+    cmd.Flags().StringVarP(&checkJobName, "job", "j", "", "Settings restore job name (required)")
+    cmd.Flags().BoolVarP(&waitForJob, "wait", "w", false, "Wait for job to complete before cleanup")
+    _ = cmd.MarkFlagRequired("job")
+
+    return cmd
+}
+
+func runCheckAndFinalize(appCtx *app.Context) error {
+    return restore.CheckAndFinalize(restore.CheckAndFinalizeParams{
+        K8sClient:     appCtx.K8sClient,
+        Namespace:     appCtx.Namespace,
+        JobName:       checkJobName,
+        ServiceName:   "settings",
+        ScaleSelector: appCtx.Config.Settings.Restore.ScaleDownLabelSelector,
+        CleanupPVC:    true,
+        WaitForJob:    waitForJob,
+        Log:           appCtx.Logger,
+    })
+}
diff --git a/cmd/settings/list.go b/cmd/settings/list.go
new file mode 100644
index 0000000..dda81f4
--- /dev/null
+++ b/cmd/settings/list.go
@@ -0,0 +1,97 @@
+package settings
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "sort"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/service/s3"
+    "github.com/spf13/cobra"
+    "github.com/stackvista/stackstate-backup-cli/internal/app"
+    s3client "github.com/stackvista/stackstate-backup-cli/internal/clients/s3"
+    "github.com/stackvista/stackstate-backup-cli/internal/foundation/config"
+    "github.com/stackvista/stackstate-backup-cli/internal/foundation/output"
+    "github.com/stackvista/stackstate-backup-cli/internal/orchestration/portforward"
+)
+
+const (
+    isMultiPartArchive = false
+)
+
+func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command {
+    return &cobra.Command{
+        Use:   "list",
+        Short: "List available Settings backups from S3/Minio",
+        Run: func(_ *cobra.Command, _ []string) {
+            appCtx, err := app.NewContext(globalFlags)
+            if err != nil {
+                _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err)
+                os.Exit(1)
+            }
+            if err := runList(appCtx); err != nil {
+                _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err)
+                os.Exit(1)
+            }
+        },
+    }
+}
+
+func runList(appCtx *app.Context) error {
+    // Setup port-forward to Minio
+    serviceName := appCtx.Config.Minio.Service.Name
+    localPort := appCtx.Config.Minio.Service.LocalPortForwardPort
+    remotePort := appCtx.Config.Minio.Service.Port
+
+    pf, err := portforward.SetupPortForward(appCtx.K8sClient, appCtx.Namespace, serviceName, localPort, remotePort, appCtx.Logger)
+    if err != nil {
+        return err
+    }
+    defer close(pf.StopChan)
+
+    // List objects in bucket
+    bucket := appCtx.Config.Settings.Bucket
+    prefix := appCtx.Config.Settings.S3Prefix
+
+    appCtx.Logger.Infof("Listing Settings backups in bucket '%s'...", bucket)
+
+    input := &s3.ListObjectsV2Input{
+        Bucket: aws.String(bucket),
+        Prefix: aws.String(prefix),
+    }
+
+    result, err := appCtx.S3Client.ListObjectsV2(context.Background(), input)
+    if err != nil {
+        return fmt.Errorf("failed to list S3 objects: %w", err)
+    }
+
+    // Filter objects based on whether the archive is split or not
+    filteredObjects := s3client.FilterBackupObjects(result.Contents, isMultiPartArchive)
+
+    // Sort by LastModified time (most recent first)
+    sort.Slice(filteredObjects, func(i, j int) bool {
+        return filteredObjects[i].LastModified.After(filteredObjects[j].LastModified)
+    })
+
+    if len(filteredObjects) == 0 {
+        appCtx.Formatter.PrintMessage("No backups found")
+        return nil
+    }
+
+    table := output.Table{
+        Headers: []string{"NAME", "LAST MODIFIED", "SIZE"},
+        Rows:    make([][]string, 0, len(filteredObjects)),
+    }
+
+    for _, obj := range filteredObjects {
+        row := []string{
+            obj.Key,
+            obj.LastModified.Format("2006-01-02 15:04:05 MST"),
+            output.FormatBytes(obj.Size),
+        }
+        table.Rows = append(table.Rows, row)
+    }
+
+    return appCtx.Formatter.PrintTable(table)
+}
diff --git a/cmd/settings/restore.go b/cmd/settings/restore.go
new file mode 100644
index 0000000..71b15f8
--- /dev/null
+++ b/cmd/settings/restore.go
@@ -0,0 +1,317 @@
+package settings
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "sort"
+    "time"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/service/s3"
+    "github.com/spf13/cobra"
+    "github.com/stackvista/stackstate-backup-cli/internal/app"
+    "github.com/stackvista/stackstate-backup-cli/internal/clients/k8s"
+    s3client "github.com/stackvista/stackstate-backup-cli/internal/clients/s3"
+    "github.com/stackvista/stackstate-backup-cli/internal/foundation/config"
+    "github.com/stackvista/stackstate-backup-cli/internal/foundation/logger"
+    "github.com/stackvista/stackstate-backup-cli/internal/orchestration/portforward"
+    "github.com/stackvista/stackstate-backup-cli/internal/orchestration/restore"
+    "github.com/stackvista/stackstate-backup-cli/internal/orchestration/scale"
+    corev1 "k8s.io/api/core/v1"
+)
+
+const (
+    jobNameTemplate          = "settings-restore"
+    configMapDefaultFileMode = 0755
+)
+
+// Restore command flags
+var (
+    archiveName      string
+    useLatest        bool
+    background       bool
+    skipConfirmation bool
+)
+
+func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "restore",
+        Short: "Restore Settings from a backup archive",
+        Long: `Restore Settings data from a backup archive stored in S3/Minio.
+Can use --latest or --archive to specify which backup to restore.`,
+        Run: func(_ *cobra.Command, _ []string) {
+            appCtx, err := app.NewContext(globalFlags)
+            if err != nil {
+                _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err)
+                os.Exit(1)
+            }
+            if err := runRestore(appCtx); err != nil {
+                _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err)
+                os.Exit(1)
+            }
+        },
+    }
+
+    cmd.Flags().StringVar(&archiveName, "archive", "", "Specific archive name to restore (e.g., sts-backup-20251117-1404.sty)")
+    cmd.Flags().BoolVar(&useLatest, "latest", false, "Restore from the most recent backup")
+    cmd.Flags().BoolVar(&background, "background", false, "Run restore job in background without waiting for completion")
+    cmd.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip confirmation prompt")
+    cmd.MarkFlagsMutuallyExclusive("archive", "latest")
+    cmd.MarkFlagsOneRequired("archive", "latest")
+
+    return cmd
+}
+
+func runRestore(appCtx *app.Context) error {
+    // Determine which archive to restore
+    backupFile := archiveName
+    if useLatest {
+        appCtx.Logger.Infof("Finding latest backup...")
+        latest, err := getLatestBackup(appCtx.K8sClient, appCtx.Namespace, appCtx.Config, appCtx.Logger)
+        if err != nil {
+            return err
+        }
+        backupFile = latest
+        appCtx.Logger.Infof("Using latest backup: %s", backupFile)
+    }
+
+    // Warn user and ask for confirmation
+    if !skipConfirmation {
+        appCtx.Logger.Println()
+        appCtx.Logger.Warningf("WARNING: Restoring from backup will PURGE all existing Settings data!")
+        appCtx.Logger.Warningf("This operation cannot be undone.")
+        appCtx.Logger.Println()
+        appCtx.Logger.Infof("Backup to restore: %s", backupFile)
+        appCtx.Logger.Infof("Namespace: %s", appCtx.Namespace)
+        appCtx.Logger.Println()
+
+        if !restore.PromptForConfirmation() {
+            return fmt.Errorf("restore operation cancelled by user")
+        }
+    }
+
+    // Scale down deployments before restore
+    appCtx.Logger.Println()
+    scaleDownLabelSelector := appCtx.Config.Settings.Restore.ScaleDownLabelSelector
+    scaledDeployments, err := scale.ScaleDown(appCtx.K8sClient, appCtx.Namespace, scaleDownLabelSelector, appCtx.Logger)
+    if err != nil {
+        return err
+    }
+
+    // Ensure deployments are scaled back up on exit (even if restore fails)
+    defer func() {
+        if len(scaledDeployments) > 0 && !background {
+            appCtx.Logger.Println()
+            if err := scale.ScaleUpFromAnnotations(appCtx.K8sClient, appCtx.Namespace, scaleDownLabelSelector, appCtx.Logger); err != nil {
+                appCtx.Logger.Warningf("Failed to scale up deployments: %v", err)
+            }
+        }
+    }()
+
+    // Setup Kubernetes resources for restore job
+    appCtx.Logger.Println()
+    if err := restore.EnsureRestoreResources(appCtx.K8sClient, appCtx.Namespace, appCtx.Config, appCtx.Logger); err != nil {
+        return err
+    }
+
+    // Create restore job
+    appCtx.Logger.Println()
+    appCtx.Logger.Infof("Creating restore job for backup: %s", backupFile)
+
+    jobName := fmt.Sprintf("%s-%s", jobNameTemplate, time.Now().Format("20060102t150405"))
+
+    if err = createRestoreJob(appCtx.K8sClient, appCtx.Namespace, jobName, backupFile, appCtx.Config); err != nil {
+        return fmt.Errorf("failed to create restore job: %w", err)
+    }
+
+    appCtx.Logger.Successf("Restore job created: %s", jobName)
+
+    if background {
+        restore.PrintRunningJobStatus(appCtx.Logger, "settings", jobName, appCtx.Namespace, 0)
+        return nil
+    }
+
+    return waitAndCleanupRestoreJob(appCtx.K8sClient, appCtx.Namespace, jobName, appCtx.Logger)
+}
+
+// waitAndCleanupRestoreJob waits for job completion and cleans up resources
+func waitAndCleanupRestoreJob(k8sClient *k8s.Client, namespace, jobName string, log *logger.Logger) error {
+    restore.PrintWaitingMessage(log, "settings", jobName, namespace)
+    return restore.WaitAndCleanup(k8sClient, namespace, jobName, log, true)
+}
+
+// getLatestBackup retrieves the most recent backup from S3
+func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Config, log *logger.Logger) (string, error) {
+    // Setup port-forward to Minio
+    serviceName := config.Minio.Service.Name
+    localPort := config.Minio.Service.LocalPortForwardPort
+    remotePort := config.Minio.Service.Port
+
+    pf, err := portforward.SetupPortForward(k8sClient, namespace, serviceName, localPort, remotePort, log)
+    if err != nil {
+        return "", err
+    }
+    defer close(pf.StopChan)
+
+    // Create S3 client
+    endpoint := fmt.Sprintf("http://localhost:%d", pf.LocalPort)
+    s3Client, err := s3client.NewClient(endpoint, config.Minio.AccessKey, config.Minio.SecretKey)
+    if err != nil {
+        return "", err
+    }
+
+    // List objects in bucket
+    bucket := config.Settings.Bucket
+    prefix := config.Settings.S3Prefix
+
+    input := &s3.ListObjectsV2Input{
+        Bucket: aws.String(bucket),
+        Prefix: aws.String(prefix),
+    }
+
+    result, err := s3Client.ListObjectsV2(context.Background(), input)
+    if err != nil {
+        return "", fmt.Errorf("failed to list S3 objects: %w", err)
+    }
+
+    // Filter objects based on whether the archive is split or not
+    filteredObjects := s3client.FilterBackupObjects(result.Contents, isMultiPartArchive)
+
+    if len(filteredObjects) == 0 {
+        return "", fmt.Errorf("no backups found in bucket %s", bucket)
+    }
+
+    // Sort by LastModified time (most recent first)
+    sort.Slice(filteredObjects, func(i, j int) bool {
+        return filteredObjects[i].LastModified.After(filteredObjects[j].LastModified)
+    })
+
+    return filteredObjects[0].Key, nil
+}
+
+// createRestoreJob creates a Kubernetes Job and PVC for restoring from backup
+func createRestoreJob(k8sClient *k8s.Client, namespace, jobName, backupFile string, config *config.Config) error {
+    defaultMode := int32(configMapDefaultFileMode)
+
+    // Merge common labels with resource-specific labels
+    jobLabels := k8s.MergeLabels(config.Kubernetes.CommonLabels, config.Settings.Restore.Job.Labels)
+
+    // Build job spec using configuration
+    spec := k8s.BackupJobSpec{
+        Name:             jobName,
+        Labels:           jobLabels,
+        ImagePullSecrets: k8s.ConvertImagePullSecrets(config.Settings.Restore.Job.ImagePullSecrets),
+        SecurityContext:  k8s.ConvertPodSecurityContext(&config.Settings.Restore.Job.SecurityContext),
+        NodeSelector:     config.Settings.Restore.Job.NodeSelector,
+        Tolerations:      k8s.ConvertTolerations(config.Settings.Restore.Job.Tolerations),
+        Affinity:         k8s.ConvertAffinity(config.Settings.Restore.Job.Affinity),
+        Containers:       buildRestoreContainers(backupFile, config),
+        InitContainers:   buildRestoreInitContainers(config),
+        Volumes:          buildRestoreVolumes(config, defaultMode),
+    }
+
+    // Create job
+    if _, err := k8sClient.CreateBackupJob(namespace, spec); err != nil {
+        return fmt.Errorf("failed to create job: %w", err)
+    }
+
+    return nil
+}
+
+// buildRestoreEnvVars constructs environment variables for the restore job
+func buildRestoreEnvVars(backupFile string, config *config.Config) []corev1.EnvVar {
+    return []corev1.EnvVar{
+        {Name: "BACKUP_FILE", Value: backupFile},
+        {Name: "BACKUP_CONFIGURATION_BUCKET_NAME", Value: config.Settings.Bucket},
+        {Name: "BACKUP_CONFIGURATION_S3_PREFIX", Value: config.Settings.S3Prefix},
+        {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", config.Minio.Service.Name, config.Minio.Service.Port)},
+        {Name: "STACKSTATE_BASE_URL", Value: config.Settings.Restore.BaseURL},
+        {Name: "RECEIVER_BASE_URL", Value: config.Settings.Restore.ReceiverBaseURL},
+        {Name: "PLATFORM_VERSION", Value: config.Settings.Restore.PlatformVersion},
+        {Name: "ZOOKEEPER_QUORUM", Value: config.Settings.Restore.ZookeeperQuorum},
+    }
+}
+
+// buildRestoreVolumeMounts constructs volume mounts for the restore job container
+func buildRestoreVolumeMounts() []corev1.VolumeMount {
+    return []corev1.VolumeMount{
+        {Name: "backup-log", MountPath: "/opt/docker/etc_log"},
+        {Name: "backup-restore-scripts", MountPath: "/backup-restore-scripts"},
+        {Name: "minio-keys", MountPath: "/aws-keys"},
+        {Name: "tmp-data", MountPath: "/tmp-data"},
+    }
+}
+
+// buildRestoreInitContainers constructs init containers for the restore job
+func buildRestoreInitContainers(config *config.Config) []corev1.Container {
+    return []corev1.Container{
+        {
+            Name:            "wait",
+            Image:           config.Settings.Restore.Job.WaitImage,
+            ImagePullPolicy: corev1.PullIfNotPresent,
+            Command: []string{
+                "sh",
+                "-c",
+                fmt.Sprintf("/entrypoint -c %s:%d -t 300", config.Minio.Service.Name, config.Minio.Service.Port),
+            },
+            SecurityContext: k8s.ConvertSecurityContext(config.Settings.Restore.Job.ContainerSecurityContext),
+        },
+    }
+}
+
+// buildRestoreVolumes constructs volumes for the restore job pod
+func buildRestoreVolumes(config *config.Config, defaultMode int32) []corev1.Volume {
+    return []corev1.Volume{
+        {
+            Name: "backup-log",
+            VolumeSource: corev1.VolumeSource{
+                ConfigMap: &corev1.ConfigMapVolumeSource{
+                    LocalObjectReference: corev1.LocalObjectReference{
+                        Name: config.Settings.Restore.LoggingConfigConfigMapName,
+                    },
+                },
+            },
+        },
+        {
+            Name: "backup-restore-scripts",
+            VolumeSource: corev1.VolumeSource{
+                ConfigMap: &corev1.ConfigMapVolumeSource{
+                    LocalObjectReference: corev1.LocalObjectReference{
+                        Name: restore.RestoreScriptsConfigMap,
+                    },
+                    DefaultMode: &defaultMode,
+                },
+            },
+        },
+        {
+            Name: "minio-keys",
+            VolumeSource: corev1.VolumeSource{
+                Secret: &corev1.SecretVolumeSource{
+                    SecretName: restore.MinioKeysSecretName,
+                },
+            },
+        },
+        {
+            Name: "tmp-data",
+            VolumeSource: corev1.VolumeSource{
+                EmptyDir: &corev1.EmptyDirVolumeSource{},
+            },
+        },
+    }
+}
+
+// buildRestoreContainers constructs containers for the restore job
+func buildRestoreContainers(backupFile string, config *config.Config) []corev1.Container {
+    return []corev1.Container{
+        {
+            Name:            "restore",
+            Image:           config.Settings.Restore.Job.Image,
+            ImagePullPolicy: corev1.PullIfNotPresent,
+            SecurityContext: k8s.ConvertSecurityContext(config.Settings.Restore.Job.ContainerSecurityContext),
+            Command:         []string{"/backup-restore-scripts/restore-settings-backup.sh"},
+            Env:             buildRestoreEnvVars(backupFile, config),
+            Resources:       k8s.ConvertResources(config.Settings.Restore.Job.Resources),
+            VolumeMounts:    buildRestoreVolumeMounts(),
+        },
+    }
+}
diff --git a/cmd/settings/settings.go b/cmd/settings/settings.go
new file mode 100644
index 0000000..6ed4702
--- /dev/null
+++ b/cmd/settings/settings.go
@@ -0,0 +1,19 @@
+package settings
+
+import (
+    "github.com/spf13/cobra"
+    "github.com/stackvista/stackstate-backup-cli/internal/foundation/config"
+)
+
+func Cmd(globalFlags *config.CLIGlobalFlags) *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "settings",
+        Short: "Settings backup and restore operations",
+    }
+
+    cmd.AddCommand(listCmd(globalFlags))
+    cmd.AddCommand(restoreCmd(globalFlags))
+    cmd.AddCommand(checkAndFinalizeCmd(globalFlags))
+
+    return cmd
+}
diff --git a/cmd/stackgraph/list.go b/cmd/stackgraph/list.go
index 79faf7f..0d45f45 100644
--- a/cmd/stackgraph/list.go
+++ b/cmd/stackgraph/list.go
@@ -85,25 +85,10 @@ func runList(appCtx *app.Context) error {
         row := []string{
             obj.Key,
             obj.LastModified.Format("2006-01-02 15:04:05 MST"),
-            formatBytes(obj.Size),
+            output.FormatBytes(obj.Size),
         }
         table.Rows = append(table.Rows, row)
     }
 
     return appCtx.Formatter.PrintTable(table)
 }
-
-// formatBytes formats bytes to human-readable format without spaces (e.g., "624MiB")
-func formatBytes(bytes int64) string {
-    const unit = 1024
-    if bytes < unit {
-        return fmt.Sprintf("%dB", bytes)
-    }
-    div, exp := int64(unit), 0
-    for n := bytes / unit; n >= unit; n /= unit {
-        div *= unit
-        exp++
-    }
-    units := []string{"KiB", "MiB", "GiB", "TiB", "PiB"}
-    return fmt.Sprintf("%.0f%s", float64(bytes)/float64(div), units[exp])
-}
diff --git a/internal/foundation/config/config.go b/internal/foundation/config/config.go
index c1d6bc5..68383f9 100644
--- a/internal/foundation/config/config.go
+++ b/internal/foundation/config/config.go
@@ -20,6 +20,7 @@ type Config struct {
     Elasticsearch   ElasticsearchConfig   `yaml:"elasticsearch" validate:"required"`
     Minio           MinioConfig           `yaml:"minio" validate:"required"`
     Stackgraph      StackgraphConfig      `yaml:"stackgraph" validate:"required"`
+    Settings        SettingsConfig        `yaml:"settings" validate:"required"`
     VictoriaMetrics VictoriaMetricsConfig `yaml:"victoriaMetrics" validate:"required"`
 }
 
@@ -117,6 +118,22 @@ type StackgraphRestoreConfig struct {
     PVC PVCConfig `yaml:"pvc" validate:"required"`
 }
 
+type SettingsConfig struct {
+    Bucket   string                `yaml:"bucket" validate:"required"`
+    S3Prefix string                `yaml:"s3Prefix"`
+    Restore  SettingsRestoreConfig `yaml:"restore" validate:"required"`
+}
+
+type SettingsRestoreConfig struct {
+    ScaleDownLabelSelector     string    `yaml:"scaleDownLabelSelector" validate:"required"`
+    LoggingConfigConfigMapName string    `yaml:"loggingConfigConfigMap" validate:"required"`
+    BaseURL                    string    `yaml:"baseUrl" validate:"required"`
+    ReceiverBaseURL            string    `yaml:"receiverBaseUrl" validate:"required"`
+    PlatformVersion            string    `yaml:"platformVersion" validate:"required"`
+    ZookeeperQuorum            string    `yaml:"zookeeperQuorum" validate:"required"`
+    Job                        JobConfig `yaml:"job" validate:"required"`
+}
+
 // PVCConfig holds PersistentVolumeClaim configuration
 type PVCConfig struct {
     Size string `yaml:"size" validate:"required"`
diff --git a/internal/foundation/config/config_test.go b/internal/foundation/config/config_test.go
index af5086b..bac1352 100644
--- a/internal/foundation/config/config_test.go
+++ b/internal/foundation/config/config_test.go
@@ -425,6 +425,32 @@ func TestConfig_StructValidation(t *testing.T) {
                         },
                     },
                 },
+                Settings: SettingsConfig{
+                    Bucket:   "settings-backup",
+                    S3Prefix: "",
+                    Restore: SettingsRestoreConfig{
+                        ScaleDownLabelSelector:     "app=settings",
+                        LoggingConfigConfigMapName: "logging-config",
+                        BaseURL:                    "http://server:7070",
+                        ReceiverBaseURL:            "http://receiver:7077",
+                        PlatformVersion:            "5.2.0",
+                        ZookeeperQuorum:            "zookeeper:2181",
+                        Job: JobConfig{
+                            Image:     "settings-backup:latest",
+                            WaitImage: "wait:latest",
+                            Resources: ResourceRequirements{
+                                Limits: ResourceList{
+                                    CPU:    "1",
+                                    Memory: "2Gi",
+                                },
+                                Requests: ResourceList{
+                                    CPU:    "500m",
+                                    Memory: "1Gi",
+                                },
+                            },
+                        },
+                    },
+                },
             },
             expectError: false,
         },
diff --git a/internal/foundation/config/testdata/validConfigMapConfig.yaml b/internal/foundation/config/testdata/validConfigMapConfig.yaml
index 66115c3..58368b8 100644
--- a/internal/foundation/config/testdata/validConfigMapConfig.yaml
+++ b/internal/foundation/config/testdata/validConfigMapConfig.yaml
@@ -136,3 +136,27 @@ victoriaMetrics:
         requests:
           cpu: "500m"
           memory: "1Gi"
+
+# Settings backup configuration
+settings:
+  bucket: sts-settings-backup
+  s3Prefix: ""
+  restore:
+    scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true"
+    loggingConfigConfigMap: suse-observability-logging
+    baseUrl: "http://suse-observability-server:7070"
+    receiverBaseUrl: "http://suse-observability-receiver:7077"
+    platformVersion: "5.2.0"
+    zookeeperQuorum: "suse-observability-zookeeper:2181"
+    job:
+      labels:
+        app: settings-restore
+      image: quay.io/stackstate/settings-backup:latest
+      waitImage: quay.io/stackstate/wait:latest
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "500m"
+          memory: "1Gi"
diff --git a/internal/foundation/config/testdata/validConfigMapOnly.yaml b/internal/foundation/config/testdata/validConfigMapOnly.yaml
index 9aa76d6..6af0c75 100644
--- a/internal/foundation/config/testdata/validConfigMapOnly.yaml
+++ b/internal/foundation/config/testdata/validConfigMapOnly.yaml
@@ -126,3 +126,27 @@ victoriaMetrics:
         requests:
           cpu: "500m"
           memory: "1Gi"
+
+# Settings backup configuration
+settings:
+  bucket: sts-settings-backup
+  s3Prefix: ""
+  restore:
+    scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true"
+    loggingConfigConfigMap: suse-observability-logging
+    baseUrl: "http://suse-observability-server:7070"
+    receiverBaseUrl: "http://suse-observability-receiver:7077"
+    platformVersion: "5.2.0"
+    zookeeperQuorum: "suse-observability-zookeeper:2181"
+    job:
+      labels:
+        app: settings-restore
+      image: quay.io/stackstate/settings-backup:latest
+      waitImage: quay.io/stackstate/wait:latest
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "500m"
+          memory: "1Gi"
diff --git a/internal/foundation/output/helpers.go b/internal/foundation/output/helpers.go
new file mode 100644
index 0000000..a618184
--- /dev/null
+++ b/internal/foundation/output/helpers.go
@@ -0,0 +1,18 @@
+package output
+
+import "fmt"
+
+// FormatBytes formats bytes to human-readable format without spaces (e.g., "624MiB")
+func FormatBytes(bytes int64) string {
+    const unit = 1024
+    if bytes < unit {
+        return fmt.Sprintf("%dB", bytes)
+    }
+    div, exp := int64(unit), 0
+    for n := bytes / unit; n >= unit; n /= unit {
+        div *= unit
+        exp++
+    }
+    units := []string{"KiB", "MiB", "GiB", "TiB", "PiB"}
+    return fmt.Sprintf("%.0f%s", float64(bytes)/float64(div), units[exp])
+}
diff --git a/internal/foundation/output/helpers_test.go b/internal/foundation/output/helpers_test.go
new file mode 100644
index 0000000..20a489a
--- /dev/null
+++ b/internal/foundation/output/helpers_test.go
@@ -0,0 +1,223 @@
+package output
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+//nolint:funlen
+func TestFormatBytes(t *testing.T) {
+    tests := []struct {
+        name     string
+        bytes    int64
+        expected string
+    }{
+        // Bytes (< 1024)
+        {
+            name:     "zero bytes",
+            bytes:    0,
+            expected: "0B",
+        },
+        {
+            name:     "single byte",
+            bytes:    1,
+            expected: "1B",
+        },
+        {
+            name:     "multiple bytes",
+            bytes:    512,
+            expected: "512B",
+        },
+        {
+            name:     "max bytes before KiB",
+            bytes:    1023,
+            expected: "1023B",
+        },
+
+        // KiB (1024 to < 1024²)
+        {
+            name:     "exactly 1 KiB",
+            bytes:    1024,
+            expected: "1KiB",
+        },
+        {
+            name:     "1.5 KiB",
+            bytes:    1536, // 1024 * 1.5
+            expected: "2KiB",
+        },
+        {
+            name:     "100 KiB",
+            bytes:    102400, // 1024 * 100
+            expected: "100KiB",
+        },
+        {
+            name:     "max KiB before MiB",
+            bytes:    1048575, // 1024² - 1
+            expected: "1024KiB",
+        },
+
+        // MiB (1024² to < 1024³)
+        {
+            name:     "exactly 1 MiB",
+            bytes:    1048576, // 1024²
+            expected: "1MiB",
+        },
+        {
+            name:     "10 MiB",
+            bytes:    10485760, // 1024² * 10
+            expected: "10MiB",
+        },
+        {
+            name:     "624 MiB",
+            bytes:    654311424, // 1024² * 624
+            expected: "624MiB",
+        },
+        {
+            name:     "max MiB before GiB",
+            bytes:    1073741823, // 1024³ - 1
+            expected: "1024MiB",
+        },
+
+        // GiB (1024³ to < 1024⁴)
+        {
+            name:     "exactly 1 GiB",
+            bytes:    1073741824, // 1024³
+            expected: "1GiB",
+        },
+        {
+            name:     "5 GiB",
+            bytes:    5368709120, // 1024³ * 5
+            expected: "5GiB",
+        },
+        {
+            name:     "100 GiB",
+            bytes:    107374182400, // 1024³ * 100
+            expected: "100GiB",
+        },
+        {
+            name:     "max GiB before TiB",
+            bytes:    1099511627775, // 1024⁴ - 1
+            expected: "1024GiB",
+        },
+
+        // TiB (1024⁴ to < 1024⁵)
+        {
+            name:     "exactly 1 TiB",
+            bytes:    1099511627776, // 1024⁴
+            expected: "1TiB",
+        },
+        {
+            name:     "2 TiB",
+            bytes:    2199023255552, // 1024⁴ * 2
+            expected: "2TiB",
+        },
+        {
+            name:     "10 TiB",
+            bytes:    10995116277760, // 1024⁴ * 10
+            expected: "10TiB",
+        },
+        {
+            name:     "max TiB before PiB",
+            bytes:    1125899906842623, // 1024⁵ - 1
+            expected: "1024TiB",
+        },
+
+        // PiB (1024⁵+)
+        {
+            name:     "exactly 1 PiB",
+            bytes:    1125899906842624, // 1024⁵
+            expected: "1PiB",
+        },
+        {
+            name:     "5 PiB",
+            bytes:    5629499534213120, // 1024⁵ * 5
+            expected: "5PiB",
+        },
+        {
+            name:     "1000 PiB",
+            bytes:    1125899906842624000, // 1024⁵ * 1000
+            expected: "1000PiB",
+        },
+
+        // Rounding tests
+        {
+            name:     "rounds down KiB",
+            bytes:    1024 + 256, // 1.25 KiB
+            expected: "1KiB",
+        },
+        {
+            name:     "rounds up KiB",
+            bytes:    1024 + 512, // 1.5 KiB
+            expected: "2KiB",
+        },
+        {
+            name:     "rounds down MiB",
+            bytes:    1048576 + 262144, // 1.25 MiB
+            expected: "1MiB",
+        },
+        {
+            name:     "rounds up MiB",
+            bytes:    1048576 + 524288, // 1.5 MiB
+            expected: "2MiB",
+        },
+        {
+            name:     "rounds down GiB",
+            bytes:    1073741824 + 268435456, // 1.25 GiB
+            expected: "1GiB",
+        },
+        {
+            name:     "rounds up GiB",
+            bytes:    1073741824 + 536870912, // 1.5 GiB
+            expected: "2GiB",
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            result := FormatBytes(tt.bytes)
+            assert.Equal(t, tt.expected, result, "FormatBytes(%d) should return %s", tt.bytes, tt.expected)
+        })
+    }
+}
+
+// TestFormatBytes_Consistency verifies that the function produces consistent results
+func TestFormatBytes_Consistency(t *testing.T) {
+    testCases := []int64{0, 1, 1024, 1048576, 1073741824}
+
+    for _, bytes := range testCases {
+        result1 := FormatBytes(bytes)
+        result2 := FormatBytes(bytes)
+        assert.Equal(t, result1, result2, "FormatBytes should be deterministic for %d bytes", bytes)
+    }
+}
+
+// TestFormatBytes_NoSpaces verifies that output has no spaces (as per requirement)
+func TestFormatBytes_NoSpaces(t *testing.T) {
+    testCases := []int64{0, 512, 1024, 1536, 1048576, 1073741824}
+
+    for _, bytes := range testCases {
+        result := FormatBytes(bytes)
+        assert.NotContains(t, result, " ", "FormatBytes(%d) should not contain spaces, got: %s", bytes, result)
+    }
+}
+
+// TestFormatBytes_UnitsArray verifies all unit suffixes are present in output
+func TestFormatBytes_UnitsArray(t *testing.T) {
+    tests := []struct {
+        bytes        int64
+        expectedUnit string
+    }{
+        {100, "B"},
+        {1024, "KiB"},
+        {1048576, "MiB"},
+        {1073741824, "GiB"},
+        {1099511627776, "TiB"},
+        {1125899906842624, "PiB"},
+    }
+
+    for _, tt := range tests {
+        result := FormatBytes(tt.bytes)
+        assert.Contains(t, result, tt.expectedUnit, "FormatBytes(%d) should contain unit %s", tt.bytes, tt.expectedUnit)
+    }
+}
diff --git a/internal/scripts/scripts/restore-settings-backup.sh b/internal/scripts/scripts/restore-settings-backup.sh
new file mode 100644
index 0000000..3502181
--- /dev/null
+++ b/internal/scripts/scripts/restore-settings-backup.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+export BACKUP_DIR=/settings-backup-data
+export TMP_DIR=/tmp-data
+
+RESTORE_FILE="${BACKUP_DIR}/${BACKUP_FILE}"
+
+export AWS_ACCESS_KEY_ID
+AWS_ACCESS_KEY_ID="$(cat /aws-keys/accesskey)"
+export AWS_SECRET_ACCESS_KEY
+AWS_SECRET_ACCESS_KEY="$(cat /aws-keys/secretkey)"
+
+echo "=== Downloading Settings backup \"${BACKUP_FILE}\" from bucket \"${BACKUP_CONFIGURATION_BUCKET_NAME}\"..."
+sts-toolbox aws s3 --endpoint "http://${MINIO_ENDPOINT}" --region minio cp "s3://${BACKUP_CONFIGURATION_BUCKET_NAME}/${BACKUP_CONFIGURATION_S3_PREFIX}${BACKUP_FILE}" "${TMP_DIR}/${BACKUP_FILE}"
+RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}"
+
+if [ ! -f "${RESTORE_FILE}" ]; then
+echo "=== Backup file \"${RESTORE_FILE}\" not found, exiting..."
+exit 1
+fi
+
+echo "=== Restoring settings backup from \"${BACKUP_FILE}\"..."
+/opt/docker/bin/settings-backup -Dlogback.configurationFile=/opt/docker/etc_log/logback.xml -restore "${RESTORE_FILE}"
+echo "==="