COMMANDS:
    baseline      Low load, verify monitoring works
    services      Test each service individually
    autoscaling   Test HPA scaling under load
    mixed         Realistic scenario
    stress        Find breaking points
    soak          Long-running stability
EXAMPLES:
    # Test individual services
    $(basename "$0") services --debug

    # Test autoscaling behavior
    $(basename "$0") autoscaling --debug

    # Run all load tests
    $(basename "$0") all
EOF
4751}
4852
get_base_url() {
    # Discover a reachable eoAPI base URL by probing the /stac endpoint.
    # Prints the URL on stdout and returns 0, or returns 1 if nothing answers.

    # Candidate 1: localhost — the usual setup for local development.
    local candidate="http://localhost"
    if curl -s -f -m 3 "${candidate}/stac" >/dev/null 2>&1; then
        echo "$candidate"
        return 0
    fi

    # Candidate 2: the first ingress host configured in the namespace.
    local ingress_host
    ingress_host=$(kubectl get ingress -n "$NAMESPACE" -o jsonpath='{.items[0].spec.rules[0].host}' 2>/dev/null || echo "")
    if [[ -n "$ingress_host" ]]; then
        candidate="http://${ingress_host}"
        if curl -s -f -m 3 "${candidate}/stac" >/dev/null 2>&1; then
            echo "$candidate"
            return 0
        fi
    fi

    # No reachable endpoint found.
    return 1
}
70+
test_endpoint() {
    # Drive one URL with `hey` and print a condensed result summary.
    #   $1 = URL to hit
    #   $2 = duration in seconds (default 30)
    #   $3 = concurrent workers   (default 2)
    local url="$1" duration="${2:-30}" concurrency="${3:-2}"

    # Bail out early when the load generator is missing.
    if ! command_exists hey; then
        log_error "hey not found. Install with: go install github.com/rakyll/hey@latest"
        return 1
    fi

    log_info "Testing $url (${duration}s, ${concurrency}c)"
    hey -z "${duration}s" -c "$concurrency" "$url" 2>/dev/null \
        | grep -E "(Total:|Requests/sec:|Average:|Status code)"
}
84+
monitor_during_test() {
    # Report HPA state while a load test runs, blocking for the given time.
    #   $1 = total duration in seconds.
    # BUG FIX: the previous version printed the HPA state once and then just
    # slept, so nothing was actually "monitored during" the test. Now we poll
    # every few seconds for the whole window; total blocking time is unchanged.
    local duration="$1"
    local interval=10
    local elapsed=0

    log_info "Monitor with: watch kubectl get pods -n $NAMESPACE"

    # Show initial state (header + first HPA row); tolerate a missing HPA.
    kubectl get hpa -n "$NAMESPACE" 2>/dev/null | head -2 || true

    while (( elapsed < duration )); do
        # Never sleep past the requested end of the window.
        local step=$(( duration - elapsed ))
        (( step > interval )) && step=$interval
        sleep "$step"
        elapsed=$(( elapsed + step ))
        # Data rows only — the header was already printed above.
        kubectl get hpa -n "$NAMESPACE" 2>/dev/null | tail -n +2 || true
    done
}
96+
load_baseline() {
    # Light baseline load across the core eoAPI services (stac/raster/vector).
    # Verifies that endpoints respond and that monitoring works before any
    # heavier scenario is run. Returns 1 if the cluster/namespace/URL checks fail.
    log_info "Running baseline load test..."

    validate_cluster || return 1
    validate_namespace "$NAMESPACE" || return 1

    local base_url
    if ! base_url=$(get_base_url); then
        log_error "Cannot reach eoAPI endpoints"
        return 1
    fi
    log_info "Using base URL: $base_url"

    # Wait for deployments; non-fatal — warn and continue if one is slow.
    local service
    for service in stac raster vector; do
        kubectl wait --for=condition=Available deployment/"${RELEASE_NAME}-${service}" -n "$NAMESPACE" --timeout=60s 2>/dev/null || \
            log_warn "Service $service may not be ready"
    done

    log_info "Running light load tests..."
    log_info "Monitor pods: kubectl get pods -n $NAMESPACE -w"

    # STAC collections (30s, 2 concurrent) with HPA monitoring in parallel.
    test_endpoint "$base_url/stac/collections" &
    monitor_during_test 30
    wait

    # STAC search (60s, 3 concurrent) needs a POST body, so call hey directly.
    # BUG FIX: only `hey` is required here — the old `command_exists curl &&`
    # guard silently skipped this test on hosts without curl.
    if command_exists hey; then
        log_info "Testing STAC search (60s, 3c)"
        hey -z 60s -c 3 -m POST -H "Content-Type: application/json" -d '{"limit":10}' "$base_url/stac/search" 2>/dev/null | \
            grep -E "(Total:|Requests/sec:|Average:|Status code)"
    fi

    # Health checks (defaults: 30s, 2 concurrent).
    test_endpoint "$base_url/raster/healthz"
    test_endpoint "$base_url/vector/healthz"

    log_success "Baseline load test completed"
}
53137
load_services() {
    # Per-service load scenario (stub): announces the run but generates no
    # load yet — see the TODO below.
    log_info "Running service-specific load tests..."
    # TODO: Implement individual service testing
}
58142
load_autoscaling() {
    # Sustained-load scenario intended to trigger Horizontal Pod Autoscaler
    # scaling, with periodic HPA status reporting. Requires hey, at least one
    # HPA resource in the namespace, and a running metrics-server.
    log_info "Running autoscaling tests..."

    validate_cluster || return 1
    validate_namespace "$NAMESPACE" || return 1

    # Require at least one HPA resource (kubectl may succeed with zero rows).
    if ! kubectl get hpa -n "$NAMESPACE" >/dev/null 2>&1 || [[ $(kubectl get hpa -n "$NAMESPACE" --no-headers 2>/dev/null | wc -l) -eq 0 ]]; then
        log_error "No HPA resources found. Deploy with autoscaling enabled."
        return 1
    fi

    # HPA cannot act without metrics-server providing CPU/memory metrics.
    if ! kubectl get deployment -A | grep -q metrics-server; then
        log_error "metrics-server required for autoscaling tests"
        return 1
    fi

    local base_url
    if ! base_url=$(get_base_url); then
        log_error "Cannot reach eoAPI endpoints"
        return 1
    fi
    log_info "Using base URL: $base_url"

    # Autoscaling needs healthy targets; fail hard if any service is down.
    local service
    for service in stac raster vector; do
        kubectl wait --for=condition=Available deployment/"${RELEASE_NAME}-${service}" -n "$NAMESPACE" --timeout=90s || return 1
    done

    log_info "Current HPA status:"
    kubectl get hpa -n "$NAMESPACE"

    log_info "Generating sustained load to trigger autoscaling..."

    if ! command_exists hey; then
        log_error "hey required for autoscaling tests"
        return 1
    fi

    # Generate load that should trigger HPA (10 min, 15 concurrent).
    # BUG FIX: hey (Go flag parsing) stops reading options at the first
    # non-flag argument, so the URL must come LAST. Previously the URL
    # preceded "-m POST -H ... -d ...", which were silently ignored and the
    # "sustained POST load" was actually a bare GET with no body.
    log_info "Starting sustained load test (10 minutes)..."
    hey -z 600s -c 15 -m POST \
        -H "Content-Type: application/json" -d '{"limit":100}' \
        "$base_url/stac/search" &
    local load_pid=$!

    # Monitor HPA changes every 30s (20 x 30s covers the 10-minute window).
    log_info "Monitoring HPA scaling..."
    local i
    for i in {1..20}; do
        sleep 30
        log_info "HPA status after ${i}x30s:"
        kubectl get hpa -n "$NAMESPACE" --no-headers | awk '{print $1 ": " $6 "/" $7 " replicas, CPU: " $3}'
    done

    # Stop the load generator; it may already have exited on its own.
    kill $load_pid 2>/dev/null || true
    wait $load_pid 2>/dev/null || true

    log_info "Final HPA status:"
    kubectl get hpa -n "$NAMESPACE"
    log_success "Autoscaling test completed"
}
205+
59206load_mixed () {
60207 log_info " Running mixed load test scenario..."
61208 # TODO: Implement realistic mixed scenario
@@ -83,6 +230,7 @@ load_all() {
83230
84231 load_baseline || (( failed++ ))
85232 load_services || (( failed++ ))
233+ load_autoscaling || (( failed++ ))
86234 load_mixed || (( failed++ ))
87235 load_stress || (( failed++ ))
88236 load_soak || (( failed++ ))
@@ -119,7 +267,7 @@ main() {
119267 RELEASE_NAME=" $2 "
120268 shift 2
121269 ;;
122- baseline|services|mixed|stress|soak|chaos|all)
270+ baseline|services|autoscaling| mixed|stress|soak|chaos|all)
123271 command=" $1 "
124272 shift
125273 break
@@ -141,6 +289,9 @@ main() {
141289 services)
142290 load_services
143291 ;;
292+ autoscaling)
293+ load_autoscaling
294+ ;;
144295 mixed)
145296 load_mixed
146297 ;;
0 commit comments