diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 0000000..69b379e
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,223 @@
+name: Benchmark
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+ workflow_dispatch:
+ schedule:
+ # Run benchmarks weekly on Sundays at 2 AM UTC
+ - cron: '0 2 * * 0'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ benchmark:
+ name: Performance Benchmark
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the repo
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Install MetaCall Linux
+ run: curl -sL https://raw.githubusercontent.com/metacall/install/master/install.sh | sh
+
+ - name: Install Rust
+ uses: actions-rs/toolchain@v1
+ with:
+ toolchain: stable
+ override: true
+ components: rustfmt, clippy
+
+ - name: Install Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+ cache: 'npm'
+ cache-dependency-path: tests/web-app/package-lock.json
+
+ - name: Install wrk (load testing tool)
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y wrk
+
+ - name: Build MetaSSR
+ run: cargo build --release
+
+ - name: Setup benchmark test app
+ working-directory: ./tests/web-app
+ run: |
+ npm install
+ npm run build
+
+ - name: Start MetaSSR server
+ working-directory: ./tests/web-app
+ run: |
+ npm run start &
+ echo $! > metassr.pid
+ sleep 10
+
+ - name: Warm up MetaSSR server
+ run: |
+ curl -s http://localhost:8080 > /dev/null
+ sleep 2
+
+ - name: Benchmark MetaSSR
+ run: |
+ echo "=== MetaSSR Benchmark ===" > benchmark_results.txt
+ wrk -t12 -c100 -d30s --latency http://localhost:8080 >> benchmark_results.txt
+ echo "" >> benchmark_results.txt
+
+ - name: Benchmark MetaSSR (Light Load)
+ run: |
+ echo "=== MetaSSR Light Load (1 thread, 10 connections) ===" >> benchmark_results.txt
+ wrk -t1 -c10 -d30s --latency http://localhost:8080 >> benchmark_results.txt
+ echo "" >> benchmark_results.txt
+
+ - name: Benchmark MetaSSR (Medium Load)
+ run: |
+ echo "=== MetaSSR Medium Load (4 threads, 50 connections) ===" >> benchmark_results.txt
+ wrk -t4 -c50 -d30s --latency http://localhost:8080 >> benchmark_results.txt
+ echo "" >> benchmark_results.txt
+
+ - name: Benchmark MetaSSR (Heavy Load)
+ run: |
+ echo "=== MetaSSR Heavy Load (12 threads, 1000 connections) ===" >> benchmark_results.txt
+ wrk -t12 -c1000 -d30s --latency http://localhost:8080 >> benchmark_results.txt
+ echo "" >> benchmark_results.txt
+
+ - name: Benchmark MetaSSR (Sustained Load)
+ run: |
+ echo "=== MetaSSR Sustained Load (8 threads, 200 connections, 2 minutes) ===" >> benchmark_results.txt
+ wrk -t8 -c200 -d120s --latency http://localhost:8080 >> benchmark_results.txt
+ echo "" >> benchmark_results.txt
+
+ - name: Stop MetaSSR server
+ working-directory: ./tests/web-app
+ run: |
+ if [ -f metassr.pid ]; then
+ kill $(cat metassr.pid) || true
+ rm metassr.pid
+ fi
+ sleep 5
+
+ - name: Install Python dependencies
+ run: |
+          pip install -r benchmarks/requirements.txt
+
+ - name: Setup benchmark results directory
+ run: |
+ mkdir -p benchmark-results
+          # Move any results created in the wrong location by earlier runs
+ ./benchmarks/move-results.sh
+
+ - name: Run comprehensive benchmarks
+ run: |
+ ./benchmarks/run-benchmarks.sh --skip-build --analyze
+ env:
+ RESULTS_DIR: benchmark-results
+
+ - name: Generate PR benchmark summary
+ run: python3 benchmarks/generate-pr-summary.py
+ env:
+ GITHUB_SHA: ${{ github.sha }}
+ RUNNER_OS: ${{ runner.os }}
+
+ - name: Display results
+ run: |
+ echo "Benchmark completed successfully!"
+ echo "=== PR Summary ==="
+ cat pr_benchmark_summary.md
+
+ - name: Upload benchmark results
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-results-${{ github.run_id }}
+ path: |
+ benchmark-results/
+ benchmarks/benchmark-config.json
+ retention-days: 30
+
+ - name: Comment benchmark results on PR
+ if: github.event_name == 'pull_request'
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+
+ try {
+ const summary = fs.readFileSync('pr_benchmark_summary.md', 'utf8');
+
+ await github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: summary
+ });
+
+ console.log('Successfully posted benchmark results to PR');
+ } catch (error) {
+ console.error('Failed to post benchmark results:', error);
+ }
+
+ memory-benchmark:
+ name: Memory Usage Benchmark
+ runs-on: ubuntu-latest
+ timeout-minutes: 20
+
+ steps:
+ - name: Check out the repo
+ uses: actions/checkout@v4
+
+ - name: Install MetaCall Linux
+ run: curl -sL https://raw.githubusercontent.com/metacall/install/master/install.sh | sh
+
+ - name: Install Rust
+ uses: actions-rs/toolchain@v1
+ with:
+ toolchain: stable
+ override: true
+
+ - name: Install Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Build MetaSSR
+ run: cargo build --release
+
+ - name: Setup test app
+ working-directory: ./tests/web-app
+ run: |
+ npm install
+ npm run build
+
+ - name: Monitor MetaSSR memory usage
+ working-directory: ./tests/web-app
+ run: |
+ npm run start &
+ SERVER_PID=$!
+ sleep 10
+
+ # Monitor memory for 60 seconds
+ echo "timestamp,memory_mb" > memory_usage.csv
+ for i in {1..60}; do
+ memory=$(ps -o rss= -p $SERVER_PID | awk '{print $1/1024}')
+ echo "$i,$memory" >> memory_usage.csv
+ sleep 1
+ done
+
+ kill $SERVER_PID
+
+ - name: Upload memory benchmark
+ uses: actions/upload-artifact@v4
+ with:
+ name: memory-benchmark-${{ github.run_id }}
+ path: tests/web-app/memory_usage.csv
+ retention-days: 30
\ No newline at end of file
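The memory-benchmark job above only collects raw samples; for a quick peak/average readout of the uploaded `memory_usage.csv` artifact, a minimal Python sketch along these lines works. The file path and the `timestamp,memory_mb` header come from the workflow step above; the script itself is illustrative and not part of this patch.

```python
#!/usr/bin/env python3
"""Summarize the memory_usage.csv written by the memory-benchmark job."""
import csv
from pathlib import Path

# Path and column names match the workflow step above.
csv_path = Path("tests/web-app/memory_usage.csv")

samples = []
with csv_path.open() as f:
    for row in csv.DictReader(f):
        # Skip rows where `ps` produced no output (e.g. the server exited early).
        if row["memory_mb"]:
            samples.append(float(row["memory_mb"]))

if samples:
    print(f"samples: {len(samples)}")
    print(f"peak:    {max(samples):.1f} MB")
    print(f"average: {sum(samples) / len(samples):.1f} MB")
else:
    print("no memory samples recorded")
```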
diff --git a/benchmarks/README.md b/benchmarks/README.md
old mode 100644
new mode 100755
index ff9de4e..03c47e8
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -1,14 +1,79 @@
-# Benchmarks
+# MetaSSR Benchmarks
-TODO: Implement docker compose, meanwhile you can use this for building and running:
+This directory contains all benchmark-related scripts and configurations for MetaSSR performance testing.
-Client:
-```sh
-docker build -t metacall/metassr_benchmarks:client .
-```
+## Scripts Overview
+
+- `run-benchmarks.sh` - Main automated benchmark runner
+- `benchmark.sh` - Core benchmark execution script
+- `analyze-benchmarks.py` - Results analysis and reporting
+- `generate-pr-summary.py` - Generate PR comment summaries
+- `benchmark-config.json` - Test scenarios configuration
+- `requirements.txt` - Python dependencies
+
+## Quick Start
+
+```bash
+# Run full benchmark suite
+./benchmarks/run-benchmarks.sh
+
+# Run with custom options
+./benchmarks/run-benchmarks.sh --port 3000 --build debug --graphs
-Next.js:
-```sh
-docker build -t nextjs-docker .
-docker run -p 3000:3000 nextjs-docker
+# Analyze existing results
+python3 benchmarks/analyze-benchmarks.py benchmark-results/benchmark_<timestamp>.json --plots
```
+
+## Dependencies
+
+### System Requirements
+- `wrk` - HTTP benchmarking tool
+- `jq` - JSON processor
+- `curl` - HTTP client
+- `lsof` - List open files (for process monitoring)
+
+### Python Requirements
+Install with: `pip install -r benchmarks/requirements.txt`
+- pandas - Data analysis
+- matplotlib - Plotting
+- seaborn - Statistical visualization
+- numpy - Numerical computing
+
+## Benchmark Scenarios
+
+Configured in `benchmark-config.json`:
+
+| Scenario | Purpose | Threads | Connections | Duration |
+|----------|---------|---------|-------------|----------|
+| Light Load | Basic functionality | 1 | 10 | 30s |
+| Medium Load | Typical usage | 4 | 50 | 30s |
+| Standard Load | Standard testing | 8 | 100 | 30s |
+| Heavy Load | Peak performance | 12 | 500 | 30s |
+| Extreme Load | Stress testing | 16 | 1000 | 30s |
+| Sustained Load | Stability testing | 8 | 200 | 2min |
+| Endurance Test | Long-term stability | 4 | 100 | 5min |
+
+## Output Formats
+
+- **JSON** - Structured results for analysis
+- **CSV** - Tabular data for spreadsheets
+- **Markdown** - Human-readable reports
+- **PNG** - Performance charts (with --plots)
+
+## CI/CD Integration
+
+The benchmarks are automatically run via GitHub Actions on:
+- Push to master
+- Pull requests
+- Weekly schedule
+- Manual workflow dispatch
+
+Results are posted as PR comments and stored as workflow artifacts.
+
+## Contributing
+
+When modifying benchmarks:
+1. Test locally first
+2. Update configuration if adding scenarios
+3. Ensure scripts remain executable
+4. Update documentation accordingly
\ No newline at end of file
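The scenario table in the README mirrors `benchmarks/benchmark-config.json`; a small sketch like the following prints the configured scenarios so the two stay easy to compare. It only assumes the `benchmark.scenarios` keys that appear later in this patch and is not part of the PR.

```python
#!/usr/bin/env python3
"""Print the scenarios configured in benchmarks/benchmark-config.json."""
import json
from pathlib import Path

config = json.loads(Path("benchmarks/benchmark-config.json").read_text())

print(f"{'scenario':<16} {'threads':>7} {'connections':>11} {'duration_s':>10}")
for scenario in config["benchmark"]["scenarios"]:
    print(f"{scenario['name']:<16} {scenario['threads']:>7} "
          f"{scenario['connections']:>11} {scenario['duration']:>10}")
```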
diff --git a/benchmarks/analyze-benchmarks.py b/benchmarks/analyze-benchmarks.py
new file mode 100755
index 0000000..8723cde
--- /dev/null
+++ b/benchmarks/analyze-benchmarks.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python3
+"""
+MetaSSR Benchmark Results Analyzer
+Analyzes benchmark results and generates comprehensive reports
+"""
+
+import json
+import csv
+import argparse
+import os
+import sys
+from datetime import datetime
+from pathlib import Path
+import statistics
+# NOTE: matplotlib is imported lazily inside generate_plots() so that report and
+# CSV generation still work when the optional plotting dependencies are missing.
+
+class BenchmarkAnalyzer:
+ def __init__(self, results_dir="benchmark-results"):
+ self.results_dir = Path(results_dir)
+ self.results_dir.mkdir(exist_ok=True)
+
+ def load_results(self, result_file):
+ """Load benchmark results from JSON file"""
+ with open(result_file, 'r') as f:
+ return json.load(f)
+
+ def analyze_performance(self, results):
+ """Analyze performance metrics"""
+ analysis = {
+ 'summary': {},
+ 'trends': {},
+ 'recommendations': []
+ }
+
+ tests = results['tests']
+
+ # Calculate overall statistics
+ rps_values = [float(test['results']['requests_per_sec'].replace(',', ''))
+ for test in tests if test['results']['requests_per_sec']]
+
+ analysis['summary'] = {
+ 'total_tests': len(tests),
+ 'max_rps': max(rps_values) if rps_values else 0,
+ 'avg_rps': statistics.mean(rps_values) if rps_values else 0,
+ 'min_rps': min(rps_values) if rps_values else 0
+ }
+
+ # Analyze each test
+ for test in tests:
+ test_name = test['name']
+ results_data = test['results']
+
+ # Convert latency to milliseconds
+ avg_latency = self.parse_latency(results_data['avg_latency'])
+ p99_latency = self.parse_latency(results_data['latency_percentiles']['p99'])
+
+ analysis['trends'][test_name] = {
+ 'rps': float(results_data['requests_per_sec'].replace(',', '') or 0),
+ 'avg_latency_ms': avg_latency,
+ 'p99_latency_ms': p99_latency,
+ 'errors': int(results_data['total_errors'] or 0),
+ 'total_requests': int(results_data['total_requests'].replace(',', '') or 0)
+ }
+
+ # Generate recommendations
+ analysis['recommendations'] = self.generate_recommendations(analysis)
+
+ return analysis
+
+ def parse_latency(self, latency_str):
+ """Parse latency string and convert to milliseconds"""
+ if not latency_str:
+ return 0
+
+ latency_str = latency_str.lower()
+ if 'ms' in latency_str:
+ return float(latency_str.replace('ms', ''))
+ elif 'us' in latency_str:
+ return float(latency_str.replace('us', '')) / 1000
+ elif 's' in latency_str:
+ return float(latency_str.replace('s', '')) * 1000
+ else:
+ return float(latency_str)
+
+ def generate_recommendations(self, analysis):
+ """Generate performance recommendations"""
+ recommendations = []
+ trends = analysis['trends']
+
+ # Check for high latency
+ high_latency_tests = [name for name, data in trends.items()
+ if data['avg_latency_ms'] > 100]
+ if high_latency_tests:
+ recommendations.append({
+ 'type': 'warning',
+ 'message': f"High average latency detected in: {', '.join(high_latency_tests)}",
+ 'suggestion': "Consider optimizing server response time or reducing load"
+ })
+
+ # Check for errors
+ error_tests = [name for name, data in trends.items() if data['errors'] > 0]
+ if error_tests:
+ recommendations.append({
+ 'type': 'critical',
+ 'message': f"Errors detected in: {', '.join(error_tests)}",
+ 'suggestion': "Investigate error causes and improve error handling"
+ })
+
+ # Check performance scaling
+ rps_values = [(name, data['rps']) for name, data in trends.items()]
+ rps_values.sort(key=lambda x: x[1], reverse=True)
+
+ if len(rps_values) > 1:
+ best_test = rps_values[0]
+ recommendations.append({
+ 'type': 'info',
+ 'message': f"Best performance: {best_test[0]} with {best_test[1]:.0f} RPS",
+ 'suggestion': "Use this configuration as baseline for optimization"
+ })
+
+ return recommendations
+
+ def generate_plots(self, analysis, output_dir):
+ """Generate performance visualization plots"""
+        import matplotlib.pyplot as plt  # lazy import: plots are optional
+
+        plt.style.use('seaborn-v0_8')
+ output_dir = Path(output_dir)
+ output_dir.mkdir(exist_ok=True)
+
+ trends = analysis['trends']
+ test_names = list(trends.keys())
+
+ # RPS vs Test scenario
+ fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))
+
+ # Requests per second
+ rps_values = [trends[name]['rps'] for name in test_names]
+ ax1.bar(test_names, rps_values, color='skyblue')
+ ax1.set_title('Requests per Second by Test Scenario')
+ ax1.set_ylabel('Requests/sec')
+ ax1.tick_params(axis='x', rotation=45)
+
+ # Average latency
+ latency_values = [trends[name]['avg_latency_ms'] for name in test_names]
+ ax2.bar(test_names, latency_values, color='lightcoral')
+ ax2.set_title('Average Latency by Test Scenario')
+ ax2.set_ylabel('Latency (ms)')
+ ax2.tick_params(axis='x', rotation=45)
+
+ # P99 latency
+ p99_values = [trends[name]['p99_latency_ms'] for name in test_names]
+ ax3.bar(test_names, p99_values, color='lightgreen')
+ ax3.set_title('P99 Latency by Test Scenario')
+ ax3.set_ylabel('P99 Latency (ms)')
+ ax3.tick_params(axis='x', rotation=45)
+
+ # Error count
+ error_values = [trends[name]['errors'] for name in test_names]
+ ax4.bar(test_names, error_values, color='orange')
+ ax4.set_title('Errors by Test Scenario')
+ ax4.set_ylabel('Error Count')
+ ax4.tick_params(axis='x', rotation=45)
+
+ plt.tight_layout()
+ plt.savefig(output_dir / 'performance_overview.png', dpi=300, bbox_inches='tight')
+ plt.close()
+
+ # Performance trend line
+ plt.figure(figsize=(12, 6))
+ plt.plot(test_names, rps_values, marker='o', linewidth=2, markersize=8)
+ plt.title('Performance Trend Across Test Scenarios')
+ plt.xlabel('Test Scenario')
+ plt.ylabel('Requests per Second')
+ plt.xticks(rotation=45)
+ plt.grid(True, alpha=0.3)
+ plt.tight_layout()
+ plt.savefig(output_dir / 'performance_trend.png', dpi=300, bbox_inches='tight')
+ plt.close()
+
+ def generate_report(self, results, analysis, output_file):
+ """Generate comprehensive markdown report"""
+ with open(output_file, 'w') as f:
+ f.write("# MetaSSR Benchmark Report\n\n")
+
+ # Metadata
+ metadata = results['metadata']
+ f.write("## System Information\n\n")
+ f.write(f"- **Timestamp:** {metadata['timestamp']}\n")
+ f.write(f"- **Hostname:** {metadata['hostname']}\n")
+ f.write(f"- **OS:** {metadata['os']}\n")
+ f.write(f"- **Architecture:** {metadata['arch']}\n")
+ f.write(f"- **CPU Cores:** {metadata['cpu_cores']}\n")
+ f.write(f"- **Memory:** {metadata['memory_gb']} GB\n\n")
+
+ # Summary
+ summary = analysis['summary']
+ f.write("## Performance Summary\n\n")
+ f.write(f"- **Total Tests:** {summary['total_tests']}\n")
+ f.write(f"- **Maximum RPS:** {summary['max_rps']:,.0f}\n")
+ f.write(f"- **Average RPS:** {summary['avg_rps']:,.0f}\n")
+ f.write(f"- **Minimum RPS:** {summary['min_rps']:,.0f}\n\n")
+
+ # Detailed results
+ f.write("## Detailed Results\n\n")
+ f.write("| Test Scenario | RPS | Avg Latency | P99 Latency | Errors | Total Requests |\n")
+ f.write("|---------------|-----|-------------|-------------|--------|-----------------|\n")
+
+ for name, data in analysis['trends'].items():
+ f.write(f"| {name} | {data['rps']:,.0f} | {data['avg_latency_ms']:.2f}ms | "
+ f"{data['p99_latency_ms']:.2f}ms | {data['errors']} | {data['total_requests']:,} |\n")
+
+ # Recommendations
+ if analysis['recommendations']:
+ f.write("\n## Recommendations\n\n")
+ for rec in analysis['recommendations']:
+                    icon = "🔴" if rec['type'] == 'critical' else "⚠️" if rec['type'] == 'warning' else "ℹ️"
+ f.write(f"{icon} **{rec['message']}**\n")
+ f.write(f" {rec['suggestion']}\n\n")
+
+ def export_csv(self, analysis, output_file):
+ """Export results to CSV format"""
+ with open(output_file, 'w', newline='') as f:
+ writer = csv.writer(f)
+ writer.writerow(['Test', 'RPS', 'Avg_Latency_ms', 'P99_Latency_ms', 'Errors', 'Total_Requests'])
+
+ for name, data in analysis['trends'].items():
+ writer.writerow([
+ name,
+ data['rps'],
+ data['avg_latency_ms'],
+ data['p99_latency_ms'],
+ data['errors'],
+ data['total_requests']
+ ])
+
+def main():
+ parser = argparse.ArgumentParser(description='Analyze MetaSSR benchmark results')
+ parser.add_argument('result_file', help='Path to benchmark results JSON file')
+ parser.add_argument('-o', '--output', default='analysis_report',
+ help='Output directory for analysis results')
+ parser.add_argument('--plots', action='store_true',
+ help='Generate performance plots')
+
+ args = parser.parse_args()
+
+ if not os.path.exists(args.result_file):
+ print(f"Error: Result file {args.result_file} not found")
+ sys.exit(1)
+
+ # Create analyzer
+ analyzer = BenchmarkAnalyzer()
+
+ # Load and analyze results
+ print("Loading benchmark results...")
+ results = analyzer.load_results(args.result_file)
+
+ print("Analyzing performance...")
+ analysis = analyzer.analyze_performance(results)
+
+ # Create output directory
+ output_dir = Path(args.output)
+ output_dir.mkdir(exist_ok=True)
+
+ # Generate reports
+ print("Generating reports...")
+
+ # Markdown report
+ report_file = output_dir / 'benchmark_report.md'
+ analyzer.generate_report(results, analysis, report_file)
+ print(f"Generated report: {report_file}")
+
+ # CSV export
+ csv_file = output_dir / 'benchmark_results.csv'
+ analyzer.export_csv(analysis, csv_file)
+ print(f"Generated CSV: {csv_file}")
+
+ # Generate plots if requested
+ if args.plots:
+ try:
+ print("Generating performance plots...")
+ analyzer.generate_plots(analysis, output_dir)
+ print(f"Generated plots in: {output_dir}")
+ except ImportError:
+ print("Warning: matplotlib/seaborn not available, skipping plots")
+
+ # Print summary
+ print("\n=== Performance Summary ===")
+ summary = analysis['summary']
+ print(f"Maximum RPS: {summary['max_rps']:,.0f}")
+ print(f"Average RPS: {summary['avg_rps']:,.0f}")
+ print(f"Total Tests: {summary['total_tests']}")
+
+ if analysis['recommendations']:
+ print("\n=== Recommendations ===")
+ for rec in analysis['recommendations']:
+ print(f"- {rec['message']}")
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
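The analyzer expects the JSON layout written by `benchmarks/benchmark.sh`: a `metadata` block plus a `tests` array whose entries carry `results` and `latency_percentiles`. A synthetic fixture such as the one below, with made-up numbers, is enough to exercise `analyze-benchmarks.py` locally without running `wrk`.

```python
#!/usr/bin/env python3
"""Write a tiny synthetic results file to exercise analyze-benchmarks.py without wrk."""
import json

# Field names mirror what benchmarks/benchmark.sh writes; the values are made up.
fixture = {
    "metadata": {
        "timestamp": "2024-01-01T00:00:00+00:00",
        "hostname": "localhost",
        "os": "Linux",
        "arch": "x86_64",
        "cpu_cores": 4,
        "memory_gb": 16,
    },
    "tests": [
        {
            "name": "Light Load",
            "config": {"threads": 1, "connections": 10, "duration": 30,
                       "url": "http://localhost:8080"},
            "results": {
                "requests_per_sec": "1,234.56",
                "transfer_per_sec": "1.2MB",
                "avg_latency": "8.10ms",
                "total_requests": "37,000",
                "total_errors": "0",
                "latency_percentiles": {"p50": "7.90ms", "p75": "8.50ms",
                                        "p90": "9.20ms", "p99": "12.40ms"},
            },
            "raw_output": "(omitted)",
        }
    ],
}

with open("sample_results.json", "w") as f:
    json.dump(fixture, f, indent=2)

print("Wrote sample_results.json; analyze with:")
print("  python3 benchmarks/analyze-benchmarks.py sample_results.json -o sample_analysis")
```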
diff --git a/benchmarks/benchmark-config.json b/benchmarks/benchmark-config.json
new file mode 100755
index 0000000..cfb9e5c
--- /dev/null
+++ b/benchmarks/benchmark-config.json
@@ -0,0 +1,85 @@
+{
+ "benchmark": {
+ "scenarios": [
+ {
+ "name": "light_load",
+ "description": "Light load testing for basic functionality",
+ "threads": 1,
+ "connections": 10,
+ "duration": 30,
+ "expected_rps": 1000
+ },
+ {
+ "name": "medium_load",
+ "description": "Medium load testing for typical usage",
+ "threads": 4,
+ "connections": 50,
+ "duration": 30,
+ "expected_rps": 5000
+ },
+ {
+ "name": "standard_load",
+ "description": "Standard load testing",
+ "threads": 8,
+ "connections": 100,
+ "duration": 30,
+ "expected_rps": 10000
+ },
+ {
+ "name": "heavy_load",
+ "description": "Heavy load testing for peak performance",
+ "threads": 12,
+ "connections": 500,
+ "duration": 30,
+ "expected_rps": 50000
+ },
+ {
+ "name": "extreme_load",
+ "description": "Extreme load testing to find limits",
+ "threads": 16,
+ "connections": 1000,
+ "duration": 30,
+ "expected_rps": 80000
+ },
+ {
+ "name": "sustained_load",
+ "description": "Sustained load for stability testing",
+ "threads": 8,
+ "connections": 200,
+ "duration": 120,
+ "expected_rps": 15000
+ },
+ {
+ "name": "endurance_test",
+ "description": "Long-running endurance test",
+ "threads": 4,
+ "connections": 100,
+ "duration": 300,
+ "expected_rps": 8000
+ }
+ ],
+ "thresholds": {
+ "max_avg_latency_ms": 50,
+ "max_p99_latency_ms": 200,
+ "min_success_rate": 99.9,
+ "max_error_rate": 0.1
+ },
+ "monitoring": {
+ "memory_threshold_mb": 512,
+ "cpu_threshold_percent": 80,
+ "sample_interval_seconds": 1
+ }
+ },
+ "server": {
+ "url": "http://localhost:8080",
+ "warmup_requests": 10,
+ "health_check_timeout": 30,
+ "startup_wait": 10
+ },
+ "output": {
+ "results_directory": "benchmark-results",
+ "formats": ["json", "csv", "markdown"],
+ "include_raw_output": true,
+ "retention_days": 30
+ }
+}
\ No newline at end of file
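Note that the `thresholds` block above is configuration only; none of the scripts in this patch enforce it. A hypothetical gate could compare an analysis against those limits along these lines. The `analysis.json` file and its `trends` layout are assumptions that mirror `BenchmarkAnalyzer.analyze_performance`; nothing in this PR writes such a file.

```python
#!/usr/bin/env python3
"""Hypothetical threshold gate comparing analysis results against benchmark-config.json."""
import json
import sys
from pathlib import Path

config = json.loads(Path("benchmarks/benchmark-config.json").read_text())
thresholds = config["benchmark"]["thresholds"]

# Assumed input: the `trends` mapping from BenchmarkAnalyzer.analyze_performance(),
# saved to analysis.json by hand. Purely illustrative.
trends = json.loads(Path("analysis.json").read_text())["trends"]

failed = False
for name, data in trends.items():
    if data["avg_latency_ms"] > thresholds["max_avg_latency_ms"]:
        print(f"FAIL {name}: avg latency {data['avg_latency_ms']:.1f}ms "
              f"exceeds {thresholds['max_avg_latency_ms']}ms")
        failed = True
    if data["p99_latency_ms"] > thresholds["max_p99_latency_ms"]:
        print(f"FAIL {name}: p99 latency {data['p99_latency_ms']:.1f}ms "
              f"exceeds {thresholds['max_p99_latency_ms']}ms")
        failed = True

sys.exit(1 if failed else 0)
```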
diff --git a/benchmarks/benchmark.sh b/benchmarks/benchmark.sh
new file mode 100755
index 0000000..afaaf04
--- /dev/null
+++ b/benchmarks/benchmark.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+
+# MetaSSR Benchmark Suite
+# Comprehensive performance testing for MetaSSR framework
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Configuration
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+DEFAULT_RESULTS_DIR="$PROJECT_ROOT/benchmark-results"
+RESULTS_DIR="${RESULTS_DIR:-$DEFAULT_RESULTS_DIR}"
+TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
+RESULT_FILE="$RESULTS_DIR/benchmark_$TIMESTAMP.json"
+LOG_FILE="$RESULTS_DIR/benchmark_$TIMESTAMP.log"
+
+# Create results directory
+mkdir -p "$RESULTS_DIR"
+
+echo -e "${BLUE}=== MetaSSR Benchmark Suite ===${NC}"
+echo "Starting comprehensive performance testing..."
+echo "Results will be saved to: $RESULT_FILE"
+echo "Logs will be saved to: $LOG_FILE"
+
+# Initialize results JSON
+cat > "$RESULT_FILE" << EOF
+{
+ "metadata": {
+ "timestamp": "$(date -Iseconds)",
+ "hostname": "$(hostname)",
+ "os": "$(uname -s)",
+ "arch": "$(uname -m)",
+ "cpu_cores": $(nproc),
+ "memory_gb": $(free -g | awk '/^Mem:/{print $2}')
+ },
+ "tests": []
+}
+EOF
+
+# Function to run benchmark and parse results
+run_benchmark() {
+ local test_name="$1"
+ local threads="$2"
+ local connections="$3"
+ local duration="$4"
+ local url="$5"
+
+ echo -e "${YELLOW}Running $test_name...${NC}"
+ echo "Threads: $threads, Connections: $connections, Duration: ${duration}s"
+
+ # Run wrk and capture output
+ local wrk_output=$(wrk -t"$threads" -c"$connections" -d"${duration}s" --latency "$url" 2>&1)
+
+ # Parse wrk output
+ local requests_per_sec=$(echo "$wrk_output" | grep "Requests/sec:" | awk '{print $2}')
+ local transfer_per_sec=$(echo "$wrk_output" | grep "Transfer/sec:" | awk '{print $2}')
+ local avg_latency=$(echo "$wrk_output" | grep "Latency" | head -1 | awk '{print $2}')
+ local total_requests=$(echo "$wrk_output" | grep "requests in" | awk '{print $1}')
+    local total_errors=$(echo "$wrk_output" | awk '/Socket errors:/ {gsub(",", ""); print $4+$6+$8+$10}')
+    total_errors=${total_errors:-0}
+
+ # Extract percentile latencies
+ local latency_50=$(echo "$wrk_output" | grep "50%" | awk '{print $2}')
+ local latency_75=$(echo "$wrk_output" | grep "75%" | awk '{print $2}')
+ local latency_90=$(echo "$wrk_output" | grep "90%" | awk '{print $2}')
+ local latency_99=$(echo "$wrk_output" | grep "99%" | awk '{print $2}')
+
+ # Add to results JSON
+ local test_json=$(cat << EOF
+{
+ "name": "$test_name",
+ "config": {
+ "threads": $threads,
+ "connections": $connections,
+ "duration": $duration,
+ "url": "$url"
+ },
+ "results": {
+ "requests_per_sec": "$requests_per_sec",
+ "transfer_per_sec": "$transfer_per_sec",
+ "avg_latency": "$avg_latency",
+ "total_requests": "$total_requests",
+ "total_errors": "$total_errors",
+ "latency_percentiles": {
+ "p50": "$latency_50",
+ "p75": "$latency_75",
+ "p90": "$latency_90",
+ "p99": "$latency_99"
+ }
+ },
+ "raw_output": $(echo "$wrk_output" | jq -Rs .)
+}
+EOF
+)
+
+ # Update results file
+ jq ".tests += [$test_json]" "$RESULT_FILE" > "${RESULT_FILE}.tmp" && mv "${RESULT_FILE}.tmp" "$RESULT_FILE"
+
+ # Log output
+ echo "=== $test_name ===" >> "$LOG_FILE"
+ echo "$wrk_output" >> "$LOG_FILE"
+ echo "" >> "$LOG_FILE"
+
+    echo -e "${GREEN}✓ $test_name completed${NC}"
+ echo " Requests/sec: $requests_per_sec"
+ echo " Avg Latency: $avg_latency"
+ echo ""
+}
+
+# Function to check server health
+check_server() {
+ local url="$1"
+ local max_attempts=30
+ local attempt=1
+
+ echo -e "${YELLOW}Checking server health at $url...${NC}"
+
+ while [ $attempt -le $max_attempts ]; do
+ if curl -s --max-time 5 "$url" > /dev/null 2>&1; then
+            echo -e "${GREEN}✓ Server is responding${NC}"
+ return 0
+ fi
+ echo -n "."
+ sleep 1
+ attempt=$((attempt + 1))
+ done
+
+    echo -e "\n${RED}✗ Server is not responding after $max_attempts attempts${NC}"
+ return 1
+}
+
+# Function to warm up server
+warmup_server() {
+ local url="$1"
+ echo -e "${YELLOW}Warming up server...${NC}"
+
+ for i in {1..10}; do
+ curl -s "$url" > /dev/null 2>&1 || true
+ sleep 0.5
+ done
+
+    echo -e "${GREEN}✓ Server warmed up${NC}"
+}
+
+# Function to monitor system resources
+monitor_resources() {
+ local pid="$1"
+ local duration="$2"
+ local output_file="$3"
+
+ echo "timestamp,cpu_percent,memory_mb,memory_percent" > "$output_file"
+
+ for i in $(seq 1 $duration); do
+ local cpu=$(ps -p "$pid" -o %cpu --no-headers 2>/dev/null || echo "0")
+ local memory_kb=$(ps -p "$pid" -o rss --no-headers 2>/dev/null || echo "0")
+ local memory_mb=$((memory_kb / 1024))
+ local memory_percent=$(ps -p "$pid" -o %mem --no-headers 2>/dev/null || echo "0")
+
+ echo "$i,$cpu,$memory_mb,$memory_percent" >> "$output_file"
+ sleep 1
+ done
+}
+
+# Main execution
+main() {
+    local server_url="${SERVER_URL:-http://localhost:8080}"
+    local server_pid=""
+
+    echo -e "${BLUE}Checking MetaSSR server at $server_url...${NC}"
+
+ # Check if server is already running
+ if ! check_server "$server_url" 2>/dev/null; then
+ echo "Server not running, please start MetaSSR server first"
+ exit 1
+ fi
+
+ # Get server PID for monitoring
+    server_pid=$(lsof -ti:"${server_url##*:}" 2>/dev/null || echo "")
+
+ # Warm up server
+ warmup_server "$server_url"
+
+ # Start resource monitoring in background
+ if [ -n "$server_pid" ]; then
+ monitor_resources "$server_pid" 300 "$RESULTS_DIR/resources_$TIMESTAMP.csv" &
+ monitor_pid=$!
+ fi
+
+ # Run benchmark suite
+ echo -e "${BLUE}Starting benchmark tests...${NC}"
+
+ # Light load test
+ run_benchmark "Light Load" 1 10 30 "$server_url"
+
+ # Medium load test
+ run_benchmark "Medium Load" 4 50 30 "$server_url"
+
+ # Standard load test
+ run_benchmark "Standard Load" 8 100 30 "$server_url"
+
+ # Heavy load test
+ run_benchmark "Heavy Load" 12 500 30 "$server_url"
+
+ # Extreme load test
+ run_benchmark "Extreme Load" 16 1000 30 "$server_url"
+
+ # Sustained load test
+ run_benchmark "Sustained Load" 8 200 120 "$server_url"
+
+ # Endurance test
+ run_benchmark "Endurance Test" 4 100 300 "$server_url"
+
+ # Stop resource monitoring
+ if [ -n "$monitor_pid" ]; then
+ kill "$monitor_pid" 2>/dev/null || true
+ fi
+
+ echo -e "${GREEN}=== Benchmark Suite Completed ===${NC}"
+ echo "Results saved to: $RESULT_FILE"
+ echo "Logs saved to: $LOG_FILE"
+
+ # Generate summary
+ generate_summary
+}
+
+# Function to generate benchmark summary
+generate_summary() {
+ local summary_file="$RESULTS_DIR/summary_$TIMESTAMP.md"
+
+ cat > "$summary_file" << EOF
+# MetaSSR Benchmark Summary
+
+**Generated:** $(date)
+**Scope:** Multiple test scenarios
+
+## System Information
+- **OS:** $(uname -s) $(uname -r)
+- **Architecture:** $(uname -m)
+- **CPU Cores:** $(nproc)
+- **Memory:** $(free -h | awk '/^Mem:/{print $2}')
+
+## Test Results
+
+EOF
+
+ # Parse and format results
+ jq -r '.tests[] | "### \(.name)\n- **Requests/sec:** \(.results.requests_per_sec)\n- **Avg Latency:** \(.results.avg_latency)\n- **Total Requests:** \(.results.total_requests)\n- **Errors:** \(.results.total_errors)\n- **P99 Latency:** \(.results.latency_percentiles.p99)\n"' "$RESULT_FILE" >> "$summary_file"
+
+ echo "Summary generated: $summary_file"
+}
+
+# Check dependencies
+check_dependencies() {
+ local deps=("wrk" "jq" "curl" "lsof")
+
+ for dep in "${deps[@]}"; do
+ if ! command -v "$dep" &> /dev/null; then
+ echo -e "${RED}Error: $dep is not installed${NC}"
+ echo "Please install required dependencies:"
+ echo " Ubuntu/Debian: sudo apt-get install wrk jq curl lsof"
+ echo " macOS: brew install wrk jq curl lsof"
+ exit 1
+ fi
+ done
+}
+
+# Help function
+show_help() {
+ cat << EOF
+MetaSSR Benchmark Suite
+
+Usage: $0 [options]
+
+Options:
+ -h, --help Show this help message
+ -u, --url Server URL (default: http://localhost:8080)
+ -o, --output Output directory (default: benchmark-results)
+
+Examples:
+ $0 # Run full benchmark suite
+ $0 -u http://localhost:3000 # Test different server
+ $0 -o custom-results # Custom output directory
+
+EOF
+}
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -h|--help)
+ show_help
+ exit 0
+ ;;
+ -u|--url)
+ SERVER_URL="$2"
+ shift 2
+ ;;
+ -o|--output)
+ RESULTS_DIR="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown option: $1"
+ show_help
+ exit 1
+ ;;
+ esac
+done
+
+# Check dependencies and run
+check_dependencies
+main "$@"
\ No newline at end of file
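`run_benchmark` scrapes plain-text `wrk` output with grep/awk; the same fields can be pulled out in Python for ad-hoc checks. The transcript below is an abbreviated, made-up `wrk --latency` run, and the regexes mirror the fields the shell script extracts; none of this is part of the patch.

```python
#!/usr/bin/env python3
"""Parse the same fields from wrk output that benchmark.sh extracts with grep/awk."""
import re

# Abbreviated, made-up example of `wrk --latency` output.
sample = """\
Running 30s test @ http://localhost:8080
  Latency Distribution
     50%    7.90ms
     75%    8.50ms
     90%    9.20ms
     99%   12.40ms
  370000 requests in 30.00s, 120.00MB read
Requests/sec:  12345.67
Transfer/sec:      4.00MB
"""

def field(pattern, text, default=""):
    match = re.search(pattern, text, re.MULTILINE)
    return match.group(1) if match else default

print("requests_per_sec:", field(r"^Requests/sec:\s+(\S+)", sample))
print("transfer_per_sec:", field(r"^Transfer/sec:\s+(\S+)", sample))
print("total_requests:  ", field(r"^\s*(\d+) requests in", sample))
print("p99_latency:     ", field(r"^\s*99%\s+(\S+)", sample))
```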
diff --git a/benchmarks/client/Dockerfile b/benchmarks/client/Dockerfile
deleted file mode 100644
index fe95ccf..0000000
--- a/benchmarks/client/Dockerfile
+++ /dev/null
@@ -1,42 +0,0 @@
-# Build image
-FROM debian:bookworm-slim AS builder
-
-# Image descriptor
-LABEL copyright.name="Vicente Eduardo Ferrer Garcia" \
- copyright.address="vic798@gmail.com" \
- maintainer.name="Vicente Eduardo Ferrer Garcia" \
- maintainer.address="vic798@gmail.com" \
- vendor="MetaCall Inc." \
- version="0.1"
-
-# Install wrk dependencies
-RUN apt-get update && apt-get install -y --no-install-recommends \
- build-essential \
- libssl-dev \
- git \
- zlib1g-dev \
- ca-certificates
-
-# Build wrk2
-RUN git clone https://github.com/giltene/wrk2.git \
- && cd wrk2 && make -j$(grep -c processor /proc/cpuinfo)
-
-# Client image
-FROM debian:bookworm-slim AS client
-
-# Install wrk dependencies
-RUN apt-get update && apt-get install -y --no-install-recommends \
- libssl3 \
- ca-certificates
-
-# Copy wrk executable
-COPY --from=builder /wrk2/wrk /usr/bin
-
-# Raise the limits to successfully run benchmarks
-RUN ulimit -c -m -s -t unlimited
-
-# Copy wrk scripts
-COPY scripts /usr/bin
-
-# Entry point
-ENTRYPOINT ["/usr/bin/wrk"]
diff --git a/benchmarks/client/scripts/post.lua b/benchmarks/client/scripts/post.lua
deleted file mode 100644
index 489ab63..0000000
--- a/benchmarks/client/scripts/post.lua
+++ /dev/null
@@ -1,15 +0,0 @@
--- Define request method, body and headers
-wrk.method = "POST"
-wrk.body = "{ \"x\": \"abc\" }"
-wrk.headers["Content-Type"] = "application/json"
-
--- Response method
-function response(status, headers, body)
- -- Strip whitespaces and newlines
- str = body:gsub("^%s*(.-)%s*$", "%1")
-
- -- Check if response is correct
- if str ~= "\"abc\"" then
- error("invalid body: " .. str)
- end
-end
diff --git a/benchmarks/generate-pr-summary.py b/benchmarks/generate-pr-summary.py
new file mode 100755
index 0000000..9423e8c
--- /dev/null
+++ b/benchmarks/generate-pr-summary.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python3
+"""
+Generate PR benchmark summary from benchmark results
+"""
+
+import json
+import os
+import sys
+from pathlib import Path
+from datetime import datetime, timezone
+
+def find_latest_results(results_dir="benchmark-results"):
+ """Find the latest benchmark results file"""
+ # Try multiple possible locations
+ possible_dirs = [
+ results_dir,
+ "benchmark-results",
+ "tests/web-app/benchmark-results",
+ "../benchmark-results"
+ ]
+
+ for dir_path in possible_dirs:
+ results_path = Path(dir_path)
+ if results_path.exists():
+ json_files = list(results_path.glob("benchmark_*.json"))
+ if json_files:
+ # Return the most recent file
+ return max(json_files, key=os.path.getmtime)
+
+ return None
+
+def parse_latency(latency_str):
+ """Parse latency string and convert to milliseconds"""
+ if not latency_str or latency_str == "N/A":
+ return 0
+
+ latency_str = str(latency_str).lower().strip()
+
+ try:
+ # Check for most specific patterns first
+ if 'ms' in latency_str:
+ return float(latency_str.replace('ms', ''))
+ elif 'us' in latency_str:
+ return float(latency_str.replace('us', '')) / 1000
+ elif latency_str.endswith('s'):
+ # Only for pure seconds (not microseconds or milliseconds)
+ return float(latency_str.replace('s', '')) * 1000
+ else:
+ # Try to parse as plain number (assume milliseconds)
+ return float(latency_str)
+ except (ValueError, TypeError):
+ return 0
+
+def format_rps(rps_str):
+ """Format RPS for display"""
+ if not rps_str or rps_str == "N/A":
+ return "N/A"
+ try:
+ rps = float(str(rps_str).replace(',', ''))
+ if rps >= 1000:
+ return f"{rps:,.0f}"
+ else:
+ return f"{rps:.1f}"
+ except:
+ return str(rps_str)
+
+def generate_performance_indicator(rps):
+    """Return a performance rating label based on the RPS value"""
+ try:
+ rps_num = float(str(rps).replace(',', ''))
+ if rps_num >= 50000:
+ return "EXCELLENT"
+ elif rps_num >= 20000:
+ return "HIGH"
+ elif rps_num >= 10000:
+ return "GOOD"
+ elif rps_num >= 5000:
+ return "FAIR"
+ else:
+ return "LOW"
+ except:
+ return "UNKNOWN"
+
+def generate_pr_summary(results_file, commit_sha="", runner_os="ubuntu"):
+ """Generate a comprehensive PR summary"""
+
+ with open(results_file, 'r') as f:
+ data = json.load(f)
+
+ summary = []
+
+ # Header
+ summary.append("# MetaSSR Benchmark Results")
+ summary.append("")
+    summary.append(f"**Generated:** {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}")
+ summary.append(f"**Runner:** {runner_os}")
+ if commit_sha:
+ summary.append(f"**Commit:** `{commit_sha[:8]}`")
+ summary.append("")
+
+ # System info
+ metadata = data.get('metadata', {})
+ if metadata:
+ summary.append("## System Information")
+ summary.append("")
+ summary.append(f"- **OS:** {metadata.get('os', 'Unknown')} {metadata.get('arch', '')}")
+ summary.append(f"- **CPU Cores:** {metadata.get('cpu_cores', 'Unknown')}")
+ summary.append(f"- **Memory:** {metadata.get('memory_gb', 'Unknown')} GB")
+ summary.append("")
+
+ # Performance overview
+ tests = data.get('tests', [])
+ if tests:
+ summary.append("## Performance Overview")
+ summary.append("")
+
+ # Find best performance
+ best_rps = 0
+ best_test = ""
+ total_requests = 0
+
+ for test in tests:
+ try:
+ rps = float(str(test['results'].get('requests_per_sec', '0')).replace(',', ''))
+ if rps > best_rps:
+ best_rps = rps
+ best_test = test['name']
+ total_requests += int(str(test['results'].get('total_requests', '0')).replace(',', ''))
+ except:
+ continue
+
+ summary.append(f"**Best Performance:** {best_test} - {format_rps(best_rps)} RPS")
+ summary.append(f"**Total Requests Processed:** {total_requests:,}")
+ summary.append("")
+
+ # Detailed results table
+ if tests:
+ summary.append("## Detailed Results")
+ summary.append("")
+ summary.append("| Test Scenario | Performance | RPS | Avg Latency | P99 Latency | Errors |")
+ summary.append("|---------------|-------------|-----|-------------|-------------|--------|")
+
+ for test in tests:
+ name = test['name']
+ results = test['results']
+
+ rps = format_rps(results.get('requests_per_sec', 'N/A'))
+ indicator = generate_performance_indicator(results.get('requests_per_sec', '0'))
+
+ avg_lat = results.get('avg_latency', 'N/A')
+ p99_lat = results.get('latency_percentiles', {}).get('p99', 'N/A')
+ errors = results.get('total_errors', '0')
+
+ # Format latencies
+ if avg_lat != 'N/A':
+ avg_lat_ms = parse_latency(avg_lat)
+ avg_lat = f"{avg_lat_ms:.2f}ms" if avg_lat_ms > 0 else avg_lat
+
+ if p99_lat != 'N/A':
+ p99_lat_ms = parse_latency(p99_lat)
+ p99_lat = f"{p99_lat_ms:.2f}ms" if p99_lat_ms > 0 else p99_lat
+
+ summary.append(f"| {name} | {indicator} | **{rps}** | {avg_lat} | {p99_lat} | {errors} |")
+
+ summary.append("")
+
+ # Performance insights
+ if tests:
+ summary.append("## Performance Insights")
+ summary.append("")
+
+ # Calculate some insights
+ latencies = []
+ error_tests = []
+ high_perf_tests = []
+
+ for test in tests:
+ results = test['results']
+
+ # Check for errors
+ errors = int(str(results.get('total_errors', '0')))
+ if errors > 0:
+ error_tests.append(f"{test['name']} ({errors} errors)")
+
+ # Check for high performance
+ try:
+ rps = float(str(results.get('requests_per_sec', '0')).replace(',', ''))
+ if rps >= 20000:
+ high_perf_tests.append(f"{test['name']} ({format_rps(rps)} RPS)")
+ except:
+ pass
+
+ # Collect latencies
+ avg_lat = parse_latency(results.get('avg_latency', '0'))
+ if avg_lat > 0:
+ latencies.append(avg_lat)
+
+ # Generate insights
+ if high_perf_tests:
+ summary.append("**High Performance Tests:**")
+ for test in high_perf_tests:
+ summary.append(f" - {test}")
+ summary.append("")
+
+ if latencies:
+ avg_latency = sum(latencies) / len(latencies)
+ if avg_latency < 50:
+ summary.append("**Excellent latency performance** - Average latency under 50ms")
+ elif avg_latency < 100:
+ summary.append("**Good latency performance** - Average latency under 100ms")
+ else:
+ summary.append("**Consider latency optimization** - Average latency above 100ms")
+ summary.append("")
+
+ if error_tests:
+ summary.append("**Tests with errors:**")
+ for test in error_tests:
+ summary.append(f" - {test}")
+ summary.append("")
+ elif tests:
+ summary.append("**All tests completed without errors**")
+ summary.append("")
+
+ # Footer
+ summary.append("---")
+ summary.append("*Detailed benchmark data and analysis reports are available in the workflow artifacts.*")
+ summary.append("")
+    summary.append("<details>")
+    summary.append("<summary>How to reproduce these benchmarks</summary>")
+ summary.append("")
+ summary.append("```bash")
+ summary.append("# Clone the repository")
+ summary.append("git clone https://github.com/metacall/metassr.git")
+ summary.append("cd metassr")
+ summary.append("")
+ summary.append("# Run benchmarks")
+ summary.append("./benchmarks/run-benchmarks.sh")
+ summary.append("```")
+ summary.append("")
+    summary.append("</details>")
+
+ return "\n".join(summary)
+
+def main():
+ # Get environment variables
+ commit_sha = os.environ.get('GITHUB_SHA', '')
+ runner_os = os.environ.get('RUNNER_OS', 'ubuntu')
+
+ # Find latest results
+ results_file = find_latest_results()
+
+ if not results_file:
+ # Generate fallback summary
+ summary = f"""# MetaSSR Benchmark Results
+
+**Generated:** {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}
+**Runner:** {runner_os}
+**Commit:** `{commit_sha[:8] if commit_sha else 'unknown'}`
+
+**No benchmark results found**
+
+The benchmark process completed but no results file was generated.
+Please check the workflow logs for details.
+
+*Check the workflow artifacts for any available benchmark data.*
+"""
+ else:
+ summary = generate_pr_summary(results_file, commit_sha, runner_os)
+
+ # Write to file
+ with open('pr_benchmark_summary.md', 'w') as f:
+ f.write(summary)
+
+ print("Generated PR benchmark summary")
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
\ No newline at end of file
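Because the file name contains a hyphen, the helpers in `generate-pr-summary.py` have to be loaded via `importlib` if you want to spot-check them locally. The expected values below simply restate the conversions implemented by `parse_latency`, `format_rps`, and `generate_performance_indicator`; the path assumes the repository root as the working directory, and the check itself is illustrative only.

```python
#!/usr/bin/env python3
"""Spot-check the unit conversions in generate-pr-summary.py."""
import importlib.util

# The hyphenated file name cannot be imported directly, so load it by path.
spec = importlib.util.spec_from_file_location("pr_summary", "benchmarks/generate-pr-summary.py")
pr_summary = importlib.util.module_from_spec(spec)
spec.loader.exec_module(pr_summary)

# Expected values follow directly from the function bodies above.
assert pr_summary.parse_latency("12.40ms") == 12.40
assert pr_summary.parse_latency("850.00us") == 0.85
assert pr_summary.parse_latency("1.50s") == 1500.0
assert pr_summary.format_rps("12,345.67") == "12,346"
assert pr_summary.generate_performance_indicator("55,000") == "EXCELLENT"
print("all spot checks passed")
```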
diff --git a/benchmarks/metassr/Dockerfile b/benchmarks/metassr/Dockerfile
deleted file mode 100644
index e7c4d51..0000000
--- a/benchmarks/metassr/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM rust:slim-bookworm
-
-# Image descriptor
-LABEL copyright.name="Vicente Eduardo Ferrer Garcia" \
- copyright.address="vic798@gmail.com" \
- maintainer.name="Vicente Eduardo Ferrer Garcia" \
- maintainer.address="vic798@gmail.com" \
- vendor="MetaCall Inc." \
- version="0.1"
-
-# Install MetaCall dependencies
-RUN apt-get update \
- && apt-get install -y --no-install-recommends \
- ca-certificates \
- git
-
-# Set working directory to root
-WORKDIR /root
-
-# Clone and build the project
-RUN git clone --branch v0.9.14 https://github.com/metacall/core \
- && mkdir core/build && cd core/build \
- && ../tools/metacall-environment.sh release base nodejs python backtrace \
- && ../tools/metacall-configure.sh release nodejs python ports install \
- && ../tools/metacall-build.sh release nodejs python ports install \
- && cd ../.. \
- && rm -rf core
-
-# Clone and build the project
-RUN git clone https://github.com/metacall/metassr \
- && cd metassr \
- && cargo build --release
diff --git a/benchmarks/move-results.sh b/benchmarks/move-results.sh
new file mode 100755
index 0000000..3c71712
--- /dev/null
+++ b/benchmarks/move-results.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Move benchmark results from tests/web-app/benchmark-results to project root
+# This fixes results that were created in the wrong location
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+
+SOURCE_DIR="$PROJECT_ROOT/tests/web-app/benchmark-results"
+TARGET_DIR="$PROJECT_ROOT/benchmark-results"
+
+if [ -d "$SOURCE_DIR" ]; then
+ echo "Found benchmark results in $SOURCE_DIR"
+ echo "Moving to $TARGET_DIR..."
+
+ # Create target directory if it doesn't exist
+ mkdir -p "$TARGET_DIR"
+
+ # Move all files
+ if [ "$(ls -A "$SOURCE_DIR" 2>/dev/null)" ]; then
+ mv "$SOURCE_DIR"/* "$TARGET_DIR/" 2>/dev/null || true
+ echo "Files moved successfully"
+
+ # Remove empty source directory
+ rmdir "$SOURCE_DIR" 2>/dev/null || true
+ echo "Cleanup completed"
+ else
+ echo "Source directory is empty"
+ fi
+else
+ echo "No benchmark results found in tests/web-app/benchmark-results"
+fi
+
+echo "Benchmark results are now centralized in: $TARGET_DIR"
\ No newline at end of file
diff --git a/benchmarks/nextjs/.dockerignore b/benchmarks/nextjs/.dockerignore
deleted file mode 100644
index c550055..0000000
--- a/benchmarks/nextjs/.dockerignore
+++ /dev/null
@@ -1,7 +0,0 @@
-Dockerfile
-.dockerignore
-node_modules
-npm-debug.log
-README.md
-.next
-.git
diff --git a/benchmarks/nextjs/.gitignore b/benchmarks/nextjs/.gitignore
deleted file mode 100644
index 8777267..0000000
--- a/benchmarks/nextjs/.gitignore
+++ /dev/null
@@ -1,40 +0,0 @@
-# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
-# dependencies
-/node_modules
-/.pnp
-.pnp.*
-.yarn/*
-!.yarn/patches
-!.yarn/plugins
-!.yarn/releases
-!.yarn/versions
-
-# testing
-/coverage
-
-# next.js
-/.next/
-/out/
-
-# production
-/build
-
-# misc
-.DS_Store
-*.pem
-
-# debug
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-
-# local env files
-.env*.local
-
-# vercel
-.vercel
-
-# typescript
-*.tsbuildinfo
-next-env.d.ts
diff --git a/benchmarks/nextjs/Dockerfile b/benchmarks/nextjs/Dockerfile
deleted file mode 100644
index b70d7e7..0000000
--- a/benchmarks/nextjs/Dockerfile
+++ /dev/null
@@ -1,66 +0,0 @@
-# syntax=docker.io/docker/dockerfile:1
-
-FROM node:18-alpine AS base
-
-# Install dependencies only when needed
-FROM base AS deps
-# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
-RUN apk add --no-cache libc6-compat
-WORKDIR /app
-
-# Install dependencies based on the preferred package manager
-COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* .npmrc* ./
-RUN \
- if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
- elif [ -f package-lock.json ]; then npm ci; \
- elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm i --frozen-lockfile; \
- else echo "Lockfile not found." && exit 1; \
- fi
-
-
-# Rebuild the source code only when needed
-FROM base AS builder
-WORKDIR /app
-COPY --from=deps /app/node_modules ./node_modules
-COPY . .
-
-# Next.js collects completely anonymous telemetry data about general usage.
-# Learn more here: https://nextjs.org/telemetry
-# Uncomment the following line in case you want to disable telemetry during the build.
-# ENV NEXT_TELEMETRY_DISABLED=1
-
-RUN \
- if [ -f yarn.lock ]; then yarn run build; \
- elif [ -f package-lock.json ]; then npm run build; \
- elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm run build; \
- else echo "Lockfile not found." && exit 1; \
- fi
-
-# Production image, copy all the files and run next
-FROM base AS runner
-WORKDIR /app
-
-ENV NODE_ENV=production
-# Uncomment the following line in case you want to disable telemetry during runtime.
-# ENV NEXT_TELEMETRY_DISABLED=1
-
-RUN addgroup --system --gid 1001 nodejs
-RUN adduser --system --uid 1001 nextjs
-
-COPY --from=builder /app/public ./public
-
-# Automatically leverage output traces to reduce image size
-# https://nextjs.org/docs/advanced-features/output-file-tracing
-COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
-COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
-
-USER nextjs
-
-EXPOSE 3000
-
-ENV PORT=3000
-
-# server.js is created by next build from the standalone output
-# https://nextjs.org/docs/pages/api-reference/config/next-config-js/output
-ENV HOSTNAME="0.0.0.0"
-CMD ["node", "server.js"]
diff --git a/benchmarks/nextjs/README.md b/benchmarks/nextjs/README.md
deleted file mode 100644
index d61fc6b..0000000
--- a/benchmarks/nextjs/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# With Docker
-
-This examples shows how to use Docker with Next.js based on the [deployment documentation](https://nextjs.org/docs/deployment#docker-image). Additionally, it contains instructions for deploying to Google Cloud Run. However, you can use any container-based deployment host.
-
-## How to use
-
-Execute [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app) with [npm](https://docs.npmjs.com/cli/init), [Yarn](https://yarnpkg.com/lang/en/docs/cli/create/), or [pnpm](https://pnpm.io) to bootstrap the example:
-
-```bash
-npx create-next-app --example with-docker nextjs-docker
-```
-
-```bash
-yarn create next-app --example with-docker nextjs-docker
-```
-
-```bash
-pnpm create next-app --example with-docker nextjs-docker
-```
-
-## Using Docker
-
-1. [Install Docker](https://docs.docker.com/get-docker/) on your machine.
-1. Build your container: `docker build -t nextjs-docker .`.
-1. Run your container: `docker run -p 3000:3000 nextjs-docker`.
-
-You can view your images created with `docker images`.
-
-### In existing projects
-
-To add support for Docker to an existing project, just copy the [`Dockerfile`](https://github.com/vercel/next.js/blob/canary/examples/with-docker/Dockerfile) into the root of the project and add the following to the `next.config.js` file:
-
-```js
-// next.config.js
-module.exports = {
- // ... rest of the configuration.
- output: "standalone",
-};
-```
-
-This will build the project as a standalone app inside the Docker image.
-
-## Deploying to Google Cloud Run
-
-1. Install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) so you can use `gcloud` on the command line.
-1. Run `gcloud auth login` to log in to your account.
-1. [Create a new project](https://cloud.google.com/run/docs/quickstarts/build-and-deploy) in Google Cloud Run (e.g. `nextjs-docker`). Ensure billing is turned on.
-1. Build your container image using Cloud Build: `gcloud builds submit --tag gcr.io/PROJECT-ID/helloworld --project PROJECT-ID`. This will also enable Cloud Build for your project.
-1. Deploy to Cloud Run: `gcloud run deploy --image gcr.io/PROJECT-ID/helloworld --project PROJECT-ID --platform managed --allow-unauthenticated`. Choose a region of your choice.
-
- - You will be prompted for the service name: press Enter to accept the default name, `helloworld`.
- - You will be prompted for [region](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#follow-cloud-run): select the region of your choice, for example `us-central1`.
-
-## Running Locally
-
-First, run the development server:
-
-```bash
-npm run dev
-# or
-yarn dev
-```
-
-Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
-
-You can start editing the page by modifying `pages/index.js`. The page auto-updates as you edit the file.
-
-[API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.js`.
-
-The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages.
diff --git a/benchmarks/nextjs/app.json b/benchmarks/nextjs/app.json
deleted file mode 100644
index 5f394f2..0000000
--- a/benchmarks/nextjs/app.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "name": "nextjs",
- "options": {
- "allow-unauthenticated": true,
- "memory": "256Mi",
- "cpu": "1",
- "port": 3000,
- "http2": false
- }
-}
diff --git a/benchmarks/nextjs/next.config.js b/benchmarks/nextjs/next.config.js
deleted file mode 100644
index b9f2896..0000000
--- a/benchmarks/nextjs/next.config.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/** @type {import('next').NextConfig} */
-module.exports = {
- output: "standalone",
-};
diff --git a/benchmarks/nextjs/package.json b/benchmarks/nextjs/package.json
deleted file mode 100644
index 2726338..0000000
--- a/benchmarks/nextjs/package.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "private": true,
- "scripts": {
- "dev": "next dev",
- "build": "next build"
- },
- "dependencies": {
- "next": "latest",
- "react": "^18.2.0",
- "react-dom": "^18.2.0"
- }
-}
diff --git a/benchmarks/nextjs/pages/_app.js b/benchmarks/nextjs/pages/_app.js
deleted file mode 100644
index 2fc3e07..0000000
--- a/benchmarks/nextjs/pages/_app.js
+++ /dev/null
@@ -1,7 +0,0 @@
-import "../styles/globals.css";
-
-function MyApp({ Component, pageProps }) {
-  return <Component {...pageProps} />;
-}
-
-export default MyApp;
diff --git a/benchmarks/nextjs/pages/api/hello.js b/benchmarks/nextjs/pages/api/hello.js
deleted file mode 100644
index ba6cf4f..0000000
--- a/benchmarks/nextjs/pages/api/hello.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
-
-export default function hello(req, res) {
- res.status(200).json({ name: "John Doe" });
-}
diff --git a/benchmarks/nextjs/pages/index.js b/benchmarks/nextjs/pages/index.js
deleted file mode 100644
index 93d4fbb..0000000
--- a/benchmarks/nextjs/pages/index.js
+++ /dev/null
@@ -1,67 +0,0 @@
-import Head from "next/head";
-import styles from "../styles/Home.module.css";
-
-export default function Home() {
- return (
-
-
-
Create Next App
-
-
-
-
-
- Welcome to Next.js on Docker!
-
-
-
- Get started by editing{" "}
- pages/index.js
-
-
-
-
-
-
-
- );
-}
diff --git a/benchmarks/nextjs/public/favicon.ico b/benchmarks/nextjs/public/favicon.ico
deleted file mode 100644
index 4965832..0000000
Binary files a/benchmarks/nextjs/public/favicon.ico and /dev/null differ
diff --git a/benchmarks/nextjs/public/vercel.svg b/benchmarks/nextjs/public/vercel.svg
deleted file mode 100644
index fbf0e25..0000000
--- a/benchmarks/nextjs/public/vercel.svg
+++ /dev/null
@@ -1,4 +0,0 @@
-
\ No newline at end of file
diff --git a/benchmarks/nextjs/styles/Home.module.css b/benchmarks/nextjs/styles/Home.module.css
deleted file mode 100644
index 78b997f..0000000
--- a/benchmarks/nextjs/styles/Home.module.css
+++ /dev/null
@@ -1,131 +0,0 @@
-.container {
- min-height: 100vh;
- padding: 0 0.5rem;
- display: flex;
- flex-direction: column;
- justify-content: center;
- align-items: center;
-}
-
-.main {
- padding: 5rem 0;
- flex: 1;
- display: flex;
- flex-direction: column;
- justify-content: center;
- align-items: center;
-}
-
-.footer {
- width: 100%;
- height: 100px;
- border-top: 1px solid #eaeaea;
- display: flex;
- justify-content: center;
- align-items: center;
-}
-
-.footer img {
- margin-left: 0.5rem;
-}
-
-.footer a {
- display: flex;
- justify-content: center;
- align-items: center;
-}
-
-.title a {
- color: #0070f3;
- text-decoration: none;
-}
-
-.title a:hover,
-.title a:focus,
-.title a:active {
- text-decoration: underline;
-}
-
-.title {
- margin: 0;
- line-height: 1.15;
- font-size: 4rem;
-}
-
-.title,
-.description {
- text-align: center;
-}
-
-.description {
- line-height: 1.5;
- font-size: 1.5rem;
-}
-
-.code {
- background: #fafafa;
- border-radius: 5px;
- padding: 0.75rem;
- font-size: 1.1rem;
- font-family:
- Menlo,
- Monaco,
- Lucida Console,
- Liberation Mono,
- DejaVu Sans Mono,
- Bitstream Vera Sans Mono,
- Courier New,
- monospace;
-}
-
-.grid {
- display: flex;
- align-items: center;
- justify-content: center;
- flex-wrap: wrap;
- max-width: 800px;
- margin-top: 3rem;
-}
-
-.card {
- margin: 1rem;
- flex-basis: 45%;
- padding: 1.5rem;
- text-align: left;
- color: inherit;
- text-decoration: none;
- border: 1px solid #eaeaea;
- border-radius: 10px;
- transition:
- color 0.15s ease,
- border-color 0.15s ease;
-}
-
-.card:hover,
-.card:focus,
-.card:active {
- color: #0070f3;
- border-color: #0070f3;
-}
-
-.card h3 {
- margin: 0 0 1rem 0;
- font-size: 1.5rem;
-}
-
-.card p {
- margin: 0;
- font-size: 1.25rem;
- line-height: 1.5;
-}
-
-.logo {
- height: 1em;
-}
-
-@media (max-width: 600px) {
- .grid {
- width: 100%;
- flex-direction: column;
- }
-}
diff --git a/benchmarks/nextjs/styles/globals.css b/benchmarks/nextjs/styles/globals.css
deleted file mode 100644
index 51a2a4e..0000000
--- a/benchmarks/nextjs/styles/globals.css
+++ /dev/null
@@ -1,26 +0,0 @@
-html,
-body {
- padding: 0;
- margin: 0;
- font-family:
- -apple-system,
- BlinkMacSystemFont,
- Segoe UI,
- Roboto,
- Oxygen,
- Ubuntu,
- Cantarell,
- Fira Sans,
- Droid Sans,
- Helvetica Neue,
- sans-serif;
-}
-
-a {
- color: inherit;
- text-decoration: none;
-}
-
-* {
- box-sizing: border-box;
-}
diff --git a/benchmarks/requirements.txt b/benchmarks/requirements.txt
new file mode 100755
index 0000000..c648971
--- /dev/null
+++ b/benchmarks/requirements.txt
@@ -0,0 +1,5 @@
+# Python dependencies for MetaSSR benchmark analysis tools
+pandas>=1.5.0
+matplotlib>=3.6.0
+seaborn>=0.12.0
+numpy>=1.24.0
\ No newline at end of file
diff --git a/benchmarks/run-benchmarks.sh b/benchmarks/run-benchmarks.sh
new file mode 100755
index 0000000..b22eaae
--- /dev/null
+++ b/benchmarks/run-benchmarks.sh
@@ -0,0 +1,331 @@
+#!/bin/bash
+
+# MetaSSR Automated Benchmark Runner
+# Orchestrates the complete benchmarking process
+
+set -e
+
+# Configuration
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+BENCHMARK_SCRIPT="$SCRIPT_DIR/benchmark.sh"
+ANALYZER_SCRIPT="$SCRIPT_DIR/analyze-benchmarks.py"
+CONFIG_FILE="$SCRIPT_DIR/benchmark-config.json"
+
+# Default values
+SERVER_PORT=8080
+BUILD_TYPE="release"
+ANALYZE_RESULTS=true
+GENERATE_PLOTS=false
+OUTPUT_DIR="benchmark-results"
+SKIP_BUILD=false
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m'
+
+usage() {
+ cat << EOF
+MetaSSR Automated Benchmark Runner
+
+Usage: $0 [options]
+
+Options:
+ -p, --port PORT Server port (default: 8080)
+ -b, --build TYPE Build type: debug|release (default: release)
+ -o, --output DIR Output directory (default: benchmark-results)
+ -a, --analyze Analyze results after benchmarking (default: true)
+ -g, --graphs Generate performance graphs
+ -s, --skip-build Skip building the project
+ -h, --help Show this help
+
+Examples:
+ $0 # Full benchmark with default settings
+ $0 -p 3000 -b debug # Debug build on port 3000
+ $0 -g # Include performance graphs
+ $0 -s # Skip build step
+
+EOF
+}
+
+log() {
+ echo -e "${BLUE}[$(date '+%H:%M:%S')]${NC} $1"
+}
+
+error() {
+ echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+success() {
+ echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+warn() {
+ echo -e "${YELLOW}[WARNING]${NC} $1"
+}
+
+check_dependencies() {
+ local deps=("cargo" "npm" "wrk" "jq" "curl" "lsof")
+ local missing=()
+
+ for dep in "${deps[@]}"; do
+ if ! command -v "$dep" &> /dev/null; then
+ missing+=("$dep")
+ fi
+ done
+
+ if [ ${#missing[@]} -ne 0 ]; then
+ error "Missing dependencies: ${missing[*]}"
+ echo "Please install the missing dependencies and try again."
+ exit 1
+ fi
+}
+
+build_project() {
+ log "Building MetaSSR project..."
+ cd "$PROJECT_ROOT"
+
+ if [ "$BUILD_TYPE" = "release" ]; then
+ cargo build --release
+ else
+ cargo build
+ fi
+
+ success "Project built successfully"
+}
+
+setup_test_app() {
+ log "Setting up test application..."
+ cd "$PROJECT_ROOT/tests/web-app"
+
+ if [ ! -f "package.json" ]; then
+ error "Test app not found at tests/web-app"
+ exit 1
+ fi
+
+ npm install
+
+ if [ "$BUILD_TYPE" = "release" ]; then
+ npm run build
+ else
+ npm run build:debug
+ fi
+
+ success "Test application ready"
+}
+
+start_server() {
+ log "Starting MetaSSR server on port $SERVER_PORT..."
+ cd "$PROJECT_ROOT/tests/web-app"
+
+ # Check if server is already running
+ if curl -s "http://localhost:$SERVER_PORT" > /dev/null 2>&1; then
+ warn "Server already running on port $SERVER_PORT, using existing instance"
+ return 0
+ fi
+
+ if [ "$BUILD_TYPE" = "release" ]; then
+ npm start &
+ else
+ npm run start:debug &
+ fi
+
+ SERVER_PID=$!
+ echo $SERVER_PID > metassr_benchmark.pid
+
+ # Wait for server to start
+ local max_wait=30
+ local count=0
+
+ while ! curl -s "http://localhost:$SERVER_PORT" > /dev/null 2>&1; do
+ if [ $count -ge $max_wait ]; then
+ error "Server failed to start within $max_wait seconds"
+ cleanup
+ exit 1
+ fi
+ sleep 1
+ count=$((count + 1))
+ echo -n "."
+ done
+
+ echo ""
+ success "Server started successfully (PID: $SERVER_PID)"
+}
+
+stop_server() {
+ log "Stopping MetaSSR server..."
+ cd "$PROJECT_ROOT/tests/web-app"
+
+ # Only stop server if we started it
+ if [ -f "metassr_benchmark.pid" ]; then
+ local pid=$(cat metassr_benchmark.pid)
+ if kill -0 "$pid" 2>/dev/null; then
+ kill "$pid"
+ # Wait for graceful shutdown
+ local count=0
+ while kill -0 "$pid" 2>/dev/null && [ $count -lt 10 ]; do
+ sleep 1
+ count=$((count + 1))
+ done
+
+ # Force kill if still running
+ if kill -0 "$pid" 2>/dev/null; then
+ kill -9 "$pid"
+ fi
+ fi
+ rm metassr_benchmark.pid
+ success "Server stopped"
+ else
+ log "No server PID file found, server was not started by this script"
+ fi
+}
+
+run_benchmarks() {
+ log "Running benchmark suite..."
+
+ # Set absolute path for output directory
+ local abs_output_dir="$PROJECT_ROOT/$OUTPUT_DIR"
+
+ # Ensure output directory exists
+ mkdir -p "$abs_output_dir"
+
+ # Run the benchmark script with absolute path
+ RESULTS_DIR="$abs_output_dir" "$BENCHMARK_SCRIPT" -u "http://localhost:$SERVER_PORT" -o "$abs_output_dir"
+
+ success "Benchmarks completed"
+}
+
+analyze_results() {
+ if [ "$ANALYZE_RESULTS" = false ]; then
+ return
+ fi
+
+ log "Analyzing benchmark results..."
+
+ # Use absolute path for results
+ local abs_output_dir="$PROJECT_ROOT/$OUTPUT_DIR"
+
+ # Find the latest results file
+ local latest_result=$(ls -t "$abs_output_dir"/benchmark_*.json 2>/dev/null | head -1)
+
+ if [ -z "$latest_result" ]; then
+ warn "No benchmark results found to analyze"
+ return
+ fi
+
+ # Create analysis directory
+ local analysis_dir="$abs_output_dir/analysis_$(date +%Y%m%d_%H%M%S)"
+
+ # Run analyzer
+ local analyzer_args=("$latest_result" "-o" "$analysis_dir")
+
+ if [ "$GENERATE_PLOTS" = true ]; then
+ analyzer_args+=("--plots")
+ fi
+
+ if command -v python3 &> /dev/null; then
+ python3 "$ANALYZER_SCRIPT" "${analyzer_args[@]}"
+ success "Analysis completed: $analysis_dir"
+ else
+ warn "Python3 not available, skipping analysis"
+ fi
+}
+
+cleanup() {
+ log "Cleaning up..."
+ stop_server
+
+ # Kill any remaining processes
+ pkill -f "npm.*start" 2>/dev/null || true
+
+ success "Cleanup completed"
+}
+
+# Signal handlers
+trap cleanup EXIT
+trap 'error "Interrupted"; exit 130' INT TERM
+
+main() {
+ log "Starting MetaSSR automated benchmark..."
+
+ # Parse command line arguments
+ while [[ $# -gt 0 ]]; do
+ case $1 in
+ -p|--port)
+ SERVER_PORT="$2"
+ shift 2
+ ;;
+ -b|--build)
+ BUILD_TYPE="$2"
+ shift 2
+ ;;
+ -o|--output)
+ OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ -a|--analyze)
+ ANALYZE_RESULTS=true
+ shift
+ ;;
+ -g|--graphs)
+ GENERATE_PLOTS=true
+ shift
+ ;;
+ -s|--skip-build)
+ SKIP_BUILD=true
+ shift
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ error "Unknown option: $1"
+ usage
+ exit 1
+ ;;
+ esac
+ done
+
+ # Validate build type
+ if [[ "$BUILD_TYPE" != "debug" && "$BUILD_TYPE" != "release" ]]; then
+ error "Invalid build type: $BUILD_TYPE. Must be 'debug' or 'release'"
+ exit 1
+ fi
+
+ # Check dependencies
+ check_dependencies
+
+ # Build project if not skipped
+ if [ "$SKIP_BUILD" != true ]; then
+ build_project
+ setup_test_app
+ fi
+
+ # Start server
+ start_server
+
+ # Run benchmarks
+ run_benchmarks
+
+ # Stop server
+ stop_server
+
+ # Analyze results
+ analyze_results
+
+ success "Automated benchmark completed successfully!"
+ echo ""
+ echo "Results location: $PROJECT_ROOT/$OUTPUT_DIR"
+
+ if [ "$ANALYZE_RESULTS" = true ]; then
+ echo "Analysis reports generated in: $PROJECT_ROOT/$OUTPUT_DIR/analysis_*"
+ fi
+}
+
+# Check if script is being sourced or executed
+if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
+ main "$@"
+fi
\ No newline at end of file
diff --git a/crates/metassr-server/src/handler.rs b/crates/metassr-server/src/handler.rs
index db27087..4c84611 100644
--- a/crates/metassr-server/src/handler.rs
+++ b/crates/metassr-server/src/handler.rs
@@ -49,7 +49,7 @@ impl<'a, S: Clone + Send + Sync + 'static> PagesHandler<'a, S> {
let handler =
move |Query(params): Query>,
Path(path): Path>| async move {
-                dbg!(&params, &path);
+                // dbg!(&params, &path);
Html(*html)
};