From 329c455b04f3e100bb59fdd1e33a86f092868a65 Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Fri, 31 Oct 2025 17:25:27 +0100 Subject: [PATCH 1/8] end to end testing with local provider implementation Signed-off-by: Raffaele Di Fazio --- .github/workflows/end-to-end-tests.yml | 19 + e2e/deployment.yaml | 19 + e2e/service.yaml | 16 + provider/local/README.md | 5 + provider/local/main.go | 324 +++++++++++++ provider/local/main_test.go | 603 +++++++++++++++++++++++++ scripts/e2e-test.sh | 239 ++++++++++ 7 files changed, 1225 insertions(+) create mode 100644 .github/workflows/end-to-end-tests.yml create mode 100644 e2e/deployment.yaml create mode 100644 e2e/service.yaml create mode 100644 provider/local/README.md create mode 100644 provider/local/main.go create mode 100644 provider/local/main_test.go create mode 100755 scripts/e2e-test.sh diff --git a/.github/workflows/end-to-end-tests.yml b/.github/workflows/end-to-end-tests.yml new file mode 100644 index 0000000000..1974cb73e5 --- /dev/null +++ b/.github/workflows/end-to-end-tests.yml @@ -0,0 +1,19 @@ +name: end to end tets with local provider + +on: + push: + branches: + pull_request: + branches: [ main ] + workflow_dispatch: + +jobs: + e2e-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: e2e + run: | + ./scripts/e2e-test.sh diff --git a/e2e/deployment.yaml b/e2e/deployment.yaml new file mode 100644 index 0000000000..5288cddddb --- /dev/null +++ b/e2e/deployment.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: demo-app + name: demo-app +spec: + replicas: 1 + selector: + matchLabels: + app: demo-app + template: + metadata: + labels: + app: demo-app + spec: + containers: + - image: ghcr.io/raffo/http-server-demo-app:latest # minimal demo app + name: demo-app diff --git a/e2e/service.yaml b/e2e/service.yaml new file mode 100644 index 0000000000..9484d69b99 --- /dev/null +++ b/e2e/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: demo-app + name: demo-app + annotations: + external-dns.alpha.kubernetes.io/hostname: externaldns-e2e.external.dns +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: demo-app + clusterIP: None diff --git a/provider/local/README.md b/provider/local/README.md new file mode 100644 index 0000000000..5334babc03 --- /dev/null +++ b/provider/local/README.md @@ -0,0 +1,5 @@ +# Local Webhook Provider + +A demo provider that allows to store records in memory and query them via a locally exposed simple DNS server implementation. + +**NOTE**: this provider is not intended for any production usage. diff --git a/provider/local/main.go b/provider/local/main.go new file mode 100644 index 0000000000..4482c35342 --- /dev/null +++ b/provider/local/main.go @@ -0,0 +1,324 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "strings" + "sync" + + "github.com/miekg/dns" + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" + "sigs.k8s.io/external-dns/provider/webhook/api" +) + +// DNSRecord represents a single DNS record with its type and targets +type DNSRecord struct { + Type string `json:"type"` + Targets []string `json:"targets"` +} + +// DomainRecords holds all record types for a specific domain +type DomainRecords struct { + Records map[string]DNSRecord `json:"records"` // recordType -> DNSRecord +} + +// DNSRecordStore holds all DNS records in memory +type DNSRecordStore struct { + mu sync.RWMutex + domains map[string]*DomainRecords // domain -> DomainRecords +} + +// NewDNSRecordStore creates a new in-memory record store +func NewDNSRecordStore() *DNSRecordStore { + return &DNSRecordStore{ + domains: make(map[string]*DomainRecords), + } +} + +// AddRecord adds or updates a DNS record +func (s *DNSRecordStore) AddRecord(domain, recordType string, targets []string) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.domains[domain] == nil { + s.domains[domain] = &DomainRecords{ + Records: make(map[string]DNSRecord), + } + } + s.domains[domain].Records[recordType] = DNSRecord{ + Type: recordType, + Targets: targets, + } +} + +// RemoveRecord removes a DNS record +func (s *DNSRecordStore) RemoveRecord(domain, recordType string) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.domains[domain] != nil { + delete(s.domains[domain].Records, recordType) + if len(s.domains[domain].Records) == 0 { + delete(s.domains, domain) + } + } +} + +// GetRecord retrieves targets for a specific domain and record type +func (s *DNSRecordStore) GetRecord(domain, recordType string) []string { + s.mu.RLock() + defer s.mu.RUnlock() + + if s.domains[domain] != nil { + if record, exists := s.domains[domain].Records[recordType]; exists { + return record.Targets + } + } + return nil +} + +// GetAllRecords returns all records as endpoints for webhook responses +func (s *DNSRecordStore) GetAllRecords() []endpoint.Endpoint { + s.mu.RLock() + defer s.mu.RUnlock() + + var endpoints []endpoint.Endpoint + for domain, domainRecords := range s.domains { + for _, record := range domainRecords.Records { + endpoints = append(endpoints, endpoint.Endpoint{ + DNSName: domain, + RecordType: record.Type, + Targets: record.Targets, + }) + } + } + return endpoints +} + +// startDNSServer starts the DNS server in a goroutine +func startDNSServer(store *DNSRecordStore, address string, port int, defaultTTL uint32) error { + mux := dns.NewServeMux() + mux.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) { + handleDNSQuery(store, w, r, defaultTTL) + }) + + server := &dns.Server{ + Addr: fmt.Sprintf("%s:%d", address, port), + Net: "udp", + Handler: mux, + } + + log.Printf("Starting DNS server on %s (UDP)\n", server.Addr) + + go func() { + if err := server.ListenAndServe(); err != nil { + log.Printf("DNS server error: %v", err) + } + }() + + // Also start TCP server + tcpServer := &dns.Server{ + Addr: fmt.Sprintf("%s:%d", address, port), + Net: "tcp", + Handler: mux, + } + + go func() { + if err := tcpServer.ListenAndServe(); err != nil { + log.Printf("DNS TCP server error: %v", err) + } + }() + + return nil +} + +// handleDNSQuery handles incoming DNS queries +func handleDNSQuery(store *DNSRecordStore, w dns.ResponseWriter, r *dns.Msg, defaultTTL uint32) { + m := new(dns.Msg) + m.SetReply(r) + m.Authoritative = true + + for _, q := range 
r.Question { + domain := strings.TrimSuffix(q.Name, ".") + recordType := dns.TypeToString[q.Qtype] + + targets := store.GetRecord(domain, recordType) + + if len(targets) > 0 { + for _, target := range targets { + switch q.Qtype { + case dns.TypeA: + if ip := net.ParseIP(target); ip != nil && ip.To4() != nil { + rr := &dns.A{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + A: ip.To4(), + } + m.Answer = append(m.Answer, rr) + } + case dns.TypeAAAA: + if ip := net.ParseIP(target); ip != nil && ip.To16() != nil && ip.To4() == nil { + rr := &dns.AAAA{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypeAAAA, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + AAAA: ip.To16(), + } + m.Answer = append(m.Answer, rr) + } + case dns.TypeCNAME: + rr := &dns.CNAME{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypeCNAME, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + Target: dns.Fqdn(target), + } + m.Answer = append(m.Answer, rr) + } + } + } else { + // No records found, set NXDOMAIN + m.Rcode = dns.RcodeNameError + } + } + + w.WriteMsg(m) +} + +func main() { + listenAddress := flag.String("listen-address", "127.0.0.1", "Address to listen on") + port := flag.Int("port", 8888, "Port to listen on") + dnsAddress := flag.String("dns-address", "127.0.0.1", "DNS server address") + dnsPort := flag.Int("dns-port", 5353, "DNS server port") + dnsTTL := flag.Int("dns-ttl", 300, "Default TTL for DNS responses") + flag.Parse() + + // Create shared record store + recordStore := NewDNSRecordStore() + + // Start DNS server + if err := startDNSServer(recordStore, *dnsAddress, *dnsPort, uint32(*dnsTTL)); err != nil { + log.Fatalf("Failed to start DNS server: %v", err) + } + + // Setup HTTP handlers + http.HandleFunc("/", negotiateHandler) + http.HandleFunc("/records", func(w http.ResponseWriter, r *http.Request) { + recordsHandler(w, r, recordStore) + }) + http.HandleFunc("/adjustendpoints", adjustEndpointsHandler) + http.HandleFunc("/healthz", healthzHandler) + + addr := fmt.Sprintf("%s:%d", *listenAddress, *port) + log.Printf("Starting webhook provider on %s\n", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} + +func negotiateHandler(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) + // Return your supported DomainFilter here + json.NewEncoder(w).Encode(endpoint.DomainFilter{}) +} + +func recordsHandler(w http.ResponseWriter, r *http.Request, store *DNSRecordStore) { + if r.Method == http.MethodGet { + w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) + endpoints := store.GetAllRecords() + json.NewEncoder(w).Encode(endpoints) + return + } + + if r.Method == http.MethodPost { + w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) + var changes plan.Changes + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = json.Unmarshal(body, &changes) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Process deletions + for _, del := range changes.Delete { + store.RemoveRecord(del.DNSName, del.RecordType) + } + + // Process updates (treat as delete + create) + for _, update := range changes.UpdateOld { + store.RemoveRecord(update.DNSName, update.RecordType) + } + for _, update := range changes.UpdateNew { + if len(update.Targets) > 0 { + 
store.AddRecord(update.DNSName, update.RecordType, update.Targets) + } + } + + // Process creations + for _, create := range changes.Create { + if len(create.Targets) > 0 { + store.AddRecord(create.DNSName, create.RecordType, create.Targets) + } + } + + w.WriteHeader(http.StatusNoContent) + return + } + + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) +} + +func adjustEndpointsHandler(w http.ResponseWriter, r *http.Request) { + // read the endpoints from the input, return them straight back + var endpoints []endpoint.Endpoint + if err := json.NewDecoder(r.Body).Decode(&endpoints); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) + json.NewEncoder(w).Encode(endpoints) +} + +func healthzHandler(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("ok")) +} diff --git a/provider/local/main_test.go b/provider/local/main_test.go new file mode 100644 index 0000000000..c47470802f --- /dev/null +++ b/provider/local/main_test.go @@ -0,0 +1,603 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "encoding/json" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/miekg/dns" + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" + "sigs.k8s.io/external-dns/provider/webhook/api" +) + +func TestNewDNSRecordStore(t *testing.T) { + store := NewDNSRecordStore() + if store == nil { + t.Fatal("NewDNSRecordStore returned nil") + } + if store.domains == nil { + t.Fatal("domains map not initialized") + } +} + +func TestDNSRecordStore_AddRecord(t *testing.T) { + store := NewDNSRecordStore() + + // Test adding a simple A record + store.AddRecord("example.com", "A", []string{"192.168.1.1"}) + + targets := store.GetRecord("example.com", "A") + if len(targets) != 1 || targets[0] != "192.168.1.1" { + t.Errorf("Expected [192.168.1.1], got %v", targets) + } + + // Test adding multiple targets + store.AddRecord("example.com", "A", []string{"192.168.1.1", "192.168.1.2"}) + targets = store.GetRecord("example.com", "A") + if len(targets) != 2 { + t.Errorf("Expected 2 targets, got %d", len(targets)) + } + + // Test adding different record type for same domain + store.AddRecord("example.com", "AAAA", []string{"::1"}) + ipv6Targets := store.GetRecord("example.com", "AAAA") + if len(ipv6Targets) != 1 || ipv6Targets[0] != "::1" { + t.Errorf("Expected [::1], got %v", ipv6Targets) + } + + // Ensure A records still exist + targets = store.GetRecord("example.com", "A") + if len(targets) != 2 { + t.Errorf("A records should still exist, got %d", len(targets)) + } +} + +func TestDNSRecordStore_RemoveRecord(t *testing.T) { + store := NewDNSRecordStore() + + // Add some records + store.AddRecord("example.com", "A", []string{"192.168.1.1"}) + store.AddRecord("example.com", "AAAA", []string{"::1"}) + store.AddRecord("test.com", "A", []string{"10.0.0.1"}) + + // Remove one record type + 
store.RemoveRecord("example.com", "A") + targets := store.GetRecord("example.com", "A") + if len(targets) != 0 { + t.Errorf("A record should be removed, got %v", targets) + } + + // Ensure other records still exist + ipv6Targets := store.GetRecord("example.com", "AAAA") + if len(ipv6Targets) != 1 { + t.Errorf("AAAA record should still exist, got %v", ipv6Targets) + } + + testTargets := store.GetRecord("test.com", "A") + if len(testTargets) != 1 { + t.Errorf("test.com A record should still exist, got %v", testTargets) + } + + // Remove last record for domain + store.RemoveRecord("example.com", "AAAA") + + // Domain should be completely removed + if store.domains["example.com"] != nil { + t.Error("Domain should be removed when no records remain") + } +} + +func TestDNSRecordStore_GetRecord_NotFound(t *testing.T) { + store := NewDNSRecordStore() + + // Test getting non-existent record + targets := store.GetRecord("nonexistent.com", "A") + if targets != nil { + t.Errorf("Expected nil for non-existent record, got %v", targets) + } + + // Test getting non-existent record type for existing domain + store.AddRecord("example.com", "A", []string{"192.168.1.1"}) + targets = store.GetRecord("example.com", "CNAME") + if targets != nil { + t.Errorf("Expected nil for non-existent record type, got %v", targets) + } +} + +func TestDNSRecordStore_GetAllRecords(t *testing.T) { + store := NewDNSRecordStore() + + // Test empty store + endpoints := store.GetAllRecords() + if len(endpoints) != 0 { + t.Errorf("Expected 0 endpoints for empty store, got %d", len(endpoints)) + } + + // Add some records + store.AddRecord("example.com", "A", []string{"192.168.1.1", "192.168.1.2"}) + store.AddRecord("example.com", "AAAA", []string{"::1"}) + store.AddRecord("test.com", "CNAME", []string{"example.com"}) + + endpoints = store.GetAllRecords() + if len(endpoints) != 3 { + t.Errorf("Expected 3 endpoints, got %d", len(endpoints)) + } + + // Verify endpoint content + endpointMap := make(map[string]endpoint.Endpoint) + for _, ep := range endpoints { + key := ep.DNSName + ":" + ep.RecordType + endpointMap[key] = ep + } + + if ep, exists := endpointMap["example.com:A"]; !exists { + t.Error("Missing example.com A record") + } else if len(ep.Targets) != 2 { + t.Errorf("Expected 2 targets for A record, got %d", len(ep.Targets)) + } + + if ep, exists := endpointMap["example.com:AAAA"]; !exists { + t.Error("Missing example.com AAAA record") + } else if len(ep.Targets) != 1 || ep.Targets[0] != "::1" { + t.Errorf("Expected [::1] for AAAA record, got %v", ep.Targets) + } + + if ep, exists := endpointMap["test.com:CNAME"]; !exists { + t.Error("Missing test.com CNAME record") + } else if len(ep.Targets) != 1 || ep.Targets[0] != "example.com" { + t.Errorf("Expected [example.com] for CNAME record, got %v", ep.Targets) + } +} + +func TestNegotiateHandler(t *testing.T) { + tests := []struct { + name string + method string + expectedStatus int + expectedHeader string + }{ + { + name: "Valid GET request", + method: http.MethodGet, + expectedStatus: http.StatusOK, + expectedHeader: api.MediaTypeFormatAndVersion, + }, + { + name: "Invalid POST request", + method: http.MethodPost, + expectedStatus: http.StatusMethodNotAllowed, + expectedHeader: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(tt.method, "/", nil) + w := httptest.NewRecorder() + + negotiateHandler(w, req) + + res := w.Result() + if res.StatusCode != tt.expectedStatus { + t.Errorf("Expected status %d, got %d", 
tt.expectedStatus, res.StatusCode) + } + + if tt.expectedHeader != "" { + contentType := res.Header.Get("Content-Type") + if contentType != tt.expectedHeader { + t.Errorf("Expected header %s, got %s", tt.expectedHeader, contentType) + } + + defer res.Body.Close() + var domainFilter endpoint.DomainFilter + err := json.NewDecoder(res.Body).Decode(&domainFilter) + if err != nil { + t.Errorf("Failed to decode response: %v", err) + } + } + }) + } +} + +func TestRecordsHandler_GET(t *testing.T) { + store := NewDNSRecordStore() + store.AddRecord("example.com", "A", []string{"192.168.1.1"}) + store.AddRecord("test.com", "CNAME", []string{"example.com"}) + + req := httptest.NewRequest("GET", "/records", nil) + w := httptest.NewRecorder() + + recordsHandler(w, req, store) + + if w.Code != http.StatusOK { + t.Errorf("Expected status 200, got %d", w.Code) + } + + var endpoints []endpoint.Endpoint + if err := json.Unmarshal(w.Body.Bytes(), &endpoints); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if len(endpoints) != 2 { + t.Errorf("Expected 2 endpoints, got %d", len(endpoints)) + } +} + +func TestRecordsHandler_POST_Create(t *testing.T) { + store := NewDNSRecordStore() + + changes := plan.Changes{ + Create: []*endpoint.Endpoint{ + { + DNSName: "example.com", + RecordType: "A", + Targets: []string{"192.168.1.1"}, + }, + { + DNSName: "test.com", + RecordType: "CNAME", + Targets: []string{"example.com"}, + }, + }, + } + + body, _ := json.Marshal(changes) + req := httptest.NewRequest("POST", "/records", bytes.NewReader(body)) + w := httptest.NewRecorder() + + recordsHandler(w, req, store) + + if w.Code != http.StatusNoContent { + t.Errorf("Expected status 204, got %d", w.Code) + } + + // Verify records were created + targets := store.GetRecord("example.com", "A") + if len(targets) != 1 || targets[0] != "192.168.1.1" { + t.Errorf("A record not created correctly, got %v", targets) + } + + targets = store.GetRecord("test.com", "CNAME") + if len(targets) != 1 || targets[0] != "example.com" { + t.Errorf("CNAME record not created correctly, got %v", targets) + } +} + +func TestRecordsHandler_POST_Delete(t *testing.T) { + store := NewDNSRecordStore() + store.AddRecord("example.com", "A", []string{"192.168.1.1"}) + store.AddRecord("test.com", "CNAME", []string{"example.com"}) + + changes := plan.Changes{ + Delete: []*endpoint.Endpoint{ + { + DNSName: "example.com", + RecordType: "A", + }, + }, + } + + body, _ := json.Marshal(changes) + req := httptest.NewRequest("POST", "/records", bytes.NewReader(body)) + w := httptest.NewRecorder() + + recordsHandler(w, req, store) + + if w.Code != http.StatusNoContent { + t.Errorf("Expected status 204, got %d", w.Code) + } + + // Verify record was deleted + targets := store.GetRecord("example.com", "A") + if len(targets) != 0 { + t.Errorf("A record should be deleted, got %v", targets) + } + + // Verify other record still exists + targets = store.GetRecord("test.com", "CNAME") + if len(targets) != 1 { + t.Errorf("CNAME record should still exist, got %v", targets) + } +} + +func TestRecordsHandler_POST_Update(t *testing.T) { + store := NewDNSRecordStore() + store.AddRecord("example.com", "A", []string{"192.168.1.1"}) + + changes := plan.Changes{ + UpdateOld: []*endpoint.Endpoint{ + { + DNSName: "example.com", + RecordType: "A", + Targets: []string{"192.168.1.1"}, + }, + }, + UpdateNew: []*endpoint.Endpoint{ + { + DNSName: "example.com", + RecordType: "A", + Targets: []string{"192.168.1.2", "192.168.1.3"}, + }, + }, + } + + body, _ := 
json.Marshal(changes) + req := httptest.NewRequest("POST", "/records", bytes.NewReader(body)) + w := httptest.NewRecorder() + + recordsHandler(w, req, store) + + if w.Code != http.StatusNoContent { + t.Errorf("Expected status 204, got %d", w.Code) + } + + // Verify record was updated + targets := store.GetRecord("example.com", "A") + if len(targets) != 2 { + t.Errorf("Expected 2 targets after update, got %d", len(targets)) + } + + expectedTargets := map[string]bool{"192.168.1.2": true, "192.168.1.3": true} + for _, target := range targets { + if !expectedTargets[target] { + t.Errorf("Unexpected target after update: %s", target) + } + } +} + +func TestRecordsHandler_InvalidMethod(t *testing.T) { + store := NewDNSRecordStore() + + req := httptest.NewRequest("PUT", "/records", nil) + w := httptest.NewRecorder() + + recordsHandler(w, req, store) + + if w.Code != http.StatusMethodNotAllowed { + t.Errorf("Expected status 405, got %d", w.Code) + } +} + +func TestHandleDNSQuery_ARecord(t *testing.T) { + store := NewDNSRecordStore() + store.AddRecord("example.com", "A", []string{"192.168.1.1", "192.168.1.2"}) + + // Create DNS query + m := new(dns.Msg) + m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA) + + // Create test response writer + responseWriter := &testResponseWriter{} + + // Handle the query + handleDNSQuery(store, responseWriter, m, 300) + + // Verify response + response := responseWriter.msg + if response == nil { + t.Fatal("No response received") + } + + if len(response.Answer) != 2 { + t.Errorf("Expected 2 answers, got %d", len(response.Answer)) + } + + if response.Authoritative != true { + t.Error("Response should be authoritative") + } + + // Check answer records + ips := make(map[string]bool) + for _, rr := range response.Answer { + if a, ok := rr.(*dns.A); ok { + ips[a.A.String()] = true + if a.Hdr.Ttl != 300 { + t.Errorf("Expected TTL 300, got %d", a.Hdr.Ttl) + } + } else { + t.Errorf("Expected A record, got %T", rr) + } + } + + if !ips["192.168.1.1"] || !ips["192.168.1.2"] { + t.Error("Expected both IP addresses in response") + } +} + +func TestHandleDNSQuery_AAAARecord(t *testing.T) { + store := NewDNSRecordStore() + store.AddRecord("example.com", "AAAA", []string{"2001:db8::1"}) + + // Create DNS query + m := new(dns.Msg) + m.SetQuestion(dns.Fqdn("example.com"), dns.TypeAAAA) + + // Create test response writer + responseWriter := &testResponseWriter{} + + // Handle the query + handleDNSQuery(store, responseWriter, m, 300) + + // Verify response + response := responseWriter.msg + if response == nil { + t.Fatal("No response received") + } + + if len(response.Answer) != 1 { + t.Errorf("Expected 1 answer, got %d", len(response.Answer)) + } + + if aaaa, ok := response.Answer[0].(*dns.AAAA); ok { + if aaaa.AAAA.String() != "2001:db8::1" { + t.Errorf("Expected 2001:db8::1, got %s", aaaa.AAAA.String()) + } + } else { + t.Errorf("Expected AAAA record, got %T", response.Answer[0]) + } +} + +func TestHandleDNSQuery_CNAMERecord(t *testing.T) { + store := NewDNSRecordStore() + store.AddRecord("www.example.com", "CNAME", []string{"example.com"}) + + // Create DNS query + m := new(dns.Msg) + m.SetQuestion(dns.Fqdn("www.example.com"), dns.TypeCNAME) + + // Create test response writer + responseWriter := &testResponseWriter{} + + // Handle the query + handleDNSQuery(store, responseWriter, m, 300) + + // Verify response + response := responseWriter.msg + if response == nil { + t.Fatal("No response received") + } + + if len(response.Answer) != 1 { + t.Errorf("Expected 1 answer, got %d", 
len(response.Answer)) + } + + if cname, ok := response.Answer[0].(*dns.CNAME); ok { + if cname.Target != "example.com." { + t.Errorf("Expected example.com., got %s", cname.Target) + } + } else { + t.Errorf("Expected CNAME record, got %T", response.Answer[0]) + } +} + +func TestHandleDNSQuery_NXDOMAIN(t *testing.T) { + store := NewDNSRecordStore() + + // Create DNS query for non-existent domain + m := new(dns.Msg) + m.SetQuestion(dns.Fqdn("nonexistent.com"), dns.TypeA) + + // Create test response writer + responseWriter := &testResponseWriter{} + + // Handle the query + handleDNSQuery(store, responseWriter, m, 300) + + // Verify response + response := responseWriter.msg + if response == nil { + t.Fatal("No response received") + } + + if response.Rcode != dns.RcodeNameError { + t.Errorf("Expected NXDOMAIN (rcode %d), got %d", dns.RcodeNameError, response.Rcode) + } + + if len(response.Answer) != 0 { + t.Errorf("Expected 0 answers for NXDOMAIN, got %d", len(response.Answer)) + } +} + +func TestHealthzHandler(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/healthz", nil) + w := httptest.NewRecorder() + + healthzHandler(w, req) + + res := w.Result() + if res.StatusCode != http.StatusOK { + t.Errorf("Expected status 200, got %d", res.StatusCode) + } + + if w.Body.String() != "ok" { + t.Errorf("Expected 'ok', got '%s'", w.Body.String()) + } +} + +func TestAdjustEndpointsHandler(t *testing.T) { + endpoints := []endpoint.Endpoint{ + { + DNSName: "test.example.com", + RecordType: "A", + Targets: []string{"192.168.1.1", "192.168.1.2"}, + }, + } + + endpointsJSON, err := json.Marshal(endpoints) + if err != nil { + t.Fatalf("Failed to marshal endpoints: %v", err) + } + + req := httptest.NewRequest(http.MethodPost, "/adjustendpoints", bytes.NewReader(endpointsJSON)) + w := httptest.NewRecorder() + + adjustEndpointsHandler(w, req) + + res := w.Result() + if res.StatusCode != http.StatusOK { + t.Errorf("Expected status 200, got %d", res.StatusCode) + } + + var returnedEndpoints []endpoint.Endpoint + if err := json.Unmarshal(w.Body.Bytes(), &returnedEndpoints); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if len(returnedEndpoints) != 1 { + t.Errorf("Expected 1 endpoint returned, got %d", len(returnedEndpoints)) + } +} + +// testResponseWriter implements dns.ResponseWriter for testing +type testResponseWriter struct { + msg *dns.Msg +} + +func (w *testResponseWriter) LocalAddr() net.Addr { + return nil +} + +func (w *testResponseWriter) RemoteAddr() net.Addr { + return nil +} + +func (w *testResponseWriter) WriteMsg(m *dns.Msg) error { + w.msg = m + return nil +} + +func (w *testResponseWriter) Write([]byte) (int, error) { + return 0, nil +} + +func (w *testResponseWriter) Close() error { + return nil +} + +func (w *testResponseWriter) TsigStatus() error { + return nil +} + +func (w *testResponseWriter) TsigTimersOnly(bool) {} + +func (w *testResponseWriter) Hijack() {} diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh new file mode 100755 index 0000000000..42b63644cd --- /dev/null +++ b/scripts/e2e-test.sh @@ -0,0 +1,239 @@ +#!/bin/bash + +set -e + +KO_VERSION="0.18.0" +KIND_VERSION="0.30.0" +GO_VERSION="1.25" +ALPINE_VERSION="3.18" + +echo "Starting end-to-end tests for external-dns with local provider..." + +# Install kind +echo "Installing kind..." +curl -Lo ./kind https://kind.sigs.k8s.io/dl/v${KIND_VERSION}/kind-linux-amd64 +chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind + +# Create kind cluster +echo "Creating kind cluster..." 
+kind create cluster + +# Install kubectl +echo "Installing kubectl..." +curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +chmod +x kubectl +sudo mv kubectl /usr/local/bin/kubectl + +# Install ko +echo "Installing ko..." +curl -sSfL "https://github.com/ko-build/ko/releases/download/v${KO_VERSION}/ko_${KO_VERSION}_linux_x86_64.tar.gz" > ko.tar.gz +tar xzf ko.tar.gz ko +chmod +x ./ko +sudo mv ko /usr/local/bin/ko + +# Build external-dns +echo "Building external-dns..." +make build.image + +# Build a webhook image with the local provider +docker build -t webhook:v1 -f - . < "$TEMP_KUSTOMIZE_DIR/deployment-args-patch.yaml" +apiVersion: apps/v1 +kind: Deployment +metadata: + name: external-dns +spec: + template: + spec: + hostNetwork: true + containers: + - name: external-dns + args: + - --source=service + - --provider=webhook + - --txt-owner-id=external.dns + - --policy=sync + - --log-level=debug + - name: webhook + image: webhook:v1 + ports: + - containerPort: 8888 + name: http + - containerPort: 5353 + name: dns-udp + protocol: UDP + - containerPort: 5353 + name: dns-tcp + protocol: TCP + args: + - --listen-address=0.0.0.0 + - --port=8888 + - --dns-address=0.0.0.0 + - --dns-port=5353 + - --dns-ttl=300 +EOF + +# Update kustomization.yaml to include the patch +cat < "$TEMP_KUSTOMIZE_DIR/kustomization.yaml" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +images: + - name: registry.k8s.io/external-dns/external-dns + newTag: v0.18.0 # needs to be the real version + +resources: + - ./external-dns-deployment.yaml + - ./external-dns-serviceaccount.yaml + - ./external-dns-clusterrole.yaml + - ./external-dns-clusterrolebinding.yaml + +patchesStrategicMerge: + - ./deployment-args-patch.yaml +EOF + +# Apply the kustomization +kubectl kustomize "$TEMP_KUSTOMIZE_DIR" | kubectl apply -f - + +# add a wait for the deployment to be available +kubectl wait --for=condition=available --timeout=60s deployment/external-dns || true + +kubectl describe pods -l app=external-dns +kubectl describe deployment external-dns +kubectl logs -l app=external-dns + +# Cleanup temporary directory +rm -rf "$TEMP_KUSTOMIZE_DIR" + +# Apply kubernetes yaml with service +echo "Applying Kubernetes service..." +kubectl apply -f e2e + +# Wait for convergence +echo "Waiting for convergence (90 seconds)..." +sleep 90 # normal loop is 60 seconds, this is enough and should not cause flakes + +# Check that the records are present +echo "Checking services again..." +kubectl get svc -owide +kubectl logs -l app=external-dns + +# Check that the DNS records are present using our DNS server +echo "Testing DNS server functionality..." + +# Get the node IP where the pod is running (since we're using hostNetwork) +NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}') +echo "Node IP: $NODE_IP" + +# Test our DNS server with dig +echo "Testing DNS server with dig..." + +# Create DNS test job that uses dig to query our DNS server +cat </dev/null || true + fi + if [ ! 
-z "$LOCAL_PROVIDER_PID" ]; then + kill $LOCAL_PROVIDER_PID 2>/dev/null || true + fi + kind delete cluster 2>/dev/null || true +} + +# Set trap to cleanup on script exit +trap cleanup EXIT From c814e42a4bc9203a9b94fe9c07b680d978a307a1 Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Sat, 8 Nov 2025 18:03:38 +0100 Subject: [PATCH 2/8] Update .github/workflows/end-to-end-tests.yml Co-authored-by: Michel Loiseleur <97035654+mloiseleur@users.noreply.github.com> --- .github/workflows/end-to-end-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/end-to-end-tests.yml b/.github/workflows/end-to-end-tests.yml index 1974cb73e5..b465472816 100644 --- a/.github/workflows/end-to-end-tests.yml +++ b/.github/workflows/end-to-end-tests.yml @@ -1,4 +1,4 @@ -name: end to end tets with local provider +name: end to end test with local provider on: push: From 41f83f3c961f3ab41938aa2561e8df9b87ecebe6 Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Sat, 8 Nov 2025 18:03:47 +0100 Subject: [PATCH 3/8] Update e2e/deployment.yaml Co-authored-by: Michel Loiseleur <97035654+mloiseleur@users.noreply.github.com> --- e2e/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/deployment.yaml b/e2e/deployment.yaml index 5288cddddb..0885e8a937 100644 --- a/e2e/deployment.yaml +++ b/e2e/deployment.yaml @@ -15,5 +15,5 @@ spec: app: demo-app spec: containers: - - image: ghcr.io/raffo/http-server-demo-app:latest # minimal demo app + - image: traefik/whoami:latest # minimal demo app name: demo-app From f73ad8940150c0ea78d7ee0f77fd1960efa73837 Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Sat, 15 Nov 2025 17:52:59 +0100 Subject: [PATCH 4/8] move e2e to coredns Signed-off-by: Raffaele Di Fazio --- e2e/provider/coredns.yaml | 98 ++++++ e2e/provider/etcd.yaml | 310 ++++++++++++++++++ provider/local/README.md | 5 - provider/local/main.go | 324 ------------------- provider/local/main_test.go | 603 ------------------------------------ scripts/e2e-test.sh | 53 ++-- 6 files changed, 428 insertions(+), 965 deletions(-) create mode 100644 e2e/provider/coredns.yaml create mode 100644 e2e/provider/etcd.yaml delete mode 100644 provider/local/README.md delete mode 100644 provider/local/main.go delete mode 100644 provider/local/main_test.go diff --git a/e2e/provider/coredns.yaml b/e2e/provider/coredns.yaml new file mode 100644 index 0000000000..14d02737fb --- /dev/null +++ b/e2e/provider/coredns.yaml @@ -0,0 +1,98 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: default +data: + Corefile: | + external.dns:5353 { + errors + log + etcd { + stubzones + path /skydns + endpoint http://etcd-0.etcd:2379 + } + cache 30 + forward . 
/etc/resolv.conf + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: default + labels: + app: coredns +spec: + replicas: 1 + selector: + matchLabels: + app: coredns + template: + metadata: + labels: + app: coredns + spec: + hostNetwork: true + dnsPolicy: Default + containers: + - name: coredns + image: coredns/coredns:1.13.1 + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + ports: + - containerPort: 5353 + name: dns + protocol: UDP + - containerPort: 5353 + name: dns-tcp + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: coredns + namespace: default + labels: + app: coredns +spec: + selector: + app: coredns + ports: + - name: dns + port: 5353 + targetPort: 5353 + protocol: UDP + - name: dns-tcp + port: 5353 + targetPort: 5353 + protocol: TCP diff --git a/e2e/provider/etcd.yaml b/e2e/provider/etcd.yaml new file mode 100644 index 0000000000..1833199be0 --- /dev/null +++ b/e2e/provider/etcd.yaml @@ -0,0 +1,310 @@ +# file: etcd.yaml +--- +apiVersion: v1 +kind: Service +metadata: + name: etcd + namespace: default +spec: + type: ClusterIP + clusterIP: None + selector: + app: etcd + ## + ## Ideally we would use SRV records to do peer discovery for initialization. + ## Unfortunately discovery will not work without logic to wait for these to + ## populate in the container. This problem is relatively easy to overcome by + ## making changes to prevent the etcd process from starting until the records + ## have populated. The documentation on statefulsets briefly talk about it. + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id + publishNotReadyAddresses: true + ## + ## The naming scheme of the client and server ports match the scheme that etcd + ## uses when doing discovery with SRV records. + ports: + - name: etcd-client + port: 2379 + - name: etcd-server + port: 2380 + - name: etcd-metrics + port: 8080 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + namespace: default + name: etcd +spec: + ## + ## The service name is being set to leverage the service headlessly. + ## https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + serviceName: etcd + ## + ## If you are increasing the replica count of an existing cluster, you should + ## also update the --initial-cluster-state flag as noted further down in the + ## container configuration. + replicas: 1 + ## + ## For initialization, the etcd pods must be available to eachother before + ## they are "ready" for traffic. The "Parallel" policy makes this possible. + podManagementPolicy: Parallel + ## + ## To ensure availability of the etcd cluster, the rolling update strategy + ## is used. For availability, there must be at least 51% of the etcd nodes + ## online at any given time. + updateStrategy: + type: RollingUpdate + ## + ## This is label query over pods that should match the replica count. + ## It must match the pod template's labels. 
For more information, see the + ## following documentation: + ## https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + selector: + matchLabels: + app: etcd + ## + ## Pod configuration template. + template: + metadata: + ## + ## The labeling here is tied to the "matchLabels" of this StatefulSet and + ## "affinity" configuration of the pod that will be created. + ## + ## This example's labeling scheme is fine for one etcd cluster per + ## namespace, but should you desire multiple clusters per namespace, you + ## will need to update the labeling schema to be unique per etcd cluster. + labels: + app: etcd + annotations: + ## + ## This gets referenced in the etcd container's configuration as part of + ## the DNS name. It must match the service name created for the etcd + ## cluster. The choice to place it in an annotation instead of the env + ## settings is because there should only be 1 service per etcd cluster. + serviceName: etcd + spec: + ## + ## Configuring the node affinity is necessary to prevent etcd servers from + ## ending up on the same hardware together. + ## + ## See the scheduling documentation for more information about this: + ## https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + affinity: + ## The podAntiAffinity is a set of rules for scheduling that describe + ## when NOT to place a pod from this StatefulSet on a node. + podAntiAffinity: + ## + ## When preparing to place the pod on a node, the scheduler will check + ## for other pods matching the rules described by the labelSelector + ## separated by the chosen topology key. + requiredDuringSchedulingIgnoredDuringExecution: + ## This label selector is looking for app=etcd + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - etcd + ## This topology key denotes a common label used on nodes in the + ## cluster. The podAntiAffinity configuration essentially states + ## that if another pod has a label of app=etcd on the node, the + ## scheduler should not place another pod on the node. + ## https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetesiohostname + topologyKey: "kubernetes.io/hostname" + ## + ## Containers in the pod + containers: + ## This example only has this etcd container. + - name: etcd + image: quay.io/coreos/etcd:v3.6.0 + imagePullPolicy: IfNotPresent + ports: + - name: etcd-client + containerPort: 2379 + - name: etcd-server + containerPort: 2380 + - name: etcd-metrics + containerPort: 8080 + ## + ## These probes will fail over TLS for self-signed certificates, so etcd + ## is configured to deliver metrics over port 8080 further down. + ## + ## As mentioned in the "Monitoring etcd" page, /readyz and /livez were + ## added in v3.5.12. Prior to this, monitoring required extra tooling + ## inside the container to make these probes work. + ## + ## The values in this readiness probe should be further validated, it + ## is only an example configuration. + readinessProbe: + httpGet: + path: /readyz + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## The values in this liveness probe should be further validated, it + ## is only an example configuration. + livenessProbe: + httpGet: + path: /livez + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + env: + ## + ## Environment variables defined here can be used by other parts of the + ## container configuration. 
They are interpreted by Kubernetes, instead + ## of in the container environment. + ## + ## These env vars pass along information about the pod. + - name: K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SERVICE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.annotations['serviceName'] + ## + ## Configuring etcdctl inside the container to connect to the etcd node + ## in the container reduces confusion when debugging. + - name: ETCDCTL_ENDPOINTS + value: $(HOSTNAME).$(SERVICE_NAME):2379 + ## + ## TLS client configuration for etcdctl in the container. + ## These files paths are part of the "etcd-client-certs" volume mount. + # - name: ETCDCTL_KEY + # value: /etc/etcd/certs/client/tls.key + # - name: ETCDCTL_CERT + # value: /etc/etcd/certs/client/tls.crt + # - name: ETCDCTL_CACERT + # value: /etc/etcd/certs/client/ca.crt + ## + ## Use this URI_SCHEME value for non-TLS clusters. + - name: URI_SCHEME + value: "http" + ## TLS: Use this URI_SCHEME for TLS clusters. + # - name: URI_SCHEME + # value: "https" + ## + ## If you're using a different container, the executable may be in a + ## different location. This example uses the full path to help remove + ## ambiguity to you, the reader. + ## Often you can just use "etcd" instead of "/usr/local/bin/etcd" and it + ## will work because the $PATH includes a directory containing "etcd". + command: + - /usr/local/bin/etcd + ## + ## Arguments used with the etcd command inside the container. + args: + ## + ## Configure the name of the etcd server. + - --name=$(HOSTNAME) + ## + ## Configure etcd to use the persistent storage configured below. + - --data-dir=/data + ## + ## In this example we're consolidating the WAL into sharing space with + ## the data directory. This is not ideal in production environments and + ## should be placed in it's own volume. + - --wal-dir=/data/wal + ## + ## URL configurations are parameterized here and you shouldn't need to + ## do anything with these. + - --listen-peer-urls=$(URI_SCHEME)://0.0.0.0:2380 + - --listen-client-urls=$(URI_SCHEME)://0.0.0.0:2379 + - --advertise-client-urls=$(URI_SCHEME)://$(HOSTNAME).$(SERVICE_NAME):2379 + ## + ## This must be set to "new" for initial cluster bootstrapping. To scale + ## the cluster up, this should be changed to "existing" when the replica + ## count is increased. If set incorrectly, etcd makes an attempt to + ## start but fail safely. + - --initial-cluster-state=new + ## + ## Token used for cluster initialization. The recommendation for this is + ## to use a unique token for every cluster. This example parameterized + ## to be unique to the namespace, but if you are deploying multiple etcd + ## clusters in the same namespace, you should do something extra to + ## ensure uniqueness amongst clusters. + - --initial-cluster-token=etcd-$(K8S_NAMESPACE) + ## + ## The initial cluster flag needs to be updated to match the number of + ## replicas configured. When combined, these are a little hard to read. + ## Here is what a single parameterized peer looks like: + ## etcd-0=$(URI_SCHEME)://etcd-0.$(SERVICE_NAME):2380 + - --initial-cluster=etcd-0=$(URI_SCHEME)://etcd-0.$(SERVICE_NAME):2380 + ## + ## The peer urls flag should be fine as-is. + - --initial-advertise-peer-urls=$(URI_SCHEME)://$(HOSTNAME).$(SERVICE_NAME):2380 + ## + ## This avoids probe failure if you opt to configure TLS. 
+ - --listen-metrics-urls=http://0.0.0.0:8080 + ## + ## These are some configurations you may want to consider enabling, but + ## should look into further to identify what settings are best for you. + # - --auto-compaction-mode=periodic + # - --auto-compaction-retention=10m + ## + ## TLS client configuration for etcd, reusing the etcdctl env vars. + # - --client-cert-auth + # - --trusted-ca-file=$(ETCDCTL_CACERT) + # - --cert-file=$(ETCDCTL_CERT) + # - --key-file=$(ETCDCTL_KEY) + ## + ## TLS server configuration for etcdctl in the container. + ## These files paths are part of the "etcd-server-certs" volume mount. + # - --peer-client-cert-auth + # - --peer-trusted-ca-file=/etc/etcd/certs/server/ca.crt + # - --peer-cert-file=/etc/etcd/certs/server/tls.crt + # - --peer-key-file=/etc/etcd/certs/server/tls.key + ## + ## This is the mount configuration. + volumeMounts: + - name: etcd-data + mountPath: /data + ## + ## TLS client configuration for etcdctl + # - name: etcd-client-tls + # mountPath: "/etc/etcd/certs/client" + # readOnly: true + ## + ## TLS server configuration + # - name: etcd-server-tls + # mountPath: "/etc/etcd/certs/server" + # readOnly: true + volumes: + ## + ## TLS client configuration + # - name: etcd-client-tls + # secret: + # secretName: etcd-client-tls + # optional: false + ## + ## TLS server configuration + # - name: etcd-server-tls + # secret: + # secretName: etcd-server-tls + # optional: false + ## + ## This StatefulSet will uses the volumeClaimTemplate field to create a PVC in + ## the cluster for each replica. These PVCs can not be easily resized later. + volumeClaimTemplates: + - metadata: + name: etcd-data + spec: + accessModes: ["ReadWriteOnce"] + ## + ## In some clusters, it is necessary to explicitly set the storage class. + ## This example will end up using the default storage class. + # storageClassName: "" + resources: + requests: + storage: 1Gi diff --git a/provider/local/README.md b/provider/local/README.md deleted file mode 100644 index 5334babc03..0000000000 --- a/provider/local/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Local Webhook Provider - -A demo provider that allows to store records in memory and query them via a locally exposed simple DNS server implementation. - -**NOTE**: this provider is not intended for any production usage. diff --git a/provider/local/main.go b/provider/local/main.go deleted file mode 100644 index 4482c35342..0000000000 --- a/provider/local/main.go +++ /dev/null @@ -1,324 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io" - "log" - "net" - "net/http" - "strings" - "sync" - - "github.com/miekg/dns" - "sigs.k8s.io/external-dns/endpoint" - "sigs.k8s.io/external-dns/plan" - "sigs.k8s.io/external-dns/provider/webhook/api" -) - -// DNSRecord represents a single DNS record with its type and targets -type DNSRecord struct { - Type string `json:"type"` - Targets []string `json:"targets"` -} - -// DomainRecords holds all record types for a specific domain -type DomainRecords struct { - Records map[string]DNSRecord `json:"records"` // recordType -> DNSRecord -} - -// DNSRecordStore holds all DNS records in memory -type DNSRecordStore struct { - mu sync.RWMutex - domains map[string]*DomainRecords // domain -> DomainRecords -} - -// NewDNSRecordStore creates a new in-memory record store -func NewDNSRecordStore() *DNSRecordStore { - return &DNSRecordStore{ - domains: make(map[string]*DomainRecords), - } -} - -// AddRecord adds or updates a DNS record -func (s *DNSRecordStore) AddRecord(domain, recordType string, targets []string) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.domains[domain] == nil { - s.domains[domain] = &DomainRecords{ - Records: make(map[string]DNSRecord), - } - } - s.domains[domain].Records[recordType] = DNSRecord{ - Type: recordType, - Targets: targets, - } -} - -// RemoveRecord removes a DNS record -func (s *DNSRecordStore) RemoveRecord(domain, recordType string) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.domains[domain] != nil { - delete(s.domains[domain].Records, recordType) - if len(s.domains[domain].Records) == 0 { - delete(s.domains, domain) - } - } -} - -// GetRecord retrieves targets for a specific domain and record type -func (s *DNSRecordStore) GetRecord(domain, recordType string) []string { - s.mu.RLock() - defer s.mu.RUnlock() - - if s.domains[domain] != nil { - if record, exists := s.domains[domain].Records[recordType]; exists { - return record.Targets - } - } - return nil -} - -// GetAllRecords returns all records as endpoints for webhook responses -func (s *DNSRecordStore) GetAllRecords() []endpoint.Endpoint { - s.mu.RLock() - defer s.mu.RUnlock() - - var endpoints []endpoint.Endpoint - for domain, domainRecords := range s.domains { - for _, record := range domainRecords.Records { - endpoints = append(endpoints, endpoint.Endpoint{ - DNSName: domain, - RecordType: record.Type, - Targets: record.Targets, - }) - } - } - return endpoints -} - -// startDNSServer starts the DNS server in a goroutine -func startDNSServer(store *DNSRecordStore, address string, port int, defaultTTL uint32) error { - mux := dns.NewServeMux() - mux.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) { - handleDNSQuery(store, w, r, defaultTTL) - }) - - server := &dns.Server{ - Addr: fmt.Sprintf("%s:%d", address, port), - Net: "udp", - Handler: mux, - } - - log.Printf("Starting DNS server on %s (UDP)\n", server.Addr) - - go func() { - if err := server.ListenAndServe(); err != nil { - log.Printf("DNS server error: %v", err) - } - }() - - // Also start TCP server - tcpServer := &dns.Server{ - Addr: fmt.Sprintf("%s:%d", address, port), - Net: "tcp", - Handler: mux, - } - - go func() { - if err := tcpServer.ListenAndServe(); err != nil { - log.Printf("DNS TCP server error: %v", err) - } - }() - - return nil -} - -// handleDNSQuery handles incoming DNS queries -func handleDNSQuery(store *DNSRecordStore, w dns.ResponseWriter, r *dns.Msg, defaultTTL uint32) { - m := new(dns.Msg) - m.SetReply(r) - m.Authoritative = true - - for _, q := range 
r.Question { - domain := strings.TrimSuffix(q.Name, ".") - recordType := dns.TypeToString[q.Qtype] - - targets := store.GetRecord(domain, recordType) - - if len(targets) > 0 { - for _, target := range targets { - switch q.Qtype { - case dns.TypeA: - if ip := net.ParseIP(target); ip != nil && ip.To4() != nil { - rr := &dns.A{ - Hdr: dns.RR_Header{ - Name: q.Name, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: defaultTTL, - }, - A: ip.To4(), - } - m.Answer = append(m.Answer, rr) - } - case dns.TypeAAAA: - if ip := net.ParseIP(target); ip != nil && ip.To16() != nil && ip.To4() == nil { - rr := &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: q.Name, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: defaultTTL, - }, - AAAA: ip.To16(), - } - m.Answer = append(m.Answer, rr) - } - case dns.TypeCNAME: - rr := &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: q.Name, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: defaultTTL, - }, - Target: dns.Fqdn(target), - } - m.Answer = append(m.Answer, rr) - } - } - } else { - // No records found, set NXDOMAIN - m.Rcode = dns.RcodeNameError - } - } - - w.WriteMsg(m) -} - -func main() { - listenAddress := flag.String("listen-address", "127.0.0.1", "Address to listen on") - port := flag.Int("port", 8888, "Port to listen on") - dnsAddress := flag.String("dns-address", "127.0.0.1", "DNS server address") - dnsPort := flag.Int("dns-port", 5353, "DNS server port") - dnsTTL := flag.Int("dns-ttl", 300, "Default TTL for DNS responses") - flag.Parse() - - // Create shared record store - recordStore := NewDNSRecordStore() - - // Start DNS server - if err := startDNSServer(recordStore, *dnsAddress, *dnsPort, uint32(*dnsTTL)); err != nil { - log.Fatalf("Failed to start DNS server: %v", err) - } - - // Setup HTTP handlers - http.HandleFunc("/", negotiateHandler) - http.HandleFunc("/records", func(w http.ResponseWriter, r *http.Request) { - recordsHandler(w, r, recordStore) - }) - http.HandleFunc("/adjustendpoints", adjustEndpointsHandler) - http.HandleFunc("/healthz", healthzHandler) - - addr := fmt.Sprintf("%s:%d", *listenAddress, *port) - log.Printf("Starting webhook provider on %s\n", addr) - log.Fatal(http.ListenAndServe(addr, nil)) -} - -func negotiateHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - return - } - w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) - // Return your supported DomainFilter here - json.NewEncoder(w).Encode(endpoint.DomainFilter{}) -} - -func recordsHandler(w http.ResponseWriter, r *http.Request, store *DNSRecordStore) { - if r.Method == http.MethodGet { - w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) - endpoints := store.GetAllRecords() - json.NewEncoder(w).Encode(endpoints) - return - } - - if r.Method == http.MethodPost { - w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) - var changes plan.Changes - body, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = json.Unmarshal(body, &changes) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // Process deletions - for _, del := range changes.Delete { - store.RemoveRecord(del.DNSName, del.RecordType) - } - - // Process updates (treat as delete + create) - for _, update := range changes.UpdateOld { - store.RemoveRecord(update.DNSName, update.RecordType) - } - for _, update := range changes.UpdateNew { - if len(update.Targets) > 0 { - 
store.AddRecord(update.DNSName, update.RecordType, update.Targets) - } - } - - // Process creations - for _, create := range changes.Create { - if len(create.Targets) > 0 { - store.AddRecord(create.DNSName, create.RecordType, create.Targets) - } - } - - w.WriteHeader(http.StatusNoContent) - return - } - - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) -} - -func adjustEndpointsHandler(w http.ResponseWriter, r *http.Request) { - // read the endpoints from the input, return them straight back - var endpoints []endpoint.Endpoint - if err := json.NewDecoder(r.Body).Decode(&endpoints); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", api.MediaTypeFormatAndVersion) - json.NewEncoder(w).Encode(endpoints) -} - -func healthzHandler(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write([]byte("ok")) -} diff --git a/provider/local/main_test.go b/provider/local/main_test.go deleted file mode 100644 index c47470802f..0000000000 --- a/provider/local/main_test.go +++ /dev/null @@ -1,603 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "bytes" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "testing" - - "github.com/miekg/dns" - "sigs.k8s.io/external-dns/endpoint" - "sigs.k8s.io/external-dns/plan" - "sigs.k8s.io/external-dns/provider/webhook/api" -) - -func TestNewDNSRecordStore(t *testing.T) { - store := NewDNSRecordStore() - if store == nil { - t.Fatal("NewDNSRecordStore returned nil") - } - if store.domains == nil { - t.Fatal("domains map not initialized") - } -} - -func TestDNSRecordStore_AddRecord(t *testing.T) { - store := NewDNSRecordStore() - - // Test adding a simple A record - store.AddRecord("example.com", "A", []string{"192.168.1.1"}) - - targets := store.GetRecord("example.com", "A") - if len(targets) != 1 || targets[0] != "192.168.1.1" { - t.Errorf("Expected [192.168.1.1], got %v", targets) - } - - // Test adding multiple targets - store.AddRecord("example.com", "A", []string{"192.168.1.1", "192.168.1.2"}) - targets = store.GetRecord("example.com", "A") - if len(targets) != 2 { - t.Errorf("Expected 2 targets, got %d", len(targets)) - } - - // Test adding different record type for same domain - store.AddRecord("example.com", "AAAA", []string{"::1"}) - ipv6Targets := store.GetRecord("example.com", "AAAA") - if len(ipv6Targets) != 1 || ipv6Targets[0] != "::1" { - t.Errorf("Expected [::1], got %v", ipv6Targets) - } - - // Ensure A records still exist - targets = store.GetRecord("example.com", "A") - if len(targets) != 2 { - t.Errorf("A records should still exist, got %d", len(targets)) - } -} - -func TestDNSRecordStore_RemoveRecord(t *testing.T) { - store := NewDNSRecordStore() - - // Add some records - store.AddRecord("example.com", "A", []string{"192.168.1.1"}) - store.AddRecord("example.com", "AAAA", []string{"::1"}) - store.AddRecord("test.com", "A", []string{"10.0.0.1"}) - - // Remove one record type - 
store.RemoveRecord("example.com", "A") - targets := store.GetRecord("example.com", "A") - if len(targets) != 0 { - t.Errorf("A record should be removed, got %v", targets) - } - - // Ensure other records still exist - ipv6Targets := store.GetRecord("example.com", "AAAA") - if len(ipv6Targets) != 1 { - t.Errorf("AAAA record should still exist, got %v", ipv6Targets) - } - - testTargets := store.GetRecord("test.com", "A") - if len(testTargets) != 1 { - t.Errorf("test.com A record should still exist, got %v", testTargets) - } - - // Remove last record for domain - store.RemoveRecord("example.com", "AAAA") - - // Domain should be completely removed - if store.domains["example.com"] != nil { - t.Error("Domain should be removed when no records remain") - } -} - -func TestDNSRecordStore_GetRecord_NotFound(t *testing.T) { - store := NewDNSRecordStore() - - // Test getting non-existent record - targets := store.GetRecord("nonexistent.com", "A") - if targets != nil { - t.Errorf("Expected nil for non-existent record, got %v", targets) - } - - // Test getting non-existent record type for existing domain - store.AddRecord("example.com", "A", []string{"192.168.1.1"}) - targets = store.GetRecord("example.com", "CNAME") - if targets != nil { - t.Errorf("Expected nil for non-existent record type, got %v", targets) - } -} - -func TestDNSRecordStore_GetAllRecords(t *testing.T) { - store := NewDNSRecordStore() - - // Test empty store - endpoints := store.GetAllRecords() - if len(endpoints) != 0 { - t.Errorf("Expected 0 endpoints for empty store, got %d", len(endpoints)) - } - - // Add some records - store.AddRecord("example.com", "A", []string{"192.168.1.1", "192.168.1.2"}) - store.AddRecord("example.com", "AAAA", []string{"::1"}) - store.AddRecord("test.com", "CNAME", []string{"example.com"}) - - endpoints = store.GetAllRecords() - if len(endpoints) != 3 { - t.Errorf("Expected 3 endpoints, got %d", len(endpoints)) - } - - // Verify endpoint content - endpointMap := make(map[string]endpoint.Endpoint) - for _, ep := range endpoints { - key := ep.DNSName + ":" + ep.RecordType - endpointMap[key] = ep - } - - if ep, exists := endpointMap["example.com:A"]; !exists { - t.Error("Missing example.com A record") - } else if len(ep.Targets) != 2 { - t.Errorf("Expected 2 targets for A record, got %d", len(ep.Targets)) - } - - if ep, exists := endpointMap["example.com:AAAA"]; !exists { - t.Error("Missing example.com AAAA record") - } else if len(ep.Targets) != 1 || ep.Targets[0] != "::1" { - t.Errorf("Expected [::1] for AAAA record, got %v", ep.Targets) - } - - if ep, exists := endpointMap["test.com:CNAME"]; !exists { - t.Error("Missing test.com CNAME record") - } else if len(ep.Targets) != 1 || ep.Targets[0] != "example.com" { - t.Errorf("Expected [example.com] for CNAME record, got %v", ep.Targets) - } -} - -func TestNegotiateHandler(t *testing.T) { - tests := []struct { - name string - method string - expectedStatus int - expectedHeader string - }{ - { - name: "Valid GET request", - method: http.MethodGet, - expectedStatus: http.StatusOK, - expectedHeader: api.MediaTypeFormatAndVersion, - }, - { - name: "Invalid POST request", - method: http.MethodPost, - expectedStatus: http.StatusMethodNotAllowed, - expectedHeader: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := httptest.NewRequest(tt.method, "/", nil) - w := httptest.NewRecorder() - - negotiateHandler(w, req) - - res := w.Result() - if res.StatusCode != tt.expectedStatus { - t.Errorf("Expected status %d, got %d", 
tt.expectedStatus, res.StatusCode) - } - - if tt.expectedHeader != "" { - contentType := res.Header.Get("Content-Type") - if contentType != tt.expectedHeader { - t.Errorf("Expected header %s, got %s", tt.expectedHeader, contentType) - } - - defer res.Body.Close() - var domainFilter endpoint.DomainFilter - err := json.NewDecoder(res.Body).Decode(&domainFilter) - if err != nil { - t.Errorf("Failed to decode response: %v", err) - } - } - }) - } -} - -func TestRecordsHandler_GET(t *testing.T) { - store := NewDNSRecordStore() - store.AddRecord("example.com", "A", []string{"192.168.1.1"}) - store.AddRecord("test.com", "CNAME", []string{"example.com"}) - - req := httptest.NewRequest("GET", "/records", nil) - w := httptest.NewRecorder() - - recordsHandler(w, req, store) - - if w.Code != http.StatusOK { - t.Errorf("Expected status 200, got %d", w.Code) - } - - var endpoints []endpoint.Endpoint - if err := json.Unmarshal(w.Body.Bytes(), &endpoints); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(endpoints) != 2 { - t.Errorf("Expected 2 endpoints, got %d", len(endpoints)) - } -} - -func TestRecordsHandler_POST_Create(t *testing.T) { - store := NewDNSRecordStore() - - changes := plan.Changes{ - Create: []*endpoint.Endpoint{ - { - DNSName: "example.com", - RecordType: "A", - Targets: []string{"192.168.1.1"}, - }, - { - DNSName: "test.com", - RecordType: "CNAME", - Targets: []string{"example.com"}, - }, - }, - } - - body, _ := json.Marshal(changes) - req := httptest.NewRequest("POST", "/records", bytes.NewReader(body)) - w := httptest.NewRecorder() - - recordsHandler(w, req, store) - - if w.Code != http.StatusNoContent { - t.Errorf("Expected status 204, got %d", w.Code) - } - - // Verify records were created - targets := store.GetRecord("example.com", "A") - if len(targets) != 1 || targets[0] != "192.168.1.1" { - t.Errorf("A record not created correctly, got %v", targets) - } - - targets = store.GetRecord("test.com", "CNAME") - if len(targets) != 1 || targets[0] != "example.com" { - t.Errorf("CNAME record not created correctly, got %v", targets) - } -} - -func TestRecordsHandler_POST_Delete(t *testing.T) { - store := NewDNSRecordStore() - store.AddRecord("example.com", "A", []string{"192.168.1.1"}) - store.AddRecord("test.com", "CNAME", []string{"example.com"}) - - changes := plan.Changes{ - Delete: []*endpoint.Endpoint{ - { - DNSName: "example.com", - RecordType: "A", - }, - }, - } - - body, _ := json.Marshal(changes) - req := httptest.NewRequest("POST", "/records", bytes.NewReader(body)) - w := httptest.NewRecorder() - - recordsHandler(w, req, store) - - if w.Code != http.StatusNoContent { - t.Errorf("Expected status 204, got %d", w.Code) - } - - // Verify record was deleted - targets := store.GetRecord("example.com", "A") - if len(targets) != 0 { - t.Errorf("A record should be deleted, got %v", targets) - } - - // Verify other record still exists - targets = store.GetRecord("test.com", "CNAME") - if len(targets) != 1 { - t.Errorf("CNAME record should still exist, got %v", targets) - } -} - -func TestRecordsHandler_POST_Update(t *testing.T) { - store := NewDNSRecordStore() - store.AddRecord("example.com", "A", []string{"192.168.1.1"}) - - changes := plan.Changes{ - UpdateOld: []*endpoint.Endpoint{ - { - DNSName: "example.com", - RecordType: "A", - Targets: []string{"192.168.1.1"}, - }, - }, - UpdateNew: []*endpoint.Endpoint{ - { - DNSName: "example.com", - RecordType: "A", - Targets: []string{"192.168.1.2", "192.168.1.3"}, - }, - }, - } - - body, _ := 
json.Marshal(changes) - req := httptest.NewRequest("POST", "/records", bytes.NewReader(body)) - w := httptest.NewRecorder() - - recordsHandler(w, req, store) - - if w.Code != http.StatusNoContent { - t.Errorf("Expected status 204, got %d", w.Code) - } - - // Verify record was updated - targets := store.GetRecord("example.com", "A") - if len(targets) != 2 { - t.Errorf("Expected 2 targets after update, got %d", len(targets)) - } - - expectedTargets := map[string]bool{"192.168.1.2": true, "192.168.1.3": true} - for _, target := range targets { - if !expectedTargets[target] { - t.Errorf("Unexpected target after update: %s", target) - } - } -} - -func TestRecordsHandler_InvalidMethod(t *testing.T) { - store := NewDNSRecordStore() - - req := httptest.NewRequest("PUT", "/records", nil) - w := httptest.NewRecorder() - - recordsHandler(w, req, store) - - if w.Code != http.StatusMethodNotAllowed { - t.Errorf("Expected status 405, got %d", w.Code) - } -} - -func TestHandleDNSQuery_ARecord(t *testing.T) { - store := NewDNSRecordStore() - store.AddRecord("example.com", "A", []string{"192.168.1.1", "192.168.1.2"}) - - // Create DNS query - m := new(dns.Msg) - m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA) - - // Create test response writer - responseWriter := &testResponseWriter{} - - // Handle the query - handleDNSQuery(store, responseWriter, m, 300) - - // Verify response - response := responseWriter.msg - if response == nil { - t.Fatal("No response received") - } - - if len(response.Answer) != 2 { - t.Errorf("Expected 2 answers, got %d", len(response.Answer)) - } - - if response.Authoritative != true { - t.Error("Response should be authoritative") - } - - // Check answer records - ips := make(map[string]bool) - for _, rr := range response.Answer { - if a, ok := rr.(*dns.A); ok { - ips[a.A.String()] = true - if a.Hdr.Ttl != 300 { - t.Errorf("Expected TTL 300, got %d", a.Hdr.Ttl) - } - } else { - t.Errorf("Expected A record, got %T", rr) - } - } - - if !ips["192.168.1.1"] || !ips["192.168.1.2"] { - t.Error("Expected both IP addresses in response") - } -} - -func TestHandleDNSQuery_AAAARecord(t *testing.T) { - store := NewDNSRecordStore() - store.AddRecord("example.com", "AAAA", []string{"2001:db8::1"}) - - // Create DNS query - m := new(dns.Msg) - m.SetQuestion(dns.Fqdn("example.com"), dns.TypeAAAA) - - // Create test response writer - responseWriter := &testResponseWriter{} - - // Handle the query - handleDNSQuery(store, responseWriter, m, 300) - - // Verify response - response := responseWriter.msg - if response == nil { - t.Fatal("No response received") - } - - if len(response.Answer) != 1 { - t.Errorf("Expected 1 answer, got %d", len(response.Answer)) - } - - if aaaa, ok := response.Answer[0].(*dns.AAAA); ok { - if aaaa.AAAA.String() != "2001:db8::1" { - t.Errorf("Expected 2001:db8::1, got %s", aaaa.AAAA.String()) - } - } else { - t.Errorf("Expected AAAA record, got %T", response.Answer[0]) - } -} - -func TestHandleDNSQuery_CNAMERecord(t *testing.T) { - store := NewDNSRecordStore() - store.AddRecord("www.example.com", "CNAME", []string{"example.com"}) - - // Create DNS query - m := new(dns.Msg) - m.SetQuestion(dns.Fqdn("www.example.com"), dns.TypeCNAME) - - // Create test response writer - responseWriter := &testResponseWriter{} - - // Handle the query - handleDNSQuery(store, responseWriter, m, 300) - - // Verify response - response := responseWriter.msg - if response == nil { - t.Fatal("No response received") - } - - if len(response.Answer) != 1 { - t.Errorf("Expected 1 answer, got %d", 
len(response.Answer)) - } - - if cname, ok := response.Answer[0].(*dns.CNAME); ok { - if cname.Target != "example.com." { - t.Errorf("Expected example.com., got %s", cname.Target) - } - } else { - t.Errorf("Expected CNAME record, got %T", response.Answer[0]) - } -} - -func TestHandleDNSQuery_NXDOMAIN(t *testing.T) { - store := NewDNSRecordStore() - - // Create DNS query for non-existent domain - m := new(dns.Msg) - m.SetQuestion(dns.Fqdn("nonexistent.com"), dns.TypeA) - - // Create test response writer - responseWriter := &testResponseWriter{} - - // Handle the query - handleDNSQuery(store, responseWriter, m, 300) - - // Verify response - response := responseWriter.msg - if response == nil { - t.Fatal("No response received") - } - - if response.Rcode != dns.RcodeNameError { - t.Errorf("Expected NXDOMAIN (rcode %d), got %d", dns.RcodeNameError, response.Rcode) - } - - if len(response.Answer) != 0 { - t.Errorf("Expected 0 answers for NXDOMAIN, got %d", len(response.Answer)) - } -} - -func TestHealthzHandler(t *testing.T) { - req := httptest.NewRequest(http.MethodGet, "/healthz", nil) - w := httptest.NewRecorder() - - healthzHandler(w, req) - - res := w.Result() - if res.StatusCode != http.StatusOK { - t.Errorf("Expected status 200, got %d", res.StatusCode) - } - - if w.Body.String() != "ok" { - t.Errorf("Expected 'ok', got '%s'", w.Body.String()) - } -} - -func TestAdjustEndpointsHandler(t *testing.T) { - endpoints := []endpoint.Endpoint{ - { - DNSName: "test.example.com", - RecordType: "A", - Targets: []string{"192.168.1.1", "192.168.1.2"}, - }, - } - - endpointsJSON, err := json.Marshal(endpoints) - if err != nil { - t.Fatalf("Failed to marshal endpoints: %v", err) - } - - req := httptest.NewRequest(http.MethodPost, "/adjustendpoints", bytes.NewReader(endpointsJSON)) - w := httptest.NewRecorder() - - adjustEndpointsHandler(w, req) - - res := w.Result() - if res.StatusCode != http.StatusOK { - t.Errorf("Expected status 200, got %d", res.StatusCode) - } - - var returnedEndpoints []endpoint.Endpoint - if err := json.Unmarshal(w.Body.Bytes(), &returnedEndpoints); err != nil { - t.Fatalf("Failed to unmarshal response: %v", err) - } - - if len(returnedEndpoints) != 1 { - t.Errorf("Expected 1 endpoint returned, got %d", len(returnedEndpoints)) - } -} - -// testResponseWriter implements dns.ResponseWriter for testing -type testResponseWriter struct { - msg *dns.Msg -} - -func (w *testResponseWriter) LocalAddr() net.Addr { - return nil -} - -func (w *testResponseWriter) RemoteAddr() net.Addr { - return nil -} - -func (w *testResponseWriter) WriteMsg(m *dns.Msg) error { - w.msg = m - return nil -} - -func (w *testResponseWriter) Write([]byte) (int, error) { - return 0, nil -} - -func (w *testResponseWriter) Close() error { - return nil -} - -func (w *testResponseWriter) TsigStatus() error { - return nil -} - -func (w *testResponseWriter) TsigTimersOnly(bool) {} - -func (w *testResponseWriter) Hijack() {} diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 42b63644cd..c0faf6761e 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -4,7 +4,6 @@ set -e KO_VERSION="0.18.0" KIND_VERSION="0.30.0" -GO_VERSION="1.25" ALPINE_VERSION="3.18" echo "Starting end-to-end tests for external-dns with local provider..." @@ -34,18 +33,19 @@ sudo mv ko /usr/local/bin/ko # Build external-dns echo "Building external-dns..." -make build.image - -# Build a webhook image with the local provider -docker build -t webhook:v1 -f - . 
< Date: Sat, 15 Nov 2025 18:01:38 +0100 Subject: [PATCH 5/8] newlines Signed-off-by: Raffaele Di Fazio --- scripts/e2e-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index c0faf6761e..fb85f3b443 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -171,7 +171,7 @@ spec: - -c - | echo "Testing DNS server at $NODE_IP:5353" - + echo "=== Testing DNS server with dig ===" echo "Querying: externaldns-e2e.external.dns A record" if dig @$NODE_IP -p 5353 externaldns-e2e.external.dns A +short +timeout=5; then @@ -181,7 +181,7 @@ spec: echo "DNS query failed" exit 1 fi - + echo "DNS server tests completed" exit 0 EOF From eafeafbd97e972105eb7f50c1dde3bfa42072f18 Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Mon, 17 Nov 2025 10:37:18 +0100 Subject: [PATCH 6/8] Update scripts/e2e-test.sh Co-authored-by: Michel Loiseleur <97035654+mloiseleur@users.noreply.github.com> --- scripts/e2e-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index fb85f3b443..2e5ba49025 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -4,7 +4,7 @@ set -e KO_VERSION="0.18.0" KIND_VERSION="0.30.0" -ALPINE_VERSION="3.18" +ALPINE_VERSION="3.22" echo "Starting end-to-end tests for external-dns with local provider..." From f0d649f861a24dbd1e83c6c20e071350c8148aec Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Mon, 17 Nov 2025 10:58:27 +0100 Subject: [PATCH 7/8] drop all comments Signed-off-by: Raffaele Di Fazio --- .github/workflows/end-to-end-tests.yml | 2 +- e2e/provider/etcd.yaml | 189 ------------------------- 2 files changed, 1 insertion(+), 190 deletions(-) diff --git a/.github/workflows/end-to-end-tests.yml b/.github/workflows/end-to-end-tests.yml index b465472816..756989af22 100644 --- a/.github/workflows/end-to-end-tests.yml +++ b/.github/workflows/end-to-end-tests.yml @@ -1,4 +1,4 @@ -name: end to end test with local provider +name: end to end test on: push: diff --git a/e2e/provider/etcd.yaml b/e2e/provider/etcd.yaml index 1833199be0..6a58c2e3a9 100644 --- a/e2e/provider/etcd.yaml +++ b/e2e/provider/etcd.yaml @@ -1,4 +1,3 @@ -# file: etcd.yaml --- apiVersion: v1 kind: Service @@ -10,17 +9,7 @@ spec: clusterIP: None selector: app: etcd - ## - ## Ideally we would use SRV records to do peer discovery for initialization. - ## Unfortunately discovery will not work without logic to wait for these to - ## populate in the container. This problem is relatively easy to overcome by - ## making changes to prevent the etcd process from starting until the records - ## have populated. The documentation on statefulsets briefly talk about it. - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id publishNotReadyAddresses: true - ## - ## The naming scheme of the client and server ports match the scheme that etcd - ## uses when doing discovery with SRV records. ports: - name: etcd-client port: 2379 @@ -35,86 +24,32 @@ metadata: namespace: default name: etcd spec: - ## - ## The service name is being set to leverage the service headlessly. - ## https://kubernetes.io/docs/concepts/services-networking/service/#headless-services serviceName: etcd - ## - ## If you are increasing the replica count of an existing cluster, you should - ## also update the --initial-cluster-state flag as noted further down in the - ## container configuration. 
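
Because the etcd Service above is headless and publishes not-ready addresses, the per-pod DNS names and the named etcd-client port are discoverable through SRV records, which is the mechanism etcd's own DNS discovery relies on. A small sketch of that lookup, assuming it runs from inside the cluster, the "default" namespace used by this manifest, and the conventional cluster.local cluster domain:

package main

import (
    "fmt"
    "log"
    "net"
)

func main() {
    // Assumed FQDN: Service "etcd" in namespace "default" with the
    // conventional cluster.local cluster domain.
    _, srvs, err := net.LookupSRV("etcd-client", "tcp", "etcd.default.svc.cluster.local")
    if err != nil {
        log.Fatalf("SRV lookup failed: %v", err)
    }
    for _, srv := range srvs {
        // Each target is a per-pod name such as etcd-0.etcd.default.svc.cluster.local.
        fmt.Printf("%s:%d (priority=%d weight=%d)\n", srv.Target, srv.Port, srv.Priority, srv.Weight)
    }
}
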
replicas: 1 - ## - ## For initialization, the etcd pods must be available to eachother before - ## they are "ready" for traffic. The "Parallel" policy makes this possible. podManagementPolicy: Parallel - ## - ## To ensure availability of the etcd cluster, the rolling update strategy - ## is used. For availability, there must be at least 51% of the etcd nodes - ## online at any given time. updateStrategy: type: RollingUpdate - ## - ## This is label query over pods that should match the replica count. - ## It must match the pod template's labels. For more information, see the - ## following documentation: - ## https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors selector: matchLabels: app: etcd - ## - ## Pod configuration template. template: metadata: - ## - ## The labeling here is tied to the "matchLabels" of this StatefulSet and - ## "affinity" configuration of the pod that will be created. - ## - ## This example's labeling scheme is fine for one etcd cluster per - ## namespace, but should you desire multiple clusters per namespace, you - ## will need to update the labeling schema to be unique per etcd cluster. labels: app: etcd annotations: - ## - ## This gets referenced in the etcd container's configuration as part of - ## the DNS name. It must match the service name created for the etcd - ## cluster. The choice to place it in an annotation instead of the env - ## settings is because there should only be 1 service per etcd cluster. serviceName: etcd spec: - ## - ## Configuring the node affinity is necessary to prevent etcd servers from - ## ending up on the same hardware together. - ## - ## See the scheduling documentation for more information about this: - ## https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity affinity: - ## The podAntiAffinity is a set of rules for scheduling that describe - ## when NOT to place a pod from this StatefulSet on a node. podAntiAffinity: - ## - ## When preparing to place the pod on a node, the scheduler will check - ## for other pods matching the rules described by the labelSelector - ## separated by the chosen topology key. requiredDuringSchedulingIgnoredDuringExecution: - ## This label selector is looking for app=etcd - labelSelector: matchExpressions: - key: app operator: In values: - etcd - ## This topology key denotes a common label used on nodes in the - ## cluster. The podAntiAffinity configuration essentially states - ## that if another pod has a label of app=etcd on the node, the - ## scheduler should not place another pod on the node. - ## https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetesiohostname topologyKey: "kubernetes.io/hostname" - ## - ## Containers in the pod containers: - ## This example only has this etcd container. - name: etcd image: quay.io/coreos/etcd:v3.6.0 imagePullPolicy: IfNotPresent @@ -125,16 +60,6 @@ spec: containerPort: 2380 - name: etcd-metrics containerPort: 8080 - ## - ## These probes will fail over TLS for self-signed certificates, so etcd - ## is configured to deliver metrics over port 8080 further down. - ## - ## As mentioned in the "Monitoring etcd" page, /readyz and /livez were - ## added in v3.5.12. Prior to this, monitoring required extra tooling - ## inside the container to make these probes work. - ## - ## The values in this readiness probe should be further validated, it - ## is only an example configuration. 
readinessProbe: httpGet: path: /readyz @@ -144,8 +69,6 @@ spec: timeoutSeconds: 5 successThreshold: 1 failureThreshold: 30 - ## The values in this liveness probe should be further validated, it - ## is only an example configuration. livenessProbe: httpGet: path: /livez @@ -155,12 +78,6 @@ spec: timeoutSeconds: 5 failureThreshold: 3 env: - ## - ## Environment variables defined here can be used by other parts of the - ## container configuration. They are interpreted by Kubernetes, instead - ## of in the container environment. - ## - ## These env vars pass along information about the pod. - name: K8S_NAMESPACE valueFrom: fieldRef: @@ -173,138 +90,32 @@ spec: valueFrom: fieldRef: fieldPath: metadata.annotations['serviceName'] - ## - ## Configuring etcdctl inside the container to connect to the etcd node - ## in the container reduces confusion when debugging. - name: ETCDCTL_ENDPOINTS value: $(HOSTNAME).$(SERVICE_NAME):2379 - ## - ## TLS client configuration for etcdctl in the container. - ## These files paths are part of the "etcd-client-certs" volume mount. - # - name: ETCDCTL_KEY - # value: /etc/etcd/certs/client/tls.key - # - name: ETCDCTL_CERT - # value: /etc/etcd/certs/client/tls.crt - # - name: ETCDCTL_CACERT - # value: /etc/etcd/certs/client/ca.crt - ## - ## Use this URI_SCHEME value for non-TLS clusters. - name: URI_SCHEME value: "http" - ## TLS: Use this URI_SCHEME for TLS clusters. - # - name: URI_SCHEME - # value: "https" - ## - ## If you're using a different container, the executable may be in a - ## different location. This example uses the full path to help remove - ## ambiguity to you, the reader. - ## Often you can just use "etcd" instead of "/usr/local/bin/etcd" and it - ## will work because the $PATH includes a directory containing "etcd". command: - /usr/local/bin/etcd - ## - ## Arguments used with the etcd command inside the container. args: - ## - ## Configure the name of the etcd server. - --name=$(HOSTNAME) - ## - ## Configure etcd to use the persistent storage configured below. - --data-dir=/data - ## - ## In this example we're consolidating the WAL into sharing space with - ## the data directory. This is not ideal in production environments and - ## should be placed in it's own volume. - --wal-dir=/data/wal - ## - ## URL configurations are parameterized here and you shouldn't need to - ## do anything with these. - --listen-peer-urls=$(URI_SCHEME)://0.0.0.0:2380 - --listen-client-urls=$(URI_SCHEME)://0.0.0.0:2379 - --advertise-client-urls=$(URI_SCHEME)://$(HOSTNAME).$(SERVICE_NAME):2379 - ## - ## This must be set to "new" for initial cluster bootstrapping. To scale - ## the cluster up, this should be changed to "existing" when the replica - ## count is increased. If set incorrectly, etcd makes an attempt to - ## start but fail safely. - --initial-cluster-state=new - ## - ## Token used for cluster initialization. The recommendation for this is - ## to use a unique token for every cluster. This example parameterized - ## to be unique to the namespace, but if you are deploying multiple etcd - ## clusters in the same namespace, you should do something extra to - ## ensure uniqueness amongst clusters. - --initial-cluster-token=etcd-$(K8S_NAMESPACE) - ## - ## The initial cluster flag needs to be updated to match the number of - ## replicas configured. When combined, these are a little hard to read. 
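
The readiness and liveness probes above hit the /readyz and /livez endpoints that etcd serves on its plain-HTTP metrics listener (container port 8080), which keeps the probes working even if client and peer traffic is later switched to TLS. The same check can be reproduced from any pod in the cluster; a sketch, assuming the etcd-0 pod name behind the headless "etcd" Service and the "default" namespace from this manifest:

package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
    "time"
)

func main() {
    // Assumed in-cluster address: pod etcd-0 behind the headless "etcd" Service,
    // health endpoints on the metrics listener (port 8080).
    client := &http.Client{Timeout: 5 * time.Second}
    resp, err := client.Get("http://etcd-0.etcd.default.svc.cluster.local:8080/readyz")
    if err != nil {
        log.Fatalf("readiness check failed: %v", err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("readyz: %s %s\n", resp.Status, body)
}
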
- ## Here is what a single parameterized peer looks like: - ## etcd-0=$(URI_SCHEME)://etcd-0.$(SERVICE_NAME):2380 - --initial-cluster=etcd-0=$(URI_SCHEME)://etcd-0.$(SERVICE_NAME):2380 - ## - ## The peer urls flag should be fine as-is. - --initial-advertise-peer-urls=$(URI_SCHEME)://$(HOSTNAME).$(SERVICE_NAME):2380 - ## - ## This avoids probe failure if you opt to configure TLS. - --listen-metrics-urls=http://0.0.0.0:8080 - ## - ## These are some configurations you may want to consider enabling, but - ## should look into further to identify what settings are best for you. - # - --auto-compaction-mode=periodic - # - --auto-compaction-retention=10m - ## - ## TLS client configuration for etcd, reusing the etcdctl env vars. - # - --client-cert-auth - # - --trusted-ca-file=$(ETCDCTL_CACERT) - # - --cert-file=$(ETCDCTL_CERT) - # - --key-file=$(ETCDCTL_KEY) - ## - ## TLS server configuration for etcdctl in the container. - ## These files paths are part of the "etcd-server-certs" volume mount. - # - --peer-client-cert-auth - # - --peer-trusted-ca-file=/etc/etcd/certs/server/ca.crt - # - --peer-cert-file=/etc/etcd/certs/server/tls.crt - # - --peer-key-file=/etc/etcd/certs/server/tls.key - ## - ## This is the mount configuration. volumeMounts: - name: etcd-data mountPath: /data - ## - ## TLS client configuration for etcdctl - # - name: etcd-client-tls - # mountPath: "/etc/etcd/certs/client" - # readOnly: true - ## - ## TLS server configuration - # - name: etcd-server-tls - # mountPath: "/etc/etcd/certs/server" - # readOnly: true - volumes: - ## - ## TLS client configuration - # - name: etcd-client-tls - # secret: - # secretName: etcd-client-tls - # optional: false - ## - ## TLS server configuration - # - name: etcd-server-tls - # secret: - # secretName: etcd-server-tls - # optional: false - ## - ## This StatefulSet will uses the volumeClaimTemplate field to create a PVC in - ## the cluster for each replica. These PVCs can not be easily resized later. volumeClaimTemplates: - metadata: name: etcd-data spec: accessModes: ["ReadWriteOnce"] - ## - ## In some clusters, it is necessary to explicitly set the storage class. - ## This example will end up using the default storage class. - # storageClassName: "" resources: requests: storage: 1Gi From 006f5a7e462d059a97f30453b844d74b690063dc Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Wed, 19 Nov 2025 09:54:31 +0100 Subject: [PATCH 8/8] Update .github/workflows/end-to-end-tests.yml Co-authored-by: Michel Loiseleur <97035654+mloiseleur@users.noreply.github.com> --- .github/workflows/end-to-end-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/end-to-end-tests.yml b/.github/workflows/end-to-end-tests.yml index 756989af22..d31f09b967 100644 --- a/.github/workflows/end-to-end-tests.yml +++ b/.github/workflows/end-to-end-tests.yml @@ -4,7 +4,7 @@ on: push: branches: pull_request: - branches: [ main ] + branches: [ master ] workflow_dispatch: jobs:
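
If the e2e flow ends up storing records in the etcd instance from e2e/provider/etcd.yaml via external-dns's CoreDNS provider, the stored entries can be inspected directly with the etcd client. The following is a sketch only: the /skydns key prefix and the reversed-domain key shape are the CoreDNS etcd plugin defaults and an assumption here, not something this patch pins down, and the endpoint URL assumes the headless "etcd" Service above:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
    // Assumed endpoint: the headless "etcd" Service from the manifest above.
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"http://etcd-0.etcd.default.svc.cluster.local:2379"},
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Assumed key prefix: the CoreDNS etcd plugin's default /skydns layout,
    // e.g. /skydns/dns/external/externaldns-e2e/... for externaldns-e2e.external.dns.
    resp, err := cli.Get(ctx, "/skydns/", clientv3.WithPrefix())
    if err != nil {
        log.Fatal(err)
    }
    for _, kv := range resp.Kvs {
        fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
    }
}
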