14 | 14 | package collector |
15 | 15 |
16 | 16 | import ( |
| 17 | + "context" |
17 | 18 | "encoding/json" |
18 | 19 | "fmt" |
19 | | - "io" |
20 | 20 | "log/slog" |
21 | 21 | "net/http" |
22 | 22 | "net/url" |
23 | | - "path" |
24 | 23 |
25 | 24 | "github.com/prometheus/client_golang/prometheus" |
26 | 25 | ) |
27 | 26 |
28 | | -type dataStreamMetric struct { |
29 | | - Type prometheus.ValueType |
30 | | - Desc *prometheus.Desc |
31 | | - Value func(dataStreamStats DataStreamStatsDataStream) float64 |
32 | | - Labels func(dataStreamStats DataStreamStatsDataStream) []string |
33 | | -} |
34 | | - |
35 | 27 | var ( |
36 | | - defaultDataStreamLabels = []string{"data_stream"} |
37 | | - defaultDataStreamLabelValues = func(dataStreamStats DataStreamStatsDataStream) []string { |
38 | | - return []string{dataStreamStats.DataStream} |
39 | | - } |
| 28 | + dataStreamBackingIndicesTotal = prometheus.NewDesc( |
| 29 | + prometheus.BuildFQName(namespace, "data_stream", "backing_indices_total"), |
| 30 | + "Number of backing indices", |
| 31 | + []string{"data_stream"}, |
| 32 | + nil, |
| 33 | + ) |
| 34 | + dataStreamStoreSizeBytes = prometheus.NewDesc( |
| 35 | + prometheus.BuildFQName(namespace, "data_stream", "store_size_bytes"), |
| 36 | + "Store size of data stream", |
| 37 | + []string{"data_stream"}, |
| 38 | + nil, |
| 39 | + ) |
40 | 40 | ) |
41 | 41 |
| 42 | +func init() { |
| 43 | + registerCollector("data-stream", defaultDisabled, NewDataStream) |
| 44 | +} |
| 45 | + |
42 | 46 | // DataStream Information Struct |
43 | 47 | type DataStream struct { |
44 | 48 | logger *slog.Logger |
45 | | - client *http.Client |
46 | | - url *url.URL |
47 | | - |
48 | | - dataStreamMetrics []*dataStreamMetric |
| 49 | + hc *http.Client |
| 50 | + u *url.URL |
49 | 51 | } |
50 | 52 |
51 | 53 | // NewDataStream defines DataStream Prometheus metrics |
52 | | -func NewDataStream(logger *slog.Logger, client *http.Client, url *url.URL) *DataStream { |
| 54 | +func NewDataStream(logger *slog.Logger, u *url.URL, hc *http.Client) (Collector, error) { |
53 | 55 | return &DataStream{ |
54 | 56 | logger: logger, |
55 | | - client: client, |
56 | | - url: url, |
57 | | - |
58 | | - dataStreamMetrics: []*dataStreamMetric{ |
59 | | - { |
60 | | - Type: prometheus.CounterValue, |
61 | | - Desc: prometheus.NewDesc( |
62 | | - prometheus.BuildFQName(namespace, "data_stream", "backing_indices_total"), |
63 | | - "Number of backing indices", |
64 | | - defaultDataStreamLabels, nil, |
65 | | - ), |
66 | | - Value: func(dataStreamStats DataStreamStatsDataStream) float64 { |
67 | | - return float64(dataStreamStats.BackingIndices) |
68 | | - }, |
69 | | - Labels: defaultDataStreamLabelValues, |
70 | | - }, |
71 | | - { |
72 | | - Type: prometheus.CounterValue, |
73 | | - Desc: prometheus.NewDesc( |
74 | | - prometheus.BuildFQName(namespace, "data_stream", "store_size_bytes"), |
75 | | - "Store size of data stream", |
76 | | - defaultDataStreamLabels, nil, |
77 | | - ), |
78 | | - Value: func(dataStreamStats DataStreamStatsDataStream) float64 { |
79 | | - return float64(dataStreamStats.StoreSizeBytes) |
80 | | - }, |
81 | | - Labels: defaultDataStreamLabelValues, |
82 | | - }, |
83 | | - }, |
84 | | - } |
| 57 | + hc: hc, |
| 58 | + u: u, |
| 59 | + }, nil |
85 | 60 | } |
86 | 61 |
87 | | -// Describe adds DataStream metrics descriptions |
88 | | -func (ds *DataStream) Describe(ch chan<- *prometheus.Desc) { |
89 | | - for _, metric := range ds.dataStreamMetrics { |
90 | | - ch <- metric.Desc |
91 | | - } |
| 62 | +// DataStreamStatsResponse is a representation of the Data Stream stats |
| 63 | +type DataStreamStatsResponse struct { |
| 64 | + Shards DataStreamStatsShards `json:"_shards"` |
| 65 | + DataStreamCount int64 `json:"data_stream_count"` |
| 66 | + BackingIndices int64 `json:"backing_indices"` |
| 67 | + TotalStoreSizeBytes int64 `json:"total_store_size_bytes"` |
| 68 | + DataStreamStats []DataStreamStatsDataStream `json:"data_streams"` |
92 | 69 | } |
93 | 70 |
94 | | -func (ds *DataStream) fetchAndDecodeDataStreamStats() (DataStreamStatsResponse, error) { |
95 | | - var dsr DataStreamStatsResponse |
| 71 | +// DataStreamStatsShards defines data stream stats shards information structure |
| 72 | +type DataStreamStatsShards struct { |
| 73 | + Total int64 `json:"total"` |
| 74 | + Successful int64 `json:"successful"` |
| 75 | + Failed int64 `json:"failed"` |
| 76 | +} |
96 | 77 |
97 | | - u := *ds.url |
98 | | - u.Path = path.Join(u.Path, "/_data_stream/*/_stats") |
99 | | - res, err := ds.client.Get(u.String()) |
100 | | - if err != nil { |
101 | | - return dsr, fmt.Errorf("failed to get data stream stats health from %s://%s:%s%s: %s", |
102 | | - u.Scheme, u.Hostname(), u.Port(), u.Path, err) |
103 | | - } |
| 78 | +// DataStreamStatsDataStream defines the structure of per data stream stats |
| 79 | +type DataStreamStatsDataStream struct { |
| 80 | + DataStream string `json:"data_stream"` |
| 81 | + BackingIndices int64 `json:"backing_indices"` |
| 82 | + StoreSizeBytes int64 `json:"store_size_bytes"` |
| 83 | + MaximumTimestamp int64 `json:"maximum_timestamp"` |
| 84 | +} |
104 | 85 |
105 | | - defer func() { |
106 | | - err = res.Body.Close() |
107 | | - if err != nil { |
108 | | - ds.logger.Warn( |
109 | | - "failed to close http.Client", |
110 | | - "err", err, |
111 | | - ) |
112 | | - } |
113 | | - }() |
114 | | - |
115 | | - if res.StatusCode != http.StatusOK { |
116 | | - return dsr, fmt.Errorf("HTTP Request failed with code %d", res.StatusCode) |
117 | | - } |
| 86 | +func (ds *DataStream) Update(ctx context.Context, ch chan<- prometheus.Metric) error { |
| 87 | + var dsr DataStreamStatsResponse |
| 88 | + |
| 89 | + u := ds.u.ResolveReference(&url.URL{Path: "/_data_stream/*/_stats"}) |
118 | 90 |
119 | | - bts, err := io.ReadAll(res.Body) |
| 91 | + resp, err := getURL(ctx, ds.hc, ds.logger, u.String()) |
120 | 92 | if err != nil { |
121 | | - return dsr, err |
| 93 | + return err |
122 | 94 | } |
123 | 95 |
124 | | - if err := json.Unmarshal(bts, &dsr); err != nil { |
125 | | - return dsr, err |
| 96 | + if err := json.Unmarshal(resp, &dsr); err != nil { |
| 97 | + return err |
126 | 98 | } |
127 | 99 |
128 | | - return dsr, nil |
129 | | -} |
| 100 | + for _, dataStream := range dsr.DataStreamStats { |
| 101 | + fmt.Printf("Metric: %+v", dataStream) |
130 | 102 |
131 | | -// Collect gets DataStream metric values |
132 | | -func (ds *DataStream) Collect(ch chan<- prometheus.Metric) { |
| 103 | + ch <- prometheus.MustNewConstMetric( |
| 104 | + dataStreamBackingIndicesTotal, |
| 105 | + prometheus.CounterValue, |
| 106 | + float64(dataStream.BackingIndices), |
| 107 | + dataStream.DataStream, |
| 108 | + ) |
133 | 109 |
134 | | - dataStreamStatsResp, err := ds.fetchAndDecodeDataStreamStats() |
135 | | - if err != nil { |
136 | | - ds.logger.Warn( |
137 | | - "failed to fetch and decode data stream stats", |
138 | | - "err", err, |
| 110 | + ch <- prometheus.MustNewConstMetric( |
| 111 | + dataStreamStoreSizeBytes, |
| 112 | + prometheus.CounterValue, |
| 113 | + float64(dataStream.StoreSizeBytes), |
| 114 | + dataStream.DataStream, |
139 | 115 | ) |
140 | | - return |
141 | | - } |
142 | 116 |
143 | | - for _, metric := range ds.dataStreamMetrics { |
144 | | - for _, dataStream := range dataStreamStatsResp.DataStreamStats { |
145 | | - fmt.Printf("Metric: %+v", dataStream) |
146 | | - ch <- prometheus.MustNewConstMetric( |
147 | | - metric.Desc, |
148 | | - metric.Type, |
149 | | - metric.Value(dataStream), |
150 | | - metric.Labels(dataStream)..., |
151 | | - ) |
152 | | - } |
153 | 117 | } |
| 118 | + |
| 119 | + return nil |
| 120 | + |
154 | 121 | } |
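
Note: the diff above moves the data stream collector onto the package's shared Collector pattern (`registerCollector`, a constructor returning `(Collector, error)`, and an `Update(ctx, ch)` method that fetches stats via the `getURL` helper). The sketch below shows one way the refactored collector could be exercised against a stubbed `_data_stream/*/_stats` endpoint. It assumes `getURL` performs a plain GET and returns the response body, and that the package's Collector interface exposes `Update` with the signature shown in the diff; the httptest server, the sample JSON values, and the `TestDataStreamUpdateSketch` name are illustrative, not part of the commit.

```go
// Sketch only, not part of the commit: exercises NewDataStream and Update
// against a stubbed Elasticsearch _data_stream/*/_stats endpoint.
package collector

import (
	"context"
	"log/slog"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

func TestDataStreamUpdateSketch(t *testing.T) {
	// Stub the stats endpoint with a single data stream (values are made up).
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte(`{
			"_shards": {"total": 2, "successful": 2, "failed": 0},
			"data_stream_count": 1,
			"backing_indices": 3,
			"total_store_size_bytes": 1024,
			"data_streams": [
				{"data_stream": "logs-app-default", "backing_indices": 3,
				 "store_size_bytes": 1024, "maximum_timestamp": 0}
			]
		}`))
	}))
	defer ts.Close()

	u, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}

	c, err := NewDataStream(slog.Default(), u, http.DefaultClient)
	if err != nil {
		t.Fatal(err)
	}

	// Drain metrics from Update; the single data stream above should yield
	// one backing_indices_total and one store_size_bytes sample.
	ch := make(chan prometheus.Metric, 10)
	if err := c.Update(context.Background(), ch); err != nil {
		t.Fatalf("Update() returned error: %v", err)
	}
	close(ch)

	if got := len(ch); got != 2 {
		t.Fatalf("expected 2 metrics, got %d", got)
	}
}
```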