@@ -16,16 +16,17 @@ package collector
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/prometheus-community/elasticsearch_exporter/pkg/clusterinfo"
-	"github.com/prometheus/client_golang/prometheus"
 	"io"
 	"net/http"
 	"net/url"
 	"path"
 	"sort"
 	"strconv"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus-community/elasticsearch_exporter/pkg/clusterinfo"
+	"github.com/prometheus/client_golang/prometheus"
 )
 
 type labels struct {
@@ -64,10 +65,6 @@ type Indices struct {
 	clusterInfoCh   chan *clusterinfo.Response
 	lastClusterInfo *clusterinfo.Response
 
-	up                prometheus.Gauge
-	totalScrapes      prometheus.Counter
-	jsonParseFailures prometheus.Counter
-
 	indexMetrics []*indexMetric
 	shardMetrics []*shardMetric
 	aliasMetrics []*aliasMetric
@@ -129,19 +126,6 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo
 			ClusterName: "unknown_cluster",
 		},
 
-		up: prometheus.NewGauge(prometheus.GaugeOpts{
-			Name: prometheus.BuildFQName(namespace, "index_stats", "up"),
-			Help: "Was the last scrape of the Elasticsearch index endpoint successful.",
-		}),
-		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
-			Name: prometheus.BuildFQName(namespace, "index_stats", "total_scrapes"),
-			Help: "Current total Elasticsearch index scrapes.",
-		}),
-		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
-			Name: prometheus.BuildFQName(namespace, "index_stats", "json_parse_failures"),
-			Help: "Number of errors while parsing JSON.",
-		}),
-
 		indexMetrics: []*indexMetric{
 			{
 				Type: prometheus.GaugeValue,
@@ -1117,9 +1101,10 @@ func (i *Indices) Describe(ch chan<- *prometheus.Desc) {
 	for _, metric := range i.indexMetrics {
 		ch <- metric.Desc
 	}
-	ch <- i.up.Desc()
-	ch <- i.totalScrapes.Desc()
-	ch <- i.jsonParseFailures.Desc()
+	for _, metric := range i.aliasMetrics {
+		ch <- metric.Desc
+	}
+
 }
 
 func (i *Indices) fetchAndDecodeIndexStats() (indexStatsResponse, error) {
@@ -1139,7 +1124,6 @@ func (i *Indices) fetchAndDecodeIndexStats() (indexStatsResponse, error) {
 	}
 
 	if err := json.Unmarshal(bts, &isr); err != nil {
-		i.jsonParseFailures.Inc()
 		return isr, err
 	}
 
@@ -1179,7 +1163,6 @@ func (i *Indices) fetchAndDecodeAliases() (aliasesResponse, error) {
 	}
 
 	if err := json.Unmarshal(bts, &asr); err != nil {
-		i.jsonParseFailures.Inc()
 		return asr, err
 	}
 
@@ -1217,24 +1200,15 @@ func (i *Indices) queryURL(u *url.URL) ([]byte, error) {
 
 // Collect gets Indices metric values
 func (i *Indices) Collect(ch chan<- prometheus.Metric) {
-	i.totalScrapes.Inc()
-	defer func() {
-		ch <- i.up
-		ch <- i.totalScrapes
-		ch <- i.jsonParseFailures
-	}()
-
 	// indices
 	indexStatsResp, err := i.fetchAndDecodeIndexStats()
 	if err != nil {
-		i.up.Set(0)
 		level.Warn(i.logger).Log(
 			"msg", "failed to fetch and decode index stats",
 			"err", err,
 		)
 		return
 	}
-	i.up.Set(1)
 
 	// Alias stats
 	if i.aliases {