diff --git a/cmd/huatuo-bamai/main.go b/cmd/huatuo-bamai/main.go
index 2eb15022..8fbff0c1 100644
--- a/cmd/huatuo-bamai/main.go
+++ b/cmd/huatuo-bamai/main.go
@@ -83,6 +83,11 @@ func mainAction(ctx *cli.Context) error {
 	}
 
 	blackListed := conf.Get().Tracing.BlackList
+	prom, err := InitMetricsCollector(blackListed, conf.Region)
+	if err != nil {
+		return fmt.Errorf("InitMetricsCollector: %w", err)
+	}
+
 	mgr, err := tracing.NewMgrTracingEvent(blackListed)
 	if err != nil {
 		return err
@@ -92,13 +97,7 @@ func mainAction(ctx *cli.Context) error {
 		return err
 	}
 
-	prom, err := InitMetricsCollector(blackListed, conf.Region)
-	if err != nil {
-		return fmt.Errorf("InitMetricsCollector: %w", err)
-	}
-
 	log.Infof("Initialize the Metrics collector: %v", prom)
-
 	services.Start(conf.Get().APIServer.TCPAddr, mgr, prom)
 
 	// update cpu quota
diff --git a/pkg/metric/collector.go b/pkg/metric/collector.go
index 7efa7af7..0c0ee9bf 100644
--- a/pkg/metric/collector.go
+++ b/pkg/metric/collector.go
@@ -43,11 +43,17 @@ type CollectorWrapper struct {
 type CollectorManager struct {
 	collectors         map[string]*CollectorWrapper
 	hostname           string
+	region             string
 	scrapeDurationDesc *prometheus.Desc
 	scrapeSuccessDesc  *prometheus.Desc
 }
 
 func NewCollectorManager(blackListed []string, region string) (*CollectorManager, error) {
+	// Initialize defaultRegion and defaultHostname first:
+	// NewGaugeData may rely on them for data caching inside tracing.NewRegister.
+	hostname, _ := os.Hostname()
+	defaultRegion, defaultHostname = region, hostname
+
 	tracings, err := tracing.NewRegister(blackListed)
 	if err != nil {
 		return nil, err
@@ -68,23 +74,20 @@ func NewCollectorManager(blackListed []string, region string) (*CollectorManager
 	scrapeDurationDesc := prometheus.NewDesc(
 		prometheus.BuildFQName(promNamespace, "scrape", "collector_duration_seconds"),
 		promNamespace+": Duration of a collector scrape.",
-		[]string{LabelHost, "collector"},
+		[]string{LabelHost, LabelRegion, "collector"},
 		nil,
 	)
 
 	scrapeSuccessDesc := prometheus.NewDesc(
 		prometheus.BuildFQName(promNamespace, "scrape", "collector_success"),
 		promNamespace+": Whether a collector succeeded.",
-		[]string{LabelHost, "collector"},
+		[]string{LabelHost, LabelRegion, "collector"},
 		nil,
 	)
-	hostname, _ := os.Hostname()
-	defaultRegion = region
-	defaultHostname = hostname
-
 	return &CollectorManager{
 		collectors:         collectors,
 		hostname:           hostname,
+		region:             region,
 		scrapeDurationDesc: scrapeDurationDesc,
 		scrapeSuccessDesc:  scrapeSuccessDesc,
 	}, nil
@@ -143,6 +146,6 @@ func (m *CollectorManager) doCollect(collectorName string, c *CollectorWrapper,
 		success = 1
 	}
 
-	ch <- prometheus.MustNewConstMetric(m.scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), m.hostname, collectorName)
-	ch <- prometheus.MustNewConstMetric(m.scrapeSuccessDesc, prometheus.GaugeValue, success, m.hostname, collectorName)
+	ch <- prometheus.MustNewConstMetric(m.scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), m.hostname, m.region, collectorName)
+	ch <- prometheus.MustNewConstMetric(m.scrapeSuccessDesc, prometheus.GaugeValue, success, m.hostname, m.region, collectorName)
 }
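
Reviewer note (not part of the patch): the doCollect change works because client_golang binds label values to a Desc positionally, so the values passed to prometheus.MustNewConstMetric must follow the exact order of the variable labels declared in prometheus.NewDesc; adding LabelRegion to the Desc therefore requires adding m.region in the matching position. Below is a minimal, self-contained sketch of that pattern; the demoCollector type, the "example" namespace, and the "us-east-1" region value are invented for illustration and are not part of huatuo-bamai.

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

// demoCollector mirrors the pattern in the patch: it stores host/region once
// and attaches them as label values on every emitted metric.
type demoCollector struct {
	hostname string
	region   string
	success  *prometheus.Desc
}

func newDemoCollector(region string) *demoCollector {
	hostname, _ := os.Hostname()
	return &demoCollector{
		hostname: hostname,
		region:   region,
		// Variable labels declared as host, region, collector; the values
		// handed to MustNewConstMetric must follow the same order.
		success: prometheus.NewDesc(
			prometheus.BuildFQName("example", "scrape", "collector_success"),
			"Whether a collector succeeded.",
			[]string{"host", "region", "collector"},
			nil,
		),
	}
}

func (c *demoCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.success
}

func (c *demoCollector) Collect(ch chan<- prometheus.Metric) {
	// host, region, collector: same order as in the Desc above.
	ch <- prometheus.MustNewConstMetric(
		c.success, prometheus.GaugeValue, 1,
		c.hostname, c.region, "demo",
	)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(newDemoCollector("us-east-1"))

	// Gather once and print the resulting metric family to show the labels.
	families, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, fam := range families {
		fmt.Println(fam.String())
	}
}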