chore(exporter): rework exporter to fix wrong histogram usage and cache metric data
All checks were successful (ci/woodpecker/push: lint, test, build, deploy).

Tom Neuber 2025-01-10 14:33:44 +01:00
parent 4d2a7acebc
commit 8b7f45563a
Signed by: tom
GPG key ID: F17EFE4272D89FF6
6 changed files with 180 additions and 312 deletions
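
The diff below replaces the exporter's const-metric plumbing (prometheus.Desc descriptors turned into MustNewConstMetric values on every scrape) with long-lived instruments: Gauges for the cache and database data, a Counter for total requests, and a Histogram for request latency, all fed from the *Exporter now held by the Metrics struct. Only this one file is shown, so the surrounding wiring is not part of the commit; the following is a minimal sketch, reusing the field and method names from the diff, of how such instrument-based metrics could be registered once and refreshed before scrapes. Register and Refresh are hypothetical helpers, not code from the commit:

package exporter

import "github.com/prometheus/client_golang/prometheus"

// Hypothetical glue code (not part of this commit): with instrument-based
// metrics, the collectors are registered once and only their values change.
func (m *Metrics) Register(reg prometheus.Registerer) {
	reg.MustRegister(
		m.metricCacheTTL,
		m.metricCurrentlyCached,
		m.metricDatabaseTimestamp,
		m.metricDatabaseReady,
		m.metricRequestsTotal,
		m.metricRequestLatency,
	)
}

// Refresh re-reads the cached values; it could run on a ticker or right
// before the /metrics handler serves a scrape.
func (m *Metrics) Refresh() {
	m.collectCacheTTLMetric()
	m.collectCurrentlyCachedMetric()
	m.collectDatabaseTimestampMetric()
	m.collectDatabaseReadyMetric()
}

From there, a prometheus.NewRegistry() plus promhttp.HandlerFor can back the /metrics endpoint; how the project actually wires this up is outside the shown file.
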


@@ -2,9 +2,7 @@ package exporter
 import (
 	"log"
-	"time"
-	"git.ar21.de/yolokube/country-geo-locations/internal/database"
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -13,205 +11,102 @@
 	cacheSubsystem string = "cache"
 	dbSubsystem string = "db"
-	metricLabelCacheTTL string = "ttl"
-	metricLabelCurrentlyCached string = "currently_cached"
-	metricLabelDatabaseTimestamp string = "timestamp"
-	metricLabelDatabaseReady string = "ready"
-	metricLabelRequestsTotal string = "requests_total"
-	metricLabelRequestLatency string = "request_latency"
+	metricNameCacheTTL string = "ttl"
+	metricNameCurrentlyCached string = "currently_cached"
+	metricNameDatabaseTimestamp string = "timestamp"
+	metricNameDatabaseReady string = "ready"
+	metricNameRequestsTotal string = "requests_total"
+	metricNameRequestLatency string = "request_latency"
 )
 type Metrics struct {
-	metricCacheTTL *prometheus.Desc
-	metricCurrentlyCached *prometheus.Desc
-	metricDatabaseTimestamp *prometheus.Desc
-	metricDatabaseReady *prometheus.Desc
-	metricRequestsTotal *prometheus.Desc
-	metricRequestLatency *prometheus.Desc
+	exporter *Exporter
-	counter uint
+	metricCacheTTL prometheus.Gauge
+	metricCurrentlyCached prometheus.Gauge
+	metricDatabaseTimestamp *prometheus.GaugeVec
+	metricDatabaseReady prometheus.Gauge
+	metricRequestsTotal prometheus.Counter
+	metricRequestLatency prometheus.Histogram
 }
-func NewMetrics() *Metrics {
+func NewMetrics(e *Exporter) *Metrics {
 	return &Metrics{
-		metricCacheTTL: prometheus.NewDesc(
-			prometheus.BuildFQName(
-				namespace,
-				cacheSubsystem,
-				metricLabelCacheTTL,
-			),
-			"Duration for cached requests",
-			nil,
-			nil,
+		exporter: e,
+		metricCacheTTL: prometheus.NewGauge(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Subsystem: cacheSubsystem,
+				Name: metricNameCacheTTL,
+				Help: "Duration for cached requests",
+			},
 		),
-		metricCurrentlyCached: prometheus.NewDesc(
-			prometheus.BuildFQName(
-				namespace,
-				cacheSubsystem,
-				metricLabelCurrentlyCached,
-			),
-			"Number of cached entries",
-			nil,
-			nil,
+		metricCurrentlyCached: prometheus.NewGauge(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Subsystem: cacheSubsystem,
+				Name: metricNameCurrentlyCached,
+				Help: "Number of cached entries",
+			},
 		),
-		metricDatabaseTimestamp: prometheus.NewDesc(
-			prometheus.BuildFQName(
-				namespace,
-				dbSubsystem,
-				metricLabelDatabaseTimestamp,
-			),
-			"Timestamp of the CSV file",
-			[]string{metricLabelDatabaseTimestamp},
-			nil,
+		metricDatabaseTimestamp: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Subsystem: dbSubsystem,
+				Name: metricNameDatabaseTimestamp,
+				Help: "Timestamp of the CSV file",
+			},
+			[]string{metricNameDatabaseTimestamp},
 		),
-		metricDatabaseReady: prometheus.NewDesc(
-			prometheus.BuildFQName(
-				namespace,
-				dbSubsystem,
-				metricLabelDatabaseReady,
-			),
-			"Ready status of the database",
-			nil,
-			nil,
+		metricDatabaseReady: prometheus.NewGauge(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Subsystem: dbSubsystem,
+				Name: metricNameDatabaseReady,
+				Help: "Ready status of the database",
+			},
 		),
-		metricRequestsTotal: prometheus.NewDesc(
-			prometheus.BuildFQName(
-				namespace,
-				"",
-				metricLabelRequestsTotal,
-			),
-			"Counter for total requests",
-			nil,
-			nil,
+		metricRequestsTotal: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name: metricNameRequestsTotal,
+				Help: "Counter for total requests",
+			},
 		),
-		metricRequestLatency: prometheus.NewDesc(
-			prometheus.BuildFQName(
-				namespace,
-				"",
-				metricLabelRequestLatency,
-			),
-			"Latency statistics for requests",
-			nil,
-			nil,
+		metricRequestLatency: prometheus.NewHistogram(
+			prometheus.HistogramOpts{
+				Namespace: namespace,
+				Name: metricNameRequestLatency,
+				Help: "Latency statistics for requests",
+				Buckets: prometheus.ExponentialBuckets(10, 1.5, 30),
+			},
 		),
 	}
 }
-func (m *Metrics) collectCacheTTLMetric(ch chan<- prometheus.Metric, ttl float64) {
-	ch <- prometheus.MustNewConstMetric(
-		m.metricCacheTTL,
-		prometheus.GaugeValue,
-		ttl,
-	)
+func (m *Metrics) collectCacheTTLMetric() {
+	m.metricCacheTTL.Set(m.exporter.config.CacheTTL.Seconds())
 }
-func (m *Metrics) collectCurrentlyCachedMetric(ch chan<- prometheus.Metric, count float64) {
-	ch <- prometheus.MustNewConstMetric(
-		m.metricCurrentlyCached,
-		prometheus.GaugeValue,
-		count,
-	)
+func (m *Metrics) collectCurrentlyCachedMetric() {
+	m.metricCurrentlyCached.Set(float64(m.exporter.cache.Count()))
 }
-func (m *Metrics) collectDatabaseTimestampMetric(ch chan<- prometheus.Metric, db *database.Database) {
-	timestamp, err := db.Timestamp()
-	if err == nil {
-		ch <- prometheus.MustNewConstMetric(
-			m.metricDatabaseTimestamp,
-			prometheus.GaugeValue,
-			float64(timestamp.Unix()),
-			timestamp.String(),
-		)
-	} else {
-		log.Printf("failed to read file timestamp: %v\n", err)
+func (m *Metrics) collectDatabaseTimestampMetric() {
+	timestamp, err := m.exporter.database.Timestamp()
+	if err != nil {
+		log.Printf("failed to read file timestamp: %v", err)
+		return
 	}
+	m.metricDatabaseTimestamp.WithLabelValues(timestamp.String()).Set(float64(timestamp.Unix()))
 }
-func (m *Metrics) collectDatabaseReadyMetric(ch chan<- prometheus.Metric, ready bool) {
-	var dbReady uint8
-	if ready {
+func (m *Metrics) collectDatabaseReadyMetric() {
+	var dbReady float64
+	if m.exporter.database.IsReady() {
 		dbReady = 1
 	}
-	ch <- prometheus.MustNewConstMetric(
-		m.metricDatabaseReady,
-		prometheus.GaugeValue,
-		float64(dbReady),
-	)
-}
-func (m *Metrics) collectReqeustDataMetrics(ch chan<- prometheus.Metric, queue *RequestDataQueue) {
-	var (
-		count uint64
-		sum float64
-	)
-	buckets := make(map[float64]uint64)
-	bucketBounds := []float64{
-		10,
-		20,
-		30,
-		40,
-		50,
-		60,
-		70,
-		80,
-		90,
-		100,
-		200,
-		300,
-		400,
-		500,
-		600,
-		700,
-		800,
-		900,
-		1000,
-		1500,
-		2000,
-		2500,
-		3000,
-		3500,
-		4000,
-		4500,
-		5000,
-		10000,
-		20000,
-		30000,
-		40000,
-		50000,
-		100000,
-		200000,
-		300000,
-		400000,
-		500000,
-		1000000,
-	}
-	data := queue.ConsumeAll()
-	for _, r := range data {
-		latency := float64(r.Latency.Microseconds())
-		sum += latency
-		count++
-		for _, bound := range bucketBounds {
-			if latency <= bound {
-				buckets[bound]++
-			}
-		}
-	}
-	m.counter += uint(len(data))
-	ch <- prometheus.MustNewConstMetric(
-		m.metricRequestsTotal,
-		prometheus.CounterValue,
-		float64(m.counter),
-	)
-	ch <- prometheus.MustNewConstHistogramWithCreatedTimestamp(
-		m.metricRequestLatency,
-		count,
-		sum,
-		buckets,
-		time.Now(),
-	)
+	m.metricDatabaseReady.Set(dbReady)
 }
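
The removed collectReqeustDataMetrics shows the wrong histogram usage named in the commit message: it drained the RequestDataQueue on every scrape, computed buckets, count, and sum from only that batch, and emitted a fresh const histogram stamped with time.Now() as its created timestamp, so the exposed histogram never accumulated across scrapes. A prometheus.Histogram is instead observed once per request, and the client library keeps the cumulative state. A minimal sketch of that call-site side, assuming latencies are still measured in microseconds as in the removed code; observeRequest is a hypothetical helper, not part of the commit:

package exporter

import "time"

// Hypothetical call site (not part of this commit): each request observes its
// own latency once, and client_golang maintains the cumulative count, sum,
// and bucket counters across scrapes.
func (m *Metrics) observeRequest(latency time.Duration) {
	m.metricRequestsTotal.Inc()
	// ExponentialBuckets(10, 1.5, 30) yields 30 upper bounds from 10 up to
	// 10*1.5^29 ≈ 1.28e6, roughly matching the old hand-written microsecond
	// bounds (10 µs ... 1,000,000 µs), so observations stay in microseconds.
	m.metricRequestLatency.Observe(float64(latency.Microseconds()))
}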