country-geo-locations/internal/exporter/metrics.go
Tom Neuber 53954090a3
All checks were successful
ci/woodpecker/push/lint Pipeline was successful
ci/woodpecker/push/test Pipeline was successful
ci/woodpecker/push/build Pipeline was successful
fix(metrics): correct cumulative count calculation
2024-12-30 23:42:52 +01:00

223 lines
4.2 KiB
Go

package exporter
import (
"log"
"time"
"git.ar21.de/yolokube/country-geo-locations/internal/database"
"github.com/prometheus/client_golang/prometheus"
)
const (
	// namespace is the common Prometheus metric prefix for this exporter.
	namespace string = "country_geo_locations"

	// Metric subsystems (middle component of the fully-qualified name).
	cacheSubsystem string = "cache"
	dbSubsystem    string = "db"

	// Metric names (final component of the fully-qualified name).
	metricLabelCacheTTL          string = "ttl"
	metricLabelCurrentlyCached   string = "currently_cached"
	metricLabelDatabaseTimestamp string = "timestamp"
	metricLabelDatabaseReady     string = "ready"
	metricLabelRequestsTotal     string = "requests_total"
	metricLabelRequestLatency    string = "request_latency"
)
// Metrics holds the Prometheus descriptors for all exporter metrics plus
// the running request counter that backs the requests_total metric.
type Metrics struct {
	metricCacheTTL          *prometheus.Desc // gauge: configured cache TTL
	metricCurrentlyCached   *prometheus.Desc // gauge: number of cached entries
	metricDatabaseTimestamp *prometheus.Desc // gauge: CSV file timestamp (unix seconds)
	metricDatabaseReady     *prometheus.Desc // gauge: 1 if the database is ready, else 0
	metricRequestsTotal     *prometheus.Desc // counter: total requests seen so far
	metricRequestLatency    *prometheus.Desc // histogram: request latency in microseconds
	// counter accumulates the total number of consumed request records
	// across scrapes; exposed as requests_total.
	counter uint
}
// NewMetrics builds the descriptor set for every metric exposed by the
// exporter. All descriptors share the exporter namespace; only the
// database timestamp metric carries a variable label.
func NewMetrics() *Metrics {
	// newDesc assembles one fully-qualified descriptor with constant
	// labels always unset.
	newDesc := func(subsystem, name, help string, variableLabels []string) *prometheus.Desc {
		return prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, name),
			help,
			variableLabels,
			nil,
		)
	}

	return &Metrics{
		metricCacheTTL:        newDesc(cacheSubsystem, metricLabelCacheTTL, "Duration for cached requests", nil),
		metricCurrentlyCached: newDesc(cacheSubsystem, metricLabelCurrentlyCached, "Number of cached entries", nil),
		metricDatabaseTimestamp: newDesc(
			dbSubsystem,
			metricLabelDatabaseTimestamp,
			"Timestamp of the CSV file",
			[]string{metricLabelDatabaseTimestamp},
		),
		metricDatabaseReady:  newDesc(dbSubsystem, metricLabelDatabaseReady, "Ready status of the database", nil),
		metricRequestsTotal:  newDesc("", metricLabelRequestsTotal, "Counter for total requests", nil),
		metricRequestLatency: newDesc("", metricLabelRequestLatency, "Latency statistics for requests", nil),
	}
}
// collectCacheTTLMetric emits the configured cache TTL as a gauge.
func (m *Metrics) collectCacheTTLMetric(ch chan<- prometheus.Metric, ttl float64) {
	metric := prometheus.MustNewConstMetric(m.metricCacheTTL, prometheus.GaugeValue, ttl)
	ch <- metric
}
// collectCurrentlyCachedMetric emits the current number of cache entries
// as a gauge.
func (m *Metrics) collectCurrentlyCachedMetric(ch chan<- prometheus.Metric, count float64) {
	metric := prometheus.MustNewConstMetric(m.metricCurrentlyCached, prometheus.GaugeValue, count)
	ch <- metric
}
// collectDatabaseTimestampMetric emits the CSV file's timestamp as a
// gauge (unix seconds), labeled with the human-readable timestamp.
// If the timestamp cannot be read, the error is logged and no sample
// is emitted for this scrape.
func (m *Metrics) collectDatabaseTimestampMetric(ch chan<- prometheus.Metric, db *database.Database) {
	timestamp, err := db.Timestamp()
	if err != nil {
		log.Printf("failed to read file timestamp: %v\n", err)
		return
	}
	ch <- prometheus.MustNewConstMetric(
		m.metricDatabaseTimestamp,
		prometheus.GaugeValue,
		float64(timestamp.Unix()),
		timestamp.String(),
	)
}
// collectDatabaseReadyMetric emits the database readiness as a 0/1 gauge.
func (m *Metrics) collectDatabaseReadyMetric(ch chan<- prometheus.Metric, ready bool) {
	value := 0.0
	if ready {
		value = 1.0
	}
	ch <- prometheus.MustNewConstMetric(m.metricDatabaseReady, prometheus.GaugeValue, value)
}
// collectReqeustDataMetrics drains the request-data queue and emits the
// total-request counter plus a latency histogram (microseconds).
//
// NOTE(review): the method name contains a typo ("Reqeust"); renaming
// would break in-package callers, so it is kept as-is.
//
// Bug fix: the previous implementation incremented EVERY bucket whose
// bound was >= the observed latency (no break), which already produced
// cumulative counts — and then ran a second cumulative pass on top,
// double-counting observations. The resulting bucket counts exceeded the
// histogram's total count, which violates the Prometheus histogram
// contract (bucket counts must be cumulative and bounded by the total).
// Now each observation lands in exactly one bucket and the cumulative
// sum is computed in a single pass.
func (m *Metrics) collectReqeustDataMetrics(ch chan<- prometheus.Metric, queue *RequestDataQueue) {
	// Upper bounds (microseconds) of the latency histogram buckets.
	// Observations above the last bound fall only into the implicit
	// +Inf bucket, which the histogram derives from the total count.
	bucketBounds := []float64{
		10, 20, 30, 40, 50, 60, 70, 80, 90, 100,
		200, 300, 400, 500, 600, 700, 800, 900, 1000,
		1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000,
		10000, 20000, 30000, 40000, 50000,
		100000, 200000, 300000, 400000, 500000,
		1000000,
	}

	var (
		count uint64  // number of observations in this scrape window
		sum   float64 // sum of latencies in microseconds
	)
	buckets := make(map[float64]uint64, len(bucketBounds))

	data := queue.ConsumeAll()
	for _, r := range data {
		latency := float64(r.Latency.Microseconds())
		sum += latency
		count++
		// Place each observation in exactly one bucket; the
		// cumulative pass below turns these into running totals.
		for _, bound := range bucketBounds {
			if latency <= bound {
				buckets[bound]++
				break
			}
		}
	}

	// Prometheus histogram buckets are cumulative: each bucket counts
	// all observations at or below its bound.
	var cumulativeCount uint64
	for _, bound := range bucketBounds {
		cumulativeCount += buckets[bound]
		buckets[bound] = cumulativeCount
	}

	// requests_total is monotonically increasing across scrapes.
	m.counter += uint(len(data))
	ch <- prometheus.MustNewConstMetric(
		m.metricRequestsTotal,
		prometheus.CounterValue,
		float64(m.counter),
	)
	ch <- prometheus.MustNewConstHistogramWithCreatedTimestamp(
		m.metricRequestLatency,
		count,
		sum,
		buckets,
		time.Now(),
	)
}