feat(exporter): add prometheus exporter for cache, database & webserver stats
parent 95a3a16fdc
commit 4bbda96dc7

8 changed files with 405 additions and 18 deletions
internal/cache/cache.go (vendored) | 9

@@ -16,6 +16,15 @@ func NewCache(ttl time.Duration) *Cache {
     }
 }
 
+func (c *Cache) Count() uint {
+    var counter uint
+    c.store.Range(func(_, _ any) bool {
+        counter++
+        return true
+    })
+    return counter
+}
+
 func (c *Cache) Set(key, value uint) {
     c.store.Store(key, value)
     time.AfterFunc(c.ttl, func() {
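Count has no comment in the diff, so for context: sync.Map deliberately exposes no length, which is why the method walks the whole map with Range. A minimal standalone sketch of the same pattern; the Delete inside the AfterFunc callback is an assumption, since the diff cuts off inside Set:

package main

import (
    "fmt"
    "sync"
    "time"
)

// minimalCache mirrors the counting pattern from Cache.Count:
// sync.Map has no length accessor, so Range is used to tally entries.
type minimalCache struct {
    store sync.Map
    ttl   time.Duration
}

func (c *minimalCache) Set(key, value uint) {
    c.store.Store(key, value)
    // Expire the entry after ttl (assumed; the diff truncates inside Set).
    time.AfterFunc(c.ttl, func() { c.store.Delete(key) })
}

func (c *minimalCache) Count() uint {
    var n uint
    c.store.Range(func(_, _ any) bool {
        n++
        return true
    })
    return n
}

func main() {
    c := &minimalCache{ttl: time.Minute}
    c.Set(1, 42)
    c.Set(2, 7)
    fmt.Println(c.Count()) // 2
}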

internal/database/database.go
@@ -2,7 +2,10 @@ package database
 
 import (
     "errors"
+    "os"
+    "time"
 
+    "git.ar21.de/yolokube/country-geo-locations/internal/cmd"
     "git.ar21.de/yolokube/country-geo-locations/pkg/geoloc"
     "github.com/hashicorp/go-memdb"
 )
@@ -13,10 +16,13 @@ var (
 )
 
 type Database struct {
-    db *memdb.MemDB
+    ready bool
+
+    config *cmd.AppSettings
+    db     *memdb.MemDB
 }
 
-func NewDatabase() (*Database, error) {
+func NewDatabase(config *cmd.AppSettings) (*Database, error) {
     database, err := memdb.NewMemDB(
         &memdb.DBSchema{
             Tables: map[string]*memdb.TableSchema{
@@ -38,7 +44,9 @@ func NewDatabase() (*Database, error) {
     }
 
     return &Database{
-        db: database,
+        ready:  false,
+        config: config,
+        db:     database,
     }, nil
 }
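Callers now have to thread the application settings into the constructor. The updated call site sits in one of the changed files not shown in this excerpt; a hedged sketch of what it plausibly looks like, assuming cmd.AppSettings exposes the DataFile and CacheTTL fields referenced elsewhere in this diff and can be built directly (in the real cmd package it is presumably populated from flags). A fuller wiring sketch follows the collector.go listing below.

package main

import (
    "log"
    "time"

    "git.ar21.de/yolokube/country-geo-locations/internal/cmd"
    "git.ar21.de/yolokube/country-geo-locations/internal/database"
)

func main() {
    // Hypothetical settings; only DataFile and CacheTTL appear in the diff,
    // other fields of cmd.AppSettings are unknown here.
    settings := &cmd.AppSettings{
        DataFile: "/data/geo.csv", // hypothetical path
        CacheTTL: 5 * time.Minute,
    }

    db, err := database.NewDatabase(settings)
    if err != nil {
        log.Fatalf("failed to create database: %v", err)
    }
    _ = db // loaded and queried elsewhere
}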
@@ -53,9 +61,14 @@ func (d *Database) Load(ipinfos []geoloc.IPInfo) error {
     }
 
     txn.Commit()
+    d.ready = true
     return nil
 }
 
+func (d *Database) IsReady() bool {
+    return d.ready
+}
+
 func (d *Database) SearchIPNet(ipnetnum uint) (*geoloc.IPInfo, error) {
     txn := d.db.Txn(false)
     defer txn.Abort()
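The new ready flag is what the collector later turns into the db ready gauge; the same flag could back a readiness probe. A small hypothetical sketch, not part of this commit; the package and handler names are assumptions:

package probe

import (
    "net/http"

    "git.ar21.de/yolokube/country-geo-locations/internal/database"
)

// readyHandler is a hypothetical readiness probe built on Database.IsReady:
// it answers 200 once Load has committed the data, 503 before that.
func readyHandler(db *database.Database) http.HandlerFunc {
    return func(w http.ResponseWriter, _ *http.Request) {
        if !db.IsReady() {
            http.Error(w, "database not ready", http.StatusServiceUnavailable)
            return
        }
        w.WriteHeader(http.StatusOK)
    }
}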
@@ -87,3 +100,17 @@ func (d *Database) SearchIPNet(ipnetnum uint) (*geoloc.IPInfo, error) {
 
     return &ipinfo, nil
 }
+
+func (d *Database) Timestamp() (time.Time, error) {
+    file, err := os.Open(d.config.DataFile)
+    if err != nil {
+        return time.Time{}, err
+    }
+    defer file.Close()
+
+    stats, err := file.Stat()
+    if err != nil {
+        return time.Time{}, err
+    }
+
+    return stats.ModTime(), nil
+}
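Timestamp only needs the file's modification time, so the Open/Stat pair could also be collapsed into a single os.Stat call that never holds a file handle. A standalone sketch of that variant, with a hypothetical path:

package main

import (
    "fmt"
    "log"
    "os"
    "time"
)

// fileTimestamp mirrors Database.Timestamp but uses os.Stat directly,
// so no file handle is opened just to read the modification time.
func fileTimestamp(path string) (time.Time, error) {
    stats, err := os.Stat(path)
    if err != nil {
        return time.Time{}, err
    }
    return stats.ModTime(), nil
}

func main() {
    ts, err := fileTimestamp("data.csv") // hypothetical path
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ts.Unix(), ts.String())
}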
internal/exporter/collector.go (new file) | 46

@@ -0,0 +1,46 @@
package exporter

import (
    "git.ar21.de/yolokube/country-geo-locations/internal/cache"
    "git.ar21.de/yolokube/country-geo-locations/internal/cmd"
    "git.ar21.de/yolokube/country-geo-locations/internal/database"
    "github.com/prometheus/client_golang/prometheus"
)

type Collector struct {
    config  *cmd.AppSettings
    cache   *cache.Cache
    db      *database.Database
    metrics *Metrics
    queue   *RequestDataQueue
}

func NewCollector(
    config *cmd.AppSettings,
    cache *cache.Cache,
    db *database.Database,
    queue *RequestDataQueue,
) *Collector {
    return &Collector{
        config:  config,
        cache:   cache,
        db:      db,
        metrics: NewMetrics(),
        queue:   queue,
    }
}

func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
    ch <- c.metrics.metricCacheTTL
    ch <- c.metrics.metricCurrentlyCached
    ch <- c.metrics.metricDatabaseTimestamp
    ch <- c.metrics.metricDatabaseReady
    ch <- c.metrics.metricRequestsTotal
    ch <- c.metrics.metricRequestLatency
}

func (c *Collector) Collect(ch chan<- prometheus.Metric) {
    c.metrics.collectCacheTTLMetric(ch, c.config.CacheTTL.Seconds())
    c.metrics.collectCurrentlyCachedMetric(ch, float64(c.cache.Count()))
    c.metrics.collectDatabaseTimestampMetric(ch, c.db)
    c.metrics.collectDatabaseReadyMetric(ch, c.db.IsReady())
    c.metrics.collectRequestDataMetrics(ch, c.queue)
}
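Only five of the eight changed files are shown in this excerpt, so the webserver wiring is not visible here. A minimal sketch of how the pieces could fit together: build the dependencies, register the Collector with its own Prometheus registry, mount the request middleware (defined in internal/exporter/middleware.go, listed further below), and expose promhttp. The /metrics path, the listen address, and the settings values are assumptions, not taken from the commit:

package main

import (
    "log"
    "net/http"
    "time"

    "github.com/go-chi/chi/v5"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"

    "git.ar21.de/yolokube/country-geo-locations/internal/cache"
    "git.ar21.de/yolokube/country-geo-locations/internal/cmd"
    "git.ar21.de/yolokube/country-geo-locations/internal/database"
    "git.ar21.de/yolokube/country-geo-locations/internal/exporter"
)

func main() {
    // Hypothetical settings; field values are placeholders.
    settings := &cmd.AppSettings{
        DataFile: "/data/geo.csv",
        CacheTTL: 5 * time.Minute,
    }

    db, err := database.NewDatabase(settings)
    if err != nil {
        log.Fatalf("database setup failed: %v", err)
    }

    c := cache.NewCache(settings.CacheTTL)
    queue := exporter.NewRequestDataQueue()

    // Register the custom collector with a dedicated registry.
    registry := prometheus.NewRegistry()
    registry.MustRegister(exporter.NewCollector(settings, c, db, queue))

    // Record latency for every request and expose the metrics endpoint.
    r := chi.NewRouter()
    r.Use(exporter.NewMiddleware(queue))
    r.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))

    log.Fatal(http.ListenAndServe(":8080", r))
}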
internal/exporter/metrics.go (new file) | 175

@@ -0,0 +1,175 @@
package exporter

import (
    "log"
    "time"

    "git.ar21.de/yolokube/country-geo-locations/internal/database"
    "github.com/prometheus/client_golang/prometheus"
)

const (
    namespace      string = "country_geo_locations"
    cacheSubsystem string = "cache"
    dbSubsystem    string = "db"

    metricLabelCacheTTL          string = "ttl"
    metricLabelCurrentlyCached   string = "currently_cached"
    metricLabelDatabaseTimestamp string = "timestamp"
    metricLabelDatabaseReady     string = "ready"
    metricLabelRequestsTotal     string = "requests_total"
    metricLabelRequestLatency    string = "request_latency"
)

type Metrics struct {
    metricCacheTTL          *prometheus.Desc
    metricCurrentlyCached   *prometheus.Desc
    metricDatabaseTimestamp *prometheus.Desc
    metricDatabaseReady     *prometheus.Desc
    metricRequestsTotal     *prometheus.Desc
    metricRequestLatency    *prometheus.Desc
}

func NewMetrics() *Metrics {
    return &Metrics{
        metricCacheTTL: prometheus.NewDesc(
            prometheus.BuildFQName(
                namespace,
                cacheSubsystem,
                metricLabelCacheTTL,
            ),
            "Duration for cached requests",
            nil,
            nil,
        ),
        metricCurrentlyCached: prometheus.NewDesc(
            prometheus.BuildFQName(
                namespace,
                cacheSubsystem,
                metricLabelCurrentlyCached,
            ),
            "Number of cached entries",
            nil,
            nil,
        ),
        metricDatabaseTimestamp: prometheus.NewDesc(
            prometheus.BuildFQName(
                namespace,
                dbSubsystem,
                metricLabelDatabaseTimestamp,
            ),
            "Timestamp of the CSV file",
            []string{metricLabelDatabaseTimestamp},
            nil,
        ),
        metricDatabaseReady: prometheus.NewDesc(
            prometheus.BuildFQName(
                namespace,
                dbSubsystem,
                metricLabelDatabaseReady,
            ),
            "Ready status of the database",
            nil,
            nil,
        ),
        metricRequestsTotal: prometheus.NewDesc(
            prometheus.BuildFQName(
                namespace,
                "",
                metricLabelRequestsTotal,
            ),
            "Counter for total requests",
            nil,
            nil,
        ),
        metricRequestLatency: prometheus.NewDesc(
            prometheus.BuildFQName(
                namespace,
                "",
                metricLabelRequestLatency,
            ),
            "Latency statistics for requests",
            nil,
            nil,
        ),
    }
}

func (m *Metrics) collectCacheTTLMetric(ch chan<- prometheus.Metric, ttl float64) {
    ch <- prometheus.MustNewConstMetric(
        m.metricCacheTTL,
        prometheus.GaugeValue,
        ttl,
    )
}

func (m *Metrics) collectCurrentlyCachedMetric(ch chan<- prometheus.Metric, count float64) {
    ch <- prometheus.MustNewConstMetric(
        m.metricCurrentlyCached,
        prometheus.GaugeValue,
        count,
    )
}

func (m *Metrics) collectDatabaseTimestampMetric(ch chan<- prometheus.Metric, db *database.Database) {
    timestamp, err := db.Timestamp()
    if err != nil {
        log.Printf("failed to read file timestamp: %v", err)
        return
    }

    ch <- prometheus.MustNewConstMetric(
        m.metricDatabaseTimestamp,
        prometheus.GaugeValue,
        float64(timestamp.Unix()),
        timestamp.String(),
    )
}

func (m *Metrics) collectDatabaseReadyMetric(ch chan<- prometheus.Metric, ready bool) {
    var dbReady uint8
    if ready {
        dbReady = 1
    }

    ch <- prometheus.MustNewConstMetric(
        m.metricDatabaseReady,
        prometheus.GaugeValue,
        float64(dbReady),
    )
}

func (m *Metrics) collectRequestDataMetrics(ch chan<- prometheus.Metric, queue *RequestDataQueue) {
    var (
        count uint64
        sum   float64
    )
    buckets := make(map[float64]uint64)
    bucketBounds := prometheus.DefBuckets

    data := queue.ConsumeAll()
    for _, r := range data {
        latency := r.Latency.Seconds()
        sum += latency
        count++

        for _, bound := range bucketBounds {
            if latency <= bound {
                buckets[bound]++
            }
        }
    }

    ch <- prometheus.MustNewConstMetric(
        m.metricRequestsTotal,
        prometheus.CounterValue,
        float64(len(data)),
    )

    ch <- prometheus.MustNewConstHistogramWithCreatedTimestamp(
        m.metricRequestLatency,
        count,
        sum,
        buckets,
        time.Now(),
    )
}
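MustNewConstHistogramWithCreatedTimestamp expects the bucket map to hold cumulative counts: for each upper bound, the number of observations less than or equal to that bound, excluding +Inf. The inner loop above produces exactly that by incrementing every bucket whose bound is at or above the observed latency. A standalone sketch with made-up latencies that prints the resulting cumulative counts:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Made-up request latencies in seconds.
    latencies := []float64{0.004, 0.012, 0.012, 0.3, 1.7}

    var (
        count uint64
        sum   float64
    )
    buckets := make(map[float64]uint64)

    // Same pattern as collectRequestDataMetrics: every bucket whose upper
    // bound is >= the observation gets incremented, which yields the
    // cumulative counts that const histograms expect.
    for _, l := range latencies {
        sum += l
        count++
        for _, bound := range prometheus.DefBuckets {
            if l <= bound {
                buckets[bound]++
            }
        }
    }

    for _, bound := range prometheus.DefBuckets {
        fmt.Printf("le=%g  count=%d\n", bound, buckets[bound])
    }
    fmt.Println("count:", count, "sum:", sum)
}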
internal/exporter/middleware.go (new file) | 68

@@ -0,0 +1,68 @@
package exporter

import (
    "net/http"
    "sync"
    "time"

    "github.com/go-chi/chi/v5/middleware"
)

type RequestData struct {
    Latency time.Duration
    Request *http.Request
    Start   time.Time
}

type RequestDataQueue struct {
    mu   sync.Mutex
    data []RequestData
}

func NewRequestDataQueue() *RequestDataQueue {
    return &RequestDataQueue{
        data: []RequestData{},
    }
}

func (q *RequestDataQueue) Add(data RequestData) {
    q.mu.Lock()
    defer q.mu.Unlock()
    q.data = append(q.data, data)
}

func (q *RequestDataQueue) ConsumeAll() []RequestData {
    q.mu.Lock()
    defer q.mu.Unlock()
    data := q.data
    q.data = nil
    return data
}

type Middleware struct {
    queue *RequestDataQueue
}

func NewMiddleware(queue *RequestDataQueue) func(next http.Handler) http.Handler {
    m := Middleware{
        queue: queue,
    }
    return m.handler
}

func (m Middleware) handler(next http.Handler) http.Handler {
    fn := func(w http.ResponseWriter, r *http.Request) {
        start := time.Now()
        ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
        next.ServeHTTP(ww, r)

        m.queue.Add(
            RequestData{
                Latency: time.Since(start),
                Request: r,
                Start:   start,
            },
        )
    }
    return http.HandlerFunc(fn)
}
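Usage note: NewMiddleware returns a plain func(http.Handler) http.Handler, so it drops straight into a chi (or net/http) middleware chain. A short hypothetical sketch; the route and listen address are assumptions, not taken from the commit:

package main

import (
    "log"
    "net/http"

    "github.com/go-chi/chi/v5"

    "git.ar21.de/yolokube/country-geo-locations/internal/exporter"
)

func main() {
    queue := exporter.NewRequestDataQueue()

    r := chi.NewRouter()
    // Every request handled below gets its latency pushed onto the queue,
    // which the exporter drains on the next scrape.
    r.Use(exporter.NewMiddleware(queue))
    r.Get("/", func(w http.ResponseWriter, _ *http.Request) {
        w.Write([]byte("ok")) // example handler only
    })

    log.Fatal(http.ListenAndServe(":8080", r))
}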