package exporter

import (
	"log"
	"time"

	"git.ar21.de/yolokube/country-geo-locations/internal/database"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	namespace      string = "country_geo_locations"
	cacheSubsystem string = "cache"
	dbSubsystem    string = "db"

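	// Metric name suffixes; metricLabelDatabaseTimestamp is also used as a label name.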
	metricLabelCacheTTL          string = "ttl"
	metricLabelCurrentlyCached   string = "currently_cached"
	metricLabelDatabaseTimestamp string = "timestamp"
	metricLabelDatabaseReady     string = "ready"
	metricLabelRequestsTotal     string = "requests_total"
	metricLabelRequestLatency    string = "request_latency"
)

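// Metrics holds the Prometheus descriptors for the cache, database, and
// request metrics exposed by the exporter.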
type Metrics struct {
	metricCacheTTL          *prometheus.Desc
	metricCurrentlyCached   *prometheus.Desc
	metricDatabaseTimestamp *prometheus.Desc
	metricDatabaseReady     *prometheus.Desc
	metricRequestsTotal     *prometheus.Desc
	metricRequestLatency    *prometheus.Desc
}

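// NewMetrics creates the descriptors for all exporter metrics under the
// country_geo_locations namespace.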
func NewMetrics() *Metrics {
	return &Metrics{
		metricCacheTTL: prometheus.NewDesc(
			prometheus.BuildFQName(
				namespace,
				cacheSubsystem,
				metricLabelCacheTTL,
			),
			"Duration for cached requests",
			nil,
			nil,
		),
		metricCurrentlyCached: prometheus.NewDesc(
			prometheus.BuildFQName(
				namespace,
				cacheSubsystem,
				metricLabelCurrentlyCached,
			),
			"Number of cached entries",
			nil,
			nil,
		),
		metricDatabaseTimestamp: prometheus.NewDesc(
			prometheus.BuildFQName(
				namespace,
				dbSubsystem,
				metricLabelDatabaseTimestamp,
			),
			"Timestamp of the CSV file",
			[]string{metricLabelDatabaseTimestamp},
			nil,
		),
		metricDatabaseReady: prometheus.NewDesc(
			prometheus.BuildFQName(
				namespace,
				dbSubsystem,
				metricLabelDatabaseReady,
			),
			"Ready status of the database",
			nil,
			nil,
		),
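		// The request metrics live directly under the namespace, without a subsystem.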
		metricRequestsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(
				namespace,
				"",
				metricLabelRequestsTotal,
			),
			"Counter for total requests",
			nil,
			nil,
		),
		metricRequestLatency: prometheus.NewDesc(
			prometheus.BuildFQName(
				namespace,
				"",
				metricLabelRequestLatency,
			),
			"Latency statistics for requests",
			nil,
			nil,
		),
	}
}

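// collectCacheTTLMetric reports the configured cache TTL as a gauge.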
func (m *Metrics) collectCacheTTLMetric(ch chan<- prometheus.Metric, ttl float64) {
	ch <- prometheus.MustNewConstMetric(
		m.metricCacheTTL,
		prometheus.GaugeValue,
		ttl,
	)
}

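// collectCurrentlyCachedMetric reports the number of currently cached entries as a gauge.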
func (m *Metrics) collectCurrentlyCachedMetric(ch chan<- prometheus.Metric, count float64) {
	ch <- prometheus.MustNewConstMetric(
		m.metricCurrentlyCached,
		prometheus.GaugeValue,
		count,
	)
}

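// collectDatabaseTimestampMetric reports the database file timestamp as a
// Unix-time gauge, labeled with its string representation; on error the
// metric is skipped and the error is logged.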
func (m *Metrics) collectDatabaseTimestampMetric(ch chan<- prometheus.Metric, db *database.Database) {
	timestamp, err := db.Timestamp()
	if err != nil {
		log.Printf("failed to read file timestamp: %v", err)
		return
	}

	ch <- prometheus.MustNewConstMetric(
		m.metricDatabaseTimestamp,
		prometheus.GaugeValue,
		float64(timestamp.Unix()),
		timestamp.String(),
	)
}

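// collectDatabaseReadyMetric reports whether the database is ready as a 0/1 gauge.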
func (m *Metrics) collectDatabaseReadyMetric(ch chan<- prometheus.Metric, ready bool) {
	var dbReady float64
	if ready {
		dbReady = 1
	}

	ch <- prometheus.MustNewConstMetric(
		m.metricDatabaseReady,
		prometheus.GaugeValue,
		dbReady,
	)
}

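// collectRequestDataMetrics drains the request data queue and exposes the
// number of drained requests as a counter and their latencies as a histogram
// using the default Prometheus buckets.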
func (m *Metrics) collectRequestDataMetrics(ch chan<- prometheus.Metric, queue *RequestDataQueue) {
	var (
		count uint64
		sum   float64
	)
	buckets := make(map[float64]uint64)
	bucketBounds := prometheus.DefBuckets

	data := queue.ConsumeAll()
	for _, r := range data {
		latency := r.Latency.Seconds()
		sum += latency
		count++

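		// Count the observation in every bucket whose upper bound it does not
		// exceed, producing the cumulative counts a const histogram expects.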
		for _, bound := range bucketBounds {
			if latency <= bound {
				buckets[bound]++
			}
		}
	}

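	// Expose the number of drained requests as the request total.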
	ch <- prometheus.MustNewConstMetric(
		m.metricRequestsTotal,
		prometheus.CounterValue,
		float64(len(data)),
	)

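	// Summarize the drained latencies as a const histogram; the created
	// timestamp is set to the time of this scrape.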
	ch <- prometheus.MustNewConstHistogramWithCreatedTimestamp(
		m.metricRequestLatency,
		count,
		sum,
		buckets,
		time.Now(),
	)
}