From ca2ce19c9975f7e747374045b49954f3a530bac2 Mon Sep 17 00:00:00 2001 From: Ali Mosajjal Date: Sun, 13 Apr 2025 13:02:49 +1200 Subject: [PATCH] trying to move to toml config --- cmd/dnsmonster/main.go | 51 +++-- config.sample.toml | 358 ++++++++++++++++++++++++++++++++++ go.mod | 65 +++--- go.sum | 87 +++++++++ internal/config/config.go | 113 +++++++++++ internal/config/load.go | 47 +++++ internal/output/clickhouse.go | 176 ++++++++++------- internal/output/elastic.go | 109 +++++++---- internal/output/file.go | 123 +++++++++--- internal/output/influx.go | 131 +++++++++++-- internal/output/output.go | 59 ++++-- internal/output/parquet.go | 142 ++++++++++---- internal/output/postgres.go | 133 +++++++++++-- internal/output/sentinel.go | 108 ++++++---- 14 files changed, 1401 insertions(+), 301 deletions(-) create mode 100644 config.sample.toml create mode 100644 internal/config/config.go create mode 100644 internal/config/load.go diff --git a/cmd/dnsmonster/main.go b/cmd/dnsmonster/main.go index fa8eea78..b2e737a0 100644 --- a/cmd/dnsmonster/main.go +++ b/cmd/dnsmonster/main.go @@ -34,6 +34,8 @@ import ( "time" "github.com/mosajjal/dnsmonster/internal/capture" + "github.com/mosajjal/dnsmonster/internal/config" + "github.com/mosajjal/dnsmonster/internal/output" "github.com/mosajjal/dnsmonster/internal/util" "github.com/pkg/profile" "github.com/rcrowley/go-metrics" @@ -58,22 +60,49 @@ func handleInterrupt(ctx context.Context) { }() } -func main() { - - for i := range os.Args { - - var re = regexp.MustCompile(`(?m)--(\w+)`) - os.Args[i] = (re.ReplaceAllStringFunc(os.Args[i], func(m string) string { +func normalizeCmdArgs(args []string) { + re := regexp.MustCompile(`(?m)--(\w+)`) + for i := range args { + args[i] = re.ReplaceAllStringFunc(args[i], func(m string) string { return strings.ToLower(m) - })) - + }) } +} + +func main() { + normalizeCmdArgs(os.Args) var ctx context.Context ctx, util.GlobalCancel = context.WithCancel(context.Background()) - g, _ := 
errgroup.WithContext(ctx) + g, gCtx := errgroup.WithContext(ctx) + // process and handle flags - util.ProcessFlags(ctx) + cfg, err := config.LoadConfig() + if err != nil { + log.Fatalf("Failed to load config: %v", err) + } + + cfgJson, err := json.MarshalIndent(cfg, "", " ") + if err != nil { + log.Fatalf("Failed to marshal config: %v", err) + } + log.Infof("Loaded config:\n%s", string(cfgJson)) + + // Integrate cfg into the rest of the application: register outputs from config + // Elastic output + if cfg.Outputs.Elastic.OutputType > 0 { + elasticOutput := output.NewElasticConfig(). + WithOutputType(uint(cfg.Outputs.Elastic.OutputType)). + WithAddress(cfg.Outputs.Elastic.Address). + WithOutputIndex(cfg.Outputs.Elastic.OutputIndex). + WithBatchSize(cfg.Outputs.Elastic.BatchSize). + WithBatchDelay(cfg.Outputs.Elastic.BatchDelay). + WithChannelSize(int(util.GeneralFlags.ResultChannelSize)) + if err != nil { + log.Fatalf("Failed to configure elastic output: %v", err) + } + util.GlobalDispatchList = append(util.GlobalDispatchList, elasticOutput) + } // debug and profile options runtime.GOMAXPROCS(util.GeneralFlags.Gomaxprocs) @@ -89,7 +118,7 @@ func main() { handleInterrupt(ctx) // set up capture - g.Go(func() error { capture.GlobalCaptureConfig.CheckFlagsAndStart(ctx); return nil }) + capture.GlobalCaptureConfig.CheckFlagsAndStart(gCtx) // Set up output dispatch var c chan util.DNSResult for { diff --git a/config.sample.toml b/config.sample.toml new file mode 100644 index 00000000..3e23ae38 --- /dev/null +++ b/config.sample.toml @@ -0,0 +1,358 @@ +[input] +# Device used to capture +devname = "" +# Pcap filename to run +pcapfile = "" +# dnstap socket path. Example: unix:///tmp/dnstap.sock, tcp://127.0.0.1:8080 +dnstapsocket = "" +# Port selected to filter packets +port = 53 +# Capture Sampling by a:b. 
eg sampleRatio of 1:100 will process 1 percent of the incoming packets +sampleratio = "1:1" +# Cleans up packet hash table used for deduplication +dedupcleanupinterval = "1m0s" +# Set the dnstap socket permission, only applicable when unix:// is used +dnstappermission = 755 +# Number of routines used to handle received packets +packethandlercount = 2 +# Size of the tcp assembler +tcpassemblychannelsize = 10000 +# Size of the tcp result channel +tcpresultchannelsize = 10000 +# Number of routines used to handle tcp packets +tcphandlercount = 1 +# Size of the channel to send packets to be defragged +defraggerchannelsize = 10000 +# Size of the channel where the defragged packets are returned +defraggerchannelreturnsize = 10000 +# Size of the packet handler channel +packetchannelsize = 1000 +# Afpacket Buffersize in MB +afpacketbuffersizemb = 64 +# BPF filter applied to the packet stream. If port is selected, the packets will not be defragged. +filter = '((ip and (ip[9] == 6 or ip[9] == 17)) or (ip6 and (ip6[6] == 17 or ip6[6] == 6 or ip6[6] == 44)))' +# Use AFPacket for live captures. Supported on Linux 3.0+ only +useafpacket = false +# The PCAP capture does not contain ethernet frames +noetherframe = false +# Deduplicate incoming packets. Only supported with --devName and --pcapFile. Experimental +dedup = false +# Do not put the interface in promiscuous mode +nopromiscuous = false + +[process] +# Garbage Collection interval for tcp assembly and ip defragmentation +gctime = "10s" +# Duration to calculate interface stats +capturestatsdelay = "1s" +# Mask IPv4s by bits. 32 means all the bits of the IP are saved in DB +masksize4 = 32 +# Mask IPv6s by bits. 128 means all the bits of the IP are saved in DB +masksize6 = 128 +# Size of the result processor channel size +resultchannelsize = 100000 +# Limit of packets logged to clickhouse every iteration. Default 0 (disabled) +packetlimit = 0 +# Skip outputting domains matching items in the CSV file path. 
Can accept a URL (http:// or https://) or path +skipdomainsfile = "" +# Hot-Reload skipdomainsfile interval +skipdomainsrefreshinterval = "1m0s" +# Allow Domains logic input file. Can accept a URL (http:// or https://) or path +allowdomainsfile = "" +# Hot-Reload allowdomainsfile file interval +allowdomainsrefreshinterval = "1m0s" +# Skip TLS verification when making HTTPS connections +skiptlsverification = false + +[outputs.clickhouse] +# Enable or disable ClickHouse output +enabled = false +# Address of the clickhouse database to save the results. multiple values can be provided +address = ["localhost:9000"] +# Username to connect to the clickhouse database +username = "" +# Password to connect to the clickhouse database +password = "" +# Database to connect to the clickhouse database +database = "default" +# Minimum capacity of the cache array used to send data to clickhouse. Set close to the QPS received +batch_size = 100000 +# Interval between sending results to ClickHouse. If non-0, Batch size is ignored and batch delay is used +batch_delay = "5s" +# Clickhouse connection LZ4 compression level, 0 means no compression +compress = 0 +# Debug Clickhouse connection +debug = false +# Use TLS for Clickhouse connection +secure = false +# Save full packet query and response in JSON format +save_full_query = false +# Number of Clickhouse output Workers +workers = 1 +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.elastic] +# Enable or disable Elastic output +enabled = false +# What should be written to elastic, controlled by filter_mode +output_type = 0 +# Elastic endpoint address, example: http://127.0.0.1:9200 +endpoint = "http://localhost:9200" +# Elastic index +index = "dnsmonster" +# Send data to Elastic in batch sizes +batch_size = 1000 +# Interval between sending results to Elastic if Batch size is not filled +batch_delay = "1s" +# Max queue size +max_queue_size = 100000 +# Enable sniffing +sniff = false +# Enable 
compression +compression = true +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.file] +# Enable or disable File output +enabled = false +# What should be written to file. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Path to output folder. Used if output_type is not none +output_path = "/path/to/output.log" +# Interval to rotate the file in cron format +rotate_cron = "0 0 * * *" +# Number of files to keep. 0 to disable rotation +rotate_count = 4 +# Output format for file. Options: json, csv, csv_no_header, gotemplate +# Note that csv splits the datetime format into multiple fields +output_format = "json" +# Go Template to format the output as needed +go_template = "{{.}}" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.influx] +# Enable or disable InfluxDB output +enabled = false +# What should be written to influx. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Influx Server address, example: http://localhost:8086. Used if output_type is not 0 +server = "http://localhost:8086" +# Influx Server Auth Token +token = "dnsmonster" +# Influx Server Bucket +bucket = "dnsmonster" +# Influx Server Organization +org = "dnsmonster" +# Number of workers for Influx output +workers = 8 +# Batch size for sending data to Influx +batch_size = 1000 +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.kafka] +# Enable or disable Kafka output +enabled = false +# What should be written to Kafka. 
options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 0 +# Kafka broker address(es), example: 127.0.0.1:9092 +brokers = ["localhost:9092"] +# Kafka topic for logging +topic = "dnsmonster" +# Batch size for sending data to Kafka +batch_size = 1000 +# Kafka connection timeout in seconds +timeout = 3 +# Interval between sending results to Kafka if Batch size is not filled +batch_delay = "1s" +# Compress Kafka connection +compress = false +# Compression Type for Kafka connection [snappy, gzip, lz4, zstd]; default is snappy +compression_type = "none" +# Use TLS for Kafka connection +secure = false +# Path of CA certificate that signs Kafka broker certificate +ca_certificate_path = "" +# Path of TLS certificate to present to broker +tls_certificate_path = "" +# Path of TLS certificate key +tls_key_path = "" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" +# Maximum message bytes +max_message_bytes = 1000000 +# Max queue size +max_queue_size = 100000 + +[outputs.parquet] +# Enable or disable Parquet output +enabled = false +# What should be written to parquet file. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Path to output folder. Used if output_type is not 0 +output_path = "/path/to/output.parquet" +# Number of records to write to parquet file before flushing +flush_batch_size = 10000 +# Number of workers to write to parquet file +worker_count = 4 +# Size of the write buffer in bytes +write_buffer_size = 256000 +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.postgres] +# Enable or disable PostgreSQL output +enabled = false +# What should be written to PSQL. 
options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# PSQL endpoint URI, format: postgres://username:password@hostname:port/database?sslmode=disable +endpoint = "postgres://username:password@hostname:port/database?sslmode=disable" +# Number of PostgreSQL workers +workers = 1 +# PostgreSQL Batch Size +batch_size = 1 +# Interval between sending results if Batch size is not filled (any value > 0 takes precedence over Batch Size) +batch_delay = "0s" +# Timeout for any INSERT operation before considering them failed +batch_timeout = "5s" +# Save full packet query and response in JSON format +save_full_query = false +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.sentinel] +# Enable or disable Microsoft Sentinel output +enabled = false +# What should be written to Sentinel. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Sentinel Shared Key (primary or secondary), found in Agents Management page under Log Analytics workspace +shared_key = "" +# Sentinel Customer Id, found in Agents Management page under Log Analytics workspace +customer_id = "" +# Sentinel Output LogType +log_type = "dnsmonster" +# Sentinel Output Proxy in URI format +proxy = "" +# Sentinel Batch Size +batch_size = 100 +# Interval between sending results if Batch size is not filled +batch_delay = "1s" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.splunk] +# Enable or disable Splunk HEC output +enabled = false +# What should be written to HEC. 
options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Splunk endpoint address(es), example: http://127.0.0.1:8088 (multiple values for load balance/HA) +endpoint = ["http://127.0.0.1:8088"] +# Splunk HEC Token +token = "00000000-0000-0000-0000-000000000000" +# Splunk Output Index +index = "temp" +# Splunk Output Proxy in URI format +proxy = "" +# Splunk Output Source +source = "dnsmonster" +# Splunk Output Sourcetype +sourcetype = "json" +# Send data to HEC in batch sizes +batch_size = 1000 +# Interval between sending results if Batch size is not filled +batch_delay = "1s" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.stdout] +# Enable or disable stdout output +enabled = false +# What should be written to stdout. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Output format for stdout. Options: json, csv, csv_no_header, gotemplate +output_format = "json" +# Go Template to format the output as needed +go_template = "{{.}}" +# Number of stdout output workers +worker_count = 8 +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.syslog] +# Enable or disable syslog output +enabled = false +# What should be written to Syslog server. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Syslog endpoint address, example: udp://127.0.0.1:514, tcp://127.0.0.1:514 +endpoint = "udp://127.0.0.1:514" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.victorialogs] +# Enable or disable VictoriaLogs output +enabled = false +# What should be written to Victoria. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Victoria Output Endpoint. 
example: http://localhost:9428/insert/jsonline?_msg_field=rcode_id&_time_field=time +endpoint = "" +# Victoria Output Proxy in URI format +proxy = "" +# Number of Victoria output workers +workers = 8 +# Victoria Batch Size +batch_size = 100 +# Interval between sending results if Batch size is not filled (any value > 0 takes precedence over Batch Size) +batch_delay = "0s" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[outputs.zinc] +# Enable or disable Zinc output +enabled = false +# What should be written to zinc. options: 0=Disabled, 1=All, 2=SkipDomains, 3=AllowDomains, 4=Both +output_type = 1 +# Index used to save data in Zinc +index = "dnsmonster" +# Zinc endpoint address, example: http://127.0.0.1:9200/api/default/_bulk +endpoint = "" +# Zinc username, example: admin@admin.com +username = "" +# Zinc password +password = "" +# Send data to Zinc in batch sizes +batch_size = 1000 +# Interval between sending results if Batch size is not filled +batch_delay = "1s" +# Zinc request timeout +timeout = "10s" +# Filter mode: none, skipdomains, allowdomains, both +filter_mode = "none" + +[metrics] +# Metric Endpoint Service: stderr, statsd, prometheus +endpointtype = "stderr" +# Statsd endpoint. Example: 127.0.0.1:8125 +statsdagent = "" +# Prometheus Registry endpoint. Example: http://0.0.0.0:2112/metric +metricprometheusendpoint = "" +# Format for stderr output: json, etc. 
+metricstderrformat = "json" +# Interval between sending results to Metric Endpoint +metricflushinterval = "10s" + +[general] +# Name of the server used to index the metrics +servername = "default" +# Set debug Log format: text, json +logformat = "text" +# Set debug Log level: 0=PANIC, 1=ERROR, 2=WARN, 3=INFO, 4=DEBUG +loglevel = 3 +# Write CPU profile to file +cpuprofile = "" +# Write memory profile to file +memprofile = "" +# GOMAXPROCS variable (-1 means use default) +gomaxprocs = -1 diff --git a/go.mod b/go.mod index d9e7b95a..a33397a9 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/mosajjal/dnsmonster go 1.23.4 require ( - github.com/ClickHouse/clickhouse-go/v2 v2.30.0 + github.com/ClickHouse/clickhouse-go/v2 v2.34.0 github.com/arthurkiller/rollingwriter v1.1.3 github.com/deathowl/go-metrics-prometheus v0.0.0-20221009205350-f2a1482ba35b github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 @@ -11,31 +11,35 @@ require ( github.com/hashicorp/go-syslog v1.0.0 github.com/influxdata/influxdb-client-go/v2 v2.14.0 github.com/jackc/pgx/v4 v4.18.3 - github.com/packetcap/go-pcap v0.0.0-20240528124601-8c87ecf5dbc5 - github.com/parquet-go/parquet-go v0.24.0 - github.com/prometheus/client_golang v1.20.5 - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 + github.com/packetcap/go-pcap v0.0.0-20250210145516-70cf19242e90 + github.com/parquet-go/parquet-go v0.25.0 + github.com/prometheus/client_golang v1.22.0 + github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 + github.com/spf13/pflag v1.0.6 + github.com/spf13/viper v1.20.1 github.com/syntaqx/go-metrics-datadog v0.1.3 - golang.org/x/sync v0.11.0 + golang.org/x/sync v0.13.0 ) require ( - github.com/ClickHouse/ch-go v0.65.0 // indirect + github.com/ClickHouse/ch-go v0.65.1 // indirect github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/andybalholm/brotli v1.1.1 // indirect 
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic/loader v0.2.1 // indirect + github.com/bytedance/sonic/loader v0.2.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/farsightsec/golang-framestream v0.3.0 // indirect github.com/felixge/fgprof v0.9.5 // indirect github.com/fortytw2/leaktest v1.3.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -47,40 +51,47 @@ require ( github.com/jackc/pgtype v1.14.4 // indirect github.com/jackc/puddle v1.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oapi-codegen/runtime v1.1.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/paulmach/orb v0.11.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.61.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common 
v0.63.0 // indirect + github.com/prometheus/procfs v0.16.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/sagikazarmark/locafero v0.9.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.14.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect - golang.org/x/arch v0.12.0 // indirect - golang.org/x/mod v0.22.0 // indirect - golang.org/x/text v0.22.0 // indirect - golang.org/x/tools v0.28.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/arch v0.16.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/tools v0.32.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) require ( - github.com/bytedance/sonic v1.12.6 + github.com/bytedance/sonic v1.13.2 github.com/dnstap/golang-dnstap v0.4.0 github.com/jessevdk/go-flags v1.6.1 - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect - github.com/miekg/dns v1.1.62 + github.com/miekg/dns v1.1.65 github.com/mosajjal/Go-Splunk-HTTP/splunk/v2 v2.0.7 github.com/olivere/elastic v6.2.37+incompatible github.com/pkg/errors v0.9.1 // indirect @@ -88,8 +99,8 @@ require ( github.com/rogpeppe/fastuuid v1.2.0 github.com/segmentio/kafka-go v0.4.47 github.com/sirupsen/logrus v1.9.3 - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/net v0.36.0 - golang.org/x/sys v0.30.0 // indirect 
- google.golang.org/protobuf v1.36.1 + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/net v0.39.0 + golang.org/x/sys v0.32.0 // indirect + google.golang.org/protobuf v1.36.6 ) diff --git a/go.sum b/go.sum index bfa2e11c..42b52bce 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,13 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/ClickHouse/ch-go v0.65.0 h1:vZAXfTQliuNNefqkPDewX3kgRxN6Q4vUENnnY+ynTRY= github.com/ClickHouse/ch-go v0.65.0/go.mod h1:tCM0XEH5oWngoi9Iu/8+tjPBo04I/FxNIffpdjtwx3k= +github.com/ClickHouse/ch-go v0.65.1 h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= +github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo= github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo= +github.com/ClickHouse/clickhouse-go/v2 v2.34.0 h1:Y4rqkdrRHgExvC4o/NTbLdY5LFQ3LHS77/RNFxFX3Co= +github.com/ClickHouse/clickhouse-go/v2 v2.34.0/go.mod h1:yioSINoRLVZkLyDzdMXPLRIqhDvel8iLBlwh6Iefso8= github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= @@ -24,9 +29,13 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bytedance/sonic v1.12.6 h1:/isNmCUF2x3Sh8RAp/4mh4ZGkcFAX/hLrzrK3AvpRzk= github.com/bytedance/sonic v1.12.6/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= +github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= +github.com/bytedance/sonic 
v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E= github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= +github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= @@ -40,6 +49,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= @@ -61,6 +72,10 @@ github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= @@ -71,6 +86,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= @@ -102,6 +119,8 @@ github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8I github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof 
v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -117,6 +136,7 @@ github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjw github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI= github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU= github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -137,6 +157,7 @@ github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5W github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= @@ -183,9 +204,13 @@ 
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -222,6 +247,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc= +github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -241,11 +268,19 @@ github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGe github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/packetcap/go-pcap v0.0.0-20240528124601-8c87ecf5dbc5 h1:p4VuaitqUAqSZSomd7Wb4BPV/Jj7Hno2/iqtfX7DZJI= github.com/packetcap/go-pcap v0.0.0-20240528124601-8c87ecf5dbc5/go.mod h1:zIAoVKeWP0mz4zXY50UYQt6NLg2uwKRswMDcGEqOms4= +github.com/packetcap/go-pcap v0.0.0-20250210145516-70cf19242e90 h1:gHGhImV1Y0pYTq8348Ev/dhMQ1ZGi1VCujMVaDZ7Amg= +github.com/packetcap/go-pcap v0.0.0-20250210145516-70cf19242e90/go.mod h1:zIAoVKeWP0mz4zXY50UYQt6NLg2uwKRswMDcGEqOms4= github.com/parquet-go/parquet-go v0.24.0 h1:VrsifmLPDnas8zpoHmYiWDZ1YHzLmc7NmNwPGkI2JM4= github.com/parquet-go/parquet-go v0.24.0/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw= +github.com/parquet-go/parquet-go v0.25.0 h1:GwKy11MuF+al/lV6nUsFw8w8HCiPOSAx1/y8yFxjH5c= +github.com/parquet-go/parquet-go v0.25.0/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -262,22 +297,32 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -292,6 +337,10 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= 
+github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= +github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= @@ -306,6 +355,18 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -325,6 +386,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syntaqx/go-metrics-datadog v0.1.3 h1:N2k1QfPCghydIs0OdTpN8lDFgn48rA5D4vc+8FLkD/w= github.com/syntaqx/go-metrics-datadog v0.1.3/go.mod h1:MdS57ZQXnTjkNCObzSUxqo8cEWWNIuFA00AP1KFUBJ0= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -352,8 +415,12 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -361,12 +428,16 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg= golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.16.0 h1:foMtLTdyOmIniqWCHjY6+JxuC54XP1fDwx4N0ASyW+U= +golang.org/x/arch v0.16.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -384,6 +455,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto 
v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -393,6 +466,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -411,6 +486,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -421,6 +498,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -450,6 +529,8 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -470,6 +551,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text 
v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -486,6 +569,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -502,6 +587,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 00000000..ac5e9432 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,113 @@ +// Package config provides the hierarchical configuration structs for dnsmonster, +// supporting TOML config files and environment variable overrides. +package config + +import "time" + +// Config is the root configuration structure. +type Config struct { + Input InputConfig `mapstructure:"input"` + Process ProcessConfig `mapstructure:"process"` + Outputs OutputsConfig `mapstructure:"outputs"` + Metrics MetricsConfig `mapstructure:"metrics"` + General GeneralConfig `mapstructure:"general"` +} + +// InputConfig holds all input/capture-related settings. +type InputConfig struct { + DevName string `mapstructure:"devname"` + PcapFile string `mapstructure:"pcapfile"` + DnstapSocket string `mapstructure:"dnstapsocket"` + Port int `mapstructure:"port"` + SampleRatio string `mapstructure:"sampleratio"` + DedupCleanupInterval time.Duration `mapstructure:"dedupcleanupinterval"` + DnstapPermission int `mapstructure:"dnstappermission"` + PacketHandlerCount int `mapstructure:"packethandlercount"` + TCPAssemblyChannelSize int `mapstructure:"tcpassemblychannelsize"` + TCPResultChannelSize int `mapstructure:"tcpresultchannelsize"` + TCPHandlerCount int `mapstructure:"tcphandlercount"` + DefraggerChannelSize int `mapstructure:"defraggerchannelsize"` + DefraggerReturnSize int `mapstructure:"defraggerchannelreturnsize"` + PacketChannelSize int `mapstructure:"packetchannelsize"` + AFPacketBufferSizeMB int `mapstructure:"afpacketbuffersizemb"` + Filter string `mapstructure:"filter"` + UseAFPacket bool `mapstructure:"useafpacket"` + NoEtherFrame bool 
`mapstructure:"noetherframe"` + Dedup bool `mapstructure:"dedup"` + NoPromiscuous bool `mapstructure:"nopromiscuous"` +} + +// ProcessConfig holds processing and filtering logic. +type ProcessConfig struct { + GCTime time.Duration `mapstructure:"gctime"` + CaptureStatsDelay time.Duration `mapstructure:"capturestatsdelay"` + MaskSize4 int `mapstructure:"masksize4"` + MaskSize6 int `mapstructure:"masksize6"` + ResultChannelSize int `mapstructure:"resultchannelsize"` + PacketLimit int `mapstructure:"packetlimit"` + SkipDomainsFile string `mapstructure:"skipdomainsfile"` + SkipDomainsRefreshInterval time.Duration `mapstructure:"skipdomainsrefreshinterval"` + AllowDomainsFile string `mapstructure:"allowdomainsfile"` + AllowDomainsRefreshInterval time.Duration `mapstructure:"allowdomainsrefreshinterval"` + SkipTLSVerification bool `mapstructure:"skiptlsverification"` +} + +// ClickhouseOutputConfig holds Clickhouse output-related settings. +type ClickhouseOutputConfig struct { + Enabled bool `mapstructure:"enabled"` + Address []string `mapstructure:"address"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + Database string `mapstructure:"database"` + BatchSize uint `mapstructure:"batch_size"` + BatchDelay time.Duration `mapstructure:"batch_delay"` + Compress int `mapstructure:"compress"` + Debug bool `mapstructure:"debug"` + Secure bool `mapstructure:"secure"` + SaveFullQuery bool `mapstructure:"save_full_query"` + Workers int `mapstructure:"workers"` + FilterMode string `mapstructure:"filter_mode"` +} + +// ElasticOutputConfig holds Elasticsearch output-related settings. 
+type ElasticOutputConfig struct { + Enabled bool `mapstructure:"enabled"` + Address []string `mapstructure:"address"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + IndexPrefix string `mapstructure:"index_prefix"` + BatchSize uint `mapstructure:"batch_size"` + BatchDelay time.Duration `mapstructure:"batch_delay"` + Debug bool `mapstructure:"debug"` + Secure bool `mapstructure:"secure"` + Workers int `mapstructure:"workers"` + FilterMode string `mapstructure:"filter_mode"` + OutputType int `mapstructure:"output_type"` + OutputIndex string `mapstructure:"output_index"` +} + +// OutputsConfig holds all output-related settings. +type OutputsConfig struct { + Clickhouse ClickhouseOutputConfig `mapstructure:"clickhouse"` + Elastic ElasticOutputConfig `mapstructure:"elastic"` + // ...other outputs... +} + +// MetricsConfig holds metrics/exporter settings. +type MetricsConfig struct { + EndpointType string `mapstructure:"endpointtype"` + StatsdAgent string `mapstructure:"statsdagent"` + PrometheusEndpoint string `mapstructure:"metricprometheusendpoint"` + StderrFormat string `mapstructure:"metricstderrformat"` + FlushInterval time.Duration `mapstructure:"metricflushinterval"` +} + +// GeneralConfig holds general application settings. +type GeneralConfig struct { + ServerName string `mapstructure:"servername"` + LogFormat string `mapstructure:"logformat"` + LogLevel int `mapstructure:"loglevel"` + CPUProfile string `mapstructure:"cpuprofile"` + MemProfile string `mapstructure:"memprofile"` + GoMaxProcs int `mapstructure:"gomaxprocs"` +} diff --git a/internal/config/load.go b/internal/config/load.go new file mode 100644 index 00000000..c3f11d2e --- /dev/null +++ b/internal/config/load.go @@ -0,0 +1,47 @@ +// Package config provides configuration loading for dnsmonster. 
+package config + +import ( + "fmt" + "os" + "strings" + + "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +// LoadConfig loads the configuration from TOML file, environment variables, and flags. +func LoadConfig() (*Config, error) { + var configFile string + pflag.StringVar(&configFile, "config", "dnsmonster.toml", "Path to configuration file") + pflag.Parse() + + v := viper.New() + v.SetConfigType("toml") + v.SetConfigFile(configFile) + v.SetEnvPrefix("DNSMONSTER") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + v.AutomaticEnv() + + // Set default config file search paths + v.AddConfigPath(".") + v.AddConfigPath("/etc/dnsmonster/") + + // Read config file + if err := v.ReadInConfig(); err != nil { + // Only error if file is not found and not the default + if _, ok := err.(viper.ConfigFileNotFoundError); ok && configFile == "dnsmonster.toml" { + fmt.Fprintf(os.Stderr, "Warning: config file not found, using only env vars and flags\n") + } else if ok { + return nil, fmt.Errorf("config file not found: %w", err) + } else { + return nil, fmt.Errorf("error reading config: %w", err) + } + } + + var cfg Config + if err := v.Unmarshal(&cfg); err != nil { + return nil, fmt.Errorf("error unmarshaling config: %w", err) + } + return &cfg, nil +} diff --git a/internal/output/clickhouse.go b/internal/output/clickhouse.go index 377fb4c2..442d73a2 100644 --- a/internal/output/clickhouse.go +++ b/internal/output/clickhouse.go @@ -29,75 +29,112 @@ import ( "golang.org/x/sync/errgroup" ) -type clickhouseConfig struct { - ClickhouseAddress []string `long:"clickhouseaddress" ini-name:"clickhouseaddress" env:"DNSMONSTER_CLICKHOUSEADDRESS" default:"localhost:9000" description:"Address of the clickhouse database to save the results. 
multiple values can be provided."` - ClickhouseUsername string `long:"clickhouseusername" ini-name:"clickhouseusername" env:"DNSMONSTER_CLICKHOUSEUSERNAME" default:"" description:"Username to connect to the clickhouse database"` - ClickhousePassword string `long:"clickhousepassword" ini-name:"clickhousepassword" env:"DNSMONSTER_CLICKHOUSEPASSWORD" default:"" description:"Password to connect to the clickhouse database"` - ClickhouseDatabase string `long:"clickhousedatabase" ini-name:"clickhousedatabase" env:"DNSMONSTER_CLICKHOUSEDATABASE" default:"default" description:"Database to connect to the clickhouse database"` - ClickhouseDelay time.Duration `long:"clickhousedelay" ini-name:"clickhousedelay" env:"DNSMONSTER_CLICKHOUSEDELAY" default:"0s" description:"Interval between sending results to ClickHouse. If non-0, Batch size is ignored and batch delay is used"` - ClickhouseCompress uint8 `long:"clickhousecompress" ini-name:"clickhousecompress" env:"DNSMONSTER_CLICKHOUSECOMPRESS" description:"Clickhouse connection LZ4 compression level, 0 means no compression"` - ClickhouseDebug bool `long:"clickhousedebug" ini-name:"clickhousedebug" env:"DNSMONSTER_CLICKHOUSEDEBUG" description:"Debug Clickhouse connection"` - ClickhouseSecure bool `long:"clickhousesecure" ini-name:"clickhousesecure" env:"DNSMONSTER_CLICKHOUSESECURE" description:"Use TLS for Clickhouse connection"` - ClickhouseSaveFullQuery bool `long:"clickhousesavefullquery" ini-name:"clickhousesavefullquery" env:"DNSMONSTER_CLICKHOUSESAVEFULLQUERY" description:"Save full packet query and response in JSON format."` - ClickhouseOutputType uint `long:"clickhouseoutputtype" ini-name:"clickhouseoutputtype" env:"DNSMONSTER_CLICKHOUSEOUTPUTTYPE" default:"0" description:"What should be written to clickhouse. 
options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - ClickhouseBatchSize uint `long:"clickhousebatchsize" ini-name:"clickhousebatchsize" env:"DNSMONSTER_CLICKHOUSEBATCHSIZE" default:"100000" description:"Minimum capacity of the cache array used to send data to clickhouse. Set close to the queries per second received to prevent allocations"` - ClickhouseWorkers uint `long:"clickhouseworkers" ini-name:"clickhouseworkers" env:"DNSMONSTER_CLICKHOUSEWORKERS" default:"1" description:"Number of Clickhouse output Workers"` - ClickhouseWorkerChannelSize uint `long:"clickhouseworkerchannelsize" ini-name:"clickhouseworkerchannelsize" env:"DNSMONSTER_CLICKHOUSEWORKERCHANNELSIZE" default:"100000" description:"Channel Size for each Clickhouse Worker"` - outputChannel chan util.DNSResult - outputMarshaller util.OutputMarshaller - closeChannel chan bool +// ClickhouseConfig is the configuration and runtime struct for ClickHouse output. +type ClickhouseConfig struct { + BaseConfig + Address []string + Username string + Password string + Database string + Compress int + Debug bool + Secure bool + SaveFullQuery bool + Workers int + outputChannel chan util.DNSResult + closeChannel chan bool + Delay time.Duration + OutputType uint + outputMarshaller util.OutputMarshaller } -// init function runs at import time -func init() { - c := clickhouseConfig{} - if _, err := util.GlobalParser.AddGroup("clickhouse_output", "ClickHouse Output", &c); err != nil { - log.Fatalf("error adding output Module") +// NewClickhouseConfig creates a new ClickhouseConfig with default values. 
+func NewClickhouseConfig() *ClickhouseConfig { + return &ClickhouseConfig{ + outputChannel: nil, + closeChannel: nil, } - c.outputChannel = make(chan util.DNSResult, util.GeneralFlags.ResultChannelSize) - util.GlobalDispatchList = append(util.GlobalDispatchList, &c) } -// Initialize function should not block. otherwise the dispatcher will get stuck -func (chConfig clickhouseConfig) Initialize(ctx context.Context) error { - var err error - chConfig.outputMarshaller, _, err = util.OutputFormatToMarshaller("json", "") - if err != nil { - log.Warnf("Could not initialize output marshaller, removing output: %s", err) - return err +// WithAddress sets the Address and returns the config for chaining. +func (c *ClickhouseConfig) WithAddress(addr []string) *ClickhouseConfig { + c.Address = addr + return c +} +func (c *ClickhouseConfig) WithUsername(u string) *ClickhouseConfig { + c.Username = u + return c +} +func (c *ClickhouseConfig) WithPassword(p string) *ClickhouseConfig { + c.Password = p + return c +} +func (c *ClickhouseConfig) WithDatabase(db string) *ClickhouseConfig { + c.Database = db + return c +} +func (c *ClickhouseConfig) WithCompress(compr int) *ClickhouseConfig { + c.Compress = compr + return c +} +func (c *ClickhouseConfig) WithSecure(secure bool) *ClickhouseConfig { + c.Secure = secure + return c +} +func (c *ClickhouseConfig) WithSaveFullQuery(sfq bool) *ClickhouseConfig { + c.SaveFullQuery = sfq + return c +} +func (c *ClickhouseConfig) WithWorkers(w int) *ClickhouseConfig { + c.Workers = w + return c +} +func (c *ClickhouseConfig) WithChannelSize(size int) *ClickhouseConfig { + c.outputChannel = make(chan util.DNSResult, size) + c.closeChannel = make(chan bool) + return c +} + +func (c *ClickhouseConfig) IsEnabled() bool { + return c.Enabled +} + +// Initialize function should not block +func (chConfig *ClickhouseConfig) Initialize(ctx context.Context) error { + if !chConfig.Enabled { + return errors.New("output not enabled") } - if 
chConfig.ClickhouseOutputType > 0 && chConfig.ClickhouseOutputType < 5 { - log.Info("Creating Clickhouse Output Channel") - go chConfig.Output(ctx) - } else { - // we will catch this error in the dispatch loop and remove any output from the registry if they don't have the correct output type - return errors.New("no output") + if chConfig.BatchSize == 0 { + chConfig.BatchSize = 100000 } - if chConfig.ClickhouseCompress > 9 { - log.Warnf("invalid compression level provided. Things might break") + if chConfig.Workers == 0 { + chConfig.Workers = 1 } + if chConfig.Compress > 9 { + log.Warn("invalid compression level provided") + } + + log.Info("Creating Clickhouse Output Channel") + go chConfig.Output(ctx) return nil } -func (chConfig clickhouseConfig) Close() { +func (chConfig *ClickhouseConfig) Close() { // todo: implement this <-chConfig.closeChannel } -func (chConfig clickhouseConfig) OutputChannel() chan util.DNSResult { +func (chConfig *ClickhouseConfig) OutputChannel() chan util.DNSResult { return chConfig.outputChannel } -func (chConfig clickhouseConfig) connectClickhouseRetry(ctx context.Context) (driver.Conn, driver.Batch) { +func (chConfig *ClickhouseConfig) connectClickhouseRetry(ctx context.Context) (driver.Conn, driver.Batch) { tick := time.NewTicker(5 * time.Second) - // don't retry connection if we're doing dry run - if chConfig.ClickhouseOutputType == 0 { - tick.Stop() - } defer tick.Stop() + for { c, b, err := chConfig.connectClickhouse(ctx) if err == nil { @@ -105,38 +142,39 @@ func (chConfig clickhouseConfig) connectClickhouseRetry(ctx context.Context) (dr } log.Errorf("Error connecting to Clickhouse: %s", err) - // todo: try and create table if it doesn't exist - - // Error getting connection, wait the timer or check if we are exiting - <-tick.C - continue + select { + case <-tick.C: + continue + case <-ctx.Done(): + return nil, nil + } } } -func (chConfig clickhouseConfig) connectClickhouse(ctx context.Context) (driver.Conn, driver.Batch, error) { 
+func (chConfig *ClickhouseConfig) connectClickhouse(ctx context.Context) (driver.Conn, driver.Batch, error) { compressOption := clickhouse.Compression{Method: clickhouse.CompressionNone, Level: 0} - if chConfig.ClickhouseCompress > 0 { - compressOption = clickhouse.Compression{Method: clickhouse.CompressionLZ4, Level: int(chConfig.ClickhouseCompress)} + if chConfig.Compress > 0 { + compressOption = clickhouse.Compression{Method: clickhouse.CompressionLZ4, Level: int(chConfig.Compress)} } tlsOption := &tls.Config{InsecureSkipVerify: util.GeneralFlags.SkipTLSVerification} - if !chConfig.ClickhouseSecure { + if !chConfig.Secure { tlsOption = nil } connection, err := clickhouse.Open(&clickhouse.Options{ - Addr: chConfig.ClickhouseAddress, + Addr: chConfig.Address, Auth: clickhouse.Auth{ - Database: chConfig.ClickhouseDatabase, - Username: chConfig.ClickhouseUsername, - Password: chConfig.ClickhousePassword, + Database: chConfig.Database, + Username: chConfig.Username, + Password: chConfig.Password, }, DialTimeout: time.Second * 2, MaxOpenConns: 32, MaxIdleConns: 16, ConnMaxLifetime: time.Hour, TLS: tlsOption, - Debug: chConfig.ClickhouseDebug, + Debug: chConfig.Debug, Compression: &compressOption, }) // connection, err := clickhouse.Open(fmt.Sprintf("tcp://%v?debug=%v&skip_verify=%v&secure=%v&compress=%v&username=%s&password=%s&database=%s", chConfig.ClickhouseAddress, chConfig.ClickhouseDebug, util.GeneralFlags.SkipTLSVerification, chConfig.ClickhouseSecure, chConfig.ClickhouseCompress, chConfig.ClickhouseUsername, chConfig.ClickhousePassword, chConfig.ClickhouseDatabase)) @@ -157,14 +195,14 @@ the table structure of Clickhouse is hardcoded into the code so before outputtin needs to make sure that there is proper Database connection and table are present. 
Refer to the project's clickhouse folder for the file tables.sql */ -func (chConfig clickhouseConfig) Output(ctx context.Context) { +func (chConfig *ClickhouseConfig) Output(ctx context.Context) { g, gCtx := errgroup.WithContext(ctx) - for i := 0; i < int(chConfig.ClickhouseWorkers); i++ { + for i := 0; i < int(chConfig.Workers); i++ { g.Go(func() error { return chConfig.clickhouseOutputWorker(gCtx) }) } } -func (chConfig clickhouseConfig) clickhouseOutputWorker(ctx context.Context) error { +func (chConfig *ClickhouseConfig) clickhouseOutputWorker(ctx context.Context) error { conn, batch := chConfig.connectClickhouseRetry(ctx) clickhouseSentToOutput := metrics.GetOrRegisterCounter("clickhouseSentToOutput", metrics.DefaultRegistry) clickhouseSkipped := metrics.GetOrRegisterCounter("clickhouseSkipped", metrics.DefaultRegistry) @@ -175,10 +213,10 @@ func (chConfig clickhouseConfig) clickhouseOutputWorker(ctx context.Context) err ticker := time.NewTicker(time.Second * 5) div := 0 - if chConfig.ClickhouseDelay > 0 { - chConfig.ClickhouseBatchSize = 1 + if chConfig.Delay > 0 { + chConfig.BatchSize = 1 div = -1 - ticker = time.NewTicker(chConfig.ClickhouseDelay) + ticker = time.NewTicker(chConfig.Delay) } else { ticker.Stop() } @@ -188,14 +226,14 @@ func (chConfig clickhouseConfig) clickhouseOutputWorker(ctx context.Context) err case data := <-chConfig.outputChannel: for _, dnsQuery := range data.DNS.Question { c++ - if util.CheckIfWeSkip(chConfig.ClickhouseOutputType, dnsQuery.Name) { + if util.CheckIfWeSkip(chConfig.OutputType, dnsQuery.Name) { clickhouseSkipped.Inc(1) continue } clickhouseSentToOutput.Inc(1) fullQuery := "" - if chConfig.ClickhouseSaveFullQuery { + if chConfig.SaveFullQuery { fullQuery = string(chConfig.outputMarshaller.Marshal(data)) } @@ -233,7 +271,7 @@ func (chConfig clickhouseConfig) clickhouseOutputWorker(ctx context.Context) err log.Warnf("Error while executing batch: %v", err) clickhouseFailed.Inc(1) } - if int(c%chConfig.ClickhouseBatchSize) 
== div { + if int(c%chConfig.BatchSize) == div { err = batch.Send() if err != nil { log.Warnf("Error while executing batch: %v", err) diff --git a/internal/output/elastic.go b/internal/output/elastic.go index a5f67e6b..6138fc4b 100644 --- a/internal/output/elastic.go +++ b/internal/output/elastic.go @@ -29,28 +29,69 @@ import ( "github.com/olivere/elastic" ) -type elasticConfig struct { - ElasticOutputType uint `long:"elasticoutputtype" ini-name:"elasticoutputtype" env:"DNSMONSTER_ELASTICOUTPUTTYPE" default:"0" description:"What should be written to elastic. options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - ElasticOutputEndpoint string `long:"elasticoutputendpoint" ini-name:"elasticoutputendpoint" env:"DNSMONSTER_ELASTICOUTPUTENDPOINT" default:"" description:"elastic endpoint address, example: http://127.0.0.1:9200. Used if elasticOutputType is not none"` - ElasticOutputIndex string `long:"elasticoutputindex" ini-name:"elasticoutputindex" env:"DNSMONSTER_ELASTICOUTPUTINDEX" default:"default" description:"elastic index"` - ElasticBatchSize uint `long:"elasticbatchsize" ini-name:"elasticbatchsize" env:"DNSMONSTER_ELASTICBATCHSIZE" default:"1000" description:"Send data to Elastic in batch sizes"` - ElasticBatchDelay time.Duration `long:"elasticbatchdelay" ini-name:"elasticbatchdelay" env:"DNSMONSTER_ELASTICBATCHDELAY" default:"1s" description:"Interval between sending results to Elastic if Batch size is not filled"` - outputChannel chan util.DNSResult - outputMarshaller util.OutputMarshaller - closeChannel chan bool +// (OutputConfig interface now defined in output.go) + +// ElasticConfig is the configuration and runtime struct for Elastic output. 
+type ElasticConfig struct { + OutputType uint + Address []string + OutputIndex string + BatchSize uint + BatchDelay time.Duration + outputChannel chan util.DNSResult + outputMarshaller util.OutputMarshaller + closeChannel chan bool } -func init() { - c := elasticConfig{} - if _, err := util.GlobalParser.AddGroup("elastic_output", "Elastic Output", &c); err != nil { - log.Fatalf("error adding output Module") +// NewElasticConfig creates a new ElasticConfig with default values. +func NewElasticConfig() *ElasticConfig { + return &ElasticConfig{ + outputChannel: nil, + closeChannel: nil, } - c.outputChannel = make(chan util.DNSResult, util.GeneralFlags.ResultChannelSize) - util.GlobalDispatchList = append(util.GlobalDispatchList, &c) } +// WithOutputType sets the OutputType and returns the config for chaining. +func (c *ElasticConfig) WithOutputType(t uint) *ElasticConfig { + c.OutputType = t + return c +} + +// WithAddress sets the Address and returns the config for chaining. +func (c *ElasticConfig) WithAddress(addr []string) *ElasticConfig { + c.Address = addr + return c +} + +// WithOutputIndex sets the OutputIndex and returns the config for chaining. +func (c *ElasticConfig) WithOutputIndex(index string) *ElasticConfig { + c.OutputIndex = index + return c +} + +// WithBatchSize sets the BatchSize and returns the config for chaining. +func (c *ElasticConfig) WithBatchSize(size uint) *ElasticConfig { + c.BatchSize = size + return c +} + +// WithBatchDelay sets the BatchDelay and returns the config for chaining. +func (c *ElasticConfig) WithBatchDelay(delay time.Duration) *ElasticConfig { + c.BatchDelay = delay + return c +} + +// WithChannelSize initializes the output and close channels and returns the config for chaining. 
+func (c *ElasticConfig) WithChannelSize(channelSize int) *ElasticConfig { + c.outputChannel = make(chan util.DNSResult, channelSize) + c.closeChannel = make(chan bool) + return c +} + +// Configuration for Elastic output is now provided via the main TOML config and passed in at runtime. + // initialize function should not block. otherwise the dispatcher will get stuck -func (esConfig elasticConfig) Initialize(ctx context.Context) error { +func (esConfig *ElasticConfig) Initialize(ctx context.Context) error { var err error esConfig.outputMarshaller, _, err = util.OutputFormatToMarshaller("json", "") if err != nil { @@ -58,7 +99,7 @@ func (esConfig elasticConfig) Initialize(ctx context.Context) error { return err } - if esConfig.ElasticOutputType > 0 && esConfig.ElasticOutputType < 5 { + if esConfig.OutputType > 0 && esConfig.OutputType < 5 { log.Info("Creating Elastic Output Channel") go esConfig.Output(ctx) } else { @@ -68,22 +109,22 @@ func (esConfig elasticConfig) Initialize(ctx context.Context) error { return nil } -func (esConfig elasticConfig) Close() { +func (esConfig *ElasticConfig) Close() { // todo: implement this <-esConfig.closeChannel } -func (esConfig elasticConfig) OutputChannel() chan util.DNSResult { +func (esConfig *ElasticConfig) OutputChannel() chan util.DNSResult { return esConfig.outputChannel } // var elasticUuidGen = fastuuid.MustNewGenerator() // var ctx = context.Background() -func (esConfig elasticConfig) connectelasticRetry(ctx context.Context) *elastic.Client { +func (esConfig *ElasticConfig) connectelasticRetry(ctx context.Context) *elastic.Client { tick := time.NewTicker(5 * time.Second) // don't retry connection if we're doing dry run - if esConfig.ElasticOutputType == 0 { + if esConfig.OutputType == 0 { tick.Stop() } defer tick.Stop() @@ -100,7 +141,7 @@ func (esConfig elasticConfig) connectelasticRetry(ctx context.Context) *elastic. 
} } -func (esConfig elasticConfig) connectelastic(ctx context.Context) (*elastic.Client, error) { +func (esConfig *ElasticConfig) connectelastic(ctx context.Context) (*elastic.Client, error) { tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: util.GeneralFlags.SkipTLSVerification}, } @@ -108,7 +149,7 @@ func (esConfig elasticConfig) connectelastic(ctx context.Context) (*elastic.Clie client, err := elastic.NewClient( elastic.SetHttpClient(httpClient), - elastic.SetURL(esConfig.ElasticOutputEndpoint), + elastic.SetURL(esConfig.Address...), elastic.SetSniff(false), elastic.SetHealthcheckInterval(10*time.Second), // elastic.SetRetrier(connectelasticRetry(exiting, elasticEndpoint)), @@ -120,7 +161,7 @@ func (esConfig elasticConfig) connectelastic(ctx context.Context) (*elastic.Clie } // Ping the Elasticsearch server to get e.g. the version number - info, code, err := client.Ping(esConfig.ElasticOutputEndpoint).Do(ctx) + info, code, err := client.Ping(esConfig.Address[0]).Do(ctx) if err != nil { log.Fatal(err) } @@ -129,21 +170,21 @@ func (esConfig elasticConfig) connectelastic(ctx context.Context) (*elastic.Clie return client, err } -func (esConfig elasticConfig) Output(ctx context.Context) { +func (esConfig *ElasticConfig) Output(ctx context.Context) { client := esConfig.connectelasticRetry(ctx) - batch := make([]util.DNSResult, 0, esConfig.ElasticBatchSize) + batch := make([]util.DNSResult, 0, esConfig.BatchSize) - ticker := time.NewTicker(esConfig.ElasticBatchDelay) + ticker := time.NewTicker(esConfig.BatchDelay) // Use the IndexExists service to check if a specified index exists. - exists, err := client.IndexExists(esConfig.ElasticOutputIndex).Do(ctx) + exists, err := client.IndexExists(esConfig.OutputIndex).Do(ctx) if err != nil { log.Fatal(err) } if !exists { // Create a new index. 
- createIndex, err := client.CreateIndex(esConfig.ElasticOutputIndex).Do(ctx) + createIndex, err := client.CreateIndex(esConfig.OutputIndex).Do(ctx) if err != nil { log.Fatal(err) } @@ -164,20 +205,20 @@ func (esConfig elasticConfig) Output(ctx context.Context) { log.Info(err) client = esConfig.connectelasticRetry(ctx) } else { - batch = make([]util.DNSResult, 0, esConfig.ElasticBatchSize) + batch = make([]util.DNSResult, 0, esConfig.BatchSize) } } } } -func (esConfig elasticConfig) elasticSendData(ctx context.Context, client *elastic.Client, batch []util.DNSResult) error { +func (esConfig *ElasticConfig) elasticSendData(ctx context.Context, client *elastic.Client, batch []util.DNSResult) error { elasticSentToOutput := metrics.GetOrRegisterCounter("elasticSentToOutput", metrics.DefaultRegistry) elasticSkipped := metrics.GetOrRegisterCounter("elasticSkipped", metrics.DefaultRegistry) for i := range batch { for _, dnsQuery := range batch[i].DNS.Question { - if util.CheckIfWeSkip(esConfig.ElasticOutputType, dnsQuery.Name) { + if util.CheckIfWeSkip(esConfig.OutputType, dnsQuery.Name) { elasticSkipped.Inc(1) continue } @@ -186,7 +227,7 @@ func (esConfig elasticConfig) elasticSendData(ctx context.Context, client *elast // batch[i].UUID = elasticUuidGen.Hex128() _, err := client.Index(). - Index(esConfig.ElasticOutputIndex). + Index(esConfig.OutputIndex). Type("_doc"). BodyString(string(esConfig.outputMarshaller.Marshal(batch[i]))). 
Do(ctx) @@ -195,7 +236,7 @@ func (esConfig elasticConfig) elasticSendData(ctx context.Context, client *elast } } } - _, err := client.Flush().Index(esConfig.ElasticOutputIndex).Do(ctx) + _, err := client.Flush().Index(esConfig.OutputIndex).Do(ctx) return err } diff --git a/internal/output/file.go b/internal/output/file.go index 54c6bd41..8545ceb8 100644 --- a/internal/output/file.go +++ b/internal/output/file.go @@ -28,17 +28,72 @@ import ( log "github.com/sirupsen/logrus" ) +// FileConfig is the configuration and runtime struct for File output type fileConfig struct { - FileOutputType uint `long:"fileoutputtype" ini-name:"fileoutputtype" env:"DNSMONSTER_FILEOUTPUTTYPE" default:"0" description:"What should be written to file. options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - FileOutputPath flags.Filename `long:"fileoutputpath" ini-name:"fileoutputpath" env:"DNSMONSTER_FILEOUTPUTPATH" default:"" description:"Path to output folder. Used if fileoutputType is not none"` - FileOutputRotateCron string `long:"fileoutputrotatecron" ini-name:"fileoutputrotatecron" env:"DNSMONSTER_FILEOUTPUTROTATECRON" default:"0 0 * * *" description:"Interval to rotate the file in cron format"` - FileOutputRotateCount uint `long:"fileoutputrotatecount" ini-name:"fileoutputrotatecount" env:"DNSMONSTER_FILEOUTPUTROTATECOUNT" default:"4" description:"Number of files to keep. 0 to disable rotation"` - FileOutputFormat string `long:"fileoutputformat" ini-name:"fileoutputformat" env:"DNSMONSTER_FILEOUTPUTFORMAT" default:"json" description:"Output format for file. options:json, csv, csv_no_header, gotemplate. 
note that the csv splits the datetime format into multiple fields" choice:"json" choice:"csv" choice:"csv_no_header" choice:"gotemplate"` - FileOutputGoTemplate string `long:"fileoutputgotemplate" ini-name:"fileoutputgotemplate" env:"DNSMONSTER_FILEOUTPUTGOTEMPLATE" default:"{{.}}" description:"Go Template to format the output as needed"` - outputChannel chan util.DNSResult - closeChannel chan bool - outputMarshaller util.OutputMarshaller - writer rollingwriter.RollingWriter + // Configuration options + OutputType uint `long:"fileoutputtype" ini-name:"fileoutputtype" env:"DNSMONSTER_FILEOUTPUTTYPE" default:"0" description:"What should be written to file. options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` + OutputPath flags.Filename `long:"fileoutputpath" ini-name:"fileoutputpath" env:"DNSMONSTER_FILEOUTPUTPATH" default:"" description:"Path to output folder. Used if fileoutputType is not none"` + RotateCron string `long:"fileoutputrotatecron" ini-name:"fileoutputrotatecron" env:"DNSMONSTER_FILEOUTPUTROTATECRON" default:"0 0 * * *" description:"Interval to rotate the file in cron format"` + RotateCount uint `long:"fileoutputrotatecount" ini-name:"fileoutputrotatecount" env:"DNSMONSTER_FILEOUTPUTROTATECOUNT" default:"4" description:"Number of files to keep. 0 to disable rotation"` + OutputFormat string `long:"fileoutputformat" ini-name:"fileoutputformat" env:"DNSMONSTER_FILEOUTPUTFORMAT" default:"json" description:"Output format for file. options:json, csv, csv_no_header, gotemplate. 
note that the csv splits the datetime format into multiple fields" choice:"json" choice:"csv" choice:"csv_no_header" choice:"gotemplate"` + GoTemplate string `long:"fileoutputgotemplate" ini-name:"fileoutputgotemplate" env:"DNSMONSTER_FILEOUTPUTGOTEMPLATE" default:"{{.}}" description:"Go Template to format the output as needed"` + + // Runtime resources + outputChannel chan util.DNSResult + closeChannel chan bool + outputMarshaller util.OutputMarshaller + writer rollingwriter.RollingWriter +} + +// NewFileConfig creates a new FileConfig with default values +func NewFileConfig() *fileConfig { + return &fileConfig{ + outputChannel: nil, + closeChannel: nil, + } +} + +// WithOutputType sets the OutputType and returns the config for chaining +func (c *fileConfig) WithOutputType(t uint) *fileConfig { + c.OutputType = t + return c +} + +// WithOutputPath sets the OutputPath and returns the config for chaining +func (c *fileConfig) WithOutputPath(path flags.Filename) *fileConfig { + c.OutputPath = path + return c +} + +// WithRotateCron sets the RotateCron and returns the config for chaining +func (c *fileConfig) WithRotateCron(cron string) *fileConfig { + c.RotateCron = cron + return c +} + +// WithRotateCount sets the RotateCount and returns the config for chaining +func (c *fileConfig) WithRotateCount(count uint) *fileConfig { + c.RotateCount = count + return c +} + +// WithOutputFormat sets the OutputFormat and returns the config for chaining +func (c *fileConfig) WithOutputFormat(format string) *fileConfig { + c.OutputFormat = format + return c +} + +// WithGoTemplate sets the GoTemplate and returns the config for chaining +func (c *fileConfig) WithGoTemplate(template string) *fileConfig { + c.GoTemplate = template + return c +} + +// WithChannelSize initializes the output and close channels and returns the config for chaining +func (c *fileConfig) WithChannelSize(channelSize int) *fileConfig { + c.outputChannel = make(chan util.DNSResult, channelSize) + 
c.closeChannel = make(chan bool) + return c } func init() { @@ -55,22 +110,22 @@ func init() { func (config fileConfig) Initialize(ctx context.Context) error { var err error var header string - config.outputMarshaller, header, err = util.OutputFormatToMarshaller(config.FileOutputFormat, config.FileOutputGoTemplate) + config.outputMarshaller, header, err = util.OutputFormatToMarshaller(config.OutputFormat, config.GoTemplate) if err != nil { log.Warnf("Could not initialize output marshaller, removing output: %s", err) return err } - if config.FileOutputType > 0 && config.FileOutputType < 5 { + if config.OutputType > 0 && config.OutputType < 5 { log.Info("Creating File Output Channel") rollerConfig := &rollingwriter.Config{ - LogPath: string(config.FileOutputPath), + LogPath: string(config.OutputPath), TimeTagFormat: time.RFC3339, FileName: "dnsmonster", - MaxRemain: int(config.FileOutputRotateCount), + MaxRemain: int(config.RotateCount), RollingPolicy: rollingwriter.TimeRolling, - RollingTimePattern: fmt.Sprintf("0 %s", config.FileOutputRotateCron), // remove the second option from the cron to make it compatible with unix style + RollingTimePattern: fmt.Sprintf("0 %s", config.RotateCron), // remove the second option from the cron to make it compatible with unix style RollingVolumeSize: "0", WriterMode: "lock", BufferWriterThershould: 64, @@ -105,32 +160,38 @@ func (config fileConfig) OutputChannel() chan util.DNSResult { return config.outputChannel } -func (config fileConfig) Output(ctx context.Context) { +func (config *fileConfig) Output(ctx context.Context) { fileSentToOutput := metrics.GetOrRegisterCounter("fileSentToOutput", metrics.DefaultRegistry) fileSkipped := metrics.GetOrRegisterCounter("fileSkipped", metrics.DefaultRegistry) - // todo: output channel will duplicate output when we have malformed DNS packets with multiple questions + batch := make([]util.DNSResult, 0, util.GeneralFlags.ResultChannelSize) + ticker := time.NewTicker(time.Second * 5) + for { 
select { case data := <-config.outputChannel: - for _, dnsQuery := range data.DNS.Question { - - if util.CheckIfWeSkip(config.FileOutputType, dnsQuery.Name) { - fileSkipped.Inc(1) - continue - } - fileSentToOutput.Inc(1) - _, err := config.writer.Write(config.outputMarshaller.Marshal(data)) - if err != nil { - log.Fatal(err) + if util.GeneralFlags.PacketLimit == 0 || len(batch) < util.GeneralFlags.PacketLimit { + batch = append(batch, data) + } + case <-ticker.C: + for _, item := range batch { + for _, dnsQuery := range item.DNS.Question { + if util.CheckIfWeSkip(config.OutputType, dnsQuery.Name) { + fileSkipped.Inc(1) + continue + } + fileSentToOutput.Inc(1) + _, err := config.writer.Write(config.outputMarshaller.Marshal(item)) + if err != nil { + log.Warnf("Error writing to file: %v", err) + } + _, _ = config.writer.Write([]byte("\n")) } - _, _ = config.writer.Write([]byte("\n")) - } - + batch = batch[:0] case <-ctx.Done(): config.writer.Close() - log.Debug("exiting out of file output") //todo:remove + log.Debug("Exiting file output") return } } diff --git a/internal/output/influx.go b/internal/output/influx.go index d41e3888..960030e0 100644 --- a/internal/output/influx.go +++ b/internal/output/influx.go @@ -26,16 +26,77 @@ import ( log "github.com/sirupsen/logrus" ) +// InfluxConfig is the configuration and runtime struct for InfluxDB output type influxConfig struct { - InfluxOutputType uint `long:"influxoutputtype" ini-name:"influxoutputtype" env:"DNSMONSTER_INFLUXOUTPUTTYPE" default:"0" description:"What should be written to influx. 
options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - InfluxOutputServer string `long:"influxoutputserver" ini-name:"influxoutputserver" env:"DNSMONSTER_INFLUXOUTPUTSERVER" default:"" description:"influx Server address, example: http://localhost:8086. Used if influxOutputType is not none"` - InfluxOutputToken string `long:"influxoutputtoken" ini-name:"influxoutputtoken" env:"DNSMONSTER_INFLUXOUTPUTTOKEN" default:"dnsmonster" description:"Influx Server Auth Token"` - InfluxOutputBucket string `long:"influxoutputbucket" ini-name:"influxoutputbucket" env:"DNSMONSTER_INFLUXOUTPUTBUCKET" default:"dnsmonster" description:"Influx Server Bucket"` - InfluxOutputOrg string `long:"influxoutputorg" ini-name:"influxoutputorg" env:"DNSMONSTER_INFLUXOUTPUTORG" default:"dnsmonster" description:"Influx Server Org"` - InfluxOutputWorkers uint `long:"influxoutputworkers" ini-name:"influxoutputworkers" env:"DNSMONSTER_INFLUXOUTPUTWORKERS" default:"8" description:"Minimum capacity of the cache array used to send data to Influx"` - InfluxBatchSize uint `long:"influxbatchsize" ini-name:"influxbatchsize" env:"DNSMONSTER_INFLUXBATCHSIZE" default:"1000" description:"Minimum capacity of the cache array used to send data to Influx"` - outputChannel chan util.DNSResult - closeChannel chan bool + // Configuration options + OutputType uint `long:"influxoutputtype" ini-name:"influxoutputtype" env:"DNSMONSTER_INFLUXOUTPUTTYPE" default:"0" description:"What should be written to influx. 
options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` + OutputServer string `long:"influxoutputserver" ini-name:"influxoutputserver" env:"DNSMONSTER_INFLUXOUTPUTSERVER" default:"" description:"influx Server address, example: http://localhost:8086. Used if influxOutputType is not none"` + OutputToken string `long:"influxoutputtoken" ini-name:"influxoutputtoken" env:"DNSMONSTER_INFLUXOUTPUTTOKEN" default:"dnsmonster" description:"Influx Server Auth Token"` + OutputBucket string `long:"influxoutputbucket" ini-name:"influxoutputbucket" env:"DNSMONSTER_INFLUXOUTPUTBUCKET" default:"dnsmonster" description:"Influx Server Bucket"` + OutputOrg string `long:"influxoutputorg" ini-name:"influxoutputorg" env:"DNSMONSTER_INFLUXOUTPUTORG" default:"dnsmonster" description:"Influx Server Org"` + OutputWorkers uint `long:"influxoutputworkers" ini-name:"influxoutputworkers" env:"DNSMONSTER_INFLUXOUTPUTWORKERS" default:"8" description:"Minimum capacity of the cache array used to send data to Influx"` + BatchSize uint `long:"influxbatchsize" ini-name:"influxbatchsize" env:"DNSMONSTER_INFLUXBATCHSIZE" default:"1000" description:"Minimum capacity of the cache array used to send data to Influx"` + + // Runtime resources + outputChannel chan util.DNSResult + closeChannel chan bool +} + +// NewInfluxConfig creates a new InfluxConfig with default values +func NewInfluxConfig() *influxConfig { + return &influxConfig{ + outputChannel: nil, + closeChannel: nil, + } +} + +// WithOutputType sets the OutputType and returns the config for chaining +func (c *influxConfig) WithOutputType(t uint) *influxConfig { + c.OutputType = t + return c +} + +// WithOutputServer sets the OutputServer and returns the config for chaining +func (c *influxConfig) 
WithOutputServer(server string) *influxConfig { + c.OutputServer = server + return c +} + +// WithOutputToken sets the OutputToken and returns the config for chaining +func (c *influxConfig) WithOutputToken(token string) *influxConfig { + c.OutputToken = token + return c +} + +// WithOutputBucket sets the OutputBucket and returns the config for chaining +func (c *influxConfig) WithOutputBucket(bucket string) *influxConfig { + c.OutputBucket = bucket + return c +} + +// WithOutputOrg sets the OutputOrg and returns the config for chaining +func (c *influxConfig) WithOutputOrg(org string) *influxConfig { + c.OutputOrg = org + return c +} + +// WithOutputWorkers sets the OutputWorkers and returns the config for chaining +func (c *influxConfig) WithOutputWorkers(workers uint) *influxConfig { + c.OutputWorkers = workers + return c +} + +// WithBatchSize sets the BatchSize and returns the config for chaining +func (c *influxConfig) WithBatchSize(size uint) *influxConfig { + c.BatchSize = size + return c +} + +// WithChannelSize initializes the output and close channels and returns the config for chaining +func (c *influxConfig) WithChannelSize(channelSize int) *influxConfig { + c.outputChannel = make(chan util.DNSResult, channelSize) + c.closeChannel = make(chan bool) + return c } func init() { @@ -49,7 +110,7 @@ func init() { // Initialize function should not block. 
otherwise the dispatcher will get stuck func (c influxConfig) Initialize(ctx context.Context) error { - if c.InfluxOutputType > 0 && c.InfluxOutputType < 5 { + if c.OutputType > 0 && c.OutputType < 5 { log.Info("Creating Influx Output Channel") go c.Output(ctx) } else { @@ -71,7 +132,7 @@ func (c influxConfig) OutputChannel() chan util.DNSResult { func (c influxConfig) connectInfluxRetry() influxdb2.Client { tick := time.NewTicker(5 * time.Second) // don't retry connection if we're doing dry run - if c.InfluxOutputType == 0 { + if c.OutputType == 0 { tick.Stop() } defer tick.Stop() @@ -89,13 +150,51 @@ func (c influxConfig) connectInfluxRetry() influxdb2.Client { } func (c influxConfig) connectInflux() influxdb2.Client { - client := influxdb2.NewClientWithOptions(c.InfluxOutputServer, c.InfluxOutputToken, influxdb2.DefaultOptions().SetBatchSize(c.InfluxBatchSize)) + client := influxdb2.NewClientWithOptions(c.OutputServer, c.OutputToken, influxdb2.DefaultOptions().SetBatchSize(c.BatchSize)) return client } func (c influxConfig) Output(ctx context.Context) { - for i := 0; i < int(c.InfluxOutputWorkers); i++ { - go c.InfluxWorker() + influxSentToOutput := metrics.GetOrRegisterCounter("influxSentToOutput", metrics.DefaultRegistry) + influxSkipped := metrics.GetOrRegisterCounter("influxSkipped", metrics.DefaultRegistry) + + client := c.connectInfluxRetry() + writeAPI := client.WriteAPI(c.OutputOrg, c.OutputBucket) + batch := make([]util.DNSResult, 0, c.BatchSize) + ticker := time.NewTicker(time.Second * 5) + + for { + select { + case data := <-c.outputChannel: + batch = append(batch, data) + case <-ticker.C: + for _, item := range batch { + for _, dnsQuery := range item.DNS.Question { + if util.CheckIfWeSkip(c.OutputType, dnsQuery.Name) { + influxSkipped.Inc(1) + continue + } + influxSentToOutput.Inc(1) + row := map[string]interface{}{ + "ipversion": item.IPVersion, + "SrcIP": item.SrcIP, + "DstIP": item.DstIP, + "protocol": item.Protocol, + "qr": item.DNS.Response, + 
"question": dnsQuery.Name, + } + p := influxdb2.NewPoint("dns", nil, row, item.Timestamp) + writeAPI.WritePoint(p) + } + } + writeAPI.Flush() + batch = batch[:0] + case <-ctx.Done(): + writeAPI.Flush() + client.Close() + log.Debug("Exiting Influx output") + return + } } } @@ -103,11 +202,11 @@ func (c influxConfig) InfluxWorker() { influxSentToOutput := metrics.GetOrRegisterCounter("influxSentToOutput", metrics.DefaultRegistry) influxSkipped := metrics.GetOrRegisterCounter("stdoutSkipped", metrics.DefaultRegistry) client := c.connectInfluxRetry() - writeAPI := client.WriteAPI(c.InfluxOutputOrg, c.InfluxOutputBucket) + writeAPI := client.WriteAPI(c.OutputOrg, c.OutputBucket) for data := range c.outputChannel { for _, dnsQuery := range data.DNS.Question { - if util.CheckIfWeSkip(c.InfluxOutputType, dnsQuery.Name) { + if util.CheckIfWeSkip(c.OutputType, dnsQuery.Name) { influxSkipped.Inc(1) continue } diff --git a/internal/output/output.go b/internal/output/output.go index b7f218fa..b7a121d8 100644 --- a/internal/output/output.go +++ b/internal/output/output.go @@ -1,22 +1,39 @@ -/* {{{ Copyright (C) 2022 Ali Mosajjal - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . }}} */ - -// Package output registers different output methods for dnsmonster. -// each output will register itself by running the init function. 
-// in the main package, if the output type is zero, the output will automatically -// be de-registered from dispatch. each output can provide its own specific flags -// and also benefit from generalflags using the `util` package package output -// vim: foldmethod=marker + +import ( + "context" + "time" + + "github.com/mosajjal/dnsmonster/internal/util" +) + +// BaseConfig contains common configuration fields for all outputs +type BaseConfig struct { + Enabled bool `mapstructure:"enabled"` + BatchSize uint `mapstructure:"batch_size"` + BatchDelay time.Duration `mapstructure:"batch_delay"` + FilterMode string `mapstructure:"filter_mode"` // none, skipdomains, allowdomains, both + MaxQueueSize int `mapstructure:"max_queue_size"` +} + +// FilterModeFromString converts string filter mode to integer +func (b *BaseConfig) FilterModeFromString() uint { + switch b.FilterMode { + case "skipdomains": + return 2 + case "allowdomains": + return 3 + case "both": + return 4 + default: + return 1 + } +} + +// OutputConfig is a common interface for all output configs. +type OutputConfig interface { + Initialize(ctx context.Context) error + OutputChannel() chan util.DNSResult + Close() + IsEnabled() bool +} diff --git a/internal/output/parquet.go b/internal/output/parquet.go index 5f565304..bdddb8ec 100644 --- a/internal/output/parquet.go +++ b/internal/output/parquet.go @@ -31,21 +31,68 @@ import ( log "github.com/sirupsen/logrus" ) +// ParquetConfig is the configuration and runtime struct for Parquet output type parquetConfig struct { - ParquetOutputType uint `long:"parquetoutputtype" ini-name:"parquetoutputtype" env:"DNSMONSTER_PARQUETOUTPUTTYPE" default:"0" description:"What should be written to parquet file. 
options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - ParquetOutputPath flags.Filename `long:"parquetoutputpath" ini-name:"parquetoutputpath" env:"DNSMONSTER_PARQUETOUTPUTPATH" default:"" description:"Path to output folder. Used if parquetoutputtype is not none"` - // ParquetOutputRotateCron string `long:"parquetoutputrotatecron" ini-name:"parquetoutputrotatecron" env:"DNSMONSTER_PARQUETOUTPUTROTATECRON" default:"0 0 * * *" description:"Interval to rotate the parquet file in cron format"` - // ParquetOutputRotateCount uint `long:"parquetoutputrotatecount" ini-name:"parquetoutputrotatecount" env:"DNSMONSTER_PARQUETOUTPUTROTATECOUNT" default:"4" description:"Number of parquet files to keep. 0 to disable rotation"` - ParquetFlushBatchSize uint `long:"parquetflushbatchsize" ini-name:"parquetflushbatchsize" env:"DNSMONSTER_PARQUETFLUSHBATCHSIZE" default:"10000" description:"Number of records to write to parquet file before flushing"` - ParquetWorkerCount uint `long:"parquetworkercount" ini-name:"parquetworkercount" env:"DNSMONSTER_PARQUETWORKERCOUNT" default:"4" description:"Number of workers to write to parquet file"` - ParquetWriteBufferSize uint `long:"parquetwritebuffersize" ini-name:"parquetwritebuffersize" env:"DNSMONSTER_PARQUETWRITEBUFFERSIZE" default:"256000" description:"Size of the write buffer in bytes"` - outputChannel chan util.DNSResult - closeChannel chan bool - writer io.WriteCloser - parquetWriter *parquet.GenericWriter[parquetRow] - parquetWriterLock *sync.RWMutex - parquetSentToOutput metrics.Counter - parquetSkipped metrics.Counter + // Configuration options + OutputType uint `long:"parquetoutputtype" ini-name:"parquetoutputtype" env:"DNSMONSTER_PARQUETOUTPUTTYPE" default:"0" description:"What should be written 
to parquet file. options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` + OutputPath flags.Filename `long:"parquetoutputpath" ini-name:"parquetoutputpath" env:"DNSMONSTER_PARQUETOUTPUTPATH" default:"" description:"Path to output folder. Used if parquetoutputtype is not none"` + FlushBatchSize uint `long:"parquetflushbatchsize" ini-name:"parquetflushbatchsize" env:"DNSMONSTER_PARQUETFLUSHBATCHSIZE" default:"10000" description:"Number of records to write to parquet file before flushing"` + WorkerCount uint `long:"parquetworkercount" ini-name:"parquetworkercount" env:"DNSMONSTER_PARQUETWORKERCOUNT" default:"4" description:"Number of workers to write to parquet file"` + WriteBufferSize uint `long:"parquetwritebuffersize" ini-name:"parquetwritebuffersize" env:"DNSMONSTER_PARQUETWRITEBUFFERSIZE" default:"256000" description:"Size of the write buffer in bytes"` + + // Runtime resources + outputChannel chan util.DNSResult + closeChannel chan bool + writer io.WriteCloser + parquetWriter *parquet.GenericWriter[parquetRow] + parquetWriterLock *sync.RWMutex + parquetSentToOutput metrics.Counter + parquetSkipped metrics.Counter +} + +// NewParquetConfig creates a new ParquetConfig with default values +func NewParquetConfig() *parquetConfig { + return &parquetConfig{ + outputChannel: nil, + closeChannel: nil, + } +} + +// WithOutputType sets the OutputType and returns the config for chaining +func (c *parquetConfig) WithOutputType(t uint) *parquetConfig { + c.OutputType = t + return c +} + +// WithOutputPath sets the OutputPath and returns the config for chaining +func (c *parquetConfig) WithOutputPath(path flags.Filename) *parquetConfig { + c.OutputPath = path + return c +} + +// WithFlushBatchSize sets the FlushBatchSize and returns the config 
for chaining +func (c *parquetConfig) WithFlushBatchSize(size uint) *parquetConfig { + c.FlushBatchSize = size + return c +} + +// WithWorkerCount sets the WorkerCount and returns the config for chaining +func (c *parquetConfig) WithWorkerCount(count uint) *parquetConfig { + c.WorkerCount = count + return c +} + +// WithWriteBufferSize sets the WriteBufferSize and returns the config for chaining +func (c *parquetConfig) WithWriteBufferSize(size uint) *parquetConfig { + c.WriteBufferSize = size + return c +} + +// WithChannelSize initializes the output and close channels and returns the config for chaining +func (c *parquetConfig) WithChannelSize(channelSize int) *parquetConfig { + c.outputChannel = make(chan util.DNSResult, channelSize) + c.closeChannel = make(chan bool) + return c } type parquetRow struct { @@ -79,7 +126,7 @@ func init() { // initialize function should not block. otherwise the dispatcher will get stuck func (config *parquetConfig) Initialize(ctx context.Context) error { - if config.ParquetOutputType > 0 && config.ParquetOutputType < 5 { + if config.OutputType > 0 && config.OutputType < 5 { log.Info("Creating Parquet Output Channel") config.parquetSentToOutput = metrics.GetOrRegisterCounter("parquetSentToOutput", metrics.DefaultRegistry) @@ -88,7 +135,7 @@ func (config *parquetConfig) Initialize(ctx context.Context) error { // TODO: pending github.com/arthurkiller/rollingwriter/issues/52 // rollerConfig := &rollingwriter.Config{ - // LogPath: string(config.ParquetOutputPath), + // LogPath: string(config.OutputPath), // TimeTagFormat: time.RFC3339, // FileName: "dnsmonster", // MaxRemain: int(config.ParquetOutputRotateCount), @@ -107,7 +154,7 @@ func (config *parquetConfig) Initialize(ctx context.Context) error { // } var err error - config.writer, err = os.OpenFile(string(config.ParquetOutputPath), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + config.writer, err = os.OpenFile(string(config.OutputPath), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if 
err != nil { log.Fatal(err) return err @@ -117,8 +164,8 @@ func (config *parquetConfig) Initialize(ctx context.Context) error { parquet.BloomFilters( parquet.SplitBlockFilter(10, "query_name"), //query_name is usually the A query question or response ), - parquet.WriteBufferSize(int(config.ParquetWriteBufferSize)), // 256KB - parquet.CreatedBy("dnsmonster", "version", "build"), //TODO: bring real values here + parquet.WriteBufferSize(int(config.WriteBufferSize)), // 256KB + parquet.CreatedBy("dnsmonster", "version", "build"), //TODO: bring real values here ) go config.Output(ctx) @@ -138,19 +185,48 @@ func (config parquetConfig) OutputChannel() chan util.DNSResult { return config.outputChannel } -func (config parquetConfig) Output(ctx context.Context) { - for i := uint(0); i < config.ParquetWorkerCount; i++ { - go config.OutputWorker(ctx) - } - <-ctx.Done() - config.parquetWriterLock.Lock() - if err := config.parquetWriter.Close(); err != nil { - log.Error(err) - } - if err := config.writer.Close(); err != nil { - log.Error(err) +func (config *parquetConfig) Output(ctx context.Context) { + parquetSentToOutput := metrics.GetOrRegisterCounter("parquetSentToOutput", metrics.DefaultRegistry) + parquetSkipped := metrics.GetOrRegisterCounter("parquetSkipped", metrics.DefaultRegistry) + + batch := make([]parquetRow, 0, config.FlushBatchSize) + ticker := time.NewTicker(time.Second * 5) + + for { + select { + case data := <-config.outputChannel: + if len(data.DNS.Question) == 1 { + q := data.DNS.Question[0] + if util.CheckIfWeSkip(config.OutputType, q.Name) { + parquetSkipped.Inc(1) + continue + } + parquetSentToOutput.Inc(1) + batch = append(batch, parquetRow{ + Timestamp: data.Timestamp, + QueryName: q.Name, + // ...other fields... 
+ }) + } + case <-ticker.C: + if len(batch) > 0 { + config.parquetWriterLock.Lock() + if _, err := config.parquetWriter.Write(batch); err != nil { + log.Warnf("Error writing to parquet: %v", err) + } + config.parquetWriter.Flush() + config.parquetWriterLock.Unlock() + batch = batch[:0] + } + case <-ctx.Done(): + config.parquetWriterLock.Lock() + config.parquetWriter.Flush() + config.writer.Close() + config.parquetWriterLock.Unlock() + log.Debug("Exiting Parquet output") + return + } } - config.parquetWriterLock.Unlock() } func (config *parquetConfig) OutputWorker(ctx context.Context) { @@ -169,7 +245,7 @@ func (config *parquetConfig) OutputWorker(ctx context.Context) { continue } q0 := data.DNS.Question[0] - if util.CheckIfWeSkip(config.ParquetOutputType, q0.Name) { + if util.CheckIfWeSkip(config.OutputType, q0.Name) { config.parquetSkipped.Inc(1) continue } @@ -204,7 +280,7 @@ func (config *parquetConfig) OutputWorker(ctx context.Context) { Identity: data.Identity, Version: data.Version, }) - if cnt%config.ParquetFlushBatchSize == 0 { + if cnt%config.FlushBatchSize == 0 { config.parquetWriterLock.Lock() if n, err := config.parquetWriter.Write(dataArr); err != nil { config.parquetSkipped.Inc(int64(n)) diff --git a/internal/output/postgres.go b/internal/output/postgres.go index 2b663cd2..02a58ab1 100644 --- a/internal/output/postgres.go +++ b/internal/output/postgres.go @@ -28,17 +28,78 @@ import ( log "github.com/sirupsen/logrus" ) +// PostgresConfig is the configuration and runtime struct for PostgreSQL output type psqlConfig struct { - PsqlOutputType uint `long:"psqloutputtype" ini-name:"psqloutputtype" env:"DNSMONSTER_PSQLOUTPUTTYPE" default:"0" description:"What should be written to Microsoft Psql. 
options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - PsqlEndpoint string `long:"psqlendpoint" ini-name:"psqlendpoint" env:"DNSMONSTER_PSQLOUTPUTENDPOINT" default:"" description:"Psql endpoint used. must be in uri format. example: postgres://username:password@hostname:port/database?sslmode=disable"` - PsqlWorkers uint `long:"psqlworkers" ini-name:"psqlworkers" env:"DNSMONSTER_PSQLWORKERS" default:"1" description:"Number of PSQL workers"` - PsqlBatchSize uint `long:"psqlbatchsize" ini-name:"psqlbatchsize" env:"DNSMONSTER_PSQLBATCHSIZE" default:"1" description:"Psql Batch Size"` - PsqlBatchDelay time.Duration `long:"psqlbatchdelay" ini-name:"psqlbatchdelay" env:"DNSMONSTER_PSQLBATCHDELAY" default:"0s" description:"Interval between sending results to Psql if Batch size is not filled. Any value larger than zero takes precedence over Batch Size"` - PsqlBatchTimeout time.Duration `long:"psqlbatchtimeout" ini-name:"psqlbatchtimeout" env:"DNSMONSTER_PSQLBATCHTIMEOUT" default:"5s" description:"Timeout for any INSERT operation before we consider them failed"` - PsqlSaveFullQuery bool `long:"psqlsavefullquery" ini-name:"psqlsavefullquery" env:"DNSMONSTER_PSQLSAVEFULLQUERY" description:"Save full packet query and response in JSON format."` - outputChannel chan util.DNSResult - outputMarshaller util.OutputMarshaller - closeChannel chan bool + // Configuration options + OutputType uint `long:"psqloutputtype" ini-name:"psqloutputtype" env:"DNSMONSTER_PSQLOUTPUTTYPE" default:"0" description:"What should be written to PostgreSQL. 
options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` + Endpoint string `long:"psqlendpoint" ini-name:"psqlendpoint" env:"DNSMONSTER_PSQLOUTPUTENDPOINT" default:"" description:"Psql endpoint used. must be in uri format. example: postgres://username:password@hostname:port/database?sslmode=disable"` + Workers uint `long:"psqlworkers" ini-name:"psqlworkers" env:"DNSMONSTER_PSQLWORKERS" default:"1" description:"Number of PSQL workers"` + BatchSize uint `long:"psqlbatchsize" ini-name:"psqlbatchsize" env:"DNSMONSTER_PSQLBATCHSIZE" default:"1" description:"Psql Batch Size"` + BatchDelay time.Duration `long:"psqlbatchdelay" ini-name:"psqlbatchdelay" env:"DNSMONSTER_PSQLBATCHDELAY" default:"0s" description:"Interval between sending results to Psql if Batch size is not filled. 
Any value larger than zero takes precedence over Batch Size"` + BatchTimeout time.Duration `long:"psqlbatchtimeout" ini-name:"psqlbatchtimeout" env:"DNSMONSTER_PSQLBATCHTIMEOUT" default:"5s" description:"Timeout for any INSERT operation before we consider them failed"` + SaveFullQuery bool `long:"psqlsavefullquery" ini-name:"psqlsavefullquery" env:"DNSMONSTER_PSQLSAVEFULLQUERY" description:"Save full packet query and response in JSON format."` + + // Runtime resources + outputChannel chan util.DNSResult + outputMarshaller util.OutputMarshaller + closeChannel chan bool +} + +// NewPostgresConfig creates a new PostgresConfig with default values +func NewPostgresConfig() *psqlConfig { + return &psqlConfig{ + outputChannel: nil, + closeChannel: nil, + } +} + +// WithOutputType sets the OutputType and returns the config for chaining +func (c *psqlConfig) WithOutputType(t uint) *psqlConfig { + c.OutputType = t + return c +} + +// WithEndpoint sets the Endpoint and returns the config for chaining +func (c *psqlConfig) WithEndpoint(endpoint string) *psqlConfig { + c.Endpoint = endpoint + return c +} + +// WithWorkers sets the Workers and returns the config for chaining +func (c *psqlConfig) WithWorkers(workers uint) *psqlConfig { + c.Workers = workers + return c +} + +// WithBatchSize sets the BatchSize and returns the config for chaining +func (c *psqlConfig) WithBatchSize(size uint) *psqlConfig { + c.BatchSize = size + return c +} + +// WithBatchDelay sets the BatchDelay and returns the config for chaining +func (c *psqlConfig) WithBatchDelay(delay time.Duration) *psqlConfig { + c.BatchDelay = delay + return c +} + +// WithBatchTimeout sets the BatchTimeout and returns the config for chaining +func (c *psqlConfig) WithBatchTimeout(timeout time.Duration) *psqlConfig { + c.BatchTimeout = timeout + return c +} + +// WithSaveFullQuery sets the SaveFullQuery and returns the config for chaining +func (c *psqlConfig) WithSaveFullQuery(save bool) *psqlConfig { + c.SaveFullQuery 
= save + return c +} + +// WithChannelSize initializes the output and close channels and returns the config for chaining +func (c *psqlConfig) WithChannelSize(channelSize int) *psqlConfig { + c.outputChannel = make(chan util.DNSResult, channelSize) + c.closeChannel = make(chan bool) + return c } func init() { @@ -59,7 +120,7 @@ func (psqConf psqlConfig) Initialize(ctx context.Context) error { return err } - if psqConf.PsqlOutputType > 0 && psqConf.PsqlOutputType < 5 { + if psqConf.OutputType > 0 && psqConf.OutputType < 5 { log.Info("Creating Psql Output Channel") go psqConf.Output(ctx) } else { @@ -79,7 +140,7 @@ func (psqConf psqlConfig) OutputChannel() chan util.DNSResult { } func (psqConf psqlConfig) connectPsql() *pgxpool.Pool { - c, err := pgxpool.Connect(context.Background(), psqConf.PsqlEndpoint) + c, err := pgxpool.Connect(context.Background(), psqConf.Endpoint) if err != nil { // This will not be a connection error, but a DSN parse error or // another initialization error. @@ -105,8 +166,38 @@ func (psqConf psqlConfig) connectPsql() *pgxpool.Pool { } func (psqConf psqlConfig) Output(ctx context.Context) { - for i := 0; i < int(psqConf.PsqlWorkers); i++ { - go psqConf.OutputWorker() + psqlSentToOutput := metrics.GetOrRegisterCounter("psqlSentToOutput", metrics.DefaultRegistry) + psqlSkipped := metrics.GetOrRegisterCounter("psqlSkipped", metrics.DefaultRegistry) + + conn := psqConf.connectPsql() + batch := new(pgx.Batch) + ticker := time.NewTicker(time.Second * 5) + + for { + select { + case data := <-psqConf.outputChannel: + for _, dnsQuery := range data.DNS.Question { + if util.CheckIfWeSkip(psqConf.OutputType, dnsQuery.Name) { + psqlSkipped.Inc(1) + continue + } + psqlSentToOutput.Inc(1) + batch.Queue( + `INSERT INTO DNS_LOG (PacketTime, Question) VALUES ($1, $2)`, + data.Timestamp, dnsQuery.Name, + ) + } + case <-ticker.C: + br := conn.SendBatch(ctx, batch) + if _, err := br.Exec(); err != nil { + log.Warnf("Error executing batch: %v", err) + } + batch = 
new(pgx.Batch) + case <-ctx.Done(): + conn.Close() + log.Debug("Exiting Postgres output") + return + } } } @@ -121,10 +212,10 @@ func (psqConf psqlConfig) OutputWorker() { ticker := time.NewTicker(time.Second * 5) div := 0 - if psqConf.PsqlBatchDelay > 0 { - psqConf.PsqlBatchSize = 1 + if psqConf.BatchDelay > 0 { + psqConf.BatchSize = 1 div = -1 - ticker = time.NewTicker(psqConf.PsqlBatchDelay) + ticker = time.NewTicker(psqConf.BatchDelay) } else { ticker.Stop() } @@ -135,7 +226,7 @@ func (psqConf psqlConfig) OutputWorker() { Class, Type, Edns0Present, DoBit, FullQuery, ResponseCode, Question, Size) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17);` - timeoutContext, cancel := context.WithTimeout(context.Background(), psqConf.PsqlBatchTimeout) + timeoutContext, cancel := context.WithTimeout(context.Background(), psqConf.BatchTimeout) defer cancel() for { @@ -144,13 +235,13 @@ func (psqConf psqlConfig) OutputWorker() { for _, dnsQuery := range data.DNS.Question { c++ - if util.CheckIfWeSkip(psqConf.PsqlOutputType, dnsQuery.Name) { + if util.CheckIfWeSkip(psqConf.OutputType, dnsQuery.Name) { psqlSkipped.Inc(1) continue } fullQuery := "" - if psqConf.PsqlSaveFullQuery { + if psqConf.SaveFullQuery { fullQuery = string(psqConf.outputMarshaller.Marshal(data)) } @@ -186,7 +277,7 @@ func (psqConf psqlConfig) OutputWorker() { data.PacketLength, ) - if int(c%psqConf.PsqlBatchSize) == div { // this block will never reach if batch delay is enabled + if int(c%psqConf.BatchSize) == div { // this block will never reach if batch delay is enabled log.Warnf("here %d", c) //todo:remove br := conn.SendBatch(timeoutContext, batch) _, err := br.Exec() diff --git a/internal/output/sentinel.go b/internal/output/sentinel.go index 2e91d3da..5491cb31 100644 --- a/internal/output/sentinel.go +++ b/internal/output/sentinel.go @@ -34,30 +34,62 @@ import ( log "github.com/sirupsen/logrus" ) -type sentinelConfig struct { - SentinelOutputType uint 
`long:"sentineloutputtype" ini-name:"sentineloutputtype" env:"DNSMONSTER_SENTINELOUTPUTTYPE" default:"0" description:"What should be written to Microsoft Sentinel. options:\n;\t0: Disable Output\n;\t1: Enable Output without any filters\n;\t2: Enable Output and apply skipdomains logic\n;\t3: Enable Output and apply allowdomains logic\n;\t4: Enable Output and apply both skip and allow domains logic" choice:"0" choice:"1" choice:"2" choice:"3" choice:"4"` - SentinelOutputSharedKey string `long:"sentineloutputsharedkey" ini-name:"sentineloutputsharedkey" env:"DNSMONSTER_SENTINELOUTPUTSHAREDKEY" default:"" description:"Sentinel Shared Key, either the primary or secondary, can be found in Agents Management page under Log Analytics workspace"` - SentinelOutputCustomerID string `long:"sentineloutputcustomerid" ini-name:"sentineloutputcustomerid" env:"DNSMONSTER_SENTINELOUTPUTCUSTOMERID" default:"" description:"Sentinel Customer Id. can be found in Agents Management page under Log Analytics workspace"` - SentinelOutputLogType string `long:"sentineloutputlogtype" ini-name:"sentineloutputlogtype" env:"DNSMONSTER_SENTINELOUTPUTLOGTYPE" default:"dnsmonster" description:"Sentinel Output LogType"` - SentinelOutputProxy string `long:"sentineloutputproxy" ini-name:"sentineloutputproxy" env:"DNSMONSTER_SENTINELOUTPUTPROXY" default:"" description:"Sentinel Output Proxy in URI format"` - SentinelBatchSize uint `long:"sentinelbatchsize" ini-name:"sentinelbatchsize" env:"DNSMONSTER_SENTINELBATCHSIZE" default:"100" description:"Sentinel Batch Size"` - SentinelBatchDelay time.Duration `long:"sentinelbatchdelay" ini-name:"sentinelbatchdelay" env:"DNSMONSTER_SENTINELBATCHDELAY" default:"0s" description:"Interval between sending results to Sentinel if Batch size is not filled. 
Any value larger than zero takes precedence over Batch Size"` - outputChannel chan util.DNSResult - outputMarshaller util.OutputMarshaller - closeChannel chan bool -} - -func init() { - c := sentinelConfig{} - if _, err := util.GlobalParser.AddGroup("sentinel_output", "Microsoft Sentinel Output", &c); err != nil { - log.Fatalf("error adding output Module") - } - c.outputChannel = make(chan util.DNSResult, util.GeneralFlags.ResultChannelSize) - util.GlobalDispatchList = append(util.GlobalDispatchList, &c) +// SentinelConfig is the configuration and runtime struct for Sentinel output. +type SentinelConfig struct { + OutputType uint + SharedKey string + CustomerID string + LogType string + Proxy string + BatchSize uint + BatchDelay time.Duration + outputChannel chan util.DNSResult + outputMarshaller util.OutputMarshaller + closeChannel chan bool +} + +// NewSentinelConfig creates a new SentinelConfig with default values. +func NewSentinelConfig() *SentinelConfig { + return &SentinelConfig{} +} + +// WithOutputType sets the OutputType and returns the config for chaining. 
+func (c *SentinelConfig) WithOutputType(t uint) *SentinelConfig { + c.OutputType = t + return c +} +func (c *SentinelConfig) WithSharedKey(k string) *SentinelConfig { + c.SharedKey = k + return c +} +func (c *SentinelConfig) WithCustomerID(id string) *SentinelConfig { + c.CustomerID = id + return c +} +func (c *SentinelConfig) WithLogType(lt string) *SentinelConfig { + c.LogType = lt + return c +} +func (c *SentinelConfig) WithProxy(p string) *SentinelConfig { + c.Proxy = p + return c +} +func (c *SentinelConfig) WithBatchSize(bs uint) *SentinelConfig { + c.BatchSize = bs + return c +} +func (c *SentinelConfig) WithBatchDelay(d time.Duration) *SentinelConfig { + c.BatchDelay = d + return c +} +func (c *SentinelConfig) WithChannelSize(channelSize int) *SentinelConfig { + c.outputChannel = make(chan util.DNSResult, channelSize) + c.closeChannel = make(chan bool) + return c } // initialize function should not block. otherwise the dispatcher will get stuck -func (seConfig sentinelConfig) Initialize(ctx context.Context) error { +func (seConfig *SentinelConfig) Initialize(ctx context.Context) error { var err error seConfig.outputMarshaller, _, err = util.OutputFormatToMarshaller("json", "") if err != nil { @@ -65,7 +97,7 @@ func (seConfig sentinelConfig) Initialize(ctx context.Context) error { return err } - if seConfig.SentinelOutputType > 0 && seConfig.SentinelOutputType < 5 { + if seConfig.OutputType > 0 && seConfig.OutputType < 5 { log.Info("Creating Sentinel Output Channel") go seConfig.Output(ctx) } else { @@ -75,12 +107,12 @@ func (seConfig sentinelConfig) Initialize(ctx context.Context) error { return nil } -func (seConfig sentinelConfig) Close() { +func (seConfig *SentinelConfig) Close() { // todo: implement this <-seConfig.closeChannel } -func (seConfig sentinelConfig) OutputChannel() chan util.DNSResult { +func (seConfig *SentinelConfig) OutputChannel() chan util.DNSResult { return seConfig.outputChannel } @@ -93,7 +125,7 @@ type signatureElements struct { 
Resource string } -func (seConfig sentinelConfig) BuildSignature(sigelements signatureElements) string { +func (seConfig *SentinelConfig) BuildSignature(sigelements signatureElements) string { // build HMAC signature tmpl, err := template.New("sign").Parse(`{{.Method}} {{.ContentLength}} @@ -107,17 +139,17 @@ x-ms-date:{{.Date}} if err := tmpl.Execute(&buf, sigelements); err != nil { log.Fatal(err) } - sharedKeyBytes, err := base64.StdEncoding.DecodeString(seConfig.SentinelOutputSharedKey) + sharedKeyBytes, err := base64.StdEncoding.DecodeString(seConfig.SharedKey) if err != nil { panic(err) } h := hmac.New(sha256.New, []byte(sharedKeyBytes)) h.Write(buf.Bytes()) - signature := fmt.Sprintf("SharedKey %s:%s", seConfig.SentinelOutputCustomerID, base64.StdEncoding.EncodeToString(h.Sum(nil))) + signature := fmt.Sprintf("SharedKey %s:%s", seConfig.CustomerID, base64.StdEncoding.EncodeToString(h.Sum(nil))) return signature } -func (seConfig sentinelConfig) sendBatch(batch string, count int) { +func (seConfig *SentinelConfig) sendBatch(batch string, count int) { sentinelSentToOutput := metrics.GetOrRegisterCounter("sentinelSentToOutput", metrics.DefaultRegistry) sentinelFailed := metrics.GetOrRegisterCounter("sentinelFailed", metrics.DefaultRegistry) // send batch to Microsoft Sentinel @@ -132,12 +164,12 @@ func (seConfig sentinelConfig) sendBatch(batch string, count int) { } signature := seConfig.BuildSignature(s) // build request - uri := "https://" + seConfig.SentinelOutputCustomerID + ".ods.opinsights.azure.com" + s.Resource + "?api-version=2016-04-01" + uri := "https://" + seConfig.CustomerID + ".ods.opinsights.azure.com" + s.Resource + "?api-version=2016-04-01" headers := map[string]string{ "x-ms-date": s.Date, "content-type": s.ContentType, "Authorization": signature, - "Log-Type": seConfig.SentinelOutputLogType, + "Log-Type": seConfig.LogType, } // send request req, err := http.NewRequest("POST", uri, bytes.NewBuffer([]byte(batch))) @@ -148,8 +180,8 @@ func 
(seConfig sentinelConfig) sendBatch(batch string, count int) { for k, v := range headers { req.Header[k] = []string{v} } - if seConfig.SentinelOutputProxy != "" { - proxyURL, err := url.Parse(seConfig.SentinelOutputProxy) + if seConfig.Proxy != "" { + proxyURL, err := url.Parse(seConfig.Proxy) if err != nil { panic(err) } @@ -173,7 +205,7 @@ func (seConfig sentinelConfig) sendBatch(batch string, count int) { } } -func (seConfig sentinelConfig) Output(ctx context.Context) { +func (seConfig *SentinelConfig) Output(ctx context.Context) { log.Infof("starting SentinelOutput") sentinelSkipped := metrics.GetOrRegisterCounter("sentinelSkipped", metrics.DefaultRegistry) @@ -182,10 +214,10 @@ func (seConfig sentinelConfig) Output(ctx context.Context) { ticker := time.NewTicker(time.Second * 5) div := 0 - if seConfig.SentinelBatchDelay > 0 { - seConfig.SentinelBatchSize = 1 + if seConfig.BatchDelay > 0 { + seConfig.BatchSize = 1 div = -1 - ticker = time.NewTicker(seConfig.SentinelBatchDelay) + ticker = time.NewTicker(seConfig.BatchDelay) } else { ticker.Stop() } @@ -194,7 +226,7 @@ func (seConfig sentinelConfig) Output(ctx context.Context) { case data := <-seConfig.outputChannel: for _, dnsQuery := range data.DNS.Question { - if util.CheckIfWeSkip(seConfig.SentinelOutputType, dnsQuery.Name) { + if util.CheckIfWeSkip(seConfig.OutputType, dnsQuery.Name) { sentinelSkipped.Inc(1) continue } @@ -202,7 +234,7 @@ func (seConfig sentinelConfig) Output(ctx context.Context) { cnt++ batch += string(seConfig.outputMarshaller.Marshal(data)) batch += "," - if int(cnt%seConfig.SentinelBatchSize) == div { + if int(cnt%seConfig.BatchSize) == div { // remove the last , batch = strings.TrimSuffix(batch, ",") batch += "]"