Mirror of https://github.com/nikdoof/vsphere-influxdb-go.git (synced 2025-12-20 22:19:23 +00:00)
add vendoring with go dep
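For context, vendoring with dep is driven by a Gopkg.toml manifest at the repository root; a minimal constraint entry for the dependency vendored below might look like the sketch that follows. This is illustrative only: the package name is real, but the version pin is an assumption and this commit's actual Gopkg.toml is not shown here.

	[[constraint]]
	  name = "github.com/influxdata/influxdb"
	  # assumed version pin for illustration; the real manifest may differ
	  version = "1.2.4"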
vendor/github.com/influxdata/influxdb/services/subscriber/config.go — 103 lines (generated, vendored, normal file)
@@ -0,0 +1,103 @@
package subscriber

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/influxdata/influxdb/monitor/diagnostics"
	"github.com/influxdata/influxdb/toml"
)

const (
	// DefaultHTTPTimeout is the default HTTP timeout for a Config.
	DefaultHTTPTimeout = 30 * time.Second

	// DefaultWriteConcurrency is the default write concurrency for a Config.
	DefaultWriteConcurrency = 40

	// DefaultWriteBufferSize is the default write buffer size for a Config.
	DefaultWriteBufferSize = 1000
)

// Config represents a configuration of the subscriber service.
type Config struct {
	// Whether to enable to Subscriber service
	Enabled bool `toml:"enabled"`

	HTTPTimeout toml.Duration `toml:"http-timeout"`

	// InsecureSkipVerify gets passed to the http client, if true, it will
	// skip https certificate verification. Defaults to false
	InsecureSkipVerify bool `toml:"insecure-skip-verify"`

	// configure the path to the PEM encoded CA certs file. If the
	// empty string, the default system certs will be used
	CaCerts string `toml:"ca-certs"`

	// The number of writer goroutines processing the write channel.
	WriteConcurrency int `toml:"write-concurrency"`

	// The number of in-flight writes buffered in the write channel.
	WriteBufferSize int `toml:"write-buffer-size"`
}

// NewConfig returns a new instance of a subscriber config.
func NewConfig() Config {
	return Config{
		Enabled:            true,
		HTTPTimeout:        toml.Duration(DefaultHTTPTimeout),
		InsecureSkipVerify: false,
		CaCerts:            "",
		WriteConcurrency:   DefaultWriteConcurrency,
		WriteBufferSize:    DefaultWriteBufferSize,
	}
}

// Validate returns an error if the config is invalid.
func (c Config) Validate() error {
	if c.HTTPTimeout <= 0 {
		return errors.New("http-timeout must be greater than 0")
	}

	if c.CaCerts != "" && !fileExists(c.CaCerts) {
		abspath, err := filepath.Abs(c.CaCerts)
		if err != nil {
			return fmt.Errorf("ca-certs file %s does not exist. Wrapped Error: %v", c.CaCerts, err)
		}
		return fmt.Errorf("ca-certs file %s does not exist", abspath)
	}

	if c.WriteBufferSize <= 0 {
		return errors.New("write-buffer-size must be greater than 0")
	}

	if c.WriteConcurrency <= 0 {
		return errors.New("write-concurrency must be greater than 0")
	}

	return nil
}

func fileExists(fileName string) bool {
	info, err := os.Stat(fileName)
	return err == nil && !info.IsDir()
}

// Diagnostics returns a diagnostics representation of a subset of the Config.
func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
	if !c.Enabled {
		return diagnostics.RowFromMap(map[string]interface{}{
			"enabled": false,
		}), nil
	}

	return diagnostics.RowFromMap(map[string]interface{}{
		"enabled":           true,
		"http-timeout":      c.HTTPTimeout,
		"write-concurrency": c.WriteConcurrency,
		"write-buffer-size": c.WriteBufferSize,
	}), nil
}
vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go — 111 lines (generated, vendored, normal file)
@@ -0,0 +1,111 @@
package subscriber_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/BurntSushi/toml"
	"github.com/influxdata/influxdb/services/subscriber"
)

func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c subscriber.Config
	if _, err := toml.Decode(`
enabled = false
`, &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Enabled != false {
		t.Errorf("unexpected enabled state: %v", c.Enabled)
	}
	if c.InsecureSkipVerify == true {
		t.Errorf("InsecureSkipVerify: expected %v. got %v", false, c.InsecureSkipVerify)
	}
}

func TestConfig_ParseTLSConfig(t *testing.T) {
	abspath, err := filepath.Abs("/path/to/ca-certs.pem")
	if err != nil {
		t.Fatalf("Could not construct absolute path. %v", err)
	}

	// Parse configuration.
	var c subscriber.Config
	if _, err := toml.Decode(fmt.Sprintf(`
http-timeout = "60s"
enabled = true
ca-certs = '%s'
insecure-skip-verify = true
write-buffer-size = 1000
write-concurrency = 10
`, abspath), &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Enabled != true {
		t.Errorf("unexpected enabled state: %v", c.Enabled)
	}
	if c.CaCerts != abspath {
		t.Errorf("CaCerts: expected %s. got %s", abspath, c.CaCerts)
	}
	if c.InsecureSkipVerify != true {
		t.Errorf("InsecureSkipVerify: expected %v. got %v", true, c.InsecureSkipVerify)
	}
	err = c.Validate()
	if err == nil {
		t.Errorf("Expected Validation to fail (%s doesn't exist)", abspath)
	}

	if err.Error() != fmt.Sprintf("ca-certs file %s does not exist", abspath) {
		t.Errorf("Expected descriptive validation error. Instead got %v", err)
	}
}

func TestConfig_ParseTLSConfigValidCerts(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "ca-certs.crt")
	if err != nil {
		t.Fatalf("could not create temp file. error was: %v", err)
	}
	defer os.Remove(tmpfile.Name())

	if _, err := tmpfile.Write([]byte("=== BEGIN CERTIFICATE ===\n=== END CERTIFICATE ===")); err != nil {
		t.Fatalf("could not write temp file. error was: %v", err)
	}
	if err := tmpfile.Close(); err != nil {
		t.Fatalf("could not close temp file. error was %v", err)
	}

	// Parse configuration.
	var c subscriber.Config
	if _, err := toml.Decode(fmt.Sprintf(`
http-timeout = "60s"
enabled = true
ca-certs = '%s'
insecure-skip-verify = false
write-buffer-size = 1000
write-concurrency = 10
`, tmpfile.Name()), &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Enabled != true {
		t.Errorf("unexpected enabled state: %v", c.Enabled)
	}
	if c.CaCerts != tmpfile.Name() {
		t.Errorf("CaCerts: expected %v. got %v", tmpfile.Name(), c.CaCerts)
	}
	if c.InsecureSkipVerify != false {
		t.Errorf("InsecureSkipVerify: expected %v. got %v", false, c.InsecureSkipVerify)
	}
	if err := c.Validate(); err != nil {
		t.Errorf("Expected Validation to succeed. Instead was: %v", err)
	}
}
vendor/github.com/influxdata/influxdb/services/subscriber/http.go — 75 lines (generated, vendored, normal file)
@@ -0,0 +1,75 @@
package subscriber

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"time"

	"github.com/influxdata/influxdb/client/v2"
	"github.com/influxdata/influxdb/coordinator"
)

// HTTP supports writing points over HTTP using the line protocol.
type HTTP struct {
	c client.Client
}

// NewHTTP returns a new HTTP points writer with default options.
func NewHTTP(addr string, timeout time.Duration) (*HTTP, error) {
	return NewHTTPS(addr, timeout, false, "")
}

// NewHTTPS returns a new HTTPS points writer with default options and HTTPS configured.
func NewHTTPS(addr string, timeout time.Duration, unsafeSsl bool, caCerts string) (*HTTP, error) {
	tlsConfig, err := createTLSConfig(caCerts)
	if err != nil {
		return nil, err
	}

	conf := client.HTTPConfig{
		Addr:               addr,
		Timeout:            timeout,
		InsecureSkipVerify: unsafeSsl,
		TLSConfig:          tlsConfig,
	}

	c, err := client.NewHTTPClient(conf)
	if err != nil {
		return nil, err
	}
	return &HTTP{c: c}, nil
}

// WritePoints writes points over HTTP transport.
func (h *HTTP) WritePoints(p *coordinator.WritePointsRequest) (err error) {
	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
		Database:        p.Database,
		RetentionPolicy: p.RetentionPolicy,
	})
	for _, pt := range p.Points {
		bp.AddPoint(client.NewPointFrom(pt))
	}
	err = h.c.Write(bp)
	return
}

func createTLSConfig(caCerts string) (*tls.Config, error) {
	if caCerts == "" {
		return nil, nil
	}
	return loadCaCerts(caCerts)
}

func loadCaCerts(caCerts string) (*tls.Config, error) {
	caCert, err := ioutil.ReadFile(caCerts)
	if err != nil {
		return nil, err
	}
	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM(caCert)

	return &tls.Config{
		RootCAs: caCertPool,
	}, nil
}
vendor/github.com/influxdata/influxdb/services/subscriber/service.go — 451 lines (generated, vendored, normal file)
@@ -0,0 +1,451 @@
// Package subscriber implements the subscriber service
// to forward incoming data to remote services.
package subscriber // import "github.com/influxdata/influxdb/services/subscriber"

import (
	"errors"
	"fmt"
	"net/url"
	"sync"
	"sync/atomic"
	"time"

	"github.com/influxdata/influxdb/coordinator"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/monitor"
	"github.com/influxdata/influxdb/services/meta"
	"github.com/uber-go/zap"
)

// Statistics for the Subscriber service.
const (
	statCreateFailures = "createFailures"
	statPointsWritten  = "pointsWritten"
	statWriteFailures  = "writeFailures"
)

// PointsWriter is an interface for writing points to a subscription destination.
// Only WritePoints() needs to be satisfied. PointsWriter implementations
// must be goroutine safe.
type PointsWriter interface {
	WritePoints(p *coordinator.WritePointsRequest) error
}

// subEntry is a unique set that identifies a given subscription.
type subEntry struct {
	db   string
	rp   string
	name string
}

// Service manages forking the incoming data from InfluxDB
// to defined third party destinations.
// Subscriptions are defined per database and retention policy.
type Service struct {
	MetaClient interface {
		Databases() []meta.DatabaseInfo
		WaitForDataChanged() chan struct{}
	}
	NewPointsWriter func(u url.URL) (PointsWriter, error)
	Logger          zap.Logger
	update          chan struct{}
	stats           *Statistics
	points          chan *coordinator.WritePointsRequest
	wg              sync.WaitGroup
	closed          bool
	closing         chan struct{}
	mu              sync.Mutex
	conf            Config

	subs  map[subEntry]chanWriter
	subMu sync.RWMutex
}

// NewService returns a subscriber service with given settings
func NewService(c Config) *Service {
	s := &Service{
		Logger: zap.New(zap.NullEncoder()),
		closed: true,
		stats:  &Statistics{},
		conf:   c,
	}
	s.NewPointsWriter = s.newPointsWriter
	return s
}

// Open starts the subscription service.
func (s *Service) Open() error {
	if !s.conf.Enabled {
		return nil // Service disabled.
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	if s.MetaClient == nil {
		return errors.New("no meta store")
	}

	s.closed = false

	s.closing = make(chan struct{})
	s.update = make(chan struct{})
	s.points = make(chan *coordinator.WritePointsRequest, 100)

	s.wg.Add(2)
	go func() {
		defer s.wg.Done()
		s.run()
	}()
	go func() {
		defer s.wg.Done()
		s.waitForMetaUpdates()
	}()

	s.Logger.Info("opened service")
	return nil
}

// Close terminates the subscription service.
// It will panic if called multiple times or without first opening the service.
func (s *Service) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.closed {
		return nil // Already closed.
	}

	s.closed = true

	close(s.points)
	close(s.closing)

	s.wg.Wait()
	s.Logger.Info("closed service")
	return nil
}

// WithLogger sets the logger on the service.
func (s *Service) WithLogger(log zap.Logger) {
	s.Logger = log.With(zap.String("service", "subscriber"))
}

// Statistics maintains the statistics for the subscriber service.
type Statistics struct {
	CreateFailures int64
	PointsWritten  int64
	WriteFailures  int64
}

// Statistics returns statistics for periodic monitoring.
func (s *Service) Statistics(tags map[string]string) []models.Statistic {
	statistics := []models.Statistic{{
		Name: "subscriber",
		Tags: tags,
		Values: map[string]interface{}{
			statCreateFailures: atomic.LoadInt64(&s.stats.CreateFailures),
			statPointsWritten:  atomic.LoadInt64(&s.stats.PointsWritten),
			statWriteFailures:  atomic.LoadInt64(&s.stats.WriteFailures),
		},
	}}

	s.subMu.RLock()
	defer s.subMu.RUnlock()

	for _, sub := range s.subs {
		statistics = append(statistics, sub.Statistics(tags)...)
	}
	return statistics
}

func (s *Service) waitForMetaUpdates() {
	for {
		ch := s.MetaClient.WaitForDataChanged()
		select {
		case <-ch:
			err := s.Update()
			if err != nil {
				s.Logger.Info(fmt.Sprint("error updating subscriptions: ", err))
			}
		case <-s.closing:
			return
		}
	}
}

// Update will start new and stop deleted subscriptions.
func (s *Service) Update() error {
	// signal update
	select {
	case s.update <- struct{}{}:
		return nil
	case <-s.closing:
		return errors.New("service closed cannot update")
	}
}

func (s *Service) createSubscription(se subEntry, mode string, destinations []string) (PointsWriter, error) {
	var bm BalanceMode
	switch mode {
	case "ALL":
		bm = ALL
	case "ANY":
		bm = ANY
	default:
		return nil, fmt.Errorf("unknown balance mode %q", mode)
	}
	writers := make([]PointsWriter, 0, len(destinations))
	stats := make([]writerStats, 0, len(destinations))
	// add only valid destinations
	for _, dest := range destinations {
		u, err := url.Parse(dest)
		if err != nil {
			return nil, fmt.Errorf("failed to parse destination: %s", dest)
		}
		w, err := s.NewPointsWriter(*u)
		if err != nil {
			return nil, fmt.Errorf("failed to create writer for destination: %s", dest)
		}
		writers = append(writers, w)
		stats = append(stats, writerStats{dest: dest})
	}

	return &balancewriter{
		bm:      bm,
		writers: writers,
		stats:   stats,
		defaultTags: models.StatisticTags{
			"database":         se.db,
			"retention_policy": se.rp,
			"name":             se.name,
			"mode":             mode,
		},
	}, nil
}

// Points returns a channel into which write point requests can be sent.
func (s *Service) Points() chan<- *coordinator.WritePointsRequest {
	return s.points
}

// run read points from the points channel and writes them to the subscriptions.
func (s *Service) run() {
	var wg sync.WaitGroup
	s.subs = make(map[subEntry]chanWriter)
	// Perform initial update
	s.updateSubs(&wg)
	for {
		select {
		case <-s.update:
			s.updateSubs(&wg)
		case p, ok := <-s.points:
			if !ok {
				// Close out all chanWriters
				s.close(&wg)
				return
			}
			for se, cw := range s.subs {
				if p.Database == se.db && p.RetentionPolicy == se.rp {
					select {
					case cw.writeRequests <- p:
					default:
						atomic.AddInt64(&s.stats.WriteFailures, 1)
					}
				}
			}
		}
	}
}

// close closes the existing channel writers.
func (s *Service) close(wg *sync.WaitGroup) {
	s.subMu.Lock()
	defer s.subMu.Unlock()

	for _, cw := range s.subs {
		cw.Close()
	}
	// Wait for them to finish
	wg.Wait()
	s.subs = nil
}

func (s *Service) updateSubs(wg *sync.WaitGroup) {
	s.subMu.Lock()
	defer s.subMu.Unlock()

	if s.subs == nil {
		s.subs = make(map[subEntry]chanWriter)
	}

	dbis := s.MetaClient.Databases()
	allEntries := make(map[subEntry]bool, 0)
	// Add in new subscriptions
	for _, dbi := range dbis {
		for _, rpi := range dbi.RetentionPolicies {
			for _, si := range rpi.Subscriptions {
				se := subEntry{
					db:   dbi.Name,
					rp:   rpi.Name,
					name: si.Name,
				}
				allEntries[se] = true
				if _, ok := s.subs[se]; ok {
					continue
				}
				sub, err := s.createSubscription(se, si.Mode, si.Destinations)
				if err != nil {
					atomic.AddInt64(&s.stats.CreateFailures, 1)
					s.Logger.Info(fmt.Sprintf("Subscription creation failed for '%s' with error: %s", si.Name, err))
					continue
				}
				cw := chanWriter{
					writeRequests: make(chan *coordinator.WritePointsRequest, s.conf.WriteBufferSize),
					pw:            sub,
					pointsWritten: &s.stats.PointsWritten,
					failures:      &s.stats.WriteFailures,
					logger:        s.Logger,
				}
				for i := 0; i < s.conf.WriteConcurrency; i++ {
					wg.Add(1)
					go func() {
						defer wg.Done()
						cw.Run()
					}()
				}
				s.subs[se] = cw
				s.Logger.Info(fmt.Sprintf("added new subscription for %s %s", se.db, se.rp))
			}
		}
	}

	// Remove deleted subs
	for se := range s.subs {
		if !allEntries[se] {
			// Close the chanWriter
			s.subs[se].Close()

			// Remove it from the set
			delete(s.subs, se)
			s.Logger.Info(fmt.Sprintf("deleted old subscription for %s %s", se.db, se.rp))
		}
	}
}

// newPointsWriter returns a new PointsWriter from the given URL.
func (s *Service) newPointsWriter(u url.URL) (PointsWriter, error) {
	switch u.Scheme {
	case "udp":
		return NewUDP(u.Host), nil
	case "http":
		return NewHTTP(u.String(), time.Duration(s.conf.HTTPTimeout))
	case "https":
		if s.conf.InsecureSkipVerify {
			s.Logger.Info("WARNING: 'insecure-skip-verify' is true. This will skip all certificate verifications.")
		}
		return NewHTTPS(u.String(), time.Duration(s.conf.HTTPTimeout), s.conf.InsecureSkipVerify, s.conf.CaCerts)
	default:
		return nil, fmt.Errorf("unknown destination scheme %s", u.Scheme)
	}
}

// chanWriter sends WritePointsRequest to a PointsWriter received over a channel.
type chanWriter struct {
	writeRequests chan *coordinator.WritePointsRequest
	pw            PointsWriter
	pointsWritten *int64
	failures      *int64
	logger        zap.Logger
}

// Close closes the chanWriter.
func (c chanWriter) Close() {
	close(c.writeRequests)
}

func (c chanWriter) Run() {
	for wr := range c.writeRequests {
		err := c.pw.WritePoints(wr)
		if err != nil {
			c.logger.Info(err.Error())
			atomic.AddInt64(c.failures, 1)
		} else {
			atomic.AddInt64(c.pointsWritten, int64(len(wr.Points)))
		}
	}
}

// Statistics returns statistics for periodic monitoring.
func (c chanWriter) Statistics(tags map[string]string) []models.Statistic {
	if m, ok := c.pw.(monitor.Reporter); ok {
		return m.Statistics(tags)
	}
	return []models.Statistic{}
}

// BalanceMode specifies what balance mode to use on a subscription.
type BalanceMode int

const (
	// ALL indicates to send writes to all subscriber destinations.
	ALL BalanceMode = iota

	// ANY indicates to send writes to a single subscriber destination, round robin.
	ANY
)

type writerStats struct {
	dest          string
	failures      int64
	pointsWritten int64
}

// balances writes across PointsWriters according to BalanceMode
type balancewriter struct {
	bm          BalanceMode
	writers     []PointsWriter
	stats       []writerStats
	defaultTags models.StatisticTags
	i           int
}

func (b *balancewriter) WritePoints(p *coordinator.WritePointsRequest) error {
	var lastErr error
	for range b.writers {
		// round robin through destinations.
		i := b.i
		w := b.writers[i]
		b.i = (b.i + 1) % len(b.writers)

		// write points to destination.
		err := w.WritePoints(p)
		if err != nil {
			lastErr = err
			atomic.AddInt64(&b.stats[i].failures, 1)
		} else {
			atomic.AddInt64(&b.stats[i].pointsWritten, int64(len(p.Points)))
			if b.bm == ANY {
				break
			}
		}
	}
	return lastErr
}

// Statistics returns statistics for periodic monitoring.
func (b *balancewriter) Statistics(tags map[string]string) []models.Statistic {
	statistics := make([]models.Statistic, len(b.stats))
	for i := range b.stats {
		subTags := b.defaultTags.Merge(tags)
		subTags["destination"] = b.stats[i].dest
		statistics[i] = models.Statistic{
			Name: "subscriber",
			Tags: subTags,
			Values: map[string]interface{}{
				statPointsWritten: atomic.LoadInt64(&b.stats[i].pointsWritten),
				statWriteFailures: atomic.LoadInt64(&b.stats[i].failures),
			},
		}
	}
	return statistics
}
vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go — 443 lines (generated, vendored, normal file)
@@ -0,0 +1,443 @@
package subscriber_test

import (
	"net/url"
	"testing"
	"time"

	"github.com/influxdata/influxdb/coordinator"
	"github.com/influxdata/influxdb/services/meta"
	"github.com/influxdata/influxdb/services/subscriber"
)

type MetaClient struct {
	DatabasesFn          func() []meta.DatabaseInfo
	WaitForDataChangedFn func() chan struct{}
}

func (m MetaClient) Databases() []meta.DatabaseInfo {
	return m.DatabasesFn()
}

func (m MetaClient) WaitForDataChanged() chan struct{} {
	return m.WaitForDataChangedFn()
}

type Subscription struct {
	WritePointsFn func(*coordinator.WritePointsRequest) error
}

func (s Subscription) WritePoints(p *coordinator.WritePointsRequest) error {
	return s.WritePointsFn(p)
}

func TestService_IgnoreNonMatch(t *testing.T) {
	dataChanged := make(chan struct{})
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	ms.DatabasesFn = func() []meta.DatabaseInfo {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}
	}

	prs := make(chan *coordinator.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- struct{}{}

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that don't match any subscription.
	s.Points() <- &coordinator.WritePointsRequest{
		Database:        "db1",
		RetentionPolicy: "rp0",
	}
	s.Points() <- &coordinator.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp2",
	}

	// Shouldn't get any prs back
	select {
	case pr := <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}
	close(dataChanged)
}

func TestService_ModeALL(t *testing.T) {
	dataChanged := make(chan struct{})
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	ms.DatabasesFn = func() []meta.DatabaseInfo {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ALL", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}
	}

	prs := make(chan *coordinator.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- struct{}{}

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that match subscription with mode ALL
	expPR := &coordinator.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Should get pr back twice
	for i := 0; i < 2; i++ {
		var pr *coordinator.WritePointsRequest
		select {
		case pr = <-prs:
		case <-time.After(10 * time.Millisecond):
			t.Fatalf("expected points request: got %d exp 2", i)
		}
		if pr != expPR {
			t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
		}
	}
	close(dataChanged)
}

func TestService_ModeANY(t *testing.T) {
	dataChanged := make(chan struct{})
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	ms.DatabasesFn = func() []meta.DatabaseInfo {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}
	}

	prs := make(chan *coordinator.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- struct{}{}

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}
	// Write points that match subscription with mode ANY
	expPR := &coordinator.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Validate we get the pr back just once
	var pr *coordinator.WritePointsRequest
	select {
	case pr = <-prs:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected points request")
	}
	if pr != expPR {
		t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
	}

	// shouldn't get it a second time
	select {
	case pr = <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}
	close(dataChanged)
}

func TestService_Multiple(t *testing.T) {
	dataChanged := make(chan struct{})
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	ms.DatabasesFn = func() []meta.DatabaseInfo {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
					{
						Name: "rp1",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s1", Mode: "ALL", Destinations: []string{"udp://h2:9093", "udp://h3:9093"}},
						},
					},
				},
			},
		}
	}

	prs := make(chan *coordinator.WritePointsRequest, 4)
	urls := make(chan url.URL, 4)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- struct{}{}

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093", "udp://h2:9093", "udp://h3:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(100 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that don't match any subscription.
	s.Points() <- &coordinator.WritePointsRequest{
		Database:        "db1",
		RetentionPolicy: "rp0",
	}
	s.Points() <- &coordinator.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp2",
	}

	// Write points that match subscription with mode ANY
	expPR := &coordinator.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Validate we get the pr back just once
	var pr *coordinator.WritePointsRequest
	select {
	case pr = <-prs:
	case <-time.After(100 * time.Millisecond):
		t.Fatal("expected points request")
	}
	if pr != expPR {
		t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
	}

	// shouldn't get it a second time
	select {
	case pr = <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}

	// Write points that match subscription with mode ALL
	expPR = &coordinator.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp1",
	}
	s.Points() <- expPR

	// Should get pr back twice
	for i := 0; i < 2; i++ {
		select {
		case pr = <-prs:
		case <-time.After(100 * time.Millisecond):
			t.Fatalf("expected points request: got %d exp 2", i)
		}
		if pr != expPR {
			t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
		}
	}
	close(dataChanged)
}

func TestService_WaitForDataChanged(t *testing.T) {
	dataChanged := make(chan struct{}, 1)
	ms := MetaClient{}
	ms.WaitForDataChangedFn = func() chan struct{} {
		return dataChanged
	}
	calls := make(chan bool, 2)
	ms.DatabasesFn = func() []meta.DatabaseInfo {
		calls <- true
		return nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaClient = ms
	// Explicitly closed below for testing
	s.Open()

	// Should be called once during open
	select {
	case <-calls:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected call")
	}

	select {
	case <-calls:
		t.Fatal("unexpected call")
	case <-time.After(time.Millisecond):
	}

	// Signal that data has changed
	dataChanged <- struct{}{}

	// Should be called once more after data changed
	select {
	case <-calls:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected call")
	}

	select {
	case <-calls:
		t.Fatal("unexpected call")
	case <-time.After(time.Millisecond):
	}

	//Close service ensure not called
	s.Close()
	dataChanged <- struct{}{}
	select {
	case <-calls:
		t.Fatal("unexpected call")
	case <-time.After(time.Millisecond):
	}

	close(dataChanged)
}
vendor/github.com/influxdata/influxdb/services/subscriber/udp.go — 42 lines (generated, vendored, normal file)
@@ -0,0 +1,42 @@
package subscriber

import (
	"net"

	"github.com/influxdata/influxdb/coordinator"
)

// UDP supports writing points over UDP using the line protocol.
type UDP struct {
	addr string
}

// NewUDP returns a new UDP listener with default options.
func NewUDP(addr string) *UDP {
	return &UDP{addr: addr}
}

// WritePoints writes points over UDP transport.
func (u *UDP) WritePoints(p *coordinator.WritePointsRequest) (err error) {
	var addr *net.UDPAddr
	var con *net.UDPConn
	addr, err = net.ResolveUDPAddr("udp", u.addr)
	if err != nil {
		return
	}

	con, err = net.DialUDP("udp", nil, addr)
	if err != nil {
		return
	}
	defer con.Close()

	for _, p := range p.Points {
		_, err = con.Write([]byte(p.String()))
		if err != nil {
			return
		}

	}
	return
}