Mirror of https://github.com/zalando-incubator/kube-metrics-adapter.git (synced 2025-01-03 07:40:09 +00:00)
Introduce context for collector interface
Signed-off-by: Mikkel Oscar Lyderik Larsen <mikkel.larsen@zalando.de>
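For orientation before the diff: this commit threads a context.Context through the collector plugin and collector interfaces, so collectors receive the caller's context and pass it to downstream calls (SQS, Nakadi, InfluxDB, Prometheus, the Kubernetes API) instead of creating their own via context.TODO() or context.Background(). Below is a minimal, self-contained sketch of the updated Collector contract and one hypothetical implementation; the stand-in CollectedMetric type and the demoCollector are illustrative only, not the adapter's real code.

package main

import (
    "context"
    "fmt"
    "time"
)

// CollectedMetric stands in for the adapter's richer metric type.
type CollectedMetric struct{ Value int64 }

// Collector mirrors the updated interface: GetMetrics now takes a context.
// CollectorPlugin.NewCollector gains the same leading ctx parameter
// (omitted here to keep the sketch free of Kubernetes API types).
type Collector interface {
    GetMetrics(ctx context.Context) ([]CollectedMetric, error)
    Interval() time.Duration
}

// demoCollector is a hypothetical implementation that honors cancellation
// instead of calling context.TODO() internally.
type demoCollector struct {
    interval time.Duration
}

func (c *demoCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
    // A real collector would hand ctx to its backend client here,
    // e.g. a queue or metrics API call that accepts a context.
    if err := ctx.Err(); err != nil {
        return nil, err // caller canceled or deadline exceeded
    }
    return []CollectedMetric{{Value: 42}}, nil
}

func (c *demoCollector) Interval() time.Duration { return c.interval }

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    var c Collector = &demoCollector{interval: 30 * time.Second}
    metrics, err := c.GetMetrics(ctx)
    fmt.Println(metrics, err)
}

In the commit itself, callers such as the HPA provider and the tests supply the context at the NewCollector and GetMetrics call sites, as the hunks below show.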
@@ -32,8 +32,8 @@ func NewAWSCollectorPlugin(configs map[string]aws.Config) *AWSCollectorPlugin {
 }

 // NewCollector initializes a new skipper collector from the specified HPA.
-func (c *AWSCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
-    return NewAWSSQSCollector(c.configs, hpa, config, interval)
+func (c *AWSCollectorPlugin) NewCollector(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+    return NewAWSSQSCollector(ctx, c.configs, hpa, config, interval)
 }

 type sqsiface interface {
@@ -50,7 +50,7 @@ type AWSSQSCollector struct {
     metricType autoscalingv2.MetricSourceType
 }

-func NewAWSSQSCollector(configs map[string]aws.Config, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*AWSSQSCollector, error) {
+func NewAWSSQSCollector(ctx context.Context, configs map[string]aws.Config, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*AWSSQSCollector, error) {
     if config.Metric.Selector == nil {
         return nil, fmt.Errorf("selector for queue is not specified")
     }
@@ -90,13 +90,13 @@ func NewAWSSQSCollector(configs map[string]aws.Config, hpa *autoscalingv2.Horizo
     }, nil
 }

-func (c *AWSSQSCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *AWSSQSCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
     params := &sqs.GetQueueAttributesInput{
         QueueUrl: aws.String(c.queueURL),
         AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameApproximateNumberOfMessages},
     }

-    resp, err := c.sqs.GetQueueAttributes(context.TODO(), params)
+    resp, err := c.sqs.GetQueueAttributes(ctx, params)
     if err != nil {
         return nil, err
     }

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "fmt"
     "time"

@@ -50,7 +51,7 @@ func NewCollectorFactory() *CollectorFactory {
 }

 type CollectorPlugin interface {
-    NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error)
+    NewCollector(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error)
 }

 type PluginNotFoundError struct {
@@ -120,38 +121,38 @@ func (c *CollectorFactory) RegisterExternalCollector(metrics []string, plugin Co
     }
 }

-func (c *CollectorFactory) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (c *CollectorFactory) NewCollector(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     switch config.Type {
     case autoscalingv2.PodsMetricSourceType:
         // first try to find a plugin by format
         if plugin, ok := c.podsPlugins.Named[config.CollectorType]; ok {
-            return plugin.NewCollector(hpa, config, interval)
+            return plugin.NewCollector(ctx, hpa, config, interval)
         }

         // else try to use the default plugin if set
         if c.podsPlugins.Any != nil {
-            return c.podsPlugins.Any.NewCollector(hpa, config, interval)
+            return c.podsPlugins.Any.NewCollector(ctx, hpa, config, interval)
         }
     case autoscalingv2.ObjectMetricSourceType:
         // first try to find a plugin by kind
         if kinds, ok := c.objectPlugins.Named[config.ObjectReference.Kind]; ok {
             if plugin, ok := kinds.Named[config.CollectorType]; ok {
-                return plugin.NewCollector(hpa, config, interval)
+                return plugin.NewCollector(ctx, hpa, config, interval)
             }

             if kinds.Any != nil {
-                return kinds.Any.NewCollector(hpa, config, interval)
+                return kinds.Any.NewCollector(ctx, hpa, config, interval)
             }
             break
         }

         // else try to find a default plugin for this kind
         if plugin, ok := c.objectPlugins.Any.Named[config.CollectorType]; ok {
-            return plugin.NewCollector(hpa, config, interval)
+            return plugin.NewCollector(ctx, hpa, config, interval)
         }

         if c.objectPlugins.Any.Any != nil {
-            return c.objectPlugins.Any.Any.NewCollector(hpa, config, interval)
+            return c.objectPlugins.Any.Any.NewCollector(ctx, hpa, config, interval)
         }
     case autoscalingv2.ExternalMetricSourceType:
         // First type to get metric type from the `type` label,
@@ -169,7 +170,7 @@ func (c *CollectorFactory) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscal
         }

         if plugin, ok := c.externalPlugins[pluginKey]; ok {
-            return plugin.NewCollector(hpa, config, interval)
+            return plugin.NewCollector(ctx, hpa, config, interval)
         }
     }

@@ -189,7 +190,7 @@ type CollectedMetric struct {
 }

 type Collector interface {
-    GetMetrics() ([]CollectedMetric, error)
+    GetMetrics(ctx context.Context) ([]CollectedMetric, error)
     Interval() time.Duration
 }

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "testing"
     "time"

@@ -13,7 +14,7 @@ type mockCollectorPlugin struct {
     Name string
 }

-func (c *mockCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (c *mockCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     return &mockCollector{Name: c.Name}, nil
 }

@@ -21,7 +22,7 @@ type mockCollector struct {
     Name string
 }

-func (c *mockCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *mockCollector) GetMetrics(_ context.Context) ([]CollectedMetric, error) {
     return nil, nil
 }

@@ -114,7 +115,7 @@ func TestNewCollector(t *testing.T) {
             require.NoError(t, err)
             require.Len(t, configs, 1)

-            collector, err := collectorFactory.NewCollector(tc.hpa, configs[0], 0)
+            collector, err := collectorFactory.NewCollector(context.Background(), tc.hpa, configs[0], 0)
             if tc.expectedCollector == "" {
                 require.Error(t, err)
             } else {

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "fmt"
     "regexp"
     "strconv"
@@ -48,6 +49,7 @@ func NewExternalRPSCollectorPlugin(

 // NewCollector initializes a new skipper collector from the specified HPA.
 func (p *ExternalRPSCollectorPlugin) NewCollector(
+    ctx context.Context,
     hpa *autoscalingv2.HorizontalPodAutoscaler,
     config *MetricConfig,
     interval time.Duration,
@@ -95,7 +97,7 @@ func (p *ExternalRPSCollectorPlugin) NewCollector(
         ),
     }

-    c, err := p.promPlugin.NewCollector(hpa, &confCopy, interval)
+    c, err := p.promPlugin.NewCollector(ctx, hpa, &confCopy, interval)
     if err != nil {
         return nil, err
     }
@@ -107,8 +109,8 @@ func (p *ExternalRPSCollectorPlugin) NewCollector(
 }

 // GetMetrics gets hostname metrics from Prometheus
-func (c *ExternalRPSCollector) GetMetrics() ([]CollectedMetric, error) {
-    v, err := c.promCollector.GetMetrics()
+func (c *ExternalRPSCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
+    v, err := c.promCollector.GetMetrics(ctx)
     if err != nil {
         return nil, err
     }

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "fmt"
     "regexp"
     "testing"
@@ -100,6 +101,7 @@ func TestExternalRPSPluginNewCollector(tt *testing.T) {
     } {
         tt.Run(testcase.msg, func(t *testing.T) {
             c, err := plugin.NewCollector(
+                context.Background(),
                 &autoscalingv2.HorizontalPodAutoscaler{},
                 testcase.config,
                 interval,
@@ -156,7 +158,7 @@ func TestExternalRPSCollectorGetMetrics(tt *testing.T) {
         tt.Run(testcase.msg, func(t *testing.T) {
             fake := makeCollectorWithStub(testcase.stub)
             c := &ExternalRPSCollector{promCollector: fake}
-            m, err := c.GetMetrics()
+            m, err := c.GetMetrics(context.Background())

             if testcase.shouldWork {
                 require.Nil(t, err)
@@ -182,6 +184,7 @@ func TestExternalRPSCollectorInterval(t *testing.T) {
         pattern: pattern,
     }
     c, err := plugin.NewCollector(
+        context.Background(),
         &autoscalingv2.HorizontalPodAutoscaler{},
         &MetricConfig{Config: map[string]string{"hostnames": "foo.bar.baz"}},
         interval,
@@ -227,7 +230,7 @@ func TestExternalRPSCollectorAndCollectorFabricInteraction(t *testing.T) {
     require.NoError(t, err)
     require.Len(t, conf, 1)

-    c, err := factory.NewCollector(hpa, conf[0], 0)
+    c, err := factory.NewCollector(context.Background(), hpa, conf[0], 0)

     require.NoError(t, err)
     _, ok := c.(*ExternalRPSCollector)
@@ -288,9 +291,9 @@ func TestExternalRPSPrometheusCollectorInteraction(t *testing.T) {
     require.Len(t, conf, 2)

     collectors := make(map[string]Collector)
-    collectors["hostname"], err = factory.NewCollector(hpa, conf[0], 0)
+    collectors["hostname"], err = factory.NewCollector(context.Background(), hpa, conf[0], 0)
     require.NoError(t, err)
-    collectors["prom"], err = factory.NewCollector(hpa, conf[1], 0)
+    collectors["prom"], err = factory.NewCollector(context.Background(), hpa, conf[1], 0)
     require.NoError(t, err)

     prom, ok := collectors["prom"].(*PrometheusCollector)

@@ -3,6 +3,7 @@ package collector
 import (
     "time"

+    "golang.org/x/net/context"
     autoscalingv2 "k8s.io/api/autoscaling/v2"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/metrics/pkg/apis/custom_metrics"
@@ -19,7 +20,7 @@ type FakeCollector struct {
     stub func() ([]CollectedMetric, error)
 }

-func (c *FakeCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *FakeCollector) GetMetrics(_ context.Context) ([]CollectedMetric, error) {
     if c.stub != nil {
         v, err := c.stub()
         return v, err
@@ -33,6 +34,7 @@ func (FakeCollector) Interval() time.Duration {
 }

 func (p *FakeCollectorPlugin) NewCollector(
+    _ context.Context,
     hpa *autoscalingv2.HorizontalPodAutoscaler,
     config *MetricConfig,
     interval time.Duration,

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "fmt"
     "net/url"
     "time"
@@ -26,7 +27,7 @@ func NewHTTPCollectorPlugin() (*HTTPCollectorPlugin, error) {
     return &HTTPCollectorPlugin{}, nil
 }

-func (p *HTTPCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (p *HTTPCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     collector := &HTTPCollector{
         namespace: hpa.Namespace,
     }
@@ -78,7 +79,7 @@ type HTTPCollector struct {
     metric autoscalingv2.MetricIdentifier
 }

-func (c *HTTPCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *HTTPCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
     metric, err := c.metricsGetter.GetMetric(*c.endpoint)
     if err != nil {
         return nil, err

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "encoding/json"
     "net/http"
     "net/http/httptest"
@@ -66,9 +67,9 @@ func TestHTTPCollector(t *testing.T) {
             Namespace: "default",
         },
     }
-    collector, err := plugin.NewCollector(hpa, testConfig, testInterval)
+    collector, err := plugin.NewCollector(context.Background(), hpa, testConfig, testInterval)
     require.NoError(t, err)
-    metrics, err := collector.GetMetrics()
+    metrics, err := collector.GetMetrics(context.Background())
     require.NoError(t, err)
     require.NotNil(t, metrics)
     require.Len(t, metrics, 1)

@@ -38,8 +38,8 @@ func NewInfluxDBCollectorPlugin(client kubernetes.Interface, address, token, org
     }, nil
 }

-func (p *InfluxDBCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
-    return NewInfluxDBCollector(hpa, p.address, p.token, p.org, config, interval)
+func (p *InfluxDBCollectorPlugin) NewCollector(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+    return NewInfluxDBCollector(ctx, hpa, p.address, p.token, p.org, config, interval)
 }

 type InfluxDBCollector struct {
@@ -55,7 +55,7 @@ type InfluxDBCollector struct {
     namespace string
 }

-func NewInfluxDBCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, address string, token string, org string, config *MetricConfig, interval time.Duration) (*InfluxDBCollector, error) {
+func NewInfluxDBCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, address string, token string, org string, config *MetricConfig, interval time.Duration) (*InfluxDBCollector, error) {
     collector := &InfluxDBCollector{
         interval: interval,
         metric: config.Metric,
@@ -107,9 +107,9 @@ type queryResult struct {
 }

 // getValue returns the first result gathered from an InfluxDB instance.
-func (c *InfluxDBCollector) getValue() (resource.Quantity, error) {
+func (c *InfluxDBCollector) getValue(ctx context.Context) (resource.Quantity, error) {
     queryAPI := c.influxDBClient.QueryAPI(c.org)
-    res, err := queryAPI.Query(context.Background(), c.query)
+    res, err := queryAPI.Query(ctx, c.query)
     if err != nil {
         return resource.Quantity{}, err
     }
@@ -125,8 +125,8 @@ func (c *InfluxDBCollector) getValue() (resource.Quantity, error) {
     return resource.Quantity{}, fmt.Errorf("empty result returned")
 }

-func (c *InfluxDBCollector) GetMetrics() ([]CollectedMetric, error) {
-    v, err := c.getValue()
+func (c *InfluxDBCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
+    v, err := c.getValue(ctx)
     if err != nil {
         return nil, err
     }

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "strings"
     "testing"
    "time"
@@ -38,7 +39,7 @@ func TestInfluxDBCollector_New(t *testing.T) {
             "query-name": "range2m",
         },
     }
-    c, err := NewInfluxDBCollector(hpa, "http://localhost:9999", "secret", "deadbeef", m, time.Second)
+    c, err := NewInfluxDBCollector(context.Background(), hpa, "http://localhost:9999", "secret", "deadbeef", m, time.Second)
     if err != nil {
         t.Fatalf("unexpected error: %v", err)
     }
@@ -79,7 +80,7 @@ func TestInfluxDBCollector_New(t *testing.T) {
             "query-name": "range3m",
         },
     }
-    c, err := NewInfluxDBCollector(hpa, "http://localhost:8888", "secret", "deadbeef", m, time.Second)
+    c, err := NewInfluxDBCollector(context.Background(), hpa, "http://localhost:8888", "secret", "deadbeef", m, time.Second)
     if err != nil {
         t.Fatalf("unexpected error: %v", err)
     }
@@ -149,7 +150,7 @@ func TestInfluxDBCollector_New(t *testing.T) {
             CollectorType: "influxdb",
             Config: tc.config,
         }
-        _, err := NewInfluxDBCollector(hpa, "http://localhost:9999", "secret", "deadbeef", m, time.Second)
+        _, err := NewInfluxDBCollector(context.Background(), hpa, "http://localhost:9999", "secret", "deadbeef", m, time.Second)
         if err == nil {
             t.Fatal("expected error got none")
         }

@@ -36,8 +36,8 @@ func NewNakadiCollectorPlugin(nakadi nakadi.Nakadi) (*NakadiCollectorPlugin, err
 }

 // NewCollector initializes a new Nakadi collector from the specified HPA.
-func (c *NakadiCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
-    return NewNakadiCollector(c.nakadi, hpa, config, interval)
+func (c *NakadiCollectorPlugin) NewCollector(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+    return NewNakadiCollector(ctx, c.nakadi, hpa, config, interval)
 }

 // NakadiCollector defines a collector that is able to collect metrics from
@@ -53,7 +53,7 @@ type NakadiCollector struct {
 }

 // NewNakadiCollector initializes a new NakadiCollector.
-func NewNakadiCollector(nakadi nakadi.Nakadi, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*NakadiCollector, error) {
+func NewNakadiCollector(_ context.Context, nakadi nakadi.Nakadi, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*NakadiCollector, error) {
     if config.Metric.Selector == nil {
         return nil, fmt.Errorf("selector for nakadi is not specified")
     }
@@ -84,17 +84,17 @@ func NewNakadiCollector(nakadi nakadi.Nakadi, hpa *autoscalingv2.HorizontalPodAu
 }

 // GetMetrics returns a list of collected metrics for the Nakadi subscription ID.
-func (c *NakadiCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *NakadiCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
     var value int64
     var err error
     switch c.nakadiMetricType {
     case nakadiMetricTypeConsumerLagSeconds:
-        value, err = c.nakadi.ConsumerLagSeconds(context.TODO(), c.subscriptionID)
+        value, err = c.nakadi.ConsumerLagSeconds(ctx, c.subscriptionID)
         if err != nil {
             return nil, err
         }
     case nakadiMetricTypeUnconsumedEvents:
-        value, err = c.nakadi.UnconsumedEvents(context.TODO(), c.subscriptionID)
+        value, err = c.nakadi.UnconsumedEvents(ctx, c.subscriptionID)
         if err != nil {
             return nil, err
         }

@@ -30,8 +30,8 @@ func NewPodCollectorPlugin(client kubernetes.Interface, argoRolloutsClient argoR
     }
 }

-func (p *PodCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
-    return NewPodCollector(p.client, p.argoRolloutsClient, hpa, config, interval)
+func (p *PodCollectorPlugin) NewCollector(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+    return NewPodCollector(ctx, p.client, p.argoRolloutsClient, hpa, config, interval)
 }

 type PodCollector struct {
@@ -46,9 +46,9 @@ type PodCollector struct {
     logger *log.Entry
 }

-func NewPodCollector(client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*PodCollector, error) {
+func NewPodCollector(ctx context.Context, client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*PodCollector, error) {
     // get pod selector based on HPA scale target ref
-    selector, err := getPodLabelSelector(client, argoRolloutsClient, hpa)
+    selector, err := getPodLabelSelector(ctx, client, argoRolloutsClient, hpa)
     if err != nil {
         return nil, fmt.Errorf("failed to get pod label selector: %v", err)
     }
@@ -81,12 +81,12 @@ func NewPodCollector(client kubernetes.Interface, argoRolloutsClient argoRollout
     return c, nil
 }

-func (c *PodCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *PodCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
     opts := metav1.ListOptions{
         LabelSelector: labels.Set(c.podLabelSelector.MatchLabels).String(),
     }

-    pods, err := c.client.CoreV1().Pods(c.namespace).List(context.TODO(), opts)
+    pods, err := c.client.CoreV1().Pods(c.namespace).List(ctx, opts)
     if err != nil {
         return nil, err
     }
@@ -156,22 +156,22 @@ func (c *PodCollector) getPodMetric(pod corev1.Pod, ch chan CollectedMetric, err
     }
 }

-func getPodLabelSelector(client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler) (*metav1.LabelSelector, error) {
+func getPodLabelSelector(ctx context.Context, client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler) (*metav1.LabelSelector, error) {
     switch hpa.Spec.ScaleTargetRef.Kind {
     case "Deployment":
-        deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+        deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(ctx, hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
         if err != nil {
             return nil, err
         }
         return deployment.Spec.Selector, nil
     case "StatefulSet":
-        sts, err := client.AppsV1().StatefulSets(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+        sts, err := client.AppsV1().StatefulSets(hpa.Namespace).Get(ctx, hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
         if err != nil {
             return nil, err
         }
         return sts.Spec.Selector, nil
     case "Rollout":
-        rollout, err := argoRolloutsClient.ArgoprojV1alpha1().Rollouts(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+        rollout, err := argoRolloutsClient.ArgoprojV1alpha1().Rollouts(hpa.Namespace).Get(ctx, hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
         if err != nil {
             return nil, err
         }

@@ -56,9 +56,9 @@ func TestPodCollector(t *testing.T) {
     makeTestPods(t, host, port, "test-metric", client, 5, podCondition, podDeletionTimestamp)
     testHPA := makeTestHPA(t, client)
     testConfig := makeTestConfig(port, minPodReadyAge)
-    collector, err := plugin.NewCollector(testHPA, testConfig, testInterval)
+    collector, err := plugin.NewCollector(context.Background(), testHPA, testConfig, testInterval)
     require.NoError(t, err)
-    metrics, err := collector.GetMetrics()
+    metrics, err := collector.GetMetrics(context.Background())
     require.NoError(t, err)
     require.Equal(t, len(metrics), int(metricsHandler.calledCounter))
     var values []int64
@@ -97,9 +97,9 @@ func TestPodCollectorWithMinPodReadyAge(t *testing.T) {
     makeTestPods(t, host, port, "test-metric", client, 5, podCondition, podDeletionTimestamp)
     testHPA := makeTestHPA(t, client)
     testConfig := makeTestConfig(port, minPodReadyAge)
-    collector, err := plugin.NewCollector(testHPA, testConfig, testInterval)
+    collector, err := plugin.NewCollector(context.Background(), testHPA, testConfig, testInterval)
     require.NoError(t, err)
-    metrics, err := collector.GetMetrics()
+    metrics, err := collector.GetMetrics(context.Background())
     require.NoError(t, err)
     require.Equal(t, len(metrics), int(metricsHandler.calledCounter))
     var values []int64
@@ -137,9 +137,9 @@ func TestPodCollectorWithPodCondition(t *testing.T) {
     makeTestPods(t, host, port, "test-metric", client, 5, podCondition, podDeletionTimestamp)
     testHPA := makeTestHPA(t, client)
     testConfig := makeTestConfig(port, minPodReadyAge)
-    collector, err := plugin.NewCollector(testHPA, testConfig, testInterval)
+    collector, err := plugin.NewCollector(context.Background(), testHPA, testConfig, testInterval)
     require.NoError(t, err)
-    metrics, err := collector.GetMetrics()
+    metrics, err := collector.GetMetrics(context.Background())
     require.NoError(t, err)
     require.Equal(t, len(metrics), int(metricsHandler.calledCounter))
     var values []int64
@@ -177,9 +177,9 @@ func TestPodCollectorWithPodTerminatingCondition(t *testing.T) {
     makeTestPods(t, host, port, "test-metric", client, 5, podCondition, podDeletionTimestamp)
     testHPA := makeTestHPA(t, client)
     testConfig := makeTestConfig(port, minPodReadyAge)
-    collector, err := plugin.NewCollector(testHPA, testConfig, testInterval)
+    collector, err := plugin.NewCollector(context.Background(), testHPA, testConfig, testInterval)
     require.NoError(t, err)
-    metrics, err := collector.GetMetrics()
+    metrics, err := collector.GetMetrics(context.Background())
     require.NoError(t, err)
     require.Equal(t, len(metrics), int(metricsHandler.calledCounter))
     var values []int64
@@ -217,9 +217,9 @@ func TestPodCollectorWithRollout(t *testing.T) {
     makeTestPods(t, host, port, "test-metric", client, 5, podCondition, podDeletionTimestamp)
     testHPA := makeTestHPAForRollout(t, client)
     testConfig := makeTestConfig(port, minPodReadyAge)
-    collector, err := plugin.NewCollector(testHPA, testConfig, testInterval)
+    collector, err := plugin.NewCollector(context.Background(), testHPA, testConfig, testInterval)
     require.NoError(t, err)
-    metrics, err := collector.GetMetrics()
+    metrics, err := collector.GetMetrics(context.Background())
     require.NoError(t, err)
     require.Equal(t, len(metrics), int(metricsHandler.calledCounter))
     var values []int64

@@ -55,7 +55,7 @@ func NewPrometheusCollectorPlugin(client kubernetes.Interface, prometheusServer
     }, nil
 }

-func (p *PrometheusCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (p *PrometheusCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     return NewPrometheusCollector(p.client, p.promAPI, hpa, config, interval)
 }

@@ -133,9 +133,9 @@ func NewPrometheusCollector(client kubernetes.Interface, promAPI promv1.API, hpa
     return c, nil
 }

-func (c *PrometheusCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *PrometheusCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
     // TODO: use real context
-    value, _, err := c.promAPI.Query(context.Background(), c.query, time.Now().UTC())
+    value, _, err := c.promAPI.Query(ctx, c.query, time.Now().UTC())
     if err != nil {
         return nil, err
     }
@@ -163,7 +163,7 @@ func (c *PrometheusCollector) GetMetrics() ([]CollectedMetric, error) {
         // calculate an average metric instead of total.
         // targetAverageValue will be available in Kubernetes v1.12
         // https://github.com/kubernetes/kubernetes/pull/64097
-        replicas, err := targetRefReplicas(c.client, c.hpa)
+        replicas, err := targetRefReplicas(ctx, c.client, c.hpa)
         if err != nil {
             return nil, err
         }

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "testing"

     "github.com/stretchr/testify/require"
@@ -179,7 +180,7 @@ func TestNewPrometheusCollector(t *testing.T) {
             require.NoError(t, err)
             require.Len(t, configs, 1)

-            collector, err := collectorFactory.NewCollector(tc.hpa, configs[0], 0)
+            collector, err := collectorFactory.NewCollector(context.Background(), tc.hpa, configs[0], 0)
             if tc.expectedQuery != "" {
                 require.NoError(t, err)
                 c, ok := collector.(*PrometheusCollector)

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "errors"
     "fmt"
     "math"
@@ -93,14 +94,14 @@ func NewClusterScalingScheduleCollectorPlugin(store Store, now Now, defaultScali
 // NewCollector initializes a new scaling schedule collector from the
 // specified HPA. It's the only required method to implement the
 // collector.CollectorPlugin interface.
-func (c *ScalingScheduleCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (c *ScalingScheduleCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     return NewScalingScheduleCollector(c.store, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now, hpa, config, interval)
 }

 // NewCollector initializes a new cluster wide scaling schedule
 // collector from the specified HPA. It's the only required method to
 // implement the collector.CollectorPlugin interface.
-func (c *ClusterScalingScheduleCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (c *ClusterScalingScheduleCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     return NewClusterScalingScheduleCollector(c.store, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now, hpa, config, interval)
 }

@@ -169,7 +170,7 @@ func NewClusterScalingScheduleCollector(store Store, defaultScalingWindow time.D
 }

 // GetMetrics is the main implementation for collector.Collector interface
-func (c *ScalingScheduleCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *ScalingScheduleCollector) GetMetrics(_ context.Context) ([]CollectedMetric, error) {
     scalingScheduleInterface, exists, err := c.store.GetByKey(fmt.Sprintf("%s/%s", c.objectReference.Namespace, c.objectReference.Name))
     if !exists {
         return nil, ErrScalingScheduleNotFound
@@ -186,7 +187,7 @@ func (c *ScalingScheduleCollector) GetMetrics() ([]CollectedMetric, error) {
 }

 // GetMetrics is the main implementation for collector.Collector interface
-func (c *ClusterScalingScheduleCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *ClusterScalingScheduleCollector) GetMetrics(_ context.Context) ([]CollectedMetric, error) {
     clusterScalingScheduleInterface, exists, err := c.store.GetByKey(c.objectReference.Name)
     if !exists {
         return nil, ErrClusterScalingScheduleNotFound

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "errors"
     "fmt"
     "testing"
@@ -626,17 +627,17 @@ func TestScalingScheduleCollector(t *testing.T) {
             err = collectorFactoryFirstRun.RegisterObjectCollector("ClusterScalingSchedule", "", clusterPluginFirstRun)
             require.NoError(t, err)

-            collector, err := collectorFactory.NewCollector(hpa, configs[0], 0)
+            collector, err := collectorFactory.NewCollector(context.Background(), hpa, configs[0], 0)
             require.NoError(t, err)
             collector, ok := collector.(*ScalingScheduleCollector)
             require.True(t, ok)

-            clusterCollector, err := collectorFactory.NewCollector(hpa, configs[1], 0)
+            clusterCollector, err := collectorFactory.NewCollector(context.Background(), hpa, configs[1], 0)
             require.NoError(t, err)
             clusterCollector, ok = clusterCollector.(*ClusterScalingScheduleCollector)
             require.True(t, ok)

-            clusterCollectorFirstRun, err := collectorFactoryFirstRun.NewCollector(hpa, configs[1], 0)
+            clusterCollectorFirstRun, err := collectorFactoryFirstRun.NewCollector(context.Background(), hpa, configs[1], 0)
             require.NoError(t, err)
             clusterCollectorFirstRun, ok = clusterCollectorFirstRun.(*ClusterScalingScheduleCollector)
             require.True(t, ok)
@@ -659,13 +660,13 @@ func TestScalingScheduleCollector(t *testing.T) {
                 }
             }

-            collected, err := collector.GetMetrics()
+            collected, err := collector.GetMetrics(context.Background())
             checkCollectedMetrics(t, collected, "ScalingSchedule")

-            clusterCollected, err := clusterCollector.GetMetrics()
+            clusterCollected, err := clusterCollector.GetMetrics(context.Background())
             checkCollectedMetrics(t, clusterCollected, "ClusterScalingSchedule")

-            clusterCollectedFirstRun, err := clusterCollectorFirstRun.GetMetrics()
+            clusterCollectedFirstRun, err := clusterCollectorFirstRun.GetMetrics(context.Background())
             checkCollectedMetrics(t, clusterCollectedFirstRun, "ClusterScalingSchedule")
         })
     }
@@ -698,21 +699,21 @@ func TestScalingScheduleObjectNotPresentReturnsError(t *testing.T) {
     err = collectorFactory.RegisterObjectCollector("ClusterScalingSchedule", "", clusterPlugin)
     require.NoError(t, err)

-    collector, err := collectorFactory.NewCollector(hpa, configs[0], 0)
+    collector, err := collectorFactory.NewCollector(context.Background(), hpa, configs[0], 0)
     require.NoError(t, err)
     collector, ok := collector.(*ScalingScheduleCollector)
     require.True(t, ok)

-    clusterCollector, err := collectorFactory.NewCollector(hpa, configs[1], 0)
+    clusterCollector, err := collectorFactory.NewCollector(context.Background(), hpa, configs[1], 0)
     require.NoError(t, err)
     clusterCollector, ok = clusterCollector.(*ClusterScalingScheduleCollector)
     require.True(t, ok)

-    _, err = collector.GetMetrics()
+    _, err = collector.GetMetrics(context.Background())
     require.Error(t, err)
     require.Equal(t, ErrScalingScheduleNotFound, err)

-    _, err = clusterCollector.GetMetrics()
+    _, err = clusterCollector.GetMetrics(context.Background())
     require.Error(t, err)
     require.Equal(t, ErrClusterScalingScheduleNotFound, err)

@@ -721,11 +722,11 @@ func TestScalingScheduleObjectNotPresentReturnsError(t *testing.T) {
     store.d["namespace/scalingScheduleName"] = invalidObject
     clusterStore.d["scalingScheduleName"] = invalidObject

-    _, err = collector.GetMetrics()
+    _, err = collector.GetMetrics(context.Background())
     require.Error(t, err)
     require.Equal(t, ErrNotScalingScheduleFound, err)

-    _, err = clusterCollector.GetMetrics()
+    _, err = clusterCollector.GetMetrics(context.Background())
     require.Error(t, err)
     require.Equal(t, ErrNotClusterScalingScheduleFound, err)
 }
@@ -755,20 +756,20 @@ func TestReturnsErrorWhenStoreDoes(t *testing.T) {
     err = collectorFactory.RegisterObjectCollector("ClusterScalingSchedule", "", clusterPlugin)
     require.NoError(t, err)

-    collector, err := collectorFactory.NewCollector(hpa, configs[0], 0)
+    collector, err := collectorFactory.NewCollector(context.Background(), hpa, configs[0], 0)
     require.NoError(t, err)
     collector, ok := collector.(*ScalingScheduleCollector)
     require.True(t, ok)

-    clusterCollector, err := collectorFactory.NewCollector(hpa, configs[1], 0)
+    clusterCollector, err := collectorFactory.NewCollector(context.Background(), hpa, configs[1], 0)
     require.NoError(t, err)
     clusterCollector, ok = clusterCollector.(*ClusterScalingScheduleCollector)
     require.True(t, ok)

-    _, err = collector.GetMetrics()
+    _, err = collector.GetMetrics(context.Background())
     require.Error(t, err)

-    _, err = clusterCollector.GetMetrics()
+    _, err = clusterCollector.GetMetrics(context.Background())
     require.Error(t, err)
 }

@@ -49,7 +49,7 @@ func NewSkipperCollectorPlugin(client kubernetes.Interface, rgClient rginterface
 }

 // NewCollector initializes a new skipper collector from the specified HPA.
-func (c *SkipperCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (c *SkipperCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     if strings.HasPrefix(config.Metric.Name, rpsMetricName) {
         backend, ok := config.Config["backend"]
         if !ok {
@@ -204,7 +204,7 @@ func (c *SkipperCollector) getCollector(ctx context.Context) (Collector, error)
     }

     config.PerReplica = false // per replica is handled outside of the prometheus collector
-    collector, err := c.plugin.NewCollector(c.hpa, &config, c.interval)
+    collector, err := c.plugin.NewCollector(ctx, c.hpa, &config, c.interval)
     if err != nil {
         return nil, err
     }
@@ -213,13 +213,13 @@ func (c *SkipperCollector) getCollector(ctx context.Context) (Collector, error)
 }

 // GetMetrics gets skipper metrics from prometheus.
-func (c *SkipperCollector) GetMetrics() ([]CollectedMetric, error) {
-    collector, err := c.getCollector(context.TODO())
+func (c *SkipperCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
+    collector, err := c.getCollector(ctx)
     if err != nil {
         return nil, err
     }

-    values, err := collector.GetMetrics()
+    values, err := collector.GetMetrics(ctx)
     if err != nil {
         return nil, err
     }
@@ -236,7 +236,7 @@ func (c *SkipperCollector) GetMetrics() ([]CollectedMetric, error) {
     // calculate an average metric instead of total.
     // targetAverageValue will be available in Kubernetes v1.12
    // https://github.com/kubernetes/kubernetes/pull/64097
-    replicas, err := targetRefReplicas(c.client, c.hpa)
+    replicas, err := targetRefReplicas(ctx, c.client, c.hpa)
     if err != nil {
         return nil, err
     }
@@ -257,17 +257,17 @@ func (c *SkipperCollector) Interval() time.Duration {
     return c.interval
 }

-func targetRefReplicas(client kubernetes.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler) (int32, error) {
+func targetRefReplicas(ctx context.Context, client kubernetes.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler) (int32, error) {
     var replicas int32
     switch hpa.Spec.ScaleTargetRef.Kind {
     case "Deployment":
-        deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+        deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(ctx, hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
         if err != nil {
             return 0, err
         }
         replicas = deployment.Status.Replicas
     case "StatefulSet":
-        sts, err := client.AppsV1().StatefulSets(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+        sts, err := client.AppsV1().StatefulSets(hpa.Namespace).Get(ctx, hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
         if err != nil {
             return 0, err
         }

@@ -38,7 +38,7 @@ func TestTargetRefReplicasDeployments(t *testing.T) {
         Create(context.TODO(), newHPA(defaultNamespace, name, "Deployment"), metav1.CreateOptions{})
     require.NoError(t, err)

-    replicas, err := targetRefReplicas(client, hpa)
+    replicas, err := targetRefReplicas(context.Background(), client, hpa)
     require.NoError(t, err)
     require.Equal(t, deployment.Status.Replicas, replicas)
 }
@@ -55,7 +55,7 @@ func TestTargetRefReplicasStatefulSets(t *testing.T) {
         Create(context.TODO(), newHPA(defaultNamespace, name, "StatefulSet"), metav1.CreateOptions{})
     require.NoError(t, err)

-    replicas, err := targetRefReplicas(client, hpa)
+    replicas, err := targetRefReplicas(context.Background(), client, hpa)
     require.NoError(t, err)
     require.Equal(t, statefulSet.Status.Replicas, replicas)
 }
@@ -340,7 +340,7 @@ func TestSkipperCollectorIngress(t *testing.T) {
             require.NoError(t, err)
             collector, err := NewSkipperCollector(client, nil, plugin, hpa, config, time.Minute, tc.backendAnnotations, tc.backend)
             require.NoError(t, err, "failed to create skipper collector: %v", err)
-            collected, err := collector.GetMetrics()
+            collected, err := collector.GetMetrics(context.Background())
             if tc.expectError {
                 require.Error(t, err)
             } else {
@@ -522,7 +522,7 @@ func TestSkipperCollector(t *testing.T) {
             require.NoError(t, err)
             collector, err := NewSkipperCollector(client, rgClient, plugin, hpa, config, time.Minute, []string{testBackendWeightsAnnotation}, tc.backend)
             require.NoError(t, err, "failed to create skipper collector: %v", err)
-            collected, err := collector.GetMetrics()
+            collected, err := collector.GetMetrics(context.Background())
             if tc.expectError {
                 require.Error(t, err, "%s", kind)
             } else {

@@ -7,6 +7,7 @@ import (
     "time"

     "github.com/zalando-incubator/kube-metrics-adapter/pkg/zmon"
+    "golang.org/x/net/context"
     autoscalingv2 "k8s.io/api/autoscaling/v2"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,7 +41,7 @@ func NewZMONCollectorPlugin(zmon zmon.ZMON) (*ZMONCollectorPlugin, error) {
 }

 // NewCollector initializes a new ZMON collector from the specified HPA.
-func (c *ZMONCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+func (c *ZMONCollectorPlugin) NewCollector(_ context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
     return NewZMONCollector(c.zmon, hpa, config, interval)
 }

@@ -121,7 +122,7 @@ func NewZMONCollector(zmon zmon.ZMON, hpa *autoscalingv2.HorizontalPodAutoscaler
 }

 // GetMetrics returns a list of collected metrics for the ZMON check.
-func (c *ZMONCollector) GetMetrics() ([]CollectedMetric, error) {
+func (c *ZMONCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, error) {
     dataPoints, err := c.zmon.Query(c.checkID, c.key, c.tags, c.aggregators, c.duration)
     if err != nil {
         return nil, err

@@ -1,6 +1,7 @@
 package collector

 import (
+    "context"
     "testing"
     "time"

@@ -38,7 +39,7 @@ func TestZMONCollectorNewCollector(t *testing.T) {

     hpa := &autoscalingv2.HorizontalPodAutoscaler{}

-    collector, err := collectPlugin.NewCollector(hpa, config, 1*time.Second)
+    collector, err := collectPlugin.NewCollector(context.Background(), hpa, config, 1*time.Second)
     require.NoError(t, err)
     require.NotNil(t, collector)
     zmonCollector := collector.(*ZMONCollector)
@@ -52,7 +53,7 @@ func TestZMONCollectorNewCollector(t *testing.T) {
     // should fail if the check id is not specified.
     delete(config.Config, zmonCheckIDLabelKey)
     config.Metric.Name = "foo-check"
-    _, err = collectPlugin.NewCollector(nil, config, 1*time.Second)
+    _, err = collectPlugin.NewCollector(context.Background(), nil, config, 1*time.Second)
     require.Error(t, err)
 }

@@ -124,7 +125,7 @@ func TestZMONCollectorGetMetrics(tt *testing.T) {
         zmonCollector, err := NewZMONCollector(z, hpa, config, 1*time.Second)
         require.NoError(t, err)

-        metrics, _ := zmonCollector.GetMetrics()
+        metrics, _ := zmonCollector.GetMetrics(context.Background())
         require.Equal(t, ti.collectedMetrics, metrics)
     })
 }

@@ -161,7 +161,7 @@ func (p *HPAProvider) updateHPAs() error {
             interval = p.collectorInterval
         }

-        c, err := p.collectorFactory.NewCollector(&hpa, config, interval)
+        c, err := p.collectorFactory.NewCollector(context.TODO(), &hpa, config, interval)
         if err != nil {

             // Only log when it's not a PluginNotFoundError AND flag disregardIncompatibleHPAs is true
@@ -347,7 +347,7 @@ func (t *CollectorScheduler) Add(resourceRef resourceReference, typeName collect
 // context is canceled the collection will be stopped.
 func collectorRunner(ctx context.Context, collector collector.Collector, metricsc chan<- metricCollection) {
     for {
-        values, err := collector.GetMetrics()
+        values, err := collector.GetMetrics(ctx)

         metricsc <- metricCollection{
             Values: values,

@@ -17,13 +17,13 @@ import (

 type mockCollectorPlugin struct{}

-func (m mockCollectorPlugin) NewCollector(hpa *autoscaling.HorizontalPodAutoscaler, config *collector.MetricConfig, interval time.Duration) (collector.Collector, error) {
+func (m mockCollectorPlugin) NewCollector(_ context.Context, hpa *autoscaling.HorizontalPodAutoscaler, config *collector.MetricConfig, interval time.Duration) (collector.Collector, error) {
     return mockCollector{}, nil
 }

 type mockCollector struct{}

-func (c mockCollector) GetMetrics() ([]collector.CollectedMetric, error) {
+func (c mockCollector) GetMetrics(_ context.Context) ([]collector.CollectedMetric, error) {
     return nil, nil
 }
