Mirror of https://github.com/zalando-incubator/kube-metrics-adapter.git (synced 2025-05-18 03:05:16 +00:00)

Compare commits: 4bfdecf896...master (38 commits)
292bd4fc27
be40d664f6
5418c64e8f
f0a07e0c64
a1d90533e8
e16119f821
96a1315ad3
8b1245cbc8
30dcc24c7f
d1d37dca26
221589d630
3acdacf8c9
62c50df209
25e6ce4bd1
52d3980c56
da84a4292d
80b80c878d
e44da55641
b9308bd8a9
29ce099563
a4306be7a5
d346066ee7
763ba23fd9
f4e7a37a7b
21dae23214
7d47ff5c06
c027968c71
eef3840510
716d03a09b
f625fe9540
9e38fe15a5
0c7b5bdafe
b44fcbaae8
eb27f6037f
6149308e6d
62cc9c98f3
e03222b12b
063d3de6d2
.github/workflows/ci.yaml (vendored, 2 changes)
@@ -13,7 +13,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
-          go-version: '^1.23'
+          go-version: '^1.24'
       - run: go version
       - run: go install github.com/mattn/goveralls@latest
       - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
README.md (33 changes)
@@ -751,12 +751,12 @@ spec:
   - type: External
     external:
       metric:
-        name: my-nakadi-consumer
-        selector:
-          matchLabels:
-            type: nakadi
-            subscription-id: "708095f6-cece-4d02-840e-ee488d710b29"
-            metric-type: "consumer-lag-seconds|unconsumed-events"
+        name: my-nakadi-consumer
+        selector:
+          matchLabels:
+            type: nakadi
+            subscription-id: "708095f6-cece-4d02-840e-ee488d710b29"
+            metric-type: "consumer-lag-seconds|unconsumed-events"
       target:
         # value is compatible with the consumer-lag-seconds metric type.
         # It describes the amount of consumer lag in seconds before scaling
@@ -805,6 +805,27 @@ with more consumers.
 For this case you should also account for the average time for processing an
 event when defining the target.
 
+As an alternative to defining `subscription-id`, you can also filter based on
+`owning-application`, `event-types` and `consumer-group`:
+
+```yaml
+metrics:
+- type: External
+  external:
+    metric:
+      name: my-nakadi-consumer
+      selector:
+        matchLabels:
+          type: nakadi
+          owning-application: "example-app"
+          # comma separated list of event types
+          event-types: "example-event-type,example-event-type2"
+          consumer-group: "abcd1234"
+          metric-type: "consumer-lag-seconds|unconsumed-events"
+```
+
+This is useful in dynamic environments where the subscription ID might not be
+known before deployment time (e.g. because it's created by the same deployment).
 
 ## HTTP Collector
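The filter-based configuration above means the adapter has to resolve matching subscriptions through Nakadi's subscription listing API rather than using a fixed ID. A rough, self-contained sketch of such a lookup follows; the query parameter names (`owning_application`, `event_type`, `consumer_group`) and the response shape are assumptions about the Nakadi API and are not taken from this change.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// listSubscriptionIDs is a sketch only: it lists subscription IDs matching a
// filter via Nakadi's GET /subscriptions endpoint. Parameter names and the
// response layout are assumptions, not taken from kube-metrics-adapter.
func listSubscriptionIDs(ctx context.Context, client *http.Client, endpoint, owningApplication, consumerGroup string, eventTypes []string) ([]string, error) {
	u, err := url.Parse(endpoint + "/subscriptions")
	if err != nil {
		return nil, err
	}
	q := u.Query()
	q.Set("owning_application", owningApplication)
	q.Set("consumer_group", consumerGroup)
	for _, et := range eventTypes {
		q.Add("event_type", et)
	}
	u.RawQuery = q.Encode()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d from Nakadi", resp.StatusCode)
	}

	var payload struct {
		Items []struct {
			ID string `json:"id"`
		} `json:"items"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(payload.Items))
	for _, item := range payload.Items {
		ids = append(ids, item.ID)
	}
	return ids, nil
}

func main() {
	ids, err := listSubscriptionIDs(context.Background(), http.DefaultClient, "https://nakadi.example.org", "example-app", "abcd1234", []string{"example-event-type"})
	if err != nil {
		panic(err)
	}
	fmt.Println(ids)
}
```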
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.17.3
   name: clusterscalingschedules.zalando.org
 spec:
   group: zalando.org
@@ -11,6 +11,10 @@ spec:
     kind: ClusterScalingSchedule
     listKind: ClusterScalingScheduleList
     plural: clusterscalingschedules
+    shortNames:
+    - css
+    - clustersched
+    - clusterschedule
     singular: clusterscalingschedule
   scope: Cluster
   versions:

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.17.3
   name: scalingschedules.zalando.org
 spec:
   group: zalando.org
@@ -13,6 +13,9 @@ spec:
     kind: ScalingSchedule
     listKind: ScalingScheduleList
     plural: scalingschedules
+    shortNames:
+    - sched
+    - schedule
     singular: scalingschedule
   scope: Namespaced
   versions:
go.mod (144 changes)
@ -1,37 +1,38 @@
|
||||
module github.com/zalando-incubator/kube-metrics-adapter
|
||||
|
||||
require (
|
||||
github.com/argoproj/argo-rollouts v1.8.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.4
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.37.12
|
||||
github.com/argoproj/argo-rollouts v1.8.2
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.14
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.5
|
||||
github.com/influxdata/influxdb-client-go v1.4.0
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/common v0.62.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.63.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spyzhov/ajson v0.9.6
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/szuecs/routegroup-client v0.28.2
|
||||
github.com/zalando-incubator/cluster-lifecycle-manager v0.0.0-20240619093047-7853f3386b71
|
||||
golang.org/x/net v0.34.0
|
||||
golang.org/x/oauth2 v0.25.0
|
||||
golang.org/x/sync v0.10.0
|
||||
k8s.io/api v0.31.5
|
||||
k8s.io/apimachinery v0.31.5
|
||||
k8s.io/apiserver v0.31.5
|
||||
k8s.io/client-go v0.31.5
|
||||
k8s.io/code-generator v0.31.5
|
||||
k8s.io/component-base v0.31.5
|
||||
golang.org/x/net v0.40.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.14.0
|
||||
k8s.io/api v0.32.4
|
||||
k8s.io/apimachinery v0.32.4
|
||||
k8s.io/apiserver v0.32.4
|
||||
k8s.io/client-go v0.32.4
|
||||
k8s.io/code-generator v0.32.4
|
||||
k8s.io/component-base v0.32.4
|
||||
k8s.io/klog v1.0.0
|
||||
k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b
|
||||
k8s.io/metrics v0.31.5
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-tools v0.16.5
|
||||
sigs.k8s.io/custom-metrics-apiserver v1.30.1-0.20241105195130-84dc8cfe2555
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff
|
||||
k8s.io/metrics v0.32.4
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
|
||||
sigs.k8s.io/controller-tools v0.17.3
|
||||
sigs.k8s.io/custom-metrics-apiserver v1.32.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.19.1 // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect
|
||||
github.com/CloudyKit/jet/v6 v6.2.0 // indirect
|
||||
@ -42,16 +43,16 @@ require (
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.57 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/aymerick/douceur v0.2.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
@ -88,19 +89,18 @@ require (
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20241205020045-f7e15b2f3e62 // indirect
|
||||
github.com/google/cel-go v0.20.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/css v1.0.1 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
|
||||
github.com/iris-contrib/schema v0.0.6 // indirect
|
||||
@ -112,7 +112,7 @@ require (
|
||||
github.com/kataras/pio v0.0.13 // indirect
|
||||
github.com/kataras/sitemap v0.0.6 // indirect
|
||||
github.com/kataras/tunnel v0.0.4 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/labstack/echo/v4 v4.12.0 // indirect
|
||||
@ -133,7 +133,7 @@ require (
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/schollz/closestmatch v2.1.0+incompatible // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/tdewolff/minify/v2 v2.20.34 // indirect
|
||||
github.com/tdewolff/parse/v2 v2.7.15 // indirect
|
||||
@ -145,47 +145,51 @@ require (
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yosssi/ace v0.0.5 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.14 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.28.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.21 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/crypto v0.32.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/sys v0.29.0 // indirect
|
||||
golang.org/x/term v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.32.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/grpc v1.68.1 // indirect
|
||||
google.golang.org/protobuf v1.36.1 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.2 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.4 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kms v0.31.5 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/kms v0.32.4 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
go 1.23
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.1
|
||||
|
File diff suppressed because it is too large
@@ -20,7 +20,7 @@ type ScalingScheduler interface {
 // ScalingSchedule describes a namespaced time based metric to be used
 // in autoscaling operations.
 // +k8s:deepcopy-gen=true
-// +kubebuilder:resource:categories=all
+// +kubebuilder:resource:categories=all,shortName=sched;schedule
 // +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active`,description="Whether one or more schedules are currently active."
 // +kubebuilder:subresource:status
 type ScalingSchedule struct {
@@ -51,10 +51,9 @@ func (s *ScalingSchedule) ResourceSpec() ScalingScheduleSpec {
 // ClusterScalingSchedule describes a cluster scoped time based metric
 // to be used in autoscaling operations.
 // +k8s:deepcopy-gen=true
-// +kubebuilder:resource:categories=all
+// +kubebuilder:resource:scope=Cluster,shortName=css;clustersched;clusterschedule
 // +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active`,description="Whether one or more schedules are currently active."
 // +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster
 type ClusterScalingSchedule struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
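The `Active` print column added by these markers reads `.status.active` from the status subresource. The status type itself is not part of this diff; a minimal sketch of a field that would back that JSONPath (the struct name and tags are assumptions) looks like this:

```go
package v1

// Sketch only: the real status struct lives in pkg/apis/zalando.org/v1 and may
// contain more fields; this just shows what `.status.active` would point at.
type ScalingScheduleStatus struct {
	// Active is true while one or more of the configured schedules are currently active.
	Active bool `json:"active"`
}
```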
@ -19,8 +19,8 @@ limitations under the License.
|
||||
package versioned
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
fmt "fmt"
|
||||
http "net/http"
|
||||
|
||||
zalandov1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/typed/zalando.org/v1"
|
||||
discovery "k8s.io/client-go/discovery"
|
||||
|
@ -19,9 +19,9 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
scheme "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
@ -37,33 +37,34 @@ type ClusterScalingSchedulesGetter interface {
|
||||
|
||||
// ClusterScalingScheduleInterface has methods to work with ClusterScalingSchedule resources.
|
||||
type ClusterScalingScheduleInterface interface {
|
||||
Create(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.CreateOptions) (*v1.ClusterScalingSchedule, error)
|
||||
Update(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*v1.ClusterScalingSchedule, error)
|
||||
Create(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts metav1.CreateOptions) (*zalandoorgv1.ClusterScalingSchedule, error)
|
||||
Update(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*zalandoorgv1.ClusterScalingSchedule, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*v1.ClusterScalingSchedule, error)
|
||||
UpdateStatus(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*zalandoorgv1.ClusterScalingSchedule, error)
|
||||
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterScalingSchedule, error)
|
||||
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterScalingScheduleList, error)
|
||||
Get(ctx context.Context, name string, opts metav1.GetOptions) (*zalandoorgv1.ClusterScalingSchedule, error)
|
||||
List(ctx context.Context, opts metav1.ListOptions) (*zalandoorgv1.ClusterScalingScheduleList, error)
|
||||
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterScalingSchedule, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *zalandoorgv1.ClusterScalingSchedule, err error)
|
||||
ClusterScalingScheduleExpansion
|
||||
}
|
||||
|
||||
// clusterScalingSchedules implements ClusterScalingScheduleInterface
|
||||
type clusterScalingSchedules struct {
|
||||
*gentype.ClientWithList[*v1.ClusterScalingSchedule, *v1.ClusterScalingScheduleList]
|
||||
*gentype.ClientWithList[*zalandoorgv1.ClusterScalingSchedule, *zalandoorgv1.ClusterScalingScheduleList]
|
||||
}
|
||||
|
||||
// newClusterScalingSchedules returns a ClusterScalingSchedules
|
||||
func newClusterScalingSchedules(c *ZalandoV1Client) *clusterScalingSchedules {
|
||||
return &clusterScalingSchedules{
|
||||
gentype.NewClientWithList[*v1.ClusterScalingSchedule, *v1.ClusterScalingScheduleList](
|
||||
gentype.NewClientWithList[*zalandoorgv1.ClusterScalingSchedule, *zalandoorgv1.ClusterScalingScheduleList](
|
||||
"clusterscalingschedules",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
"",
|
||||
func() *v1.ClusterScalingSchedule { return &v1.ClusterScalingSchedule{} },
|
||||
func() *v1.ClusterScalingScheduleList { return &v1.ClusterScalingScheduleList{} }),
|
||||
func() *zalandoorgv1.ClusterScalingSchedule { return &zalandoorgv1.ClusterScalingSchedule{} },
|
||||
func() *zalandoorgv1.ClusterScalingScheduleList { return &zalandoorgv1.ClusterScalingScheduleList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
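Aside from the import-alias churn (`v1` becoming `zalandoorgv1`), the regenerated typed client is used exactly as before. A minimal sketch of listing the cluster-scoped resources; the kubeconfig path is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	versioned "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is a placeholder).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// ClusterScalingSchedules are cluster scoped, so there is no namespace argument.
	schedules, err := cs.ZalandoV1().ClusterScalingSchedules().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range schedules.Items {
		fmt.Println(s.Name)
	}
}
```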
@ -19,120 +19,34 @@ limitations under the License.
|
||||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/typed/zalando.org/v1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeClusterScalingSchedules implements ClusterScalingScheduleInterface
|
||||
type FakeClusterScalingSchedules struct {
|
||||
// fakeClusterScalingSchedules implements ClusterScalingScheduleInterface
|
||||
type fakeClusterScalingSchedules struct {
|
||||
*gentype.FakeClientWithList[*v1.ClusterScalingSchedule, *v1.ClusterScalingScheduleList]
|
||||
Fake *FakeZalandoV1
|
||||
}
|
||||
|
||||
var clusterscalingschedulesResource = v1.SchemeGroupVersion.WithResource("clusterscalingschedules")
|
||||
|
||||
var clusterscalingschedulesKind = v1.SchemeGroupVersion.WithKind("ClusterScalingSchedule")
|
||||
|
||||
// Get takes name of the clusterScalingSchedule, and returns the corresponding clusterScalingSchedule object, and an error if there is any.
|
||||
func (c *FakeClusterScalingSchedules) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterScalingSchedule, err error) {
|
||||
emptyResult := &v1.ClusterScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootGetActionWithOptions(clusterscalingschedulesResource, name, options), emptyResult)
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
func newFakeClusterScalingSchedules(fake *FakeZalandoV1) zalandoorgv1.ClusterScalingScheduleInterface {
|
||||
return &fakeClusterScalingSchedules{
|
||||
gentype.NewFakeClientWithList[*v1.ClusterScalingSchedule, *v1.ClusterScalingScheduleList](
|
||||
fake.Fake,
|
||||
"",
|
||||
v1.SchemeGroupVersion.WithResource("clusterscalingschedules"),
|
||||
v1.SchemeGroupVersion.WithKind("ClusterScalingSchedule"),
|
||||
func() *v1.ClusterScalingSchedule { return &v1.ClusterScalingSchedule{} },
|
||||
func() *v1.ClusterScalingScheduleList { return &v1.ClusterScalingScheduleList{} },
|
||||
func(dst, src *v1.ClusterScalingScheduleList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1.ClusterScalingScheduleList) []*v1.ClusterScalingSchedule {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1.ClusterScalingScheduleList, items []*v1.ClusterScalingSchedule) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1.ClusterScalingSchedule), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ClusterScalingSchedules that match those selectors.
|
||||
func (c *FakeClusterScalingSchedules) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterScalingScheduleList, err error) {
|
||||
emptyResult := &v1.ClusterScalingScheduleList{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootListActionWithOptions(clusterscalingschedulesResource, clusterscalingschedulesKind, opts), emptyResult)
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1.ClusterScalingScheduleList{ListMeta: obj.(*v1.ClusterScalingScheduleList).ListMeta}
|
||||
for _, item := range obj.(*v1.ClusterScalingScheduleList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested clusterScalingSchedules.
|
||||
func (c *FakeClusterScalingSchedules) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewRootWatchActionWithOptions(clusterscalingschedulesResource, opts))
|
||||
}
|
||||
|
||||
// Create takes the representation of a clusterScalingSchedule and creates it. Returns the server's representation of the clusterScalingSchedule, and an error, if there is any.
|
||||
func (c *FakeClusterScalingSchedules) Create(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.CreateOptions) (result *v1.ClusterScalingSchedule, err error) {
|
||||
emptyResult := &v1.ClusterScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootCreateActionWithOptions(clusterscalingschedulesResource, clusterScalingSchedule, opts), emptyResult)
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ClusterScalingSchedule), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a clusterScalingSchedule and updates it. Returns the server's representation of the clusterScalingSchedule, and an error, if there is any.
|
||||
func (c *FakeClusterScalingSchedules) Update(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (result *v1.ClusterScalingSchedule, err error) {
|
||||
emptyResult := &v1.ClusterScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateActionWithOptions(clusterscalingschedulesResource, clusterScalingSchedule, opts), emptyResult)
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ClusterScalingSchedule), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeClusterScalingSchedules) UpdateStatus(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (result *v1.ClusterScalingSchedule, err error) {
|
||||
emptyResult := &v1.ClusterScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateSubresourceActionWithOptions(clusterscalingschedulesResource, "status", clusterScalingSchedule, opts), emptyResult)
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ClusterScalingSchedule), err
|
||||
}
|
||||
|
||||
// Delete takes name of the clusterScalingSchedule and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeClusterScalingSchedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewRootDeleteActionWithOptions(clusterscalingschedulesResource, name, opts), &v1.ClusterScalingSchedule{})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeClusterScalingSchedules) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
|
||||
action := testing.NewRootDeleteCollectionActionWithOptions(clusterscalingschedulesResource, opts, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1.ClusterScalingScheduleList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched clusterScalingSchedule.
|
||||
func (c *FakeClusterScalingSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterScalingSchedule, err error) {
|
||||
emptyResult := &v1.ClusterScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterscalingschedulesResource, name, pt, data, opts, subresources...), emptyResult)
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ClusterScalingSchedule), err
|
||||
}
|
||||
|
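The fake client now delegates to `gentype.FakeClientWithList` instead of hand-rolled methods, but test code keeps using it through the generated fake clientset. A small sketch, assuming the fake package still exposes the usual `NewSimpleClientset` constructor:

```go
package fake_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
	"github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/fake"
)

func TestGetClusterScalingSchedule(t *testing.T) {
	// Seed the fake clientset with one pre-existing object.
	cs := fake.NewSimpleClientset(&v1.ClusterScalingSchedule{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
	})

	got, err := cs.ZalandoV1().ClusterScalingSchedules().Get(context.TODO(), "example", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "example" {
		t.Fatalf("expected %q, got %q", "example", got.Name)
	}
}
```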
@ -19,129 +19,32 @@ limitations under the License.
|
||||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/typed/zalando.org/v1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeScalingSchedules implements ScalingScheduleInterface
|
||||
type FakeScalingSchedules struct {
|
||||
// fakeScalingSchedules implements ScalingScheduleInterface
|
||||
type fakeScalingSchedules struct {
|
||||
*gentype.FakeClientWithList[*v1.ScalingSchedule, *v1.ScalingScheduleList]
|
||||
Fake *FakeZalandoV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var scalingschedulesResource = v1.SchemeGroupVersion.WithResource("scalingschedules")
|
||||
|
||||
var scalingschedulesKind = v1.SchemeGroupVersion.WithKind("ScalingSchedule")
|
||||
|
||||
// Get takes name of the scalingSchedule, and returns the corresponding scalingSchedule object, and an error if there is any.
|
||||
func (c *FakeScalingSchedules) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ScalingSchedule, err error) {
|
||||
emptyResult := &v1.ScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetActionWithOptions(scalingschedulesResource, c.ns, name, options), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
func newFakeScalingSchedules(fake *FakeZalandoV1, namespace string) zalandoorgv1.ScalingScheduleInterface {
|
||||
return &fakeScalingSchedules{
|
||||
gentype.NewFakeClientWithList[*v1.ScalingSchedule, *v1.ScalingScheduleList](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1.SchemeGroupVersion.WithResource("scalingschedules"),
|
||||
v1.SchemeGroupVersion.WithKind("ScalingSchedule"),
|
||||
func() *v1.ScalingSchedule { return &v1.ScalingSchedule{} },
|
||||
func() *v1.ScalingScheduleList { return &v1.ScalingScheduleList{} },
|
||||
func(dst, src *v1.ScalingScheduleList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1.ScalingScheduleList) []*v1.ScalingSchedule { return gentype.ToPointerSlice(list.Items) },
|
||||
func(list *v1.ScalingScheduleList, items []*v1.ScalingSchedule) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1.ScalingSchedule), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ScalingSchedules that match those selectors.
|
||||
func (c *FakeScalingSchedules) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ScalingScheduleList, err error) {
|
||||
emptyResult := &v1.ScalingScheduleList{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListActionWithOptions(scalingschedulesResource, scalingschedulesKind, c.ns, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1.ScalingScheduleList{ListMeta: obj.(*v1.ScalingScheduleList).ListMeta}
|
||||
for _, item := range obj.(*v1.ScalingScheduleList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested scalingSchedules.
|
||||
func (c *FakeScalingSchedules) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchActionWithOptions(scalingschedulesResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a scalingSchedule and creates it. Returns the server's representation of the scalingSchedule, and an error, if there is any.
|
||||
func (c *FakeScalingSchedules) Create(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.CreateOptions) (result *v1.ScalingSchedule, err error) {
|
||||
emptyResult := &v1.ScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateActionWithOptions(scalingschedulesResource, c.ns, scalingSchedule, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ScalingSchedule), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a scalingSchedule and updates it. Returns the server's representation of the scalingSchedule, and an error, if there is any.
|
||||
func (c *FakeScalingSchedules) Update(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (result *v1.ScalingSchedule, err error) {
|
||||
emptyResult := &v1.ScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateActionWithOptions(scalingschedulesResource, c.ns, scalingSchedule, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ScalingSchedule), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeScalingSchedules) UpdateStatus(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (result *v1.ScalingSchedule, err error) {
|
||||
emptyResult := &v1.ScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceActionWithOptions(scalingschedulesResource, "status", c.ns, scalingSchedule, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ScalingSchedule), err
|
||||
}
|
||||
|
||||
// Delete takes name of the scalingSchedule and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeScalingSchedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(scalingschedulesResource, c.ns, name, opts), &v1.ScalingSchedule{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeScalingSchedules) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionActionWithOptions(scalingschedulesResource, c.ns, opts, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1.ScalingScheduleList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched scalingSchedule.
|
||||
func (c *FakeScalingSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ScalingSchedule, err error) {
|
||||
emptyResult := &v1.ScalingSchedule{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(scalingschedulesResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ScalingSchedule), err
|
||||
}
|
||||
|
@ -29,11 +29,11 @@ type FakeZalandoV1 struct {
|
||||
}
|
||||
|
||||
func (c *FakeZalandoV1) ClusterScalingSchedules() v1.ClusterScalingScheduleInterface {
|
||||
return &FakeClusterScalingSchedules{c}
|
||||
return newFakeClusterScalingSchedules(c)
|
||||
}
|
||||
|
||||
func (c *FakeZalandoV1) ScalingSchedules(namespace string) v1.ScalingScheduleInterface {
|
||||
return &FakeScalingSchedules{c, namespace}
|
||||
return newFakeScalingSchedules(c, namespace)
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
@ -19,9 +19,9 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
scheme "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
@ -37,33 +37,34 @@ type ScalingSchedulesGetter interface {
|
||||
|
||||
// ScalingScheduleInterface has methods to work with ScalingSchedule resources.
|
||||
type ScalingScheduleInterface interface {
|
||||
Create(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.CreateOptions) (*v1.ScalingSchedule, error)
|
||||
Update(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (*v1.ScalingSchedule, error)
|
||||
Create(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts metav1.CreateOptions) (*zalandoorgv1.ScalingSchedule, error)
|
||||
Update(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts metav1.UpdateOptions) (*zalandoorgv1.ScalingSchedule, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (*v1.ScalingSchedule, error)
|
||||
UpdateStatus(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts metav1.UpdateOptions) (*zalandoorgv1.ScalingSchedule, error)
|
||||
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ScalingSchedule, error)
|
||||
List(ctx context.Context, opts metav1.ListOptions) (*v1.ScalingScheduleList, error)
|
||||
Get(ctx context.Context, name string, opts metav1.GetOptions) (*zalandoorgv1.ScalingSchedule, error)
|
||||
List(ctx context.Context, opts metav1.ListOptions) (*zalandoorgv1.ScalingScheduleList, error)
|
||||
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ScalingSchedule, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *zalandoorgv1.ScalingSchedule, err error)
|
||||
ScalingScheduleExpansion
|
||||
}
|
||||
|
||||
// scalingSchedules implements ScalingScheduleInterface
|
||||
type scalingSchedules struct {
|
||||
*gentype.ClientWithList[*v1.ScalingSchedule, *v1.ScalingScheduleList]
|
||||
*gentype.ClientWithList[*zalandoorgv1.ScalingSchedule, *zalandoorgv1.ScalingScheduleList]
|
||||
}
|
||||
|
||||
// newScalingSchedules returns a ScalingSchedules
|
||||
func newScalingSchedules(c *ZalandoV1Client, namespace string) *scalingSchedules {
|
||||
return &scalingSchedules{
|
||||
gentype.NewClientWithList[*v1.ScalingSchedule, *v1.ScalingScheduleList](
|
||||
gentype.NewClientWithList[*zalandoorgv1.ScalingSchedule, *zalandoorgv1.ScalingScheduleList](
|
||||
"scalingschedules",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *v1.ScalingSchedule { return &v1.ScalingSchedule{} },
|
||||
func() *v1.ScalingScheduleList { return &v1.ScalingScheduleList{} }),
|
||||
func() *zalandoorgv1.ScalingSchedule { return &zalandoorgv1.ScalingSchedule{} },
|
||||
func() *zalandoorgv1.ScalingScheduleList { return &zalandoorgv1.ScalingScheduleList{} },
|
||||
),
|
||||
}
|
||||
}
|
||||
|
@ -19,10 +19,10 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
http "net/http"
|
||||
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
"github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/scheme"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
scheme "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
@ -90,10 +90,10 @@ func New(c rest.Interface) *ZalandoV1Client {
|
||||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1.SchemeGroupVersion
|
||||
gv := zalandoorgv1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
|
||||
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
|
@ -19,7 +19,7 @@ limitations under the License.
|
||||
package externalversions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
fmt "fmt"
|
||||
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
@ -19,13 +19,13 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
time "time"
|
||||
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
apiszalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
versioned "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned"
|
||||
internalinterfaces "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/informers/externalversions/internalinterfaces"
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/listers/zalando.org/v1"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/listers/zalando.org/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
@ -36,7 +36,7 @@ import (
|
||||
// ClusterScalingSchedules.
|
||||
type ClusterScalingScheduleInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.ClusterScalingScheduleLister
|
||||
Lister() zalandoorgv1.ClusterScalingScheduleLister
|
||||
}
|
||||
|
||||
type clusterScalingScheduleInformer struct {
|
||||
@ -70,7 +70,7 @@ func NewFilteredClusterScalingScheduleInformer(client versioned.Interface, resyn
|
||||
return client.ZalandoV1().ClusterScalingSchedules().Watch(context.TODO(), options)
|
||||
},
|
||||
},
|
||||
&zalandoorgv1.ClusterScalingSchedule{},
|
||||
&apiszalandoorgv1.ClusterScalingSchedule{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
@ -81,9 +81,9 @@ func (f *clusterScalingScheduleInformer) defaultInformer(client versioned.Interf
|
||||
}
|
||||
|
||||
func (f *clusterScalingScheduleInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&zalandoorgv1.ClusterScalingSchedule{}, f.defaultInformer)
|
||||
return f.factory.InformerFor(&apiszalandoorgv1.ClusterScalingSchedule{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *clusterScalingScheduleInformer) Lister() v1.ClusterScalingScheduleLister {
|
||||
return v1.NewClusterScalingScheduleLister(f.Informer().GetIndexer())
|
||||
func (f *clusterScalingScheduleInformer) Lister() zalandoorgv1.ClusterScalingScheduleLister {
|
||||
return zalandoorgv1.NewClusterScalingScheduleLister(f.Informer().GetIndexer())
|
||||
}
|
||||
|
@ -19,13 +19,13 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
time "time"
|
||||
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
apiszalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
versioned "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned"
|
||||
internalinterfaces "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/informers/externalversions/internalinterfaces"
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/listers/zalando.org/v1"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/listers/zalando.org/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
@ -36,7 +36,7 @@ import (
|
||||
// ScalingSchedules.
|
||||
type ScalingScheduleInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.ScalingScheduleLister
|
||||
Lister() zalandoorgv1.ScalingScheduleLister
|
||||
}
|
||||
|
||||
type scalingScheduleInformer struct {
|
||||
@ -71,7 +71,7 @@ func NewFilteredScalingScheduleInformer(client versioned.Interface, namespace st
|
||||
return client.ZalandoV1().ScalingSchedules(namespace).Watch(context.TODO(), options)
|
||||
},
|
||||
},
|
||||
&zalandoorgv1.ScalingSchedule{},
|
||||
&apiszalandoorgv1.ScalingSchedule{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
@ -82,9 +82,9 @@ func (f *scalingScheduleInformer) defaultInformer(client versioned.Interface, re
|
||||
}
|
||||
|
||||
func (f *scalingScheduleInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&zalandoorgv1.ScalingSchedule{}, f.defaultInformer)
|
||||
return f.factory.InformerFor(&apiszalandoorgv1.ScalingSchedule{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *scalingScheduleInformer) Lister() v1.ScalingScheduleLister {
|
||||
return v1.NewScalingScheduleLister(f.Informer().GetIndexer())
|
||||
func (f *scalingScheduleInformer) Lister() zalandoorgv1.ScalingScheduleLister {
|
||||
return zalandoorgv1.NewScalingScheduleLister(f.Informer().GetIndexer())
|
||||
}
|
||||
|
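The informer now returns the lister from the generated listers package (aliased `zalandoorgv1`) rather than `v1`, but consumer code is unchanged. A sketch of wiring it up through the shared informer factory; the `Zalando()` group accessor name is inferred from the clientset's `ZalandoV1()` and may differ in the generated factory:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"

	versioned "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned"
	"github.com/zalando-incubator/kube-metrics-adapter/pkg/client/informers/externalversions"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Zalando().V1().ScalingSchedules() // group accessor name is an assumption

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	cache.WaitForCacheSync(stop, informer.Informer().HasSynced)

	// The lister returns typed *ScalingSchedule objects from the shared cache.
	schedules, err := informer.Lister().ScalingSchedules("default").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, s := range schedules {
		fmt.Println(s.Name)
	}
}
```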
@ -19,10 +19,10 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/listers"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
listers "k8s.io/client-go/listers"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ClusterScalingScheduleLister helps list ClusterScalingSchedules.
|
||||
@ -30,19 +30,19 @@ import (
|
||||
type ClusterScalingScheduleLister interface {
|
||||
// List lists all ClusterScalingSchedules in the indexer.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1.ClusterScalingSchedule, err error)
|
||||
List(selector labels.Selector) (ret []*zalandoorgv1.ClusterScalingSchedule, err error)
|
||||
// Get retrieves the ClusterScalingSchedule from the index for a given name.
|
||||
// Objects returned here must be treated as read-only.
|
||||
Get(name string) (*v1.ClusterScalingSchedule, error)
|
||||
Get(name string) (*zalandoorgv1.ClusterScalingSchedule, error)
|
||||
ClusterScalingScheduleListerExpansion
|
||||
}
|
||||
|
||||
// clusterScalingScheduleLister implements the ClusterScalingScheduleLister interface.
|
||||
type clusterScalingScheduleLister struct {
|
||||
listers.ResourceIndexer[*v1.ClusterScalingSchedule]
|
||||
listers.ResourceIndexer[*zalandoorgv1.ClusterScalingSchedule]
|
||||
}
|
||||
|
||||
// NewClusterScalingScheduleLister returns a new ClusterScalingScheduleLister.
|
||||
func NewClusterScalingScheduleLister(indexer cache.Indexer) ClusterScalingScheduleLister {
|
||||
return &clusterScalingScheduleLister{listers.New[*v1.ClusterScalingSchedule](indexer, v1.Resource("clusterscalingschedule"))}
|
||||
return &clusterScalingScheduleLister{listers.New[*zalandoorgv1.ClusterScalingSchedule](indexer, zalandoorgv1.Resource("clusterscalingschedule"))}
|
||||
}
|
||||
|
@ -19,10 +19,10 @@ limitations under the License.
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/listers"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
listers "k8s.io/client-go/listers"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ScalingScheduleLister helps list ScalingSchedules.
|
||||
@ -30,7 +30,7 @@ import (
|
||||
type ScalingScheduleLister interface {
|
||||
// List lists all ScalingSchedules in the indexer.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1.ScalingSchedule, err error)
|
||||
List(selector labels.Selector) (ret []*zalandoorgv1.ScalingSchedule, err error)
|
||||
// ScalingSchedules returns an object that can list and get ScalingSchedules.
|
||||
ScalingSchedules(namespace string) ScalingScheduleNamespaceLister
|
||||
ScalingScheduleListerExpansion
|
||||
@ -38,17 +38,17 @@ type ScalingScheduleLister interface {
|
||||
|
||||
// scalingScheduleLister implements the ScalingScheduleLister interface.
|
||||
type scalingScheduleLister struct {
|
||||
listers.ResourceIndexer[*v1.ScalingSchedule]
|
||||
listers.ResourceIndexer[*zalandoorgv1.ScalingSchedule]
|
||||
}
|
||||
|
||||
// NewScalingScheduleLister returns a new ScalingScheduleLister.
|
||||
func NewScalingScheduleLister(indexer cache.Indexer) ScalingScheduleLister {
|
||||
return &scalingScheduleLister{listers.New[*v1.ScalingSchedule](indexer, v1.Resource("scalingschedule"))}
|
||||
return &scalingScheduleLister{listers.New[*zalandoorgv1.ScalingSchedule](indexer, zalandoorgv1.Resource("scalingschedule"))}
|
||||
}
|
||||
|
||||
// ScalingSchedules returns an object that can list and get ScalingSchedules.
|
||||
func (s *scalingScheduleLister) ScalingSchedules(namespace string) ScalingScheduleNamespaceLister {
|
||||
return scalingScheduleNamespaceLister{listers.NewNamespaced[*v1.ScalingSchedule](s.ResourceIndexer, namespace)}
|
||||
return scalingScheduleNamespaceLister{listers.NewNamespaced[*zalandoorgv1.ScalingSchedule](s.ResourceIndexer, namespace)}
|
||||
}
|
||||
|
||||
// ScalingScheduleNamespaceLister helps list and get ScalingSchedules.
|
||||
@ -56,15 +56,15 @@ func (s *scalingScheduleLister) ScalingSchedules(namespace string) ScalingSchedu
|
||||
type ScalingScheduleNamespaceLister interface {
|
||||
// List lists all ScalingSchedules in the indexer for a given namespace.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1.ScalingSchedule, err error)
|
||||
List(selector labels.Selector) (ret []*zalandoorgv1.ScalingSchedule, err error)
|
||||
// Get retrieves the ScalingSchedule from the indexer for a given namespace and name.
|
||||
// Objects returned here must be treated as read-only.
|
||||
Get(name string) (*v1.ScalingSchedule, error)
|
||||
Get(name string) (*zalandoorgv1.ScalingSchedule, error)
|
||||
ScalingScheduleNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// scalingScheduleNamespaceLister implements the ScalingScheduleNamespaceLister
|
||||
// interface.
|
||||
type scalingScheduleNamespaceLister struct {
|
||||
listers.ResourceIndexer[*v1.ScalingSchedule]
|
||||
listers.ResourceIndexer[*zalandoorgv1.ScalingSchedule]
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package collector
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/zalando-incubator/kube-metrics-adapter/pkg/nakadi"
@ -17,6 +18,9 @@ const (
// subscriptions.
NakadiMetricType = "nakadi"
nakadiSubscriptionIDKey = "subscription-id"
nakadiOwningApplicationKey = "owning-application"
nakadiConsumerGroupKey = "consumer-group"
nakadiEventTypesKey = "event-types"
nakadiMetricTypeKey = "metric-type"
nakadiMetricTypeConsumerLagSeconds = "consumer-lag-seconds"
nakadiMetricTypeUnconsumedEvents = "unconsumed-events"
@ -43,26 +47,21 @@ func (c *NakadiCollectorPlugin) NewCollector(ctx context.Context, hpa *autoscali
// NakadiCollector defines a collector that is able to collect metrics from
// Nakadi.
type NakadiCollector struct {
nakadi nakadi.Nakadi
interval time.Duration
subscriptionID string
nakadiMetricType string
metric autoscalingv2.MetricIdentifier
metricType autoscalingv2.MetricSourceType
namespace string
nakadi nakadi.Nakadi
interval time.Duration
subscriptionFilter *nakadi.SubscriptionFilter
nakadiMetricType string
metric autoscalingv2.MetricIdentifier
metricType autoscalingv2.MetricSourceType
namespace string
}

// NewNakadiCollector initializes a new NakadiCollector.
func NewNakadiCollector(_ context.Context, nakadi nakadi.Nakadi, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*NakadiCollector, error) {
func NewNakadiCollector(_ context.Context, nakadiClient nakadi.Nakadi, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*NakadiCollector, error) {
if config.Metric.Selector == nil {
return nil, fmt.Errorf("selector for nakadi is not specified")
}

subscriptionID, ok := config.Config[nakadiSubscriptionIDKey]
if !ok {
return nil, fmt.Errorf("subscription-id not specified on metric")
}

metricType, ok := config.Config[nakadiMetricTypeKey]
if !ok {
return nil, fmt.Errorf("metric-type not specified on metric")
@ -72,14 +71,40 @@ func NewNakadiCollector(_ context.Context, nakadi nakadi.Nakadi, hpa *autoscalin
return nil, fmt.Errorf("metric-type must be either '%s' or '%s', was '%s'", nakadiMetricTypeConsumerLagSeconds, nakadiMetricTypeUnconsumedEvents, metricType)
}

// Either subscription-id or filtering via owning-application,
// event-types, and consumer-group is supported. If all are defined
// then only subscription-id is used and the rest of the fields are
// ignored.
subscriptionFilter := &nakadi.SubscriptionFilter{}
if subscriptionID, ok := config.Config[nakadiSubscriptionIDKey]; ok {
subscriptionFilter.SubscriptionID = subscriptionID
}

if owningApplication, ok := config.Config[nakadiOwningApplicationKey]; ok {
subscriptionFilter.OwningApplication = owningApplication
}

if nakadiEventTypes, ok := config.Config[nakadiEventTypesKey]; ok {
eventTypes := strings.Split(nakadiEventTypes, ",")
subscriptionFilter.EventTypes = eventTypes
}

if consumerGroup, ok := config.Config[nakadiConsumerGroupKey]; ok {
subscriptionFilter.ConsumerGroup = consumerGroup
}

if subscriptionFilter.SubscriptionID == "" && (subscriptionFilter.OwningApplication == "" || len(subscriptionFilter.EventTypes) == 0 || subscriptionFilter.ConsumerGroup == "") {
return nil, fmt.Errorf("either subscription-id or all of [owning-application, event-types, consumer-group] must be specified on the metric")
}

return &NakadiCollector{
nakadi: nakadi,
interval: interval,
subscriptionID: subscriptionID,
nakadiMetricType: metricType,
metric: config.Metric,
metricType: config.Type,
namespace: hpa.Namespace,
nakadi: nakadiClient,
interval: interval,
subscriptionFilter: subscriptionFilter,
nakadiMetricType: metricType,
metric: config.Metric,
metricType: config.Type,
namespace: hpa.Namespace,
}, nil
}
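
For illustration, the snippet below mirrors how the collector maps a metric's label configuration onto a filter and enforces the rule stated in the comment above (either `subscription-id`, or all of `owning-application`, `event-types` and `consumer-group`). The helper, the re-declared struct and the example values are assumptions made for this standalone sketch; the actual logic lives in NewNakadiCollector.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// SubscriptionFilter is re-declared here only to keep the sketch
// self-contained; the real type lives in pkg/nakadi.
type SubscriptionFilter struct {
	SubscriptionID    string
	OwningApplication string
	EventTypes        []string
	ConsumerGroup     string
}

// filterFromConfig is a hypothetical helper that mirrors the mapping and
// validation performed by NewNakadiCollector above.
func filterFromConfig(cfg map[string]string) (*SubscriptionFilter, error) {
	filter := &SubscriptionFilter{
		SubscriptionID:    cfg["subscription-id"],
		OwningApplication: cfg["owning-application"],
		ConsumerGroup:     cfg["consumer-group"],
	}
	if eventTypes, ok := cfg["event-types"]; ok {
		// event-types is a comma separated list in the metric config.
		filter.EventTypes = strings.Split(eventTypes, ",")
	}
	if filter.SubscriptionID == "" && (filter.OwningApplication == "" || len(filter.EventTypes) == 0 || filter.ConsumerGroup == "") {
		return nil, errors.New("either subscription-id or all of [owning-application, event-types, consumer-group] must be specified on the metric")
	}
	return filter, nil
}

func main() {
	// Filter-based configuration; all values are made up for illustration.
	filter, err := filterFromConfig(map[string]string{
		"owning-application": "my-app",
		"event-types":        "order-created,order-updated",
		"consumer-group":     "cg-1",
	})
	fmt.Println(filter, err)
}
```

When a subscription ID is present it takes precedence: the stats lookup further down uses it directly and skips the subscriptions query.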

@ -89,12 +114,12 @@ func (c *NakadiCollector) GetMetrics(ctx context.Context) ([]CollectedMetric, er
var err error
switch c.nakadiMetricType {
case nakadiMetricTypeConsumerLagSeconds:
value, err = c.nakadi.ConsumerLagSeconds(ctx, c.subscriptionID)
value, err = c.nakadi.ConsumerLagSeconds(ctx, c.subscriptionFilter)
if err != nil {
return nil, err
}
case nakadiMetricTypeUnconsumedEvents:
value, err = c.nakadi.UnconsumedEvents(ctx, c.subscriptionID)
value, err = c.nakadi.UnconsumedEvents(ctx, c.subscriptionFilter)
if err != nil {
return nil, err
}

@ -12,8 +12,8 @@ import (

// Nakadi defines an interface for talking to the Nakadi API.
type Nakadi interface {
ConsumerLagSeconds(ctx context.Context, subscriptionID string) (int64, error)
UnconsumedEvents(ctx context.Context, subscriptionID string) (int64, error)
ConsumerLagSeconds(ctx context.Context, filter *SubscriptionFilter) (int64, error)
UnconsumedEvents(ctx context.Context, filter *SubscriptionFilter) (int64, error)
}

// Client defines client for interfacing with the Nakadi API.
@ -30,8 +30,8 @@ func NewNakadiClient(nakadiEndpoint string, client *http.Client) *Client {
}
}

func (c *Client) ConsumerLagSeconds(ctx context.Context, subscriptionID string) (int64, error) {
stats, err := c.stats(ctx, subscriptionID)
func (c *Client) ConsumerLagSeconds(ctx context.Context, filter *SubscriptionFilter) (int64, error) {
stats, err := c.stats(ctx, filter)
if err != nil {
return 0, err
}
@ -46,8 +46,8 @@ func (c *Client) ConsumerLagSeconds(ctx context.Context, subscriptionID string)
return maxConsumerLagSeconds, nil
}

func (c *Client) UnconsumedEvents(ctx context.Context, subscriptionID string) (int64, error) {
stats, err := c.stats(ctx, subscriptionID)
func (c *Client) UnconsumedEvents(ctx context.Context, filter *SubscriptionFilter) (int64, error) {
stats, err := c.stats(ctx, filter)
if err != nil {
return 0, err
}
@ -62,6 +62,90 @@ func (c *Client) UnconsumedEvents(ctx context.Context, subscriptionID string) (i
return unconsumedEvents, nil
}

type SubscriptionFilter struct {
SubscriptionID string
OwningApplication string
EventTypes []string
ConsumerGroup string
}
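
Putting the pieces together, a caller of the package can resolve both metric types through a filter rather than a fixed subscription ID. Below is a minimal sketch, assuming a plain (unauthenticated) http.Client and made-up endpoint and filter values; inside the adapter these calls are made by the NakadiCollector shown earlier rather than directly.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/zalando-incubator/kube-metrics-adapter/pkg/nakadi"
)

func main() {
	// Plain http.Client and endpoint are placeholders for this sketch;
	// authenticating against a real Nakadi deployment is out of scope here.
	client := nakadi.NewNakadiClient("https://nakadi.example.org", &http.Client{Timeout: 15 * time.Second})

	// Resolve subscriptions by owner, event types and consumer group
	// instead of a fixed subscription ID (all values are hypothetical).
	filter := &nakadi.SubscriptionFilter{
		OwningApplication: "my-app",
		EventTypes:        []string{"order-created", "order-updated"},
		ConsumerGroup:     "cg-1",
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	lag, err := client.ConsumerLagSeconds(ctx, filter)
	if err != nil {
		fmt.Println("consumer lag lookup failed:", err)
		return
	}
	unconsumed, err := client.UnconsumedEvents(ctx, filter)
	if err != nil {
		fmt.Println("unconsumed events lookup failed:", err)
		return
	}
	fmt.Printf("max consumer lag: %ds, unconsumed events: %d\n", lag, unconsumed)
}
```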

func (c *Client) subscriptions(ctx context.Context, filter *SubscriptionFilter, href string) ([]string, error) {
endpoint, err := url.Parse(c.nakadiEndpoint)
if err != nil {
return nil, err
}

if href != "" {
endpoint, err = url.Parse(c.nakadiEndpoint + href)
if err != nil {
return nil, fmt.Errorf("[nakadi subscriptions] failed to parse URL with href: %w", err)
}
} else {
endpoint.Path = "/subscriptions"
q := endpoint.Query()
if filter.OwningApplication != "" {
q.Set("owning_application", filter.OwningApplication)
}
for _, eventType := range filter.EventTypes {
q.Add("event_type", eventType)
}
if filter.ConsumerGroup != "" {
q.Set("consumer_group", filter.ConsumerGroup)
}
endpoint.RawQuery = q.Encode()
}

req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
if err != nil {
return nil, fmt.Errorf("[nakadi subscriptions] failed to create request: %w", err)
}

resp, err := c.http.Do(req)
if err != nil {
return nil, fmt.Errorf("[nakadi subscriptions] failed to make request: %w", err)
}
defer resp.Body.Close()

d, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("[nakadi subscriptions] unexpected response code: %d (%s)", resp.StatusCode, string(d))
}

var subscriptionsResp struct {
Items []struct {
ID string `json:"id"`
}
Links struct {
Next struct {
Href string `json:"href"`
} `json:"next"`
} `json:"_links"`
}
err = json.Unmarshal(d, &subscriptionsResp)
if err != nil {
return nil, err
}

var subscriptions []string
for _, item := range subscriptionsResp.Items {
subscriptions = append(subscriptions, item.ID)
}

if subscriptionsResp.Links.Next.Href != "" {
nextSubscriptions, err := c.subscriptions(ctx, nil, subscriptionsResp.Links.Next.Href)
if err != nil {
return nil, fmt.Errorf("[nakadi subscriptions] failed to get next subscriptions: %w", err)
}
subscriptions = append(subscriptions, nextSubscriptions...)
}

return subscriptions, nil
}
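
The subscriptions lookup above filters server-side via query parameters and then follows `_links.next.href` until no further page is returned. The fragment below only demonstrates how the first-page URL is assembled with net/url; the host and filter values are invented for the sketch.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Build the first-page /subscriptions query the same way as above;
	// the endpoint host and filter values are placeholders.
	endpoint, err := url.Parse("https://nakadi.example.org")
	if err != nil {
		panic(err)
	}
	endpoint.Path = "/subscriptions"

	q := endpoint.Query()
	q.Set("owning_application", "my-app")
	q.Add("event_type", "order-created")
	q.Add("event_type", "order-updated")
	q.Set("consumer_group", "cg-1")
	endpoint.RawQuery = q.Encode()

	// Prints:
	// https://nakadi.example.org/subscriptions?consumer_group=cg-1&event_type=order-created&event_type=order-updated&owning_application=my-app
	fmt.Println(endpoint.String())

	// Later pages are fetched by appending _links.next.href from each
	// response to the base endpoint, as the recursion above does.
}
```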

type statsResp struct {
Items []statsEventType `json:"items"`
}
@ -80,45 +164,68 @@ type statsPartition struct {
AssignmentType string `json:"assignment_type"`
}

// stats returns the Nakadi stats for a given subscription ID.
// stats returns the Nakadi stats for a given subscription filter, which can
// include the subscription ID or a filter combination of [owning-application,
// event-types, consumer-group].
//
// https://nakadi.io/manual.html#/subscriptions/subscription_id/stats_get
func (c *Client) stats(ctx context.Context, subscriptionID string) ([]statsEventType, error) {
func (c *Client) stats(ctx context.Context, filter *SubscriptionFilter) ([]statsEventType, error) {
var subscriptionIDs []string
if filter.SubscriptionID == "" {
subscriptions, err := c.subscriptions(ctx, filter, "")
if err != nil {
return nil, fmt.Errorf("[nakadi stats] failed to get subscriptions: %w", err)
}
subscriptionIDs = subscriptions
} else {
subscriptionIDs = []string{filter.SubscriptionID}
}

endpoint, err := url.Parse(c.nakadiEndpoint)
if err != nil {
return nil, err
return nil, fmt.Errorf("[nakadi stats] failed to parse URL %q: %w", c.nakadiEndpoint, err)
}

endpoint.Path = fmt.Sprintf("/subscriptions/%s/stats", subscriptionID)
var stats []statsEventType
for _, subscriptionID := range subscriptionIDs {
endpoint.Path = fmt.Sprintf("/subscriptions/%s/stats", subscriptionID)

q := endpoint.Query()
q.Set("show_time_lag", "true")
endpoint.RawQuery = q.Encode()
q := endpoint.Query()
q.Set("show_time_lag", "true")
endpoint.RawQuery = q.Encode()

resp, err := c.http.Get(endpoint.String())
if err != nil {
return nil, err
}
defer resp.Body.Close()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
if err != nil {
return nil, fmt.Errorf("[nakadi stats] failed to create request: %w", err)
}

d, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
resp, err := c.http.Do(req)
if err != nil {
return nil, fmt.Errorf("[nakadi stats] failed to make request: %w", err)
}
defer resp.Body.Close()

d, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("[nakadi stats] unexpected response code: %d (%s)", resp.StatusCode, string(d))
}

var result statsResp
err = json.Unmarshal(d, &result)
if err != nil {
return nil, err
}

if len(result.Items) == 0 {
return nil, errors.New("[nakadi stats] expected at least 1 event-type, 0 returned")
}

stats = append(stats, result.Items...)
}

if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("[nakadi stats] unexpected response code: %d (%s)", resp.StatusCode, string(d))
}

var result statsResp
err = json.Unmarshal(d, &result)
if err != nil {
return nil, err
}

if len(result.Items) == 0 {
return nil, errors.New("expected at least 1 event-type, 0 returned")
}

return result.Items, nil
return stats, nil
}

@ -12,18 +12,42 @@ import (

func TestQuery(tt *testing.T) {
client := &http.Client{}

subscriptionsResponseBody := `{
"items": [
{
"id": "id_1"
},
{
"id": "id_2"
}
],
"_links": {
"next": {
"href": "/subscriptions?event_type=example-event&owning_application=example-app&offset=20&limit=20"
}
}
}`

subscriptionsResponseBodyNoNext := `{
"items": [],
"_links": {}
}`

for _, ti := range []struct {
msg string
status int
responseBody string
err error
unconsumedEvents int64
consumerLagSeconds int64
msg string
status int
subscriptionIDResponseBody string
subscriptionFilter *SubscriptionFilter
err error
unconsumedEvents int64
consumerLagSeconds int64
}{
{
msg: "test getting a single event-type",
status: http.StatusOK,
responseBody: `{
msg: "test getting a single event-type",
status: http.StatusOK,
subscriptionFilter: &SubscriptionFilter{SubscriptionID: "id"},
subscriptionIDResponseBody: `{
"items": [
{
"event_type": "example-event",
@ -52,9 +76,10 @@ func TestQuery(tt *testing.T) {
consumerLagSeconds: 2,
},
{
msg: "test getting multiple event-types",
status: http.StatusOK,
responseBody: `{
msg: "test getting multiple event-types",
status: http.StatusOK,
subscriptionFilter: &SubscriptionFilter{SubscriptionID: "id"},
subscriptionIDResponseBody: `{
"items": [
{
"event_type": "example-event",
@ -104,38 +129,92 @@ func TestQuery(tt *testing.T) {
consumerLagSeconds: 6,
},
{
msg: "test call with invalid response",
status: http.StatusInternalServerError,
responseBody: `{"error": 500}`,
err: errors.New("[nakadi stats] unexpected response code: 500 ({\"error\": 500})"),
msg: "test call with invalid response",
status: http.StatusInternalServerError,
subscriptionFilter: &SubscriptionFilter{SubscriptionID: "id"},
subscriptionIDResponseBody: `{"error": 500}`,
err: errors.New("[nakadi stats] unexpected response code: 500 ({\"error\": 500})"),
},
{
msg: "test getting back a single data point",
status: http.StatusOK,
responseBody: `{
msg: "test getting back no data points",
status: http.StatusOK,
subscriptionFilter: &SubscriptionFilter{SubscriptionID: "id"},
subscriptionIDResponseBody: `{
"items": []
}`,
err: errors.New("expected at least 1 event-type, 0 returned"),
err: errors.New("[nakadi stats] expected at least 1 event-type, 0 returned"),
},
{
msg: "test filtering by owning_application and event_type",
status: http.StatusOK,
subscriptionFilter: &SubscriptionFilter{OwningApplication: "example-app", EventTypes: []string{"example-event"}, ConsumerGroup: "example-group"},
subscriptionIDResponseBody: `{
"items": [
{
"event_type": "example-event",
"partitions": [
{
"partition": "0",
"state": "assigned",
"unconsumed_events": 4,
"consumer_lag_seconds": 2,
"stream_id": "example-id",
"assignment_type": "auto"
},
{
"partition": "0",
"state": "assigned",
"unconsumed_events": 5,
"consumer_lag_seconds": 1,
"stream_id": "example-id",
"assignment_type": "auto"
}
]
}
]
}`,
unconsumedEvents: 18,
consumerLagSeconds: 2,
},
} {
tt.Run(ti.msg, func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(ti.status)
_, err := w.Write([]byte(ti.responseBody))
mux := http.NewServeMux()
mux.HandleFunc("/subscriptions", func(w http.ResponseWriter, r *http.Request) {
offset := r.URL.Query().Get("offset")
if offset != "" {
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(subscriptionsResponseBodyNoNext))
assert.NoError(t, err)
}),
)
return
}

owningApplication := r.URL.Query().Get("owning_application")
eventTypes := r.URL.Query()["event_type"]
consumerGroup := r.URL.Query().Get("consumer_group")

assert.Equal(t, ti.subscriptionFilter.OwningApplication, owningApplication)
assert.Equal(t, ti.subscriptionFilter.EventTypes, eventTypes)
assert.Equal(t, ti.subscriptionFilter.ConsumerGroup, consumerGroup)

w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(subscriptionsResponseBody))
assert.NoError(t, err)
})
mux.HandleFunc("/subscriptions/{id}/stats", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(ti.status)
_, err := w.Write([]byte(ti.subscriptionIDResponseBody))
assert.NoError(t, err)
})
ts := httptest.NewServer(mux)
defer ts.Close()

nakadiClient := NewNakadiClient(ts.URL, client)
consumerLagSeconds, err := nakadiClient.ConsumerLagSeconds(context.Background(), "id")
consumerLagSeconds, err := nakadiClient.ConsumerLagSeconds(context.Background(), ti.subscriptionFilter)
assert.Equal(t, ti.err, err)
assert.Equal(t, ti.consumerLagSeconds, consumerLagSeconds)
unconsumedEvents, err := nakadiClient.UnconsumedEvents(context.Background(), "id")
unconsumedEvents, err := nakadiClient.UnconsumedEvents(context.Background(), ti.subscriptionFilter)
assert.Equal(t, ti.err, err)
assert.Equal(t, ti.unconsumedEvents, unconsumedEvents)
})
}

}

@ -25,6 +25,10 @@ import (
"github.com/zalando-incubator/kube-metrics-adapter/pkg/recorder"
)

const (
kubectlLastAppliedAnnotation = "kubectl.kubernetes.io/last-applied-configuration"
)

var (
// CollectionSuccesses is the total number of successful collections.
CollectionSuccesses = promauto.NewCounter(prometheus.CounterOpts{
@ -120,7 +124,7 @@ func (p *HPAProvider) Run(ctx context.Context) {
// updateHPAs discovers all HPA resources and sets up metric collectors for new
// HPAs.
func (p *HPAProvider) updateHPAs() error {
p.logger.Info("Looking for HPAs")
p.logger.Debug("Looking for HPAs")

hpas, err := p.client.AutoscalingV2().HorizontalPodAutoscalers(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
if err != nil {
@ -144,7 +148,7 @@ func (p *HPAProvider) updateHPAs() error {
// if the hpa has changed then remove the previous
// scheduled collector.
if hpaUpdated {
p.logger.Infof("Removing previously scheduled metrics collector: %s", resourceRef)
p.logger.Infof("Removing previously scheduled metrics collector as HPA changed: %s", resourceRef)
p.collectorScheduler.Remove(resourceRef)
}

@ -197,7 +201,12 @@ func (p *HPAProvider) updateHPAs() error {
p.collectorScheduler.Remove(ref)
}

p.logger.Infof("Found %d new/updated HPA(s)", newHPAs)
if newHPAs > 0 {
p.logger.Infof("Found %d new/updated HPA(s)", newHPAs)
} else {
p.logger.Debug("No new/updated HPAs found")
}

p.hpaCache = newHPACache

return nil
@ -205,12 +214,38 @@ func (p *HPAProvider) updateHPAs() error {

// equalHPA returns true if two HPAs are identical (apart from their status).
func equalHPA(a, b autoscalingv2.HorizontalPodAutoscaler) bool {
// reset resource version to not compare it since this will change
// whenever the status of the object is updated. We only want to
// compare the metadata and the spec.
a.ObjectMeta.ResourceVersion = ""
b.ObjectMeta.ResourceVersion = ""
return reflect.DeepEqual(a.ObjectMeta, b.ObjectMeta) && reflect.DeepEqual(a.Spec, b.Spec)
return annotationsUpToDate(a.ObjectMeta, b.ObjectMeta, kubectlLastAppliedAnnotation) && reflect.DeepEqual(a.Spec, b.Spec)
}

// annotationsUpToDate checks whether the annotations of the existing and
// updated resource are up to date.
func annotationsUpToDate(updated, existing metav1.ObjectMeta, ignoredAnnotations ...string) bool {
if len(updated.Annotations) != len(existing.Annotations) {
return false
}

for k, v := range updated.Annotations {
isIgnored := false
for _, ignored := range ignoredAnnotations {
if k == ignored {
isIgnored = true
break
}
}

if isIgnored {
continue
}

existingValue, ok := existing.GetAnnotations()[k]
if ok && existingValue == v {
continue
}

return false
}

return true
}

// collectMetrics collects all metrics from collectors and manages a central

@ -193,3 +193,172 @@ func TestUpdateHPAsDisregardingIncompatibleHPA(t *testing.T) {
// we expect an event when disregardIncompatibleHPAs=false
require.Len(t, eventRecorder.Events, 1)
}

func TestEqualHPA(t *testing.T) {
for _, tc := range []struct {
name string
hpa1 autoscaling.HorizontalPodAutoscaler
hpa2 autoscaling.HorizontalPodAutoscaler
equal bool
}{
{
name: "Identical HPAs are equal",
hpa1: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
hpa2: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
equal: true,
},
{
name: "Only kubectl-last-applied diff on HPAs are equal",
hpa1: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
Annotations: map[string]string{
kubectlLastAppliedAnnotation: "old value",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
hpa2: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
Annotations: map[string]string{
kubectlLastAppliedAnnotation: "new value",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
equal: true,
},
{
name: "diff in annotations are not equal",
hpa1: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
Annotations: map[string]string{
"annotation": "old value",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
hpa2: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
Annotations: map[string]string{
"annotation": "new value",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
equal: false,
},
{
name: "diff in labels are equal",
hpa1: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
Labels: map[string]string{
"label": "old-value",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
hpa2: autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "hpa1",
Namespace: "default",
Labels: map[string]string{
"label": "new-value",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "Deployment",
Name: "app",
APIVersion: "apps/v1",
},
MinReplicas: &[]int32{1}[0],
MaxReplicas: 10,
},
},
equal: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.equal, equalHPA(tc.hpa1, tc.hpa2))
})

}
}