Mirror of https://github.com/zalando-incubator/kube-metrics-adapter.git, synced 2026-03-13 03:27:18 +00:00
Compare commits
113 Commits
| SHA1 |
|---|
| 7cbfc8d006 |
| ae5c2a538a |
| 68bd36ed7b |
| 7cd565c100 |
| 8f0b25517c |
| c8372976e6 |
| dcedc0c17e |
| f1a2e80d15 |
| 2f9aefc4e7 |
| 63120a8c1e |
| dcf686b9c5 |
| 77a04c49b6 |
| b237f5a9d2 |
| 136d31e97f |
| 4d4d5d44bf |
| a46a544bce |
| ea4a3eb421 |
| 897fcdd423 |
| 61e5cde106 |
| f9141fd882 |
| aece0b5efa |
| 856ce2c391 |
| 66b324bb30 |
| c765317359 |
| c973a9e712 |
| ef26f47884 |
| 2ec9b26ec8 |
| 7cc533c892 |
| c9ab7402c5 |
| 0e22243de6 |
| 91acacb4f4 |
| 0678ef1131 |
| 130c67cdf1 |
| 1661d7c4bc |
| 905cf8a06f |
| 15ceeb7316 |
| 776ee93d4d |
| 99a194fe1a |
| 7a63a04668 |
| 8190553285 |
| cb43919967 |
| 49f9f5ffe0 |
| 43dd2e9741 |
| 4aeebc853c |
| 45a09c12a0 |
| 789c2dee79 |
| bbe5a8a1f7 |
| 32e9a39be0 |
| afea8de1d9 |
| a17ad004d3 |
| ec77258ba4 |
| 3ef92536c1 |
| ae5c4af76f |
| b130ce5fa7 |
| 8cd076b01d |
| 15cffaee88 |
| 82a96580a2 |
| 08825c4920 |
| ac3c500d84 |
| 656a05fe07 |
| e54f3b88cf |
| f5124993a2 |
| 7c1339a6f2 |
| fc2f6aa5df |
| 7448688788 |
| 0e1302a97e |
| 749c1a7e25 |
| e7eb21a773 |
| 2c3247bf57 |
| 6860217a71 |
| a66ce04128 |
| 9eabbd53d9 |
| ec8622c028 |
| 166865edbb |
| 85f1c3e13d |
| 1a5297d6b2 |
| be0e0d485e |
| a07eb0b79c |
| 34dc968e77 |
| 9174f2550a |
| 6c4dfd682b |
| aa3f8ab969 |
| 437bca4de8 |
| c2fe13d21d |
| bbfd982fb6 |
| e9112a7114 |
| 2c526dd498 |
| 29b782160d |
| 28f3d96061 |
| 24b7276282 |
| 542d90d9fe |
| 0f359920af |
| 54e2d2d564 |
| 16ec43c361 |
| ffcbfcee48 |
| 69f95534e8 |
| c2179a35ba |
| 35e3fe83e8 |
| 69df60e724 |
| d171e049bf |
| b89ca19e6a |
| a276b64576 |
| ff6d479f1a |
| cd986058e4 |
| d6a33fed63 |
| b8532b756b |
| f28653de74 |
| 2f5d3f5a42 |
| 153d754353 |
| 02ec2282ab |
| 65dd585813 |
| aa7b64e637 |
| 7633ac551e |
@@ -38,11 +38,16 @@ jobs:
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
+
+    - name: setup go
+      uses: actions/setup-go@v5
+      with:
+        go-version: '1.22'

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
+      uses: github/codeql-action/init@v3
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -56,7 +61,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v3

     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -69,4 +74,4 @@ jobs:
     #   ./location_of_script_within_repo/buildscript.sh

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v3
@@ -0,0 +1,91 @@
name: gh-package-deploy
permissions: {}

on:
  push:
    branches:
      - master
    tags:
      - '*'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: "${{ github.repository }}"

jobs:
  docker:
    if: ${{ github.actor != 'dependabot[bot]' }}

    runs-on: ubuntu-latest
    # Adding this block will override default values to None if not specified in the block
    # https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
    permissions:
      contents: read
      actions: read
      packages: write # to push packages
    steps:
      - name: Checkout
        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9

      - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753
        with:
          # https://www.npmjs.com/package/semver#caret-ranges-123-025-004
          go-version: '^1.21'

      - name: Login to Github Container Registry
        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - uses: actions-ecosystem/action-get-latest-tag@b7c32daec3395a9616f88548363a42652b22d435
        id: get-latest-tag

      - name: Build binaries
        run: |
          make build.linux.amd64 build.linux.arm64

      - name: Set up QEMU
        uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1

      - name: Login to GitHub Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Docker meta
        uses: docker/metadata-action@818d4b7b91585d195f67373fd9cb0332e31a7175
        id: meta
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern=v{{version}}
            type=semver,pattern=v{{major}}.{{minor}}

      - name: Build and push
        uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825
        with:
          context: .
          build-args: BASE_IMAGE=alpine:3
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' && startsWith(github.ref, 'refs/tags/v') }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      # Build and push latest tag
      - name: Build and push latest
        uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825
        with:
          context: .
          build-args: BASE_IMAGE=alpine:3
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
          labels: ${{ steps.meta.outputs.labels }}
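For illustration (an inference from the `metadata-action` semver patterns above, not something stated in the workflow itself): pushing the git tag `v1.2.3` would produce the image tags `ghcr.io/<repo>:v1.2.3` and `ghcr.io/<repo>:v1.2`, while the second build-push step additionally publishes `:latest` on every push that is not a pull request.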
@@ -4,7 +4,6 @@ run:
 linters:
   disable-all: true
   enable:
-    - deadcode
     - errcheck
     - gosimple
     - govet
@@ -12,4 +11,3 @@ linters:
     - staticcheck
     - typecheck
     - unused
-    - varcheck
@@ -50,7 +50,7 @@ contribution is in line with our goals.
 - Make sure you sign-off on your commits `git commit -s -m "adding X to change Y"`
 - Write good commit messages (see below).
 - Push your changes to a topic branch in your fork of the repository.
-- As you push your changes, update the pull request with new infomation and tasks as you complete them
+- As you push your changes, update the pull request with new information and tasks as you complete them
 - Project maintainers might comment on your work as you progress
 - When you are done, remove the `work in progess` label and ping the maintainers for a review
 - Your pull request must receive a :thumbsup: from two [maintainers](MAINTAINERS)
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3.13:latest
+ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
 FROM ${BASE_IMAGE}
 LABEL maintainer="Team Teapot @ Zalando SE <team-teapot@zalando.de>"
@@ -34,7 +34,7 @@ $(GENERATED): go.mod $(CRD_TYPE_SOURCE) $(OPENAPI)
 	./hack/update-codegen.sh

 $(GENERATED_CRDS): $(GENERATED) $(CRD_SOURCES)
-	go run sigs.k8s.io/controller-tools/cmd/controller-gen crd:crdVersions=v1 paths=./pkg/apis/... output:crd:dir=docs || /bin/true || true
+	go run sigs.k8s.io/controller-tools/cmd/controller-gen crd:crdVersions=v1 paths=./pkg/apis/... output:crd:dir=docs
 	mv docs/zalando.org_clusterscalingschedules.yaml docs/cluster_scaling_schedules_crd.yaml
 	mv docs/zalando.org_scalingschedules.yaml docs/scaling_schedules_crd.yaml
@@ -82,9 +82,12 @@ HPA resource, or via additional annotations on the HPA resource.

## Pod collector

-The pod collector allows collecting metrics from each pod matched by the HPA.
+The pod collector allows collecting metrics from each pod matching the label selector defined in the HPA's `scaleTargetRef`.
Currently only `json-path` collection is supported.

### Supported HPA `scaleTargetRef`

The Pod Collector utilizes the `scaleTargetRef` specified in an HPA resource to obtain the label selector from the referenced Kubernetes object. This enables the identification and management of pods associated with that object. Currently, the supported Kubernetes objects for this operation are `Deployment`, `StatefulSet` and [`Rollout`](https://argoproj.github.io/argo-rollouts/features/specification/).
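To make this concrete, here is a minimal sketch of an HPA that uses the pod collector against a `Deployment` (the `requests-per-second` metric name, JSON key, port and path are illustrative assumptions in the style of the `json-path` annotations used by this adapter):

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myapp-hpa
  annotations:
    # metric-config.<metricType>.<metricName>.<collectorType>/<configKey>
    metric-config.pods.requests-per-second.json-path/json-key: "$.http_server.rps"
    metric-config.pods.requests-per-second.json-path/path: /metrics
    metric-config.pods.requests-per-second.json-path/port: "9090"
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myapp
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Pods
    pods:
      metric:
        name: requests-per-second
      target:
        type: AverageValue
        averageValue: "10"
```

The label selector of `myapp` is resolved through `scaleTargetRef` as described above, and each matched pod is then scraped at `:9090/metrics` for the JSON document containing the metric.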
### Supported metrics

| Metric | Description | Type | K8s Versions |
@@ -402,6 +405,64 @@ the `backend` label under `matchLabels` for the metric. The ingress annotation
where the backend weights can be obtained can be specified through the flag
`--skipper-backends-annotation`.

## External RPS collector

The External RPS collector, like the Skipper collector, is a simple wrapper around the Prometheus collector that
makes it easy to define an HPA for scaling based on the RPS measured for a given hostname. When
[skipper](https://github.com/zalando/skipper) is used as the ingress
implementation in your cluster, everything should work automatically. If another reverse proxy is used as ingress, like [Nginx](https://github.com/kubernetes/ingress-nginx) for example, it's necessary to configure which Prometheus metric should be used through the `--external-rps-metric-name <metric-name>` flag. Assuming `skipper-ingress` is being used, or the appropriate metric name is passed using the flag mentioned previously, this collector provides the correct Prometheus queries out of the
box so users don't have to define those manually.

### Supported metrics

| Metric | Description | Type | Kind | K8s Versions |
| ------------ | -------------- | ------- | -- | -- |
| `requests-per-second` | Scale based on requests per second for a certain hostname. | External | | `>=1.12` |

### Example: External Metric

This is an example of an HPA that will scale based on `requests-per-second` measured for the hostnames `www.example1.com` and `www.example2.com`, weighted by 42%.
```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myapp-hpa
  annotations:
    metric-config.external.example-rps.requests-per-second/hostnames: www.example1.com,www.example2.com
    metric-config.external.example-rps.requests-per-second/weight: "42"
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: custom-metrics-consumer
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: External
    external:
      metric:
        name: example-rps
        selector:
          matchLabels:
            type: requests-per-second
      target:
        type: AverageValue
        averageValue: "42"
```
### Multiple hostnames per metric

This metric supports an n:1 relation between hostnames and metrics. The measured RPS is the sum of the RPS rates of each of the specified hostnames. This value is further modified by the weight parameter explained below.

### Metric weighting based on backend

Some ingress controllers, like skipper-ingress, support sending traffic to different backends based on some kind of configuration: in the case of skipper, annotations
present on the `Ingress` object, or weights on the RouteGroup backends. By
default the number of replicas will be calculated based on the full traffic
served by these components. If only the traffic being routed to
a specific hostname should be used instead, the weight for the configured hostname(s) can be specified via the `weight` annotation `metric-config.external.<metric-name>.requests-per-second/weight` for the metric being configured.
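As a worked illustration (the numbers are assumed, not taken from this document): with hostnames `www.example1.com` and `www.example2.com` serving 100 and 60 requests per second respectively, and `weight: "42"`, the reported metric would be roughly `(100 + 60) * 0.42 = 67.2` RPS. This matches the query template visible in the collector source at the end of this diff, `scalar(sum(rate(<metric>{host=~"<hosts>"}[1m])) * <weight>)`, assuming the weight is applied as a fraction.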
## InfluxDB collector

The InfluxDB collector maps [Flux](https://github.com/influxdata/flux) queries to metrics that can be used for scaling.
@@ -644,6 +705,93 @@ you need to define a `key` or other `tag` with a "star" query syntax like
metric label definitions. If both annotations and the corresponding label are
defined, then the annotation takes precedence.

## Nakadi collector

The Nakadi collector allows scaling based on [Nakadi](https://nakadi.io/)
Subscription API stats metrics `consumer_lag_seconds` or `unconsumed_events`.

### Supported metrics

| Metric Type | Description | Type | K8s Versions |
|------------------------|-----------------------------------------------------------------------------|----------|--------------|
| `unconsumed-events` | Scale based on the number of unconsumed events for a Nakadi subscription | External | `>=1.24` |
| `consumer-lag-seconds` | Scale based on the max consumer lag in seconds for a Nakadi subscription | External | `>=1.24` |
```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myapp-hpa
  annotations:
    # metric-config.<metricType>.<metricName>.<collectorType>/<configKey>
    metric-config.external.my-nakadi-consumer.nakadi/interval: "60s" # optional
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: custom-metrics-consumer
  minReplicas: 0
  maxReplicas: 8 # should match number of partitions for the event type
  metrics:
  - type: External
    external:
      metric:
        name: my-nakadi-consumer
        selector:
          matchLabels:
            type: nakadi
            subscription-id: "708095f6-cece-4d02-840e-ee488d710b29"
            metric-type: "consumer-lag-seconds|unconsumed-events"
      target:
        # value is compatible with the consumer-lag-seconds metric type.
        # It describes the amount of consumer lag in seconds before scaling
        # additionally up.
        # If an event type has multiple partitions the value of
        # consumer-lag-seconds is the max over all the partitions.
        value: "600" # 10m
        type: Value
        # averageValue is compatible with the unconsumed-events metric type.
        # This means for every 30 unconsumed events a pod is added.
        # unconsumed-events is the sum of unconsumed_events over all
        # partitions.
        averageValue: "30"
        type: AverageValue
```
|
||||
The `subscription-id` is the Subscription ID of the relevant consumer. The
|
||||
`metric-type` indicates whether to scale on `consumer-lag-seconds` or
|
||||
`unconsumed-events` as outlined below.
|
||||
|
||||
`unconsumed-events` - is the total number of unconsumed events over all
|
||||
partitions. When using this `metric-type` you should also use the target
|
||||
`averageValue` which indicates the number of events which can be handled per
|
||||
pod. To best estimate the number of events per pods, you need to understand the
|
||||
average time for processing an event as well as the rate of events.
|
||||
|
||||
*Example*: You have an event type producing 100 events per second between 00:00
|
||||
and 08:00. Between 08:01 to 23:59 it produces 400 events per second.
|
||||
Let's assume that on average a single pod can consume 100 events per second,
|
||||
then we can define 100 as `averageValue` and the HPA would scale to 1 between
|
||||
00:00 and 08:00, and scale to 4 between 08:01 and 23:59. If there for some
|
||||
reason is a short spike of 800 events per second, then it would scale to 8 pods
|
||||
to process those events until the rate goes down again.
|
||||
|
||||
`consumer-lag-seconds` - describes the age of the oldest unconsumed event for
|
||||
a subscription. If the event type has multiple partitions the lag is defined as
|
||||
the max age over all partitions. When using this `metric-type` you should use
|
||||
the target `value` to indicate the max lag (in seconds) before the HPA should
|
||||
scale.
|
||||
|
||||
*Example*: You have a subscription with a defined SLO of "99.99 of events are
|
||||
consumed within 30 min.". In this case you can define a target `value` of e.g.
|
||||
20 min. (1200s) (to include a safety buffer) such that the HPA only scales up
|
||||
from 1 to 2 if the target of 20 min. is breached and it needs to work faster
|
||||
with more consumers.
|
||||
For this case you should also account for the average time for processing an
|
||||
event when defining the target.
|
||||
|
||||
|
||||
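Because the template above folds both target styles into one block, a concrete single-metric-type configuration may be easier to read. A minimal sketch for `consumer-lag-seconds` alone, derived from the template (not part of the diff itself):

```yaml
      metric:
        name: my-nakadi-consumer
        selector:
          matchLabels:
            type: nakadi
            subscription-id: "708095f6-cece-4d02-840e-ee488d710b29"
            metric-type: "consumer-lag-seconds"
      target:
        type: Value
        value: "1200" # scale up once the oldest unconsumed event is ~20 minutes old
```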
## HTTP Collector

The http collector allows collecting metrics from an external endpoint specified in the HPA.
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.8.0
-  creationTimestamp: null
+    controller-gen.kubebuilder.io/version: v0.12.0
   name: clusterscalingschedules.zalando.org
 spec:
   group: zalando.org
@@ -56,7 +55,7 @@ spec:
             properties:
               date:
-                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formated date.
+                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formatted date.
                 format: date-time
                 type: string
               durationMinutes:
@@ -65,7 +64,7 @@ spec:
                 type: integer
               endDate:
-                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formated date.
+                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formatted date.
                 format: date-time
                 type: string
               period:
@@ -140,9 +139,3 @@ spec:
     storage: true
   subresources:
     status: {}
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
@@ -22,7 +22,7 @@ spec:
       serviceAccountName: custom-metrics-apiserver
       containers:
         - name: kube-metrics-adapter
-          image: registry.opensource.zalan.do/teapot/kube-metrics-adapter:latest
+          image: ghcr.io/zalando-incubator/kube-metrics-adapter:latest
           args:
             # - --v=9
             - --prometheus-server=http://prometheus.kube-system.svc.cluster.local
@@ -56,7 +56,7 @@ spec:
             properties:
               date:
-                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formated date.
+                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formatted date.
                 format: date-time
                 type: string
               durationMinutes:
@@ -65,7 +65,7 @@ spec:
                 type: integer
               endDate:
-                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formated date.
+                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formatted date.
                 format: date-time
                 type: string
               period:

@@ -58,7 +58,7 @@ spec:
             properties:
               date:
-                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formated date.
+                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formatted date.
                 format: date-time
                 type: string
               durationMinutes:
@@ -67,7 +67,7 @@ spec:
                 type: integer
               endDate:
-                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formated date.
+                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formatted date.
                 format: date-time
                 type: string
               period:
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.8.0
-  creationTimestamp: null
+    controller-gen.kubebuilder.io/version: v0.12.0
   name: scalingschedules.zalando.org
 spec:
   group: zalando.org
@@ -58,7 +57,7 @@ spec:
             properties:
               date:
-                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formated date.
+                description: Defines the starting date of a OneTime schedule. It has to be a RFC3339 formatted date.
                 format: date-time
                 type: string
               durationMinutes:
@@ -67,7 +66,7 @@ spec:
                 type: integer
               endDate:
-                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formated date.
+                description: Defines the ending date of a OneTime schedule. It must be a RFC3339 formatted date.
                 format: date-time
                 type: string
               period:
@@ -142,9 +141,3 @@ spec:
     storage: true
   subresources:
     status: {}
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
@@ -1,4 +1,4 @@
-FROM registry.opensource.zalan.do/library/alpine-3.13:latest
+FROM registry.opensource.zalan.do/library/alpine-3:latest
 LABEL maintainer="Team Teapot @ Zalando SE <team-teapot@zalando.de>"

 # add binary
@@ -1,4 +1,4 @@
-apiVersion: autoscaling/v2beta2
+apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
 metadata:
   name: custom-metrics-consumer
@@ -1,62 +1,68 @@
 module github.com/zalando-incubator/kube-metrics-adapter

 require (
-	github.com/aws/aws-sdk-go v1.44.254
-	github.com/influxdata/influxdb-client-go v0.2.0
-	github.com/prometheus/client_golang v1.15.0
-	github.com/prometheus/common v0.42.0
-	github.com/sirupsen/logrus v1.9.0
-	github.com/spf13/cobra v1.7.0
-	github.com/spyzhov/ajson v0.7.2
-	github.com/stretchr/testify v1.8.2
-	github.com/szuecs/routegroup-client v0.21.1
-	github.com/zalando-incubator/cluster-lifecycle-manager v0.0.0-20230223125308-aff25efae501
-	golang.org/x/net v0.9.0
-	golang.org/x/oauth2 v0.7.0
-	golang.org/x/sync v0.1.0
-	k8s.io/api v0.23.0
-	k8s.io/apimachinery v0.23.0
-	k8s.io/apiserver v0.23.0
-	k8s.io/client-go v0.23.0
-	k8s.io/code-generator v0.23.0
-	k8s.io/component-base v0.23.0
+	github.com/argoproj/argo-rollouts v1.6.6
+	github.com/aws/aws-sdk-go v1.51.32
+	github.com/influxdata/influxdb-client-go v1.4.0
+	github.com/prometheus/client_golang v1.18.0
+	github.com/prometheus/common v0.45.0
+	github.com/sirupsen/logrus v1.9.3
+	github.com/spf13/cobra v1.8.0
+	github.com/spyzhov/ajson v0.9.1
+	github.com/stretchr/testify v1.9.0
+	github.com/szuecs/routegroup-client v0.28.2
+	github.com/zalando-incubator/cluster-lifecycle-manager v0.0.0-20230601114834-6ed1bba3c85d
+	golang.org/x/net v0.24.0
+	golang.org/x/oauth2 v0.18.0
+	golang.org/x/sync v0.7.0
+	k8s.io/api v0.29.4
+	k8s.io/apimachinery v0.29.4
+	k8s.io/apiserver v0.29.4
+	k8s.io/client-go v0.29.4
+	k8s.io/code-generator v0.29.4
+	k8s.io/component-base v0.29.4
 	k8s.io/klog v1.0.0
-	k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf
-	k8s.io/metrics v0.22.17
-	sigs.k8s.io/controller-tools v0.8.0
-	sigs.k8s.io/custom-metrics-apiserver v1.22.0
+	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
+	k8s.io/metrics v0.29.4
+	sigs.k8s.io/controller-tools v0.14.0
+	sigs.k8s.io/custom-metrics-apiserver v1.29.0
 )

 require (
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/coreos/go-semver v0.3.0 // indirect
-	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful v2.16.0+incompatible // indirect
-	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/fatih/color v1.13.0 // indirect
-	github.com/felixge/httpsnoop v1.0.3 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
-	github.com/gobuffalo/flect v0.2.3 // indirect
+	github.com/deepmap/oapi-codegen v1.11.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/evanphx/json-patch v5.7.0+incompatible // indirect
+	github.com/fatih/color v1.16.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/jsonpointer v0.20.2 // indirect
+	github.com/go-openapi/jsonreference v0.20.4 // indirect
+	github.com/go-openapi/swag v0.22.9 // indirect
+	github.com/gobuffalo/flect v1.0.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/cel-go v0.17.7 // indirect
+	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/google/uuid v1.5.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
-	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -64,63 +70,60 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.17 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/prometheus/client_model v0.6.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.6 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
-	go.etcd.io/etcd/client/v2 v2.305.6 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.6 // indirect
-	go.opentelemetry.io/contrib v0.20.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
-	go.opentelemetry.io/otel v1.11.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
-	go.opentelemetry.io/otel/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.11.1 // indirect
-	go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
-	go.opentelemetry.io/otel/trace v1.11.1 // indirect
-	go.opentelemetry.io/proto/otlp v0.7.0 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	go.uber.org/multierr v1.8.0 // indirect
-	go.uber.org/zap v1.21.0 // indirect
-	golang.org/x/crypto v0.6.0 // indirect
-	golang.org/x/mod v0.8.0 // indirect
-	golang.org/x/sys v0.7.0 // indirect
-	golang.org/x/term v0.7.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.6.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
-	google.golang.org/grpc v1.52.0 // indirect
-	google.golang.org/protobuf v1.30.0 // indirect
+	github.com/stoewer/go-strcase v1.3.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.11 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.11 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
+	go.opentelemetry.io/otel v1.21.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.26.0 // indirect
+	golang.org/x/crypto v0.22.0 // indirect
+	golang.org/x/exp v0.0.0-20231226003508-02704c960a9b // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/sys v0.19.0 // indirect
+	golang.org/x/term v0.19.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.16.1 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/grpc v1.60.1 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.23.0 // indirect
-	k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
-	k8s.io/klog/v2 v2.90.0 // indirect
-	k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
+	k8s.io/apiextensions-apiserver v0.29.0 // indirect
+	k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
+	k8s.io/klog/v2 v2.120.1 // indirect
+	k8s.io/kms v0.29.4 // indirect
+	k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )

-replace (
-	go.opentelemetry.io/otel => go.opentelemetry.io/otel v0.20.0
-	go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v0.20.0
-	go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v0.20.0
-)
+replace go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.19.0

-go 1.20
+go 1.22
File diff suppressed because it is too large.
@@ -68,11 +68,11 @@ type Schedule struct {
 	// +optional
 	Period *SchedulePeriod `json:"period,omitempty"`
 	// Defines the starting date of a OneTime schedule. It has to
-	// be a RFC3339 formated date.
+	// be a RFC3339 formatted date.
 	// +optional
 	Date *ScheduleDate `json:"date,omitempty"`
 	// Defines the ending date of a OneTime schedule. It must be
-	// a RFC3339 formated date.
+	// a RFC3339 formatted date.
 	// +optional
 	EndDate *ScheduleDate `json:"endDate,omitempty"`
 	// The duration in minutes (default 0) that the configured value will be
@@ -33,8 +33,7 @@ type Interface interface {
 	ZalandoV1() zalandov1.ZalandoV1Interface
 }

-// Clientset contains the clients for groups. Each group has exactly one
-// version included in a Clientset.
+// Clientset contains the clients for groups.
 type Clientset struct {
 	*discovery.DiscoveryClient
 	zalandoV1 *zalandov1.ZalandoV1Client
@@ -61,6 +60,10 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
 func NewForConfig(c *rest.Config) (*Clientset, error) {
 	configShallowCopy := *c

+	if configShallowCopy.UserAgent == "" {
+		configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
 	// share the transport between all clients
 	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
 	if err != nil {
@@ -21,10 +21,9 @@ package fake
 import (
 	"context"

-	zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
 	testing "k8s.io/client-go/testing"
@@ -35,24 +34,24 @@ type FakeClusterScalingSchedules struct {
 	Fake *FakeZalandoV1
 }

-var clusterscalingschedulesResource = schema.GroupVersionResource{Group: "zalando.org", Version: "v1", Resource: "clusterscalingschedules"}
+var clusterscalingschedulesResource = v1.SchemeGroupVersion.WithResource("clusterscalingschedules")

-var clusterscalingschedulesKind = schema.GroupVersionKind{Group: "zalando.org", Version: "v1", Kind: "ClusterScalingSchedule"}
+var clusterscalingschedulesKind = v1.SchemeGroupVersion.WithKind("ClusterScalingSchedule")

 // Get takes name of the clusterScalingSchedule, and returns the corresponding clusterScalingSchedule object, and an error if there is any.
-func (c *FakeClusterScalingSchedules) Get(ctx context.Context, name string, options v1.GetOptions) (result *zalandoorgv1.ClusterScalingSchedule, err error) {
+func (c *FakeClusterScalingSchedules) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootGetAction(clusterscalingschedulesResource, name), &zalandoorgv1.ClusterScalingSchedule{})
+		Invokes(testing.NewRootGetAction(clusterscalingschedulesResource, name), &v1.ClusterScalingSchedule{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ClusterScalingSchedule), err
+	return obj.(*v1.ClusterScalingSchedule), err
 }

 // List takes label and field selectors, and returns the list of ClusterScalingSchedules that match those selectors.
-func (c *FakeClusterScalingSchedules) List(ctx context.Context, opts v1.ListOptions) (result *zalandoorgv1.ClusterScalingScheduleList, err error) {
+func (c *FakeClusterScalingSchedules) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterScalingScheduleList, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootListAction(clusterscalingschedulesResource, clusterscalingschedulesKind, opts), &zalandoorgv1.ClusterScalingScheduleList{})
+		Invokes(testing.NewRootListAction(clusterscalingschedulesResource, clusterscalingschedulesKind, opts), &v1.ClusterScalingScheduleList{})
 	if obj == nil {
 		return nil, err
 	}
@@ -61,8 +60,8 @@ func (c *FakeClusterScalingSchedules) List(ctx context.Context, opts v1.ListOpti
 	if label == nil {
 		label = labels.Everything()
 	}
-	list := &zalandoorgv1.ClusterScalingScheduleList{ListMeta: obj.(*zalandoorgv1.ClusterScalingScheduleList).ListMeta}
-	for _, item := range obj.(*zalandoorgv1.ClusterScalingScheduleList).Items {
+	list := &v1.ClusterScalingScheduleList{ListMeta: obj.(*v1.ClusterScalingScheduleList).ListMeta}
+	for _, item := range obj.(*v1.ClusterScalingScheduleList).Items {
 		if label.Matches(labels.Set(item.Labels)) {
 			list.Items = append(list.Items, item)
 		}
@@ -71,63 +70,63 @@ func (c *FakeClusterScalingSchedules) List(ctx context.Context, opts v1.ListOpti
 }

 // Watch returns a watch.Interface that watches the requested clusterScalingSchedules.
-func (c *FakeClusterScalingSchedules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+func (c *FakeClusterScalingSchedules) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	return c.Fake.
 		InvokesWatch(testing.NewRootWatchAction(clusterscalingschedulesResource, opts))
 }

 // Create takes the representation of a clusterScalingSchedule and creates it. Returns the server's representation of the clusterScalingSchedule, and an error, if there is any.
-func (c *FakeClusterScalingSchedules) Create(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts v1.CreateOptions) (result *zalandoorgv1.ClusterScalingSchedule, err error) {
+func (c *FakeClusterScalingSchedules) Create(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.CreateOptions) (result *v1.ClusterScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootCreateAction(clusterscalingschedulesResource, clusterScalingSchedule), &zalandoorgv1.ClusterScalingSchedule{})
+		Invokes(testing.NewRootCreateAction(clusterscalingschedulesResource, clusterScalingSchedule), &v1.ClusterScalingSchedule{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ClusterScalingSchedule), err
+	return obj.(*v1.ClusterScalingSchedule), err
 }

 // Update takes the representation of a clusterScalingSchedule and updates it. Returns the server's representation of the clusterScalingSchedule, and an error, if there is any.
-func (c *FakeClusterScalingSchedules) Update(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts v1.UpdateOptions) (result *zalandoorgv1.ClusterScalingSchedule, err error) {
+func (c *FakeClusterScalingSchedules) Update(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (result *v1.ClusterScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateAction(clusterscalingschedulesResource, clusterScalingSchedule), &zalandoorgv1.ClusterScalingSchedule{})
+		Invokes(testing.NewRootUpdateAction(clusterscalingschedulesResource, clusterScalingSchedule), &v1.ClusterScalingSchedule{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ClusterScalingSchedule), err
+	return obj.(*v1.ClusterScalingSchedule), err
 }

 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeClusterScalingSchedules) UpdateStatus(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts v1.UpdateOptions) (*zalandoorgv1.ClusterScalingSchedule, error) {
+func (c *FakeClusterScalingSchedules) UpdateStatus(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*v1.ClusterScalingSchedule, error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateSubresourceAction(clusterscalingschedulesResource, "status", clusterScalingSchedule), &zalandoorgv1.ClusterScalingSchedule{})
+		Invokes(testing.NewRootUpdateSubresourceAction(clusterscalingschedulesResource, "status", clusterScalingSchedule), &v1.ClusterScalingSchedule{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ClusterScalingSchedule), err
+	return obj.(*v1.ClusterScalingSchedule), err
 }

 // Delete takes name of the clusterScalingSchedule and deletes it. Returns an error if one occurs.
-func (c *FakeClusterScalingSchedules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+func (c *FakeClusterScalingSchedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	_, err := c.Fake.
-		Invokes(testing.NewRootDeleteActionWithOptions(clusterscalingschedulesResource, name, opts), &zalandoorgv1.ClusterScalingSchedule{})
+		Invokes(testing.NewRootDeleteActionWithOptions(clusterscalingschedulesResource, name, opts), &v1.ClusterScalingSchedule{})
 	return err
 }

 // DeleteCollection deletes a collection of objects.
-func (c *FakeClusterScalingSchedules) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+func (c *FakeClusterScalingSchedules) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	action := testing.NewRootDeleteCollectionAction(clusterscalingschedulesResource, listOpts)

-	_, err := c.Fake.Invokes(action, &zalandoorgv1.ClusterScalingScheduleList{})
+	_, err := c.Fake.Invokes(action, &v1.ClusterScalingScheduleList{})
 	return err
 }

 // Patch applies the patch and returns the patched clusterScalingSchedule.
-func (c *FakeClusterScalingSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *zalandoorgv1.ClusterScalingSchedule, err error) {
+func (c *FakeClusterScalingSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(clusterscalingschedulesResource, name, pt, data, subresources...), &zalandoorgv1.ClusterScalingSchedule{})
+		Invokes(testing.NewRootPatchSubresourceAction(clusterscalingschedulesResource, name, pt, data, subresources...), &v1.ClusterScalingSchedule{})
 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ClusterScalingSchedule), err
+	return obj.(*v1.ClusterScalingSchedule), err
 }
@@ -21,10 +21,9 @@ package fake
 import (
 	"context"

-	zalandoorgv1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
 	testing "k8s.io/client-go/testing"
@@ -36,25 +35,25 @@ type FakeScalingSchedules struct {
 	ns string
 }

-var scalingschedulesResource = schema.GroupVersionResource{Group: "zalando.org", Version: "v1", Resource: "scalingschedules"}
+var scalingschedulesResource = v1.SchemeGroupVersion.WithResource("scalingschedules")

-var scalingschedulesKind = schema.GroupVersionKind{Group: "zalando.org", Version: "v1", Kind: "ScalingSchedule"}
+var scalingschedulesKind = v1.SchemeGroupVersion.WithKind("ScalingSchedule")

 // Get takes name of the scalingSchedule, and returns the corresponding scalingSchedule object, and an error if there is any.
-func (c *FakeScalingSchedules) Get(ctx context.Context, name string, options v1.GetOptions) (result *zalandoorgv1.ScalingSchedule, err error) {
+func (c *FakeScalingSchedules) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(scalingschedulesResource, c.ns, name), &zalandoorgv1.ScalingSchedule{})
+		Invokes(testing.NewGetAction(scalingschedulesResource, c.ns, name), &v1.ScalingSchedule{})

 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ScalingSchedule), err
+	return obj.(*v1.ScalingSchedule), err
 }

 // List takes label and field selectors, and returns the list of ScalingSchedules that match those selectors.
-func (c *FakeScalingSchedules) List(ctx context.Context, opts v1.ListOptions) (result *zalandoorgv1.ScalingScheduleList, err error) {
+func (c *FakeScalingSchedules) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ScalingScheduleList, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewListAction(scalingschedulesResource, scalingschedulesKind, c.ns, opts), &zalandoorgv1.ScalingScheduleList{})
+		Invokes(testing.NewListAction(scalingschedulesResource, scalingschedulesKind, c.ns, opts), &v1.ScalingScheduleList{})

 	if obj == nil {
 		return nil, err
@@ -64,8 +63,8 @@ func (c *FakeScalingSchedules) List(ctx context.Context, opts v1.ListOptions) (r
 	if label == nil {
 		label = labels.Everything()
 	}
-	list := &zalandoorgv1.ScalingScheduleList{ListMeta: obj.(*zalandoorgv1.ScalingScheduleList).ListMeta}
-	for _, item := range obj.(*zalandoorgv1.ScalingScheduleList).Items {
+	list := &v1.ScalingScheduleList{ListMeta: obj.(*v1.ScalingScheduleList).ListMeta}
+	for _, item := range obj.(*v1.ScalingScheduleList).Items {
 		if label.Matches(labels.Set(item.Labels)) {
 			list.Items = append(list.Items, item)
 		}
@@ -74,69 +73,69 @@ func (c *FakeScalingSchedules) List(ctx context.Context, opts v1.ListOptions) (r
 }

 // Watch returns a watch.Interface that watches the requested scalingSchedules.
-func (c *FakeScalingSchedules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+func (c *FakeScalingSchedules) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	return c.Fake.
 		InvokesWatch(testing.NewWatchAction(scalingschedulesResource, c.ns, opts))

 }

 // Create takes the representation of a scalingSchedule and creates it. Returns the server's representation of the scalingSchedule, and an error, if there is any.
-func (c *FakeScalingSchedules) Create(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts v1.CreateOptions) (result *zalandoorgv1.ScalingSchedule, err error) {
+func (c *FakeScalingSchedules) Create(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.CreateOptions) (result *v1.ScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewCreateAction(scalingschedulesResource, c.ns, scalingSchedule), &zalandoorgv1.ScalingSchedule{})
+		Invokes(testing.NewCreateAction(scalingschedulesResource, c.ns, scalingSchedule), &v1.ScalingSchedule{})

 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ScalingSchedule), err
+	return obj.(*v1.ScalingSchedule), err
 }

 // Update takes the representation of a scalingSchedule and updates it. Returns the server's representation of the scalingSchedule, and an error, if there is any.
-func (c *FakeScalingSchedules) Update(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts v1.UpdateOptions) (result *zalandoorgv1.ScalingSchedule, err error) {
+func (c *FakeScalingSchedules) Update(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (result *v1.ScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewUpdateAction(scalingschedulesResource, c.ns, scalingSchedule), &zalandoorgv1.ScalingSchedule{})
+		Invokes(testing.NewUpdateAction(scalingschedulesResource, c.ns, scalingSchedule), &v1.ScalingSchedule{})

 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ScalingSchedule), err
+	return obj.(*v1.ScalingSchedule), err
 }

 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeScalingSchedules) UpdateStatus(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts v1.UpdateOptions) (*zalandoorgv1.ScalingSchedule, error) {
+func (c *FakeScalingSchedules) UpdateStatus(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (*v1.ScalingSchedule, error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewUpdateSubresourceAction(scalingschedulesResource, "status", c.ns, scalingSchedule), &zalandoorgv1.ScalingSchedule{})
+		Invokes(testing.NewUpdateSubresourceAction(scalingschedulesResource, "status", c.ns, scalingSchedule), &v1.ScalingSchedule{})

 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ScalingSchedule), err
+	return obj.(*v1.ScalingSchedule), err
 }

 // Delete takes name of the scalingSchedule and deletes it. Returns an error if one occurs.
-func (c *FakeScalingSchedules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+func (c *FakeScalingSchedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	_, err := c.Fake.
-		Invokes(testing.NewDeleteActionWithOptions(scalingschedulesResource, c.ns, name, opts), &zalandoorgv1.ScalingSchedule{})
+		Invokes(testing.NewDeleteActionWithOptions(scalingschedulesResource, c.ns, name, opts), &v1.ScalingSchedule{})

 	return err
 }

 // DeleteCollection deletes a collection of objects.
-func (c *FakeScalingSchedules) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+func (c *FakeScalingSchedules) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	action := testing.NewDeleteCollectionAction(scalingschedulesResource, c.ns, listOpts)

-	_, err := c.Fake.Invokes(action, &zalandoorgv1.ScalingScheduleList{})
+	_, err := c.Fake.Invokes(action, &v1.ScalingScheduleList{})
 	return err
 }

 // Patch applies the patch and returns the patched scalingSchedule.
-func (c *FakeScalingSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *zalandoorgv1.ScalingSchedule, err error) {
+func (c *FakeScalingSchedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ScalingSchedule, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(scalingschedulesResource, c.ns, name, pt, data, subresources...), &zalandoorgv1.ScalingSchedule{})
+		Invokes(testing.NewPatchSubresourceAction(scalingschedulesResource, c.ns, name, pt, data, subresources...), &v1.ScalingSchedule{})

 	if obj == nil {
 		return nil, err
 	}
-	return obj.(*zalandoorgv1.ScalingSchedule), err
+	return obj.(*v1.ScalingSchedule), err
 }
@@ -47,6 +47,11 @@ type sharedInformerFactory struct {
|
||||
// startedInformers is used for tracking which informers have been started.
|
||||
// This allows Start() to be called multiple times safely.
|
||||
startedInformers map[reflect.Type]bool
|
||||
// wg tracks how many goroutines were started.
|
||||
wg sync.WaitGroup
|
||||
// shuttingDown is true when Shutdown has been called. It may still be running
|
||||
// because it needs to wait for goroutines.
|
||||
shuttingDown bool
|
||||
}
|
||||
|
||||
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
|
||||
@@ -107,20 +112,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy
|
||||
return factory
|
||||
}
|
||||
|
||||
// Start initializes all requested informers.
|
||||
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.shuttingDown {
|
||||
return
|
||||
}
|
||||
|
||||
for informerType, informer := range f.informers {
|
||||
if !f.startedInformers[informerType] {
|
||||
go informer.Run(stopCh)
|
||||
f.wg.Add(1)
|
||||
// We need a new variable in each loop iteration,
|
||||
// otherwise the goroutine would use the loop variable
|
||||
// and that keeps changing.
|
||||
informer := informer
|
||||
go func() {
|
||||
defer f.wg.Done()
|
||||
informer.Run(stopCh)
|
||||
}()
|
||||
f.startedInformers[informerType] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForCacheSync waits for all started informers' cache were synced.
|
||||
func (f *sharedInformerFactory) Shutdown() {
|
||||
f.lock.Lock()
|
||||
f.shuttingDown = true
|
||||
f.lock.Unlock()
|
||||
|
||||
// Will return immediately if there is nothing to wait for.
|
||||
f.wg.Wait()
|
||||
}
|
||||
|
||||
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
|
||||
informers := func() map[reflect.Type]cache.SharedIndexInformer {
|
||||
f.lock.Lock()
|
||||
@@ -142,7 +166,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref
|
||||
return res
|
||||
}
|
||||
|
||||
// InternalInformerFor returns the SharedIndexInformer for obj using an internal
|
||||
// InformerFor returns the SharedIndexInformer for obj using an internal
|
||||
// client.
|
||||
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
|
||||
f.lock.Lock()
|
||||
@@ -167,11 +191,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal

 // SharedInformerFactory provides shared informers for resources in all known
 // API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
 type SharedInformerFactory interface {
 	internalinterfaces.SharedInformerFactory
-	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
 	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer

 	Zalando() zalandoorg.Interface
 }

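Start, WaitForCacheSync, and Shutdown together form the factory lifecycle: start one goroutine per requested informer against a stop channel, wait for the caches to sync, and on teardown close the channel before calling Shutdown so the internal WaitGroup can drain. A minimal sketch against the generated fake clientset; the package paths and the Zalando().V1().ScalingSchedules() chain are assumptions based on the usual code-generator layout:

package main

import (
	"fmt"
	"time"

	zfake "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/fake"
	zinformers "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/informers/externalversions"
)

func main() {
	client := zfake.NewSimpleClientset()
	factory := zinformers.NewSharedInformerFactory(client, 30*time.Second)

	// Requesting an informer registers it with the factory.
	_ = factory.Zalando().V1().ScalingSchedules().Informer()

	stopCh := make(chan struct{})
	factory.Start(stopCh) // one goroutine per requested informer

	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			fmt.Printf("cache failed to sync: %v\n", typ)
			return
		}
	}

	close(stopCh)      // informer goroutines exit once the stop channel closes
	factory.Shutdown() // blocks until all of them have terminated
}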
@@ -0,0 +1,125 @@
+package collector
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+)
+
+const (
+	ExternalRPSMetricType = "requests-per-second"
+	ExternalRPSQuery      = `scalar(sum(rate(%s{host=~"%s"}[1m])) * %.4f)`
+)
+
+type ExternalRPSCollectorPlugin struct {
+	metricName string
+	promPlugin CollectorPlugin
+	pattern    *regexp.Regexp
+}
+
+type ExternalRPSCollector struct {
+	interval      time.Duration
+	promCollector Collector
+}
+
+func NewExternalRPSCollectorPlugin(
+	promPlugin CollectorPlugin,
+	metricName string,
+) (*ExternalRPSCollectorPlugin, error) {
+	if metricName == "" {
+		return nil, fmt.Errorf("failed to initialize hostname collector plugin, metric name was not defined")
+	}
+
+	p, err := regexp.Compile("^[a-zA-Z0-9.-]+$")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create regular expression to match hostname format")
+	}
+
+	return &ExternalRPSCollectorPlugin{
+		metricName: metricName,
+		promPlugin: promPlugin,
+		pattern:    p,
+	}, nil
+}
+
+// NewCollector initializes a new external RPS collector from the specified HPA.
+func (p *ExternalRPSCollectorPlugin) NewCollector(
+	hpa *autoscalingv2.HorizontalPodAutoscaler,
+	config *MetricConfig,
+	interval time.Duration,
+) (Collector, error) {
+	if config == nil {
+		return nil, fmt.Errorf("metric config not present, it is not possible to initialize the collector")
+	}
+	// Need to copy config and add a promQL query in order to get
+	// RPS data from a specific hostname from prometheus. The idea
+	// of the copy is to not modify the original config struct.
+	confCopy := *config
+
+	if _, ok := config.Config["hostnames"]; !ok {
+		return nil, fmt.Errorf("Hostname is not specified, unable to create collector")
+	}
+
+	hostnames := strings.Split(config.Config["hostnames"], ",")
+	if p.pattern == nil {
+		return nil, fmt.Errorf("plugin did not specify hostname regex pattern, unable to create collector")
+	}
+	for _, h := range hostnames {
+		if ok := p.pattern.MatchString(h); !ok {
+			return nil, fmt.Errorf(
+				"invalid hostname format, unable to create collector: %s",
+				h,
+			)
+		}
+	}
+
+	weight := 1.0
+	if w, ok := config.Config["weight"]; ok {
+		num, err := strconv.ParseFloat(w, 64)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse weight annotation, unable to create collector: %s", w)
+		}
+		weight = num / 100.0
+	}
+
+	confCopy.Config = map[string]string{
+		"query": fmt.Sprintf(
+			ExternalRPSQuery,
+			p.metricName,
+			strings.ReplaceAll(strings.Join(hostnames, "|"), ".", "_"),
+			weight,
+		),
+	}
+
+	c, err := p.promPlugin.NewCollector(hpa, &confCopy, interval)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ExternalRPSCollector{
+		interval:      interval,
+		promCollector: c,
+	}, nil
+}
+
+// GetMetrics gets hostname metrics from Prometheus.
+func (c *ExternalRPSCollector) GetMetrics() ([]CollectedMetric, error) {
+	v, err := c.promCollector.GetMetrics()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(v) != 1 {
+		return nil, fmt.Errorf("expected to only get one metric value, got %d", len(v))
+	}
+	return v, nil
+}
+
+// Interval returns the interval at which the collector should run.
+func (c *ExternalRPSCollector) Interval() time.Duration {
+	return c.interval
+}
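To see what this collector actually sends to Prometheus, it helps to expand the query template by hand. A small sketch with assumed inputs: the metric name and hostnames are illustrative, and a weight annotation of 42 becomes a 0.42 multiplier:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const externalRPSQuery = `scalar(sum(rate(%s{host=~"%s"}[1m])) * %.4f)`

	metricName := "skipper_serve_host_duration_seconds_count" // assumed example counter with a host label
	hostnames := []string{"my-app.example.org", "alt.example.org"}
	weight := 42.0 / 100.0

	// Dots become underscores, matching how the collector rewrites hostnames.
	hostRegex := strings.ReplaceAll(strings.Join(hostnames, "|"), ".", "_")

	fmt.Printf(externalRPSQuery+"\n", metricName, hostRegex, weight)
	// scalar(sum(rate(skipper_serve_host_duration_seconds_count{host=~"my-app_example_org|alt_example_org"}[1m])) * 0.4200)
}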
File diff suppressed because it is too large.
@@ -0,0 +1,57 @@
+package collector
+
+import (
+	"time"
+
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/metrics/pkg/apis/custom_metrics"
+)
+
+type FakeCollectorPlugin struct {
+	metrics []CollectedMetric
+	config  map[string]string
+}
+
+type FakeCollector struct {
+	metrics  []CollectedMetric
+	interval time.Duration
+	stub     func() ([]CollectedMetric, error)
+}
+
+func (c *FakeCollector) GetMetrics() ([]CollectedMetric, error) {
+	if c.stub != nil {
+		v, err := c.stub()
+		return v, err
+	}
+
+	return c.metrics, nil
+}
+
+func (FakeCollector) Interval() time.Duration {
+	return time.Minute
+}
+
+func (p *FakeCollectorPlugin) NewCollector(
+	hpa *autoscalingv2.HorizontalPodAutoscaler,
+	config *MetricConfig,
+	interval time.Duration,
+) (Collector, error) {
+
+	p.config = config.Config
+	return &FakeCollector{metrics: p.metrics, interval: interval}, nil
+}
+
+func makePlugin(metric int) *FakeCollectorPlugin {
+	return &FakeCollectorPlugin{
+		metrics: []CollectedMetric{
+			{
+				Custom: custom_metrics.MetricValue{Value: *resource.NewQuantity(int64(metric), resource.DecimalSI)},
+			},
+		},
+	}
+}
+
+func makeCollectorWithStub(f func() ([]CollectedMetric, error)) *FakeCollector {
+	return &FakeCollector{stub: f}
+}
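The stub field is the hook that lets tests force GetMetrics to fail on demand. A hypothetical test using it; it assumes the same collector package and the testify require helper already used elsewhere in this repo:

package collector

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestCollectorStubError(t *testing.T) {
	c := makeCollectorWithStub(func() ([]CollectedMetric, error) {
		return nil, errors.New("downstream metrics store unavailable")
	})

	_, err := c.GetMetrics()
	require.Error(t, err) // the stub's error surfaces unchanged
}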
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"

-	"github.com/influxdata/influxdb-client-go"
+	influxdb "github.com/influxdata/influxdb-client-go"
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -47,7 +47,7 @@ type InfluxDBCollector struct {
 	token string
 	org   string

-	influxDBClient *influxdb.Client
+	influxDBClient influxdb.Client
 	interval   time.Duration
 	metric     autoscalingv2.MetricIdentifier
 	metricType autoscalingv2.MetricSourceType
@@ -92,10 +92,7 @@ func NewInfluxDBCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, address st
 	if v, ok := config.Config[influxDBOrgKey]; ok {
 		org = v
 	}
-	influxDbClient, err := influxdb.New(address, token)
-	if err != nil {
-		return nil, err
-	}
+	influxDbClient := influxdb.NewClient(address, token)
 	collector.address = address
 	collector.token = token
 	collector.org = org
@@ -111,7 +108,8 @@ type queryResult struct {

 // getValue returns the first result gathered from an InfluxDB instance.
 func (c *InfluxDBCollector) getValue() (resource.Quantity, error) {
-	res, err := c.influxDBClient.QueryCSV(context.Background(), c.query, c.org)
+	queryAPI := c.influxDBClient.QueryAPI(c.org)
+	res, err := queryAPI.Query(context.Background(), c.query)
 	if err != nil {
 		return resource.Quantity{}, err
 	}
@@ -119,12 +117,9 @@ func (c *InfluxDBCollector) getValue() (resource.Quantity, error) {
 	// Keeping just the first result.
 	if res.Next() {
 		qr := queryResult{}
 		if err := res.Unmarshal(&qr); err != nil {
 			return resource.Quantity{}, fmt.Errorf("error in unmarshaling query result: %v", err)
 		}
 		return *resource.NewMilliQuantity(int64(qr.MetricValue*1000), resource.DecimalSI), nil
 	}
-	if err := res.Err; err != nil {
+	if err := res.Err(); err != nil {
 		return resource.Quantity{}, fmt.Errorf("error in query result: %v", err)
 	}
 	return resource.Quantity{}, fmt.Errorf("empty result returned")
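The client upgrade replaces the one-shot QueryCSV call with a client that hands out a per-organization query API, and the result error moves from a field to a method. A minimal standalone sketch of the new flow; the server URL, token, org, and Flux query are placeholders, Next() and Err() appear in the hunk above, and Record() is an assumption based on the same client generation:

package main

import (
	"context"
	"fmt"

	influxdb "github.com/influxdata/influxdb-client-go"
)

func main() {
	// NewClient no longer returns an error; it only constructs the client.
	client := influxdb.NewClient("http://localhost:8086", "my-token")

	// Queries are scoped to an organization via a dedicated query API.
	queryAPI := client.QueryAPI("my-org")
	res, err := queryAPI.Query(context.Background(), `from(bucket: "metrics") |> range(start: -1m)`)
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for res.Next() {
		fmt.Println(res.Record().Value()) // assumed accessor for the current row's value
	}
	if err := res.Err(); err != nil {
		fmt.Println("result error:", err)
	}
}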
@@ -0,0 +1,120 @@
+package collector
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/zalando-incubator/kube-metrics-adapter/pkg/nakadi"
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/metrics/pkg/apis/external_metrics"
+)
+
+const (
+	// NakadiMetricType defines the metric type for metrics based on Nakadi
+	// subscriptions.
+	NakadiMetricType                   = "nakadi"
+	nakadiSubscriptionIDKey            = "subscription-id"
+	nakadiMetricTypeKey                = "metric-type"
+	nakadiMetricTypeConsumerLagSeconds = "consumer-lag-seconds"
+	nakadiMetricTypeUnconsumedEvents   = "unconsumed-events"
+)
+
+// NakadiCollectorPlugin defines a plugin for creating collectors that can get
+// unconsumed events from Nakadi.
+type NakadiCollectorPlugin struct {
+	nakadi nakadi.Nakadi
+}
+
+// NewNakadiCollectorPlugin initializes a new NakadiCollectorPlugin.
+func NewNakadiCollectorPlugin(nakadi nakadi.Nakadi) (*NakadiCollectorPlugin, error) {
+	return &NakadiCollectorPlugin{
+		nakadi: nakadi,
+	}, nil
+}
+
+// NewCollector initializes a new Nakadi collector from the specified HPA.
+func (c *NakadiCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
+	return NewNakadiCollector(c.nakadi, hpa, config, interval)
+}
+
+// NakadiCollector defines a collector that is able to collect metrics from
+// Nakadi.
+type NakadiCollector struct {
+	nakadi           nakadi.Nakadi
+	interval         time.Duration
+	subscriptionID   string
+	nakadiMetricType string
+	metric           autoscalingv2.MetricIdentifier
+	metricType       autoscalingv2.MetricSourceType
+	namespace        string
+}
+
+// NewNakadiCollector initializes a new NakadiCollector.
+func NewNakadiCollector(nakadi nakadi.Nakadi, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*NakadiCollector, error) {
+	if config.Metric.Selector == nil {
+		return nil, fmt.Errorf("selector for nakadi is not specified")
+	}
+
+	subscriptionID, ok := config.Config[nakadiSubscriptionIDKey]
+	if !ok {
+		return nil, fmt.Errorf("subscription-id not specified on metric")
+	}
+
+	metricType, ok := config.Config[nakadiMetricTypeKey]
+	if !ok {
+		return nil, fmt.Errorf("metric-type not specified on metric")
+	}
+
+	if metricType != nakadiMetricTypeConsumerLagSeconds && metricType != nakadiMetricTypeUnconsumedEvents {
+		return nil, fmt.Errorf("metric-type must be either '%s' or '%s', was '%s'", nakadiMetricTypeConsumerLagSeconds, nakadiMetricTypeUnconsumedEvents, metricType)
+	}
+
+	return &NakadiCollector{
+		nakadi:           nakadi,
+		interval:         interval,
+		subscriptionID:   subscriptionID,
+		nakadiMetricType: metricType,
+		metric:           config.Metric,
+		metricType:       config.Type,
+		namespace:        hpa.Namespace,
+	}, nil
+}
+
+// GetMetrics returns a list of collected metrics for the Nakadi subscription ID.
+func (c *NakadiCollector) GetMetrics() ([]CollectedMetric, error) {
+	var value int64
+	var err error
+	switch c.nakadiMetricType {
+	case nakadiMetricTypeConsumerLagSeconds:
+		value, err = c.nakadi.ConsumerLagSeconds(context.TODO(), c.subscriptionID)
+		if err != nil {
+			return nil, err
+		}
+	case nakadiMetricTypeUnconsumedEvents:
+		value, err = c.nakadi.UnconsumedEvents(context.TODO(), c.subscriptionID)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	metricValue := CollectedMetric{
+		Namespace: c.namespace,
+		Type:      c.metricType,
+		External: external_metrics.ExternalMetricValue{
+			MetricName:   c.metric.Name,
+			MetricLabels: c.metric.Selector.MatchLabels,
+			Timestamp:    metav1.Now(),
+			Value:        *resource.NewQuantity(value, resource.DecimalSI),
+		},
+	}
+
+	return []CollectedMetric{metricValue}, nil
+}
+
+// Interval returns the interval at which the collector should run.
+func (c *NakadiCollector) Interval() time.Duration {
+	return c.interval
+}
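Since the collector touches Nakadi only through ConsumerLagSeconds and UnconsumedEvents, a test double stays small. A hypothetical sketch, assuming nakadi.Nakadi is an interface declaring exactly these two methods:

package collector

import "context"

// fakeNakadi satisfies the nakadi.Nakadi dependency, assuming the interface
// consists of exactly the two methods NakadiCollector calls above.
type fakeNakadi struct {
	lagSeconds int64
	unconsumed int64
	err        error
}

func (f *fakeNakadi) ConsumerLagSeconds(_ context.Context, _ string) (int64, error) {
	return f.lagSeconds, f.err
}

func (f *fakeNakadi) UnconsumedEvents(_ context.Context, _ string) (int64, error) {
	return f.unconsumed, f.err
}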
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"time"

+	argoRolloutsClient "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
 	log "github.com/sirupsen/logrus"
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
 	corev1 "k8s.io/api/core/v1"
@@ -18,17 +19,19 @@ import (
 )

 type PodCollectorPlugin struct {
-	client kubernetes.Interface
+	client             kubernetes.Interface
+	argoRolloutsClient argoRolloutsClient.Interface
 }

-func NewPodCollectorPlugin(client kubernetes.Interface) *PodCollectorPlugin {
+func NewPodCollectorPlugin(client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface) *PodCollectorPlugin {
 	return &PodCollectorPlugin{
-		client: client,
+		client:             client,
+		argoRolloutsClient: argoRolloutsClient,
 	}
 }

 func (p *PodCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
-	return NewPodCollector(p.client, hpa, config, interval)
+	return NewPodCollector(p.client, p.argoRolloutsClient, hpa, config, interval)
 }

 type PodCollector struct {
@@ -43,9 +46,9 @@ type PodCollector struct {
 	logger *log.Entry
 }

-func NewPodCollector(client kubernetes.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*PodCollector, error) {
+func NewPodCollector(client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*PodCollector, error) {
 	// get pod selector based on HPA scale target ref
-	selector, err := getPodLabelSelector(client, hpa)
+	selector, err := getPodLabelSelector(client, argoRolloutsClient, hpa)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get pod label selector: %v", err)
 	}
@@ -153,7 +156,7 @@ func (c *PodCollector) getPodMetric(pod corev1.Pod, ch chan CollectedMetric, err
 	}
 }

-func getPodLabelSelector(client kubernetes.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler) (*metav1.LabelSelector, error) {
+func getPodLabelSelector(client kubernetes.Interface, argoRolloutsClient argoRolloutsClient.Interface, hpa *autoscalingv2.HorizontalPodAutoscaler) (*metav1.LabelSelector, error) {
 	switch hpa.Spec.ScaleTargetRef.Kind {
 	case "Deployment":
 		deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
@@ -167,6 +170,12 @@ func getPodLabelSelector(client kubernetes.Interface, hpa *autoscalingv2.Horizon
 			return nil, err
 		}
 		return sts.Spec.Selector, nil
+	case "Rollout":
+		rollout, err := argoRolloutsClient.ArgoprojV1alpha1().Rollouts(hpa.Namespace).Get(context.TODO(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		return rollout.Spec.Selector, nil
 	}

 	return nil, fmt.Errorf("unable to get pod label selector for scale target ref '%s'", hpa.Spec.ScaleTargetRef.Kind)
@@ -11,6 +11,8 @@ import (
 	"testing"
 	"time"

+	argorolloutsv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+	argorolloutsfake "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
@@ -25,6 +27,7 @@ const (
 	applicationLabelName  = "application"
 	applicationLabelValue = "test-application"
 	testDeploymentName    = "test-application"
+	testRolloutName       = "test-application"
 	testInterval          = 10 * time.Second
 )

@@ -42,7 +45,8 @@ func TestPodCollector(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			client := fake.NewSimpleClientset()
-			plugin := NewPodCollectorPlugin(client)
+			argoRolloutsClient := argorolloutsfake.NewSimpleClientset()
+			plugin := NewPodCollectorPlugin(client, argoRolloutsClient)
 			makeTestDeployment(t, client)
 			host, port, metricsHandler := makeTestHTTPServer(t, tc.metrics)
 			lastReadyTransitionTimeTimestamp := v1.NewTime(time.Now().Add(time.Duration(-30) * time.Second))
@@ -80,7 +84,8 @@ func TestPodCollectorWithMinPodReadyAge(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			client := fake.NewSimpleClientset()
-			plugin := NewPodCollectorPlugin(client)
+			argoRolloutsClient := argorolloutsfake.NewSimpleClientset()
+			plugin := NewPodCollectorPlugin(client, argoRolloutsClient)
 			makeTestDeployment(t, client)
 			host, port, metricsHandler := makeTestHTTPServer(t, tc.metrics)
 			// Setting pods age to 30 seconds
@@ -120,7 +125,8 @@ func TestPodCollectorWithPodCondition(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			client := fake.NewSimpleClientset()
-			plugin := NewPodCollectorPlugin(client)
+			argoRolloutsClient := argorolloutsfake.NewSimpleClientset()
+			plugin := NewPodCollectorPlugin(client, argoRolloutsClient)
 			makeTestDeployment(t, client)
 			host, port, metricsHandler := makeTestHTTPServer(t, tc.metrics)
 			lastScheduledTransitionTimeTimestamp := v1.NewTime(time.Now().Add(time.Duration(-30) * time.Second))
@@ -159,7 +165,8 @@ func TestPodCollectorWithPodTerminatingCondition(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			client := fake.NewSimpleClientset()
-			plugin := NewPodCollectorPlugin(client)
+			argoRolloutsClient := argorolloutsfake.NewSimpleClientset()
+			plugin := NewPodCollectorPlugin(client, argoRolloutsClient)
 			makeTestDeployment(t, client)
 			host, port, metricsHandler := makeTestHTTPServer(t, tc.metrics)
 			lastScheduledTransitionTimeTimestamp := v1.NewTime(time.Now().Add(time.Duration(-30) * time.Second))
@@ -184,6 +191,46 @@
 	}
 }

+func TestPodCollectorWithRollout(t *testing.T) {
+	for _, tc := range []struct {
+		name    string
+		metrics [][]int64
+		result  []int64
+	}{
+		{
+			name:    "simple-with-rollout",
+			metrics: [][]int64{{1}, {3}, {8}, {5}, {2}},
+			result:  []int64{1, 3, 8, 5, 2},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			client := fake.NewSimpleClientset()
+			argoRolloutsClient := argorolloutsfake.NewSimpleClientset()
+			plugin := NewPodCollectorPlugin(client, argoRolloutsClient)
+
+			makeTestRollout(t, argoRolloutsClient)
+			host, port, metricsHandler := makeTestHTTPServer(t, tc.metrics)
+			lastReadyTransitionTimeTimestamp := v1.NewTime(time.Now().Add(time.Duration(-30) * time.Second))
+			minPodReadyAge := time.Duration(0 * time.Second)
+			podCondition := corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionTrue, LastTransitionTime: lastReadyTransitionTimeTimestamp}
+			podDeletionTimestamp := time.Time{}
+			makeTestPods(t, host, port, "test-metric", client, 5, podCondition, podDeletionTimestamp)
+			testHPA := makeTestHPAForRollout(t, client)
+			testConfig := makeTestConfig(port, minPodReadyAge)
+			collector, err := plugin.NewCollector(testHPA, testConfig, testInterval)
+			require.NoError(t, err)
+			metrics, err := collector.GetMetrics()
+			require.NoError(t, err)
+			require.Equal(t, len(metrics), int(metricsHandler.calledCounter))
+			var values []int64
+			for _, m := range metrics {
+				values = append(values, m.Custom.Value.Value())
+			}
+			require.ElementsMatch(t, tc.result, values)
+		})
+	}
+}
+
 type testMetricResponse struct {
 	Values []int64 `json:"values"`
 }
@@ -246,7 +293,7 @@ func makeTestPods(t *testing.T, testServer string, metricName string, port strin
 		} else {
 			testPod.ObjectMeta.DeletionTimestamp = &v1.Time{Time: podDeletionTimestamp}
 		}
-		_, err := client.CoreV1().Pods(testNamespace).Create(context.TODO(), testPod, v1.CreateOptions{})
+		_, err := client.CoreV1().Pods(testNamespace).Create(context.Background(), testPod, v1.CreateOptions{})
 		require.NoError(t, err)
 	}
 }
@@ -260,12 +307,28 @@ func makeTestDeployment(t *testing.T, client kubernetes.Interface) *appsv1.Deplo
 			},
 		},
 	}
-	_, err := client.AppsV1().Deployments(testNamespace).Create(context.TODO(), &deployment, v1.CreateOptions{})
+	_, err := client.AppsV1().Deployments(testNamespace).Create(context.Background(), &deployment, v1.CreateOptions{})
 	require.NoError(t, err)
 	return &deployment

 }

+func makeTestRollout(t *testing.T, argoRolloutsClient *argorolloutsfake.Clientset) *argorolloutsv1alpha1.Rollout {
+	rollout := &argorolloutsv1alpha1.Rollout{
+		ObjectMeta: v1.ObjectMeta{
+			Name: testRolloutName,
+		},
+		Spec: argorolloutsv1alpha1.RolloutSpec{
+			Selector: &v1.LabelSelector{
+				MatchLabels: map[string]string{applicationLabelName: applicationLabelValue},
+			},
+		},
+	}
+	_, err := argoRolloutsClient.ArgoprojV1alpha1().Rollouts(testNamespace).Create(context.Background(), rollout, v1.CreateOptions{})
+	require.NoError(t, err)
+	return rollout
+}
+
 func makeTestHPA(t *testing.T, client kubernetes.Interface) *autoscalingv2.HorizontalPodAutoscaler {
 	hpa := &autoscalingv2.HorizontalPodAutoscaler{
 		ObjectMeta: v1.ObjectMeta{
@@ -280,7 +343,26 @@ func makeTestHPA(t *testing.T, client kubernetes.Interface) *autoscalingv2.Horiz
 			},
 		},
 	}
-	_, err := client.AutoscalingV2().HorizontalPodAutoscalers("test-namespace").Create(context.TODO(), hpa, v1.CreateOptions{})
+	_, err := client.AutoscalingV2().HorizontalPodAutoscalers("test-namespace").Create(context.Background(), hpa, v1.CreateOptions{})
 	require.NoError(t, err)
 	return hpa
 }

+func makeTestHPAForRollout(t *testing.T, client kubernetes.Interface) *autoscalingv2.HorizontalPodAutoscaler {
+	hpa := &autoscalingv2.HorizontalPodAutoscaler{
+		ObjectMeta: v1.ObjectMeta{
+			Name:      "test-hpa-rollout",
+			Namespace: testNamespace,
+		},
+		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
+			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
+				Kind:       "Rollout",
+				Name:       testRolloutName,
+				APIVersion: "argoproj.io/v1alpha1",
+			},
+		},
+	}
+	_, err := client.AutoscalingV2().HorizontalPodAutoscalers(testNamespace).Create(context.Background(), hpa, v1.CreateOptions{})
+	require.NoError(t, err)
+	return hpa
+}
@@ -13,7 +13,6 @@ import (
 	rgfake "github.com/szuecs/routegroup-client/client/clientset/versioned/fake"
 	appsv1 "k8s.io/api/apps/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
-	corev1 "k8s.io/api/core/v1"
 	netv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -61,10 +60,10 @@ func TestTargetRefReplicasStatefulSets(t *testing.T) {
 	require.Equal(t, statefulSet.Status.Replicas, replicas)
 }

-func newHPA(namesapce string, refName string, refKind string) *autoscalingv2.HorizontalPodAutoscaler {
+func newHPA(namespace string, refName string, refKind string) *autoscalingv2.HorizontalPodAutoscaler {
 	return &autoscalingv2.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: namesapce,
+			Name: namespace,
 		},
 		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
 			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
@@ -561,7 +560,7 @@ func makeIngress(client kubernetes.Interface, namespace, resourceName, backend s
 			TLS: nil,
 		},
 		Status: netv1.IngressStatus{
-			LoadBalancer: corev1.LoadBalancerStatus{
+			LoadBalancer: netv1.IngressLoadBalancerStatus{
 				Ingress: nil,
 			},
 		},
@@ -658,38 +657,3 @@ func makeConfig(resourceName, namespace, kind, backend string, fakedAverage bool
 	}
 	return config
 }
-
-type FakeCollectorPlugin struct {
-	metrics []CollectedMetric
-	config  map[string]string
-}
-
-type FakeCollector struct {
-	metrics []CollectedMetric
-}
-
-func (c *FakeCollector) GetMetrics() ([]CollectedMetric, error) {
-	return c.metrics, nil
-}
-
-func (FakeCollector) Interval() time.Duration {
-	return time.Minute
-}
-
-func (p *FakeCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
-	if p.config != nil {
-		return nil, fmt.Errorf("config already assigned once: %v", p.config)
-	}
-	p.config = config.Config
-	return &FakeCollector{metrics: p.metrics}, nil
-}
-
-func makePlugin(metric int) *FakeCollectorPlugin {
-	return &FakeCollectorPlugin{
-		metrics: []CollectedMetric{
-			{
-				Custom: custom_metrics.MetricValue{Value: *resource.NewQuantity(int64(metric), resource.DecimalSI)},
-			},
-		},
-	}
-}
Some files were not shown because too many files have changed in this diff.