Mirror of https://github.com/zalando-incubator/kube-metrics-adapter.git, synced 2025-07-19 13:18:57 +00:00

Compare commits (53 commits)
Commits in this comparison:

c4a1e08fdf
61e0336247
2492bdf5c1
88b7d747d1
0794873fcb
e2a922f110
b631a4fe08
90fea54307
8b6a54627d
75738f1eb7
d8934ebbc1
17bef811f3
7a041c991a
979ae68eab
e9a209aa31
838edd9082
c50d9ccd18
c60253e90c
680e0feea1
070f9ab299
8ba4d19c35
37bf73c7fe
d01a3f6347
50633ba06c
bce27b748c
9e057c1075
55ce2d8c3d
37969486fe
1be6357c8e
7b592c6832
203aaeafb9
5926f1aea1
be1e1c635d
094a9f9b9d
47d34b328c
4b5b96084b
18f4c82f07
10dc42b9e9
20db592a62
d0a3ea1934
837e7b9c5d
9d8359b580
71a8e99d1f
4d4c70c553
2ccd6903d9
f58db31f98
0bf8f5dd0f
e600557636
0f06db7cdf
1c9038b2cc
fd4ead837e
f46f801811
4acdf72ef7
.github/workflows/ci.yaml (vendored, 20 changed lines)
@@ -1,7 +1,11 @@
name: ci
on: [push, pull_request]
env:
GO111MODULE: on
on:
push:
branches-ignore:
- 'gh-pages'
pull_request:
branches-ignore:
- 'gh-pages'
jobs:
tests:
runs-on: ubuntu-latest
@@ -9,14 +13,10 @@ jobs:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '^1.16'
go-version: '^1.19'
- run: go version
- run: go get github.com/mattn/goveralls
env:
GO111MODULE: off
- run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_RELEASE}
env:
GOLANGCI_RELEASE: v1.37.0
- run: go install github.com/mattn/goveralls@latest
- run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
- run: make build.docker
- run: make test
- run: make check
.github/workflows/codeql-analysis.yml (vendored, new file, 72 lines)
@@ -0,0 +1,72 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
push:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
schedule:
- cron: '29 17 * * 1'

jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write

strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

steps:
- name: Checkout repository
uses: actions/checkout@v3

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.

# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality

# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2

# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.

# - run: |
#   echo "Run, Build Application using script"
#   ./location_of_script_within_repo/buildscript.sh

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
.github/workflows/release-chart.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
name: Release Charts

on:
push:
branches:
- master

jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.4.0
with:
charts_dir: docs
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
@@ -1,19 +1,15 @@
run:
linters-settings:
golint:
min-confidence: 0.9
concurrency: 4

linters:
disable-all: true
enable:
- staticcheck
- ineffassign
- golint
- goimports
- errcheck
issues:
exclude-rules:
# Exclude some staticcheck messages
- linters:
- staticcheck
text: "SA9003:"
- deadcode
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- typecheck
- unused
- varcheck
@@ -1,9 +1,11 @@
FROM registry.opensource.zalan.do/library/alpine-3.12:latest
ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3.13:latest
FROM ${BASE_IMAGE}
LABEL maintainer="Team Teapot @ Zalando SE <team-teapot@zalando.de>"

RUN apk add --no-cache tzdata

# add binary
ADD build/linux/kube-metrics-adapter /
ARG TARGETARCH

ADD build/linux/${TARGETARCH}/kube-metrics-adapter /

ENTRYPOINT ["/kube-metrics-adapter"]
Makefile (13 changed lines)
@@ -50,7 +50,9 @@ $(OPENAPI): go.mod

build.local: build/$(BINARY) $(GENERATED_CRDS)
build.linux: build/linux/$(BINARY)
build.osx: build/osx/$(BINARY)
build.linux.amd64: build/linux/amd64/$(BINARY)
build.linux.arm64: build/linux/arm64/$(BINARY)

build/$(BINARY): go.mod $(SOURCES) $(GENERATED)
CGO_ENABLED=0 go build -o build/$(BINARY) $(BUILD_FLAGS) -ldflags "$(LDFLAGS)" .
@@ -58,11 +60,14 @@ build/$(BINARY): go.mod $(SOURCES) $(GENERATED)
build/linux/$(BINARY): go.mod $(SOURCES) $(GENERATED)
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/linux/$(BINARY) -ldflags "$(LDFLAGS)" .

build/osx/$(BINARY): go.mod $(SOURCES) $(GENERATED)
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/osx/$(BINARY) -ldflags "$(LDFLAGS)" .
build/linux/amd64/$(BINARY): go.mod $(SOURCES)
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/linux/amd64/$(BINARY) -ldflags "$(LDFLAGS)" .

build/linux/arm64/$(BINARY): go.mod $(SOURCES)
GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/linux/arm64/$(BINARY) -ldflags "$(LDFLAGS)" .

build.docker: build.linux
docker build --rm -t "$(IMAGE):$(TAG)" -f $(DOCKERFILE) .
docker build --rm -t "$(IMAGE):$(TAG)" -f $(DOCKERFILE) --build-arg TARGETARCH= .

build.push: build.docker
docker push "$(IMAGE):$(TAG)"
README.md (84 changed lines)
@@ -17,7 +17,7 @@ Here's an example of a `HorizontalPodAutoscaler` resource configured to get
`requests-per-second` metrics from each pod of the deployment `myapp`.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -54,10 +54,8 @@ policy](https://kubernetes.io/docs/setup/release/version-skew-policy/) offered
for Kubernetes, this project aims to support the latest three minor releases of
Kubernetes.

The default supported API is `autoscaling/v2beta2` (available since `v1.12`).
This API MUST be available in the cluster which is the default. However for
GKE, this requires GKE v1.15.7 according to this [GKE
Issue](https://issuetracker.google.com/issues/135624588).
The default supported API is `autoscaling/v2` (available since `v1.23`).
This API MUST be available in the cluster which is the default.

## Building

@@ -99,7 +97,7 @@ This is an example of using the pod collector to collect metrics from a json
metrics endpoint of each pod matched by the HPA.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -219,7 +217,7 @@ with the result of the query.
This allows having multiple prometheus queries associated with a single HPA.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -320,7 +318,7 @@ box so users don't have to define those manually.

| Metric | Description | Type | Kind | K8s Versions |
| ----------- | -------------- | ------ | ---- | ---- |
| `requests-per-second` | Scale based on requests per second for a certain ingress or routegroup. | Object | `Ingress`, `RouteGroup` | `>=1.14` |
| `requests-per-second` | Scale based on requests per second for a certain ingress or routegroup. | Object | `Ingress`, `RouteGroup` | `>=1.19` |

### Example

@@ -331,7 +329,7 @@ This is an example of an HPA that will scale based on `requests-per-second` for
an ingress called `myapp`.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -365,7 +363,7 @@ This is an example of an HPA that will scale based on `requests-per-second` for
a routegroup called `myapp`.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -426,7 +424,7 @@ the query name which will be associated with the result of the query. This
allows having multiple flux queries associated with a single HPA.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -514,7 +512,7 @@ This is an example of an HPA that will scale based on the length of an SQS
queue.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -566,7 +564,7 @@ This is an example of an HPA that will scale based on the specified value
exposed by a ZMON check with id `1234`.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -663,7 +661,7 @@ This is an example of using the HTTP collector to collect metrics from a json
metrics endpoint specified in the annotations.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
@@ -720,12 +718,63 @@ The `ScalingSchedule` and `ClusterScalingSchedule` collectors allow
collecting time-based metrics from the respective CRD objects specified
in the HPA.

These collectors are disabled by default; you have to start the server
with the `--scaling-schedule` flag to enable them. Remember to deploy the CRDs
`ScalingSchedule` and `ClusterScalingSchedule` and allow the service
account used by the server to read, watch and list them.

### Supported metrics

| Metric | Description | Type | K8s Versions |
| ---------- | -------------- | ------- | -- |
| ObjectName | The metric is calculated and stored for each `ScalingSchedule` and `ClusterScalingSchedule` referenced in the HPAs | `ScalingSchedule` and `ClusterScalingSchedule` | `>=1.16` |

### Ramp-up and ramp-down feature

To avoid abrupt scaling due to time based metrics, the `ScalingSchedule`
collector can ramp the metric up and down over a
specific period of time. The duration of the scaling window can be
configured individually in the `[Cluster]ScalingSchedule` object, via
the option `scalingWindowDurationMinutes`, or globally for all scheduled
events, and defaults to a globally configured value if not specified.
The default for the latter is set to 10 minutes, but can be changed
using the `--scaling-schedule-default-scaling-window` flag.

This spreads the scale events around, creating less load on the other
components, and helping the rest of the metrics (like the CPU ones) to
adjust as well.

The [HPA algorithm][algo-details] does not make changes if the metric
change is smaller than the tolerance specified by the
`horizontal-pod-autoscaler-tolerance` flag:

> We'll skip scaling if the ratio is sufficiently close to 1.0 (within a
> globally-configurable tolerance, from the
> `--horizontal-pod-autoscaler-tolerance` flag, which defaults to 0.1.

With that in mind, the ramp-up and ramp-down feature divides the scaling
over the specified period of time in buckets, trying to achieve changes
bigger than the configured tolerance. The number of buckets defaults to
10 and can be configured by the `--scaling-schedule-ramp-steps` flag.

**Important**: note that the ramp-up and ramp-down feature can lead to
deployments achieving less than the specified number of pods, due to the
HPA 10% change rule and the ceiling function applied to the desired
number of the pods (check the [algorithm details][algo-details]). It
varies with the configured metric for `ScalingSchedule` events, the
number of pods and the configured `horizontal-pod-autoscaler-tolerance`
flag of your kubernetes installation. [This gist][gist] contains the code to
simulate the situations a deployment with different numbers of pods and
a metric of 10000 can face with 10 buckets (max of 90% of the metric
returned) and 5 buckets (max of 80% of the metric returned). The ramp-up
and ramp-down feature can be disabled by setting
`--scaling-schedule-default-scaling-window` to 0, and abrupt scalings can
be handled via [scaling policies][policies].

[algo-details]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details
[gist]: https://gist.github.com/jonathanbeber/37f1f918ab7ef6101c6ce56cc2cef3a2
[policies]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#scaling-policies
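The bucket arithmetic above is easier to see with concrete numbers. The Go sketch below is not the adapter's implementation; it only simulates, assuming a linear ramp, how a schedule value of 10000 split into 10 buckets interacts with the default 0.1 tolerance ratio check.

```go
package main

import "fmt"

// Simulate (under an assumed linear ramp) the metric returned while a
// schedule ramps up: the scaling window is split into `steps` buckets and the
// configured value is scaled by the bucket index. The HPA is only expected to
// react when the new value differs from the previous one by more than the
// tolerance ratio.
func main() {
	const (
		value     = 10000.0 // configured ScalingSchedule value (example)
		steps     = 10      // --scaling-schedule-ramp-steps default
		tolerance = 0.10    // --horizontal-pod-autoscaler-tolerance default
	)

	prev := 0.0
	for i := 1; i <= steps; i++ {
		current := value * float64(i) / float64(steps) // value returned in this bucket
		moves := prev == 0 || current/prev > 1+tolerance
		fmt.Printf("bucket %2d: metric %.0f (expected to move the HPA: %v)\n", i, current, moves)
		prev = current
	}
}
```

The real outcome additionally depends on the number of pods and the ceiling function mentioned above, which is what the linked gist simulates in more detail.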
### Example

This is an example of using the ScalingSchedule collectors to collect
@@ -779,7 +828,7 @@ An HPA can reference the deployed `ClusterScalingSchedule` object as
this example:

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: "myapp-hpa"
@@ -826,8 +875,3 @@ Note that these number of pods are just considering these custom
metrics, the normal HPA behavior still applies, such as: in case of
multiple metrics the biggest number of pods is the utilized one, HPA max
and min replica configuration, autoscaling policies, etc.

These collectors are disabled by default, you have to start the server
with the `--scaling-schedule` flag to enable it. Remember to deploy the CRDs
`ScalingSchedule` and `ClusterScalingSchedule` and allow the service
account used by the server to read, watch and list them.
@@ -1,7 +1,9 @@
version: "2017-09-20"
pipeline:
- id: build
overlay: ci/golang
vm_config:
type: linux
image: "cdp-runtime/go"
cache:
paths:
- /go/pkg/mod # pkg cache for Go modules
@@ -28,3 +30,17 @@ pipeline:
IMAGE=$IMAGE VERSION=$VERSION make build.docker
git diff --stat --exit-code
IMAGE=$IMAGE VERSION=$VERSION make build.push
- desc: Build and push image to Zalando's registry
cmd: |
if [[ $CDP_TARGET_BRANCH == master && ! $CDP_PULL_REQUEST_NUMBER ]]; then
IMAGE=container-registry-test.zalando.net/teapot/kube-metrics-adapter
VERSION=$(git describe --tags --always)
else
IMAGE=container-registry-test.zalando.net/teapot/kube-metrics-adapter-test
VERSION=$CDP_BUILD_VERSION
fi
make build.linux.amd64 build.linux.arm64

docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use
docker buildx build --rm --build-arg BASE_IMAGE=container-registry.zalando.net/library/alpine-3:latest -t "${IMAGE}:${VERSION}" --platform linux/amd64,linux/arm64 --push .
cdp-promote-image "${IMAGE}:${VERSION}"
@@ -1,10 +1,9 @@

---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
name: clusterscalingschedules.zalando.org
spec:
@@ -16,7 +15,12 @@ spec:
singular: clusterscalingschedule
scope: Cluster
versions:
- name: v1
- additionalPrinterColumns:
- description: Whether one or more schedules are currently active.
jsonPath: .status.active
name: Active
type: boolean
name: v1
schema:
openAPIV3Schema:
description: ClusterScalingSchedule describes a cluster scoped time based
@@ -56,9 +60,14 @@ spec:
format: date-time
type: string
durationMinutes:
description: The duration in minutes that the configured value
will be returned for the defined schedule.
description: The duration in minutes (default 0) that the configured
value will be returned for the defined schedule.
type: integer
endDate:
description: Defines the ending date of a OneTime schedule.
It must be a RFC3339 formated date.
format: date-time
type: string
period:
description: Defines the details of a Repeating schedule.
properties:
@@ -77,6 +86,10 @@ spec:
- Sat
type: string
type: array
endTime:
description: The endTime has the format HH:MM
pattern: (([0-1][0-9])|([2][0-3])):([0-5][0-9])
type: string
startTime:
description: The startTime has the format HH:MM
pattern: (([0-1][0-9])|([2][0-3])):([0-5][0-9])
@@ -104,7 +117,6 @@ spec:
format: int64
type: integer
required:
- durationMinutes
- type
- value
type: object
@@ -112,11 +124,22 @@ spec:
required:
- schedules
type: object
status:
description: ScalingScheduleStatus is the status section of the ScalingSchedule.
properties:
active:
default: false
description: Active is true if at least one of the schedules defined
in the scaling schedule is currently active.
type: boolean
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
@@ -1,6 +1,6 @@
apiVersion: v2
name: kube-metrics-adapter
version: 0.1.11
version: 0.1.18
description: kube-metrics-adapter helm chart
home: https://github.com/zalando-incubator/kube-metrics-adapter
maintainers:
@@ -16,8 +16,23 @@ spec:
labels:
application: kube-metrics-adapter
version: {{ .Values.registry.imageTag }}
{{- if .Values.podAnnotations }}
annotations: {{- toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
serviceAccountName: kube-metrics-adapter
{{- if .Values.nodeSelector }}
nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity: {{ toYaml .Values.affinity | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName | quote }}
{{- end }}
containers:
- name: kube-metrics-adapter
image: {{ .Values.registry.image}}:{{ .Values.registry.imageTag }}
@@ -60,7 +60,7 @@ rules:
verbs:
- get
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
@@ -95,3 +95,14 @@ resources:

scalingSchedule:
enabled: false

nodeSelector:
kubernetes.io/os: linux

tolerations: []

affinity: {}

priorityClassName: ""

podAnnotations: {}
@@ -67,7 +67,6 @@ rules:
# only relevant if running with the flag:
# --skipper-ingress-metrics
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
@@ -98,6 +97,13 @@ rules:
- get
- list
- watch
- apiGroups:
- zalando.org
resources:
- clusterscalingschedules/status
- scalingschedules/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -1,22 +1,28 @@

---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
name: scalingschedules.zalando.org
spec:
group: zalando.org
names:
categories:
- all
kind: ScalingSchedule
listKind: ScalingScheduleList
plural: scalingschedules
singular: scalingschedule
scope: Namespaced
versions:
- name: v1
- additionalPrinterColumns:
- description: Whether one or more schedules are currently active.
jsonPath: .status.active
name: Active
type: boolean
name: v1
schema:
openAPIV3Schema:
description: ScalingSchedule describes a namespaced time based metric to be
@@ -56,9 +62,14 @@ spec:
format: date-time
type: string
durationMinutes:
description: The duration in minutes that the configured value
will be returned for the defined schedule.
description: The duration in minutes (default 0) that the configured
value will be returned for the defined schedule.
type: integer
endDate:
description: Defines the ending date of a OneTime schedule.
It must be a RFC3339 formated date.
format: date-time
type: string
period:
description: Defines the details of a Repeating schedule.
properties:
@@ -77,6 +88,10 @@ spec:
- Sat
type: string
type: array
endTime:
description: The endTime has the format HH:MM
pattern: (([0-1][0-9])|([2][0-3])):([0-5][0-9])
type: string
startTime:
description: The startTime has the format HH:MM
pattern: (([0-1][0-9])|([2][0-3])):([0-5][0-9])
@@ -104,7 +119,6 @@ spec:
format: int64
type: integer
required:
- durationMinutes
- type
- value
type: object
@@ -112,11 +126,22 @@ spec:
required:
- schedules
type: object
status:
description: ScalingScheduleStatus is the status section of the ScalingSchedule.
properties:
active:
default: false
description: Active is true if at least one of the schedules defined
in the scaling schedule is currently active.
type: boolean
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
@@ -1,5 +1,5 @@
FROM registry.opensource.zalan.do/stups/alpine:latest
MAINTAINER Team Teapot @ Zalando SE <team-teapot@zalando.de>
FROM registry.opensource.zalan.do/library/alpine-3.13:latest
LABEL maintainer="Team Teapot @ Zalando SE <team-teapot@zalando.de>"

# add binary
ADD build/linux/custom-metrics-consumer /
@@ -39,7 +39,7 @@ spec:
- type: Object
object:
describedObject:
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
name: custom-metrics-consumer
metric:
@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: custom-metrics-consumer
@@ -11,7 +11,9 @@ import (
func metricsHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
_, err := w.Write([]byte(fmt.Sprintf(`{"queue": {"length": %d}}`, size)))
log.Fatalf("failed to write: %v", err)
if err != nil {
log.Fatalf("failed to write: %v", err)
}
}

var (
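As an illustration of the fix above (the write error is now checked before `log.Fatalf` is called), here is a self-contained sketch that exercises a handler written the same way using `httptest`; the `size` variable and the JSON shape are assumptions mirroring the example service.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

var size = 3 // assumed queue length, standing in for the example service's variable

// metricsHandler mirrors the corrected handler: the error is checked first,
// so a successful write no longer terminates the process.
func metricsHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(200)
	_, err := w.Write([]byte(fmt.Sprintf(`{"queue": {"length": %d}}`, size)))
	if err != nil {
		log.Fatalf("failed to write: %v", err)
	}
}

func main() {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/metrics", nil)
	metricsHandler(rec, req)
	fmt.Println(rec.Code, rec.Body.String()) // 200 {"queue": {"length": 3}}
}
```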
go.mod (147 changed lines)
@@ -1,35 +1,126 @@
module github.com/zalando-incubator/kube-metrics-adapter

require (
github.com/NYTimes/gziphandler v1.0.1 // indirect
github.com/aws/aws-sdk-go v1.40.22
github.com/go-openapi/spec v0.20.3
github.com/aws/aws-sdk-go v1.44.254
github.com/influxdata/influxdb-client-go v0.2.0
github.com/influxdata/line-protocol v0.0.0-20201012155213-5f565037cbc9 // indirect
github.com/kubernetes-sigs/custom-metrics-apiserver v0.0.0-20201216091021-1b9fa998bbaa
github.com/mailru/easyjson v0.7.7 // indirect
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.26.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.1.3
github.com/spyzhov/ajson v0.4.2
github.com/stretchr/testify v1.7.0
github.com/szuecs/routegroup-client v0.18.3
github.com/zalando-incubator/cluster-lifecycle-manager v0.0.0-20180921141935-824b77fb1f84
go.uber.org/zap v1.13.0 // indirect
golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392 // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/api v0.20.9
k8s.io/apimachinery v0.20.9
k8s.io/apiserver v0.20.9
k8s.io/client-go v0.20.9
k8s.io/code-generator v0.20.9
k8s.io/component-base v0.20.9
github.com/prometheus/client_golang v1.15.0
github.com/prometheus/common v0.42.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.7.0
github.com/spyzhov/ajson v0.7.2
github.com/stretchr/testify v1.8.2
github.com/szuecs/routegroup-client v0.21.1
github.com/zalando-incubator/cluster-lifecycle-manager v0.0.0-20230223125308-aff25efae501
golang.org/x/net v0.9.0
golang.org/x/oauth2 v0.7.0
golang.org/x/sync v0.1.0
k8s.io/api v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/apiserver v0.23.0
k8s.io/client-go v0.23.0
k8s.io/code-generator v0.23.0
k8s.io/component-base v0.23.0
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
k8s.io/metrics v0.20.9
sigs.k8s.io/controller-tools v0.5.0
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf
k8s.io/metrics v0.22.17
sigs.k8s.io/controller-tools v0.8.0
sigs.k8s.io/custom-metrics-apiserver v1.22.0
)

go 1.16
require (
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.16.0+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.etcd.io/etcd/api/v3 v3.5.6 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
go.etcd.io/etcd/client/v2 v2.305.6 // indirect
go.etcd.io/etcd/client/v3 v3.5.6 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel v1.11.1 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v1.11.1 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v1.11.1 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/term v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
google.golang.org/grpc v1.52.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.23.0 // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/klog/v2 v2.90.0 // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

replace (
go.opentelemetry.io/otel => go.opentelemetry.io/otel v0.20.0
go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v0.20.0
go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v0.20.0
)

go 1.20
@@ -5,7 +5,7 @@ import (
"strings"
"time"

autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
autoscalingv2 "k8s.io/api/autoscaling/v2"
)

const (
@@ -4,7 +4,7 @@ import (
"testing"

"github.com/stretchr/testify/require"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func TestParser(t *testing.T) {
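The two hunks above only swap the import path from `k8s.io/api/autoscaling/v2beta2` to `k8s.io/api/autoscaling/v2`; because the `autoscalingv2` alias is kept, call sites stay the same. A minimal, hypothetical sketch of such a call site:

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2" // previously k8s.io/api/autoscaling/v2beta2
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The type names are unchanged between v2beta2 and v2, so code that only
	// references the alias compiles against the new API group unmodified.
	hpa := autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "myapp-hpa"},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			MaxReplicas: 10,
		},
	}
	fmt.Println(hpa.Name, hpa.Spec.MaxReplicas)
}
```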
File diff suppressed because it is too large.
@@ -13,11 +13,16 @@ import (
// ScalingSchedule describes a namespaced time based metric to be used
// in autoscaling operations.
// +k8s:deepcopy-gen=true
// +kubebuilder:resource:categories=all
// +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active`,description="Whether one or more schedules are currently active."
// +kubebuilder:subresource:status
type ScalingSchedule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

Spec ScalingScheduleSpec `json:"spec"`
// +optional
Status ScalingScheduleStatus `json:"status"`
}

// +genclient
@@ -28,12 +33,17 @@ type ScalingSchedule struct {
// ClusterScalingSchedule describes a cluster scoped time based metric
// to be used in autoscaling operations.
// +k8s:deepcopy-gen=true
// +kubebuilder:resource:categories=all
// +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active`,description="Whether one or more schedules are currently active."
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster
type ClusterScalingSchedule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

Spec ScalingScheduleSpec `json:"spec"`
// +optional
Status ScalingScheduleStatus `json:"status"`
}

// ScalingScheduleSpec is the spec part of the ScalingSchedule.
@@ -61,8 +71,13 @@ type Schedule struct {
// be a RFC3339 formated date.
// +optional
Date *ScheduleDate `json:"date,omitempty"`
// The duration in minutes that the configured value will be
// Defines the ending date of a OneTime schedule. It must be
// a RFC3339 formated date.
// +optional
EndDate *ScheduleDate `json:"endDate,omitempty"`
// The duration in minutes (default 0) that the configured value will be
// returned for the defined schedule.
// +optional
DurationMinutes int `json:"durationMinutes"`
// The metric value that will be returned for the defined schedule.
Value int64 `json:"value"`
@@ -90,6 +105,10 @@ type SchedulePeriod struct {
// The startTime has the format HH:MM
// +kubebuilder:validation:Pattern="(([0-1][0-9])|([2][0-3])):([0-5][0-9])"
StartTime string `json:"startTime"`
// The endTime has the format HH:MM
// +kubebuilder:validation:Pattern="(([0-1][0-9])|([2][0-3])):([0-5][0-9])"
// +optional
EndTime string `json:"endTime"`
// The days that this schedule will be active.
Days []ScheduleDay `json:"days"`
// The location name corresponding to a file in the IANA
@@ -116,6 +135,16 @@ const (
// +kubebuilder:validation:Format="date-time"
type ScheduleDate string

// ScalingScheduleStatus is the status section of the ScalingSchedule.
// +k8s:deepcopy-gen=true
type ScalingScheduleStatus struct {
// Active is true if at least one of the schedules defined in the
// scaling schedule is currently active.
// +kubebuilder:default:=false
// +optional
Active bool `json:"active"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ScalingScheduleList is a list of namespaced scaling schedules.
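To make the API change above concrete (the new optional `EndDate` alongside `DurationMinutes`), here is a small self-contained sketch; the types below are local stand-ins that mirror only the fields visible in these hunks, not the package's full definitions.

```go
package main

import "fmt"

// Local stand-ins for illustration only; field names follow the hunks above.
type ScheduleDate string

type Schedule struct {
	Date            *ScheduleDate
	EndDate         *ScheduleDate // new optional field added by this change
	DurationMinutes int           // now documented as defaulting to 0
	Value           int64
}

func main() {
	start := ScheduleDate("2023-06-01T08:00:00+02:00")
	end := ScheduleDate("2023-06-01T18:00:00+02:00")

	// A one-time schedule can now end at an explicit date instead of only
	// after DurationMinutes.
	s := Schedule{Date: &start, EndDate: &end, Value: 10000}
	fmt.Println(*s.Date, *s.EndDate, s.DurationMinutes, s.Value)
}
```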
@@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
@@ -30,6 +31,7 @@ func (in *ClusterScalingSchedule) DeepCopyInto(out *ClusterScalingSchedule) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}

@@ -90,6 +92,7 @@ func (in *ScalingSchedule) DeepCopyInto(out *ScalingSchedule) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}

@@ -172,6 +175,22 @@ func (in *ScalingScheduleSpec) DeepCopy() *ScalingScheduleSpec {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScalingScheduleStatus) DeepCopyInto(out *ScalingScheduleStatus) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingScheduleStatus.
func (in *ScalingScheduleStatus) DeepCopy() *ScalingScheduleStatus {
if in == nil {
return nil
}
out := new(ScalingScheduleStatus)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Schedule) DeepCopyInto(out *Schedule) {
*out = *in
@@ -185,6 +204,11 @@ func (in *Schedule) DeepCopyInto(out *Schedule) {
*out = new(ScheduleDate)
**out = **in
}
if in.EndDate != nil {
in, out := &in.EndDate, &out.EndDate
*out = new(ScheduleDate)
**out = **in
}
return
}
@@ -20,6 +20,7 @@ package versioned

import (
"fmt"
"net/http"

zalandov1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/typed/zalando.org/v1"
discovery "k8s.io/client-go/discovery"
@@ -55,22 +56,41 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c

// share the transport between all clients
httpClient, err := rest.HTTPClientFor(&configShallowCopy)
if err != nil {
return nil, err
}

return NewForConfigAndClient(&configShallowCopy, httpClient)
}

// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}

var cs Clientset
var err error
cs.zalandoV1, err = zalandov1.NewForConfig(&configShallowCopy)
cs.zalandoV1, err = zalandov1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}

cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
@@ -80,11 +100,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.zalandoV1 = zalandov1.NewForConfigOrDie(c)

cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
cs, err := NewForConfig(c)
if err != nil {
panic(err)
}
return cs
}

// New creates a new Clientset for the given RESTClient.
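A hedged usage sketch for the refactor above: `NewForConfig` now builds an HTTP client with `rest.HTTPClientFor` and delegates to `NewForConfigAndClient`, so a caller that wants to share one transport across clientsets can do the same explicitly. The clientset import path follows the typed package shown in this diff; the kubeconfig loading is an assumption made for the example.

```go
package main

import (
	"log"
	"net/http"

	clientset "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption for the example: load the local kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}

	// Build one HTTP client and hand it to the generated clientset, mirroring
	// what NewForConfig now does internally.
	var httpClient *http.Client
	httpClient, err = rest.HTTPClientFor(cfg)
	if err != nil {
		log.Fatalf("http client: %v", err)
	}

	cs, err := clientset.NewForConfigAndClient(cfg, httpClient)
	if err != nil {
		log.Fatalf("clientset: %v", err)
	}

	_ = cs.ZalandoV1() // typed access to ScalingSchedule and ClusterScalingSchedule resources
}
```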
@@ -74,7 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}

var _ clientset.Interface = &Clientset{}
var (
_ clientset.Interface = &Clientset{}
_ testing.FakeClient = &Clientset{}
)

// ZalandoV1 retrieves the ZalandoV1Client
func (c *Clientset) ZalandoV1() zalandov1.ZalandoV1Interface {
@@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
@@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
Some files were not shown because too many files have changed in this diff.