From 3db0cc3135063f81314aec8fad914dfc64e60a84 Mon Sep 17 00:00:00 2001 From: Per Ploug Date: Mon, 8 Oct 2018 13:17:05 +0200 Subject: [PATCH] Adding boilerplate files Signed-off-by: Per Ploug --- .github/CODEOWNERS | 35 +++ .github/ISSUE_TEMPLATE.md | 17 ++ .github/PULL_REQUEST_TEMPLATE.md | 31 +++ .gitignore | 2 + .travis.yml | 11 + CODE_OF_CONDUCT.md | 74 ++++++ CONTRIBUTING.md | 86 +++++++ CONTRIBUTORS.md | 13 ++ Dockerfile | 7 + LICENSE | 2 +- MAINTAINERS | 3 + Makefile | 42 ++++ README.md | 296 +++++++++++++++++++++++- SECURITY.md | 7 + docs/custom-metrics-apiservice.yaml | 13 ++ docs/deployment.yaml | 40 ++++ docs/external-metrics-apiservice.yaml | 13 ++ docs/rbac.yaml | 122 ++++++++++ docs/service.yaml | 11 + example/Dockerfile | 7 + example/Makefile | 42 ++++ example/deploy/deployment.yaml | 29 +++ example/deploy/hpa.yaml | 48 ++++ example/deploy/ingress.yaml | 14 ++ example/deploy/service.yaml | 14 ++ example/main.go | 33 +++ go.mod | 94 ++++++++ go.sum | 186 +++++++++++++++ main.go | 42 ++++ pkg/collector/aws_collector.go | 128 +++++++++++ pkg/collector/collector.go | 311 +++++++++++++++++++++++++ pkg/collector/json_path_collector.go | 133 +++++++++++ pkg/collector/max_collector.go | 42 ++++ pkg/collector/object_collector.go | 20 ++ pkg/collector/pod_collector.go | 141 ++++++++++++ pkg/collector/prometheus_collector.go | 131 +++++++++++ pkg/collector/skipper_collector.go | 165 +++++++++++++ pkg/provider/hpa.go | 320 ++++++++++++++++++++++++++ pkg/provider/metric_store.go | 311 +++++++++++++++++++++++++ pkg/server/start.go | 213 +++++++++++++++++ 40 files changed, 3247 insertions(+), 2 deletions(-) create mode 100644 .github/CODEOWNERS create mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 CONTRIBUTORS.md create mode 100644 Dockerfile create mode 100644 MAINTAINERS 
create mode 100644 Makefile create mode 100644 SECURITY.md create mode 100644 docs/custom-metrics-apiservice.yaml create mode 100644 docs/deployment.yaml create mode 100644 docs/external-metrics-apiservice.yaml create mode 100644 docs/rbac.yaml create mode 100644 docs/service.yaml create mode 100644 example/Dockerfile create mode 100644 example/Makefile create mode 100644 example/deploy/deployment.yaml create mode 100644 example/deploy/hpa.yaml create mode 100644 example/deploy/ingress.yaml create mode 100644 example/deploy/service.yaml create mode 100644 example/main.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 main.go create mode 100644 pkg/collector/aws_collector.go create mode 100644 pkg/collector/collector.go create mode 100644 pkg/collector/json_path_collector.go create mode 100644 pkg/collector/max_collector.go create mode 100644 pkg/collector/object_collector.go create mode 100644 pkg/collector/pod_collector.go create mode 100644 pkg/collector/prometheus_collector.go create mode 100644 pkg/collector/skipper_collector.go create mode 100644 pkg/provider/hpa.go create mode 100644 pkg/provider/metric_store.go create mode 100644 pkg/server/start.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..7b44cbf --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,35 @@ +# These owners will be the default owners for everything in +# the repo. +* @arjunrn + + + +# Samples for assigning codeowners below: +# Order is important; the last matching pattern takes the most +# precedence. When someone opens a pull request that only +# modifies JS files, only @js-owner and not the global +# owner(s) will be requested for a review. +# *.js @js-owner + +# You can also use email addresses if you prefer. They'll be +# used to look up users just like we do for commit author +# emails. 
+# *.go docs@example.com + +# In this example, @doctocat owns any files in the build/logs +# directory at the root of the repository and any of its +# subdirectories. +# /build/logs/ @doctocat + +# The `docs/*` pattern will match files like +# `docs/getting-started.md` but not further nested files like +# `docs/build-app/troubleshooting.md`. +# docs/* docs@example.com + +# In this example, @octocat owns any file in an apps directory +# anywhere in your repository. +# apps/ @octocat + +# In this example, @doctocat owns any file in the `/docs` +# directory in the root of your repository. +# /docs/ @doctocat diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..f8b8ed9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,17 @@ +## Expected Behavior + + +## Actual Behavior + + +## Steps to Reproduce the Problem + + 1. + 1. + 1. + +## Specifications + + - Version: + - Platform: + - Subsystem: diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..4e9b62d --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,31 @@ +# One-line summary + +> Issue : #1234 (only if appropriate) + +## Description +A few sentences describing the overall goals of the pull request's +commits. + +## Types of Changes +_What types of changes does your code introduce? Keep the ones that apply:_ + +- New feature (non-breaking change which adds functionality) +- Bug fix (non-breaking change which fixes an issue) +- Configuration change +- Refactor/improvements +- Documentation / non-code + +## Tasks +_List of tasks you will do to complete the PR_ + - [ ] Created Task 1 + - [ ] Created Task 2 + - [ ] To-do Task 3 + +## Review +_List of tasks the reviewer must do to review the PR_ +- [ ] Tests +- [ ] Documentation +- [ ] CHANGELOG + +## Deployment Notes +These should highlight any db migrations, feature toggles, etc. 
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f4f7988 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +build/ +apiserver.local.config/ diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..feeb47f --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - "1.11" + +env: +- GO111MODULE=on + +script: + - make test + - make build.docker diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..a4b151c --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team, see [MAINTAINERS](MAINTAINERS). All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..306dd59 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,86 @@ +# Contributing + +**Thank you for your interest in making the project better and more useful. +Your contributions are highly welcome.** + +There are multiple ways of getting involved: + +- [Report a bug](#report-a-bug) +- [Suggest a feature](#suggest-a-feature) +- [Contribute code](#contribute-code) + +Below are a few guidelines we would like you to follow. +If you need help, please reach out to us by opening an issue. + +## Report a bug + +Reporting bugs is one of the best ways to contribute. Before creating a bug +report, please check that an [issue](/issues) reporting the same problem does +not already exist. If there is such an issue, you may add your information as a +comment. + +To report a new bug you should open an issue that summarizes the bug and set +the label to "bug". + +If you want to provide a fix along with your bug report: That is great!
In this +case please send us a pull request as described in section [Contribute +Code](#contribute-code). + +## Suggest a feature + +To request a new feature you should open an [issue](../../issues/new) and +summarize the desired functionality and its use case. Set the issue label to +"feature". + +## Contribute code + +This is an outline of what the workflow for code contributions looks like + +- Check the list of open [issues](../../issues). Either assign an existing + issue to yourself, or create a new one that you would like to work on and + discuss your ideas and use cases. + +It is always best to discuss your plans beforehand, to ensure that your +contribution is in line with our goals. + +- Fork the repository on GitHub +- Create a topic branch from where you want to base your work. This is usually master. +- Open a new pull request, label it `work in progress` and outline what you will be contributing +- Make commits of logical units. +- Make sure you sign-off on your commits `git commit -s -m "adding X to change Y"` +- Write good commit messages (see below). +- Push your changes to a topic branch in your fork of the repository. +- As you push your changes, update the pull request with new information and tasks as you complete them +- Project maintainers might comment on your work as you progress +- When you are done, remove the `work in progress` label and ping the maintainers for a review +- Your pull request must receive a :thumbsup: from two [maintainers](MAINTAINERS) + +Thanks for your contributions! + +### Commit messages + +Your commit messages ideally can answer two questions: what changed and why. +The subject line should feature the “what” and the body of the commit should +describe the “why”. + +When creating a pull request, its description should reference the corresponding issue id.
+ +### Sign your work / Developer certificate of origin +All contributions (including pull requests) must agree to the Developer +Certificate of Origin (DCO) version 1.1. This is exactly the same one created +and used by the Linux kernel developers and posted on +http://developercertificate.org/. This is a developer's certification that he +or she has the right to submit the patch for inclusion into the project. Simply +submitting a contribution implies this agreement, however, please include a +"Signed-off-by" tag in every patch (this tag is a conventional way to confirm +that you agree to the DCO) - you can automate this with a [Git +hook](https://stackoverflow.com/questions/15015894/git-add-signed-off-by-line-using-format-signoff-not-working) + +``` +git commit -s -m "adding X to change Y" +``` + + + + +**Have fun, and happy hacking!** diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 0000000..7b456d2 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,13 @@ +# Project Contributors + +All external contributors to the project, we are grateful for all their help + +## Contributors sorted alphabetically + +- **[Name of contributor](https://github.com/user/name)** + - Contribution + - Contribution + +- **[Name of contributor](https://github.com/user/name)** + - Contribution + - Contribution diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..c9824a6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,7 @@ +FROM registry.opensource.zalan.do/stups/alpine:latest +MAINTAINER Team Teapot @ Zalando SE + +# add binary +ADD build/linux/kube-metrics-adapter / + +ENTRYPOINT ["/kube-metrics-adapter"] diff --git a/LICENSE b/LICENSE index 45ad829..de05715 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018 The Zalando Incubator +Copyright (c) 2018 Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff 
--git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 0000000..d5fd96d --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,3 @@ +Mikkel Larsen +Arjun Naik +Team Teapot diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ba315db --- /dev/null +++ b/Makefile @@ -0,0 +1,42 @@ +.PHONY: clean test check build.local build.linux build.osx build.docker build.push + +BINARY ?= kube-metrics-adapter +VERSION ?= $(shell git describe --tags --always --dirty) +IMAGE ?= mikkeloscar/$(BINARY) +TAG ?= $(VERSION) +SOURCES = $(shell find . -name '*.go') +DOCKERFILE ?= Dockerfile +GOPKGS = $(shell go list ./...) +BUILD_FLAGS ?= -v +LDFLAGS ?= -X main.version=$(VERSION) -w -s + +default: build.local + +clean: + rm -rf build + +test: + go test -v $(GOPKGS) + +check: + golint $(GOPKGS) + go vet -v $(GOPKGS) + +build.local: build/$(BINARY) +build.linux: build/linux/$(BINARY) +build.osx: build/osx/$(BINARY) + +build/$(BINARY): go.mod $(SOURCES) + CGO_ENABLED=0 go build -o build/$(BINARY) $(BUILD_FLAGS) -ldflags "$(LDFLAGS)" . + +build/linux/$(BINARY): go.mod $(SOURCES) + GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/linux/$(BINARY) -ldflags "$(LDFLAGS)" . + +build/osx/$(BINARY): go.mod $(SOURCES) + GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/osx/$(BINARY) -ldflags "$(LDFLAGS)" . + +build.docker: build.linux + docker build --rm -t "$(IMAGE):$(TAG)" -f $(DOCKERFILE) . 
+ +build.push: build.docker + docker push "$(IMAGE):$(TAG)" diff --git a/README.md b/README.md index 0e31729..c66b293 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,296 @@ # kube-metrics-adapter -General purpose metrics adapter for Kubernetes HPA metrics +[![Build Status](https://travis-ci.com/mikkeloscar/kube-metrics-adapter.svg?branch=master)](https://travis-ci.com/mikkeloscar/kube-metrics-adapter) + +Kube Metrics Adapter is a general purpose metrics adapter for Kubernetes that +can collect and serve custom and external metrics for Horizontal Pod +Autoscaling. + +It discovers Horizontal Pod Autoscaling resources and starts to collect the +requested metrics and stores them in memory. It's implemented using the +[custom-metrics-apiserver](https://github.com/kubernetes-incubator/custom-metrics-apiserver) +library. + +Here's an example of a `HorizontalPodAutoscaler` resource configured to get +`requests-per-second` metrics from each pod of the deployment `myapp`. + +```yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: myapp-hpa + annotations: + # metric-config.../ + metric-config.pods.requests-per-second.json-path/json-key: "$.http_server.rps" + metric-config.pods.requests-per-second.json-path/path: /metrics + metric-config.pods.requests-per-second.json-path/port: "9090" +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: myapp + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Pods + pods: + metricName: requests-per-second + targetAverageValue: 1k +``` + +The `metric-config.*` annotations are used by the `kube-metrics-adapter` to +configure a collector for getting the metrics. In the above example it +configures a *json-path pod collector*. + +## Building + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) as +introduced in Go 1.11 therefore you need Go >=1.11 installed in order to build. 
+If using Go 1.11 you also need to [activate Module +support](https://github.com/golang/go/wiki/Modules#installing-and-activating-module-support). + +Assuming Go has been setup with module support it can be built simply by running: + +```sh +export GO111MODULE=on # needed if the project is checked out in your $GOPATH. +$ make +``` + +## Collectors + +Collectors are different implementations for getting metrics requested by an +HPA resource. They are configured based on HPA resources and started on-demand by the +`kube-metrics-adapter` to only collect the metrics required for scaling the application. + +The collectors are configured either simply based on the metrics defined in an +HPA resource, or via additional annotations on the HPA resource. + +## Pod collector + +The pod collector allows collecting metrics from each pod matched by the HPA. +Currently only `json-path` collection is supported. + +### Supported metrics + +| Metric | Description | Type | +| ------------ | -------------- | ------- | +| *custom* | No predefined metrics. Metrics are generated from user defined queries. | Pods | + +### Example + +This is an example of using the pod collector to collect metrics from a json +metrics endpoint of each pod matched by the HPA. + +```yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: myapp-hpa + annotations: + # metric-config.../ + metric-config.pods.requests-per-second.json-path/json-key: "$.http_server.rps" + metric-config.pods.requests-per-second.json-path/path: /metrics + metric-config.pods.requests-per-second.json-path/port: "9090" +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: myapp + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Pods + pods: + metricName: requests-per-second + targetAverageValue: 1k +``` + +The pod collector is configured through the annotations which specify the +collector name `json-path` and a set of configuration options for the +collector. 
`json-key` defines the json-path query for extracting the right +metric. This assumes the pod is exposing metrics in JSON format. For the above +example the following JSON data would be expected: + +```json +{ + "http_server": { + "rps": 0.5 + } +} +``` + +The json-path query support depends on the +[github.com/oliveagle/jsonpath](https://github.com/oliveagle/jsonpath) library. +See the README for possible queries. It's expected that the metric you query +returns something that can be turned into a `float64`. + +The other configuration options `path` and `port` specify where the metrics +endpoint is exposed on the pod. There are no default values, so they must be +defined. + +## Prometheus collector + +The Prometheus collector is a generic collector which can map Prometheus +queries to metrics that can be used for scaling. This approach is different +from how it's done in the +[k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) +where all available Prometheus metrics are collected +and transformed into metrics which the HPA can scale on, and there is no +possibility to do custom queries. +With the approach implemented here, users can define custom queries and only metrics +returned from those queries will be available, reducing the total number of +metrics stored. + +One downside of this approach is that badly performing queries can slow down/kill +Prometheus, so it can be dangerous to allow in a multi-tenant cluster. It's +also not possible to restrict the available metrics using something like RBAC +since any user would be able to create the metrics based on a custom query. + +I still believe custom queries are more useful, but it's good to be aware of +the trade-offs between the two approaches. + +### Supported metrics + +| Metric | Description | Type | Kind | +| ------------ | -------------- | ------- | -- | +| *custom* | No predefined metrics. Metrics are generated from user defined queries.
| Object | *any* | + +### Example + +This is an example of an HPA configured to get metrics based on a Prometheus +query. The query is defined in the annotation +`metric-config.object.processed-events-per-second.prometheus/query` where +`processed-events-per-second` is the metric name which will be associated with +the result of the query. + +It also specifies an annotation +`metric-config.object.processed-events-per-second.prometheus/per-replica` which +instructs the collector to treat the results as an average over all pods +targeted by the HPA. This makes it possible to mimic the behavior of +`targetAverageValue` which is not implemented for metric type `Object` as of +Kubernetes v1.10. ([It will most likely come in v1.12](https://github.com/kubernetes/kubernetes/pull/64097#event-1696222479)). + +```yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: myapp-hpa + annotations: + # metric-config.../ + metric-config.object.processed-events-per-second.prometheus/query: | + scalar(sum(rate(event-service_events_count{application="event-service",processed="true"}[1m]))) + metric-config.object.processed-events-per-second.prometheus/per-replica: "true" +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: custom-metrics-consumer + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Object + object: + metricName: processed-events-per-second + target: + apiVersion: v1 + kind: Service + name: event-service + targetValue: 10 # this will be treated as targetAverageValue +``` + +## Skipper collector + +The skipper collector is a simple wrapper around the Prometheus collector to +make it easy to define an HPA for scaling based on ingress metrics when +[skipper](https://github.com/zalando/skipper) is used as the ingress +implementation in your cluster. It assumes you are collecting Prometheus +metrics from skipper and it provides the correct Prometheus queries out of the +box so users don't have to define those manually. 
+ +### Supported metrics + +| Metric | Description | Type | Kind | +| ----------- | -------------- | ------ | ---- | +| `requests-per-second` | Scale based on requests per second for a certain ingress. | Object | `Ingress` | + +### Example + +This is an example of an HPA that will scale based on `requests-per-second` for +an ingress called `myapp`. + +```yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: myapp-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: myapp + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Object + object: + metricName: requests-per-second + target: + apiVersion: extensions/v1beta1 + kind: Ingress + name: myapp + targetValue: 10 # this will be treated as targetAverageValue +``` + +**Note:** As of Kubernetes v1.10 the HPA does not support `targetAverageValue` for +metrics of type `Object`. In case of requests per second it does not make sense +to scale on a summed value because you can not make the total requests per +second go down by adding more pods. For this reason the skipper collector will +automatically treat the value you define in `targetValue` as an average per pod +instead of a total sum. + +## AWS collector + +The AWS collector allows scaling based on external metrics exposed by AWS +services e.g. SQS queue lengths. + +### Supported metrics + +| Metric | Description | Type | +| ------------ | ------- | -- | +| `sqs-queue-length` | Scale based on SQS queue length | External | + +### Example + +This is an example of an HPA that will scale based on the length of an SQS +queue. 
+ +```yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: myapp-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: custom-metrics-consumer + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: External + external: + metricName: sqs-queue-length + metricSelector: + matchLabels: + queue-name: foobar + region: eu-central-1 + targetAverageValue: 30 +``` + +The `matchLabels` are used by `kube-metrics-adapter` to configure a collector +that will get the queue length for an SQS queue named `foobar` in region +`eu-central-1`. + +The AWS account of the queue currently depends on how `kube-metrics-adapter` is +configured to get AWS credentials. The normal assumption is that you run the +adapter in a cluster running in the AWS account where the queue is defined. +Please open an issue if you would like support for other use cases. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..16b236b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,7 @@ +We acknowledge that every line of code that we write may potentially contain security issues. + +We are trying to deal with it responsibly and provide patches as quickly as possible. 
If you have anything to report to us please use the following channels: + +Email: Tech-Security@zalando.de +OR +Submit your vulnerability report through our bug bounty program at: https://hackerone.com/zalando diff --git a/docs/custom-metrics-apiservice.yaml b/docs/custom-metrics-apiservice.yaml new file mode 100644 index 0000000..5df1876 --- /dev/null +++ b/docs/custom-metrics-apiservice.yaml @@ -0,0 +1,13 @@ +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.custom.metrics.k8s.io +spec: + service: + name: kube-metrics-adapter + namespace: kube-system + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 diff --git a/docs/deployment.yaml b/docs/deployment.yaml new file mode 100644 index 0000000..c68d8ea --- /dev/null +++ b/docs/deployment.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-metrics-adapter + namespace: kube-system + labels: + application: kube-metrics-adapter + version: latest +spec: + replicas: 1 + selector: + matchLabels: + application: kube-metrics-adapter + template: + metadata: + labels: + application: kube-metrics-adapter + version: latest + annotations: + iam.amazonaws.com/role: "kube-aws-test-1-app-zmon" + spec: + serviceAccountName: custom-metrics-apiserver + containers: + - name: kube-metrics-adapter + image: mikkeloscar/kube-metrics-adapter:latest + args: + # - --v=9 + - --prometheus-server=http://prometheus.kube-system.svc.cluster.local + - --skipper-ingress-metrics + - --aws-external-metrics + env: + - name: AWS_REGION + value: eu-central-1 + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi diff --git a/docs/external-metrics-apiservice.yaml b/docs/external-metrics-apiservice.yaml new file mode 100644 index 0000000..e43784f --- /dev/null +++ b/docs/external-metrics-apiservice.yaml @@ -0,0 +1,13 @@ +apiVersion: apiregistration.k8s.io/v1beta1 +kind: 
APIService +metadata: + name: v1beta1.external.metrics.k8s.io +spec: + service: + name: kube-metrics-adapter + namespace: kube-system + group: external.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 diff --git a/docs/rbac.yaml b/docs/rbac.yaml new file mode 100644 index 0000000..72169a4 --- /dev/null +++ b/docs/rbac.yaml @@ -0,0 +1,122 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: custom-metrics-apiserver + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: custom-metrics-server-resources +rules: +- apiGroups: + - custom.metrics.k8s.io + resources: ["*"] + verbs: ["*"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: custom-metrics-resource-reader +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + - services + verbs: + - get + - list + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: custom-metrics-resource-collector +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - get +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hpa-controller-custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-metrics-server-resources +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: custom-metrics-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: custom-metrics-apiserver + namespace: kube-system + +--- + +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: custom-metrics:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: custom-metrics-apiserver + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: custom-metrics-resource-collector +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-metrics-resource-collector +subjects: +- kind: ServiceAccount + name: custom-metrics-apiserver + namespace: kube-system diff --git a/docs/service.yaml b/docs/service.yaml new file mode 100644 index 0000000..b1a24f1 --- /dev/null +++ b/docs/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-metrics-adapter + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 443 + selector: + application: kube-metrics-adapter diff --git a/example/Dockerfile b/example/Dockerfile new file mode 100644 index 0000000..f72b2e7 --- /dev/null +++ b/example/Dockerfile @@ -0,0 +1,7 @@ +FROM registry.opensource.zalan.do/stups/alpine:latest +MAINTAINER Team Teapot @ Zalando SE + +# add binary +ADD build/linux/custom-metrics-consumer / + +ENTRYPOINT ["/custom-metrics-consumer"] diff --git a/example/Makefile b/example/Makefile new file mode 100644 index 0000000..45171eb --- /dev/null +++ b/example/Makefile @@ -0,0 +1,42 @@ +.PHONY: clean test check build.local build.linux build.osx build.docker build.push + +BINARY ?= custom-metrics-consumer +VERSION ?= $(shell git describe --tags --always --dirty) +IMAGE ?= mikkeloscar/$(BINARY) +TAG ?= $(VERSION) +SOURCES = $(shell find . -name '*.go') +DOCKERFILE ?= Dockerfile +GOPKGS = $(shell go list ./...) 
+BUILD_FLAGS ?= -v +LDFLAGS ?= -X main.version=$(VERSION) -w -s + +default: build.local + +clean: + rm -rf build + +test: + go test -v $(GOPKGS) + +check: + golint $(GOPKGS) + go vet -v $(GOPKGS) + +build.local: build/$(BINARY) +build.linux: build/linux/$(BINARY) +build.osx: build/osx/$(BINARY) + +build/$(BINARY): $(SOURCES) + CGO_ENABLED=0 go build -o build/$(BINARY) $(BUILD_FLAGS) -ldflags "$(LDFLAGS)" . + +build/linux/$(BINARY): $(SOURCES) + GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/linux/$(BINARY) -ldflags "$(LDFLAGS)" . + +build/osx/$(BINARY): $(SOURCES) + GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/osx/$(BINARY) -ldflags "$(LDFLAGS)" . + +build.docker: build.linux + docker build --rm -t "$(IMAGE):$(TAG)" -f $(DOCKERFILE) . + +build.push: build.docker + docker push "$(IMAGE):$(TAG)" diff --git a/example/deploy/deployment.yaml b/example/deploy/deployment.yaml new file mode 100644 index 0000000..b129422 --- /dev/null +++ b/example/deploy/deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: custom-metrics-consumer + labels: + application: custom-metrics-consumer + version: latest +spec: + selector: + matchLabels: + application: custom-metrics-consumer + template: + metadata: + labels: + application: custom-metrics-consumer + version: latest + spec: + containers: + - name: custom-metrics-consumer + image: mikkeloscar/custom-metrics-consumer:latest + args: + - --fake-queue-length=2000 + resources: + limits: + cpu: 10m + memory: 25Mi + requests: + cpu: 10m + memory: 25Mi diff --git a/example/deploy/hpa.yaml b/example/deploy/hpa.yaml new file mode 100644 index 0000000..fc7401f --- /dev/null +++ b/example/deploy/hpa.yaml @@ -0,0 +1,48 @@ +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: custom-metrics-consumer + namespace: default + labels: + application: custom-metrics-consumer + annotations: + # metric-config.../ + 
metric-config.pods.queue-length.json-path/json-key: "$.queue.length" + metric-config.pods.queue-length.json-path/path: /metrics + metric-config.pods.queue-length.json-path/port: "9090" + # metric-config.object.requests-per-second.prometheus/query: | + # scalar(sum(rate(skipper_serve_host_duration_seconds_count{host="custom-metrics_example_org"}[1m]))) + # metric-config.object.requests-per-second.prometheus/per-replica: "true" + # metric-config.object.requests-per-second.skipper/interval: "1s" +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: custom-metrics-consumer + minReplicas: 1 + maxReplicas: 10 + metrics: + # - type: Resource + # resource: + # name: cpu + # targetAverageUtilization: 50 + - type: Pods + pods: + metricName: queue-length + targetAverageValue: 1k + - type: Object + object: + metricName: requests-per-second + target: + apiVersion: extensions/v1beta1 + kind: Ingress + name: custom-metrics-consumer + targetValue: 10 # this will be treated as targetAverageValue + - type: External + external: + metricName: sqs-queue-length + metricSelector: + matchLabels: + queue-name: foobar + region: eu-central-1 + targetAverageValue: 30 diff --git a/example/deploy/ingress.yaml b/example/deploy/ingress.yaml new file mode 100644 index 0000000..8a95f81 --- /dev/null +++ b/example/deploy/ingress.yaml @@ -0,0 +1,14 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: custom-metrics-consumer + labels: + application: custom-metrics-consumer +spec: + rules: + - host: custom-metrics.example.org + http: + paths: + - backend: + serviceName: custom-metrics-consumer + servicePort: 80 diff --git a/example/deploy/service.yaml b/example/deploy/service.yaml new file mode 100644 index 0000000..d641113 --- /dev/null +++ b/example/deploy/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: custom-metrics-consumer + labels: + application: custom-metrics-consumer +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 
9090 + selector: + application: custom-metrics-consumer + type: ClusterIP diff --git a/example/main.go b/example/main.go new file mode 100644 index 0000000..3a51cbd --- /dev/null +++ b/example/main.go @@ -0,0 +1,33 @@ +package main + +import ( + "flag" + "fmt" + "net/http" + "time" +) + +func metricsHandler(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.Write([]byte(fmt.Sprintf(`{"queue": {"length": %d}}`, size))) +} + +var ( + size int +) + +func main() { + flag.IntVar(&size, "fake-queue-length", 10, "Fake queue length for fake metrics.") + flag.Parse() + + mux := http.NewServeMux() + mux.HandleFunc("/metrics", metricsHandler) + + server := &http.Server{ + Addr: ":9090", + Handler: mux, + ReadTimeout: 5 * time.Second, + } + + server.ListenAndServe() +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..f2a1c77 --- /dev/null +++ b/go.mod @@ -0,0 +1,94 @@ +module github.com/mikkeloscar/kube-metrics-adapter + +require ( + bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c // indirect + github.com/BurntSushi/toml v0.3.0 // indirect + github.com/NYTimes/gziphandler v1.0.1 // indirect + github.com/PuerkitoBio/purell v1.1.0 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/aws/aws-sdk-go v1.15.21 + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/coreos/bbolt v1.3.0 // indirect + github.com/coreos/etcd v3.3.9+incompatible // indirect + github.com/coreos/go-semver v0.2.0 // indirect + github.com/coreos/go-systemd v0.0.0-20180705093442-88bfeed483d3 // indirect + github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect + github.com/emicklei/go-restful v2.8.0+incompatible // indirect + 
github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6 // indirect + github.com/evanphx/json-patch v3.0.0+incompatible // indirect + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-openapi/jsonpointer v0.0.0-20180322222829-3a0015ad55fa // indirect + github.com/go-openapi/jsonreference v0.0.0-20180322222742-3fb327e6747d // indirect + github.com/go-openapi/spec v0.0.0-20180801175345-384415f06ee2 // indirect + github.com/go-openapi/swag v0.0.0-20180715190254-becd2f08beaf // indirect + github.com/gogo/protobuf v1.1.1 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect + github.com/golang/protobuf v1.2.0 // indirect + github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect + github.com/googleapis/gnostic v0.2.0 // indirect + github.com/gopherjs/gopherjs v0.0.0-20180820052304-89baedc74dd7 // indirect + github.com/gorilla/websocket v1.3.0 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.4.1 // indirect + github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect + github.com/hpcloud/tail v1.0.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jonboulle/clockwork v0.1.0 // indirect + github.com/json-iterator/go v1.1.5 // indirect + github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20180824182428-26e5299457d3 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 + github.com/onsi/ginkgo v1.6.0 // indirect + github.com/onsi/gomega v1.4.1 // indirect + github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v0.9.0-pre1.0.20180824101016-4eb539fa85a2 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect + github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e + github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect + github.com/sirupsen/logrus v1.0.6 // indirect + github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect + github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect + github.com/soheilhy/cmux v0.1.4 // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.2 // indirect + github.com/stretchr/testify v1.2.2 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 // indirect + github.com/ugorji/go v1.1.1 // indirect + github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect + golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect + golang.org/x/net v0.0.0-20180824152047-4bcd98cce591 // indirect + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect + golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87 // indirect + golang.org/x/text v0.3.0 // indirect + golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 // indirect + gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect + gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + 
gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.2.1 // indirect + k8s.io/api v0.0.0-20180628040859-072894a440bd + k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d + k8s.io/apiserver v0.0.0-20180628044425-01459b68eb5f + k8s.io/client-go v8.0.0+incompatible + k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c // indirect + k8s.io/metrics v0.0.0-20180718014405-6efa0bfaa5c1 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..8cb1e5e --- /dev/null +++ b/go.sum @@ -0,0 +1,186 @@ +bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c h1:t+Ra932MCC0eeyD/vigXqMbZTzgZjd4JOfBJWC6VSMI= +bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c/go.mod h1:1vhO7Mn/FZMgOgDVGLy5X1mE6rq1HbkBdkF/yj8zkcg= +github.com/BurntSushi/toml v0.3.0 h1:e1/Ivsx3Z0FVTV0NSOv/aVgbUWyQuzj7DDnFblkRvsY= +github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v1.0.1 h1:iLrQrdwjDd52kHDA5op2UBJFjmOb9g+7scBan4RN8F0= +github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/aws/aws-sdk-go v1.15.21 h1:STLvc6RrpycslC1NRtTvt/YSgDkIGCTrB9K9vE5R2oQ= +github.com/aws/aws-sdk-go v1.15.21/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/coreos/bbolt v1.3.0 h1:HIgH5xUWXT914HCI671AxuTTqjj64UOFr7pHn48LUTI= +github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.9+incompatible h1:/pWnp1yEff0z+vBEOBFLZZ22Ux5xoVozEe7X0VFyRNo= +github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180705093442-88bfeed483d3 h1:h/wTyTK7VVFaSLpGFKLPkEYiWuloHpStKd30EZIaL9I= +github.com/coreos/go-systemd v0.0.0-20180705093442-88bfeed483d3/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/emicklei/go-restful v2.8.0+incompatible h1:wN8GCRDPGHguIynsnBartv5GUgGUg1LAU7+xnSn1j7Q= +github.com/emicklei/go-restful v2.8.0+incompatible/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6 h1:V94anc0ZG3Pa/cAMwP2m1aQW3+/FF8Qmw/GsFyTJAp4= +github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ= +github.com/evanphx/json-patch v3.0.0+incompatible h1:l91aby7TzBXBdmF8heZqjskeH9f3g7ZOL8/sSe+vTlU= +github.com/evanphx/json-patch v3.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-openapi/jsonpointer v0.0.0-20180322222829-3a0015ad55fa h1:hr8WVDjg4JKtQptZpzyb196TmruCs7PIsdJz8KAOZp8= +github.com/go-openapi/jsonpointer v0.0.0-20180322222829-3a0015ad55fa/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20180322222742-3fb327e6747d h1:k3UQ7Z8yFYq0BNkYykKIheY0HlZBl1Hku+pO9HE9FNU= +github.com/go-openapi/jsonreference v0.0.0-20180322222742-3fb327e6747d/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20180801175345-384415f06ee2 h1:D9BfbclVKVcfluusYmw6Z2/O74y41shNFpN5jbbcbSs= +github.com/go-openapi/spec v0.0.0-20180801175345-384415f06ee2/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20180715190254-becd2f08beaf h1:7lg/DRYbE2t6UvEipbKopk3GzYhRnv9/+qpUMYNfiAw= +github.com/go-openapi/swag v0.0.0-20180715190254-becd2f08beaf/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/gogo/protobuf v1.1.1 
h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gopherjs/gopherjs v0.0.0-20180820052304-89baedc74dd7 h1:WF7x3tAe0mEb4wf/yhSThHwZYQIjVmEGSbAH9hzOeZQ= +github.com/gopherjs/gopherjs v0.0.0-20180820052304-89baedc74dd7/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.3.0 h1:r/LXc0VJIMd0rCMsc6DxgczaQtoCwCLatnfXmSYcXx8= +github.com/gorilla/websocket v1.3.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod 
h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.4.1 h1:pX7cnDwSSmG0dR9yNjCQSSpmsJOqFdT7SzVp5Yl9uVw= +github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20180824182428-26e5299457d3 h1:X22IRs6vbuj0xu3ZuMYMI2Qe0IDmxv0RJvWEwLw2nSg= +github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20180824182428-26e5299457d3/go.mod h1:KWRxWvzVCNvDtG9ejU5UdpgvxdCZFMUZu0xroKWG8Bo= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 h1:Yl0tPBa8QPjGmesFh1D0rDy+q1Twx6FyU7VWHi8wZbI= +github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852/go.mod h1:eqOVx5Vwu4gd2mmMZvVZsgIqNSaW3xxRThUJ0k/TPk4= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.1 h1:PZSj/UFNaVp3KxrzHOcS7oyuWA7LoOY/77yCTEFu21U= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3 h1:9J0mOv1rXIBlRjQCiAGyx9C3dZZh5uIa3HU0oTV8v1E= +github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= 
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.0-pre1.0.20180824101016-4eb539fa85a2 h1:jzd8QkZcF6lmxZUhgz/fisGD9H6GJja8jt7ucaaZPgM= +github.com/prometheus/client_golang v0.9.0-pre1.0.20180824101016-4eb539fa85a2/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf h1:6V1qxN6Usn4jy8unvggSJz/NC790tefw8Zdy6OZS5co= +github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod 
h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.1 h1:gmervu+jDMvXTbcHQ0pd2wee85nEoE0BsVyEuzkfK8w= +github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180824152047-4bcd98cce591 h1:4S2XUgvg3hUNTvxI307qkFPb9zKHG3Nf9TXFzX/DZZI= +golang.org/x/net v0.0.0-20180824152047-4bcd98cce591/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87 h1:GqwDwfvIpC33dK9bA1fD+JiDUNsuAiQiEkpHqUKze4o= +golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 h1:AFxeG48hTWHhDTQDk/m2gorfVHUEa9vo3tp3D7TzwjI= 
+gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.0.0-20180628040859-072894a440bd h1:HzgYeLDS1jLxw8DGr68KJh9cdQ5iZJizG0HZWstIhfQ= +k8s.io/api v0.0.0-20180628040859-072894a440bd/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4= +k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiserver v0.0.0-20180628044425-01459b68eb5f h1:eCb6E7epnzes186KGtXgA3PE2mLim3ceVSbCXQ8v5e8= +k8s.io/apiserver v0.0.0-20180628044425-01459b68eb5f/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/client-go v8.0.0+incompatible h1:7Zl+OVXn0bobcsi4NEZGdoQDTE9ij1zPMfM21+yqQsM= +k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/metrics v0.0.0-20180718014405-6efa0bfaa5c1 h1:7l3a6VjAMN3vKX7iN6pJLIn+wVzdIdHm+YWJfV4sg2s= +k8s.io/metrics v0.0.0-20180718014405-6efa0bfaa5c1/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M= diff --git a/main.go b/main.go new file mode 100644 index 0000000..4ba3d0a --- /dev/null +++ b/main.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "os" + "runtime" + + "github.com/mikkeloscar/kube-metrics-adapter/pkg/server" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/util/logs" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + + if len(os.Getenv("GOMAXPROCS")) == 0 { + runtime.GOMAXPROCS(runtime.NumCPU()) + } + + cmd := server.NewCommandStartAdapterServer(wait.NeverStop) + cmd.Flags().AddGoFlagSet(flag.CommandLine) + if err := cmd.Execute(); err != nil { + panic(err) + } +} diff --git a/pkg/collector/aws_collector.go b/pkg/collector/aws_collector.go new file mode 100644 index 0000000..fca1403 --- /dev/null +++ b/pkg/collector/aws_collector.go @@ -0,0 +1,128 @@ +package collector + +import ( + "fmt" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/aws/aws-sdk-go/service/sqs/sqsiface" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/metrics/pkg/apis/external_metrics" +) + +const ( + AWSSQSQueueLengthMetric = "sqs-queue-length" + sqsQueueNameLabelKey = "queue-name" + sqsQueueRegionLabelKey = "region" +) + +type AWSCollectorPlugin struct { + sessions map[string]*session.Session +} + +func NewAWSCollectorPlugin(sessions map[string]*session.Session) *AWSCollectorPlugin { + return &AWSCollectorPlugin{ + 
sessions: sessions, + } +} + +// NewCollector initializes a new skipper collector from the specified HPA. +func (c *AWSCollectorPlugin) NewCollector(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) { + switch config.Name { + case AWSSQSQueueLengthMetric: + return NewAWSSQSCollector(c.sessions, config, interval) + } + + return nil, fmt.Errorf("metric '%s' not supported", config.Name) +} + +type AWSSQSCollector struct { + sqs sqsiface.SQSAPI + interval time.Duration + region string + queueURL string + queueName string + labels map[string]string + metricName string + metricType autoscalingv2beta1.MetricSourceType +} + +func NewAWSSQSCollector(sessions map[string]*session.Session, config *MetricConfig, interval time.Duration) (*AWSSQSCollector, error) { + + name, ok := config.Labels[sqsQueueNameLabelKey] + if !ok { + return nil, fmt.Errorf("sqs queue name not specified on metric") + } + region, ok := config.Labels[sqsQueueRegionLabelKey] + if !ok { + return nil, fmt.Errorf("sqs queue region is not specified on metric") + } + + session, ok := sessions[region] + if !ok { + return nil, fmt.Errorf("the metric region: %s is not configured", region) + } + + service := sqs.New(session) + params := &sqs.GetQueueUrlInput{ + QueueName: aws.String(name), + } + + resp, err := service.GetQueueUrl(params) + if err != nil { + return nil, fmt.Errorf("failed to get queue URL for queue '%s': %v", name, err) + } + + return &AWSSQSCollector{ + sqs: service, + interval: interval, + queueURL: aws.StringValue(resp.QueueUrl), + queueName: name, + metricName: config.Name, + metricType: config.Type, + labels: config.Labels, + }, nil +} + +func (c *AWSSQSCollector) GetMetrics() ([]CollectedMetric, error) { + params := &sqs.GetQueueAttributesInput{ + QueueUrl: aws.String(c.queueURL), + AttributeNames: aws.StringSlice([]string{sqs.QueueAttributeNameApproximateNumberOfMessages}), + } + + resp, err := c.sqs.GetQueueAttributes(params) + 
if err != nil { + return nil, err + } + + if v, ok := resp.Attributes[sqs.QueueAttributeNameApproximateNumberOfMessages]; ok { + i, err := strconv.Atoi(aws.StringValue(v)) + if err != nil { + return nil, err + } + + metricValue := CollectedMetric{ + Type: c.metricType, + External: external_metrics.ExternalMetricValue{ + MetricName: c.metricName, + MetricLabels: c.labels, + Timestamp: metav1.Time{Time: time.Now().UTC()}, + Value: *resource.NewQuantity(int64(i), resource.DecimalSI), + }, + } + + return []CollectedMetric{metricValue}, nil + } + + return nil, fmt.Errorf("failed to get queue length for '%s'", c.queueName) +} + +// Interval returns the interval at which the collector should run. +func (c *AWSSQSCollector) Interval() time.Duration { + return c.interval +} diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go new file mode 100644 index 0000000..015b7bf --- /dev/null +++ b/pkg/collector/collector.go @@ -0,0 +1,311 @@ +package collector + +import ( + "fmt" + "strings" + "time" + + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/metrics/pkg/apis/custom_metrics" + "k8s.io/metrics/pkg/apis/external_metrics" +) + +const ( + customMetricsPrefix = "metric-config." 
+ perReplicaMetricsConfKey = "per-replica" + intervalMetricsConfKey = "interval" +) + +type ObjectReference struct { + autoscalingv2beta1.CrossVersionObjectReference + Namespace string +} + +type CollectorFactory struct { + podsPlugins pluginMap + objectPlugins objectPluginMap + externalPlugins map[string]CollectorPlugin +} + +type objectPluginMap struct { + Any pluginMap + Named map[string]*pluginMap +} + +type pluginMap struct { + Any CollectorPlugin + Named map[string]CollectorPlugin +} + +func NewCollectorFactory() *CollectorFactory { + return &CollectorFactory{ + podsPlugins: pluginMap{Named: map[string]CollectorPlugin{}}, + objectPlugins: objectPluginMap{ + Any: pluginMap{}, + Named: map[string]*pluginMap{}, + }, + externalPlugins: map[string]CollectorPlugin{}, + } +} + +type CollectorPlugin interface { + NewCollector(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) +} + +func (c *CollectorFactory) RegisterPodsCollector(metricCollector string, plugin CollectorPlugin) error { + if metricCollector == "" { + c.podsPlugins.Any = plugin + } else { + c.podsPlugins.Named[metricCollector] = plugin + } + return nil + +} + +func (c *CollectorFactory) RegisterObjectCollector(kind, metricCollector string, plugin CollectorPlugin) error { + if kind == "" { + if metricCollector == "" { + c.objectPlugins.Any.Any = plugin + } else { + if c.objectPlugins.Any.Named == nil { + c.objectPlugins.Any.Named = map[string]CollectorPlugin{ + metricCollector: plugin, + } + } else { + c.objectPlugins.Any.Named[metricCollector] = plugin + } + } + } else { + if named, ok := c.objectPlugins.Named[kind]; ok { + if metricCollector == "" { + named.Any = plugin + } else { + named.Named[metricCollector] = plugin + } + } else { + if metricCollector == "" { + c.objectPlugins.Named[kind] = &pluginMap{ + Any: plugin, + } + } else { + c.objectPlugins.Named[kind] = &pluginMap{ + Named: map[string]CollectorPlugin{ + metricCollector: 
plugin, + }, + } + } + } + } + + return nil +} + +func (c *CollectorFactory) RegisterExternalCollector(metrics []string, plugin CollectorPlugin) { + for _, metric := range metrics { + c.externalPlugins[metric] = plugin + } +} + +func (c *CollectorFactory) NewCollector(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) { + switch config.Type { + case autoscalingv2beta1.PodsMetricSourceType: + // first try to find a plugin by format + if plugin, ok := c.podsPlugins.Named[config.CollectorName]; ok { + return plugin.NewCollector(hpa, config, interval) + } + + // else try to use the default plugin if set + if c.podsPlugins.Any != nil { + return c.podsPlugins.Any.NewCollector(hpa, config, interval) + } + case autoscalingv2beta1.ObjectMetricSourceType: + // first try to find a plugin by kind + if kinds, ok := c.objectPlugins.Named[config.ObjectReference.Kind]; ok { + if plugin, ok := kinds.Named[config.CollectorName]; ok { + return plugin.NewCollector(hpa, config, interval) + } + + if kinds.Any != nil { + return kinds.Any.NewCollector(hpa, config, interval) + } + break + } + + // else try to find a default plugin for this kind + if plugin, ok := c.objectPlugins.Any.Named[config.CollectorName]; ok { + return plugin.NewCollector(hpa, config, interval) + } + + if c.objectPlugins.Any.Any != nil { + return c.objectPlugins.Any.Any.NewCollector(hpa, config, interval) + } + case autoscalingv2beta1.ExternalMetricSourceType: + if plugin, ok := c.externalPlugins[config.Name]; ok { + return plugin.NewCollector(hpa, config, interval) + } + } + + return nil, fmt.Errorf("no plugin found for %s", config.MetricTypeName) +} + +func getObjectReference(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, metricName string) (custom_metrics.ObjectReference, error) { + for _, metric := range hpa.Spec.Metrics { + if metric.Type == autoscalingv2beta1.ObjectMetricSourceType && metric.Object.MetricName == metricName { + return 
custom_metrics.ObjectReference{ + APIVersion: metric.Object.Target.APIVersion, + Kind: metric.Object.Target.Kind, + Name: metric.Object.Target.Name, + Namespace: hpa.Namespace, + }, nil + } + } + + return custom_metrics.ObjectReference{}, fmt.Errorf("failed to find object reference") +} + +type MetricTypeName struct { + Type autoscalingv2beta1.MetricSourceType + Name string +} + +type CollectedMetric struct { + Type autoscalingv2beta1.MetricSourceType + Custom custom_metrics.MetricValue + External external_metrics.ExternalMetricValue + Labels map[string]string +} + +type Collector interface { + GetMetrics() ([]CollectedMetric, error) + Interval() time.Duration +} + +type MetricConfig struct { + MetricTypeName + CollectorName string + Config map[string]string + ObjectReference custom_metrics.ObjectReference + PerReplica bool + Interval time.Duration + Labels map[string]string +} + +func parseCustomMetricsAnnotations(annotations map[string]string) (map[MetricTypeName]*MetricConfig, error) { + metrics := make(map[MetricTypeName]*MetricConfig) + for key, val := range annotations { + if !strings.HasPrefix(key, customMetricsPrefix) { + continue + } + + parts := strings.Split(key, "/") + if len(parts) != 2 { + // TODO: error? + continue + } + + configs := strings.Split(parts[0], ".") + if len(configs) != 4 { + // TODO: error? 
+ continue + } + + metricTypeName := MetricTypeName{ + Name: configs[2], + } + + switch configs[1] { + case "pods": + metricTypeName.Type = autoscalingv2beta1.PodsMetricSourceType + case "object": + metricTypeName.Type = autoscalingv2beta1.ObjectMetricSourceType + } + + metricCollector := configs[3] + + config, ok := metrics[metricTypeName] + if !ok { + config = &MetricConfig{ + MetricTypeName: metricTypeName, + CollectorName: metricCollector, + Config: map[string]string{}, + } + metrics[metricTypeName] = config + } + + // TODO: fail if collector name doesn't match + if config.CollectorName != metricCollector { + continue + } + + if parts[1] == perReplicaMetricsConfKey { + config.PerReplica = true + continue + } + + if parts[1] == intervalMetricsConfKey { + interval, err := time.ParseDuration(val) + if err != nil { + return nil, fmt.Errorf("failed to parse interval value %s for %s: %v", val, key, err) + } + config.Interval = interval + continue + } + + config.Config[parts[1]] = val + } + + return metrics, nil +} + +// ParseHPAMetrics parses the HPA object into a list of metric configurations. 
+func ParseHPAMetrics(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) ([]*MetricConfig, error) { + metricConfigs := make([]*MetricConfig, 0, len(hpa.Spec.Metrics)) + + // TODO: validate that the specified metric names are defined + // in the HPA + configs, err := parseCustomMetricsAnnotations(hpa.Annotations) + if err != nil { + return nil, err + } + + for _, metric := range hpa.Spec.Metrics { + typeName := MetricTypeName{ + Type: metric.Type, + } + + var ref custom_metrics.ObjectReference + switch metric.Type { + case autoscalingv2beta1.PodsMetricSourceType: + typeName.Name = metric.Pods.MetricName + case autoscalingv2beta1.ObjectMetricSourceType: + typeName.Name = metric.Object.MetricName + ref = custom_metrics.ObjectReference{ + APIVersion: metric.Object.Target.APIVersion, + Kind: metric.Object.Target.Kind, + Name: metric.Object.Target.Name, + Namespace: hpa.Namespace, + } + case autoscalingv2beta1.ExternalMetricSourceType: + typeName.Name = metric.External.MetricName + } + + if config, ok := configs[typeName]; ok { + config.ObjectReference = ref + metricConfigs = append(metricConfigs, config) + continue + } + + config := &MetricConfig{ + MetricTypeName: typeName, + ObjectReference: ref, + Config: map[string]string{}, + } + + if metric.Type == autoscalingv2beta1.ExternalMetricSourceType { + config.Labels = metric.External.MetricSelector.MatchLabels + } + metricConfigs = append(metricConfigs, config) + } + + return metricConfigs, nil +} diff --git a/pkg/collector/json_path_collector.go b/pkg/collector/json_path_collector.go new file mode 100644 index 0000000..d101e8c --- /dev/null +++ b/pkg/collector/json_path_collector.go @@ -0,0 +1,133 @@ +package collector + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/oliveagle/jsonpath" + "k8s.io/api/core/v1" +) + +// JSONPathMetricsGetter is a metrics getter which looks up pod metrics by +// querying the pods metrics endpoint and lookup the metric value as 
defined by +// the json path query. +type JSONPathMetricsGetter struct { + jsonPath *jsonpath.Compiled + scheme string + path string + port int +} + +// NewJSONPathMetricsGetter initializes a new JSONPathMetricsGetter. +func NewJSONPathMetricsGetter(config map[string]string) (*JSONPathMetricsGetter, error) { + getter := &JSONPathMetricsGetter{} + + if v, ok := config["json-key"]; ok { + pat, err := jsonpath.Compile(v) + if err != nil { + return nil, fmt.Errorf("failed to parse json path definition: %v", err) + } + + getter.jsonPath = pat + } + + if v, ok := config["scheme"]; ok { + getter.scheme = v + } + + if v, ok := config["path"]; ok { + getter.path = v + } + + if v, ok := config["port"]; ok { + n, err := strconv.Atoi(v) + if err != nil { + return nil, err + } + getter.port = n + } + + return getter, nil +} + +// GetMetric gets metric from pod by fetching json metrics from the pods metric +// endpoint and extracting the desired value using the specified json path +// query. +func (g *JSONPathMetricsGetter) GetMetric(pod *v1.Pod) (float64, error) { + data, err := getPodMetrics(pod, g.scheme, g.path, g.port) + if err != nil { + return 0, err + } + + // parse data + var jsonData interface{} + err = json.Unmarshal(data, &jsonData) + if err != nil { + return 0, err + } + + res, err := g.jsonPath.Lookup(jsonData) + if err != nil { + return 0, err + } + + switch res := res.(type) { + case int: + return float64(res), nil + case float32: + return float64(res), nil + case float64: + return res, nil + default: + return 0, fmt.Errorf("unsupported type %T", res) + } +} + +// getPodMetrics returns the content of the pods metrics endpoint. 
+func getPodMetrics(pod *v1.Pod, scheme, path string, port int) ([]byte, error) { + if pod.Status.PodIP == "" { + return nil, fmt.Errorf("pod %s/%s does not have a pod IP", pod.Namespace, pod.Namespace) + } + + httpClient := &http.Client{ + Timeout: 15 * time.Second, + Transport: &http.Transport{}, + } + + if scheme == "" { + scheme = "http" + } + + metricsURL := url.URL{ + Scheme: scheme, + Host: fmt.Sprintf("%s:%d", pod.Status.PodIP, port), + Path: path, + } + + request, err := http.NewRequest(http.MethodGet, metricsURL.String(), nil) + if err != nil { + return nil, err + } + + resp, err := httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unsuccessful response: %s", resp.Status) + } + + return data, nil +} diff --git a/pkg/collector/max_collector.go b/pkg/collector/max_collector.go new file mode 100644 index 0000000..b1aa744 --- /dev/null +++ b/pkg/collector/max_collector.go @@ -0,0 +1,42 @@ +package collector + +import "time" + +// MaxCollector is a simple aggregator collector that returns the maximum value +// of metrics from all collectors. +type MaxCollector struct { + collectors []Collector + interval time.Duration +} + +// NewMaxCollector initializes a new MacCollector. +func NewMaxCollector(interval time.Duration, collectors ...Collector) *MaxCollector { + return &MaxCollector{ + collectors: collectors, + interval: interval, + } +} + +// GetMetrics gets metrics from all collectors and return the higest value. 
+func (c *MaxCollector) GetMetrics() ([]CollectedMetric, error) { + var max CollectedMetric + for _, collector := range c.collectors { + values, err := collector.GetMetrics() + if err != nil { + return nil, err + } + + for _, value := range values { + if value.Custom.Value.MilliValue() > max.Custom.Value.MilliValue() { + max = value + } + } + + } + return []CollectedMetric{max}, nil +} + +// Interval returns the interval at which the collector should run. +func (c *MaxCollector) Interval() time.Duration { + return c.interval +} diff --git a/pkg/collector/object_collector.go b/pkg/collector/object_collector.go new file mode 100644 index 0000000..5fb5d08 --- /dev/null +++ b/pkg/collector/object_collector.go @@ -0,0 +1,20 @@ +package collector + +import autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + +type ObjectMetricsGetter interface { + GetObjectMetric(namespace string, reference *autoscalingv2beta1.CrossVersionObjectReference) (float64, error) +} + +// type PodCollector struct { +// client kubernetes.Interface +// Getter PodMetricsGetter +// podLabelSelector string +// namespace string +// metricName string +// interval time.Duration +// } + +// func NewObjectCollector(client kubernetes.Interface, hpa *autoscalingv2beta1.HorizontalPodAutoscaler, metricName string, config *MetricConfig, interval time.Duration) (Collector, error) { +// switch +// } diff --git a/pkg/collector/pod_collector.go b/pkg/collector/pod_collector.go new file mode 100644 index 0000000..8d2a759 --- /dev/null +++ b/pkg/collector/pod_collector.go @@ -0,0 +1,141 @@ +package collector + +import ( + "fmt" + "time" + + "github.com/golang/glog" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" + "k8s.io/metrics/pkg/apis/custom_metrics" +) + +type PodCollectorPlugin struct { + client kubernetes.Interface +} + +func 
NewPodCollectorPlugin(client kubernetes.Interface) *PodCollectorPlugin { + return &PodCollectorPlugin{ + client: client, + } +} + +func (p *PodCollectorPlugin) NewCollector(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) { + return NewPodCollector(p.client, hpa, config, interval) +} + +type PodCollector struct { + client kubernetes.Interface + Getter PodMetricsGetter + podLabelSelector string + namespace string + metricName string + metricType autoscalingv2beta1.MetricSourceType + interval time.Duration +} + +type PodMetricsGetter interface { + GetMetric(pod *v1.Pod) (float64, error) +} + +func NewPodCollector(client kubernetes.Interface, hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*PodCollector, error) { + // get pod selector based on HPA scale target ref + selector, err := getPodLabelSelector(client, hpa) + if err != nil { + return nil, fmt.Errorf("failed to get pod label selector: %v", err) + } + + c := &PodCollector{ + client: client, + namespace: hpa.Namespace, + metricName: config.Name, + metricType: config.Type, + interval: interval, + podLabelSelector: selector, + } + + var getter PodMetricsGetter + switch config.CollectorName { + case "json-path": + var err error + getter, err = NewJSONPathMetricsGetter(config.Config) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("format '%s' not supported", config.CollectorName) + } + + c.Getter = getter + + return c, nil +} + +func (c *PodCollector) GetMetrics() ([]CollectedMetric, error) { + opts := metav1.ListOptions{ + LabelSelector: c.podLabelSelector, + } + + pods, err := c.client.CoreV1().Pods(c.namespace).List(opts) + if err != nil { + return nil, err + } + + values := make([]CollectedMetric, 0, len(pods.Items)) + + // TODO: get metrics in parallel + for _, pod := range pods.Items { + value, err := c.Getter.GetMetric(&pod) + if err != nil { + glog.Errorf("Failed to get 
metrics from pod '%s/%s': %v", pod.Namespace, pod.Name, err) + continue + } + + metricValue := CollectedMetric{ + Type: c.metricType, + Custom: custom_metrics.MetricValue{ + DescribedObject: custom_metrics.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Name: pod.Name, + Namespace: pod.Namespace, + }, + MetricName: c.metricName, + Timestamp: metav1.Time{Time: time.Now().UTC()}, + Value: *resource.NewMilliQuantity(int64(value*1000), resource.DecimalSI), + }, + Labels: pod.Labels, + } + + values = append(values, metricValue) + } + + return values, nil +} + +func (c *PodCollector) Interval() time.Duration { + return c.interval +} + +func getPodLabelSelector(client kubernetes.Interface, hpa *autoscalingv2beta1.HorizontalPodAutoscaler) (string, error) { + switch hpa.Spec.ScaleTargetRef.Kind { + case "Deployment": + deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return labels.Set(deployment.Spec.Selector.MatchLabels).String(), nil + case "StatefulSet": + sts, err := client.AppsV1().StatefulSets(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return labels.Set(sts.Spec.Selector.MatchLabels).String(), nil + } + + return "", fmt.Errorf("unable to get pod label selector for scale target ref '%s'", hpa.Spec.ScaleTargetRef.Kind) +} diff --git a/pkg/collector/prometheus_collector.go b/pkg/collector/prometheus_collector.go new file mode 100644 index 0000000..e22accc --- /dev/null +++ b/pkg/collector/prometheus_collector.go @@ -0,0 +1,131 @@ +package collector + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/prometheus/client_golang/api" + promv1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/client-go/kubernetes" + "k8s.io/metrics/pkg/apis/custom_metrics" +) + +type PrometheusCollectorPlugin struct { + promAPI promv1.API + client kubernetes.Interface +} + +func NewPrometheusCollectorPlugin(client kubernetes.Interface, prometheusServer string) (*PrometheusCollectorPlugin, error) { + cfg := api.Config{ + Address: prometheusServer, + RoundTripper: &http.Transport{}, + } + + promClient, err := api.NewClient(cfg) + if err != nil { + return nil, err + } + + return &PrometheusCollectorPlugin{ + client: client, + promAPI: promv1.NewAPI(promClient), + }, nil +} + +func (p *PrometheusCollectorPlugin) NewCollector(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) { + return NewPrometheusCollector(p.client, p.promAPI, hpa, config, interval) +} + +type PrometheusCollector struct { + client kubernetes.Interface + promAPI promv1.API + query string + metricName string + metricType autoscalingv2beta1.MetricSourceType + objectReference custom_metrics.ObjectReference + interval time.Duration + perReplica bool + hpa *autoscalingv2beta1.HorizontalPodAutoscaler +} + +func NewPrometheusCollector(client kubernetes.Interface, promAPI promv1.API, hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*PrometheusCollector, error) { + c := &PrometheusCollector{ + client: client, + objectReference: config.ObjectReference, + metricName: config.Name, + metricType: config.Type, + interval: interval, + promAPI: promAPI, + perReplica: config.PerReplica, + hpa: hpa, + } + + if v, ok := config.Config["query"]; ok { + // TODO: validate query + c.query = v + } else { + return nil, fmt.Errorf("no prometheus query defined") + } + + return c, nil +} + +func (c *PrometheusCollector) GetMetrics() ([]CollectedMetric, error) { + // TODO: use real context + value, err := c.promAPI.Query(context.Background(), c.query, time.Now().UTC()) + if err != nil { + return nil, err + } + + var 
sampleValue model.SampleValue + switch value.Type() { + case model.ValVector: + samples := value.(model.Vector) + if len(samples) == 0 { + return nil, fmt.Errorf("query '%s' returned no samples", c.query) + } + + sampleValue = samples[0].Value + case model.ValScalar: + scalar := value.(*model.Scalar) + sampleValue = scalar.Value + } + + if sampleValue.String() == "NaN" { + return nil, fmt.Errorf("query '%s' returned no samples: %s", c.query, sampleValue.String()) + } + + if c.perReplica { + // get current replicas for the targeted scale object. This is used to + // calculate an average metric instead of total. + // targetAverageValue will be available in Kubernetes v1.12 + // https://github.com/kubernetes/kubernetes/pull/64097 + replicas, err := targetRefReplicas(c.client, c.hpa) + if err != nil { + return nil, err + } + sampleValue = model.SampleValue(float64(sampleValue) / float64(replicas)) + } + + metricValue := CollectedMetric{ + Type: c.metricType, + Custom: custom_metrics.MetricValue{ + DescribedObject: c.objectReference, + MetricName: c.metricName, + Timestamp: metav1.Time{Time: time.Now().UTC()}, + Value: *resource.NewMilliQuantity(int64(sampleValue*1000), resource.DecimalSI), + }, + } + + return []CollectedMetric{metricValue}, nil +} + +func (c *PrometheusCollector) Interval() time.Duration { + return c.interval +} diff --git a/pkg/collector/skipper_collector.go b/pkg/collector/skipper_collector.go new file mode 100644 index 0000000..ba4863d --- /dev/null +++ b/pkg/collector/skipper_collector.go @@ -0,0 +1,165 @@ +package collector + +import ( + "fmt" + "strings" + "time" + + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/metrics/pkg/apis/custom_metrics" +) + +const ( + rpsQuery = `scalar(sum(rate(skipper_serve_host_duration_seconds_count{host="%s"}[1m])))` + rpsMetricName = "requests-per-second" +) + +// 
SkipperCollectorPlugin is a collector plugin for initializing metrics +// collectors for getting skipper ingress metrics. +type SkipperCollectorPlugin struct { + client kubernetes.Interface + plugin CollectorPlugin +} + +// NewSkipperCollectorPlugin initializes a new SkipperCollectorPlugin. +func NewSkipperCollectorPlugin(client kubernetes.Interface, prometheusPlugin *PrometheusCollectorPlugin) (*SkipperCollectorPlugin, error) { + return &SkipperCollectorPlugin{ + client: client, + plugin: prometheusPlugin, + }, nil +} + +// NewCollector initializes a new skipper collector from the specified HPA. +func (c *SkipperCollectorPlugin) NewCollector(hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) { + switch config.Name { + case rpsMetricName: + return NewSkipperCollector(c.client, c.plugin, hpa, config, interval) + default: + return nil, fmt.Errorf("metric '%s' not supported", config.Name) + } +} + +// SkipperCollector is a metrics collector for getting skipper ingress metrics. +// It depends on the prometheus collector for getting the metrics. +type SkipperCollector struct { + client kubernetes.Interface + metricName string + objectReference custom_metrics.ObjectReference + hpa *autoscalingv2beta1.HorizontalPodAutoscaler + interval time.Duration + plugin CollectorPlugin + config MetricConfig +} + +// NewSkipperCollector initializes a new SkipperCollector. +func NewSkipperCollector(client kubernetes.Interface, plugin CollectorPlugin, hpa *autoscalingv2beta1.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*SkipperCollector, error) { + return &SkipperCollector{ + client: client, + objectReference: config.ObjectReference, + hpa: hpa, + metricName: config.Name, + interval: interval, + plugin: plugin, + config: *config, + }, nil +} + +// getCollector returns a collector for getting the metrics. 
+func (c *SkipperCollector) getCollector() (Collector, error) { + ingress, err := c.client.ExtensionsV1beta1().Ingresses(c.objectReference.Namespace).Get(c.objectReference.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + config := c.config + + var collector Collector + collectors := make([]Collector, 0, len(ingress.Spec.Rules)) + for _, rule := range ingress.Spec.Rules { + host := strings.Replace(rule.Host, ".", "_", -1) + config.Config = map[string]string{ + "query": fmt.Sprintf(rpsQuery, host), + } + + config.PerReplica = false // per replica is handled outside of the prometheus collector + collector, err := c.plugin.NewCollector(c.hpa, &config, c.interval) + if err != nil { + return nil, err + } + + collectors = append(collectors, collector) + } + if len(collectors) > 1 { + collector = NewMaxCollector(c.interval, collectors...) + } else if len(collectors) == 1 { + collector = collectors[0] + } else { + return nil, fmt.Errorf("no hosts defined on ingress %s/%s, unable to create collector", c.objectReference.Namespace, c.objectReference.Name) + } + + return collector, nil +} + +// GetMetrics gets skipper metrics from prometheus. +func (c *SkipperCollector) GetMetrics() ([]CollectedMetric, error) { + collector, err := c.getCollector() + if err != nil { + return nil, err + } + + values, err := collector.GetMetrics() + if err != nil { + return nil, err + } + + if len(values) != 1 { + return nil, fmt.Errorf("expected to only get one metric value, got %d", len(values)) + } + + // get current replicas for the targeted scale object. This is used to + // calculate an average metric instead of total. 
+ // targetAverageValue will be available in Kubernetes v1.12 + // https://github.com/kubernetes/kubernetes/pull/64097 + replicas, err := targetRefReplicas(c.client, c.hpa) + if err != nil { + return nil, err + } + + if replicas < 1 { + return nil, fmt.Errorf("unable to get average value for %d replicas", replicas) + } + + value := values[0] + avgValue := float64(value.Custom.Value.MilliValue()) / float64(replicas) + value.Custom.Value = *resource.NewMilliQuantity(int64(avgValue), resource.DecimalSI) + + return []CollectedMetric{value}, nil +} + +// Interval returns the interval at which the collector should run. +func (c *SkipperCollector) Interval() time.Duration { + return c.interval +} + +func targetRefReplicas(client kubernetes.Interface, hpa *autoscalingv2beta1.HorizontalPodAutoscaler) (int32, error) { + var replicas int32 + switch hpa.Spec.ScaleTargetRef.Kind { + case "Deployment": + deployment, err := client.AppsV1().Deployments(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + if err != nil { + return 0, err + } + replicas = deployment.Status.ReadyReplicas + case "StatefulSet": + sts, err := client.AppsV1().StatefulSets(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + if err != nil { + return 0, err + } + replicas = sts.Status.ReadyReplicas + } + + return replicas, nil +} diff --git a/pkg/provider/hpa.go b/pkg/provider/hpa.go new file mode 100644 index 0000000..daaa41e --- /dev/null +++ b/pkg/provider/hpa.go @@ -0,0 +1,320 @@ +package provider + +import ( + "context" + "reflect" + "sync" + "time" + + "github.com/golang/glog" + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider" + "github.com/mikkeloscar/kube-metrics-adapter/pkg/collector" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/metrics/pkg/apis/custom_metrics" + 
"k8s.io/metrics/pkg/apis/external_metrics" +) + +type objectCollector struct { + ObjectReference *autoscalingv2beta1.CrossVersionObjectReference +} + +// HPAProvider is a base provider for initializing metric collectors based on +// HPA resources. +type HPAProvider struct { + client kubernetes.Interface + interval time.Duration + collectorScheduler *CollectorScheduler + collectorInterval time.Duration + metricSink chan metricCollection + hpaCache map[resourceReference]autoscalingv2beta1.HorizontalPodAutoscaler + metricStore *MetricStore + collectorFactory *collector.CollectorFactory +} + +// metricCollection is a container for sending collected metrics across a +// channel. +type metricCollection struct { + Values []collector.CollectedMetric + Error error +} + +// NewHPAProvider initializes a new HPAProvider. +func NewHPAProvider(client kubernetes.Interface, interval, collectorInterval time.Duration, collectorFactory *collector.CollectorFactory) *HPAProvider { + metricsc := make(chan metricCollection) + return &HPAProvider{ + client: client, + interval: interval, + collectorInterval: collectorInterval, + metricSink: metricsc, + metricStore: NewMetricStore(), + collectorFactory: collectorFactory, + } +} + +// Run runs the HPA resource discovery and metric collection. +func (p *HPAProvider) Run(ctx context.Context) { + // initialize collector table + p.collectorScheduler = NewCollectorScheduler(ctx, p.metricSink) + + go p.collectMetrics(ctx) + + for { + err := p.updateHPAs() + if err != nil { + glog.Error(err) + } + + select { + case <-time.After(p.interval): + case <-ctx.Done(): + glog.Info("Stopped HPA provider.") + return + } + } +} + +// updateHPAs discovers all HPA resources and sets up metric collectors for new +// HPAs. 
+func (p *HPAProvider) updateHPAs() error { + glog.Info("Looking for HPAs") + + hpas, err := p.client.AutoscalingV2beta1().HorizontalPodAutoscalers(metav1.NamespaceAll).List(metav1.ListOptions{}) + if err != nil { + return err + } + + newHPACache := make(map[resourceReference]autoscalingv2beta1.HorizontalPodAutoscaler, len(hpas.Items)) + + newHPAs := 0 + + for _, hpa := range hpas.Items { + hpa := hpa + resourceRef := resourceReference{ + Name: hpa.Name, + Namespace: hpa.Namespace, + } + + if cachedHPA, ok := p.hpaCache[resourceRef]; !ok || !equalHPA(cachedHPA, hpa) { + metricConfigs, err := collector.ParseHPAMetrics(&hpa) + if err != nil { + glog.Errorf("Failed to parse HPA metrics: %v", err) + continue + } + + cache := true + for _, config := range metricConfigs { + interval := config.Interval + if interval == 0 { + interval = p.collectorInterval + } + + collector, err := p.collectorFactory.NewCollector(&hpa, config, interval) + if err != nil { + // TODO: log and send event + glog.Errorf("Failed to create new metrics collector: %v", err) + cache = false + continue + } + + glog.Infof("Adding new metrics collector: %T", collector) + p.collectorScheduler.Add(resourceRef, config.MetricTypeName, collector) + } + newHPAs++ + + // if we get an error setting up the collectors for the + // HPA, don't cache it, but try again later. + if !cache { + continue + } + } + + newHPACache[resourceRef] = hpa + } + + for ref := range p.hpaCache { + if _, ok := newHPACache[ref]; ok { + continue + } + + glog.V(2).Infof("Removing previously scheduled metrics collector: %s", ref) + p.collectorScheduler.Remove(ref) + } + + glog.Infof("Found %d new/updated HPA(s)", newHPAs) + p.hpaCache = newHPACache + return nil +} + +// equalHPA returns true if two HPAs are identical (apart from their status). +func equalHPA(a, b autoscalingv2beta1.HorizontalPodAutoscaler) bool { + // reset resource version to not compare it since this will change + // whenever the status of the object is updated. 
We only want to + // compare the metadata and the spec. + a.ObjectMeta.ResourceVersion = "" + b.ObjectMeta.ResourceVersion = "" + return reflect.DeepEqual(a.ObjectMeta, b.ObjectMeta) && reflect.DeepEqual(a.Spec, b.Spec) +} + +// collectMetrics collects all metrics from collectors and manages a central +// metric store. +func (p *HPAProvider) collectMetrics(ctx context.Context) { + // run garbage collection every 10 minutes + go func(ctx context.Context) { + for { + select { + case <-time.After(10 * time.Minute): + p.metricStore.RemoveExpired() + case <-ctx.Done(): + glog.Info("Stopped metrics store garbage collection.") + return + } + } + }(ctx) + + for { + select { + case collection := <-p.metricSink: + if collection.Error != nil { + glog.Errorf("Failed to collect metrics: %v", collection.Error) + } + + glog.Infof("Collected %d new metric(s)", len(collection.Values)) + for _, value := range collection.Values { + switch value.Type { + case autoscalingv2beta1.ObjectMetricSourceType, autoscalingv2beta1.PodsMetricSourceType: + glog.Infof("Collected new custom metric '%s' (%s) for %s %s/%s", + value.Custom.MetricName, + value.Custom.Value.String(), + value.Custom.DescribedObject.Kind, + value.Custom.DescribedObject.Namespace, + value.Custom.DescribedObject.Name, + ) + case autoscalingv2beta1.ExternalMetricSourceType: + glog.Infof("Collected new external metric '%s' (%s) [%s]", + value.External.MetricName, + value.External.Value.String(), + labels.Set(value.External.MetricLabels).String(), + ) + } + p.metricStore.Insert(value) + } + case <-ctx.Done(): + glog.Info("Stopped metrics collection.") + return + } + } +} + +// GetMetricByName gets a single metric by name. 
+func (p *HPAProvider) GetMetricByName(name types.NamespacedName, info provider.CustomMetricInfo) (*custom_metrics.MetricValue, error) {
+	metric := p.metricStore.GetMetricsByName(name, info)
+	if metric == nil {
+		return nil, provider.NewMetricNotFoundForError(info.GroupResource, info.Metric, name.Name)
+	}
+	return metric, nil
+}
+
+// GetMetricBySelector returns metrics for namespaced resources by
+// label selector.
+func (p *HPAProvider) GetMetricBySelector(namespace string, selector labels.Selector, info provider.CustomMetricInfo) (*custom_metrics.MetricValueList, error) {
+	return p.metricStore.GetMetricsBySelector(namespace, selector, info), nil
+}
+
+// ListAllMetrics lists all available metrics from the provider.
+func (p *HPAProvider) ListAllMetrics() []provider.CustomMetricInfo {
+	return p.metricStore.ListAllMetrics()
+}
+
+func (p *HPAProvider) GetExternalMetric(namespace string, metricSelector labels.Selector, info provider.ExternalMetricInfo) (*external_metrics.ExternalMetricValueList, error) {
+	return p.metricStore.GetExternalMetric(namespace, metricSelector, info)
+}
+
+func (p *HPAProvider) ListAllExternalMetrics() []provider.ExternalMetricInfo {
+	return p.metricStore.ListAllExternalMetrics()
+}
+
+type resourceReference struct {
+	Name      string
+	Namespace string
+}
+
+// CollectorScheduler is a scheduler for running metric collection jobs.
+// It keeps track of all running collectors and stops them if they are to be
+// removed.
+type CollectorScheduler struct {
+	ctx        context.Context
+	table      map[resourceReference]map[collector.MetricTypeName]context.CancelFunc
+	metricSink chan<- metricCollection
+	sync.RWMutex
+}
+
+// NewCollectorScheduler initializes a new CollectorScheduler.
+func NewCollectorScheduler(ctx context.Context, metricsc chan<- metricCollection) *CollectorScheduler {
+	return &CollectorScheduler{
+		ctx:        ctx,
+		table:      map[resourceReference]map[collector.MetricTypeName]context.CancelFunc{},
+		metricSink: metricsc,
+	}
+}
+
+// Add adds a new collector to the collector scheduler. Once the collector is
+// added it will be started to collect metrics.
+func (t *CollectorScheduler) Add(resourceRef resourceReference, typeName collector.MetricTypeName, metricCollector collector.Collector) {
+	t.Lock()
+	defer t.Unlock()
+
+	collectors, ok := t.table[resourceRef]
+	if !ok {
+		collectors = map[collector.MetricTypeName]context.CancelFunc{}
+		t.table[resourceRef] = collectors
+	}
+
+	if cancelCollector, ok := collectors[typeName]; ok {
+		// stop old collector
+		cancelCollector()
+	}
+
+	ctx, cancel := context.WithCancel(t.ctx)
+	collectors[typeName] = cancel
+
+	// start runner for new collector
+	go collectorRunner(ctx, metricCollector, t.metricSink)
+}
+
+// collectorRunner runs a collector at the desired interval. If the passed
+// context is canceled the collection will be stopped.
+func collectorRunner(ctx context.Context, collector collector.Collector, metricsc chan<- metricCollection) {
+	for {
+		values, err := collector.GetMetrics()
+
+		metricsc <- metricCollection{
+			Values: values,
+			Error:  err,
+		}
+
+		select {
+		case <-time.After(collector.Interval()):
+		case <-ctx.Done():
+			glog.V(2).Infof("stopping collector runner...")
+			return
+		}
+	}
+}
+
+// Remove removes a collector from the Collector scheduler. The collector is
+// stopped before it's removed.
+func (t *CollectorScheduler) Remove(resourceRef resourceReference) { + t.Lock() + defer t.Unlock() + + if collectors, ok := t.table[resourceRef]; ok { + for _, cancelCollector := range collectors { + cancelCollector() + } + delete(t.table, resourceRef) + } +} diff --git a/pkg/provider/metric_store.go b/pkg/provider/metric_store.go new file mode 100644 index 0000000..226430d --- /dev/null +++ b/pkg/provider/metric_store.go @@ -0,0 +1,311 @@ +package provider + +import ( + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider" + "github.com/mikkeloscar/kube-metrics-adapter/pkg/collector" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/metrics/pkg/apis/custom_metrics" + "k8s.io/metrics/pkg/apis/external_metrics" +) + +// customMetricsStoredMetric is a wrapper around custom_metrics.MetricValue with a TTL used +// to clean up stale metrics from the customMetricsStore. +type customMetricsStoredMetric struct { + Value custom_metrics.MetricValue + Labels map[string]string + TTL time.Time +} + +type externalMetricsStoredMetric struct { + Value external_metrics.ExternalMetricValue + TTL time.Time +} + +// MetricStore is a simple in-memory Metrics Store for HPA metrics. +type MetricStore struct { + customMetricsStore map[string]map[schema.GroupResource]map[string]map[string]customMetricsStoredMetric + externalMetricsStore map[string]map[string]externalMetricsStoredMetric + sync.RWMutex +} + +// NewMetricStore initializes an empty Metrics Store. +func NewMetricStore() *MetricStore { + return &MetricStore{ + customMetricsStore: make(map[string]map[schema.GroupResource]map[string]map[string]customMetricsStoredMetric, 0), + externalMetricsStore: make(map[string]map[string]externalMetricsStoredMetric, 0), + } +} + +// Insert inserts a collected metric into the metric customMetricsStore. 
+func (s *MetricStore) Insert(value collector.CollectedMetric) { + switch value.Type { + case autoscalingv2beta1.ObjectMetricSourceType, autoscalingv2beta1.PodsMetricSourceType: + s.insertCustomMetric(value.Custom, value.Labels) + case autoscalingv2beta1.ExternalMetricSourceType: + s.insertExternalMetric(value.External) + } +} + +// insertCustomMetric inserts a custom metric plus labels into the store. +func (s *MetricStore) insertCustomMetric(value custom_metrics.MetricValue, labels map[string]string) { + s.Lock() + defer s.Unlock() + + // TODO: handle this mapping nicer + var groupResource schema.GroupResource + switch value.DescribedObject.Kind { + case "Pod": + groupResource = schema.GroupResource{ + Resource: "pods", + } + case "Ingress": + groupResource = schema.GroupResource{ + Resource: "ingresses", + Group: "extensions", + } + } + + metric := customMetricsStoredMetric{ + Value: value, + Labels: labels, + TTL: time.Now().UTC().Add(15 * time.Minute), // TODO: make TTL configurable + } + + metrics, ok := s.customMetricsStore[value.MetricName] + if !ok { + s.customMetricsStore[value.MetricName] = map[schema.GroupResource]map[string]map[string]customMetricsStoredMetric{ + groupResource: map[string]map[string]customMetricsStoredMetric{ + value.DescribedObject.Namespace: map[string]customMetricsStoredMetric{ + value.DescribedObject.Name: metric, + }, + }, + } + return + } + + group, ok := metrics[groupResource] + if !ok { + metrics[groupResource] = map[string]map[string]customMetricsStoredMetric{ + value.DescribedObject.Namespace: map[string]customMetricsStoredMetric{ + value.DescribedObject.Name: metric, + }, + } + } + + namespace, ok := group[value.DescribedObject.Namespace] + if !ok { + group[value.DescribedObject.Namespace] = map[string]customMetricsStoredMetric{ + value.DescribedObject.Name: metric, + } + } + + namespace[value.DescribedObject.Name] = metric +} + +// insertExternalMetric inserts an external metric into the store. 
+func (s *MetricStore) insertExternalMetric(metric external_metrics.ExternalMetricValue) { + s.Lock() + defer s.Unlock() + + storedMetric := externalMetricsStoredMetric{ + Value: metric, + TTL: time.Now().UTC().Add(15 * time.Minute), // TODO: make TTL configurable + } + + labelsKey := hashLabelMap(metric.MetricLabels) + + if metrics, ok := s.externalMetricsStore[metric.MetricName]; ok { + metrics[labelsKey] = storedMetric + } else { + s.externalMetricsStore[metric.MetricName] = map[string]externalMetricsStoredMetric{ + labelsKey: storedMetric, + } + } +} + +// hashLabelMap converts a map into a sorted string to provide a stable +// representation of a labels map. +func hashLabelMap(labels map[string]string) string { + strLabels := make([]string, 0, len(labels)) + for k, v := range labels { + strLabels = append(strLabels, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(strLabels) + return strings.Join(strLabels, ",") +} + +// GetMetricsBySelector gets metric from the customMetricsStore using a label selector to +// find metrics for matching resources. 
+func (s *MetricStore) GetMetricsBySelector(namespace string, selector labels.Selector, info provider.CustomMetricInfo) *custom_metrics.MetricValueList { + matchedMetrics := make([]custom_metrics.MetricValue, 0) + + s.RLock() + defer s.RUnlock() + + metrics, ok := s.customMetricsStore[info.Metric] + if !ok { + return nil + } + + group, ok := metrics[info.GroupResource] + if !ok { + return nil + } + + if !info.Namespaced { + for _, metricMap := range group { + for _, metric := range metricMap { + if selector.Matches(labels.Set(metric.Labels)) { + matchedMetrics = append(matchedMetrics, metric.Value) + } + } + } + } else if metricMap, ok := group[namespace]; ok { + for _, metric := range metricMap { + if selector.Matches(labels.Set(metric.Labels)) { + matchedMetrics = append(matchedMetrics, metric.Value) + } + } + } + + return &custom_metrics.MetricValueList{Items: matchedMetrics} +} + +// GetMetricsByName looks up metrics in the customMetricsStore by resource name. +func (s *MetricStore) GetMetricsByName(name types.NamespacedName, info provider.CustomMetricInfo) *custom_metrics.MetricValue { + s.RLock() + defer s.RUnlock() + + metrics, ok := s.customMetricsStore[info.Metric] + if !ok { + return nil + } + + group, ok := metrics[info.GroupResource] + if !ok { + return nil + } + + if !info.Namespaced { + // TODO: rethink no namespace queries + for _, metricMap := range group { + if metric, ok := metricMap[name.Name]; ok { + return &metric.Value + } + } + } else if metricMap, ok := group[name.Namespace]; ok { + if metric, ok := metricMap[name.Name]; ok { + return &metric.Value + } + } + + return nil +} + +// ListAllMetrics lists all custom metrics in the Metrics Store. 
+func (s *MetricStore) ListAllMetrics() []provider.CustomMetricInfo { + s.RLock() + defer s.RUnlock() + + metrics := make([]provider.CustomMetricInfo, 0, len(s.customMetricsStore)) + + for metricName, customMetricsStoredMetrics := range s.customMetricsStore { + for groupResource, group := range customMetricsStoredMetrics { + for namespace := range group { + metric := provider.CustomMetricInfo{ + GroupResource: groupResource, + Namespaced: namespace != "", + Metric: metricName, + } + metrics = append(metrics, metric) + } + } + } + + return metrics +} + +// GetExternalMetric gets external metric from the store by metric name and +// selector. +func (s *MetricStore) GetExternalMetric(namespace string, selector labels.Selector, info provider.ExternalMetricInfo) (*external_metrics.ExternalMetricValueList, error) { + matchedMetrics := make([]external_metrics.ExternalMetricValue, 0) + + s.RLock() + defer s.RUnlock() + + if metrics, ok := s.externalMetricsStore[info.Metric]; ok { + for _, metric := range metrics { + if selector.Matches(labels.Set(metric.Value.MetricLabels)) { + matchedMetrics = append(matchedMetrics, metric.Value) + } + } + } + + return &external_metrics.ExternalMetricValueList{Items: matchedMetrics}, nil +} + +// ListAllExternalMetrics lists all external metrics in the Metrics Store. +func (s *MetricStore) ListAllExternalMetrics() []provider.ExternalMetricInfo { + s.RLock() + defer s.RUnlock() + + metricsInfo := make([]provider.ExternalMetricInfo, 0, len(s.externalMetricsStore)) + + for metricName := range s.externalMetricsStore { + info := provider.ExternalMetricInfo{ + Metric: metricName, + } + metricsInfo = append(metricsInfo, info) + } + return metricsInfo +} + +// RemoveExpired removes expired metrics from the Metrics Store. A metric is +// considered expired if its TTL is before time.Now(). 
+func (s *MetricStore) RemoveExpired() { + s.Lock() + defer s.Unlock() + + // cleanup custom metrics + for metricName, groups := range s.customMetricsStore { + for group, namespaces := range groups { + for namespace, resources := range namespaces { + for resource, metric := range resources { + if metric.TTL.Before(time.Now().UTC()) { + delete(resources, resource) + } + } + if len(resources) == 0 { + delete(namespaces, namespace) + } + } + if len(namespaces) == 0 { + delete(groups, group) + } + } + if len(groups) == 0 { + delete(s.customMetricsStore, metricName) + } + } + + // cleanup external metrics + for metricName, metrics := range s.externalMetricsStore { + for k, metric := range metrics { + if metric.TTL.Before(time.Now().UTC()) { + delete(metrics, k) + } + } + if len(metrics) == 0 { + delete(s.externalMetricsStore, metricName) + } + } +} diff --git a/pkg/server/start.go b/pkg/server/start.go new file mode 100644 index 0000000..542d034 --- /dev/null +++ b/pkg/server/start.go @@ -0,0 +1,213 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go/aws" + "time" + + "github.com/aws/aws-sdk-go/aws/session" + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd/server" + "github.com/mikkeloscar/kube-metrics-adapter/pkg/collector" + "github.com/mikkeloscar/kube-metrics-adapter/pkg/provider" + "github.com/spf13/cobra" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + defaultClientGOTimeout = 30 * time.Second +) + +// NewCommandStartAdapterServer provides a CLI handler for 'start adapter server' command +func NewCommandStartAdapterServer(stopCh <-chan struct{}) *cobra.Command { + baseOpts := server.NewCustomMetricsAdapterServerOptions() + o := AdapterServerOptions{ + CustomMetricsAdapterServerOptions: baseOpts, + EnableCustomMetricsAPI: true, + EnableExternalMetricsAPI: true, + } + + cmd := &cobra.Command{ + Short: "Launch the custom metrics API adapter server", + Long: "Launch the custom metrics API adapter server", + RunE: func(c *cobra.Command, args []string) error { + if err := o.Complete(); err != nil { + return err + } + if err := o.Validate(args); err != nil { + return err + } + if err := o.RunCustomMetricsAdapterServer(stopCh); err != nil { + return err + } + return nil + }, + } + + flags := cmd.Flags() + o.SecureServing.AddFlags(flags) + o.Authentication.AddFlags(flags) + o.Authorization.AddFlags(flags) + o.Features.AddFlags(flags) + + flags.StringVar(&o.RemoteKubeConfigFile, "lister-kubeconfig", o.RemoteKubeConfigFile, ""+ + "kubeconfig file pointing at the 'core' kubernetes server with enough rights to list "+ + "any described objects") + flags.BoolVar(&o.EnableCustomMetricsAPI, "enable-custom-metrics-api", o.EnableCustomMetricsAPI, ""+ + "whether to enable Custom Metrics API") + flags.BoolVar(&o.EnableExternalMetricsAPI, "enable-external-metrics-api", o.EnableExternalMetricsAPI, ""+ + "whether to enable External 
Metrics API") + flags.StringVar(&o.PrometheusServer, "prometheus-server", o.PrometheusServer, ""+ + "url of prometheus server to query") + flags.BoolVar(&o.SkipperIngressMetrics, "skipper-ingress-metrics", o.SkipperIngressMetrics, ""+ + "whether to enable skipper ingress metrics") + flags.BoolVar(&o.AWSExternalMetrics, "aws-external-metrics", o.AWSExternalMetrics, ""+ + "whether to enable AWS external metrics") + flags.StringSliceVar(&o.AWSRegions, "aws-region", o.AWSRegions, "the AWS regions which should be monitored. eg: eu-central, eu-west-1") + + return cmd +} + +func (o AdapterServerOptions) RunCustomMetricsAdapterServer(stopCh <-chan struct{}) error { + config, err := o.Config() + if err != nil { + return err + } + + var clientConfig *rest.Config + if len(o.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: o.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + clientConfig, err = rest.InClusterConfig() + } + if err != nil { + return fmt.Errorf("unable to construct lister client config to initialize provider: %v", err) + } + + clientConfig.Timeout = defaultClientGOTimeout + + client, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return fmt.Errorf("failed to initialize new client: %v", err) + } + + collectorFactory := collector.NewCollectorFactory() + + if o.PrometheusServer != "" { + promPlugin, err := collector.NewPrometheusCollectorPlugin(client, o.PrometheusServer) + if err != nil { + return fmt.Errorf("failed to initialize prometheus collector plugin: %v", err) + } + + err = collectorFactory.RegisterObjectCollector("", "prometheus", promPlugin) + if err != nil { + return fmt.Errorf("failed to register prometheus collector plugin: %v", err) + } + + // skipper collector can only be enabled if prometheus is. 
+ if o.SkipperIngressMetrics { + skipperPlugin, err := collector.NewSkipperCollectorPlugin(client, promPlugin) + if err != nil { + return fmt.Errorf("failed to initialize skipper collector plugin: %v", err) + } + + err = collectorFactory.RegisterObjectCollector("Ingress", "", skipperPlugin) + if err != nil { + return fmt.Errorf("failed to register skipper collector plugin: %v", err) + } + } + } + + // register generic pod collector + err = collectorFactory.RegisterPodsCollector("", collector.NewPodCollectorPlugin(client)) + if err != nil { + return fmt.Errorf("failed to register skipper collector plugin: %v", err) + } + + awsSessions := make(map[string]*session.Session, len(o.AWSRegions)) + for _, region := range o.AWSRegions { + awsSessions[region], err = session.NewSession(&aws.Config{Region: aws.String(region)}) + if err != nil { + return fmt.Errorf("unabled to create aws session for region: %s", region) + } + } + + if o.AWSExternalMetrics { + collectorFactory.RegisterExternalCollector([]string{collector.AWSSQSQueueLengthMetric}, collector.NewAWSCollectorPlugin(awsSessions)) + } + + hpaProvider := provider.NewHPAProvider(client, 30*time.Second, 1*time.Minute, collectorFactory) + + // convert stop channel to a context + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-stopCh + cancel() + }() + + go hpaProvider.Run(ctx) + + customMetricsProvider := hpaProvider + externalMetricsProvider := hpaProvider + + // var externalMetricsProvider := nil + if !o.EnableCustomMetricsAPI { + customMetricsProvider = nil + } + if !o.EnableExternalMetricsAPI { + externalMetricsProvider = nil + } + + informer := informers.NewSharedInformerFactory(client, 0) + + // In this example, the same provider implements both Custom Metrics API and External Metrics API + server, err := config.Complete(informer).New("kube-metrics-adapter", customMetricsProvider, externalMetricsProvider) + if err != nil { + return err + } + return 
server.GenericAPIServer.PrepareRun().Run(ctx.Done()) +} + +type AdapterServerOptions struct { + *server.CustomMetricsAdapterServerOptions + + // RemoteKubeConfigFile is the config used to list pods from the master API server + RemoteKubeConfigFile string + // EnableCustomMetricsAPI switches on sample apiserver for Custom Metrics API + EnableCustomMetricsAPI bool + // EnableExternalMetricsAPI switches on sample apiserver for External Metrics API + EnableExternalMetricsAPI bool + // PrometheusServer enables prometheus queries to the specified + // server. + PrometheusServer string + // SkipperIngressMetrics switches on support for skipper ingress based + // metric collection. + SkipperIngressMetrics bool + // AWSExternalMetrics switches on support for getting external metrics + // from AWS. + AWSExternalMetrics bool + // AWSRegions the AWS regions which are supported for monitoring. + AWSRegions []string +}