Merge pull request #549 from zalando-incubator/scaling-schedule-status

Add scaling schedule controller for updating status
This commit is contained in:
Mikkel Oscar Lyderik Larsen
2023-04-12 11:51:53 +02:00
committed by GitHub
15 changed files with 793 additions and 155 deletions

View File

@ -15,7 +15,12 @@ spec:
singular: clusterscalingschedule
scope: Cluster
versions:
- name: v1
- additionalPrinterColumns:
- description: Whether one or more schedules are currently active.
jsonPath: .status.active
name: Active
type: boolean
name: v1
schema:
openAPIV3Schema:
description: ClusterScalingSchedule describes a cluster scoped time based
@ -119,11 +124,22 @@ spec:
required:
- schedules
type: object
status:
description: ScalingScheduleStatus is the status section of the ScalingSchedule.
properties:
active:
default: false
description: Active is true if at least one of the schedules defined
in the scaling schedule is currently active.
type: boolean
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""

View File

@ -97,6 +97,13 @@ rules:
- get
- list
- watch
- apiGroups:
- zalando.org
resources:
- clusterscalingschedules/status
- scalingschedules/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

View File

@ -9,13 +9,20 @@ metadata:
spec:
group: zalando.org
names:
categories:
- all
kind: ScalingSchedule
listKind: ScalingScheduleList
plural: scalingschedules
singular: scalingschedule
scope: Namespaced
versions:
- name: v1
- additionalPrinterColumns:
- description: Whether one or more schedules are currently active.
jsonPath: .status.active
name: Active
type: boolean
name: v1
schema:
openAPIV3Schema:
description: ScalingSchedule describes a namespaced time based metric to be
@ -119,11 +126,22 @@ spec:
required:
- schedules
type: object
status:
description: ScalingScheduleStatus is the status section of the ScalingSchedule.
properties:
active:
default: false
description: Active is true if at least one of the schedules defined
in the scaling schedule is currently active.
type: boolean
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""

2
go.mod
View File

@ -13,6 +13,7 @@ require (
github.com/zalando-incubator/cluster-lifecycle-manager v0.0.0-20230223125308-aff25efae501
golang.org/x/net v0.8.0
golang.org/x/oauth2 v0.6.0
golang.org/x/sync v0.1.0
k8s.io/api v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/apiserver v0.23.0
@ -93,7 +94,6 @@ require (
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect

View File

@ -13,11 +13,16 @@ import (
// ScalingSchedule describes a namespaced time based metric to be used
// in autoscaling operations.
// +k8s:deepcopy-gen=true
// +kubebuilder:resource:categories=all
// +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active`,description="Whether one or more schedules are currently active."
// +kubebuilder:subresource:status
type ScalingSchedule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScalingScheduleSpec `json:"spec"`
// +optional
Status ScalingScheduleStatus `json:"status"`
}
// +genclient
@ -28,12 +33,17 @@ type ScalingSchedule struct {
// ClusterScalingSchedule describes a cluster scoped time based metric
// to be used in autoscaling operations.
// +k8s:deepcopy-gen=true
// +kubebuilder:resource:categories=all
// +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active`,description="Whether one or more schedules are currently active."
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster
type ClusterScalingSchedule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScalingScheduleSpec `json:"spec"`
// +optional
Status ScalingScheduleStatus `json:"status"`
}
// ScalingScheduleSpec is the spec part of the ScalingSchedule.
@ -125,6 +135,16 @@ const (
// +kubebuilder:validation:Format="date-time"
type ScheduleDate string
// ScalingScheduleStatus is the status section of the ScalingSchedule.
// +k8s:deepcopy-gen=true
type ScalingScheduleStatus struct {
// Active is true if at least one of the schedules defined in the
// scaling schedule is currently active.
// +kubebuilder:default:=false
// +optional
Active bool `json:"active"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ScalingScheduleList is a list of namespaced scaling schedules.

View File

@ -31,6 +31,7 @@ func (in *ClusterScalingSchedule) DeepCopyInto(out *ClusterScalingSchedule) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
@ -91,6 +92,7 @@ func (in *ScalingSchedule) DeepCopyInto(out *ScalingSchedule) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
@ -173,6 +175,22 @@ func (in *ScalingScheduleSpec) DeepCopy() *ScalingScheduleSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScalingScheduleStatus) DeepCopyInto(out *ScalingScheduleStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingScheduleStatus.
func (in *ScalingScheduleStatus) DeepCopy() *ScalingScheduleStatus {
if in == nil {
return nil
}
out := new(ScalingScheduleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Schedule) DeepCopyInto(out *Schedule) {
*out = *in

View File

@ -40,6 +40,7 @@ type ClusterScalingSchedulesGetter interface {
type ClusterScalingScheduleInterface interface {
Create(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.CreateOptions) (*v1.ClusterScalingSchedule, error)
Update(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*v1.ClusterScalingSchedule, error)
UpdateStatus(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (*v1.ClusterScalingSchedule, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterScalingSchedule, error)
@ -128,6 +129,21 @@ func (c *clusterScalingSchedules) Update(ctx context.Context, clusterScalingSche
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterScalingSchedules) UpdateStatus(ctx context.Context, clusterScalingSchedule *v1.ClusterScalingSchedule, opts metav1.UpdateOptions) (result *v1.ClusterScalingSchedule, err error) {
result = &v1.ClusterScalingSchedule{}
err = c.client.Put().
Resource("clusterscalingschedules").
Name(clusterScalingSchedule.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterScalingSchedule).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterScalingSchedule and deletes it. Returns an error if one occurs.
func (c *clusterScalingSchedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().

View File

@ -96,6 +96,17 @@ func (c *FakeClusterScalingSchedules) Update(ctx context.Context, clusterScaling
return obj.(*zalandoorgv1.ClusterScalingSchedule), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeClusterScalingSchedules) UpdateStatus(ctx context.Context, clusterScalingSchedule *zalandoorgv1.ClusterScalingSchedule, opts v1.UpdateOptions) (*zalandoorgv1.ClusterScalingSchedule, error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateSubresourceAction(clusterscalingschedulesResource, "status", clusterScalingSchedule), &zalandoorgv1.ClusterScalingSchedule{})
if obj == nil {
return nil, err
}
return obj.(*zalandoorgv1.ClusterScalingSchedule), err
}
// Delete takes name of the clusterScalingSchedule and deletes it. Returns an error if one occurs.
func (c *FakeClusterScalingSchedules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.

View File

@ -102,6 +102,18 @@ func (c *FakeScalingSchedules) Update(ctx context.Context, scalingSchedule *zala
return obj.(*zalandoorgv1.ScalingSchedule), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeScalingSchedules) UpdateStatus(ctx context.Context, scalingSchedule *zalandoorgv1.ScalingSchedule, opts v1.UpdateOptions) (*zalandoorgv1.ScalingSchedule, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(scalingschedulesResource, "status", c.ns, scalingSchedule), &zalandoorgv1.ScalingSchedule{})
if obj == nil {
return nil, err
}
return obj.(*zalandoorgv1.ScalingSchedule), err
}
// Delete takes name of the scalingSchedule and deletes it. Returns an error if one occurs.
func (c *FakeScalingSchedules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.

View File

@ -40,6 +40,7 @@ type ScalingSchedulesGetter interface {
type ScalingScheduleInterface interface {
Create(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.CreateOptions) (*v1.ScalingSchedule, error)
Update(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (*v1.ScalingSchedule, error)
UpdateStatus(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (*v1.ScalingSchedule, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ScalingSchedule, error)
@ -135,6 +136,22 @@ func (c *scalingSchedules) Update(ctx context.Context, scalingSchedule *v1.Scali
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *scalingSchedules) UpdateStatus(ctx context.Context, scalingSchedule *v1.ScalingSchedule, opts metav1.UpdateOptions) (result *v1.ScalingSchedule, err error) {
result = &v1.ScalingSchedule{}
err = c.client.Put().
Namespace(c.ns).
Resource("scalingschedules").
Name(scalingSchedule.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(scalingSchedule).
Do(ctx).
Into(result)
return
}
// Delete takes name of the scalingSchedule and deletes it. Returns an error if one occurs.
func (c *scalingSchedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().

View File

@ -7,32 +7,13 @@ import (
"time"
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
scheduledscaling "github.com/zalando-incubator/kube-metrics-adapter/pkg/controller/scheduledscaling"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/metrics/pkg/apis/custom_metrics"
)
const (
// The format used by v1.SchedulePeriod.StartTime. 15:04 are
// the defined reference time in time.Format.
hourColonMinuteLayout = "15:04"
// The default timezone used in v1.SchedulePeriod if none is
// defined.
// TODO(jonathanbeber): it should be configurable.
defaultTimeZone = "Europe/Berlin"
)
var days = map[v1.ScheduleDay]time.Weekday{
v1.SundaySchedule: time.Sunday,
v1.MondaySchedule: time.Monday,
v1.TuesdaySchedule: time.Tuesday,
v1.WednesdaySchedule: time.Wednesday,
v1.ThursdaySchedule: time.Thursday,
v1.FridaySchedule: time.Friday,
v1.SaturdaySchedule: time.Saturday,
}
var (
// ErrScalingScheduleNotFound is returned when a item referenced in
// the HPA config is not in the ScalingScheduleCollectorPlugin.store.
@ -49,15 +30,6 @@ var (
// be an ClusterScalingSchedule but the type assertion failed. When
// returned the type assertion to ScalingSchedule failed too.
ErrNotClusterScalingScheduleFound = errors.New("error converting returned object to ClusterScalingSchedule")
// ErrInvalidScheduleDate is returned when the v1.ScheduleDate is
// not a valid RFC3339 date. It shouldn't happen since the
// validation is done by the CRD.
ErrInvalidScheduleDate = errors.New("could not parse the specified schedule date, format is not RFC3339")
// ErrInvalidScheduleStartTime is returned when the
// v1.SchedulePeriod.StartTime is not in the format specified by
// hourColonMinuteLayout. It shouldn't happen since the validation
// is done by the CRD.
ErrInvalidScheduleStartTime = errors.New("could not parse the specified schedule period start time, format is not HH:MM")
)
// Now is the function that returns a time.Time object representing the
@ -82,6 +54,7 @@ type ScalingScheduleCollectorPlugin struct {
store Store
now Now
defaultScalingWindow time.Duration
defaultTimeZone string
rampSteps int
}
@ -91,25 +64,28 @@ type ClusterScalingScheduleCollectorPlugin struct {
store Store
now Now
defaultScalingWindow time.Duration
defaultTimeZone string
rampSteps int
}
// NewScalingScheduleCollectorPlugin initializes a new ScalingScheduleCollectorPlugin.
func NewScalingScheduleCollectorPlugin(store Store, now Now, defaultScalingWindow time.Duration, rampSteps int) (*ScalingScheduleCollectorPlugin, error) {
func NewScalingScheduleCollectorPlugin(store Store, now Now, defaultScalingWindow time.Duration, defaultTimeZone string, rampSteps int) (*ScalingScheduleCollectorPlugin, error) {
return &ScalingScheduleCollectorPlugin{
store: store,
now: now,
defaultScalingWindow: defaultScalingWindow,
defaultTimeZone: defaultTimeZone,
rampSteps: rampSteps,
}, nil
}
// NewClusterScalingScheduleCollectorPlugin initializes a new ClusterScalingScheduleCollectorPlugin.
func NewClusterScalingScheduleCollectorPlugin(store Store, now Now, defaultScalingWindow time.Duration, rampSteps int) (*ClusterScalingScheduleCollectorPlugin, error) {
func NewClusterScalingScheduleCollectorPlugin(store Store, now Now, defaultScalingWindow time.Duration, defaultTimeZone string, rampSteps int) (*ClusterScalingScheduleCollectorPlugin, error) {
return &ClusterScalingScheduleCollectorPlugin{
store: store,
now: now,
defaultScalingWindow: defaultScalingWindow,
defaultTimeZone: defaultTimeZone,
rampSteps: rampSteps,
}, nil
}
@ -118,14 +94,14 @@ func NewClusterScalingScheduleCollectorPlugin(store Store, now Now, defaultScali
// specified HPA. It's the only required method to implement the
// collector.CollectorPlugin interface.
func (c *ScalingScheduleCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
return NewScalingScheduleCollector(c.store, c.defaultScalingWindow, c.rampSteps, c.now, hpa, config, interval)
return NewScalingScheduleCollector(c.store, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now, hpa, config, interval)
}
// NewCollector initializes a new cluster wide scaling schedule
// collector from the specified HPA. It's the only required method to
// implement the collector.CollectorPlugin interface.
func (c *ClusterScalingScheduleCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {
return NewClusterScalingScheduleCollector(c.store, c.defaultScalingWindow, c.rampSteps, c.now, hpa, config, interval)
return NewClusterScalingScheduleCollector(c.store, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now, hpa, config, interval)
}
// ScalingScheduleCollector is a metrics collector for time based
@ -152,11 +128,12 @@ type scalingScheduleCollector struct {
interval time.Duration
config MetricConfig
defaultScalingWindow time.Duration
defaultTimeZone string
rampSteps int
}
// NewScalingScheduleCollector initializes a new ScalingScheduleCollector.
func NewScalingScheduleCollector(store Store, defaultScalingWindow time.Duration, rampSteps int, now Now, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*ScalingScheduleCollector, error) {
func NewScalingScheduleCollector(store Store, defaultScalingWindow time.Duration, defaultTimeZone string, rampSteps int, now Now, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*ScalingScheduleCollector, error) {
return &ScalingScheduleCollector{
scalingScheduleCollector{
store: store,
@ -167,13 +144,14 @@ func NewScalingScheduleCollector(store Store, defaultScalingWindow time.Duration
interval: interval,
config: *config,
defaultScalingWindow: defaultScalingWindow,
defaultTimeZone: defaultTimeZone,
rampSteps: rampSteps,
},
}, nil
}
// NewClusterScalingScheduleCollector initializes a new ScalingScheduleCollector.
func NewClusterScalingScheduleCollector(store Store, defaultScalingWindow time.Duration, rampSteps int, now Now, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*ClusterScalingScheduleCollector, error) {
func NewClusterScalingScheduleCollector(store Store, defaultScalingWindow time.Duration, defaultTimeZone string, rampSteps int, now Now, hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (*ClusterScalingScheduleCollector, error) {
return &ClusterScalingScheduleCollector{
scalingScheduleCollector{
store: store,
@ -184,6 +162,7 @@ func NewClusterScalingScheduleCollector(store Store, defaultScalingWindow time.D
interval: interval,
config: *config,
defaultScalingWindow: defaultScalingWindow,
defaultTimeZone: defaultTimeZone,
rampSteps: rampSteps,
},
}, nil
@ -203,7 +182,7 @@ func (c *ScalingScheduleCollector) GetMetrics() ([]CollectedMetric, error) {
if !ok {
return nil, ErrNotScalingScheduleFound
}
return calculateMetrics(scalingSchedule.Spec, c.defaultScalingWindow, c.rampSteps, c.now(), c.objectReference, c.metric)
return calculateMetrics(scalingSchedule.Spec, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now(), c.objectReference, c.metric)
}
// GetMetrics is the main implementation for collector.Collector interface
@ -236,7 +215,7 @@ func (c *ClusterScalingScheduleCollector) GetMetrics() ([]CollectedMetric, error
clusterScalingSchedule = v1.ClusterScalingSchedule(*scalingSchedule)
}
return calculateMetrics(clusterScalingSchedule.Spec, c.defaultScalingWindow, c.rampSteps, c.now(), c.objectReference, c.metric)
return calculateMetrics(clusterScalingSchedule.Spec, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now(), c.objectReference, c.metric)
}
// Interval returns the interval at which the collector should run.
@ -249,7 +228,7 @@ func (c *ClusterScalingScheduleCollector) Interval() time.Duration {
return c.interval
}
func calculateMetrics(spec v1.ScalingScheduleSpec, defaultScalingWindow time.Duration, rampSteps int, now time.Time, objectReference custom_metrics.ObjectReference, metric autoscalingv2.MetricIdentifier) ([]CollectedMetric, error) {
func calculateMetrics(spec v1.ScalingScheduleSpec, defaultScalingWindow time.Duration, defaultTimeZone string, rampSteps int, now time.Time, objectReference custom_metrics.ObjectReference, metric autoscalingv2.MetricIdentifier) ([]CollectedMetric, error) {
scalingWindowDuration := defaultScalingWindow
if spec.ScalingWindowDurationMinutes != nil {
scalingWindowDuration = time.Duration(*spec.ScalingWindowDurationMinutes) * time.Minute
@ -259,89 +238,12 @@ func calculateMetrics(spec v1.ScalingScheduleSpec, defaultScalingWindow time.Dur
}
value := int64(0)
var scheduledEndTime time.Time
for _, schedule := range spec.Schedules {
switch schedule.Type {
case v1.RepeatingSchedule:
location, err := time.LoadLocation(schedule.Period.Timezone)
if schedule.Period.Timezone == "" || err != nil {
location, err = time.LoadLocation(defaultTimeZone)
startTime, endTime, err := scheduledscaling.ScheduleStartEnd(now, schedule, defaultTimeZone)
if err != nil {
return nil, fmt.Errorf("unexpected error loading default location: %s", err.Error())
}
}
nowInLocation := now.In(location)
weekday := nowInLocation.Weekday()
for _, day := range schedule.Period.Days {
if days[day] == weekday {
parsedStartTime, err := time.Parse(hourColonMinuteLayout, schedule.Period.StartTime)
if err != nil {
return nil, ErrInvalidScheduleStartTime
}
scheduledTime := time.Date(
// v1.SchedulePeriod.StartTime can't define the
// year, month or day, so we compute it as the
// current date in the configured location.
nowInLocation.Year(),
nowInLocation.Month(),
nowInLocation.Day(),
// Hours and minute are configured in the
// v1.SchedulePeriod.StartTime.
parsedStartTime.Hour(),
parsedStartTime.Minute(),
parsedStartTime.Second(),
parsedStartTime.Nanosecond(),
location,
)
// If no end time was provided, set it to equal the start time
if schedule.Period.EndTime == "" {
scheduledEndTime = scheduledTime
} else {
parsedEndTime, err := time.Parse(hourColonMinuteLayout, schedule.Period.EndTime)
if err != nil {
return nil, ErrInvalidScheduleDate
}
scheduledEndTime = time.Date(
// v1.SchedulePeriod.StartTime can't define the
// year, month or day, so we compute it as the
// current date in the configured location.
nowInLocation.Year(),
nowInLocation.Month(),
nowInLocation.Day(),
// Hours and minute are configured in the
// v1.SchedulePeriod.StartTime.
parsedEndTime.Hour(),
parsedEndTime.Minute(),
parsedEndTime.Second(),
parsedEndTime.Nanosecond(),
location,
)
}
value = maxInt64(value, valueForEntry(now, scheduledTime, schedule.Duration(), scheduledEndTime, scalingWindowDuration, rampSteps, schedule.Value))
break
}
}
case v1.OneTimeSchedule:
scheduledTime, err := time.Parse(time.RFC3339, string(*schedule.Date))
if err != nil {
return nil, ErrInvalidScheduleDate
}
// If no end time was provided, set it to equal the start time
if schedule.EndDate == nil || string(*schedule.EndDate) == "" {
scheduledEndTime = scheduledTime
} else {
scheduledEndTime, err = time.Parse(time.RFC3339, string(*schedule.EndDate))
if err != nil {
return nil, ErrInvalidScheduleDate
}
}
value = maxInt64(value, valueForEntry(now, scheduledTime, schedule.Duration(), scheduledEndTime, scalingWindowDuration, rampSteps, schedule.Value))
return nil, err
}
value = maxInt64(value, valueForEntry(now, startTime, endTime, scalingWindowDuration, rampSteps, schedule.Value))
}
return []CollectedMetric{
@ -358,27 +260,17 @@ func calculateMetrics(spec v1.ScalingScheduleSpec, defaultScalingWindow time.Dur
}, nil
}
func valueForEntry(timestamp time.Time, startTime time.Time, entryDuration time.Duration, scheduledEndTime time.Time, scalingWindowDuration time.Duration, rampSteps int, value int64) int64 {
func valueForEntry(timestamp time.Time, startTime time.Time, endTime time.Time, scalingWindowDuration time.Duration, rampSteps int, value int64) int64 {
scaleUpStart := startTime.Add(-scalingWindowDuration)
var endTime time.Time
// Use either the defined end time/date or the start time/date + the
// duration, whichever is longer.
if startTime.Add(entryDuration).Before(scheduledEndTime) {
endTime = scheduledEndTime
} else {
endTime = startTime.Add(entryDuration)
}
scaleUpEnd := endTime.Add(scalingWindowDuration)
if between(timestamp, startTime, endTime) {
if scheduledscaling.Between(timestamp, startTime, endTime) {
return value
}
if between(timestamp, scaleUpStart, startTime) {
if scheduledscaling.Between(timestamp, scaleUpStart, startTime) {
return scaledValue(timestamp, scaleUpStart, scalingWindowDuration, rampSteps, value)
}
if between(timestamp, endTime, scaleUpEnd) {
if scheduledscaling.Between(timestamp, endTime, scaleUpEnd) {
return scaledValue(scaleUpEnd, timestamp, scalingWindowDuration, rampSteps, value)
}
return 0
@ -400,13 +292,6 @@ func scaledValue(timestamp time.Time, startTime time.Time, scalingWindowDuration
return int64(math.Floor(requiredPercentage*steps) * (float64(value) / steps))
}
func between(timestamp, start, end time.Time) bool {
if timestamp.Before(start) {
return false
}
return timestamp.Before(end)
}
func maxInt64(i1, i2 int64) int64 {
if i1 > i2 {
return i1

View File

@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
scheduledscaling "github.com/zalando-incubator/kube-metrics-adapter/pkg/controller/scheduledscaling"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -16,6 +17,7 @@ const (
hHMMFormat = "15:04"
defaultScalingWindowDuration = 1 * time.Minute
defaultRampSteps = 10
defaultTimeZone = "Europe/Berlin"
)
type schedule struct {
@ -247,7 +249,7 @@ func TestScalingScheduleCollector(t *testing.T) {
value: 100,
},
},
err: ErrInvalidScheduleDate,
err: scheduledscaling.ErrInvalidScheduleDate,
},
{
msg: "Return error for one time config end date not in RFC3339 format",
@ -260,7 +262,7 @@ func TestScalingScheduleCollector(t *testing.T) {
value: 100,
},
},
err: ErrInvalidScheduleDate,
err: scheduledscaling.ErrInvalidScheduleDate,
},
{
msg: "Return the right value for two one time config",
@ -413,7 +415,7 @@ func TestScalingScheduleCollector(t *testing.T) {
days: []v1.ScheduleDay{nowWeekday},
},
},
err: ErrInvalidScheduleStartTime,
err: scheduledscaling.ErrInvalidScheduleStartTime,
},
{
msg: "Return the right value for a repeating schedule in the right timezone even in the day after it",
@ -598,15 +600,15 @@ func TestScalingScheduleCollector(t *testing.T) {
schedules := getSchedules(tc.schedules)
store := newMockStore(scalingScheduleName, namespace, tc.scalingWindowDurationMinutes, schedules)
plugin, err := NewScalingScheduleCollectorPlugin(store, now, defaultScalingWindowDuration, rampSteps)
plugin, err := NewScalingScheduleCollectorPlugin(store, now, defaultScalingWindowDuration, defaultTimeZone, rampSteps)
require.NoError(t, err)
clusterStore := newClusterMockStore(scalingScheduleName, tc.scalingWindowDurationMinutes, schedules)
clusterPlugin, err := NewClusterScalingScheduleCollectorPlugin(clusterStore, now, defaultScalingWindowDuration, rampSteps)
clusterPlugin, err := NewClusterScalingScheduleCollectorPlugin(clusterStore, now, defaultScalingWindowDuration, defaultTimeZone, rampSteps)
require.NoError(t, err)
clusterStoreFirstRun := newClusterMockStoreFirstRun(scalingScheduleName, tc.scalingWindowDurationMinutes, schedules)
clusterPluginFirstRun, err := NewClusterScalingScheduleCollectorPlugin(clusterStoreFirstRun, now, defaultScalingWindowDuration, rampSteps)
clusterPluginFirstRun, err := NewClusterScalingScheduleCollectorPlugin(clusterStoreFirstRun, now, defaultScalingWindowDuration, defaultTimeZone, rampSteps)
require.NoError(t, err)
hpa := makeScalingScheduleHPA(namespace, scalingScheduleName)
@ -674,14 +676,14 @@ func TestScalingScheduleObjectNotPresentReturnsError(t *testing.T) {
make(map[string]interface{}),
getByKeyFn,
}
plugin, err := NewScalingScheduleCollectorPlugin(store, time.Now, defaultScalingWindowDuration, defaultRampSteps)
plugin, err := NewScalingScheduleCollectorPlugin(store, time.Now, defaultScalingWindowDuration, defaultTimeZone, defaultRampSteps)
require.NoError(t, err)
clusterStore := mockStore{
make(map[string]interface{}),
getByKeyFn,
}
clusterPlugin, err := NewClusterScalingScheduleCollectorPlugin(clusterStore, time.Now, defaultScalingWindowDuration, defaultRampSteps)
clusterPlugin, err := NewClusterScalingScheduleCollectorPlugin(clusterStore, time.Now, defaultScalingWindowDuration, defaultTimeZone, defaultRampSteps)
require.NoError(t, err)
hpa := makeScalingScheduleHPA("namespace", "scalingScheduleName")
@ -736,10 +738,10 @@ func TestReturnsErrorWhenStoreDoes(t *testing.T) {
},
}
plugin, err := NewScalingScheduleCollectorPlugin(store, time.Now, defaultScalingWindowDuration, defaultRampSteps)
plugin, err := NewScalingScheduleCollectorPlugin(store, time.Now, defaultScalingWindowDuration, defaultTimeZone, defaultRampSteps)
require.NoError(t, err)
clusterPlugin, err := NewClusterScalingScheduleCollectorPlugin(store, time.Now, defaultScalingWindowDuration, defaultRampSteps)
clusterPlugin, err := NewClusterScalingScheduleCollectorPlugin(store, time.Now, defaultScalingWindowDuration, defaultTimeZone, defaultRampSteps)
require.NoError(t, err)
hpa := makeScalingScheduleHPA("namespace", "scalingScheduleName")

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,299 @@
package scheduledscaling
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
scalingschedulefake "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/fake"
zalandov1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned/typed/zalando.org/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
hHMMFormat = "15:04"
)
type fakeClusterScalingScheduleStore struct {
client zalandov1.ZalandoV1Interface
}
func (s fakeClusterScalingScheduleStore) List() []interface{} {
schedules, err := s.client.ClusterScalingSchedules().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil
}
objects := make([]interface{}, 0, len(schedules.Items))
for _, schedule := range schedules.Items {
schedule := schedule
objects = append(objects, &schedule)
}
return objects
}
type fakeScalingScheduleStore struct {
client zalandov1.ZalandoV1Interface
}
func (s fakeScalingScheduleStore) List() []interface{} {
schedules, err := s.client.ScalingSchedules(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil
}
objects := make([]interface{}, 0, len(schedules.Items))
for _, schedule := range schedules.Items {
schedule := schedule
objects = append(objects, &schedule)
}
return objects
}
type schedule struct {
schedules []v1.Schedule
expectedActive bool
preActiveStatus bool
}
func scheduleDate(date string) *v1.ScheduleDate {
d := v1.ScheduleDate(date)
return &d
}
// TestRunOnce verifies that a single reconciliation pass of the
// scheduled-scaling controller updates Status.Active on both ScalingSchedule
// and ClusterScalingSchedule resources: schedules that are active at the
// fixed "now" become Active=true, inactive ones Active=false, regardless of
// the status they were seeded with.
//
// Fixes: removed the unused preActiveStatus field from the test-case struct
// (the per-fixture schedule.preActiveStatus is what applySchedules reads)
// and deleted commented-out dead code.
func TestRunOnce(t *testing.T) {
	nowRFC3339 := "2009-11-10T23:00:00+01:00" // +01:00 is Berlin timezone (default)
	nowTime, _ := time.Parse(time.RFC3339, nowRFC3339)
	nowWeekday := v1.TuesdaySchedule

	// now func always return in UTC for test purposes
	utc, _ := time.LoadLocation("UTC")
	uTCNow := nowTime.In(utc)
	now := func() time.Time {
		return uTCNow
	}

	for _, tc := range []struct {
		msg       string
		schedules map[string]schedule
	}{
		{
			msg: "OneTime Schedules",
			schedules: map[string]schedule{
				"active": {
					schedules: []v1.Schedule{
						{
							Type:            v1.OneTimeSchedule,
							Date:            scheduleDate(nowRFC3339),
							DurationMinutes: 15,
						},
					},
					expectedActive: true,
				},
				"inactive": {
					schedules: []v1.Schedule{
						{
							Type:            v1.OneTimeSchedule,
							Date:            scheduleDate(nowTime.Add(1 * time.Hour).Format(time.RFC3339)),
							DurationMinutes: 15,
						},
					},
					expectedActive: false,
				},
			},
		},
		{
			msg: "OneTime Schedules change active",
			schedules: map[string]schedule{
				"active": {
					schedules: []v1.Schedule{
						{
							Type:            v1.OneTimeSchedule,
							Date:            scheduleDate(nowRFC3339),
							DurationMinutes: 15,
						},
					},
					preActiveStatus: false,
					expectedActive:  true,
				},
				"inactive": {
					schedules: []v1.Schedule{
						{
							Type:            v1.OneTimeSchedule,
							Date:            scheduleDate(nowTime.Add(1 * time.Hour).Format(time.RFC3339)),
							DurationMinutes: 15,
						},
					},
					preActiveStatus: true,
					expectedActive:  false,
				},
			},
		},
		{
			msg: "Repeating Schedules",
			schedules: map[string]schedule{
				"active": {
					schedules: []v1.Schedule{
						{
							Type:            v1.RepeatingSchedule,
							DurationMinutes: 15,
							Period: &v1.SchedulePeriod{
								Days:      []v1.ScheduleDay{nowWeekday},
								StartTime: nowTime.Format(hHMMFormat),
							},
						},
					},
					expectedActive: true,
				},
				"inactive": {
					schedules: []v1.Schedule{
						{
							Type:            v1.RepeatingSchedule,
							DurationMinutes: 15,
							Period: &v1.SchedulePeriod{
								Days:      []v1.ScheduleDay{nowWeekday},
								StartTime: nowTime.Add(1 * time.Hour).Format(hHMMFormat),
							},
						},
					},
					expectedActive: false,
				},
			},
		},
		{
			msg: "Repeating Schedules change active",
			schedules: map[string]schedule{
				"active": {
					schedules: []v1.Schedule{
						{
							Type:            v1.RepeatingSchedule,
							DurationMinutes: 15,
							Period: &v1.SchedulePeriod{
								Days:      []v1.ScheduleDay{nowWeekday},
								StartTime: nowTime.Format(hHMMFormat),
							},
						},
					},
					preActiveStatus: false,
					expectedActive:  true,
				},
				"inactive": {
					schedules: []v1.Schedule{
						{
							Type:            v1.RepeatingSchedule,
							DurationMinutes: 15,
							Period: &v1.SchedulePeriod{
								Days:      []v1.ScheduleDay{nowWeekday},
								StartTime: nowTime.Add(1 * time.Hour).Format(hHMMFormat),
							},
						},
					},
					preActiveStatus: true,
					expectedActive:  false,
				},
			},
		},
	} {
		t.Run(tc.msg, func(t *testing.T) {
			// setup fake client and cache
			client := scalingschedulefake.NewSimpleClientset()
			clusterScalingSchedulesStore := fakeClusterScalingScheduleStore{
				client: client.ZalandoV1(),
			}
			scalingSchedulesStore := fakeScalingScheduleStore{
				client: client.ZalandoV1(),
			}

			// add schedules
			err := applySchedules(client.ZalandoV1(), tc.schedules)
			require.NoError(t, err)

			controller := NewController(client.ZalandoV1(), scalingSchedulesStore, clusterScalingSchedulesStore, now, 0, "Europe/Berlin")

			err = controller.runOnce(context.Background())
			require.NoError(t, err)

			// check schedule status
			err = checkSchedules(t, client.ZalandoV1(), tc.schedules)
			require.NoError(t, err)
		})
	}
}
// applySchedules creates, for every fixture entry, a namespaced
// ScalingSchedule (in "default") and a ClusterScalingSchedule with the same
// spec, seeding each object's Status.Active from preActiveStatus.
func applySchedules(client zalandov1.ZalandoV1Interface, schedules map[string]schedule) error {
	ctx := context.Background()
	for name, s := range schedules {
		spec := v1.ScalingScheduleSpec{
			Schedules: s.schedules,
		}
		status := v1.ScalingScheduleStatus{
			Active: s.preActiveStatus,
		}

		namespaced := &v1.ScalingSchedule{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: "default",
			},
			Spec:   spec,
			Status: status,
		}
		if _, err := client.ScalingSchedules(namespaced.Namespace).Create(ctx, namespaced, metav1.CreateOptions{}); err != nil {
			return err
		}

		clusterScoped := &v1.ClusterScalingSchedule{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec:   spec,
			Status: status,
		}
		if _, err := client.ClusterScalingSchedules().Create(ctx, clusterScoped, metav1.CreateOptions{}); err != nil {
			return err
		}
	}
	return nil
}
// checkSchedules asserts that every ScalingSchedule (namespace "default") and
// ClusterScalingSchedule named in schedules carries the expected
// Status.Active value. A failing Get is returned as an error; mismatched
// status fails the test via require.
func checkSchedules(t *testing.T, client zalandov1.ZalandoV1Interface, schedules map[string]schedule) error {
	ctx := context.Background()
	for name, expected := range schedules {
		namespaced, err := client.ScalingSchedules("default").Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		require.Equal(t, expected.expectedActive, namespaced.Status.Active)

		clusterScoped, err := client.ClusterScalingSchedules().Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		require.Equal(t, expected.expectedActive, clusterScoped.Status.Active)
	}
	return nil
}

View File

@ -34,6 +34,7 @@ import (
v1 "github.com/zalando-incubator/kube-metrics-adapter/pkg/apis/zalando.org/v1"
"github.com/zalando-incubator/kube-metrics-adapter/pkg/client/clientset/versioned"
"github.com/zalando-incubator/kube-metrics-adapter/pkg/collector"
"github.com/zalando-incubator/kube-metrics-adapter/pkg/controller/scheduledscaling"
"github.com/zalando-incubator/kube-metrics-adapter/pkg/provider"
"github.com/zalando-incubator/kube-metrics-adapter/pkg/zmon"
"golang.org/x/oauth2"
@ -130,6 +131,7 @@ func NewCommandStartAdapterServer(stopCh <-chan struct{}) *cobra.Command {
"whether to enable time-based ScalingSchedule metrics")
flags.DurationVar(&o.DefaultScheduledScalingWindow, "scaling-schedule-default-scaling-window", 10*time.Minute, "Default rampup and rampdown window duration for ScalingSchedules")
flags.IntVar(&o.RampSteps, "scaling-schedule-ramp-steps", 10, "Number of steps used to rampup and rampdown ScalingSchedules. It's used to guarantee won't avoid reaching the max scaling due to the 10% minimum change rule.")
flags.StringVar(&o.DefaultTimeZone, "scaling-schedule-default-time-zone", "Europe/Berlin", "Default time zone to use for ScalingSchedules.")
return cmd
}
@ -295,7 +297,7 @@ func (o AdapterServerOptions) RunCustomMetricsAdapterServer(stopCh <-chan struct
)
go reflector.Run(ctx.Done())
clusterPlugin, err := collector.NewClusterScalingScheduleCollectorPlugin(clusterScalingSchedulesStore, time.Now, o.DefaultScheduledScalingWindow, o.RampSteps)
clusterPlugin, err := collector.NewClusterScalingScheduleCollectorPlugin(clusterScalingSchedulesStore, time.Now, o.DefaultScheduledScalingWindow, o.DefaultTimeZone, o.RampSteps)
if err != nil {
return fmt.Errorf("unable to create ClusterScalingScheduleCollector plugin: %v", err)
}
@ -304,7 +306,7 @@ func (o AdapterServerOptions) RunCustomMetricsAdapterServer(stopCh <-chan struct
return fmt.Errorf("failed to register ClusterScalingSchedule object collector plugin: %v", err)
}
plugin, err := collector.NewScalingScheduleCollectorPlugin(scalingSchedulesStore, time.Now, o.DefaultScheduledScalingWindow, o.RampSteps)
plugin, err := collector.NewScalingScheduleCollectorPlugin(scalingSchedulesStore, time.Now, o.DefaultScheduledScalingWindow, o.DefaultTimeZone, o.RampSteps)
if err != nil {
return fmt.Errorf("unable to create ScalingScheduleCollector plugin: %v", err)
}
@ -312,6 +314,13 @@ func (o AdapterServerOptions) RunCustomMetricsAdapterServer(stopCh <-chan struct
if err != nil {
return fmt.Errorf("failed to register ScalingSchedule object collector plugin: %v", err)
}
// setup ScheduledScaling controller to continously update
// status of ScalingSchedule and ClusterScalingSchedule
// resources.
scheduledScalingController := scheduledscaling.NewController(scalingScheduleClient.ZalandoV1(), scalingSchedulesStore, clusterScalingSchedulesStore, time.Now, o.DefaultScheduledScalingWindow, o.DefaultTimeZone)
go scheduledScalingController.Run(ctx)
}
hpaProvider := provider.NewHPAProvider(client, 30*time.Second, 1*time.Minute, collectorFactory, o.DisregardIncompatibleHPAs, o.MetricsTTL, o.GCInterval)
@ -434,4 +443,6 @@ type AdapterServerOptions struct {
DefaultScheduledScalingWindow time.Duration
// Number of steps utilized during the rampup and rampdown for scheduled metrics
RampSteps int
// Default time zone to use for ScalingSchedules.
DefaultTimeZone string
}