Use 10 buckets on ScalingSchedule ramp-up/down

The HPA has a feature that skips scaling up or down when the change in the
metric is less than 10%:

> We'll skip scaling if the ratio is sufficiently close to 1.0 (within a
> globally-configurable tolerance, from the
> `--horizontal-pod-autoscaler-tolerance` flag, which defaults to 0.1.

It could lead to pods scaling up to 10% less than the target for
ScalingSchedules, and then not scaling to the actual value if the
previously calculated metric was within 10% of the target.

This commit uses 10 fixed buckets for scaling; this way we know the
metric returned during a scaling event is at least 10% higher than a
previous one calculated during the ramp-up period. The same is valid
for scaling down during a ramp-down.

Signed-off-by: Jonathan Juares Beber <jonathanbeber@gmail.com>
This commit is contained in:
Jonathan Juares Beber 2021-09-30 18:44:50 +02:00
parent 0730c6ef1e
commit 8fe330941a
No known key found for this signature in database
GPG Key ID: 41D3F4ACE4465751
2 changed files with 16 additions and 9 deletions

View File

@ -334,7 +334,14 @@ func scaledValue(timestamp time.Time, startTime time.Time, scalingWindowDuration
if scalingWindowDuration == 0 {
return 0
}
return int64(math.Ceil(math.Abs(float64(timestamp.Sub(startTime))) / float64(scalingWindowDuration) * float64(value)))
// The HPA has a rule to do not scale up or down if the change in
// the metric is less than 10% of the current value. We will use 10
// buckets of time using the floor of each. This value might be
// flexible one day, but for now it's fixed.
const steps float64 = 10
requiredPercentage := math.Abs(float64(timestamp.Sub(startTime))) / float64(scalingWindowDuration)
return int64(math.Floor(requiredPercentage*steps) * (float64(value) / steps))
}
func between(timestamp, start, end time.Time) bool {

View File

@ -86,7 +86,7 @@ func TestScalingScheduleCollector(t *testing.T) {
expectedValue: 100,
},
{
msg: "Return the scaled value (67) for one time config - 20 seconds before starting",
msg: "Return the scaled value (60) for one time config - 20 seconds before starting",
schedules: []schedule{
{
date: nowTime.Add(time.Second * 20).Format(time.RFC3339),
@ -95,10 +95,10 @@ func TestScalingScheduleCollector(t *testing.T) {
value: 100,
},
},
expectedValue: 67,
expectedValue: 60,
},
{
msg: "Return the scaled value (67) for one time config - 20 seconds after",
msg: "Return the scaled value (60) for one time config - 20 seconds after",
schedules: []schedule{
{
date: nowTime.Add(-time.Minute * 45).Add(-time.Second * 20).Format(time.RFC3339),
@ -107,10 +107,10 @@ func TestScalingScheduleCollector(t *testing.T) {
value: 100,
},
},
expectedValue: 67,
expectedValue: 60,
},
{
msg: "Return the scaled value (95) for one time config with a custom scaling window - 30 seconds before starting",
msg: "Return the scaled value (90) for one time config with a custom scaling window - 30 seconds before starting",
scalingWindowDurationMinutes: &tenMinutes,
schedules: []schedule{
{
@ -120,10 +120,10 @@ func TestScalingScheduleCollector(t *testing.T) {
value: 100,
},
},
expectedValue: 95,
expectedValue: 90,
},
{
msg: "Return the scaled value (95) for one time config with a custom scaling window - 30 seconds after",
msg: "Return the scaled value (90) for one time config with a custom scaling window - 30 seconds after",
scalingWindowDurationMinutes: &tenMinutes,
schedules: []schedule{
{
@ -133,7 +133,7 @@ func TestScalingScheduleCollector(t *testing.T) {
value: 100,
},
},
expectedValue: 95,
expectedValue: 90,
},
{
msg: "Return the default value (0) for one time config not started yet (20 minutes before)",