outlet/kafka: decrease if we have many scale-down requests

Don't wait for a majority: a third of the requests should be enough,
since there is a factor of 10 between the underloaded and overloaded
thresholds.
This commit is contained in:
Vincent Bernat
2025-11-15 06:36:19 +01:00
parent 0a18f447b9
commit 9afe57b32c
2 changed files with 4 additions and 10 deletions

View File

@@ -164,8 +164,8 @@ func runScaler(ctx context.Context, config scalerConfiguration) chan<- ScaleRequ
} }
} }
// Scale down if we have a majority of decrease requests. // Scale down if we have many decrease requests
if decreaseCount > steadyCount { if decreaseCount > steadyCount/2 {
current := config.getWorkerCount() current := config.getWorkerCount()
target := state.nextWorkerCount(ScaleDecrease, current, config.minWorkers, config.maxWorkers) target := state.nextWorkerCount(ScaleDecrease, current, config.minWorkers, config.maxWorkers)
if target < current { if target < current {

View File

@@ -154,18 +154,12 @@ func TestScalerRateLimiter(t *testing.T) {
time.Sleep(time.Second) time.Sleep(time.Second)
ch <- ScaleDecrease ch <- ScaleDecrease
} }
// time=7m, no change (180 vs 120) // time=7m, decrease (180 vs 120)
check([]int{8, 12, 11, 12, 13, 12, 11}) check([]int{8, 12, 11, 12, 13, 12, 11, 10})
for range 30 { for range 30 {
time.Sleep(time.Second) time.Sleep(time.Second)
ch <- ScaleDecrease ch <- ScaleDecrease
} }
// time=7m30s, no change (150 vs 150)
check([]int{8, 12, 11, 12, 13, 12, 11})
time.Sleep(time.Second)
ch <- ScaleDecrease
// OK, now more decrease than increase!
check([]int{8, 12, 11, 12, 13, 12, 11, 10})
// We should not account for steady requests for too long! // We should not account for steady requests for too long!
time.Sleep(time.Minute) time.Sleep(time.Minute)