Skip to content

Commit 1b9c118

Browse files
Peter Zijlstra authored and gregkh committed
sched/fair: Proportional newidle balance
commit 33cf66d upstream. Add a randomized algorithm that runs newidle balancing proportional to its success rate. This improves schbench significantly: 6.18-rc4: 2.22 Mrps/s 6.18-rc4+revert: 2.04 Mrps/s 6.18-rc4+revert+random: 2.18 Mrps/s Conversely, per Adam Li this affects SpecJBB slightly, reducing it by 1%: 6.17: -6% 6.17+revert: 0% 6.17+revert+random: -1% Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Tested-by: Chris Mason <clm@meta.com> Link: https://lkml.kernel.org/r/6825c50d-7fa7-45d8-9b81-c6e7e25738e2@meta.com Link: https://patch.msgid.link/20251107161739.770122091@infradead.org [ Ajay: Modified to apply on v6.12 ] Signed-off-by: Ajay Kaher <ajay.kaher@broadcom.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent c6ae271 commit 1b9c118

6 files changed

Lines changed: 64 additions & 4 deletions

File tree

include/linux/sched/topology.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,9 @@ struct sched_domain {
106106
unsigned int nr_balance_failed; /* initialise to 0 */
107107

108108
/* idle_balance() stats */
109+
unsigned int newidle_call;
110+
unsigned int newidle_success;
111+
unsigned int newidle_ratio;
109112
u64 max_newidle_lb_cost;
110113
unsigned long last_decay_max_lb_cost;
111114

kernel/sched/core.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118118
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119119

120120
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121+
DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
121122

122123
#ifdef CONFIG_SCHED_DEBUG
123124
/*
@@ -8335,6 +8336,8 @@ void __init sched_init_smp(void)
83358336
{
83368337
sched_init_numa(NUMA_NO_NODE);
83378338

8339+
prandom_init_once(&sched_rnd_state);
8340+
83388341
/*
83398342
* There's no userspace yet to cause hotplug operations; hence all the
83408343
* CPU masks are stable and all blatant races in the below code cannot

kernel/sched/fair.c

Lines changed: 40 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12186,11 +12186,27 @@ void update_max_interval(void)
1218612186
max_load_balance_interval = HZ*num_online_cpus()/10;
1218712187
}
1218812188

12189-
static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
12189+
static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
12190+
{
12191+
sd->newidle_call++;
12192+
sd->newidle_success += success;
12193+
12194+
if (sd->newidle_call >= 1024) {
12195+
sd->newidle_ratio = sd->newidle_success;
12196+
sd->newidle_call /= 2;
12197+
sd->newidle_success /= 2;
12198+
}
12199+
}
12200+
12201+
static inline bool
12202+
update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
1219012203
{
1219112204
unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
1219212205
unsigned long now = jiffies;
1219312206

12207+
if (cost)
12208+
update_newidle_stats(sd, success);
12209+
1219412210
if (cost > sd->max_newidle_lb_cost) {
1219512211
/*
1219612212
* Track max cost of a domain to make sure to not delay the
@@ -12238,7 +12254,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
1223812254
* Decay the newidle max times here because this is a regular
1223912255
* visit to all the domains.
1224012256
*/
12241-
need_decay = update_newidle_cost(sd, 0);
12257+
need_decay = update_newidle_cost(sd, 0, 0);
1224212258
max_cost += sd->max_newidle_lb_cost;
1224312259

1224412260
/*
@@ -12896,17 +12912,37 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
1289612912
break;
1289712913

1289812914
if (sd->flags & SD_BALANCE_NEWIDLE) {
12915+
unsigned int weight = 1;
12916+
12917+
if (sched_feat(NI_RANDOM)) {
12918+
/*
12919+
* Throw a 1k sided dice; and only run
12920+
* newidle_balance according to the success
12921+
* rate.
12922+
*/
12923+
u32 d1k = sched_rng() % 1024;
12924+
weight = 1 + sd->newidle_ratio;
12925+
if (d1k > weight) {
12926+
update_newidle_stats(sd, 0);
12927+
continue;
12928+
}
12929+
weight = (1024 + weight/2) / weight;
12930+
}
1289912931

1290012932
pulled_task = sched_balance_rq(this_cpu, this_rq,
1290112933
sd, CPU_NEWLY_IDLE,
1290212934
&continue_balancing);
1290312935

1290412936
t1 = sched_clock_cpu(this_cpu);
1290512937
domain_cost = t1 - t0;
12906-
update_newidle_cost(sd, domain_cost);
12907-
1290812938
curr_cost += domain_cost;
1290912939
t0 = t1;
12940+
12941+
/*
12942+
* Track max cost of a domain to make sure to not delay the
12943+
* next wakeup on the CPU.
12944+
*/
12945+
update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
1291012946
}
1291112947

1291212948
/*

kernel/sched/features.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,3 +122,8 @@ SCHED_FEAT(WA_BIAS, true)
122122
SCHED_FEAT(UTIL_EST, true)
123123

124124
SCHED_FEAT(LATENCY_WARN, false)
125+
126+
/*
127+
* Do newidle balancing proportional to its success rate using randomization.
128+
*/
129+
SCHED_FEAT(NI_RANDOM, true)

kernel/sched/sched.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#ifndef _KERNEL_SCHED_SCHED_H
66
#define _KERNEL_SCHED_SCHED_H
77

8+
#include <linux/prandom.h>
89
#include <linux/sched/affinity.h>
910
#include <linux/sched/autogroup.h>
1011
#include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled(struct task_struct *p)
13481349
}
13491350

13501351
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1352+
DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
1353+
1354+
static inline u32 sched_rng(void)
1355+
{
1356+
return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
1357+
}
13511358

13521359
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
13531360
#define this_rq() this_cpu_ptr(&runqueues)

kernel/sched/topology.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1632,6 +1632,12 @@ sd_init(struct sched_domain_topology_level *tl,
16321632

16331633
.last_balance = jiffies,
16341634
.balance_interval = sd_weight,
1635+
1636+
/* 50% success rate */
1637+
.newidle_call = 512,
1638+
.newidle_success = 256,
1639+
.newidle_ratio = 512,
1640+
16351641
.max_newidle_lb_cost = 0,
16361642
.last_decay_max_lb_cost = jiffies,
16371643
.child = child,

0 commit comments

Comments (0)