sched/fair: Add policy for restricting prefer_spread to newly idle balance

Add policies for restricting prefer_spread to newly idle load balance
by expanding the sched_prefer_spread tunable range.

To allow lower capacity CPUs to do aggressive newly idle load balance:
echo 3 > /proc/sys/kernel/sched_prefer_spread

To allow both lower capacity and higher capacity CPUs to do
aggressive newly idle load balance:
echo 4 > /proc/sys/kernel/sched_prefer_spread
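
For reference, the complete set of values accepted by the tunable after
this change (as implemented by prefer_spread_on_idle() below) is:

0: prefer_spread disabled (default)
1: aggressive idle load balance on lower capacity CPUs
2: aggressive idle load balance on all CPUs
3: aggressive newly idle load balance on lower capacity CPUs
4: aggressive newly idle load balance on all CPUs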

Change-Id: Ia62ddb29bdf592a956a9688f277178ef71dee1b3
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Co-developed-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[pkondeti@codeaurora.org: The tunable range is expanded]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>

commit f177186646 (parent 82e942f312)
Author:    Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Date:      2020-06-03 20:23:34 -07:00
Committer: Pavankumar Kondeti <pkondeti@codeaurora.org>
3 changed files with 20 additions and 13 deletions

@@ -4005,7 +4005,7 @@ static inline void adjust_cpus_for_packing(struct task_struct *p,
 	if (*best_idle_cpu == -1 || *target_cpu == -1)
 		return;
 
-	if (prefer_spread_on_idle(*best_idle_cpu))
+	if (prefer_spread_on_idle(*best_idle_cpu, false))
 		fbt_env->need_idle |= 2;
 
 	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
@@ -10676,7 +10676,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	};
 
 	env.prefer_spread = (idle != CPU_NOT_IDLE &&
-			     prefer_spread_on_idle(this_cpu) &&
+			     prefer_spread_on_idle(this_cpu,
+					idle == CPU_NEWLY_IDLE) &&
 			     !((sd->flags & SD_ASYM_CPUCAPACITY) &&
 			     !is_asym_cap_cpu(this_cpu)));
 
@@ -11202,7 +11203,8 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 		}
 		max_cost += sd->max_newidle_lb_cost;
 
-		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu))
+		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu,
+					idle == CPU_NEWLY_IDLE))
 			continue;
 
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -11456,7 +11458,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 */
 	if (static_branch_likely(&sched_energy_present)) {
 		if (rq->nr_running >= 2 && (cpu_overutilized(cpu) ||
-				prefer_spread_on_idle(cpu)))
+				prefer_spread_on_idle(cpu, false)))
 			flags = NOHZ_KICK_MASK;
 		goto out;
 	}
@@ -11827,7 +11829,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 	u64 avg_idle = this_rq->avg_idle;
-	bool prefer_spread = prefer_spread_on_idle(this_cpu);
+	bool prefer_spread = prefer_spread_on_idle(this_cpu, true);
 	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
 			 silver_has_big_tasks() &&
 			 (atomic_read(&this_rq->nr_iowait) == 0));

@@ -451,15 +451,20 @@ static int in_sched_bug;
 	} \
 })
 
-static inline bool prefer_spread_on_idle(int cpu)
+static inline bool prefer_spread_on_idle(int cpu, bool new_ilb)
 {
-	if (likely(!sysctl_sched_prefer_spread))
-		return false;
-
-	if (is_min_capacity_cpu(cpu))
-		return sysctl_sched_prefer_spread >= 1;
-
-	return sysctl_sched_prefer_spread > 1;
+	switch (sysctl_sched_prefer_spread) {
+	case 1:
+		return is_min_capacity_cpu(cpu);
+	case 2:
+		return true;
+	case 3:
+		return (new_ilb && is_min_capacity_cpu(cpu));
+	case 4:
+		return new_ilb;
+	default:
+		return false;
+	}
 }
 
 #else /* CONFIG_SCHED_WALT */

@@ -568,7 +568,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
-		.extra2		= &two,
+		.extra2		= &four,
 	},
 	{
 		.procname	= "walt_rtg_cfs_boost_prio",