sched: Fix indentations in find_busiest_group() using gotos

Impact: cleanup

Some indentation in find_busiest_group() can be minimized by using
early exits with the help of gotos. This improves readability in
a couple of cases.
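
The transformation is the usual early-exit idiom: invert the guarding
condition and jump past the block, so the common path stays one
indentation level shallower. A minimal standalone sketch of the idea
(hypothetical names, not code from this patch):

	/*
	 * Illustrative sketch only: both helpers return the same result,
	 * but the second keeps its main path one brace level shallower.
	 */
	static int pick_nested(int nr_running, int capacity, int leader_nr)
	{
		int picked = 0;

		if (nr_running <= capacity - 1) {
			if (nr_running > leader_nr)
				picked = 1;
		}
		return picked;
	}

	static int pick_early_exit(int nr_running, int capacity, int leader_nr)
	{
		int picked = 0;

		if (nr_running > capacity - 1)
			goto out;

		if (nr_running > leader_nr)
			picked = 1;
	out:
		return picked;
	}

The hunks below apply exactly this inversion to the power-savings
group_leader selection in find_busiest_group() and to the
group_leader/group_min handling under out_balanced.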

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091340.13992.45062.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 6dfdb06290, parent 67bb6c036d
Gautham R Shenoy, 2009-03-25 14:43:40 +05:30; committed by Ingo Molnar

@@ -3403,14 +3403,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * capacity but still has some space to pick up some load
 		 * from other group and save more power
 		 */
-		if (sum_nr_running <= group_capacity - 1) {
-			if (sum_nr_running > leader_nr_running ||
-			    (sum_nr_running == leader_nr_running &&
-			     group_first_cpu(group) <
-			     group_first_cpu(group_leader))) {
-				group_leader = group;
-				leader_nr_running = sum_nr_running;
-			}
+		if (sum_nr_running > group_capacity - 1)
+			goto group_next;
+
+		if (sum_nr_running > leader_nr_running ||
+		    (sum_nr_running == leader_nr_running &&
+		     group_first_cpu(group) < group_first_cpu(group_leader))) {
+			group_leader = group;
+			leader_nr_running = sum_nr_running;
 		}
 group_next:
 #endif
@@ -3531,14 +3531,16 @@ out_balanced:
 	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 		goto ret;
 
-	if (this == group_leader && group_leader != group_min) {
-		*imbalance = min_load_per_task;
-		if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-			cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-				group_first_cpu(group_leader);
-		}
-		return group_min;
-	}
+	if (this != group_leader || group_leader == group_min)
+		goto ret;
+
+	*imbalance = min_load_per_task;
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+			group_first_cpu(group_leader);
+	}
+	return group_min;
+
 #endif
 ret:
 	*imbalance = 0;