sched: convert (sys_)sched_setaffinity() to cpumask_var_t.

Impact: stack usage reduction

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK is enabled) saves
space on the stack.  cpumask_var_t is just a struct cpumask for
!CONFIG_CPUMASK_OFFSTACK, so nothing changes in that configuration.
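
For context, the two flavours look roughly like this (a paraphrase of
include/linux/cpumask.h from this era, not the verbatim source):

	/* Sketch: how cpumask_var_t changes shape with the config option. */
	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* A pointer: storage must come from alloc_cpumask_var()/free_cpumask_var(). */
	typedef struct cpumask *cpumask_var_t;
	#else
	/* A one-element array: "cpumask_var_t m;" is real on-stack storage,
	 * and alloc_cpumask_var()/free_cpumask_var() compile to no-ops. */
	typedef struct cpumask cpumask_var_t[1];
	#endif

The array-of-one trick means a cpumask_var_t decays to a struct cpumask * at
call sites in either configuration, so the same code compiles for both.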

Note the removal of the initializer of new_mask: a cpumask_var_t has no
storage until alloc_cpumask_var() succeeds, so it can no longer be initialized
at its declaration.  Since the first thing we did was
"cpus_and(new_mask, new_mask, cpus_allowed)" I just changed that to
"cpumask_and(new_mask, in_mask, cpus_allowed);", which folds the copy from
*in_mask into the AND.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Rusty Russell <rusty@rustcorp.com.au>  (2008-11-25 02:35:11 +10:30)
Committer: Ingo Molnar <mingo@elte.hu>
Commit:    5a16f3d30c
Parent:    e76bd8d985


@@ -5378,8 +5378,7 @@ out_unlock:
 
 long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 {
-	cpumask_t cpus_allowed;
-	cpumask_t new_mask = *in_mask;
+	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
 
@@ -5401,6 +5400,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
 
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_cpus_allowed;
+	}
 	retval = -EPERM;
 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
 			!capable(CAP_SYS_NICE))
@@ -5410,24 +5417,28 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	if (retval)
 		goto out_unlock;
 
-	cpuset_cpus_allowed(p, &cpus_allowed);
-	cpus_and(new_mask, new_mask, cpus_allowed);
+	cpuset_cpus_allowed(p, cpus_allowed);
+	cpumask_and(new_mask, in_mask, cpus_allowed);
 again:
-	retval = set_cpus_allowed_ptr(p, &new_mask);
+	retval = set_cpus_allowed_ptr(p, new_mask);
 
 	if (!retval) {
-		cpuset_cpus_allowed(p, &cpus_allowed);
-		if (!cpus_subset(new_mask, cpus_allowed)) {
+		cpuset_cpus_allowed(p, cpus_allowed);
+		if (!cpumask_subset(new_mask, cpus_allowed)) {
 			/*
 			 * We must have raced with a concurrent cpuset
 			 * update. Just reset the cpus_allowed to the
 			 * cpuset's cpus_allowed
 			 */
-			new_mask = cpus_allowed;
+			cpumask_copy(new_mask, cpus_allowed);
 			goto again;
 		}
 	}
 out_unlock:
+	free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+	free_cpumask_var(cpus_allowed);
+out_put_task:
 	put_task_struct(p);
 	put_online_cpus();
 	return retval;
@@ -5453,14 +5464,17 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	cpumask_t new_mask;
+	cpumask_var_t new_mask;
 	int retval;
 
-	retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
-	if (retval)
-		return retval;
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+		return -ENOMEM;
 
-	return sched_setaffinity(pid, &new_mask);
+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+	if (retval == 0)
+		retval = sched_setaffinity(pid, new_mask);
+	free_cpumask_var(new_mask);
+	return retval;
 }
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
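
A side note on the new labels: they follow the usual kernel unwind idiom,
freeing in reverse order of allocation so each failure path releases exactly
what was acquired before it.  A minimal standalone sketch of that shape
(hypothetical function name):

	static int two_mask_example(void)
	{
		cpumask_var_t a, b;
		int retval = -ENOMEM;

		if (!alloc_cpumask_var(&a, GFP_KERNEL))
			goto out;		/* nothing acquired yet */
		if (!alloc_cpumask_var(&b, GFP_KERNEL))
			goto out_free_a;	/* undo only the first allocation */

		retval = 0;			/* ... real work on a and b ... */

		free_cpumask_var(b);
	out_free_a:
		free_cpumask_var(a);
	out:
		return retval;
	}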