Merge branch 'pm-cpufreq'

* pm-cpufreq:
  cpufreq: Make cpufreq_notify_transition & cpufreq_notify_post_transition static
  cpufreq: Convert existing drivers to use cpufreq_freq_transition_{begin|end}
  cpufreq: Make sure frequency transitions are serialized
  intel_pstate: Use del_timer_sync in intel_pstate_cpu_stop
  cpufreq: resume drivers before enabling governors
Rafael J. Wysocki 2014-04-01 22:10:08 +02:00
commit 797cb8a6f7
15 changed files with 81 additions and 42 deletions
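
The core of the series is the new cpufreq_freq_transition_begin()/cpufreq_freq_transition_end() pair (see the core cpufreq hunks below); every driver that issues its own transition notifications is converted to the same pattern. A minimal sketch of that pattern for a made-up driver follows; every example_* identifier is a placeholder, not code from this merge:

#include <linux/cpufreq.h>

/* Placeholder hardware hook and table; not part of the cpufreq API. */
extern struct cpufreq_frequency_table example_freq_table[];
extern int example_write_pll(unsigned int khz);

static int example_set_speed(struct cpufreq_policy *policy, unsigned int index)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = example_freq_table[index].frequency;

	/* Replaces the old CPUFREQ_PRECHANGE notification and also blocks
	 * until any transition already in flight on this policy has ended. */
	cpufreq_freq_transition_begin(policy, &freqs);

	ret = example_write_pll(freqs.new);

	/* Replaces the old CPUFREQ_POSTCHANGE notification; a non-zero last
	 * argument tells the core the switch failed so it re-announces the
	 * old frequency to listeners. */
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}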


@@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
freqs.old, freqs.new);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
/* Disable IRQs */
/* local_irq_save(flags); */
@@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* Enable IRQs */
/* local_irq_restore(flags); */
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}


@@ -331,16 +331,15 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
* function. It is called twice on all CPU frequency changes that have
* external effects.
*/
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
for_each_cpu(freqs->cpu, policy->cpus)
__cpufreq_notify_transition(policy, freqs, state);
}
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/* Do post notifications when there are chances that transition has failed */
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
{
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
@@ -351,7 +350,41 @@ void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
-EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+struct cpufreq_freqs *freqs)
+{
+wait:
+wait_event(policy->transition_wait, !policy->transition_ongoing);
+spin_lock(&policy->transition_lock);
+if (unlikely(policy->transition_ongoing)) {
+spin_unlock(&policy->transition_lock);
+goto wait;
+}
+policy->transition_ongoing = true;
+spin_unlock(&policy->transition_lock);
+cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+struct cpufreq_freqs *freqs, int transition_failed)
+{
+if (unlikely(WARN_ON(!policy->transition_ongoing)))
+return;
+cpufreq_notify_post_transition(policy, freqs, transition_failed);
+policy->transition_ongoing = false;
+wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*********************************************************************
@@ -985,6 +1018,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
+spin_lock_init(&policy->transition_lock);
+init_waitqueue_head(&policy->transition_wait);
return policy;
@@ -1470,8 +1505,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
policy = per_cpu(cpufreq_cpu_data, cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
+cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
@@ -1652,14 +1687,13 @@ void cpufreq_resume(void)
cpufreq_suspended = false;
list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
-if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+pr_err("%s: Failed to resume driver: %p\n", __func__,
+policy);
+else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
|| __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
pr_err("%s: Failed to start governor for policy: %p\n",
__func__, policy);
-else if (cpufreq_driver->resume
-&& cpufreq_driver->resume(policy))
-pr_err("%s: Failed to resume driver: %p\n", __func__,
-policy);
/*
* schedule call cpufreq_update_policy() for boot CPU, i.e. last
@@ -1832,8 +1866,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
__func__, policy->cpu, freqs.old, freqs.new);
-cpufreq_notify_transition(policy, &freqs,
-CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
}
retval = cpufreq_driver->target_index(policy, index);
@@ -1842,7 +1875,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
__func__, retval);
if (notify)
-cpufreq_notify_post_transition(policy, &freqs, retval);
+cpufreq_freq_transition_end(policy, &freqs, retval);
}
out:


@@ -219,7 +219,7 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
freqs.old = policy->cur;
freqs.new = freq_table[index].frequency;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
/* Set the target frequency in all C0_3_PSTATE register */
for_each_cpu(i, policy->cpus) {
@@ -258,7 +258,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
dev_crit(dvfs_info->dev, "New frequency out of range\n");
freqs.new = freqs.old;
}
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
cpufreq_cpu_put(policy);
mutex_unlock(&cpufreq_lock);
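
The two hunks above also show that the begin/end calls need not sit in the same function: this driver starts the transition in its ->target() callback and only reports completion from a work handler once the hardware has finished. A rough sketch of that split, assuming an interrupt-driven completion path; all example_* names are placeholders rather than code from this diff:

#include <linux/cpufreq.h>
#include <linux/workqueue.h>

/* Placeholder hardware hooks; not part of the cpufreq API. */
extern struct cpufreq_frequency_table example_freq_table[];
extern void example_start_hw_switch(unsigned int khz);
extern bool example_hw_switch_done(void);

/* Shared between the two halves; the core's serialization guarantees at
 * most one transition is in flight per policy. */
static struct cpufreq_freqs example_freqs;
static struct cpufreq_policy *example_policy;

static int example_target(struct cpufreq_policy *policy, unsigned int index)
{
	example_freqs.old = policy->cur;
	example_freqs.new = example_freq_table[index].frequency;
	example_policy = policy;

	cpufreq_freq_transition_begin(policy, &example_freqs);
	example_start_hw_switch(example_freqs.new); /* completion raises an IRQ */
	return 0;
}

/* Scheduled from the completion interrupt handler via schedule_work(). */
static void example_transition_work(struct work_struct *work)
{
	int failed = !example_hw_switch_done();

	cpufreq_freq_transition_end(example_policy, &example_freqs, failed);
}
static DECLARE_WORK(example_work, example_transition_work);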


@@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
freqs.new = new_khz;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
local_irq_save(flags);
if (new_khz != stock_freq) {
@@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
gx_params->pci_suscfg = suscfg;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);


@@ -122,7 +122,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
return 0;
}
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
@@ -143,7 +143,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
*/
set_cpus_allowed(current, cpus_allowed);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}


@@ -778,7 +778,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_info("intel_pstate CPU %d exiting\n", cpu_num);
-del_timer(&all_cpu_data[cpu_num]->timer);
+del_timer_sync(&all_cpu_data[cpu_num]->timer);
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
kfree(all_cpu_data[cpu_num]);
all_cpu_data[cpu_num] = NULL;


@@ -269,7 +269,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
}
}
/* Report true CPU frequency */
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
if (!bm_timeout)
printk(KERN_INFO PFX "Warning: Timeout while waiting for "


@@ -215,7 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = target_freq;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -231,7 +231,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
-cpufreq_notify_post_transition(policy, &freqs, status != CMD_COMPLETE);
+cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
spin_unlock(&pcc_lock);
if (status != CMD_COMPLETE) {


@@ -148,11 +148,11 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
freqs.new = busfreq * clock_ratio[best_i].driver_data;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_set_cpu_multiplier(best_i);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}


@@ -269,7 +269,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
freqs.new = powernow_table[index].frequency;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
/* Now do the magic poking into the MSRs. */
@@ -290,7 +290,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
if (have_a0 == 1)
local_irq_enable();
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}


@@ -963,9 +963,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
policy = cpufreq_cpu_get(smp_processor_id());
cpufreq_cpu_put(policy);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
-cpufreq_notify_post_transition(policy, &freqs, res);
+cpufreq_freq_transition_end(policy, &freqs, res);
return res;
}


@@ -217,7 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
/* start the frequency change */
-cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs.freqs);
/* If hclk is staying the same, then we do not need to
* re-write the IO or the refresh timings whilst we are changing
@@ -261,7 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
local_irq_restore(flags);
/* notify everyone we've done this */
-cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
s3c_freq_dbg("%s: finished\n", __func__);
return 0;


@@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = (freq + 500) / 1000;
freqs.flags = 0;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
set_cpus_allowed_ptr(current, &cpus_allowed);
clk_set_rate(cpuclk, freq);
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+cpufreq_freq_transition_end(policy, &freqs, 0);
dev_dbg(dev, "set frequency %lu Hz\n", freq);


@@ -44,9 +44,9 @@ static int ucv2_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = target_freq;
-cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+cpufreq_freq_transition_begin(policy, &freqs);
ret = clk_set_rate(policy->mclk, target_freq * 1000);
-cpufreq_notify_post_transition(policy, &freqs, ret);
+cpufreq_freq_transition_end(policy, &freqs, ret);
return ret;
}


@@ -16,6 +16,7 @@
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
#include <linux/sysfs.h>
/*********************************************************************
@@ -104,6 +105,11 @@ struct cpufreq_policy {
* __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
*/
struct rw_semaphore rwsem;
+/* Synchronization for frequency transitions */
+bool transition_ongoing; /* Tracks transition status */
+spinlock_t transition_lock;
+wait_queue_head_t transition_wait;
};
/* Only for ACPI */
@@ -333,9 +339,9 @@ static inline void cpufreq_resume(void) {}
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
-struct cpufreq_freqs *freqs, unsigned int state);
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+struct cpufreq_freqs *freqs);
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed);
#else /* CONFIG_CPU_FREQ */