Merge branches 'clk-iproc', 'clk-mvebu' and 'clk-qcom-a53' into clk-next

* clk-iproc:
  clk: iproc: Minor tidy up of iproc pll data structures
  clk: iproc: Allow plls to do minor rate changes without reset
  clk: iproc: Fix error in the pll post divider rate calculation
  clk: iproc: Allow iproc pll to runtime calculate vco parameters

* clk-mvebu:
  clk: mvebu: armada-37xx-periph: Use PTR_ERR_OR_ZERO()

* clk-qcom-a53:
  clk: qcom: Add APCS clock controller support
  clk: qcom: Add regmap mux-div clocks support
  clk: qcom: Add A53 PLL support
Stephen Boyd 2018-01-26 16:41:58 -08:00
commit 0003046477
11 changed files with 759 additions and 102 deletions


@ -0,0 +1,22 @@
Qualcomm MSM8916 A53 PLL Binding
--------------------------------
The A53 PLL on MSM8916 platforms is the main CPU PLL used for frequencies
above 1GHz.
Required properties :
- compatible : Shall contain only one of the following:
"qcom,msm8916-a53pll"
- reg : shall contain base register location and length
- #clock-cells : must be set to <0>
Example:
a53pll: clock@b016000 {
compatible = "qcom,msm8916-a53pll";
reg = <0xb016000 0x40>;
#clock-cells = <0>;
};


@ -269,23 +269,10 @@ static void __init cygnus_asiu_init(struct device_node *node)
}
CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init);
/*
 * AUDIO PLL VCO frequency parameter table
 *
 * PLL output frequency = (ndiv_int + ndiv_frac / 2^20) *
 *                        (parent clock rate / pdiv)
 *
 * On Cygnus, parent is the 25 MHz oscillator
 */
static const struct iproc_pll_vco_param audiopll_vco_params[] = {
/* rate (Hz) ndiv_int ndiv_frac pdiv */
{ 1354750204UL, 54, 199238, 1 },
{ 1769470191UL, 70, 816639, 1 },
};
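As a quick check of that formula against the first (now superseded) table entry, with the 25 MHz reference and pdiv = 1:

(54 + 199238 / 2^20) * 25000000 ≈ 1354750204 Hz

which matches the 1354750204UL rate in that row; the point of this series is to derive ndiv_int/ndiv_frac at runtime instead of looking them up in this table.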
static const struct iproc_pll_ctrl audiopll = {
.flags = IPROC_CLK_PLL_NEEDS_SW_CFG | IPROC_CLK_PLL_HAS_NDIV_FRAC |
IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW,
IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW |
IPROC_CLK_PLL_CALC_PARAM,
.reset = RESET_VAL(0x5c, 0, 1),
.dig_filter = DF_VAL(0x48, 0, 3, 6, 4, 3, 3),
.sw_ctrl = SW_CTRL_VAL(0x4, 0),
@ -300,8 +287,7 @@ static const struct iproc_pll_ctrl audiopll = {
static const struct iproc_clk_ctrl audiopll_clk[] = {
[BCM_CYGNUS_AUDIOPLL_CH0] = {
.channel = BCM_CYGNUS_AUDIOPLL_CH0,
.flags = IPROC_CLK_AON |
IPROC_CLK_MCLK_DIV_BY_2,
.flags = IPROC_CLK_AON | IPROC_CLK_MCLK_DIV_BY_2,
.enable = ENABLE_VAL(0x14, 8, 10, 9),
.mdiv = REG_VAL(0x14, 0, 8),
},
@ -321,9 +307,8 @@ static const struct iproc_clk_ctrl audiopll_clk[] = {
static void __init cygnus_audiopll_clk_init(struct device_node *node)
{
iproc_pll_clk_setup(node, &audiopll, audiopll_vco_params,
ARRAY_SIZE(audiopll_vco_params), audiopll_clk,
ARRAY_SIZE(audiopll_clk));
iproc_pll_clk_setup(node, &audiopll, NULL, 0,
audiopll_clk, ARRAY_SIZE(audiopll_clk));
}
CLK_OF_DECLARE(cygnus_audiopll, "brcm,cygnus-audiopll",
cygnus_audiopll_clk_init);


@ -69,16 +69,6 @@ enum vco_freq_range {
VCO_MAX = 4000000000U,
};
struct iproc_pll;
struct iproc_clk {
struct clk_hw hw;
const char *name;
struct iproc_pll *pll;
unsigned long rate;
const struct iproc_clk_ctrl *ctrl;
};
struct iproc_pll {
void __iomem *status_base;
void __iomem *control_base;
@ -88,13 +78,49 @@ struct iproc_pll {
const struct iproc_pll_ctrl *ctrl;
const struct iproc_pll_vco_param *vco_param;
unsigned int num_vco_entries;
};
struct clk_hw_onecell_data *clk_data;
struct iproc_clk *clks;
struct iproc_clk {
struct clk_hw hw;
struct iproc_pll *pll;
const struct iproc_clk_ctrl *ctrl;
};
#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)
static int pll_calc_param(unsigned long target_rate,
unsigned long parent_rate,
struct iproc_pll_vco_param *vco_out)
{
u64 ndiv_int, ndiv_frac, residual;
ndiv_int = target_rate / parent_rate;
if (!ndiv_int || (ndiv_int > 255))
return -EINVAL;
residual = target_rate - (ndiv_int * parent_rate);
residual <<= 20;
/*
* Add half of the divisor so the result will be rounded to closest
* instead of rounded down.
*/
residual += (parent_rate / 2);
ndiv_frac = div64_u64((u64)residual, (u64)parent_rate);
vco_out->ndiv_int = ndiv_int;
vco_out->ndiv_frac = ndiv_frac;
vco_out->pdiv = 1;
vco_out->rate = vco_out->ndiv_int * parent_rate;
residual = (u64)vco_out->ndiv_frac * (u64)parent_rate;
residual >>= 20;
vco_out->rate += residual;
return 0;
}
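Worked by hand for the old Cygnus audio PLL entry (25 MHz parent, 1354750204 Hz target), the steps above reproduce the values that used to sit in the static table; this is a hand calculation to illustrate the rounding, not driver output:

ndiv_int  = 1354750204 / 25000000                       = 54
residual  = 1354750204 - 54 * 25000000                  = 4750204
ndiv_frac = ((4750204 << 20) + 12500000) / 25000000     = 199238
rate      = 54 * 25000000 + ((199238 * 25000000) >> 20) = 1354750204 Hz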
/*
* Based on the target frequency, find a match from the VCO frequency parameter
* table and return its index
@ -252,17 +278,51 @@ static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
/*
 * Determines if the change to be applied to the PLL is minor (just an update
 * of the fractional divider). If so, then we can avoid going through a
 * disruptive reset and lock sequence.
 */
static bool pll_fractional_change_only(struct iproc_pll *pll,
struct iproc_pll_vco_param *vco)
{
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
u32 val;
u32 ndiv_int;
unsigned int pdiv;
/* PLL needs to be locked */
val = readl(pll->status_base + ctrl->status.offset);
if ((val & (1 << ctrl->status.shift)) == 0)
return false;
val = readl(pll->control_base + ctrl->ndiv_int.offset);
ndiv_int = (val >> ctrl->ndiv_int.shift) &
bit_mask(ctrl->ndiv_int.width);
if (ndiv_int != vco->ndiv_int)
return false;
val = readl(pll->control_base + ctrl->pdiv.offset);
pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
if (pdiv != vco->pdiv)
return false;
return true;
}
static int pll_set_rate(struct iproc_clk *clk, struct iproc_pll_vco_param *vco,
unsigned long parent_rate)
{
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index];
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
int ka = 0, ki, kp, ret;
unsigned long rate = vco->rate;
u32 val;
enum kp_band kp_index;
unsigned long ref_freq;
const char *clk_name = clk_hw_get_name(&clk->hw);
/*
* reference frequency = parent frequency / PDIV
@ -285,22 +345,35 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
kp_index = KP_BAND_HIGH_HIGH;
} else {
pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
clk->name, rate);
clk_name, rate);
return -EINVAL;
}
kp = get_kp(ref_freq, kp_index);
if (kp < 0) {
pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name);
pr_err("%s: pll: %s has invalid kp\n", __func__, clk_name);
return kp;
}
ret = __pll_enable(pll);
if (ret) {
pr_err("%s: pll: %s fails to enable\n", __func__, clk->name);
pr_err("%s: pll: %s fails to enable\n", __func__, clk_name);
return ret;
}
if (pll_fractional_change_only(clk->pll, vco)) {
/* program fractional part of NDIV */
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
val = readl(pll->control_base + ctrl->ndiv_frac.offset);
val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
ctrl->ndiv_frac.shift);
val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
iproc_pll_write(pll, pll->control_base,
ctrl->ndiv_frac.offset, val);
return 0;
}
}
/* put PLL in reset */
__pll_put_in_reset(pll);
@ -354,7 +427,7 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
ret = pll_wait_for_lock(pll);
if (ret < 0) {
pr_err("%s: pll: %s failed to lock\n", __func__, clk->name);
pr_err("%s: pll: %s failed to lock\n", __func__, clk_name);
return ret;
}
@ -390,16 +463,15 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
u32 val;
u64 ndiv, ndiv_int, ndiv_frac;
unsigned int pdiv;
unsigned long rate;
if (parent_rate == 0)
return 0;
/* PLL needs to be locked */
val = readl(pll->status_base + ctrl->status.offset);
if ((val & (1 << ctrl->status.shift)) == 0) {
clk->rate = 0;
if ((val & (1 << ctrl->status.shift)) == 0)
return 0;
}
/*
* PLL output frequency =
@ -421,35 +493,60 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
val = readl(pll->control_base + ctrl->pdiv.offset);
pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
clk->rate = (ndiv * parent_rate) >> 20;
rate = (ndiv * parent_rate) >> 20;
if (pdiv == 0)
clk->rate *= 2;
rate *= 2;
else
clk->rate /= pdiv;
rate /= pdiv;
return clk->rate;
return rate;
}
static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
static int iproc_pll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned i;
unsigned int i;
struct iproc_clk *clk = to_iproc_clk(hw);
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
unsigned long diff, best_diff;
unsigned int best_idx = 0;
int ret;
if (rate == 0 || *parent_rate == 0 || !pll->vco_param)
if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
struct iproc_pll_vco_param vco_param;
ret = pll_calc_param(req->rate, req->best_parent_rate,
&vco_param);
if (ret)
return ret;
req->rate = vco_param.rate;
return 0;
}
if (!pll->vco_param)
return -EINVAL;
best_diff = ULONG_MAX;
for (i = 0; i < pll->num_vco_entries; i++) {
if (rate <= pll->vco_param[i].rate)
diff = abs(req->rate - pll->vco_param[i].rate);
if (diff <= best_diff) {
best_diff = diff;
best_idx = i;
}
/* break now if perfect match */
if (diff == 0)
break;
}
if (i == pll->num_vco_entries)
i--;
req->rate = pll->vco_param[best_idx].rate;
return pll->vco_param[i].rate;
return 0;
}
static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@ -457,13 +554,23 @@ static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct iproc_clk *clk = to_iproc_clk(hw);
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
struct iproc_pll_vco_param vco_param;
int rate_index, ret;
rate_index = pll_get_rate_index(pll, rate);
if (rate_index < 0)
return rate_index;
if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
ret = pll_calc_param(rate, parent_rate, &vco_param);
if (ret)
return ret;
} else {
rate_index = pll_get_rate_index(pll, rate);
if (rate_index < 0)
return rate_index;
ret = pll_set_rate(clk, rate_index, parent_rate);
vco_param = pll->vco_param[rate_index];
}
ret = pll_set_rate(clk, &vco_param, parent_rate);
return ret;
}
@ -471,7 +578,7 @@ static const struct clk_ops iproc_pll_ops = {
.enable = iproc_pll_enable,
.disable = iproc_pll_disable,
.recalc_rate = iproc_pll_recalc_rate,
.round_rate = iproc_pll_round_rate,
.determine_rate = iproc_pll_determine_rate,
.set_rate = iproc_pll_set_rate,
};
@ -518,6 +625,7 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
struct iproc_pll *pll = clk->pll;
u32 val;
unsigned int mdiv;
unsigned long rate;
if (parent_rate == 0)
return 0;
@ -528,32 +636,33 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
mdiv = 256;
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
clk->rate = parent_rate / (mdiv * 2);
rate = parent_rate / (mdiv * 2);
else
clk->rate = parent_rate / mdiv;
rate = parent_rate / mdiv;
return clk->rate;
return rate;
}
static long iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
static int iproc_clk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned int div;
unsigned int bestdiv;
if (rate == 0 || *parent_rate == 0)
if (req->rate == 0)
return -EINVAL;
if (req->rate == req->best_parent_rate)
return 0;
if (rate == *parent_rate)
return *parent_rate;
bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
if (bestdiv < 2)
req->rate = req->best_parent_rate;
div = DIV_ROUND_UP(*parent_rate, rate);
if (div < 2)
return *parent_rate;
if (bestdiv > 256)
bestdiv = 256;
if (div > 256)
div = 256;
req->rate = req->best_parent_rate / bestdiv;
return *parent_rate / div;
return 0;
}
static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@ -568,10 +677,10 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate == 0 || parent_rate == 0)
return -EINVAL;
div = DIV_ROUND_CLOSEST(parent_rate, rate);
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
div = DIV_ROUND_UP(parent_rate, rate * 2);
else
div = DIV_ROUND_UP(parent_rate, rate);
div /= 2;
if (div > 256)
return -EINVAL;
@ -583,10 +692,6 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val |= div << ctrl->mdiv.shift;
}
iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
clk->rate = parent_rate / (div * 2);
else
clk->rate = parent_rate / div;
return 0;
}
@ -595,7 +700,7 @@ static const struct clk_ops iproc_clk_ops = {
.enable = iproc_clk_enable,
.disable = iproc_clk_disable,
.recalc_rate = iproc_clk_recalc_rate,
.round_rate = iproc_clk_round_rate,
.determine_rate = iproc_clk_determine_rate,
.set_rate = iproc_clk_set_rate,
};
@ -629,6 +734,8 @@ void iproc_pll_clk_setup(struct device_node *node,
struct iproc_clk *iclk;
struct clk_init_data init;
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@ -637,14 +744,14 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(!pll))
return;
pll->clk_data = kzalloc(sizeof(*pll->clk_data->hws) * num_clks +
sizeof(*pll->clk_data), GFP_KERNEL);
if (WARN_ON(!pll->clk_data))
clk_data = kzalloc(sizeof(*clk_data->hws) * num_clks +
sizeof(*clk_data), GFP_KERNEL);
if (WARN_ON(!clk_data))
goto err_clk_data;
pll->clk_data->num = num_clks;
clk_data->num = num_clks;
pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
if (WARN_ON(!pll->clks))
iclk_array = kcalloc(num_clks, sizeof(struct iproc_clk), GFP_KERNEL);
if (WARN_ON(!iclk_array))
goto err_clks;
pll->control_base = of_iomap(node, 0);
@ -674,9 +781,8 @@ void iproc_pll_clk_setup(struct device_node *node,
/* initialize and register the PLL itself */
pll->ctrl = pll_ctrl;
iclk = &pll->clks[0];
iclk = &iclk_array[0];
iclk->pll = pll;
iclk->name = node->name;
init.name = node->name;
init.ops = &iproc_pll_ops;
@ -697,7 +803,7 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(ret))
goto err_pll_register;
pll->clk_data->hws[0] = &iclk->hw;
clk_data->hws[0] = &iclk->hw;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
@ -711,8 +817,7 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(ret))
goto err_clk_register;
iclk = &pll->clks[i];
iclk->name = clk_name;
iclk = &iclk_array[i];
iclk->pll = pll;
iclk->ctrl = &clk_ctrl[i];
@ -727,11 +832,10 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(ret))
goto err_clk_register;
pll->clk_data->hws[i] = &iclk->hw;
clk_data->hws[i] = &iclk->hw;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
pll->clk_data);
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
goto err_clk_register;
@ -739,7 +843,7 @@ void iproc_pll_clk_setup(struct device_node *node,
err_clk_register:
while (--i >= 0)
clk_hw_unregister(pll->clk_data->hws[i]);
clk_hw_unregister(clk_data->hws[i]);
err_pll_register:
if (pll->status_base != pll->control_base)
@ -756,10 +860,10 @@ err_asiu_iomap:
iounmap(pll->control_base);
err_pll_iomap:
kfree(pll->clks);
kfree(iclk_array);
err_clks:
kfree(pll->clk_data);
kfree(clk_data);
err_clk_data:
kfree(pll);


@ -80,6 +80,11 @@
*/
#define IPROC_CLK_PLL_RESET_ACTIVE_LOW BIT(9)
/*
 * Calculate the PLL parameters at runtime, instead of using a lookup table
 */
#define IPROC_CLK_PLL_CALC_PARAM BIT(10)
/*
* Parameters for VCO frequency configuration
*


@ -638,10 +638,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
if (IS_ERR(*hw))
return PTR_ERR(*hw);
return 0;
return PTR_ERR_OR_ZERO(*hw);
}
static int armada_3700_periph_clock_probe(struct platform_device *pdev)


@ -12,6 +12,27 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
config QCOM_A53PLL
tristate "MSM8916 A53 PLL"
depends on COMMON_CLK_QCOM
default ARCH_QCOM
help
Support for the A53 PLL on MSM8916 devices. It provides
the CPU with frequencies above 1GHz.
Say Y if you want to support higher CPU frequencies on MSM8916
devices.
config QCOM_CLK_APCS_MSM8916
tristate "MSM8916 APCS Clock Controller"
depends on COMMON_CLK_QCOM
depends on QCOM_APCS_IPC || COMPILE_TEST
default ARCH_QCOM
help
Support for the APCS Clock Controller on msm8916 devices. The
APCS manages the mux and divider which feed the CPUs.
Say Y if you want to support CPU frequency scaling on devices
such as msm8916.
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
depends on COMMON_CLK_QCOM && MFD_QCOM_RPM


@ -10,6 +10,7 @@ clk-qcom-y += clk-rcg2.o
clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += clk-regmap-mux-div.o
clk-qcom-y += reset.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
@ -32,6 +33,8 @@ obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o

drivers/clk/qcom/a53-pll.c (new file)

@ -0,0 +1,107 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm A53 PLL driver
*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/module.h>
#include "clk-pll.h"
#include "clk-regmap.h"
static const struct pll_freq_tbl a53pll_freq[] = {
{ 998400000, 52, 0x0, 0x1, 0 },
{ 1094400000, 57, 0x0, 0x1, 0 },
{ 1152000000, 62, 0x0, 0x1, 0 },
{ 1209600000, 63, 0x0, 0x1, 0 },
{ 1248000000, 65, 0x0, 0x1, 0 },
{ 1363200000, 71, 0x0, 0x1, 0 },
{ 1401600000, 73, 0x0, 0x1, 0 },
{ }
};
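For orientation, the second column of each row is the PLL's L value; assuming the usual 19.2 MHz crystal behind the "xo" parent on MSM8916 (an assumption, not stated in this diff), the first row works out as:

998400000 Hz = 52 * 19200000 Hz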
static const struct regmap_config a53pll_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
.fast_io = true,
};
static int qcom_a53pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct regmap *regmap;
struct resource *res;
struct clk_pll *pll;
void __iomem *base;
struct clk_init_data init = { };
int ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(dev, base, &a53pll_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
pll->l_reg = 0x04;
pll->m_reg = 0x08;
pll->n_reg = 0x0c;
pll->config_reg = 0x14;
pll->mode_reg = 0x00;
pll->status_reg = 0x1c;
pll->status_bit = 16;
pll->freq_tbl = a53pll_freq;
init.name = "a53pll";
init.parent_names = (const char *[]){ "xo" };
init.num_parents = 1;
init.ops = &clk_pll_sr2_ops;
init.flags = CLK_IS_CRITICAL;
pll->clkr.hw.init = &init;
ret = devm_clk_register_regmap(dev, &pll->clkr);
if (ret) {
dev_err(dev, "failed to register regmap clock: %d\n", ret);
return ret;
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
&pll->clkr.hw);
if (ret) {
dev_err(dev, "failed to add clock provider: %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id qcom_a53pll_match_table[] = {
{ .compatible = "qcom,msm8916-a53pll" },
{ }
};
static struct platform_driver qcom_a53pll_driver = {
.probe = qcom_a53pll_probe,
.driver = {
.name = "qcom-a53pll",
.of_match_table = qcom_a53pll_match_table,
},
};
module_platform_driver(qcom_a53pll_driver);
MODULE_DESCRIPTION("Qualcomm A53 PLL Driver");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,138 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm APCS clock controller driver
*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "clk-regmap.h"
#include "clk-regmap-mux-div.h"
static const u32 gpll0_a53cc_map[] = { 4, 5 };
static const char * const gpll0_a53cc[] = {
"gpll0_vote",
"a53pll",
};
/*
* We use the notifier function for switching to a temporary safe configuration
* (mux and divider), while the A53 PLL is reconfigured.
*/
static int a53cc_notifier_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
int ret = 0;
struct clk_regmap_mux_div *md = container_of(nb,
struct clk_regmap_mux_div,
clk_nb);
if (event == PRE_RATE_CHANGE)
/* set the mux and divider to a safe frequency (400 MHz) */
ret = mux_div_set_src_div(md, 4, 3);
return notifier_from_errno(ret);
}
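For reference, the safe setting programmed here selects source 4, which gpll0_a53cc_map[] maps to "gpll0_vote", with a raw divider value of 3. Using the half-integer divider arithmetic from clk-regmap-mux-div.c below (rate = parent * 2 / (raw + 1)), and assuming gpll0 runs at its usual 800 MHz on these SoCs (not shown in this diff):

800000000 * 2 / (3 + 1) = 400000000 Hz

which is the 400 MHz the comment refers to.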
static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
struct clk_regmap_mux_div *a53cc;
struct regmap *regmap;
struct clk_init_data init = { };
int ret;
regmap = dev_get_regmap(parent, NULL);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(dev, "failed to get regmap: %d\n", ret);
return ret;
}
a53cc = devm_kzalloc(dev, sizeof(*a53cc), GFP_KERNEL);
if (!a53cc)
return -ENOMEM;
init.name = "a53mux";
init.parent_names = gpll0_a53cc;
init.num_parents = ARRAY_SIZE(gpll0_a53cc);
init.ops = &clk_regmap_mux_div_ops;
init.flags = CLK_SET_RATE_PARENT;
a53cc->clkr.hw.init = &init;
a53cc->clkr.regmap = regmap;
a53cc->reg_offset = 0x50;
a53cc->hid_width = 5;
a53cc->hid_shift = 0;
a53cc->src_width = 3;
a53cc->src_shift = 8;
a53cc->parent_map = gpll0_a53cc_map;
a53cc->pclk = devm_clk_get(parent, NULL);
if (IS_ERR(a53cc->pclk)) {
ret = PTR_ERR(a53cc->pclk);
dev_err(dev, "failed to get clk: %d\n", ret);
return ret;
}
a53cc->clk_nb.notifier_call = a53cc_notifier_cb;
ret = clk_notifier_register(a53cc->pclk, &a53cc->clk_nb);
if (ret) {
dev_err(dev, "failed to register clock notifier: %d\n", ret);
return ret;
}
ret = devm_clk_register_regmap(dev, &a53cc->clkr);
if (ret) {
dev_err(dev, "failed to register regmap clock: %d\n", ret);
goto err;
}
ret = of_clk_add_hw_provider(parent->of_node, of_clk_hw_simple_get,
&a53cc->clkr.hw);
if (ret) {
dev_err(dev, "failed to add clock provider: %d\n", ret);
goto err;
}
platform_set_drvdata(pdev, a53cc);
return 0;
err:
clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb);
return ret;
}
static int qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
{
struct clk_regmap_mux_div *a53cc = platform_get_drvdata(pdev);
struct device *parent = pdev->dev.parent;
clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb);
of_clk_del_provider(parent->of_node);
return 0;
}
static struct platform_driver qcom_apcs_msm8916_clk_driver = {
.probe = qcom_apcs_msm8916_clk_probe,
.remove = qcom_apcs_msm8916_clk_remove,
.driver = {
.name = "qcom-apcs-msm8916-clk",
},
};
module_platform_driver(qcom_apcs_msm8916_clk_driver);
MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm MSM8916 APCS clock driver");


@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include "clk-regmap-mux-div.h"
#define CMD_RCGR 0x0
#define CMD_RCGR_UPDATE BIT(0)
#define CMD_RCGR_DIRTY_CFG BIT(4)
#define CMD_RCGR_ROOT_OFF BIT(31)
#define CFG_RCGR 0x4
#define to_clk_regmap_mux_div(_hw) \
container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)
{
int ret, count;
u32 val, mask;
const char *name = clk_hw_get_name(&md->clkr.hw);
val = (div << md->hid_shift) | (src << md->src_shift);
mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
((BIT(md->src_width) - 1) << md->src_shift);
ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
mask, val);
if (ret)
return ret;
ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
CMD_RCGR_UPDATE, CMD_RCGR_UPDATE);
if (ret)
return ret;
/* Wait for update to take effect */
for (count = 500; count > 0; count--) {
ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
&val);
if (ret)
return ret;
if (!(val & CMD_RCGR_UPDATE))
return 0;
udelay(1);
}
pr_err("%s: RCG did not update its configuration", name);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(mux_div_set_src_div);
static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
u32 *div)
{
u32 val, d, s;
const char *name = clk_hw_get_name(&md->clkr.hw);
regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
if (val & CMD_RCGR_DIRTY_CFG) {
pr_err("%s: RCG configuration is pending\n", name);
return;
}
regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
s = (val >> md->src_shift);
s &= BIT(md->src_width) - 1;
*src = s;
d = (val >> md->hid_shift);
d &= BIT(md->hid_width) - 1;
*div = d;
}
static inline bool is_better_rate(unsigned long req, unsigned long best,
unsigned long new)
{
return (req <= new && new < best) || (best < req && best < new);
}
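Put differently: a candidate wins if it is the smallest rate at or above the request, and only when every rate seen so far undershoots does a larger undershooting rate win. A few illustrative (made-up) values:

req = 300, best = 320, new = 310  ->  better (still >= req, and closer)
req = 300, best = 280, new = 290  ->  better (everything undershoots, take the larger)
req = 300, best = 310, new = 290  ->  not better (an overshooting candidate already exists)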
static int mux_div_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
unsigned int i, div, max_div;
unsigned long actual_rate, best_rate = 0;
unsigned long req_rate = req->rate;
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(parent);
max_div = BIT(md->hid_width) - 1;
for (div = 1; div < max_div; div++) {
parent_rate = mult_frac(req_rate, div, 2);
parent_rate = clk_hw_round_rate(parent, parent_rate);
actual_rate = mult_frac(parent_rate, 2, div);
if (is_better_rate(req_rate, best_rate, actual_rate)) {
best_rate = actual_rate;
req->rate = best_rate;
req->best_parent_rate = parent_rate;
req->best_parent_hw = parent;
}
if (actual_rate < req_rate || best_rate <= req_rate)
break;
}
}
if (!best_rate)
return -EINVAL;
return 0;
}
static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long prate, u32 src)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
int ret;
u32 div, max_div, best_src = 0, best_div = 0;
unsigned int i;
unsigned long actual_rate, best_rate = 0;
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(parent);
max_div = BIT(md->hid_width) - 1;
for (div = 1; div < max_div; div++) {
parent_rate = mult_frac(rate, div, 2);
parent_rate = clk_hw_round_rate(parent, parent_rate);
actual_rate = mult_frac(parent_rate, 2, div);
if (is_better_rate(rate, best_rate, actual_rate)) {
best_rate = actual_rate;
best_src = md->parent_map[i];
best_div = div - 1;
}
if (actual_rate < rate || best_rate <= rate)
break;
}
}
ret = mux_div_set_src_div(md, best_src, best_div);
if (!ret) {
md->div = best_div;
md->src = best_src;
}
return ret;
}
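Note how the register encoding stays consistent across these helpers: the search loops over a value div for which actual_rate = parent * 2 / div, but programs best_div = div - 1 into the field, while mux_div_recalc_rate() below reads the field back and divides by div + 1. In other words the field appears to hold

raw = 2 * divider - 1, so rate = parent_rate * 2 / (raw + 1)

e.g. a plain divide-by-2 is written as raw value 3, the same value the MSM8916 APCS notifier uses for its safe setting.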
static u8 mux_div_get_parent(struct clk_hw *hw)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
const char *name = clk_hw_get_name(hw);
u32 i, div, src = 0;
mux_div_get_src_div(md, &src, &div);
for (i = 0; i < clk_hw_get_num_parents(hw); i++)
if (src == md->parent_map[i])
return i;
pr_err("%s: Can't find parent with src %d\n", name, src);
return 0;
}
static int mux_div_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return mux_div_set_src_div(md, md->parent_map[index], md->div);
}
static int mux_div_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long prate)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return __mux_div_set_rate_and_parent(hw, rate, prate, md->src);
}
static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long prate, u8 index)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return __mux_div_set_rate_and_parent(hw, rate, prate,
md->parent_map[index]);
}
static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
u32 div, src;
int i, num_parents = clk_hw_get_num_parents(hw);
const char *name = clk_hw_get_name(hw);
mux_div_get_src_div(md, &src, &div);
for (i = 0; i < num_parents; i++)
if (src == md->parent_map[i]) {
struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(p);
return mult_frac(parent_rate, 2, div + 1);
}
pr_err("%s: Can't find parent %d\n", name, src);
return 0;
}
const struct clk_ops clk_regmap_mux_div_ops = {
.get_parent = mux_div_get_parent,
.set_parent = mux_div_set_parent,
.set_rate = mux_div_set_rate,
.set_rate_and_parent = mux_div_set_rate_and_parent,
.determine_rate = mux_div_determine_rate,
.recalc_rate = mux_div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops);


@ -0,0 +1,44 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__
#define __QCOM_CLK_REGMAP_MUX_DIV_H__
#include <linux/clk-provider.h>
#include "clk-regmap.h"
/**
* struct clk_regmap_mux_div - combined mux/divider clock
* @reg_offset: offset of the mux/divider register
* @hid_width: number of bits in half integer divider
* @hid_shift: lowest bit of hid value field
* @src_width: number of bits in source select
* @src_shift: lowest bit of source select field
* @div: the divider raw configuration value
* @src: the mux index which will be used if the clock is enabled
* @parent_map: map from parent_names index to src_sel field
* @clkr: handle between common and hardware-specific interfaces
* @pclk: the input PLL clock
* @clk_nb: clock notifier for rate changes of the input PLL
*/
struct clk_regmap_mux_div {
u32 reg_offset;
u32 hid_width;
u32 hid_shift;
u32 src_width;
u32 src_shift;
u32 div;
u32 src;
const u32 *parent_map;
struct clk_regmap clkr;
struct clk *pclk;
struct notifier_block clk_nb;
};
extern const struct clk_ops clk_regmap_mux_div_ops;
extern int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div);
#endif