@@ -142,7 +142,8 @@ static int _omap3_noncore_dpll_lock(struct clk *clk)
ai = omap3_dpll_autoidle_read(clk);
- omap3_dpll_deny_idle(clk);
+ if (ai)
+ omap3_dpll_deny_idle(clk);
_omap3_dpll_write_clken(clk, DPLL_LOCKED);
@@ -186,8 +187,6 @@ static int _omap3_noncore_dpll_bypass(struct clk *clk)
if (ai)
omap3_dpll_allow_idle(clk);
- else
- omap3_dpll_deny_idle(clk);
return r;
}
@@ -216,8 +215,6 @@ static int _omap3_noncore_dpll_stop(struct clk *clk)
if (ai)
omap3_dpll_allow_idle(clk);
- else
- omap3_dpll_deny_idle(clk);
return 0;
}
@@ -519,6 +516,9 @@ u32 omap3_dpll_autoidle_read(struct clk *clk)
dd = clk->dpll_data;
+ if (!dd->autoidle_reg)
+ return -EINVAL;
+
v = __raw_readl(dd->autoidle_reg);
v &= dd->autoidle_mask;
v >>= __ffs(dd->autoidle_mask);
@@ -545,6 +545,12 @@ void omap3_dpll_allow_idle(struct clk *clk)
dd = clk->dpll_data;
+ if (!dd->autoidle_reg) {
+ pr_debug("clock: DPLL %s: autoidle not supported\n",
+ clk->name);
+ return;
+ }
+
/*
* REVISIT: CORE DPLL can optionally enter low-power bypass
* by writing 0x5 instead of 0x1. Add some mechanism to
@@ -554,6 +560,7 @@ void omap3_dpll_allow_idle(struct clk *clk)
v &= ~dd->autoidle_mask;
v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
__raw_writel(v, dd->autoidle_reg);
+
}
/**
@@ -572,6 +579,12 @@ void omap3_dpll_deny_idle(struct clk *clk)
dd = clk->dpll_data;
+ if (!dd->autoidle_reg) {
+ pr_debug("clock: DPLL %s: autoidle not supported\n",
+ clk->name);
+ return;
+ }
+
v = __raw_readl(dd->autoidle_reg);
v &= ~dd->autoidle_mask;
v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);