From 3d6ee287a3e341c88eafd0b4620b12d640b3736b Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Tue, 12 Mar 2013 20:26:02 +0100
Subject: clk: Introduce optional is_prepared callback

To reflect whether a clk_hw is prepared, the clk_hw may implement the
optional is_prepared callback. If it is not implemented, we fall back
to using the software prepare counter.

Signed-off-by: Ulf Hansson
Acked-by: Linus Walleij
Signed-off-by: Mike Turquette
---
 drivers/clk/clk.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ed87b240580..7571b5054f3 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -451,6 +451,27 @@ unsigned long __clk_get_flags(struct clk *clk)
         return !clk ? 0 : clk->flags;
 }
 
+bool __clk_is_prepared(struct clk *clk)
+{
+        int ret;
+
+        if (!clk)
+                return false;
+
+        /*
+         * .is_prepared is optional for clocks that can prepare
+         * fall back to software usage counter if it is missing
+         */
+        if (!clk->ops->is_prepared) {
+                ret = clk->prepare_count ? 1 : 0;
+                goto out;
+        }
+
+        ret = clk->ops->is_prepared(clk->hw);
+out:
+        return !!ret;
+}
+
 bool __clk_is_enabled(struct clk *clk)
 {
         int ret;
--
cgit v1.2.3-70-g09d2

From 1c155b3dfe08351f5fc811062648969f1ba7af53 Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Tue, 12 Mar 2013 20:26:03 +0100
Subject: clk: Unprepare the unused prepared slow clocks at late init

Unused, ungated fast clocks are already disabled by clk_disable_unused
at late init. This patch extends that sequence so that unused, prepared
slow clocks are unprepared as well.

Note that unless a clk_hw implements the optional .is_prepared
callback, the clk_disable_unused sequence will not unprepare any unused
clocks, since it falls back to the software prepare counter.

Signed-off-by: Ulf Hansson
Acked-by: Linus Walleij
Signed-off-by: Mike Turquette
[mturquette@linaro.org: fixed hlist accessors per b67bfe0d]
---
 drivers/clk/clk.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7571b5054f3..c0141f3e110 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -335,6 +335,28 @@ late_initcall(clk_debug_init);
 static inline int clk_debug_register(struct clk *clk) { return 0; }
 #endif
 
+/* caller must hold prepare_lock */
+static void clk_unprepare_unused_subtree(struct clk *clk)
+{
+        struct clk *child;
+
+        if (!clk)
+                return;
+
+        hlist_for_each_entry(child, &clk->children, child_node)
+                clk_unprepare_unused_subtree(child);
+
+        if (clk->prepare_count)
+                return;
+
+        if (clk->flags & CLK_IGNORE_UNUSED)
+                return;
+
+        if (__clk_is_prepared(clk))
+                if (clk->ops->unprepare)
+                        clk->ops->unprepare(clk->hw);
+}
+
 /* caller must hold prepare_lock */
 static void clk_disable_unused_subtree(struct clk *clk)
 {
@@ -386,6 +408,12 @@ static int clk_disable_unused(void)
         hlist_for_each_entry(clk, &clk_orphan_list, child_node)
                 clk_disable_unused_subtree(clk);
 
+        hlist_for_each_entry(clk, &clk_root_list, child_node)
+                clk_unprepare_unused_subtree(clk);
+
+        hlist_for_each_entry(clk, &clk_orphan_list, child_node)
+                clk_unprepare_unused_subtree(clk);
+
         mutex_unlock(&prepare_lock);
 
         return 0;
--
cgit v1.2.3-70-g09d2
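For the two changes above to have any effect, a clock provider must report
its hardware state through .is_prepared. The following is a minimal,
hypothetical sketch of such a provider: the device, the register layout and
every foo_* name are invented for illustration and do not appear in the
patches themselves.

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>

/* hypothetical gate whose prepared state can be read back from hardware */
struct foo_gate {
        struct clk_hw hw;
        void __iomem *reg;      /* control/status register */
        u8 bit;                 /* prepare enable bit */
};

#define to_foo_gate(_hw) container_of(_hw, struct foo_gate, hw)

static int foo_gate_prepare(struct clk_hw *hw)
{
        struct foo_gate *gate = to_foo_gate(hw);

        writel(readl(gate->reg) | BIT(gate->bit), gate->reg);
        return 0;
}

static void foo_gate_unprepare(struct clk_hw *hw)
{
        struct foo_gate *gate = to_foo_gate(hw);

        writel(readl(gate->reg) & ~BIT(gate->bit), gate->reg);
}

static int foo_gate_is_prepared(struct clk_hw *hw)
{
        struct foo_gate *gate = to_foo_gate(hw);

        /* report the real hardware state, not the software counter */
        return !!(readl(gate->reg) & BIT(gate->bit));
}

static const struct clk_ops foo_gate_ops = {
        .prepare        = foo_gate_prepare,
        .unprepare      = foo_gate_unprepare,
        .is_prepared    = foo_gate_is_prepared,
};

With .is_prepared wired up like this, clk_disable_unused can detect a clock
that the bootloader left prepared even though the kernel's prepare count is
zero, and unprepare it at late init.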
From 3cc8247f1dce79511de8bf0f69ab02a46cc315b7 Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Tue, 12 Mar 2013 20:26:04 +0100
Subject: clk: Introduce optional unprepare_unused callback

An unprepare_unused callback is introduced for the same reasons that the
disable_unused callback was added. During the clk_disable_unused
sequence, any clk_hw that needs special treatment when being unprepared
shall implement the unprepare_unused callback.

Signed-off-by: Ulf Hansson
Acked-by: Linus Walleij
Signed-off-by: Mike Turquette
---
 drivers/clk/clk.c            | 7 +++++--
 include/linux/clk-provider.h | 5 +++++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0141f3e110..253792a46c0 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -352,9 +352,12 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
         if (clk->flags & CLK_IGNORE_UNUSED)
                 return;
 
-        if (__clk_is_prepared(clk))
-                if (clk->ops->unprepare)
+        if (__clk_is_prepared(clk)) {
+                if (clk->ops->unprepare_unused)
+                        clk->ops->unprepare_unused(clk->hw);
+                else if (clk->ops->unprepare)
                         clk->ops->unprepare(clk->hw);
+        }
 }
 
 /* caller must hold prepare_lock */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index ee946862e05..56e6cc12c79 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -49,6 +49,10 @@ struct clk_hw;
  *              This function is allowed to sleep. Optional, if this op is not
  *              set then the prepare count will be used.
  *
+ * @unprepare_unused: Unprepare the clock; only called from
+ *              clk_disable_unused for prepared clocks with special needs.
+ *              Called with prepare mutex held. This function may sleep.
+ *
  * @enable:     Enable the clock atomically. This must not return until the
  *              clock is generating a valid clock signal, usable by consumer
  *              devices. Called with enable_lock held. This function must not
@@ -113,6 +117,7 @@ struct clk_ops {
         int             (*prepare)(struct clk_hw *hw);
         void            (*unprepare)(struct clk_hw *hw);
         int             (*is_prepared)(struct clk_hw *hw);
+        void            (*unprepare_unused)(struct clk_hw *hw);
         int             (*enable)(struct clk_hw *hw);
         void            (*disable)(struct clk_hw *hw);
         int             (*is_enabled)(struct clk_hw *hw);
--
cgit v1.2.3-70-g09d2
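To make the new callback concrete, here is a hedged sketch of a case where
.unprepare_unused must differ from .unprepare. Everything below, including
the bar_* names, the opaque PLL and its refcounting helpers, is hypothetical
and only illustrates the intended split.

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct bar_pll;                         /* opaque shared PLL, hypothetical */
void bar_pll_put(struct bar_pll *pll);  /* drops the reference taken in .prepare */

struct bar_clk {
        struct clk_hw hw;
        void __iomem *gate_reg;
        struct bar_pll *pll;
};

#define to_bar_clk(_hw) container_of(_hw, struct bar_clk, hw)

/* normal unprepare: gate the clock, then balance the PLL reference */
static void bar_clk_unprepare(struct clk_hw *hw)
{
        struct bar_clk *bar = to_bar_clk(hw);

        writel(0, bar->gate_reg);
        bar_pll_put(bar->pll);
}

/*
 * Called only from clk_disable_unused: the clock was left prepared by
 * the bootloader, so .prepare never ran and there is no PLL reference
 * to drop. Just gate the hardware.
 */
static void bar_clk_unprepare_unused(struct clk_hw *hw)
{
        writel(0, to_bar_clk(hw)->gate_reg);
}

static const struct clk_ops bar_clk_ops = {
        /* .prepare and .is_prepared elided from this sketch */
        .unprepare              = bar_clk_unprepare,
        .unprepare_unused       = bar_clk_unprepare_unused,
};

If the two operations do not need to differ, a provider simply leaves
.unprepare_unused unset and the core falls back to .unprepare, as the clk.c
hunk above shows.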
From 5fda6858a49c2d8706adcc05f083b64af172d3eb Mon Sep 17 00:00:00 2001
From: Sachin Kamat
Date: Wed, 13 Mar 2013 15:17:49 +0530
Subject: clk: Fix incorrect return type in clk.c

The return type of clk_propagate_rate_change() is a pointer, but plain
0 was being returned. Change it to NULL. Silences the following
warning:

drivers/clk/clk.c:977:24: warning: Using plain integer as NULL pointer

Signed-off-by: Sachin Kamat
Reviewed-by: Pankaj Jangra
Signed-off-by: Mike Turquette
---
 drivers/clk/clk.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 253792a46c0..5e8ffff9936 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1026,7 +1026,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
         int ret = NOTIFY_DONE;
 
         if (clk->rate == clk->new_rate)
-                return 0;
+                return NULL;
 
         if (clk->notifier_count) {
                 ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
--
cgit v1.2.3-70-g09d2

From eab89f690ee0805c02017d7959f4f930379a8c46 Mon Sep 17 00:00:00 2001
From: Mike Turquette
Date: Thu, 28 Mar 2013 13:59:01 -0700
Subject: clk: abstract locking out into helper functions

Create locking helpers for the global mutex and global spinlock. The
definitions of these helpers will be expanded upon in the next patch,
which introduces reentrancy into the locking scheme.

Signed-off-by: Mike Turquette
Cc: Rajagopal Venkat
Cc: David Brown
Tested-by: Laurent Pinchart
Reviewed-by: Thomas Gleixner
Reviewed-by: Ulf Hansson
---
 drivers/clk/clk.c | 99 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 61 insertions(+), 38 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff9936..0b5d61201ca 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/*** locking ***/
+static void clk_prepare_lock(void)
+{
+        mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+        mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+        unsigned long flags;
+        spin_lock_irqsave(&enable_lock, flags);
+        return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+        spin_unlock_irqrestore(&enable_lock, flags);
+}
+
 /*** debugfs support ***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
         seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
         seq_printf(s, "---------------------------------------------------------------------\n");
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(c, &clk_root_list, child_node)
                 clk_summary_show_subtree(s, c, 0);
@@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
         hlist_for_each_entry(c, &clk_orphan_list, child_node)
                 clk_summary_show_subtree(s, c, 0);
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return 0;
 }
@@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
         seq_printf(s, "{");
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(c, &clk_root_list, child_node) {
                 if (!first_node)
@@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
                 clk_dump_subtree(s, c, 0);
         }
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         seq_printf(s, "}");
         return 0;
@@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
         if (!orphandir)
                 return -ENOMEM;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(clk, &clk_root_list, child_node)
                 clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
 
         inited = 1;
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return 0;
 }
@@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         hlist_for_each_entry(child, &clk->children, child_node)
                 clk_disable_unused_subtree(child);
 
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
 
         if (clk->enable_count)
                 goto unlock_out;
@@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         }
 
 unlock_out:
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
 out:
         return;
@@ -403,7 +426,7 @@ static int clk_disable_unused(void)
 {
         struct clk *clk;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         hlist_for_each_entry(clk, &clk_root_list, child_node)
                 clk_disable_unused_subtree(clk);
@@ -417,7 +440,7 @@ static int clk_disable_unused(void)
         hlist_for_each_entry(clk, &clk_orphan_list, child_node)
                 clk_unprepare_unused_subtree(clk);
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return 0;
 }
@@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         __clk_unprepare(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
 {
         int ret;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         ret = __clk_prepare(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         __clk_disable(clk);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
@@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
         unsigned long flags;
         int ret;
 
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         ret = __clk_enable(clk);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
         return ret;
 }
@@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
         unsigned long ret;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         ret = __clk_round_rate(clk, rate);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
 {
         unsigned long rate;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                 __clk_recalc_rates(clk, 0);
 
         rate = __clk_get_rate(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return rate;
 }
@@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
         int ret = 0;
 
         /* prevent racing with updates to the clock topology */
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         /* bail early if nothing to do */
         if (rate == clk->rate)
@@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
         clk_change_rate(top);
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
 {
         struct clk *parent;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
         parent = __clk_get_parent(clk);
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return parent;
 }
@@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
                 __clk_prepare(parent);
 
         /* FIXME replace with clk_is_enabled(clk) someday */
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         if (clk->enable_count)
                 __clk_enable(parent);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
         /* change clock input source */
         ret = clk->ops->set_parent(clk->hw, i);
 
         /* clean up old prepare and enable */
-        spin_lock_irqsave(&enable_lock, flags);
+        flags = clk_enable_lock();
         if (clk->enable_count)
                 __clk_disable(old_parent);
-        spin_unlock_irqrestore(&enable_lock, flags);
+        clk_enable_unlock(flags);
 
         if (clk->prepare_count)
                 __clk_unprepare(old_parent);
@@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
                 return -ENOSYS;
 
         /* prevent racing with updates to the clock topology */
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         if (clk->parent == parent)
                 goto out;
@@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
         __clk_reparent(clk, parent);
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
         if (!clk)
                 return -EINVAL;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         /* check to see if a clock with this name is already registered */
         if (__clk_lookup(clk->name)) {
@@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
         clk_debug_register(clk);
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
         if (!clk || !nb)
                 return -EINVAL;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         /* search the list of notifiers for this clk */
         list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
         clk->notifier_count++;
 
 out:
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
@@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
         if (!clk || !nb)
                 return -EINVAL;
 
-        mutex_lock(&prepare_lock);
+        clk_prepare_lock();
 
         list_for_each_entry(cn, &clk_notifier_list, node)
                 if (cn->clk == clk)
@@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
                 ret = -ENOENT;
         }
 
-        mutex_unlock(&prepare_lock);
+        clk_prepare_unlock();
 
         return ret;
 }
--
cgit v1.2.3-70-g09d2
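As a bridge to the next patch, consider the kind of provider that makes
reentrancy unavoidable. The sketch below is hypothetical: the codec_* names
and register values are invented, and only the shape of the call chain
matters. A clock prepared over i2c forces the i2c bus driver back into the
clk framework while prepare_lock is already held:

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

/* hypothetical audio PLL inside an i2c codec */
#define CODEC_PLL_CTRL  0x01
#define CODEC_PLL_EN    BIT(0)

struct codec_pll {
        struct clk_hw hw;
        struct i2c_client *client;
};

#define to_codec_pll(_hw) container_of(_hw, struct codec_pll, hw)

static int codec_pll_prepare(struct clk_hw *hw)
{
        struct codec_pll *pll = to_codec_pll(hw);
        u8 buf[2] = { CODEC_PLL_CTRL, CODEC_PLL_EN };
        struct i2c_msg msg = {
                .addr   = pll->client->addr,
                .len    = sizeof(buf),
                .buf    = buf,
        };

        /*
         * The i2c bus driver may call clk_prepare() on its controller
         * clock inside i2c_transfer(), while this task already holds
         * prepare_lock: a guaranteed deadlock unless the framework can
         * recognize the nested call.
         */
        return i2c_transfer(pll->client->adapter, &msg, 1) == 1 ? 0 : -EIO;
}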
From 533ddeb1e86f506129ee388a6cc13796dcf31311 Mon Sep 17 00:00:00 2001
From: Mike Turquette
Date: Thu, 28 Mar 2013 13:59:02 -0700
Subject: clk: allow reentrant calls into the clk framework

Reentrancy into the clock framework is necessary for clock operations
that result in nested calls to the clk api. A common example is a clock
that is prepared via an i2c transaction, such as a clock inside a
discrete audio chip or a power management IC. The i2c subsystem itself
will use the clk api, resulting in a deadlock:

clk_prepare(audio_clk)
        i2c_transfer(..)
                clk_prepare(i2c_controller_clk)

The ability to reenter the clock framework prevents this deadlock.

Other use cases exist, such as allowing .set_rate callbacks to call
clk_set_parent to achieve the best rate, or to save power in certain
configurations. Yet another example is performing pinctrl operations
from a clk_ops callback; calls into the pinctrl subsystem may call
clk_{un}prepare on an unrelated clock. Allowing nested calls to reenter
the clock framework enables all of these use cases.

Reentrancy is implemented by two global pointers that track the owner
currently holding a global lock. One pointer tracks the owner during
sleepable, mutex-protected operations and the other tracks the owner
during non-interruptible, spinlock-protected operations.

When the clk framework is entered we try to take the global lock. If it
is already held we compare the current task against the current owner;
a match implies a nested call and we reenter. If the values do not
match then we block on the lock until it is released.

Signed-off-by: Mike Turquette
Cc: Rajagopal Venkat
Cc: David Brown
Tested-by: Laurent Pinchart
Reviewed-by: Thomas Gleixner
Reviewed-by: Ulf Hansson
---
 drivers/clk/clk.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0b5d61201ca..0230c9d9597 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,10 +19,17 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
 static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
@@ -30,23 +37,56 @@ static LIST_HEAD(clk_notifier_list);
 /*** locking ***/
 static void clk_prepare_lock(void)
 {
-        mutex_lock(&prepare_lock);
+        if (!mutex_trylock(&prepare_lock)) {
+                if (prepare_owner == current) {
+                        prepare_refcnt++;
+                        return;
+                }
+                mutex_lock(&prepare_lock);
+        }
+        WARN_ON_ONCE(prepare_owner != NULL);
+        WARN_ON_ONCE(prepare_refcnt != 0);
+        prepare_owner = current;
+        prepare_refcnt = 1;
 }
 
 static void clk_prepare_unlock(void)
 {
+        WARN_ON_ONCE(prepare_owner != current);
+        WARN_ON_ONCE(prepare_refcnt == 0);
+
+        if (--prepare_refcnt)
+                return;
+        prepare_owner = NULL;
         mutex_unlock(&prepare_lock);
 }
 
 static unsigned long clk_enable_lock(void)
 {
         unsigned long flags;
-        spin_lock_irqsave(&enable_lock, flags);
+
+        if (!spin_trylock_irqsave(&enable_lock, flags)) {
+                if (enable_owner == current) {
+                        enable_refcnt++;
+                        return flags;
+                }
+                spin_lock_irqsave(&enable_lock, flags);
+        }
+        WARN_ON_ONCE(enable_owner != NULL);
+        WARN_ON_ONCE(enable_refcnt != 0);
+        enable_owner = current;
+        enable_refcnt = 1;
         return flags;
 }
 
 static void clk_enable_unlock(unsigned long flags)
 {
+        WARN_ON_ONCE(enable_owner != current);
+        WARN_ON_ONCE(enable_refcnt == 0);
+
+        if (--enable_refcnt)
+                return;
+        enable_owner = NULL;
         spin_unlock_irqrestore(&enable_lock, flags);
 }
--
cgit v1.2.3-70-g09d2
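With reentrancy in place, one of the use cases named in the commit message
above becomes straightforward. The sketch below is hypothetical: the baz_*
helpers are assumed to exist elsewhere in the driver and are not part of the
patch. It shows a .set_rate callback legally calling back into the framework:

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/kernel.h>

struct baz_clk {
        struct clk_hw hw;
        /* mux/divider register state would live here */
};

#define to_baz_clk(_hw) container_of(_hw, struct baz_clk, hw)

/* hypothetical driver helpers, assumed to exist elsewhere */
struct clk *baz_best_parent(struct baz_clk *baz, unsigned long rate);
int baz_program_divider(struct baz_clk *baz, unsigned long rate);

static int baz_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                            unsigned long parent_rate)
{
        struct baz_clk *baz = to_baz_clk(hw);
        int ret;

        /*
         * A nested call back into the framework: clk_set_parent() takes
         * prepare_lock, which this task already holds via clk_set_rate().
         * The owner/refcount scheme above lets the call recurse instead
         * of deadlocking.
         */
        ret = clk_set_parent(hw->clk, baz_best_parent(baz, rate));
        if (ret)
                return ret;

        return baz_program_divider(baz, rate);
}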