Diffstat (limited to 'arch/arm/mach-omap2/clock.c')
 -rw-r--r--  arch/arm/mach-omap2/clock.c  518
1 file changed, 510 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index d0c6d9b066d..9205ea7d8dd 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -15,6 +15,7 @@
 #undef DEBUG
 
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -25,7 +26,6 @@
 
 #include <asm/cpu.h>
 
-#include <plat/clock.h>
 #include <plat/prcm.h>
 
 #include <trace/events/power.h>
@@ -48,6 +48,10 @@ u16 cpu_mask;
  */
 static bool clkdm_control = true;
 
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+static DEFINE_SPINLOCK(clockfw_lock);
+
 /*
  * OMAP2+ specific clock functions
  */
@@ -513,12 +517,510 @@ void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
 
 /* Common data */
 
-struct clk_functions omap2_clk_functions = {
-	.clk_enable		= omap2_clk_enable,
-	.clk_disable		= omap2_clk_disable,
-	.clk_round_rate		= omap2_clk_round_rate,
-	.clk_set_rate		= omap2_clk_set_rate,
-	.clk_set_parent		= omap2_clk_set_parent,
-	.clk_disable_unused	= omap2_clk_disable_unused,
+int clk_enable(struct clk *clk)
+{
+	unsigned long flags;
+	int ret;
+
+	if (clk == NULL || IS_ERR(clk))
+		return -EINVAL;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	ret = omap2_clk_enable(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	unsigned long flags;
+
+	if (clk == NULL || IS_ERR(clk))
+		return;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (clk->usecount == 0) {
+		pr_err("Trying disable clock %s with 0 usecount\n",
+		       clk->name);
+		WARN_ON(1);
+		goto out;
+	}
+
+	omap2_clk_disable(clk);
+
+out:
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	unsigned long flags;
+	unsigned long ret;
+
+	if (clk == NULL || IS_ERR(clk))
+		return 0;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	ret = clk->rate;
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/*
+ * Optional clock functions defined in include/linux/clk.h
+ */
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long flags;
+	long ret;
+
+	if (clk == NULL || IS_ERR(clk))
+		return 0;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	ret = omap2_clk_round_rate(clk, rate);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (clk == NULL || IS_ERR(clk))
+		return ret;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	ret = omap2_clk_set_rate(clk, rate);
+	if (ret == 0)
+		propagate_rate(clk);
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
+		return ret;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	if (clk->usecount == 0) {
+		ret = omap2_clk_set_parent(clk, parent);
+		if (ret == 0)
+			propagate_rate(clk);
+	} else {
+		ret = -EBUSY;
+	}
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+	return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+/*
+ * OMAP specific clock functions shared between omap1 and omap2
+ */
+
+int __initdata mpurate;
+
+/*
+ * By default we use the rate set by the bootloader.
+ * You can override this with mpurate= cmdline option.
+ */
+static int __init omap_clk_setup(char *str)
+{
+	get_option(&str, &mpurate);
+
+	if (!mpurate)
+		return 1;
+
+	if (mpurate < 1000)
+		mpurate *= 1000000;
+
+	return 1;
+}
+__setup("mpurate=", omap_clk_setup);
+
+/* Used for clocks that always have same value as the parent clock */
+unsigned long followparent_recalc(struct clk *clk)
+{
+	return clk->parent->rate;
+}
+
+/*
+ * Used for clocks that have the same value as the parent clock,
+ * divided by some factor
+ */
+unsigned long omap_fixed_divisor_recalc(struct clk *clk)
+{
+	WARN_ON(!clk->fixed_div);
+
+	return clk->parent->rate / clk->fixed_div;
+}
+
+void clk_reparent(struct clk *child, struct clk *parent)
+{
+	list_del_init(&child->sibling);
+	if (parent)
+		list_add(&child->sibling, &parent->children);
+	child->parent = parent;
+
+	/* now do the debugfs renaming to reattach the child
+	   to the proper parent */
+}
+
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &tclk->children, sibling) {
+		if (clkp->recalc)
+			clkp->rate = clkp->recalc(clkp);
+		propagate_rate(clkp);
+	}
+}
+
+static LIST_HEAD(root_clks);
+
+/**
+ * recalculate_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent), which if the
+ * clock's .recalc is set correctly, should also propagate their rates.
+ * Called at init.
+ */
+void recalculate_root_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &root_clks, sibling) {
+		if (clkp->recalc)
+			clkp->rate = clkp->recalc(clkp);
+		propagate_rate(clkp);
+	}
+}
+
+/**
+ * clk_preinit - initialize any fields in the struct clk before clk init
+ * @clk: struct clk * to initialize
+ *
+ * Initialize any struct clk fields needed before normal clk initialization
+ * can run.  No return value.
+ */
+void clk_preinit(struct clk *clk)
+{
+	INIT_LIST_HEAD(&clk->children);
+}
+
+int clk_register(struct clk *clk)
+{
+	if (clk == NULL || IS_ERR(clk))
+		return -EINVAL;
+
+	/*
+	 * trap out already registered clocks
+	 */
+	if (clk->node.next || clk->node.prev)
+		return 0;
+
+	mutex_lock(&clocks_mutex);
+	if (clk->parent)
+		list_add(&clk->sibling, &clk->parent->children);
+	else
+		list_add(&clk->sibling, &root_clks);
+
+	list_add(&clk->node, &clocks);
+	if (clk->init)
+		clk->init(clk);
+	mutex_unlock(&clocks_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+	if (clk == NULL || IS_ERR(clk))
+		return;
+
+	mutex_lock(&clocks_mutex);
+	list_del(&clk->sibling);
+	list_del(&clk->node);
+	mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+void clk_enable_init_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &clocks, node)
+		if (clkp->flags & ENABLE_ON_INIT)
+			clk_enable(clkp);
+}
+
+/**
+ * omap_clk_get_by_name - locate OMAP struct clk by its name
+ * @name: name of the struct clk to locate
+ *
+ * Locate an OMAP struct clk by its name.  Assumes that struct clk
+ * names are unique.  Returns NULL if not found or a pointer to the
+ * struct clk if found.
+ */
+struct clk *omap_clk_get_by_name(const char *name)
+{
+	struct clk *c;
+	struct clk *ret = NULL;
+
+	mutex_lock(&clocks_mutex);
+
+	list_for_each_entry(c, &clocks, node) {
+		if (!strcmp(c->name, name)) {
+			ret = c;
+			break;
+		}
+	}
+
+	mutex_unlock(&clocks_mutex);
+
+	return ret;
+}
+
+int omap_clk_enable_autoidle_all(void)
+{
+	struct clk *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+
+	list_for_each_entry(c, &clocks, node)
+		if (c->ops->allow_idle)
+			c->ops->allow_idle(c);
+
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return 0;
+}
+
+int omap_clk_disable_autoidle_all(void)
+{
+	struct clk *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+
+	list_for_each_entry(c, &clocks, node)
+		if (c->ops->deny_idle)
+			c->ops->deny_idle(c);
+
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Low level helpers
+ */
+static int clkll_enable_null(struct clk *clk)
+{
+	return 0;
+}
+
+static void clkll_disable_null(struct clk *clk)
+{
+}
+
+const struct clkops clkops_null = {
+	.enable		= clkll_enable_null,
+	.disable	= clkll_disable_null,
 };
 
+/*
+ * Dummy clock
+ *
+ * Used for clock aliases that are needed on some OMAPs, but not others
+ */
+struct clk dummy_ck = {
+	.name	= "dummy",
+	.ops	= &clkops_null,
+};
+
+/*
+ *
+ */
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+/*
+ * Disable any unused clocks left on by the bootloader
+ */
+static int __init clk_disable_unused(void)
+{
+	struct clk *ck;
+	unsigned long flags;
+
+	pr_info("clock: disabling unused clocks to save power\n");
+
+	spin_lock_irqsave(&clockfw_lock, flags);
+	list_for_each_entry(ck, &clocks, node) {
+		if (ck->ops == &clkops_null)
+			continue;
+
+		if (ck->usecount > 0 || !ck->enable_reg)
+			continue;
+
+		omap2_clk_disable_unused(ck);
+	}
+	spin_unlock_irqrestore(&clockfw_lock, flags);
+
+	return 0;
+}
+late_initcall(clk_disable_unused);
+late_initcall(omap_clk_enable_autoidle_all);
+#endif
+
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
+/*
+ *	debugfs support to trace clock tree hierarchy and attributes
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *clk_debugfs_root;
+
+static int clk_dbg_show_summary(struct seq_file *s, void *unused)
+{
+	struct clk *c;
+	struct clk *pa;
+
+	mutex_lock(&clocks_mutex);
+	seq_printf(s, "%-30s %-30s %-10s %s\n",
+		   "clock-name", "parent-name", "rate", "use-count");
+
+	list_for_each_entry(c, &clocks, node) {
+		pa = c->parent;
+		seq_printf(s, "%-30s %-30s %-10lu %d\n",
+			   c->name, pa ? pa->name : "none", c->rate,
+			   c->usecount);
+	}
+	mutex_unlock(&clocks_mutex);
+
+	return 0;
+}
+
+static int clk_dbg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clk_dbg_show_summary, inode->i_private);
+}
+
+static const struct file_operations debug_clock_fops = {
+	.open           = clk_dbg_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int clk_debugfs_register_one(struct clk *c)
+{
+	int err;
+	struct dentry *d;
+	struct clk *pa = c->parent;
+
+	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
+	if (!d)
+		return -ENOMEM;
+	c->dent = d;
+
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	return 0;
+
+err_out:
+	debugfs_remove_recursive(c->dent);
+	return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+	int err;
+	struct clk *pa = c->parent;
+
+	if (pa && !pa->dent) {
+		err = clk_debugfs_register(pa);
+		if (err)
+			return err;
+	}
+
+	if (!c->dent) {
+		err = clk_debugfs_register_one(c);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+	struct clk *c;
+	struct dentry *d;
+	int err;
+
+	d = debugfs_create_dir("clock", NULL);
+	if (!d)
+		return -ENOMEM;
+	clk_debugfs_root = d;
+
+	list_for_each_entry(c, &clocks, node) {
+		err = clk_debugfs_register(c);
+		if (err)
+			goto err_out;
+	}
+
+	d = debugfs_create_file("summary", S_IRUGO,
+		d, NULL, &debug_clock_fops);
+	if (!d)
+		return -ENOMEM;
+
+	return 0;
+err_out:
+	debugfs_remove_recursive(clk_debugfs_root);
+	return err;
+}
+late_initcall(clk_debugfs_init);
+
+#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
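
For reference, here is a minimal consumer-side sketch (not part of the patch) of how a driver would call the clk API that this file now implements directly. The example_consumer_setup() function, the device handle and the "fck" con-id are hypothetical and only for illustration; real drivers obtain their clock names from the platform's clkdev lookup tables.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: names and structure are illustrative only. */
static int example_consumer_setup(struct device *dev)
{
	struct clk *fck;
	int ret;

	fck = clk_get(dev, "fck");	/* resolved through clkdev */
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	ret = clk_enable(fck);		/* takes clockfw_lock, bumps usecount */
	if (ret) {
		clk_put(fck);
		return ret;
	}

	dev_info(dev, "functional clock running at %lu Hz\n",
		 clk_get_rate(fck));

	clk_disable(fck);		/* pr_err + WARN if usecount is already 0 */
	clk_put(fck);

	return 0;
}

Separately, the mpurate= handler added by the patch accepts either Hz or MHz on the kernel command line: values below 1000 are scaled by 1000000 before use.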