diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 83053b388372..a07708819615 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -768,6 +768,12 @@ config MSM_DEVFREQ_CPUBW
 	  agnostic interface to so that some of the devfreq governors can be
 	  shared across SoCs.
 
+config MSM_CPU_VOLTAGE_CONTROL
+	bool "Userspace CPU Voltage Control"
+	default y
+	help
+	  This enables userspace CPU Voltage Control.
+
 config MSM_AVS_HW
 	bool "Enable Adaptive Voltage Scaling (AVS)"
 	default n
diff --git a/drivers/clk/qcom/clock-krait-8974.c b/drivers/clk/qcom/clock-krait-8974.c
index e59bc2d587da..a1cf5f82e71b 100644
--- a/drivers/clk/qcom/clock-krait-8974.c
+++ b/drivers/clk/qcom/clock-krait-8974.c
@@ -28,11 +28,14 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "clock.h"
 
+
 /* Clock inputs coming into Krait subsystem */
 DEFINE_FIXED_DIV_CLK(hfpll_src_clk, 1, NULL);
 DEFINE_FIXED_DIV_CLK(acpu_aux_clk, 2, NULL);
@@ -693,6 +696,74 @@ module_param_string(table_name, table_name, sizeof(table_name), S_IRUGO);
 static unsigned int pvs_config_ver;
 module_param(pvs_config_ver, uint, S_IRUGO);
 
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+#define CPU_VDD_MAX	1200
+#define CPU_VDD_MIN	600
+
+extern int use_for_scaling(unsigned int freq);
+static unsigned int cnt;
+
+ssize_t show_UV_mV_table(struct cpufreq_policy *policy,
+			char *buf)
+{
+	int i, freq, len = 0;
+	unsigned int cpu = 0;
+	unsigned int num_levels = cpu_clk[cpu]->vdd_class->num_levels;
+
+	if (!buf)
+		return -EINVAL;
+
+	for (i = 0; i < num_levels; i++) {
+		freq = use_for_scaling(cpu_clk[cpu]->fmax[i] / 1000);
+		if (freq < 0)
+			continue;
+
+		len += sprintf(buf + len, "%dmhz: %u mV\n", freq / 1000,
+			       cpu_clk[cpu]->vdd_class->vdd_uv[i] / 1000);
+	}
+
+	return len;
+}
+
+ssize_t store_UV_mV_table(struct cpufreq_policy *policy,
+			char *buf, size_t count)
+{
+	int i, j;
+	int ret = 0;
+	unsigned int val, cpu = 0;
+	unsigned int num_levels = cpu_clk[cpu]->vdd_class->num_levels;
+	char size_cur[4];
+
+	if (cnt) {
+		cnt = 0;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_levels; i++) {
+		if (use_for_scaling(cpu_clk[cpu]->fmax[i] / 1000) < 0)
+			continue;
+
+		ret = sscanf(buf, "%u", &val);
+		if (!ret)
+			return -EINVAL;
+
+		if (val > CPU_VDD_MAX)
+			val = CPU_VDD_MAX;
+		else if (val < CPU_VDD_MIN)
+			val = CPU_VDD_MIN;
+
+		for (j = 0; j < NR_CPUS; j++)
+			cpu_clk[j]->vdd_class->vdd_uv[i] = val * 1000;
+
+		ret = sscanf(buf, "%s", size_cur);
+		cnt = strlen(size_cur);
+		buf += cnt + 1;
+	}
+
+	return ret;
+}
+#endif
+
 static int clock_krait_8974_driver_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 221d73b50874..a3f60754d17f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -39,7 +39,7 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
-static DEFINE_MUTEX(cpufreq_governor_lock);
+DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -305,10 +305,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		trace_cpu_frequency(freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
-		if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
+		if (likely(policy) && likely(policy->cpu == freqs->cpu))
 			policy->cur = freqs->new;
-			sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
-		}
 		break;
 	}
 }
@@ -504,6 +502,9 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 						&new_policy.governor))
 		return -EINVAL;
 
+	new_policy.min = new_policy.user_policy.min;
+	new_policy.max = new_policy.user_policy.max;
+
 	ret = cpufreq_set_policy(policy, &new_policy);
 
 	policy->user_policy.policy = policy->policy;
@@ -625,6 +626,14 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 }
 
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+extern ssize_t show_UV_mV_table(struct cpufreq_policy *policy,
+				char *buf);
+
+extern ssize_t store_UV_mV_table(struct cpufreq_policy *policy,
+				const char *buf, size_t count);
+#endif
+
 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
 cpufreq_freq_attr_ro(cpuinfo_min_freq);
 cpufreq_freq_attr_ro(cpuinfo_max_freq);
@@ -639,6 +648,9 @@ cpufreq_freq_attr_rw(scaling_min_freq);
 cpufreq_freq_attr_rw(scaling_max_freq);
 cpufreq_freq_attr_rw(scaling_governor);
 cpufreq_freq_attr_rw(scaling_setspeed);
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+cpufreq_freq_attr_rw(UV_mV_table);
+#endif
 
 static struct attribute *default_attrs[] = {
 	&cpuinfo_min_freq.attr,
@@ -652,6 +664,9 @@ static struct attribute *default_attrs[] = {
 	&scaling_driver.attr,
 	&scaling_available_governors.attr,
 	&scaling_setspeed.attr,
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+	&UV_mV_table.attr,
+#endif
 	NULL
 };
 
@@ -662,10 +677,15 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct cpufreq_policy *policy = to_policy(kobj);
 	struct freq_attr *fattr = to_attr(attr);
-	ssize_t ret;
+	ssize_t ret = -EINVAL;
+
+	get_online_cpus();
+
+	if (!cpu_online(policy->cpu))
+		goto unlock;
 
 	if (!down_read_trylock(&cpufreq_rwsem))
-		return -EINVAL;
+		goto unlock;
 
 	down_read(&policy->rwsem);
 
@@ -676,7 +696,8 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
-
+unlock:
+	put_online_cpus();
 	return ret;
 }
 
@@ -1200,6 +1221,27 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	return cpu_dev->id;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void update_related_cpus(struct cpufreq_policy *policy)
+{
+	unsigned int j;
+
+	for_each_cpu(j, policy->related_cpus) {
+		if (!cpufreq_driver->setpolicy)
+			strlcpy(per_cpu(cpufreq_policy_save, j).gov,
+				policy->governor->name, CPUFREQ_NAME_LEN);
+		per_cpu(cpufreq_policy_save, j).min = policy->user_policy.min;
+		per_cpu(cpufreq_policy_save, j).max = policy->user_policy.max;
+		pr_debug("Saving CPU%d user policy min %d and max %d\n",
+			j, policy->user_policy.min, policy->user_policy.max);
+	}
+}
+#else
+static void update_related_cpus(struct cpufreq_policy *policy)
+{
+}
+#endif
+
 static int __cpufreq_remove_dev_prepare(struct device *dev,
 					struct subsys_interface *sif,
 					bool frozen)
@@ -1235,20 +1277,13 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		}
 	}
 
-#ifdef CONFIG_HOTPLUG_CPU
-	if (!cpufreq_driver->setpolicy)
-		strlcpy(per_cpu(cpufreq_policy_save, cpu).gov,
-			policy->governor->name, CPUFREQ_NAME_LEN);
-	per_cpu(cpufreq_policy_save, cpu).min = policy->user_policy.min;
-	per_cpu(cpufreq_policy_save, cpu).max = policy->user_policy.max;
-	pr_debug("Saving CPU%d user policy min %d and max %d\n",
-		cpu, policy->user_policy.min, policy->user_policy.max);
-#endif
-
 	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
 	up_read(&policy->rwsem);
 
+	if (cpus == 1)
+		update_related_cpus(policy);
+
 	if (cpu != policy->cpu) {
 		sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
@@ -1275,10 +1310,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	unsigned long flags;
 	struct cpufreq_policy *policy;
 
-	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
@@ -1444,14 +1479,22 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
 
 static unsigned int __cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+	struct cpufreq_policy *policy;
 	unsigned int ret_freq = 0;
+	unsigned long flags;
 
 	if (!cpufreq_driver->get)
 		return ret_freq;
 
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
 	ret_freq = cpufreq_driver->get(cpu);
 
+	if (!policy)
+		return ret_freq;
+
 	if (ret_freq && policy->cur &&
 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 		/* verify no discrepancy between actual and
@@ -1473,12 +1516,17 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+	struct cpufreq_policy *policy;
 	unsigned int ret_freq = 0;
+	unsigned long flags;
 
 	if (cpufreq_disabled() || !cpufreq_driver)
 		return -ENOENT;
 
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
 	BUG_ON(!policy);
 
 	if (!down_read_trylock(&cpufreq_rwsem))
@@ -1696,15 +1744,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
 
-	/*
-	 * This might look like a redundant call as we are checking it again
-	 * after finding index. But it is left intentionally for cases where
-	 * exactly same freq is called again and so we can save on few function
-	 * calls.
-	 */
-	if (target_freq == policy->cur)
-		return 0;
-
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 	else if (cpufreq_driver->target_index) {
@@ -1956,6 +1995,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
 	int ret = 0, failed = 1;
+	struct cpufreq_policy *cpu0_policy = NULL;
 
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
 		 new_policy->cpu, new_policy->min, new_policy->max);
@@ -1997,6 +2037,9 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	policy->max = new_policy->max;
 	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
 
+	if (new_policy->cpu)
+		cpu0_policy = cpufreq_cpu_get(0);
+
 	pr_debug("new min and max freqs are %u - %u kHz\n",
 		 policy->min, policy->max);
 
@@ -2021,7 +2064,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 		}
 
 		/* start new governor */
-		policy->governor = new_policy->governor;
+		if (new_policy->cpu && cpu0_policy)
+			policy->governor = cpu0_policy->governor;
+		else
+			policy->governor = new_policy->governor;
 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
 			if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
 				failed = 0;
@@ -2120,9 +2166,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
 			__cpufreq_add_dev(dev, NULL, frozen);
@@ -2189,7 +2232,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	cpufreq_driver = driver_data;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	register_hotcpu_notifier(&cpufreq_cpu_notifier);
+
+	get_online_cpus();
 	ret = subsys_interface_register(&cpufreq_interface);
+	put_online_cpus();
 	if (ret)
 		goto err_null_driver;
@@ -2212,13 +2259,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		}
 	}
 
-	register_hotcpu_notifier(&cpufreq_cpu_notifier);
 	pr_debug("driver %s up and running\n", driver_data->name);
 
 	return 0;
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
 err_null_driver:
+	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e6be63561fa6..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -36,14 +36,29 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	struct cpufreq_policy *policy;
+	unsigned int sampling_rate;
 	unsigned int max_load = 0;
 	unsigned int ignore_nice;
 	unsigned int j;
 
-	if (dbs_data->cdata->governor == GOV_ONDEMAND)
+	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+		struct od_cpu_dbs_info_s *od_dbs_info =
+				dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+
+		/*
+		 * Sometimes, the ondemand governor uses an additional
+		 * multiplier to give long delays. So apply this multiplier to
+		 * the 'sampling_rate', so as to keep the wake-up-from-idle
+		 * detection logic a bit conservative.
+		 */
+		sampling_rate = od_tuners->sampling_rate;
+		sampling_rate *= od_dbs_info->rate_mult;
+
 		ignore_nice = od_tuners->ignore_nice_load;
-	else
+	} else {
+		sampling_rate = cs_tuners->sampling_rate;
 		ignore_nice = cs_tuners->ignore_nice_load;
+	}
 
 	policy = cdbs->cur_policy;
 
@@ -96,7 +111,46 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
-		load = 100 * (wall_time - idle_time) / wall_time;
+		/*
+		 * If the CPU had gone completely idle, and a task just woke up
+		 * on this CPU now, it would be unfair to calculate 'load' the
+		 * usual way for this elapsed time-window, because it will show
+		 * near-zero load, irrespective of how CPU intensive that task
+		 * actually is. This is undesirable for latency-sensitive bursty
+		 * workloads.
+		 *
+		 * To avoid this, we reuse the 'load' from the previous
+		 * time-window and give this task a chance to start with a
+		 * reasonably high CPU frequency. (However, we shouldn't over-do
+		 * this copy, lest we get stuck at a high load (high frequency)
+		 * for too long, even when the current system load has actually
+		 * dropped down. So we perform the copy only once, upon the
+		 * first wake-up from idle.)
+		 *
+		 * Detecting this situation is easy: the governor's deferrable
+		 * timer would not have fired during CPU-idle periods. Hence
+		 * an unusually large 'wall_time' (as compared to the sampling
+		 * rate) indicates this scenario.
+		 *
+		 * prev_load can be zero in two cases and we must recalculate it
+		 * for both cases:
+		 * - during long idle intervals
+		 * - explicitly set to zero
+		 */
+		if (unlikely(wall_time > (2 * sampling_rate) &&
+			     j_cdbs->prev_load)) {
+			load = j_cdbs->prev_load;
+
+			/*
+			 * Perform a destructive copy, to ensure that we copy
+			 * the previous load only once, upon the first wake-up
+			 * from idle.
+			 */
+			j_cdbs->prev_load = 0;
+		} else {
+			load = 100 * (wall_time - idle_time) / wall_time;
+			j_cdbs->prev_load = load;
+		}
 
 		if (load > max_load)
 			max_load = load;
@@ -119,8 +173,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
+	mutex_lock(&cpufreq_governor_lock);
 	if (!policy->governor_enabled)
-		return;
+		goto out_unlock;
 
 	if (!all_cpus) {
 		/*
@@ -135,6 +190,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
 	}
+
+out_unlock:
+	mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
@@ -314,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_common_info *j_cdbs =
 				dbs_data->cdata->get_cpu_cdbs(j);
+			unsigned int prev_load;
 
 			j_cdbs->cpu = j;
 			j_cdbs->cur_policy = policy;
 			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
 				&j_cdbs->prev_cpu_wall, io_busy);
+
+			prev_load = (unsigned int)
+				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
+			j_cdbs->prev_load = 100 * prev_load /
+				(unsigned int) j_cdbs->prev_cpu_wall;
+
 			if (ignore_nice)
 				j_cdbs->prev_cpu_nice =
 					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -362,6 +427,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		mutex_lock(&dbs_data->mutex);
+		if (!cpu_cdbs->cur_policy) {
+			mutex_unlock(&dbs_data->mutex);
+			break;
+		}
 		mutex_lock(&cpu_cdbs->timer_mutex);
 		if (policy->max < cpu_cdbs->cur_policy->cur)
 			__cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -371,6 +441,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				policy->min, CPUFREQ_RELATION_L);
 		dbs_check_cpu(dbs_data, cpu);
 		mutex_unlock(&cpu_cdbs->timer_mutex);
+		mutex_unlock(&dbs_data->mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index b5f2b8618949..cc401d147e72 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -134,6 +134,13 @@ struct cpu_dbs_common_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;
+	/*
+	 * Used to keep track of load in the previous interval. However, when
+	 * explicitly set to zero, it is used as a flag to ensure that we copy
+	 * the previous load to the current interval only once, upon the first
+	 * wake-up from idle.
+	 */
+	unsigned int prev_load;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
 	/*
@@ -257,6 +264,8 @@ static ssize_t show_sampling_rate_min_gov_pol				\
 	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);	\
 }
 
+extern struct mutex cpufreq_governor_lock;
+
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
 bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index ecc619ca09d0..47c6393d22a9 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -364,12 +364,16 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
 	if (per_cpu(cpufreq_stats_table, cpu))
 		return -EBUSY;
 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
-	if ((stat) == NULL)
+	if ((stat) == NULL) {
+		pr_err("Failed to alloc cpufreq_stats table\n");
 		return -ENOMEM;
+	}
 
 	ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
-	if (ret)
+	if (ret) {
+		pr_err("Failed to create cpufreq_stats sysfs\n");
 		goto error_out;
+	}
 
 	stat->cpu = cpu;
 	per_cpu(cpufreq_stats_table, cpu) = stat;
@@ -384,6 +388,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
 	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
 	if (!stat->time_in_state) {
 		ret = -ENOMEM;
+		pr_err("Failed to alloc cpufreq_stats table\n");
 		goto error_alloc;
 	}
 	stat->freq_table = (unsigned int *)(stat->time_in_state + count);
@@ -418,6 +423,8 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
 			policy->last_cpu);
 
+	if (!stat)
+		return;
 	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
 			policy->cpu, policy->last_cpu);
 	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3458d27f63b4..12e2ad0e41cd 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -135,9 +135,13 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 			continue;
 		if ((freq < policy->min) || (freq > policy->max))
 			continue;
+		if (freq == target_freq) {
+			optimal.driver_data = i;
+			break;
+		}
 		switch (relation) {
 		case CPUFREQ_RELATION_H:
-			if (freq <= target_freq) {
+			if (freq < target_freq) {
 				if (freq >= optimal.frequency) {
 					optimal.frequency = freq;
 					optimal.driver_data = i;
@@ -150,7 +154,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 			}
 			break;
 		case CPUFREQ_RELATION_L:
-			if (freq >= target_freq) {
+			if (freq > target_freq) {
 				if (freq <= optimal.frequency) {
 					optimal.frequency = freq;
 					optimal.driver_data = i;
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 322d8f02f1f8..1fe527169962 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -47,6 +47,9 @@ static struct clk *l2_clk;
 static unsigned int freq_index[NR_CPUS];
 static unsigned int max_freq_index;
 static struct cpufreq_frequency_table *freq_table;
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+static struct cpufreq_frequency_table *krait_freq_table;
+#endif
 static unsigned int *l2_khz;
 static bool is_sync;
 static unsigned long *mem_bw;
@@ -167,7 +170,7 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
 				unsigned int target_freq,
 				unsigned int relation)
 {
-	int ret = -EFAULT;
+	int ret = 0;
 	int index;
 	struct cpufreq_frequency_table *table;
 
@@ -175,6 +178,9 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
 	mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
 
+	if (target_freq == policy->cur)
+		goto done;
+
 	if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
 		pr_debug("cpufreq: cpu%d scheduling frequency change "
 				"in suspend.\n", policy->cpu);
@@ -363,12 +369,35 @@ static int msm_cpufreq_suspend(void)
 
 static int msm_cpufreq_resume(void)
 {
-	int cpu;
+	int cpu, ret;
+	struct cpufreq_policy policy;
 
 	for_each_possible_cpu(cpu) {
 		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
 	}
 
+	/*
+	 * Freq request might be rejected during suspend, resulting
+	 * in policy->cur violating min/max constraint.
+	 * Correct the frequency as soon as possible.
+	 */
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		ret = cpufreq_get_policy(&policy, cpu);
+		if (ret)
+			continue;
+		if (policy.cur <= policy.max && policy.cur >= policy.min)
+			continue;
+		ret = cpufreq_update_policy(cpu);
+		if (ret)
+			pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
+				cpu);
+		else
+			pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
+				cpu);
+	}
+	put_online_cpus();
+
 	return NOTIFY_DONE;
 }
 
@@ -495,6 +524,20 @@ static int cpufreq_parse_dt(struct device *dev)
 		freq_table[i].driver_data = i;
 	freq_table[i].frequency = CPUFREQ_TABLE_END;
 
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+	/* Create frequency table with unrounded values */
+	krait_freq_table = devm_kzalloc(dev, (nf + 1) * sizeof(*krait_freq_table),
+			GFP_KERNEL);
+	if (!krait_freq_table)
+		return -ENOMEM;
+
+	*krait_freq_table = *freq_table;
+
+	for (i = 0, j = 0; i < nf; i++, j += 3)
+		krait_freq_table[i].frequency = data[j];
+	krait_freq_table[i].frequency = CPUFREQ_TABLE_END;
+#endif
+
 	devm_kfree(dev, data);
 
 	return 0;
@@ -536,6 +579,26 @@ const struct file_operations msm_cpufreq_fops = {
 };
 #endif
 
+#ifdef CONFIG_MSM_CPU_VOLTAGE_CONTROL
+int use_for_scaling(unsigned int freq)
+{
+	unsigned int i, cpu_freq;
+
+	if (!krait_freq_table)
+		return -EINVAL;
+
+	for (i = 0; krait_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		cpu_freq = krait_freq_table[i].frequency;
+		if (cpu_freq == CPUFREQ_ENTRY_INVALID)
+			continue;
+		if (freq == cpu_freq)
+			return freq;
+	}
+
+	return -EINVAL;
+}
+#endif
+
 static int __init msm_cpufreq_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
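
Note (not part of the patch): a minimal userspace sketch of how the new UV_mV_table attribute could be exercised once this series is applied. The sysfs path and the "space-separated mV value per listed frequency" write format are inferred from show_UV_mV_table()/store_UV_mV_table() above and should be treated as assumptions, not a documented ABI; the store handler clamps each value to the CPU_VDD_MIN/CPU_VDD_MAX range (600 to 1200 mV).

#include <stdio.h>
#include <stdlib.h>

/* Assumed location: cpufreq policy attributes live under cpuX/cpufreq/. */
#define UV_TABLE "/sys/devices/system/cpu/cpu0/cpufreq/UV_mV_table"

int main(int argc, char **argv)
{
	char line[256];
	FILE *fp = fopen(UV_TABLE, "r");

	if (!fp) {
		perror("open " UV_TABLE);
		return EXIT_FAILURE;
	}

	/* Each line reads "<freq>mhz: <mV> mV", one per scalable level. */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);

	if (argc > 1) {
		/* Write back one mV value per printed line, space separated,
		 * in table order, e.g. "800 825 850 875". */
		fp = fopen(UV_TABLE, "w");
		if (!fp) {
			perror("open " UV_TABLE " for writing");
			return EXIT_FAILURE;
		}
		fprintf(fp, "%s\n", argv[1]);
		fclose(fp);
	}
	return EXIT_SUCCESS;
}

Built standalone (for example with gcc), running it without arguments dumps the current table; passing a quoted list writes one value per printed line, in order, and requires root because the attribute is a writable cpufreq sysfs file.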
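
The long comment added to dbs_check_cpu() describes the wake-from-idle load heuristic in prose; the following standalone sketch (ordinary C with made-up sample numbers, not kernel code) shows the same rule in isolation: when the sample window is much longer than the sampling rate and a previous load is stashed, reuse it exactly once, otherwise compute the load normally and stash it for the next window.

#include <stdio.h>

struct cpu_sample {
	unsigned int prev_load;	/* 0 doubles as the "already consumed" flag */
};

static unsigned int estimate_load(struct cpu_sample *s,
				  unsigned int wall_time,
				  unsigned int idle_time,
				  unsigned int sampling_rate)
{
	unsigned int load;

	if (wall_time > 2 * sampling_rate && s->prev_load) {
		/* Woke from a long idle: reuse the stashed load once. */
		load = s->prev_load;
		s->prev_load = 0;
	} else {
		load = 100 * (wall_time - idle_time) / wall_time;
		s->prev_load = load;
	}
	return load;
}

int main(void)
{
	struct cpu_sample s = { .prev_load = 0 };

	/* Busy window: 10 ms wall, 2 ms idle, 10 ms sampling rate -> 80%. */
	printf("busy: %u%%\n", estimate_load(&s, 10000, 2000, 10000));
	/* Long idle window (100 ms >> 2 * 10 ms): previous load is reused. */
	printf("after idle: %u%%\n", estimate_load(&s, 100000, 99000, 10000));
	/* Next window: flag consumed, back to the normal calculation. */
	printf("next: %u%%\n", estimate_load(&s, 10000, 5000, 10000));
	return 0;
}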