Untitled diff

drivers/cpufreq/cpufreq_smartass2.c -> drivers/cpufreq/cpufreq_smartassH3.c
805 lines · 29 removals · 29 additions
/*
/*
* drivers/cpufreq/cpufreq_smartass2.c
* drivers/cpufreq/cpufreq_smartassH3.c
*
*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2010 Google, Inc.
*
*
* This software is licensed under the terms of the GNU General Public
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
* may be copied, distributed, and modified under those terms.
*
*
* This program is distributed in the hope that it will be useful,
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* GNU General Public License for more details.
*
*
* Author: Erasmux
* Author: Erasmux
*
*
* Based on the interactive governor By Mike Chan (mike@android.com)
* Based on the interactive governor By Mike Chan (mike@android.com)
* which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
* which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
*
*
* SMP support based on mod by faux123
* SMP support based on mod by faux123
*
*
* For a general overview of smartassV2 see the relevant part in
* For a general overview of smartassV2 see the relevant part in
* Documentation/cpu-freq/governors.txt
* Documentation/cpu-freq/governors.txt
*
*
*/
*/
#include <linux/cpu.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/workqueue.h>
#include <linux/moduleparam.h>
#include <linux/moduleparam.h>
#include <asm/cputime.h>
#include <asm/cputime.h>
#include <linux/earlysuspend.h>
#include <linux/earlysuspend.h>
/******************** Tunable parameters: ********************/
/******************** Tunable parameters: ********************/
/*
/*
* The "ideal" frequency to use when awake. The governor will ramp up faster
* The "ideal" frequency to use when awake. The governor will ramp up faster
* towards the ideal frequency and slower after it has passed it. Similarly,
* towards the ideal frequency and slower after it has passed it. Similarly,
* lowering the frequency towards the ideal frequency is faster than below it.
* lowering the frequency towards the ideal frequency is faster than below it.
*/
*/
#define DEFAULT_AWAKE_IDEAL_FREQ 600000
#define DEFAULT_AWAKE_IDEAL_FREQ 320000
static unsigned int awake_ideal_freq;
static unsigned int awake_ideal_freq;
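/*
 * Illustrative note (editor's addition, example figures only): with the
 * SmartassH3 default awake_ideal_freq of 320000 kHz, a loaded CPU sitting
 * at, say, 245760 kHz is ramped straight up to the ideal in one step,
 * while above 320000 kHz it only climbs by ramp_up_step per sample
 * (subject to up_rate_us). The asymmetry works the same way on the way
 * down.
 */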
/*
/*
* The "ideal" frequency to use when suspended.
* The "ideal" frequency to use when suspended.
* When set to 0, the governor will not track the suspended state (meaning
* When set to 0, the governor will not track the suspended state (meaning
* that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
* that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
* also when suspended).
* also when suspended).
*/
*/
#define DEFAULT_SLEEP_IDEAL_FREQ 128000
#define DEFAULT_SLEEP_IDEAL_FREQ 122880
static unsigned int sleep_ideal_freq;
static unsigned int sleep_ideal_freq;
/*
/*
* Frequency delta when ramping up above the ideal frequency.
* Frequency delta when ramping up above the ideal frequency.
* Zero disables this and causes the governor to jump straight to the max frequency.
* Zero disables this and causes the governor to jump straight to the max frequency.
* When below the ideal frequency we always ramp up to the ideal freq.
* When below the ideal frequency we always ramp up to the ideal freq.
*/
*/
#define DEFAULT_RAMP_UP_STEP 128000
#define DEFAULT_RAMP_UP_STEP 80000
static unsigned int ramp_up_step;
static unsigned int ramp_up_step;
/*
/*
* Frequency delta when ramping down below the ideal frequency.
* Frequency delta when ramping down below the ideal frequency.
* Zero disables this and calculates the ramp-down step from the load heuristic.
* Zero disables this and calculates the ramp-down step from the load heuristic.
* When above the ideal frequency we always ramp down to the ideal freq.
* When above the ideal frequency we always ramp down to the ideal freq.
*/
*/
#define DEFAULT_RAMP_DOWN_STEP 256000
#define DEFAULT_RAMP_DOWN_STEP 80000
static unsigned int ramp_down_step;
static unsigned int ramp_down_step;
/*
/*
* CPU freq will be increased if measured load > max_cpu_load;
* CPU freq will be increased if measured load > max_cpu_load;
*/
*/
#define DEFAULT_MAX_CPU_LOAD 50
#define DEFAULT_MAX_CPU_LOAD 85
static unsigned long max_cpu_load;
static unsigned long max_cpu_load;
/*
/*
* CPU freq will be decreased if measured load < min_cpu_load;
* CPU freq will be decreased if measured load < min_cpu_load;
*/
*/
#define DEFAULT_MIN_CPU_LOAD 25
#define DEFAULT_MIN_CPU_LOAD 70
static unsigned long min_cpu_load;
static unsigned long min_cpu_load;
/*
/*
* The minimum amount of time to spend at a frequency before we can ramp up.
* The minimum amount of time to spend at a frequency before we can ramp up.
* Notice we ignore this when we are below the ideal frequency.
* Notice we ignore this when we are below the ideal frequency.
*/
*/
#define DEFAULT_UP_RATE_US 48000;
#define DEFAULT_UP_RATE_US 48000;
static unsigned long up_rate_us;
static unsigned long up_rate_us;
/*
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
* The minimum amount of time to spend at a frequency before we can ramp down.
* Notice we ignore this when we are above the ideal frequency.
* Notice we ignore this when we are above the ideal frequency.
*/
*/
#define DEFAULT_DOWN_RATE_US 99000;
#define DEFAULT_DOWN_RATE_US 49000;
static unsigned long down_rate_us;
static unsigned long down_rate_us;
/*
/*
* The frequency to set when waking up from sleep.
* The frequency to set when waking up from sleep.
* When sleep_ideal_freq=0 this will have no effect.
* When sleep_ideal_freq=0 this will have no effect.
*/
*/
#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999
#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999
static unsigned int sleep_wakeup_freq;
static unsigned int sleep_wakeup_freq;
/*
/*
* Sampling rate; I highly recommend leaving it at 2.
* Sampling rate; I highly recommend leaving it at 2.
*/
*/
#define DEFAULT_SAMPLE_RATE_JIFFIES 2
#define DEFAULT_SAMPLE_RATE_JIFFIES 2
static unsigned int sample_rate_jiffies;
static unsigned int sample_rate_jiffies;
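/*
 * Illustrative note (editor's addition): with the default
 * sample_rate_jiffies of 2 the load is re-sampled every 2 jiffies; at a
 * typical (but configuration-dependent) HZ of 100 that is about 20 ms,
 * at HZ=300 about 6.7 ms.
 */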
/*************** End of tunables ***************/
/*************** End of tunables ***************/
static void (*pm_idle_old)(void);
static void (*pm_idle_old)(void);
static atomic_t active_count = ATOMIC_INIT(0);
static atomic_t active_count = ATOMIC_INIT(0);
struct smartass_info_s {
struct smartass_info_s {
struct cpufreq_policy *cur_policy;
struct cpufreq_policy *cur_policy;
struct cpufreq_frequency_table *freq_table;
struct cpufreq_frequency_table *freq_table;
struct timer_list timer;
struct timer_list timer;
u64 time_in_idle;
u64 time_in_idle;
u64 idle_exit_time;
u64 idle_exit_time;
u64 freq_change_time;
u64 freq_change_time;
u64 freq_change_time_in_idle;
u64 freq_change_time_in_idle;
int cur_cpu_load;
int cur_cpu_load;
int old_freq;
int old_freq;
int ramp_dir;
int ramp_dir;
unsigned int enable;
unsigned int enable;
int ideal_speed;
int ideal_speed;
};
};
static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);
static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);
/* Workqueues handle frequency scaling */
/* Workqueues handle frequency scaling */
static struct workqueue_struct *up_wq;
static struct workqueue_struct *up_wq;
static struct workqueue_struct *down_wq;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_work;
static struct work_struct freq_scale_work;
static cpumask_t work_cpumask;
static cpumask_t work_cpumask;
static spinlock_t cpumask_lock;
static spinlock_t cpumask_lock;
static unsigned int suspended;
static unsigned int suspended;
#define dprintk(flag,msg...) do { \
#define dprintk(flag,msg...) do { \
if (debug_mask & flag) printk(KERN_DEBUG msg); \
if (debug_mask & flag) printk(KERN_DEBUG msg); \
} while (0)
} while (0)
enum {
enum {
SMARTASS_DEBUG_JUMPS=1,
SMARTASS_DEBUG_JUMPS=1,
SMARTASS_DEBUG_LOAD=2,
SMARTASS_DEBUG_LOAD=2,
SMARTASS_DEBUG_ALG=4
SMARTASS_DEBUG_ALG=4
};
};
/*
/*
* Combination of the above debug flags.
* Combination of the above debug flags.
*/
*/
static unsigned long debug_mask;
static unsigned long debug_mask;
static int cpufreq_governor_smartass(struct cpufreq_policy *policy,
static int cpufreq_governor_smartass_h3(struct cpufreq_policy *policy,
unsigned int event);
unsigned int event);
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3
static
static
#endif
#endif
struct cpufreq_governor cpufreq_gov_smartass2 = {
struct cpufreq_governor cpufreq_gov_smartass_h3 = {
.name = "smartassV2",
.name = "SmartassH3",
.governor = cpufreq_governor_smartass,
.governor = cpufreq_governor_smartass_h3,
.max_transition_latency = 9000000,
.max_transition_latency = 9000000,
.owner = THIS_MODULE,
.owner = THIS_MODULE,
};
};
inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) {
inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) {
if (suspend) {
if (suspend) {
this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max
this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max
policy->max > sleep_ideal_freq ?
policy->max > sleep_ideal_freq ?
(sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max;
(sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max;
} else {
} else {
this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max
this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max
policy->min < awake_ideal_freq ?
policy->min < awake_ideal_freq ?
(awake_ideal_freq < policy->max ? awake_ideal_freq : policy->max) : policy->min;
(awake_ideal_freq < policy->max ? awake_ideal_freq : policy->max) : policy->min;
}
}
}
}
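/*
 * Editor's note: the nested conditionals above are just a clamp of the
 * configured ideal frequency into the policy limits, i.e. roughly
 *
 *   ideal = suspend ? sleep_ideal_freq : awake_ideal_freq;
 *   this_smartass->ideal_speed = min(max(ideal, policy->min), policy->max);
 *
 * (a sketch for readability, not a proposed code change).
 */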
inline static void smartass_update_min_max_allcpus(void) {
inline static void smartass_update_min_max_allcpus(void) {
unsigned int i;
unsigned int i;
for_each_online_cpu(i) {
for_each_online_cpu(i) {
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i);
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i);
if (this_smartass->enable)
if (this_smartass->enable)
smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended);
smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended);
}
}
}
}
inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) {
inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) {
if (freq > (int)policy->max)
if (freq > (int)policy->max)
return policy->max;
return policy->max;
if (freq < (int)policy->min)
if (freq < (int)policy->min)
return policy->min;
return policy->min;
return freq;
return freq;
}
}
inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) {
inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) {
this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time);
this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time);
mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
}
}
inline static void work_cpumask_set(unsigned long cpu) {
inline static void work_cpumask_set(unsigned long cpu) {
unsigned long flags;
unsigned long flags;
spin_lock_irqsave(&cpumask_lock, flags);
spin_lock_irqsave(&cpumask_lock, flags);
cpumask_set_cpu(cpu, &work_cpumask);
cpumask_set_cpu(cpu, &work_cpumask);
spin_unlock_irqrestore(&cpumask_lock, flags);
spin_unlock_irqrestore(&cpumask_lock, flags);
}
}
inline static int work_cpumask_test_and_clear(unsigned long cpu) {
inline static int work_cpumask_test_and_clear(unsigned long cpu) {
unsigned long flags;
unsigned long flags;
int res = 0;
int res = 0;
spin_lock_irqsave(&cpumask_lock, flags);
spin_lock_irqsave(&cpumask_lock, flags);
res = test_and_clear_bit(cpu, work_cpumask.bits);
res = test_and_clear_bit(cpu, work_cpumask.bits);
spin_unlock_irqrestore(&cpumask_lock, flags);
spin_unlock_irqrestore(&cpumask_lock, flags);
return res;
return res;
}
}
inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass,
inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass,
int new_freq, int old_freq, int prefered_relation) {
int new_freq, int old_freq, int prefered_relation) {
int index, target;
int index, target;
struct cpufreq_frequency_table *table = this_smartass->freq_table;
struct cpufreq_frequency_table *table = this_smartass->freq_table;
if (new_freq == old_freq)
if (new_freq == old_freq)
return 0;
return 0;
new_freq = validate_freq(policy,new_freq);
new_freq = validate_freq(policy,new_freq);
if (new_freq == old_freq)
if (new_freq == old_freq)
return 0;
return 0;
if (table &&
if (table &&
!cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index))
!cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index))
{
{
target = table[index].frequency;
target = table[index].frequency;
if (target == old_freq) {
if (target == old_freq) {
// if for example we are ramping up to *at most* current + ramp_up_step
// if for example we are ramping up to *at most* current + ramp_up_step
// but there is no such frequency higher than the current, try also
// but there is no such frequency higher than the current, try also
// to ramp up to *at least* current + ramp_up_step.
// to ramp up to *at least* current + ramp_up_step.
if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H
if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H
&& !cpufreq_frequency_table_target(policy,table,new_freq,
&& !cpufreq_frequency_table_target(policy,table,new_freq,
CPUFREQ_RELATION_L,&index))
CPUFREQ_RELATION_L,&index))
target = table[index].frequency;
target = table[index].frequency;
// similarly for ramping down:
// similarly for ramping down:
else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L
else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L
&& !cpufreq_frequency_table_target(policy,table,new_freq,
&& !cpufreq_frequency_table_target(policy,table,new_freq,
CPUFREQ_RELATION_H,&index))
CPUFREQ_RELATION_H,&index))
target = table[index].frequency;
target = table[index].frequency;
}
}
if (target == old_freq) {
if (target == old_freq) {
// We should not get here:
// We should not get here:
// If we got here we tried to change to a validated new_freq which is different
// If we got here we tried to change to a validated new_freq which is different
// from old_freq, so there is no reason for us to remain at same frequency.
// from old_freq, so there is no reason for us to remain at same frequency.
printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n",
printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n",
old_freq,new_freq,target);
old_freq,new_freq,target);
return 0;
return 0;
}
}
}
}
else target = new_freq;
else target = new_freq;
__cpufreq_driver_target(policy, target, prefered_relation);
__cpufreq_driver_target(policy, target, prefered_relation);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n",
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n",
old_freq,new_freq,target,policy->cur);
old_freq,new_freq,target,policy->cur);
return target;
return target;
}
}
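/*
 * Illustrative note (editor's addition, frequencies are made-up examples):
 * suppose the table holds 600000 and 768000 kHz and we ramp up from
 * 600000 with ramp_up_step = 80000, i.e. new_freq = 680000 requested with
 * CPUFREQ_RELATION_H ("highest frequency at or below the target").
 * RELATION_H would pick 600000 again, so target_freq() retries with
 * CPUFREQ_RELATION_L and lands on 768000 instead of staying put.
 */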
static void cpufreq_smartass_timer(unsigned long cpu)
static void cpufreq_smartass_timer(unsigned long cpu)
{
{
u64 delta_idle;
u64 delta_idle;
u64 delta_time;
u64 delta_time;
int cpu_load;
int cpu_load;
int old_freq;
int old_freq;
u64 update_time;
u64 update_time;
u64 now_idle;
u64 now_idle;
int queued_work = 0;
int queued_work = 0;
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
struct cpufreq_policy *policy = this_smartass->cur_policy;
struct cpufreq_policy *policy = this_smartass->cur_policy;
now_idle = get_cpu_idle_time_us(cpu, &update_time);
now_idle = get_cpu_idle_time_us(cpu, &update_time);
old_freq = policy->cur;
old_freq = policy->cur;
if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time)
if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time)
return;
return;
delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time);
delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time);
// If timer ran less than 1ms after short-term sample started, retry.
// If timer ran less than 1ms after short-term sample started, retry.
if (delta_time < 1000) {
if (delta_time < 1000) {
if (!timer_pending(&this_smartass->timer))
if (!timer_pending(&this_smartass->timer))
reset_timer(cpu,this_smartass);
reset_timer(cpu,this_smartass);
return;
return;
}
}
if (delta_idle > delta_time)
if (delta_idle > delta_time)
cpu_load = 0;
cpu_load = 0;
else
else
cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;
cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;
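/*
 * Worked example (editor's addition): if delta_time is 20000 us and
 * delta_idle is 3000 us, cpu_load = 100 * (20000 - 3000) / 20000 = 85,
 * which with the SmartassH3 default max_cpu_load of 85 sits exactly at
 * the threshold; ramping up requires load strictly greater than
 * max_cpu_load (or delta_idle == 0).
 */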
dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n",
dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n",
old_freq,cpu_load,delta_time);
old_freq,cpu_load,delta_time);
this_smartass->cur_cpu_load = cpu_load;
this_smartass->cur_cpu_load = cpu_load;
this_smartass->old_freq = old_freq;
this_smartass->old_freq = old_freq;
// Scale up if load is above max or if there were no idle cycles since coming out of idle,
// Scale up if load is above max or if there were no idle cycles since coming out of idle,
// additionally, if we are at or above the ideal_speed, verify we have been at this frequency
// additionally, if we are at or above the ideal_speed, verify we have been at this frequency
// for at least up_rate_us:
// for at least up_rate_us:
if (cpu_load > max_cpu_load || delta_idle == 0)
if (cpu_load > max_cpu_load || delta_idle == 0)
{
{
if (old_freq < policy->max &&
if (old_freq < policy->max &&
(old_freq < this_smartass->ideal_speed || delta_idle == 0 ||
(old_freq < this_smartass->ideal_speed || delta_idle == 0 ||
cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us))
cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us))
{
{
dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n",
dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n",
old_freq,cpu_load,delta_idle);
old_freq,cpu_load,delta_idle);
this_smartass->ramp_dir = 1;
this_smartass->ramp_dir = 1;
work_cpumask_set(cpu);
work_cpumask_set(cpu);
queue_work(up_wq, &freq_scale_work);
queue_work(up_wq, &freq_scale_work);
queued_work = 1;
queued_work = 1;
}
}
else this_smartass->ramp_dir = 0;
else this_smartass->ramp_dir = 0;
}
}
// Similarly for scale down: load should be below min and if we are at or below ideal
// Similarly for scale down: load should be below min and if we are at or below ideal
// frequency we require that we have been at this frequency for at least down_rate_us:
// frequency we require that we have been at this frequency for at least down_rate_us:
else if (cpu_load < min_cpu_load && old_freq > policy->min &&
else if (cpu_load < min_cpu_load && old_freq > policy->min &&
(old_freq > this_smartass->ideal_speed ||
(old_freq > this_smartass->ideal_speed ||
cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us))
cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us))
{
{
dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n",
dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n",
old_freq,cpu_load,delta_idle);
old_freq,cpu_load,delta_idle);
this_smartass->ramp_dir = -1;
this_smartass->ramp_dir = -1;
work_cpumask_set(cpu);
work_cpumask_set(cpu);
queue_work(down_wq, &freq_scale_work);
queue_work(down_wq, &freq_scale_work);
queued_work = 1;
queued_work = 1;
}
}
else this_smartass->ramp_dir = 0;
else this_smartass->ramp_dir = 0;
// To avoid unnecessary load when the CPU is already at high load, we don't
// To avoid unnecessary load when the CPU is already at high load, we don't
// reset ourselves if we are at max speed. If and when there are idle cycles,
// reset ourselves if we are at max speed. If and when there are idle cycles,
// the idle loop will activate the timer.
// the idle loop will activate the timer.
// Additionally, if we queued some work, the work task will reset the timer
// Additionally, if we queued some work, the work task will reset the timer
// after it has done its adjustments.
// after it has done its adjustments.
if (!queued_work && old_freq < policy->max)
if (!queued_work && old_freq < policy->max)
reset_timer(cpu,this_smartass);
reset_timer(cpu,this_smartass);
}
}
static void cpufreq_idle(void)
static void cpufreq_idle(void)
{
{
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
struct cpufreq_policy *policy = this_smartass->cur_policy;
struct cpufreq_policy *policy = this_smartass->cur_policy;
if (!this_smartass->enable) {
if (!this_smartass->enable) {
pm_idle_old();
pm_idle_old();
return;
return;
}
}
if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
del_timer(&this_smartass->timer);
del_timer(&this_smartass->timer);
pm_idle_old();
pm_idle_old();
if (!timer_pending(&this_smartass->timer))
if (!timer_pending(&this_smartass->timer))
reset_timer(smp_processor_id(), this_smartass);
reset_timer(smp_processor_id(), this_smartass);
}
}
/* We use the same work function to scale up and down */
/* We use the same work function to scale up and down */
static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
{
{
unsigned int cpu;
unsigned int cpu;
int new_freq;
int new_freq;
int old_freq;
int old_freq;
int ramp_dir;
int ramp_dir;
struct smartass_info_s *this_smartass;
struct smartass_info_s *this_smartass;
struct cpufreq_policy *policy;
struct cpufreq_policy *policy;
unsigned int relation = CPUFREQ_RELATION_L;
unsigned int relation = CPUFREQ_RELATION_L;
for_each_possible_cpu(cpu) {
for_each_possible_cpu(cpu) {
this_smartass = &per_cpu(smartass_info, cpu);
this_smartass = &per_cpu(smartass_info, cpu);
if (!work_cpumask_test_and_clear(cpu))
if (!work_cpumask_test_and_clear(cpu))
continue;
continue;
ramp_dir = this_smartass->ramp_dir;
ramp_dir = this_smartass->ramp_dir;
this_smartass->ramp_dir = 0;
this_smartass->ramp_dir = 0;
old_freq = this_smartass->old_freq;
old_freq = this_smartass->old_freq;
policy = this_smartass->cur_policy;
policy = this_smartass->cur_policy;
if (old_freq != policy->cur) {
if (old_freq != policy->cur) {
// frequency was changed by someone else?
// frequency was changed by someone else?
printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
old_freq,policy->cur);
old_freq,policy->cur);
new_freq = old_freq;
new_freq = old_freq;
}
}
else if (ramp_dir > 0 && nr_running() > 1) {
else if (ramp_dir > 0 && nr_running() > 1) {
// ramp up logic:
// ramp up logic:
if (old_freq < this_smartass->ideal_speed)
if (old_freq < this_smartass->ideal_speed)
new_freq = this_smartass->ideal_speed;
new_freq = this_smartass->ideal_speed;
else if (ramp_up_step) {
else if (ramp_up_step) {
new_freq = old_freq + ramp_up_step;
new_freq = old_freq + ramp_up_step;
relation = CPUFREQ_RELATION_H;
relation = CPUFREQ_RELATION_H;
}
}
else {
else {
new_freq = policy->max;
new_freq = policy->max;
relation = CPUFREQ_RELATION_H;
relation = CPUFREQ_RELATION_H;
}
}
dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n",
dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n",
old_freq,ramp_dir,this_smartass->ideal_speed);
old_freq,ramp_dir,this_smartass->ideal_speed);
}
}
else if (ramp_dir < 0) {
else if (ramp_dir < 0) {
// ramp down logic:
// ramp down logic:
if (old_freq > this_smartass->ideal_speed) {
if (old_freq > this_smartass->ideal_speed) {
new_freq = this_smartass->ideal_speed;
new_freq = this_smartass->ideal_speed;
relation = CPUFREQ_RELATION_H;
relation = CPUFREQ_RELATION_H;
}
}
else if (ramp_down_step)
else if (ramp_down_step)
new_freq = old_freq - ramp_down_step;
new_freq = old_freq - ramp_down_step;
else {
else {
// Load heuristics: Adjust new_freq such that, assuming a linear
// Load heuristics: Adjust new_freq such that, assuming a linear
// scaling of load vs. frequency, the load in the new frequency
// scaling of load vs. frequency, the load in the new frequency
// will be max_cpu_load:
// will be max_cpu_load:
new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load;
new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load;
if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
new_freq = old_freq -1;
new_freq = old_freq -1;
}
}
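/*
 * Worked example (editor's addition, figures are made up): at
 * old_freq = 800000 kHz with cur_cpu_load = 40 and max_cpu_load = 85,
 * the heuristic asks for 800000 * 40 / 85 ~= 376470 kHz, i.e. the lowest
 * frequency that would still keep the CPU around max_cpu_load for the
 * same amount of work, assuming load scales linearly with frequency;
 * target_freq() then snaps the request to an actual table entry.
 */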
dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n",
dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n",
old_freq,ramp_dir,this_smartass->ideal_speed);
old_freq,ramp_dir,this_smartass->ideal_speed);
}
}
else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down
else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down
// before the work task gets to run?
// before the work task gets to run?
// This may also happen if we refused to ramp up because nr_running()==1
// This may also happen if we refused to ramp up because nr_running()==1
new_freq = old_freq;
new_freq = old_freq;
dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n",
dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n",
old_freq,ramp_dir,nr_running());
old_freq,ramp_dir,nr_running());
}
}
// do the actual frequency change (target_freq returns 0 if the change failed):
// do the actual frequency change (target_freq returns 0 if the change failed):
new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation);
new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation);
if (new_freq)
if (new_freq)
this_smartass->freq_change_time_in_idle =
this_smartass->freq_change_time_in_idle =
get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
// reset timer:
// reset timer:
if (new_freq < policy->max)
if (new_freq < policy->max)
reset_timer(cpu,this_smartass);
reset_timer(cpu,this_smartass);
// if we are maxed out, it is pointless to use the timer
// if we are maxed out, it is pointless to use the timer
// (the idle loop will reactivate the timer when there are idle cycles)
// (the idle loop will reactivate the timer when there are idle cycles)
else if (timer_pending(&this_smartass->timer))
else if (timer_pending(&this_smartass->timer))
del_timer(&this_smartass->timer);
del_timer(&this_smartass->timer);
}
}
}
}
static ssize_t show_debug_mask(struct cpufreq_policy *policy, char *buf)
static ssize_t show_debug_mask(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%lu\n", debug_mask);
return sprintf(buf, "%lu\n", debug_mask);
}
}
static ssize_t store_debug_mask(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_debug_mask(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0)
if (res >= 0)
debug_mask = input;
debug_mask = input;
return res;
return count;
}
}
static ssize_t show_up_rate_us(struct cpufreq_policy *policy, char *buf)
static ssize_t show_up_rate_us(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%lu\n", up_rate_us);
return sprintf(buf, "%lu\n", up_rate_us);
}
}
static ssize_t store_up_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_up_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0 && input <= 100000000)
if (res >= 0 && input >= 0 && input <= 100000000)
up_rate_us = input;
up_rate_us = input;
return res;
return count;
}
}
static ssize_t show_down_rate_us(struct cpufreq_policy *policy, char *buf)
static ssize_t show_down_rate_us(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%lu\n", down_rate_us);
return sprintf(buf, "%lu\n", down_rate_us);
}
}
static ssize_t store_down_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_down_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0 && input <= 100000000)
if (res >= 0 && input >= 0 && input <= 100000000)
down_rate_us = input;
down_rate_us = input;
return res;
return count;
}
}
static ssize_t show_sleep_ideal_freq(struct cpufreq_policy *policy, char *buf)
static ssize_t show_sleep_ideal_freq(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%u\n", sleep_ideal_freq);
return sprintf(buf, "%u\n", sleep_ideal_freq);
}
}
static ssize_t store_sleep_ideal_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_sleep_ideal_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0) {
if (res >= 0 && input >= 0) {
sleep_ideal_freq = input;
sleep_ideal_freq = input;
if (suspended)
if (suspended)
smartass_update_min_max_allcpus();
smartass_update_min_max_allcpus();
}
}
return res;
return count;
}
}
static ssize_t show_sleep_wakeup_freq(struct cpufreq_policy *policy, char *buf)
static ssize_t show_sleep_wakeup_freq(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%u\n", sleep_wakeup_freq);
return sprintf(buf, "%u\n", sleep_wakeup_freq);
}
}
static ssize_t store_sleep_wakeup_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_sleep_wakeup_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0)
if (res >= 0 && input >= 0)
sleep_wakeup_freq = input;
sleep_wakeup_freq = input;
return res;
return count;
}
}
static ssize_t show_awake_ideal_freq(struct cpufreq_policy *policy, char *buf)
static ssize_t show_awake_ideal_freq(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%u\n", awake_ideal_freq);
return sprintf(buf, "%u\n", awake_ideal_freq);
}
}
static ssize_t store_awake_ideal_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_awake_ideal_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0) {
if (res >= 0 && input >= 0) {
awake_ideal_freq = input;
awake_ideal_freq = input;
if (!suspended)
if (!suspended)
smartass_update_min_max_allcpus();
smartass_update_min_max_allcpus();
}
}
return res;
return count;
}
}
static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf)
static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%u\n", sample_rate_jiffies);
return sprintf(buf, "%u\n", sample_rate_jiffies);
}
}
static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input > 0 && input <= 1000)
if (res >= 0 && input > 0 && input <= 1000)
sample_rate_jiffies = input;
sample_rate_jiffies = input;
return res;
return count;
}
}
static ssize_t show_ramp_up_step(struct cpufreq_policy *policy, char *buf)
static ssize_t show_ramp_up_step(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%u\n", ramp_up_step);
return sprintf(buf, "%u\n", ramp_up_step);
}
}
static ssize_t store_ramp_up_step(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_ramp_up_step(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0)
if (res >= 0 && input >= 0)
ramp_up_step = input;
ramp_up_step = input;
return res;
return count;
}
}
static ssize_t show_ramp_down_step(struct cpufreq_policy *policy, char *buf)
static ssize_t show_ramp_down_step(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%u\n", ramp_down_step);
return sprintf(buf, "%u\n", ramp_down_step);
}
}
static ssize_t store_ramp_down_step(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_ramp_down_step(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input >= 0)
if (res >= 0 && input >= 0)
ramp_down_step = input;
ramp_down_step = input;
return res;
return count;
}
}
static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf)
static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%lu\n", max_cpu_load);
return sprintf(buf, "%lu\n", max_cpu_load);
}
}
static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input > 0 && input <= 100)
if (res >= 0 && input > 0 && input <= 100)
max_cpu_load = input;
max_cpu_load = input;
return res;
return count;
}
}
static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf)
static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf)
{
{
return sprintf(buf, "%lu\n", min_cpu_load);
return sprintf(buf, "%lu\n", min_cpu_load);
}
}
static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
{
{
ssize_t res;
ssize_t res;
unsigned long input;
unsigned long input;
res = strict_strtoul(buf, 0, &input);
res = strict_strtoul(buf, 0, &input);
if (res >= 0 && input > 0 && input < 100)
if (res >= 0 && input > 0 && input < 100)
min_cpu_load = input;
min_cpu_load = input;
return res;
return count;
}
}
#define define_global_rw_attr(_name) \
#define define_global_rw_attr(_name) \
static struct freq_attr _name##_attr = \
static struct freq_attr _name##_attr = \
__ATTR(_name, 0644, show_##_name, store_##_name)
__ATTR(_name, 0644, show_##_name, store_##_name)
define_global_rw_attr(debug_mask);
define_global_rw_attr(debug_mask);
define_global_rw_attr(up_rate_us);
define_global_rw_attr(up_rate_us);
define_global_rw_attr(down_rate_us);
define_global_rw_attr(down_rate_us);
define_global_rw_attr(sleep_ideal_freq);
define_global_rw_attr(sleep_ideal_freq);
define_global_rw_attr(sleep_wakeup_freq);
define_global_rw_attr(sleep_wakeup_freq);
define_global_rw_attr(awake_ideal_freq);
define_global_rw_attr(awake_ideal_freq);
define_global_rw_attr(sample_rate_jiffies);
define_global_rw_attr(sample_rate_jiffies);
define_global_rw_attr(ramp_up_step);
define_global_rw_attr(ramp_up_step);
define_global_rw_attr(ramp_down_step);
define_global_rw_attr(ramp_down_step);
define_global_rw_attr(max_cpu_load);
define_global_rw_attr(max_cpu_load);
define_global_rw_attr(min_cpu_load);
define_global_rw_attr(min_cpu_load);
static struct attribute * smartass_attributes[] = {
static struct attribute * smartass_attributes[] = {
&debug_mask_attr.attr,
&debug_mask_attr.attr,
&up_rate_us_attr.attr,
&up_rate_us_attr.attr,
&down_rate_us_attr.attr,
&down_rate_us_attr.attr,
&sleep_ideal_freq_attr.attr,
&sleep_ideal_freq_attr.attr,
&sleep_wakeup_freq_attr.attr,
&sleep_wakeup_freq_attr.attr,
&awake_ideal_freq_attr.attr,
&awake_ideal_freq_attr.attr,
&sample_rate_jiffies_attr.attr,
&sample_rate_jiffies_attr.attr,
&ramp_up_step_attr.attr,
&ramp_up_step_attr.attr,
&ramp_down_step_attr.attr,
&ramp_down_step_attr.attr,
&max_cpu_load_attr.attr,
&max_cpu_load_attr.attr,
&min_cpu_load_attr.attr,
&min_cpu_load_attr.attr,
NULL,
NULL,
};
};
static struct attribute_group smartass_attr_group = {
static struct attribute_group smartass_attr_group = {
.attrs = smartass_attributes,
.attrs = smartass_attributes,
.name = "smartass",
.name = "smartassH3",
};
};
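/*
 * Usage note (editor's addition): since the attribute group is created on
 * the policy kobject, the tunables normally show up under a path such as
 * /sys/devices/system/cpu/cpu0/cpufreq/smartassH3/ (the exact location
 * depends on the kernel's cpufreq sysfs layout), e.g.
 *
 *   echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/smartassH3/max_cpu_load
 *
 * Values are parsed by the store_* handlers above via strict_strtoul();
 * out-of-range input is silently ignored.
 */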
static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy,
static int cpufreq_governor_smartass_h3(struct cpufreq_policy *new_policy,
unsigned int event)
unsigned int event)
{
{
unsigned int cpu = new_policy->cpu;
unsigned int cpu = new_policy->cpu;
int rc;
int rc;
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
switch (event) {
switch (event) {
case CPUFREQ_GOV_START:
case CPUFREQ_GOV_START:
if ((!cpu_online(cpu)) || (!new_policy->cur))
if ((!cpu_online(cpu)) || (!new_policy->cur))
return -EINVAL;
return -EINVAL;
this_smartass->cur_policy = new_policy;
this_smartass->cur_policy = new_policy;
this_smartass->enable = 1;
this_smartass->enable = 1;
smartass_update_min_max(this_smartass,new_policy,suspended);
smartass_update_min_max(this_smartass,new_policy,suspended);
this_smartass->freq_table = cpufreq_frequency_get_table(cpu);
this_smartass->freq_table = cpufreq_frequency_get_table(cpu);
if (!this_smartass->freq_table)
if (!this_smartass->freq_table)
printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu);
printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu);
smp_wmb();
smp_wmb();
// Do not register the idle hook and create sysfs
// Do not register the idle hook and create sysfs
// entries if we have already done so.
// entries if we have already done so.
if (atomic_inc_return(&active_count) <= 1) {
if (atomic_inc_return(&active_count) <= 1) {
rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group);
rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group);
if (rc)
if (rc)
return rc;
return rc;
pm_idle_old = pm_idle;
pm_idle_old = pm_idle;
pm_idle = cpufreq_idle;
pm_idle = cpufreq_idle;
}
}
if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
reset_timer(cpu,this_smartass);
reset_timer(cpu,this_smartass);
break;
break;
case CPUFREQ_GOV_LIMITS:
case CPUFREQ_GOV_LIMITS:
smartass_update_min_max(this_smartass,new_policy,suspended);
smartass_update_min_max(this_smartass,new_policy,suspended);
if (this_smartass->cur_policy->cur > new_policy->max) {
if (this_smartass->cur_policy->cur > new_policy->max) {
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max);
__cpufreq_driver_target(this_smartass->cur_policy,
__cpufreq_driver_target(this_smartass->cur_policy,
new_policy->max, CPUFREQ_RELATION_H);
new_policy->max, CPUFREQ_RELATION_H);
}
}
else if (this_smartass->cur_policy->cur < new_policy->min) {
else if (this_smartass->cur_policy->cur < new_policy->min) {
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min);
__cpufreq_driver_target(this_smartass->cur_policy,
__cpufreq_driver_target(this_smartass->cur_policy,
new_policy->min, CPUFREQ_RELATION_L);
new_policy->min, CPUFREQ_RELATION_L);
}
}
if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
reset_timer(cpu,this_smartass);
reset_timer(cpu,this_smartass);
break;
break;
case CPUFREQ_GOV_STOP:
case CPUFREQ_GOV_STOP:
this_smartass->enable = 0;
this_smartass->enable = 0;
smp_wmb();
smp_wmb();
del_timer(&this_smartass->timer);
del_timer(&this_smartass->timer);
flush_work(&freq_scale_work);
flush_work(&freq_scale_work);
this_smartass->idle_exit_time = 0;
this_smartass->idle_exit_time = 0;
if (atomic_dec_return(&active_count) <= 1) {
if (atomic_dec_return(&active_count) <= 1) {
sysfs_remove_group(&new_policy->kobj,
sysfs_remove_group(&new_policy->kobj,
&smartass_attr_group);
&smartass_attr_group);
pm_idle = pm_idle_old;
pm_idle = pm_idle_old;
}
}
break;
break;
}
}
return 0;
return 0;
}
}
static void smartass_suspend(int cpu, int suspend)
static void smartass_suspend(int cpu, int suspend)
{
{
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
struct cpufreq_policy *policy = this_smartass->cur_policy;
struct cpufreq_policy *policy = this_smartass->cur_policy;
unsigned int new_freq;
unsigned int new_freq;
if (!this_smartass->enable)
if (!this_smartass->enable)
return;
return;
smartass_update_min_max(this_smartass,policy,suspend);
smartass_update_min_max(this_smartass,policy,suspend);
if (!suspend) { // resume at max speed:
if (!suspend) { // resume at max speed:
new_freq = validate_freq(policy,sleep_wakeup_freq);
new_freq = validate_freq(policy,sleep_wakeup_freq);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq);
__cpufreq_driver_target(policy, new_freq,
__cpufreq_driver_target(policy, new_freq,
CPUFREQ_RELATION_L);
CPUFREQ_RELATION_L);
} else {
} else {
// To avoid wakeup issues with quick sleep/wakeup cycles, don't change the actual frequency when
// To avoid wakeup issues with quick sleep/wakeup cycles, don't change the actual frequency when
// entering sleep; give things some time to settle down. Instead we just reset our statistics (and reset the timer).
// entering sleep; give things some time to settle down. Instead we just reset our statistics (and reset the timer).
// Eventually, the timer will adjust the frequency if necessary.
// Eventually, the timer will adjust the frequency if necessary.
this_smartass->freq_change_time_in_idle =
this_smartass->freq_change_time_in_idle =
get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur);
dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur);
}
}
reset_timer(smp_processor_id(),this_smartass);
reset_timer(smp_processor_id(),this_smartass);
}
}
static void smartass_early_suspend(struct early_suspend *handler) {
static void smartass_early_suspend(struct early_suspend *handler) {
int i;
int i;
if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0
if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0
return;
return;
suspended = 1;
suspended = 1;
for_each_online_cpu(i)
for_each_online_cpu(i)
smartass_suspend(i,1);
smartass_suspend(i,1);
}
}
static void smartass_late_resume(struct early_suspend *handler) {
static void smartass_late_resume(struct early_suspend *handler) {
int i;
int i;
if (!suspended) // already not suspended so nothing to do
if (!suspended) // already not suspended so nothing to do
return;
return;
suspended = 0;
suspended = 0;
for_each_online_cpu(i)
for_each_online_cpu(i)
smartass_suspend(i,0);
smartass_suspend(i,0);
}
}
static struct early_suspend smartass_power_suspend = {
static struct early_suspend smartass_power_suspend = {
.suspend = smartass_early_suspend,
.suspend = smartass_early_suspend,
.resume = smartass_late_resume,
.resume = smartass_late_resume,
#ifdef CONFIG_MACH_HERO
#ifdef CONFIG_MACH_HERO
.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
#endif
#endif
};
};
static int __init cpufreq_smartass_init(void)
static int __init cpufreq_smartass_init(void)
{
{
unsigned int i;
unsigned int i;
struct smartass_info_s *this_smartass;
struct smartass_info_s *this_smartass;
debug_mask = 0;
debug_mask = 0;
up_rate_us = DEFAULT_UP_RATE_US;
up_rate_us = DEFAULT_UP_RATE_US;
down_rate_us = DEFAULT_DOWN_RATE_US;
down_rate_us = DEFAULT_DOWN_RATE_US;