Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--  drivers/acpi/processor_idle.c | 228
1 file changed, 99 insertions(+), 129 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7bc22a471fe..66393d5c4c7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -64,7 +64,6 @@
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
-#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
 #define C2_OVERHEAD			1	/* 1us */
 #define C3_OVERHEAD			1	/* 1us */
@@ -78,6 +77,10 @@ module_param(nocst, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static s64 us_to_pm_timer_ticks(s64 t)
+{
+	return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
+}
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -101,57 +104,6 @@ static int set_max_cstate(const struct dmi_system_id *id)
 /* Actually this shouldn't be __cpuinitdata, would be better to fix the
    callers to only run once -AK */
 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
-	{ set_max_cstate, "Medion 41700", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
 	{ set_max_cstate, "Clevo 5600D", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
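
Note: the us_to_pm_timer_ticks() helper added in the second hunk replaces the US_TO_PM_TIMER_TICKS() macro removed in the first. A stand-alone sketch of the same conversion, with the kernel's div64_u64() replaced by a plain 64-bit division (PM_TIMER_FREQUENCY is the ACPI PM timer rate, 3579545 Hz):

#include <stdint.h>

#define PM_TIMER_FREQUENCY 3579545	/* ACPI PM timer rate in Hz */

static int64_t us_to_pm_timer_ticks(int64_t t)
{
	/* Multiply first, then divide: the old macro's intermediate
	 * PM_TIMER_FREQUENCY/1000 step silently dropped the 545 Hz
	 * remainder of the timer rate. */
	return (t * PM_TIMER_FREQUENCY) / 1000000;
}

The kernel goes through div64_u64() because a 64-bit divide is not available as a plain operator on all 32-bit architectures, and the input is now a signed 64-bit microsecond count from ktime rather than a raw PM timer tick delta.
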
@@ -159,25 +111,6 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 	{},
 };
 
-static inline u32 ticks_elapsed(u32 t1, u32 t2)
-{
-	if (t2 >= t1)
-		return (t2 - t1);
-	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
-		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
-	else
-		return ((0xFFFFFFFF - t1) + t2);
-}
-
-static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
-{
-	if (t2 >= t1)
-		return PM_TIMER_TICKS_TO_US(t2 - t1);
-	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
-		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
-	else
-		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
-}
 
 /*
  * Callers should disable interrupts before the call and enable
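
Note: the two helpers deleted above existed to fold PM timer wraparound back into the counter width — the timer is only 24 bits wide unless the FADT advertises a 32-bit timer. They become dead code once the idle paths below time themselves with ktime. A stand-alone restatement of the wrap logic, with the acpi_gbl_FADT flag test replaced by a plain parameter:

#include <stdint.h>

static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2, int timer_is_32bit)
{
	if (t2 >= t1)			/* no wrap */
		return t2 - t1;
	else if (!timer_is_32bit)	/* 24-bit counter wrapped */
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
	else				/* 32-bit counter wrapped */
		return (0xFFFFFFFF - t1) + t2;
}
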
@@ -206,12 +139,18 @@ static void acpi_safe_halt(void)
  * are affected too. We pick the most conservative approach: we assume
  * that the local APIC stops in both C2 and C3.
  */
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 				   struct acpi_processor_cx *cx)
 {
 	struct acpi_processor_power *pwr = &pr->power;
 	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 
+	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		type = ACPI_STATE_C1;
+
 	/*
 	 * Check, if one of the previous states already marked the lapic
 	 * unstable
@@ -223,8 +162,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 	pr->power.timer_broadcast_on_state = state;
 }
 
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(void *arg)
 {
+	struct acpi_processor *pr = (struct acpi_processor *) arg;
 	unsigned long reason;
 
 	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
@@ -234,7 +174,7 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
 }
 
 /* Power(C) State timer broadcast control */
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 				       struct acpi_processor_cx *cx,
 				       int broadcast)
 {
@@ -251,10 +191,10 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 
 #else
 
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 				   struct acpi_processor_cx *cstate) { }
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 				       struct acpi_processor_cx *cx,
 				       int broadcast)
 {
@@ -266,21 +206,44 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
  * Suspend / resume control
  */
 static int acpi_idle_suspend;
+static u32 saved_bm_rld;
+
+static void acpi_idle_bm_rld_save(void)
+{
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+}
+static void acpi_idle_bm_rld_restore(void)
+{
+	u32 resumed_bm_rld;
+
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+
+	if (resumed_bm_rld != saved_bm_rld)
+		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+	if (acpi_idle_suspend == 1)
+		return 0;
+
+	acpi_idle_bm_rld_save();
 	acpi_idle_suspend = 1;
 	return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
+	if (acpi_idle_suspend == 0)
+		return 0;
+
+	acpi_idle_bm_rld_restore();
 	acpi_idle_suspend = 0;
 	return 0;
 }
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-static int tsc_halts_in_c(int state)
+static void tsc_check_state(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
@@ -290,13 +253,17 @@ static int tsc_halts_in_c(int state)
 		 * C/P/S0/S1 states when this bit is set.
 		 */
 		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-			return 0;
+			return;
 
 		/*FALL THROUGH*/
 	default:
-		return state > ACPI_STATE_C1;
+		/* TSC could halt in idle, so notify users */
+		if (state > ACPI_STATE_C1)
+			mark_tsc_unstable("TSC halts in idle");
 	}
 }
+#else
+static void tsc_check_state(int state) { return; }
 #endif
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
@@ -549,7 +516,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 					   struct acpi_processor_cx *cx)
 {
-	static int bm_check_flag;
+	static int bm_check_flag = -1;
+	static int bm_control_flag = -1;
 
 
 	if (!cx->address)
@@ -579,12 +547,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	}
 
 	/* All the logic here assumes flags.bm_check is same across all CPUs */
-	if (!bm_check_flag) {
+	if (bm_check_flag == -1) {
 		/* Determine whether bm_check is needed based on CPU  */
 		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
 		bm_check_flag = pr->flags.bm_check;
+		bm_control_flag = pr->flags.bm_control;
 	} else {
 		pr->flags.bm_check = bm_check_flag;
+		pr->flags.bm_control = bm_control_flag;
 	}
 
 	if (pr->flags.bm_check) {
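
Note: the suspend/resume hunk above makes both entry points idempotent and saves/restores the BM_RLD bit across suspend. A generic sketch of that pattern, with hypothetical read_reg()/write_reg() standing in for acpi_read_bit_register()/acpi_write_bit_register():

unsigned int read_reg(void);		/* hypothetical accessors */
void write_reg(unsigned int v);

static int suspended;
static unsigned int saved_reg;

void my_suspend(void)
{
	if (suspended)			/* repeated calls are harmless */
		return;
	saved_reg = read_reg();		/* save before entering suspend */
	suspended = 1;
}

void my_resume(void)
{
	if (!suspended)
		return;
	if (read_reg() != saved_reg)	/* skip a redundant write */
		write_reg(saved_reg);
	suspended = 0;
}
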
@@ -630,7 +600,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 		 * In either case, the proper way to
 		 * handle BM_RLD is to set it and leave it set.
 		 */
-		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 
 	return;
 }
@@ -642,7 +612,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 	pr->power.timer_broadcast_on_state = INT_MAX;
 
-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		struct acpi_processor_cx *cx = &pr->power.states[i];
 
 		switch (cx->type) {
@@ -652,22 +622,22 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 		case ACPI_STATE_C2:
 			acpi_processor_power_verify_c2(cx);
-			if (cx->valid)
-				acpi_timer_check_state(i, pr, cx);
 			break;
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
-			if (cx->valid)
-				acpi_timer_check_state(i, pr, cx);
 			break;
 		}
+		if (!cx->valid)
+			continue;
 
-		if (cx->valid)
-			working++;
+		lapic_timer_check_state(i, pr, cx);
+		tsc_check_state(cx->type);
+		working++;
 	}
 
-	acpi_propagate_timer_broadcast(pr);
+	smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
+				 pr, 1);
 
 	return (working);
 }
@@ -721,11 +691,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 
 	seq_printf(seq, "active state:            C%zd\n"
 		   "max_cstate:              C%d\n"
-		   "bus master activity:     %08x\n"
 		   "maximum allowed latency: %d usec\n",
 		   pr->power.state ? pr->power.state - pr->power.states : 0,
-		   max_cstate, (unsigned)pr->power.bm_activity,
-		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
+		   max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
 
 	seq_puts(seq, "states:\n");
 
@@ -800,9 +768,9 @@ static int acpi_idle_bm_check(void)
 {
 	u32 bm_status = 0;
 
-	acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 	if (bm_status)
-		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 	/*
 	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
 	 * the true state of bus mastering activity; forcing us to
@@ -853,7 +821,8 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 			      struct cpuidle_state *state)
 {
-	u32 t1, t2;
+	ktime_t kt1, kt2;
+	s64 idle_time;
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
@@ -866,19 +835,22 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	/* Do not access any ACPI IO ports in suspend path */
 	if (acpi_idle_suspend) {
-		acpi_safe_halt();
 		local_irq_enable();
+		cpu_relax();
 		return 0;
 	}
 
-	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	lapic_timer_state_broadcast(pr, cx, 1);
+	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
-	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt2 = ktime_get_real();
+	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
 	local_irq_enable();
 	cx->usage++;
+	lapic_timer_state_broadcast(pr, cx, 0);
 
-	return ticks_elapsed_in_us(t1, t2);
+	return idle_time;
 }
 
 /**
@@ -891,8 +863,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 {
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-	u32 t1, t2;
-	int sleep_ticks = 0;
+	ktime_t kt1, kt2;
+	s64 idle_time;
+	s64 sleep_ticks = 0;
 
 	pr = __get_cpu_var(processors);
 
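
Note: from here on the entry paths stop reading the PM timer port with inl() and instead sample ktime around acpi_idle_do_entry(). A sketch of that timing pattern using the same kernel APIs (ktime_get_real(), ktime_sub(), ktime_to_us()); enter_state() is a hypothetical stand-in for the actual idle entry:

#include <linux/ktime.h>

void enter_state(void);			/* hypothetical idle entry */

static s64 timed_entry(void)
{
	ktime_t kt1, kt2;

	kt1 = ktime_get_real();
	enter_state();
	kt2 = ktime_get_real();

	return ktime_to_us(ktime_sub(kt2, kt1));	/* elapsed us */
}
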
@@ -920,23 +893,19 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
-	acpi_state_timer_broadcast(pr, cx, 1);
+	lapic_timer_state_broadcast(pr, cx, 1);
 
 	if (cx->type == ACPI_STATE_C3)
 		ACPI_FLUSH_CPU_CACHE();
 
-	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt1 = ktime_get_real();
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
 	acpi_idle_do_entry(cx);
-	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt2 = ktime_get_real();
+	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-	/* TSC could halt in idle, so notify users */
-	if (tsc_halts_in_c(cx->type))
-		mark_tsc_unstable("TSC halts in idle");;
-#endif
-	sleep_ticks = ticks_elapsed(t1, t2);
+	sleep_ticks = us_to_pm_timer_ticks(idle_time);
 
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
@@ -946,9 +915,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	cx->usage++;
 
-	acpi_state_timer_broadcast(pr, cx, 0);
+	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += sleep_ticks;
-	return ticks_elapsed_in_us(t1, t2);
+	return idle_time;
 }
 
 static int c3_cpu_count;
@@ -966,8 +935,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 {
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-	u32 t1, t2;
-	int sleep_ticks = 0;
+	ktime_t kt1, kt2;
+	s64 idle_time;
+	s64 sleep_ticks = 0;
+
 
 	pr = __get_cpu_var(processors);
 
@@ -1011,8 +982,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
-	acpi_state_timer_broadcast(pr, cx, 1);
+	lapic_timer_state_broadcast(pr, cx, 1);
 
+	kt1 = ktime_get_real();
 	/*
 	 * disable bus master
 	 * bm_check implies we need ARB_DIS
@@ -1028,30 +1000,25 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 		spin_unlock(&c3_lock);
 	} else if (!pr->flags.bm_check) {
 		ACPI_FLUSH_CPU_CACHE();
 	}
 
-	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	acpi_idle_do_entry(cx);
-	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
-		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		spin_unlock(&c3_lock);
 	}
+	kt2 = ktime_get_real();
+	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-	/* TSC could halt in idle, so notify users */
-	if (tsc_halts_in_c(ACPI_STATE_C3))
-		mark_tsc_unstable("TSC halts in idle");
-#endif
-	sleep_ticks = ticks_elapsed(t1, t2);
+	sleep_ticks = us_to_pm_timer_ticks(idle_time);
 
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
@@ -1060,9 +1027,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	cx->usage++;
 
-	acpi_state_timer_broadcast(pr, cx, 0);
+	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += sleep_ticks;
-	return ticks_elapsed_in_us(t1, t2);
+	return idle_time;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
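
Note: the ARB_DIS handling above is a last-in/first-out gate: the final CPU to reach C3 disables bus-master arbitration, and any CPU leaving re-enables it, all under c3_lock. A minimal sketch of that counting pattern, with a hypothetical set_global() standing in for acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, ...):

#include <linux/spinlock.h>
#include <linux/cpumask.h>

void set_global(int on);		/* hypothetical */

static DEFINE_SPINLOCK(gate_lock);
static int gate_count;

static void gate_enter(void)
{
	spin_lock(&gate_lock);
	gate_count++;
	if (gate_count == num_online_cpus())
		set_global(1);		/* last CPU in flips it on */
	spin_unlock(&gate_lock);
}

static void gate_exit(void)
{
	spin_lock(&gate_lock);
	set_global(0);			/* any CPU waking clears it */
	gate_count--;
	spin_unlock(&gate_lock);
}
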
@@ -1094,6 +1061,9 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		dev->states[i].desc[0] = '\0';
 	}
 
+	if (max_cstate == 0)
+		max_cstate = 1;
+
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
 		state = &dev->states[count];
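
Note: with the clamp above, a max_cstate boot parameter of 0 is treated as 1 so at least C1 is always registered. Separately, the wakeup paths now report idle time to the scheduler in nanoseconds by converting microseconds to PM timer ticks and back out via PM_TIMER_TICK_NS; a quick stand-alone check of what that round trip costs:

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545
#define PM_TIMER_TICK_NS   (1000000000ULL / PM_TIMER_FREQUENCY)	/* 279 ns */

int main(void)
{
	int64_t idle_us = 1000;		/* 1 ms of measured idle time */
	int64_t ticks = (idle_us * PM_TIMER_FREQUENCY) / 1000000;	/* 3579 */

	/* 3579 ticks * 279 ns = 998541 ns: the integer round trip
	 * through tick units understates 1 ms by roughly 0.15%. */
	printf("%lld ticks -> %llu ns\n", (long long)ticks,
	       (unsigned long long)(ticks * PM_TIMER_TICK_NS));
	return 0;
}
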