Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c        |  4
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c | 12
-rw-r--r--  drivers/cpuidle/cpuidle-mvebu-v7.c   | 15
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c       | 22
-rw-r--r--  drivers/cpuidle/cpuidle-qcom-spm.c   |  4
-rw-r--r--  drivers/cpuidle/cpuidle-riscv-sbi.c  | 19
-rw-r--r--  drivers/cpuidle/cpuidle-tegra.c      | 31
-rw-r--r--  drivers/cpuidle/cpuidle.c            | 72
-rw-r--r--  drivers/cpuidle/dt_idle_states.c     |  2
-rw-r--r--  drivers/cpuidle/poll_state.c         |  8
10 files changed, 119 insertions(+), 70 deletions(-)
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 8c758920d699..7cfb980a357d 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -31,8 +31,8 @@
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int arm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int arm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
/*
* Pass idle state index to arm_cpuidle_suspend which in turn
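Note: the __cpuidle annotation is the common thread of this series. Once the core may call a state's ->enter() method with RCU no longer watching (see the cpuidle.c hunk further down), that method and everything it calls must be free of tracing and other instrumentation, which is what __cpuidle conveys. A minimal, untested sketch of the resulting driver shape -- the driver name and the stubbed suspend helper are hypothetical:

#include <linux/cpuidle.h>

/* Stub standing in for the platform's low-level suspend call. */
static int example_cpu_suspend(int index)
{
	return 0;	/* firmware/SoC-specific suspend elided */
}

/*
 * __cpuidle marks this as idle-path code: it may run with RCU not
 * watching, so it must not be traced or otherwise instrumented.
 */
static __cpuidle int example_enter_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv, int idx)
{
	/* The helper handles cpu_pm notifiers and the trivial state 0. */
	return CPU_PM_CPU_IDLE_ENTER(example_cpu_suspend, idx);
}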
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index abe51185f243..74972deda0ea 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -64,7 +64,8 @@ static struct cpuidle_driver bl_idle_little_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 700,
.target_residency = 2500,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM little-cluster power down",
},
@@ -85,7 +86,8 @@ static struct cpuidle_driver bl_idle_big_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 500,
.target_residency = 2000,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM big-cluster power down",
},
@@ -120,15 +122,17 @@ static int notrace bl_powerdown_finisher(unsigned long arg)
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int bl_enter_powerdown(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
cpu_pm_enter();
+ ct_cpuidle_enter();
cpu_suspend(0, bl_powerdown_finisher);
/* signals the MCPM core that CPU is out of low power state */
mcpm_cpu_powered_up();
+ ct_cpuidle_exit();
cpu_pm_exit();
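Note: this hunk shows the pattern the whole series pushes into the drivers. A state flagged CPUIDLE_FLAG_RCU_IDLE no longer has the core do the RCU-idle transition for it; instead the driver keeps cpu_pm_enter()/cpu_pm_exit() (whose notifier chain uses RCU) outside the window and brackets only the real low-level suspend with ct_cpuidle_enter()/ct_cpuidle_exit(). A condensed, untested sketch, with a hypothetical example_powerdown() standing in for the MCPM/cpu_suspend() machinery:

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>

static int example_powerdown(void)
{
	return 0;	/* MCPM/firmware power-down elided */
}

static __cpuidle int example_enter_powerdown(struct cpuidle_device *dev,
					     struct cpuidle_driver *drv, int idx)
{
	cpu_pm_enter();		/* notifiers run with RCU still watching */

	ct_cpuidle_enter();	/* RCU stops watching here ... */
	example_powerdown();
	ct_cpuidle_exit();	/* ... and starts watching again here */

	cpu_pm_exit();		/* notifiers again run with RCU watching */

	return idx;
}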
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 01a856971f05..563dba609b98 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -25,9 +25,9 @@
static int (*mvebu_v7_cpu_suspend)(int);
-static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int ret;
bool deepidle = false;
@@ -36,7 +36,10 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)
deepidle = true;
+ ct_cpuidle_enter();
ret = mvebu_v7_cpu_suspend(deepidle);
+ ct_cpuidle_exit();
+
cpu_pm_exit();
if (ret)
@@ -53,6 +56,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 100,
.power_usage = 50,
.target_residency = 1000,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU IDLE",
.desc = "CPU power down",
},
@@ -61,7 +65,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 1000,
.power_usage = 5,
.target_residency = 10000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU DEEP IDLE",
.desc = "CPU and L2 Fabric power down",
},
@@ -76,7 +80,7 @@ static struct cpuidle_driver armada370_idle_driver = {
.exit_latency = 100,
.power_usage = 5,
.target_residency = 1000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "Deep Idle",
.desc = "CPU and L2 Fabric power down",
},
@@ -91,6 +95,7 @@ static struct cpuidle_driver armada38x_idle_driver = {
.exit_latency = 10,
.power_usage = 5,
.target_residency = 100,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "Idle",
.desc = "CPU and SCU power down",
},
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 68467a325dcb..6de027f9f6f5 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -49,14 +49,9 @@ static inline u32 psci_get_domain_state(void)
return __this_cpu_read(domain_state);
}
-static inline int psci_enter_state(int idx, u32 state)
-{
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
-}
-
-static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
@@ -69,12 +64,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU topology. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
state = psci_get_domain_state();
if (!state)
@@ -82,12 +75,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -192,12 +183,12 @@ static void psci_idle_init_cpuhp(void)
pr_warn("Failed %d while setup cpuhp state\n", err);
}
-static int psci_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
- return psci_enter_state(idx, state[idx]);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}
static const struct of_device_id psci_idle_state_match[] = {
@@ -243,6 +234,7 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
psci_cpuidle_use_cpuhp = true;
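Note: two related changes meet here. dt_idle_states.c (below) now tags every DT-described state with CPUIDLE_FLAG_RCU_IDLE, so the core skips its own ct_cpuidle_enter()/ct_cpuidle_exit(), and psci_enter_idle_state() therefore switches from CPU_PM_CPU_IDLE_ENTER_PARAM() to the new _RCU flavour, which performs that transition around the firmware call itself. A hypothetical driver following the same recipe (state table contents and the stubbed firmware call are placeholders):

#include <linux/cpuidle.h>

static u32 example_state_params[CPUIDLE_STATE_MAX];	/* filled from DT at probe */

static int example_suspend_enter(u32 state)
{
	return 0;	/* firmware suspend call elided */
}

static __cpuidle int example_enter_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv, int idx)
{
	/*
	 * The _RCU helper brackets the low-level call with
	 * ct_cpuidle_enter()/ct_cpuidle_exit(), as required for states
	 * that carry CPUIDLE_FLAG_RCU_IDLE.
	 */
	return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(example_suspend_enter,
					       idx, example_state_params[idx]);
}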
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index beedf22cbe78..326bca154ac7 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -58,8 +58,8 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
return ret;
}
-static int spm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int spm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 05fe2902df9a..be383f4b6855 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -93,8 +93,8 @@ static int sbi_suspend(u32 state)
return sbi_suspend_finisher(state, 0, 0);
}
-static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
u32 state = states[idx];
@@ -106,9 +106,9 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
idx, state);
}
-static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
u32 *states = data->states;
@@ -121,12 +121,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU topology. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
+
+ ct_cpuidle_enter();
if (sbi_is_domain_state_available())
state = sbi_get_domain_state();
@@ -135,12 +135,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
ret = sbi_suspend(state) ? -1 : idx;
- ct_irq_enter_irqson();
+ ct_cpuidle_exit();
+
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -251,6 +251,7 @@ static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle =
sbi_enter_s2idle_domain_idle_state;
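Note: for the hierarchical (genpd) states the interesting part is the ordering. Runtime PM and genpd rely on RCU and may take locks, so they have to run while RCU is still watching; with the driver doing ct_cpuidle_enter() itself, those calls simply stay outside the RCU-idle window and the old ct_irq_enter_irqson()/ct_irq_exit_irqson() workaround disappears. A condensed, untested sketch of the resulting ordering (the pd_dev handling and the firmware call are placeholders):

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/pm_runtime.h>

static struct device *example_pd_dev;	/* the CPU's genpd virtual device */

static int example_suspend(u32 state)
{
	return 0;	/* SBI/PSCI-style firmware suspend elided */
}

static __cpuidle int example_enter_domain_idle_state(struct cpuidle_device *dev,
						     struct cpuidle_driver *drv,
						     int idx)
{
	int ret;

	if (cpu_pm_enter())
		return -1;

	/* Runtime PM still sees RCU watching -- no irq/RCU re-enable hack. */
	pm_runtime_put_sync_suspend(example_pd_dev);

	ct_cpuidle_enter();
	ret = example_suspend(0 /* domain state as selected by genpd */);
	ct_cpuidle_exit();

	pm_runtime_get_sync(example_pd_dev);

	cpu_pm_exit();

	return ret ? -1 : idx;
}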
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 9845629aeb6d..b203a93deac5 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -160,8 +160,8 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
return 0;
}
-static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
- int index, unsigned int cpu)
+static __cpuidle int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
{
int err;
@@ -180,9 +180,11 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
}
local_fiq_disable();
- RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
+ tegra_pm_set_cpu_in_lp2();
cpu_pm_enter();
+ ct_cpuidle_enter();
+
switch (index) {
case TEGRA_C7:
err = tegra_cpuidle_c7_enter();
@@ -197,8 +199,10 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
break;
}
+ ct_cpuidle_exit();
+
cpu_pm_exit();
- RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
+ tegra_pm_clear_cpu_in_lp2();
local_fiq_enable();
return err ?: index;
@@ -222,10 +226,11 @@ static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
return index;
}
-static int tegra_cpuidle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
+ bool do_rcu = drv->states[index].flags & CPUIDLE_FLAG_RCU_IDLE;
unsigned int cpu = cpu_logical_map(dev->cpu);
int ret;
@@ -233,9 +238,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
if (dev->states_usage[index].disable)
return -1;
- if (index == TEGRA_C1)
+ if (index == TEGRA_C1) {
+ if (do_rcu)
+ ct_cpuidle_enter();
ret = arm_cpuidle_simple_enter(dev, drv, index);
- else
+ if (do_rcu)
+ ct_cpuidle_exit();
+ } else
ret = tegra_cpuidle_state_enter(dev, index, cpu);
if (ret < 0) {
@@ -285,7 +294,8 @@ static struct cpuidle_driver tegra_idle_driver = {
.exit_latency = 2000,
.target_residency = 2200,
.power_usage = 100,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C7",
.desc = "CPU core powered off",
},
@@ -295,6 +305,7 @@ static struct cpuidle_driver tegra_idle_driver = {
.target_residency = 10000,
.power_usage = 0,
.flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE |
CPUIDLE_FLAG_COUPLED,
.name = "CC6",
.desc = "CPU cluster powered off",
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6eceb1988243..0b00f21cefe3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/idle.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
@@ -136,11 +137,13 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_s2idle_proper(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, int index)
+static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
{
- ktime_t time_start, time_end;
struct cpuidle_state *target_state = &drv->states[index];
+ ktime_t time_start, time_end;
+
+ instrumentation_begin();
time_start = ns_to_ktime(local_clock());
@@ -151,13 +154,18 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+ raw_local_irq_disable();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
tick_unfreeze();
start_critical_timings();
@@ -165,6 +173,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
dev->states_usage[index].s2idle_usage++;
+ instrumentation_end();
}
/**
@@ -199,8 +208,9 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* @drv: cpuidle driver for this cpu
* @index: index into the states table in @drv of the state to enter
*/
-int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
- int index)
+noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int entered_state;
@@ -208,6 +218,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
+ instrumentation_begin();
+
/*
* Tell the time framework to switch to a broadcast timer because our
* local timer will be shut down. If a local timer is used from another
@@ -234,11 +246,33 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
+
+ /*
+ * NOTE!!
+ *
+ * For cpuidle_state::enter() methods that do *NOT* set
+ * CPUIDLE_FLAG_RCU_IDLE RCU will be disabled here and these functions
+ * must be marked either noinstr or __cpuidle.
+ *
+ * For cpuidle_state::enter() methods that *DO* set
+ * CPUIDLE_FLAG_RCU_IDLE this isn't required, but they must mark the
+ * function calling ct_cpuidle_enter() as noinstr/__cpuidle and all
+ * functions called within the RCU-idle region.
+ */
entered_state = target_state->enter(dev, drv, index);
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+
+ if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter))
+ raw_local_irq_disable();
+
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
start_critical_timings();
sched_clock_idle_wakeup_event();
@@ -248,12 +282,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
- if (broadcast) {
- if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
-
+ if (broadcast)
tick_broadcast_exit();
- }
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
@@ -305,6 +335,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
dev->states_usage[index].rejected++;
}
+ instrumentation_end();
+
return entered_state;
}
@@ -394,7 +426,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* @dev: the cpuidle device
*
*/
-u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+__cpuidle u64 cpuidle_poll_time(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
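Note: the NOTE added to cpuidle_enter_state() is the contract the driver-side hunks implement. Without CPUIDLE_FLAG_RCU_IDLE, RCU is already off when ->enter() runs and the callback (plus everything it calls) must be noinstr/__cpuidle; with the flag, the core leaves RCU on and the callback performs ct_cpuidle_enter()/ct_cpuidle_exit() itself. Either way it must now return with interrupts disabled or trip the new "leaked IRQ state" warning. A sketch of a flag-carrying state table and matching callback, with hypothetical names throughout:

#include <linux/cpuidle.h>

static int example_firmware_suspend(void)
{
	return 0;	/* low-level suspend elided */
}

static __cpuidle int example_deep_enter(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index)
{
	ct_cpuidle_enter();		/* required because of CPUIDLE_FLAG_RCU_IDLE */
	example_firmware_suspend();
	ct_cpuidle_exit();

	return index;			/* IRQs are still disabled here */
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.states[0] = {
		.enter			= example_deep_enter,
		.exit_latency		= 300,
		.target_residency	= 1000,
		.flags			= CPUIDLE_FLAG_RCU_IDLE,
		.name			= "EX-PD",
		.desc			= "example power-down state",
	},
	.state_count = 1,
};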
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 7ca3d7d9b5ea..02aa0b39af9d 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -77,7 +77,7 @@ static int init_state_node(struct cpuidle_state *idle_state,
if (err)
desc = state_node->name;
- idle_state->flags = 0;
+ idle_state->flags = CPUIDLE_FLAG_RCU_IDLE;
if (of_property_read_bool(state_node, "local-timer-stop"))
idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
/*
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index f7e83613ae94..bdcfeaecd228 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -13,11 +13,13 @@
static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- u64 time_start = local_clock();
+ u64 time_start;
+
+ time_start = local_clock();
dev->poll_time_limit = false;
- local_irq_enable();
+ raw_local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
u64 limit;
@@ -36,6 +38,8 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
}
}
}
+ raw_local_irq_disable();
+
current_clr_polling();
return index;
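Note: the poll_state change follows from the core hunk above. cpuidle_enter_state() now warns if an enter() method returns with interrupts enabled, so poll_idle() re-disables them before returning, and it uses the raw_ variants because __cpuidle code should avoid the traced IRQ helpers. A stripped-down, untested illustration of that shape (the real poll_idle() additionally uses the polling-TIF helpers and a time limit):

#include <linux/cpuidle.h>
#include <linux/sched.h>

static int __cpuidle example_poll_idle(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	raw_local_irq_enable();		/* no IRQ-tracing instrumentation */

	while (!need_resched())
		cpu_relax();

	raw_local_irq_disable();	/* return with IRQs off, as the core expects */

	return index;
}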