Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/achware.h    2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c   17
-rw-r--r--  drivers/acpi/acpica/hwgpe.c     47
-rw-r--r--  drivers/acpi/ec.c                5
-rw-r--r--  drivers/acpi/internal.h          1
-rw-r--r--  drivers/acpi/sleep.c            24
-rw-r--r--  drivers/base/power/domain.c      2
-rw-r--r--  drivers/base/power/main.c       12
-rw-r--r--  drivers/base/power/runtime.c    36
-rw-r--r--  drivers/base/power/wakeup.c     17
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c  46
11 files changed, 142 insertions(+), 67 deletions(-)
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6ad0517553d5..ebf6453d0e21 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-u8 acpi_hw_check_all_gpes(void);
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f2de66bfd8a7..3be60673e461 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
*
* FUNCTION: acpi_any_gpe_status_set
*
- * PARAMETERS: None
+ * PARAMETERS: gpe_skip_number - Number of the GPE to skip
*
* RETURN: Whether or not the status bit is set for any GPE
*
- * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
- * of them is set or FALSE otherwise.
+ * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one
+ * represented by the "skip" argument, and return TRUE if any of
+ * them is set or FALSE otherwise.
*
******************************************************************************/
-u32 acpi_any_gpe_status_set(void)
+u32 acpi_any_gpe_status_set(u32 gpe_skip_number)
{
acpi_status status;
+ acpi_handle gpe_device;
u8 ret;
ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
@@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void)
return (FALSE);
}
- ret = acpi_hw_check_all_gpes();
+ status = acpi_get_gpe_device(gpe_skip_number, &gpe_device);
+ if (ACPI_FAILURE(status)) {
+ gpe_device = NULL;
+ }
+
+ ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number);
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return (ret);
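
[Illustrative aside, not part of the patch.] The new argument names one GPE to leave out of the check. If the number does not resolve to any GPE device — the EC driver below passes U32_MAX as exactly such a "skip nothing" sentinel — gpe_device stays NULL, the event-info lookup in acpi_hw_check_all_gpes() finds nothing, and every enabled GPE is checked. A hedged usage sketch, where ec_gpe stands for the EC's GPE number as in drivers/acpi/ec.c:

	u32 any_set   = acpi_any_gpe_status_set(U32_MAX); /* no GPE skipped */
	u32 other_set = acpi_any_gpe_status_set(ec_gpe);  /* EC GPE ignored */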
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f4c285c2f595..49c46d4dd070 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return (AE_OK);
}
+struct acpi_gpe_block_status_context {
+ struct acpi_gpe_register_info *gpe_skip_register_info;
+ u8 gpe_skip_mask;
+ u8 retval;
+};
+
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_block_status
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
+ * context - GPE list walk context data
*
* RETURN: Success
*
@@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
static acpi_status
acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
- void *ret_ptr)
+ void *context)
{
+ struct acpi_gpe_block_status_context *c = context;
struct acpi_gpe_register_info *gpe_register_info;
u64 in_enable, in_status;
acpi_status status;
- u8 *ret = ret_ptr;
+ u8 ret_mask;
u32 i;
/* Examine each GPE Register within the block */
@@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
continue;
}
- *ret |= in_enable & in_status;
+ ret_mask = in_enable & in_status;
+ if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
+ ret_mask &= ~c->gpe_skip_mask;
+ }
+ c->retval |= ret_mask;
}
return (AE_OK);
@@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
*
* FUNCTION: acpi_hw_check_all_gpes
*
- * PARAMETERS: None
+ * PARAMETERS: gpe_skip_device - GPE device of the GPE to skip
+ * gpe_skip_number - Number of the GPE to skip
*
* RETURN: Combined status of all GPEs
*
- * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
+ * represented by the "skip" arguments, and return TRUE if the
* status bit is set for at least one of them or FALSE otherwise.
*
******************************************************************************/
-u8 acpi_hw_check_all_gpes(void)
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
{
- u8 ret = 0;
+ struct acpi_gpe_block_status_context context = {
+ .gpe_skip_register_info = NULL,
+ .retval = 0,
+ };
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
- (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
+ gpe_skip_number);
+ if (gpe_event_info) {
+ context.gpe_skip_register_info = gpe_event_info->register_info;
+ context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return (ret != 0);
+ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
+ return (context.retval != 0);
}
#endif /* !ACPI_REDUCED_HARDWARE */
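
The skip is expressed as a per-register bit mask rather than a GPE-number compare, because the walk above reads whole GPE registers at a time. A sketch of the arithmetic (acpi_hw_get_gpe_register_bit() is the real helper; the open-coded shift is illustrative only):

	/* Sketch: one GPE maps to one bit of its GPE register, offset
	 * from the register's base GPE number. */
	u8 skip_mask = (u8)1 << (gpe_number - register_base_gpe_number);

	/* In the walk callback, that bit is cleared only for the register
	 * holding the skipped GPE: */
	ret_mask = in_enable & in_status;
	if (gpe_register_info == skip_register_info)
		ret_mask &= ~skip_mask;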
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d1f1cf5d4bf0..9b9094b0b73f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2037,6 +2037,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
+bool acpi_ec_other_gpes_active(void)
+{
+ return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
+}
+
bool acpi_ec_dispatch_gpe(void)
{
u32 ret;
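
Here U32_MAX serves as a "no EC registered yet" sentinel: it matches no GPE, so acpi_any_gpe_status_set() degenerates to checking all enabled GPEs. On the caller side this reads as follows (sketch; see the sleep.c hunk below):

	if (acpi_ec_other_gpes_active())
		return true;	/* a non-EC GPE is pending: genuine wakeup */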
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3616daec650b..d44c591c4ee4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -202,6 +202,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
+bool acpi_ec_other_gpes_active(void);
bool acpi_ec_dispatch_gpe(void);
#endif
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e5f95922bc21..bb1ae400ec1f 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -982,10 +982,7 @@ static int acpi_s2idle_prepare_late(void)
static void acpi_s2idle_sync(void)
{
- /*
- * The EC driver uses the system workqueue and an additional special
- * one, so those need to be flushed too.
- */
+ /* The EC driver uses special workqueues that need to be flushed. */
acpi_ec_flush_work();
acpi_os_wait_events_complete(); /* synchronize Notify handling */
}
@@ -1013,19 +1010,20 @@ static bool acpi_s2idle_wake(void)
return true;
/*
- * If there are no EC events to process and at least one of the
- * other enabled GPEs is active, the wakeup is regarded as a
- * genuine one.
- *
- * Note that the checks below must be carried out in this order
- * to avoid returning prematurely due to a change of the EC GPE
- * status bit from unset to set between the checks with the
- * status bits of all the other GPEs unset.
+ * If the status bit is set for any enabled GPE other than the
+ * EC one, the wakeup is regarded as a genuine one.
*/
- if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+ if (acpi_ec_other_gpes_active())
return true;
/*
+ * If the EC GPE status bit has not been set, the wakeup is
+ * regarded as a spurious one.
+ */
+ if (!acpi_ec_dispatch_gpe())
+ return false;
+
+ /*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.
*
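
Splitting the old combined test removes the ordering subtlety the deleted comment had to explain. Condensed, the reworked triage reads (sketch, not verbatim kernel code):

	if (acpi_ec_other_gpes_active())
		return true;	/* non-EC GPE pending: real wakeup */
	if (!acpi_ec_dispatch_gpe())
		return false;	/* EC GPE clear as well: spurious wakeup */
	/* EC GPE set: cancel the wakeup and drain EC events before re-checking. */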
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 959d6d5eb000..0a01df608849 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2653,7 +2653,7 @@ static int genpd_iterate_idle_states(struct device_node *dn,
ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
if (ret <= 0)
- return ret;
+ return ret == -ENOENT ? 0 : ret;
/* Loop over the phandles until all the requested entries are found */
of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
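
of_count_phandle_with_args() returns -ENOENT when the "domain-idle-states" property is simply absent; the change maps that case to "zero idle states" so callers only see genuine parse failures. The pattern in isolation (hedged sketch using the same OF API):

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret == -ENOENT)
		ret = 0;	/* property absent: an empty, valid description */
	if (ret < 0)
		return ret;	/* a genuine DT parse error */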
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e99a760aebd..6d1dee7051eb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,6 +40,10 @@
typedef int (*pm_callback_t)(struct device *);
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ device_links_read_lock_held())
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
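
Since the RCU-lockdep consolidation, list_for_each_entry_rcu() accepts an optional lockdep condition as a fourth argument. The wrapper supplies device_links_read_lock_held(), so these walks — which run under the device-links SRCU read lock rather than rcu_read_lock() — no longer trip lockdep's RCU-usage checks. The pattern in miniature (handle() is a hypothetical consumer):

	int idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		handle(link->supplier);	/* hypothetical per-link work */

	device_links_read_unlock(idx);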
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 16134a69bf6f..99c7da112c95 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1087,29 +1087,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
* @dev: Device to handle.
*
* Return -EINVAL if runtime PM is disabled for the device.
*
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
+ * ign_usage_count is true or the device's usage_count is non-zero, increment
+ * the counter and return 1. Otherwise return 0 without changing the counter.
+ *
+ * If ign_usage_count is true, the function can be used to prevent suspending
+ * the device when its runtime PM status is RPM_ACTIVE.
+ *
+ * If ign_usage_count is false, the function can be used to prevent suspending
+ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
+ * is non-zero.
+ *
+ * The caller is responsible for putting the device's usage count when the
+ * return value is greater than zero.
*/
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
- retval = dev->power.disable_depth > 0 ? -EINVAL :
- dev->power.runtime_status == RPM_ACTIVE
- && atomic_inc_not_zero(&dev->power.usage_count);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
+
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
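
pm_runtime_get_if_active() generalizes pm_runtime_get_if_in_use(); in the matching header change (not shown in this diffstat), the old name survives as a thin wrapper along these lines:

	static inline int pm_runtime_get_if_in_use(struct device *dev)
	{
		return pm_runtime_get_if_active(dev, false);
	}

Typical use of the new ignore-usage-count mode is to touch hardware only while it is already powered up, without forcing a resume (sample_counters() is hypothetical):

	if (pm_runtime_get_if_active(dev, true) > 0) {
		sample_counters(dev);	/* safe: device is RPM_ACTIVE */
		pm_runtime_put(dev);	/* drop the reference taken above */
	}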
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 27f3e60608e5..92073ac68473 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@ suspend_state_t pm_suspend_target_state;
#define pm_suspend_target_state (PM_SUSPEND_ON)
#endif
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ srcu_read_lock_held(&wakeup_srcu))
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -241,7 +244,9 @@ void wakeup_source_unregister(struct wakeup_source *ws)
{
if (ws) {
wakeup_source_remove(ws);
- wakeup_source_sysfs_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
wakeup_source_destroy(ws);
}
}
@@ -405,7 +410,7 @@ void device_wakeup_arm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -421,7 +426,7 @@ void device_wakeup_disarm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -874,7 +879,7 @@ void pm_print_active_wakeup_sources(void)
struct wakeup_source *last_activity_ws = NULL;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (ws->active) {
pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
@@ -1025,7 +1030,7 @@ void pm_wakep_autosleep_enabled(bool set)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
ws->autosleep_enabled = set;
@@ -1104,7 +1109,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m,
}
*srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (n-- <= 0)
return ws;
}
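
The same lockdep-condition idea as in main.c, here with srcu_read_lock_held(&wakeup_srcu), since the wakeup-source list is SRCU-protected. The protected traversal in miniature (sketch):

	int srcuidx = srcu_read_lock(&wakeup_srcu);

	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		pr_debug("wakeup source: %s\n", ws->name);

	srcu_read_unlock(&wakeup_srcu, srcuidx);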
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index edd7a54ef0d3..bae9140a65a5 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -160,6 +160,29 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
return 0;
}
+static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ struct psci_cpuidle_data *data,
+ unsigned int state_count, int cpu)
+{
+ /* Currently, use of the hierarchical topology is limited to OSI mode. */
+ if (!psci_has_osi_support())
+ return 0;
+
+ data->dev = psci_dt_attach_cpu(cpu);
+ if (IS_ERR_OR_NULL(data->dev))
+ return PTR_ERR_OR_ZERO(data->dev);
+
+ /*
+ * Using the deepest state for the CPU to trigger a potential selection
+ * of a shared state for the domain assumes that the domain states are
+ * all deeper states.
+ */
+ drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+
+ return 0;
+}
+
static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
struct device_node *cpu_node,
unsigned int state_count, int cpu)
@@ -193,25 +216,10 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
goto free_mem;
}
- /* Currently limit the hierarchical topology to be used in OSI mode. */
- if (psci_has_osi_support()) {
- data->dev = psci_dt_attach_cpu(cpu);
- if (IS_ERR(data->dev)) {
- ret = PTR_ERR(data->dev);
- goto free_mem;
- }
-
- /*
- * Using the deepest state for the CPU to trigger a potential
- * selection of a shared state for the domain, assumes the
- * domain states are all deeper states.
- */
- if (data->dev) {
- drv->states[state_count - 1].enter =
- psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
- }
+ /* Initialize optional data, used for the hierarchical topology. */
+ ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
+ if (ret < 0)
+ goto free_mem;
/* Idle states parsed correctly, store them in the per-cpu struct. */
data->psci_states = psci_states;
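
The new helper leans on the kernel's error-pointer idiom: psci_dt_attach_cpu() may return NULL (no PM domain to attach, which is not an error) or an ERR_PTR() on failure, and IS_ERR_OR_NULL()/PTR_ERR_OR_ZERO() collapse the old two-step check into one. In miniature (sketch of the pattern used above):

	data->dev = psci_dt_attach_cpu(cpu);
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev); /* 0 for NULL, -E... for ERR_PTR */

	/* data->dev is valid: route the deepest state through the PM domain. */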