Diffstat (limited to 'drivers/soc/qcom/rpmh.c')
-rw-r--r--  drivers/soc/qcom/rpmh.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index d1626a1328d7..f2b5b46ccd1f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -435,9 +435,6 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
*
* @ctrlr: Controller making request to flush cached data
*
- * This function is called from sleep code on the last CPU
- * (thus no spinlock needed).
- *
* Return:
* * 0 - Success
* * Error code - Otherwise
@@ -445,13 +442,21 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- int ret;
+ int ret = 0;
lockdep_assert_irqs_disabled();
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
+
if (!ctrlr->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- return 0;
+ goto exit;
}
/* Invalidate the TCSes first to avoid stale data */
@@ -460,7 +465,7 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
- return ret;
+ goto exit;
list_for_each_entry(p, &ctrlr->cache, list) {
if (!is_req_valid(p)) {
@@ -471,16 +476,18 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
p->sleep_val);
if (ret)
- return ret;
+ goto exit;
ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
p->wake_val);
if (ret)
- return ret;
+ goto exit;
}
ctrlr->dirty = false;
- return 0;
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
}
/**
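A minimal, self-contained sketch of the trylock-or-abort pattern this patch introduces, for illustration only. All names here (my_ctrlr, my_flush, do_flush_work) are hypothetical and not part of the rpmh driver; the point is simply that a failed spin_trylock() means another CPU is still active, so the flush bails out with -EBUSY instead of spinning with interrupts disabled.

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Hypothetical stand-in for the controller state guarded by the lock. */
struct my_ctrlr {
	spinlock_t cache_lock;
	bool dirty;
};

/* Hypothetical stand-in for the invalidate/flush work done under the lock. */
static int do_flush_work(struct my_ctrlr *c)
{
	return 0;
}

static int my_flush(struct my_ctrlr *c)
{
	int ret = 0;

	/*
	 * We only expect to be called when this looks like the last CPU
	 * running. If the lock is contended, another CPU is up, so abort
	 * rather than spin.
	 */
	if (!spin_trylock(&c->cache_lock))
		return -EBUSY;

	if (!c->dirty)
		goto exit;	/* nothing cached to flush */

	ret = do_flush_work(c);
	if (ret)
		goto exit;

	c->dirty = false;
exit:
	spin_unlock(&c->cache_lock);
	return ret;
}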