author    Olof Johansson <olof@lixom.net>  2013-04-28 12:43:08 -0700
committer Olof Johansson <olof@lixom.net>  2013-04-28 15:01:12 -0700
commit    afcf7924ecab726dab0227188783c4a40d9f0eec (patch)
tree      606b0883c0ad3fb2ef04f0f036a55ed3875fdc9b /drivers/base/power/qos.c
parent    dc9c220304c882f06aaadf427821c6388782aab8 (diff)
parent    d21be237ffa357e55005e2bf9ffef10b23c184d0 (diff)
Merge branch 'fixes' into next/cleanup
Merging in fixes since there's a conflict in the omap4 clock tables caused by it.

* fixes: (245 commits)
  ARM: highbank: fix cache flush ordering for cpu hotplug
  ARM: OMAP4: hwmod data: make 'ocp2scp_usb_phy_phy_48m" as the main clock
  arm: mvebu: Fix the irq map function in SMP mode
  Fix GE0/GE1 init on ix2-200 as GE0 has no PHY
  ARM: S3C24XX: Fix interrupt pending register offset of the EINT controller
  ARM: S3C24XX: Correct NR_IRQS definition for s3c2440
  ARM i.MX6: Fix ldb_di clock selection
  ARM: imx: provide twd clock lookup from device tree
  ARM: imx35 Bugfix admux clock
  ARM: clk-imx35: Bugfix iomux clock
  ...
  + Linux 3.9-rc6

Signed-off-by: Olof Johansson <olof@lixom.net>

Conflicts:
	arch/arm/mach-omap2/cclock44xx_data.c
Diffstat (limited to 'drivers/base/power/qos.c')
-rw-r--r--  drivers/base/power/qos.c | 60
1 file changed, 47 insertions, 13 deletions
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5f74587ef258..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -46,6 +46,7 @@
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
- mutex_lock(&dev_pm_qos_mtx);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
+ pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
out:
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
/**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
kfree(req);
}
+static void dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_drop_user_request(dev, type);
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
return ret;
}
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
-
dev->power.qos->latency_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
out:
- mutex_unlock(&dev_pm_qos_mtx);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
- pm_qos_sysfs_remove_latency(dev);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
- }
}
/**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_latency(dev);
+
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
}
pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
-
dev->power.qos->flags_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
out:
- mutex_unlock(&dev_pm_qos_mtx);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
return ret;
}
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
- pm_qos_sysfs_remove_flags(dev);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
- }
}
/**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
void dev_pm_qos_hide_flags(struct device *dev)
{
pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_flags(dev);
+
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
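
The qos.c change pulled in by this merge adds a second mutex, dev_pm_qos_sysfs_mtx, which is always acquired before dev_pm_qos_mtx, and moves the pm_qos_sysfs_add_*/pm_qos_sysfs_remove_* calls out from under dev_pm_qos_mtx. The apparent intent is that removing a sysfs attribute, which may have to wait for an in-flight attribute read/write that itself takes dev_pm_qos_mtx, is no longer done while holding that lock. The following is a minimal userspace sketch of the resulting lock ordering, using pthreads and hypothetical names; it illustrates the pattern only and is not kernel code.

/*
 * Sketch of the lock ordering established by the patch (hypothetical names):
 * sysfs_mtx is the outer lock, qos_mtx the inner one, and the simulated
 * sysfs add/remove calls are never made while qos_mtx is held.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sysfs_mtx = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t qos_mtx   = PTHREAD_MUTEX_INITIALIZER;	/* inner */

static bool latency_req_active;	/* stands in for dev->power.qos->latency_req */

static int sysfs_add_latency(void)     { puts("add sysfs file");    return 0; }
static void sysfs_remove_latency(void) { puts("remove sysfs file"); }

/* Mirrors dev_pm_qos_expose_latency_limit(): publish the request under the
 * inner lock, drop it, then create the sysfs file with only the outer lock
 * held; on failure, retake the inner lock to drop the request, as
 * dev_pm_qos_drop_user_request() does in the patch. */
static int expose_latency_limit(void)
{
	int ret;

	pthread_mutex_lock(&sysfs_mtx);

	pthread_mutex_lock(&qos_mtx);
	latency_req_active = true;
	pthread_mutex_unlock(&qos_mtx);

	ret = sysfs_add_latency();
	if (ret) {
		pthread_mutex_lock(&qos_mtx);
		latency_req_active = false;
		pthread_mutex_unlock(&qos_mtx);
	}

	pthread_mutex_unlock(&sysfs_mtx);
	return ret;
}

/* Mirrors dev_pm_qos_hide_latency_limit(): remove the sysfs file before
 * taking the inner lock, never the other way around. */
static void hide_latency_limit(void)
{
	pthread_mutex_lock(&sysfs_mtx);

	sysfs_remove_latency();

	pthread_mutex_lock(&qos_mtx);
	latency_req_active = false;
	pthread_mutex_unlock(&qos_mtx);

	pthread_mutex_unlock(&sysfs_mtx);
}

int main(void)
{
	if (!expose_latency_limit())
		hide_latency_limit();
	return 0;
}

The same ordering is applied in dev_pm_qos_constraints_destroy() and in the *_flags variants in the diff above: sysfs removal first under dev_pm_qos_sysfs_mtx, then the internal request teardown under dev_pm_qos_mtx.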