path: root/kernel/workqueue.c
author	Dave Airlie <airlied@redhat.com>	2015-03-24 11:12:20 +1000
committer	Dave Airlie <airlied@redhat.com>	2015-03-24 11:12:20 +1000
commit	74ccbff99787b68e4eb01ef8cf29789229ab0f5d (patch)
tree	d4e322105618e5b3887f7dc6b54e5d2346974675 /kernel/workqueue.c
parent	ae10c2248593fb84c6951d67c98c9c934997e56a (diff)
parent	0f9e9cd61f46c07246e30871fd638ffeaca3c576 (diff)
Merge tag 'drm-intel-next-2015-03-13-merge' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2015-03-13-rebased:
- EU count report param for gen9+ (Jeff McGee)
- piles of pll/wm/... fixes for chv, finally out of preliminary hw support (Ville, Vijay)
- gen9 rps support from Akash
- more work to move towards atomic from Matt, Ander and others
- runtime pm support for skl (Damien)
- edp1.4 intermediate link clock support (Sonika)
- use frontbuffer tracking for fbc (Paulo)
- remove ilk rc6 (John Harrison)
- a bunch of smaller things and fixes all over

Includes backmerge because git rerere couldn't keep up any more.

* tag 'drm-intel-next-2015-03-13-merge' of git://anongit.freedesktop.org/drm-intel: (366 commits)
  drm/i915: Make sure the primary plane is enabled before reading out the fb state
  drm/i915: Update DRIVER_DATE to 20150313
  drm/i915: Fix vmap_batch page iterator overrun
  drm/i915: Export total subslice and EU counts
  drm/i915: redefine WARN_ON_ONCE to include the condition
  drm/i915/skl: Implement WaDisableHBR2
  drm/i915: Remove the preliminary_hw_support shackles from CHV
  drm/i915: Read CHV_PLL_DW8 from the correct offset
  drm/i915: Rewrite IVB FDI bifurcation conflict checks
  drm/i915: Rewrite some some of the FDI lane checks
  drm/i915/skl: Enable the RPS interrupts programming
  drm/i915/skl: Enabling processing of Turbo interrupts
  drm/i915/skl: Updated the i915_frequency_info debugfs function
  drm/i915: Simplify the way BC bifurcation state consistency is kept
  drm/i915/skl: Updated the act_freq_mhz_show sysfs function
  drm/i915/skl: Updated the gen9_enable_rps function
  drm/i915/skl: Updated the gen6_rps_limits function
  drm/i915/skl: Restructured the gen6_set_rps_thresholds function
  drm/i915/skl: Updated the gen6_set_rps function
  drm/i915/skl: Updated the gen6_init_rps_frequencies function
  ...
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	56
1 file changed, 52 insertions, 4 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f28849394791..41ff75b478c6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+struct cwt_wait {
+	wait_queue_t		wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long flags;
 	int ret;
 
 	do {
 		ret = try_to_grab_pending(work, is_dwork, &flags);
 		/*
-		 * If someone else is canceling, wait for the same event it
-		 * would be waiting for before retrying.
+		 * If someone else is already canceling, wait for it to
+		 * finish. flush_work() doesn't work for PREEMPT_NONE
+		 * because we may get scheduled between @work's completion
+		 * and the other canceling task resuming and clearing
+		 * CANCELING - flush_work() will return false immediately
+		 * as @work is no longer busy, try_to_grab_pending() will
+		 * return -ENOENT as @work is still being canceled and the
+		 * other canceling task won't be able to clear CANCELING as
+		 * we're hogging the CPU.
+		 *
+		 * Let's wait for completion using a waitqueue. As this
+		 * may lead to the thundering herd problem, use a custom
+		 * wake function which matches @work along with exclusive
+		 * wait and wakeup.
 		 */
-		if (unlikely(ret == -ENOENT))
-			flush_work(work);
+		if (unlikely(ret == -ENOENT)) {
+			struct cwt_wait cwait;
+
+			init_wait(&cwait.wait);
+			cwait.wait.func = cwt_wakefn;
+			cwait.work = work;
+
+			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+						  TASK_UNINTERRUPTIBLE);
+			if (work_is_canceling(work))
+				schedule();
+			finish_wait(&cancel_waitq, &cwait.wait);
+		}
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
 	flush_work(work);
 	clear_work_data(work);
+
+	/*
+	 * Paired with prepare_to_wait() above so that either
+	 * waitqueue_active() is visible here or !work_is_canceling() is
+	 * visible there.
+	 */
+	smp_mb();
+	if (waitqueue_active(&cancel_waitq))
+		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
 	return ret;
 }
 
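
The comment in the first hunk describes the pattern the patch relies on: every would-be canceler sleeps exclusively on one static waitqueue, each waiter records which work item it cares about, and a custom wake function filters wake-ups by that key so a wake-up for one item does not disturb waiters on others. Below is a minimal standalone sketch of the same waitqueue idiom; a hypothetical "resource" stands in for the work item, and res_wait, res_wakefn, res_waitq and wait_for_resource() are made-up names, not part of the patch.

/*
 * Illustrative sketch only -- not from the patch.  A hypothetical
 * "resource" stands in for the work item.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* All waiters, for all resources, share one static waitqueue. */
static DECLARE_WAIT_QUEUE_HEAD(res_waitq);

struct res_wait {
	wait_queue_t	wait;	/* embedded waitqueue entry */
	void		*res;	/* the resource this waiter is blocked on */
};

/* Wake a waiter only when the wake-up key matches its resource. */
static int res_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct res_wait *rwait = container_of(wait, struct res_wait, wait);

	if (rwait->res != key)
		return 0;	/* not ours; the wake-up scan continues */
	return autoremove_wake_function(wait, mode, sync, key);
}

/* Wait side: block until @res stops being busy, without spinning. */
static void wait_for_resource(void *res, bool (*busy)(void *))
{
	struct res_wait rwait;

	init_wait(&rwait.wait);
	rwait.wait.func = res_wakefn;	/* install the filtering wake function */
	rwait.res = res;

	prepare_to_wait_exclusive(&res_waitq, &rwait.wait,
				  TASK_UNINTERRUPTIBLE);
	if (busy(res))			/* re-check after queueing ourselves */
		schedule();
	finish_wait(&res_waitq, &rwait.wait);
}

Because the wait is exclusive and the wake function rejects non-matching keys, a wake-up with nr_exclusive == 1 scans the shared queue until it wakes exactly one waiter for the matching resource, which is how the patch avoids the thundering herd it mentions.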
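The smp_mb() added in the second hunk closes the classic lost-wake-up race on the wake side: the canceling task must make its state update visible before testing waitqueue_active(), pairing with the barrier implied by prepare_to_wait_exclusive() on the wait side, so that either the waker sees the waiter queued or the waiter sees the condition already cleared and skips schedule(). Continuing the illustrative sketch above (release_resource() and clear_busy are assumed helper names, not from the patch):

/*
 * Illustrative wake side, pairing with wait_for_resource() above.
 */
static void release_resource(void *res, void (*clear_busy)(void *))
{
	clear_busy(res);	/* from here on, busy(res) returns false */

	/*
	 * Pairs with the barrier implied by prepare_to_wait_exclusive()
	 * on the wait side: either this task sees the waiter already on
	 * the queue, or the waiter sees !busy(res) and never sleeps.
	 */
	smp_mb();
	if (waitqueue_active(&res_waitq))
		__wake_up(&res_waitq, TASK_NORMAL, 1, res);
}

Passing the resource pointer as the wake key is what lets res_wakefn() match waiters, exactly as the patch passes @work to __wake_up() on cancel_waitq.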