Lines Matching defs:work
512 /* Helper routine in DRM core does all the work: */
538 static void i915_hotplug_work_func(struct work_struct *work)
540 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
664 static void gen6_pm_rps_work(struct work_struct *work)
666 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
667 rps.work);
714 * @work: workqueue struct
720 static void ivybridge_parity_work(struct work_struct *work)
722 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
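The handlers matched at lines 538, 664 and 720 above all share one idiom: the struct work_struct is embedded in the driver-private structure, and the handler recovers that structure with container_of(). The following is a minimal sketch of that idiom only, not the driver's code; the type and field names (struct demo_private, hotplug_work, demo_hotplug_work_func) are made up for illustration.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative private structure: the work item is embedded in it. */
struct demo_private {
        struct work_struct hotplug_work;
        int hotplug_status;
};

/*
 * Work handler: only the work_struct pointer is passed in, so the
 * enclosing private structure is recovered with container_of(),
 * the same way the handlers above recover drm_i915_private_t.
 */
static void demo_hotplug_work_func(struct work_struct *work)
{
        struct demo_private *priv =
                container_of(work, struct demo_private, hotplug_work);

        /* process priv->hotplug_status in sleepable process context */
        (void)priv;
}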
812 * The mask bit in IMR is cleared by dev_priv->rps.work.
821 queue_work(dev_priv->wq, &dev_priv->rps.work);
885 * of the way in which we use the masks to defer the RPS work (which for
900 queue_work(dev_priv->wq, &dev_priv->rps.work);
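The comments around lines 812 and 885 describe a deferral scheme: the interrupt handler masks the interrupt source, records what fired, and hands the heavy lifting to dev_priv->rps.work via queue_work(); the work function then clears the mask bit again once it has run. A rough sketch of that shape follows, assuming hypothetical helpers demo_mask_irq()/demo_unmask_irq() in place of the IMR register writes and a single made-up event bit.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_private {
        spinlock_t irq_lock;
        u32 pending_events;             /* latched cause bits for the worker */
        struct work_struct rps_work;
        struct workqueue_struct *wq;
};

/* Hypothetical helpers standing in for the IMR mask/unmask writes. */
static void demo_mask_irq(struct demo_private *priv) { }
static void demo_unmask_irq(struct demo_private *priv) { }

static irqreturn_t demo_irq_handler(int irq, void *arg)
{
        struct demo_private *priv = arg;

        spin_lock(&priv->irq_lock);
        priv->pending_events |= 0x1;    /* record what fired */
        demo_mask_irq(priv);            /* keep it quiet until the work runs */
        spin_unlock(&priv->irq_lock);

        queue_work(priv->wq, &priv->rps_work);
        return IRQ_HANDLED;
}

static void demo_rps_work(struct work_struct *work)
{
        struct demo_private *priv =
                container_of(work, struct demo_private, rps_work);
        u32 events;

        spin_lock_irq(&priv->irq_lock);
        events = priv->pending_events;
        priv->pending_events = 0;
        demo_unmask_irq(priv);          /* the mask bit is cleared by the work item */
        spin_unlock_irq(&priv->irq_lock);

        /* act on 'events' in process context */
        (void)events;
}

Masking in the hard-IRQ path is what lets the slow processing run later without the same interrupt re-firing in the meantime, which is the ordering the comments above are protecting.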
1356 * i915_error_work_func - do process context error handling work
1357 * @work: work struct
1362 static void i915_error_work_func(struct work_struct *work)
1364 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1365 work);
1382 * Note that there's only one work item which does gpu resets, so we
1387 * correct ordering between hang detection and this work item, and since
1389 * work we don't need to worry about any other races.
2097 (void) queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
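The comment spanning lines 1382-1389 leans on a property of the workqueue API: a given work_struct can be pending at most once, so calling queue_work() again while the reset work is still queued returns false and the reports collapse into a single run. A sketch of reporting an error that way is below; demo_handle_error() and the simplified reset_counter are invented for illustration, and the real wedged/reset bookkeeping of i915_gpu_error is omitted.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_gpu_error {
        atomic_t reset_counter;
        struct work_struct work;        /* the one and only reset work item */
};

static void demo_error_work_func(struct work_struct *work)
{
        struct demo_gpu_error *error =
                container_of(work, struct demo_gpu_error, work);

        /* runs in process context: reset the device, then bump the counter */
        atomic_inc(&error->reset_counter);
}

static void demo_handle_error(struct workqueue_struct *wq,
                              struct demo_gpu_error *error)
{
        /*
         * queue_work() returns false while error->work is still pending,
         * so back-to-back hang reports produce a single reset pass.
         */
        if (!queue_work(wq, &error->work))
                pr_debug("reset work already queued\n");
}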
2106 struct intel_unpin_work *work;
2115 work = intel_crtc->unpin_work;
2117 if (work == NULL ||
2118 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2119 !work->enable_stall_check) {
2126 obj = work->pending_flip_obj;
2379 * and break the hang. This should work on
2532 * SDEIER is also touched by the interrupt handler to work around missed
3557 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3558 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
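Lines 3557-3558 are the other half of the pattern: each embedded work item is bound to its handler exactly once at init time, before the interrupts that will queue it are enabled. A condensed sketch of that setup step follows, reusing the illustrative demo_* names from the sketches above and assuming an ordered driver-private workqueue; it is not the driver's init path.

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_private {
        struct workqueue_struct *wq;
        struct work_struct hotplug_work;
        struct work_struct rps_work;
};

static void demo_hotplug_work_func(struct work_struct *work) { /* ... */ }
static void demo_rps_work(struct work_struct *work) { /* ... */ }

static int demo_irq_init(struct demo_private *priv)
{
        /* one driver-private workqueue for all deferred interrupt work */
        priv->wq = alloc_ordered_workqueue("demo", 0);
        if (!priv->wq)
                return -ENOMEM;

        /* bind each embedded work item to its handler, once, at init */
        INIT_WORK(&priv->hotplug_work, demo_hotplug_work_func);
        INIT_WORK(&priv->rps_work, demo_rps_work);
        return 0;
}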