Lines matching refs: dev_priv. Cross-reference listing over the i915 interrupt handling code (i915_irq.c); the leading number on each line is the source line of the match.
85 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
87 assert_spin_locked(&dev_priv->irq_lock);
89 if ((dev_priv->irq_mask & mask) != 0) {
90 dev_priv->irq_mask &= ~mask;
91 I915_WRITE(DEIMR, dev_priv->irq_mask);
97 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
99 assert_spin_locked(&dev_priv->irq_lock);
101 if ((dev_priv->irq_mask & mask) != mask) {
102 dev_priv->irq_mask |= mask;
103 I915_WRITE(DEIMR, dev_priv->irq_mask);
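The enable/disable pair matched at 85-103 is the cached-IMR idiom: the driver keeps a software copy of the display interrupt mask in dev_priv->irq_mask, updates it while holding irq_lock, and only touches DEIMR when a bit actually changes. The enable side, reconstructed from the matched fragments with an illustrative name and comments added:

    static void
    display_irq_enable(drm_i915_private_t *dev_priv, u32 mask)
    {
            assert_spin_locked(&dev_priv->irq_lock);  /* caller must hold irq_lock */

            if ((dev_priv->irq_mask & mask) != 0) {   /* bit currently masked?     */
                    dev_priv->irq_mask &= ~mask;      /* 0 in DEIMR == unmasked    */
                    I915_WRITE(DEIMR, dev_priv->irq_mask);
            }
    }

The disable path (97-103) is the mirror image: OR the bits into irq_mask and write the register back.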
110 struct drm_i915_private *dev_priv = dev->dev_private;
114 assert_spin_locked(&dev_priv->irq_lock);
117 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
128 struct drm_i915_private *dev_priv = dev->dev_private;
133 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
145 struct drm_i915_private *dev_priv = dev->dev_private;
150 ironlake_enable_display_irq(dev_priv, bit);
152 ironlake_disable_display_irq(dev_priv, bit);
158 struct drm_i915_private *dev_priv = dev->dev_private;
168 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
170 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
178 struct drm_i915_private *dev_priv = dev->dev_private;
194 struct drm_i915_private *dev_priv = dev->dev_private;
229 struct drm_i915_private *dev_priv = dev->dev_private;
230 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
235 spin_lock_irqsave(&dev_priv->irq_lock, flags);
250 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
272 struct drm_i915_private *dev_priv = dev->dev_private;
282 struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
293 crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
297 spin_lock_irqsave(&dev_priv->irq_lock, flags);
312 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
318 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
333 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
358 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
362 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
376 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
408 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
423 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
428 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
540 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
542 struct drm_device *dev = dev_priv->dev;
553 if (!dev_priv->enable_hotplug_processing)
559 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
561 hpd_event_bits = dev_priv->hpd_event_bits;
562 dev_priv->hpd_event_bits = 0;
567 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
572 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
587 mod_timer(&dev_priv->hotplug_reenable_timer,
591 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
611 drm_i915_private_t *dev_priv = dev->dev_private;
620 new_delay = dev_priv->ips.cur_delay;
630 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
631 new_delay = dev_priv->ips.cur_delay - 1;
632 if (new_delay < dev_priv->ips.max_delay)
633 new_delay = dev_priv->ips.max_delay;
635 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
636 new_delay = dev_priv->ips.cur_delay + 1;
637 if (new_delay > dev_priv->ips.min_delay)
638 new_delay = dev_priv->ips.min_delay;
642 dev_priv->ips.cur_delay = new_delay;
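The stepping logic matched at 630-638 (ironlake-era IPS) is easy to misread: "delay" here is a divider-style value, so max_delay is the numerically smaller bound (fastest clock) and min_delay the larger one (slowest). That is why the code subtracts 1 when load is high and clamps with a less-than test against max_delay. A commented sketch of just that clamp; the up/down decision is reduced to a placeholder flag because the condition itself is not among the matches:

    if (want_more_performance) {                    /* placeholder for the busy check */
            if (cur_delay != max_delay)
                    new_delay = cur_delay - 1;      /* smaller delay == faster        */
            if (new_delay < max_delay)
                    new_delay = max_delay;          /* clamp at the fast end          */
    } else {
            if (cur_delay != min_delay)
                    new_delay = cur_delay + 1;      /* larger delay == slower         */
            if (new_delay > min_delay)
                    new_delay = min_delay;          /* clamp at the slow end          */
    }
    cur_delay = new_delay;                          /* 642: remember the new setting  */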
652 struct drm_i915_private *dev_priv = dev->dev_private;
658 if (i915_enable_hangcheck && !dev_priv->gpu_hang) {
659 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
666 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
671 spin_lock_irq(&dev_priv->rps.lock);
672 pm_iir = dev_priv->rps.pm_iir;
673 dev_priv->rps.pm_iir = 0;
676 spin_unlock_irq(&dev_priv->rps.lock);
681 mutex_lock(&dev_priv->rps.hw_lock);
684 new_delay = dev_priv->rps.cur_delay + 1;
690 if (IS_VALLEYVIEW(dev_priv->dev) &&
691 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
692 new_delay = dev_priv->rps.rpe_delay;
694 new_delay = dev_priv->rps.cur_delay - 1;
699 if (new_delay >= dev_priv->rps.min_delay &&
700 new_delay <= dev_priv->rps.max_delay) {
701 if (IS_VALLEYVIEW(dev_priv->dev))
702 valleyview_set_rps(dev_priv->dev, new_delay);
704 gen6_set_rps(dev_priv->dev, new_delay);
707 mutex_unlock(&dev_priv->rps.hw_lock);
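gen6_pm_rps_work (666-707) shows the usual two-lock split for RPS: the interrupt bits accumulated by the top half are snapshotted and cleared under the short rps.lock spinlock, while the actual frequency change happens under the sleeping rps.hw_lock mutex. A condensed reconstruction, not the verbatim source: the Valleyview special case at 690-692 and the GEN6_PMIMR unmasking are left out, and the up/down decision is shown with one representative event bit:

    static void pm_rps_work(struct work_struct *work)        /* illustrative name */
    {
            drm_i915_private_t *dev_priv =
                    container_of(work, drm_i915_private_t, rps.work);
            u32 pm_iir;
            int new_delay;

            spin_lock_irq(&dev_priv->rps.lock);
            pm_iir = dev_priv->rps.pm_iir;          /* snapshot what the IRQ queued */
            dev_priv->rps.pm_iir = 0;               /* ...and clear it              */
            spin_unlock_irq(&dev_priv->rps.lock);

            mutex_lock(&dev_priv->rps.hw_lock);     /* frequency writes can sleep   */
            if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
                    new_delay = dev_priv->rps.cur_delay + 1;
            else
                    new_delay = dev_priv->rps.cur_delay - 1;

            if (new_delay >= dev_priv->rps.min_delay &&
                new_delay <= dev_priv->rps.max_delay)
                    gen6_set_rps(dev_priv->dev, new_delay);
            mutex_unlock(&dev_priv->rps.hw_lock);
    }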
722 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
732 mutex_lock(&dev_priv->dev->struct_mutex);
749 spin_lock_irqsave(&dev_priv->irq_lock, flags);
750 dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
751 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
752 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
754 mutex_unlock(&dev_priv->dev->struct_mutex);
762 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
768 spin_lock_irqsave(&dev_priv->irq_lock, flags);
769 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
770 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
771 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
773 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
777 struct drm_i915_private *dev_priv,
783 notify_ring(dev, &dev_priv->ring[RCS]);
785 notify_ring(dev, &dev_priv->ring[VCS]);
787 notify_ring(dev, &dev_priv->ring[BCS]);
800 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
809 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
812 * The mask bit in IMR is cleared by dev_priv->rps.work.
815 spin_lock_irqsave(&dev_priv->rps.lock, flags);
816 dev_priv->rps.pm_iir |= pm_iir;
817 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
819 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
821 queue_work(dev_priv->wq, &dev_priv->rps.work);
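gen6_queue_rps_work (800-821) is the matching top half: the interrupt handler ORs the new PM bits into rps.pm_iir, masks them in GEN6_PMIMR so they cannot refire before the work item has run, and then queues the work. Reconstructed from the matched lines only; whatever sits on the unmatched lines in between is omitted:

    static void queue_rps_work(struct drm_i915_private *dev_priv, u32 pm_iir)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev_priv->rps.lock, flags);
            dev_priv->rps.pm_iir |= pm_iir;                  /* accumulate for the work fn */
            I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);    /* mask until it has run      */
            spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

            queue_work(dev_priv->wq, &dev_priv->rps.work);   /* runs gen6_pm_rps_work      */
    }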
831 drm_i915_private_t *dev_priv = dev->dev_private;
838 spin_lock(&dev_priv->irq_lock);
842 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
845 dev_priv->hpd_event_bits |= (1 << i);
846 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
847 dev_priv->hpd_stats[i].hpd_last_jiffies
849 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
850 dev_priv->hpd_stats[i].hpd_cnt = 0;
851 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
852 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
853 dev_priv->hpd_event_bits &= ~(1 << i);
857 dev_priv->hpd_stats[i].hpd_cnt++;
862 dev_priv->display.hpd_irq_setup(dev);
863 spin_unlock(&dev_priv->irq_lock);
865 queue_work(dev_priv->wq,
866 &dev_priv->hotplug_work);
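The loop body matched at 846-857 is the hotplug-storm detector: each HPD pin keeps a timestamp and a counter, and more than HPD_STORM_THRESHOLD interrupts inside one detection window gets the pin parked so the per-platform hpd_irq_setup hook (862) can mask it in hardware. Reconstructed with comments; the window constant HPD_STORM_DETECT_PERIOD and the local storm flag are filled in from the driver of this vintage rather than from the matches:

    if (!time_in_range(jiffies,
                       dev_priv->hpd_stats[i].hpd_last_jiffies,
                       dev_priv->hpd_stats[i].hpd_last_jiffies +
                       msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
            dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;   /* start a new window */
            dev_priv->hpd_stats[i].hpd_cnt = 0;
    } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
            dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; /* storm: park the pin */
            dev_priv->hpd_event_bits &= ~(1 << i);               /* and drop its event  */
            storm_detected = true;                               /* triggers 862        */
    } else {
            dev_priv->hpd_stats[i].hpd_cnt++;                    /* still inside window */
    }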
871 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
873 wake_up_all(&dev_priv->gmbus_wait_queue);
878 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
880 wake_up_all(&dev_priv->gmbus_wait_queue);
888 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
893 spin_lock_irqsave(&dev_priv->rps.lock, flags);
894 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
895 if (dev_priv->rps.pm_iir) {
896 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
900 queue_work(dev_priv->wq, &dev_priv->rps.work);
902 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
906 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
910 i915_handle_error(dev_priv->dev, false);
919 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
926 atomic_inc(&dev_priv->irq_received);
938 snb_gt_irq_handler(dev, dev_priv, gt_iir);
940 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
955 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
985 gen6_queue_rps_work(dev_priv, pm_iir);
998 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1051 struct drm_i915_private *dev_priv = dev->dev_private;
1074 struct drm_i915_private *dev_priv = dev->dev_private;
1100 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1139 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1144 atomic_inc(&dev_priv->irq_received);
1173 spin_lock(&dev_priv->irq_lock);
1174 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1175 spin_unlock(&dev_priv->irq_lock);
1180 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1220 hsw_pm_irq_handler(dev_priv, pm_iir);
1222 gen6_queue_rps_work(dev_priv, pm_iir);
1228 spin_lock(&dev_priv->irq_lock);
1230 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1231 spin_unlock(&dev_priv->irq_lock);
1245 struct drm_i915_private *dev_priv,
1250 notify_ring(dev, &dev_priv->ring[RCS]);
1252 notify_ring(dev, &dev_priv->ring[VCS]);
1258 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1262 atomic_inc(&dev_priv->irq_received);
1288 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1290 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1340 gen6_queue_rps_work(dev_priv, pm_iir);
1366 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1368 struct drm_device *dev = dev_priv->dev;
1407 atomic_inc(&dev_priv->gpu_error.reset_counter);
1409 for_each_ring(ring, dev_priv, i)
1418 for_each_ring(ring, dev_priv, i)
1421 wake_up_all(&dev_priv->gpu_error.reset_queue);
1430 struct drm_i915_private *dev_priv = dev->dev_private;
1457 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1482 if (reloc_offset < dev_priv->gtt.mappable_end &&
1491 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1498 offset = dev_priv->mm.stolen_base;
1534 #define i915_error_object_create(dev_priv, src) \
1535 i915_error_object_create_sized((dev_priv), (src), \
1630 struct drm_i915_private *dev_priv = dev->dev_private;
1637 for (i = 0; i < dev_priv->num_fence_regs; i++)
1660 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1669 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1678 return i915_error_object_create(dev_priv, obj);
1682 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1695 return i915_error_object_create(dev_priv, obj);
1705 struct drm_i915_private *dev_priv = dev->dev_private;
1750 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1757 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1759 ering->ctx = i915_error_object_create_sized(dev_priv,
1768 struct drm_i915_private *dev_priv = dev->dev_private;
1773 for_each_ring(ring, dev_priv, i) {
1777 i915_error_first_batchbuffer(dev_priv, ring);
1780 i915_error_object_create(dev_priv, ring->obj);
1821 struct drm_i915_private *dev_priv = dev->dev_private;
1827 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1828 error = dev_priv->gpu_error.first_error;
1829 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1891 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1894 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1913 &dev_priv->mm.active_list);
1919 &dev_priv->mm.bound_list);
1926 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1927 if (dev_priv->gpu_error.first_error == NULL) {
1928 dev_priv->gpu_error.first_error = error;
1931 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
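The tail of the error capture (1926-1931) uses a capture-once rule: the freshly built error record is published as gpu_error.first_error only if no earlier record exists, so the oldest evidence is what a later debugfs read will see. A sketch of that publish step:

    spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
    if (dev_priv->gpu_error.first_error == NULL) {
            dev_priv->gpu_error.first_error = error;   /* publish this capture  */
            error = NULL;                              /* ownership handed over */
    }
    spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
    /* a non-NULL error here means an older capture already exists; the real
     * code frees the new one instead of replacing it */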
1939 struct drm_i915_private *dev_priv = dev->dev_private;
1943 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1944 error = dev_priv->gpu_error.first_error;
1945 dev_priv->gpu_error.first_error = NULL;
1946 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1957 struct drm_i915_private *dev_priv = dev->dev_private;
2079 struct drm_i915_private *dev_priv = dev->dev_private;
2088 &dev_priv->gpu_error.reset_counter);
2093 for_each_ring(ring, dev_priv, i)
2097 (void) queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
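i915_handle_error (2079-2097) is the hand-off from interrupt context to the recovery path: when the hang is fatal it flags a reset as pending in gpu_error.reset_counter, wakes every ring's waiters so blocked callers notice, and queues gpu_error.work (i915_error_work_func, 1366 above) on the driver workqueue. A compressed sketch; the reset-pending flag shown here (atomic_set_mask with I915_RESET_IN_PROGRESS_FLAG) is filled in from kernels of this vintage rather than from the matches:

    struct intel_ring_buffer *ring;
    int i;

    atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
                    &dev_priv->gpu_error.reset_counter);  /* reset now pending    */

    for_each_ring(ring, dev_priv, i)
            wake_up_all(&ring->irq_queue);                /* unblock ring waiters */

    queue_work(dev_priv->wq, &dev_priv->gpu_error.work);  /* schedule the reset   */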
2102 drm_i915_private_t *dev_priv = dev->dev_private;
2103 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2151 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2157 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2159 i915_enable_pipestat(dev_priv, pipe,
2162 i915_enable_pipestat(dev_priv, pipe,
2166 if (dev_priv->info->gen == 3)
2168 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2175 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2181 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2182 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
2184 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2191 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2197 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2198 ironlake_enable_display_irq(dev_priv,
2200 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2207 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2214 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2221 i915_enable_pipestat(dev_priv, pipe,
2223 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2233 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2236 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2237 if (dev_priv->info->gen == 3)
2240 i915_disable_pipestat(dev_priv, pipe,
2243 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2248 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2251 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2252 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
2254 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2259 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2262 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2263 ironlake_disable_display_irq(dev_priv,
2265 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2270 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2274 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2275 i915_disable_pipestat(dev_priv, pipe,
2283 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
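The cluster of vblank callbacks at 2151-2283 all share one shape: check that the pipe is live, then flip the per-generation vblank bit under irq_lock, either in PIPESTAT (gen2-4 and Valleyview) or in the display IMR (ILK/IVB). The ironlake-style enable, reconstructed as an example with an illustrative name:

    static int example_ilk_enable_vblank(struct drm_device *dev, int pipe)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            unsigned long irqflags;

            if (!i915_pipe_enabled(dev, pipe))
                    return -EINVAL;

            spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
            ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                        DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
            spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

            return 0;
    }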
2305 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2332 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2337 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2355 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2360 for_each_ring(ring, dev_priv, i)
2368 struct drm_i915_private *dev_priv = dev->dev_private;
2418 drm_i915_private_t *dev_priv = dev->dev_private;
2431 for_each_ring(ring, dev_priv, i) {
2435 semaphore_clear_deadlocks(dev_priv);
2501 for_each_ring(ring, dev_priv, i) {
2518 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2524 struct drm_i915_private *dev_priv = dev->dev_private;
2545 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2547 atomic_set(&dev_priv->irq_received, 0);
2567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2569 atomic_set(&dev_priv->irq_received, 0);
2594 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2597 atomic_set(&dev_priv->irq_received, 0);
2626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2635 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2640 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2662 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2685 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2693 dev_priv->irq_mask = ~display_mask;
2697 I915_WRITE(DEIMR, dev_priv->irq_mask);
2702 dev_priv->gt_irq_mask = ~0;
2705 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2728 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
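ironlake_irq_postinstall (2685-2729) shows how the cached masks get their initial values: IMR bits have inverted sense (1 means masked), so the cache is simply the complement of the enable mask, written out once at install time; the ILK PCU event at 2727-2729 is then unmasked through the same runtime helper. The core idiom, as a fragment:

    dev_priv->irq_mask = ~display_mask;        /* 1 = masked, so invert the enables */
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    /* the GT side is programmed the same way via gt_irq_mask / GTIMR (2702-2705) */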
2737 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2749 dev_priv->irq_mask = ~display_mask;
2754 I915_WRITE(DEIMR, dev_priv->irq_mask);
2763 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2766 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2796 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2811 dev_priv->irq_mask = (~enable_mask) |
2818 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2825 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2826 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2827 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2833 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2853 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2856 if (!dev_priv)
2859 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2877 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2879 if (!dev_priv)
2882 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2908 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2911 atomic_set(&dev_priv->irq_received, 0);
2922 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2929 dev_priv->irq_mask =
2935 I915_WRITE16(IMR, dev_priv->irq_mask);
2953 drm_i915_private_t *dev_priv = dev->dev_private;
2982 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2991 atomic_inc(&dev_priv->irq_received);
3003 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3021 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3029 notify_ring(dev, &dev_priv->ring[RCS]);
3047 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3062 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3065 atomic_set(&dev_priv->irq_received, 0);
3082 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3089 dev_priv->irq_mask =
3111 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3114 I915_WRITE(IMR, dev_priv->irq_mask);
3128 drm_i915_private_t *dev_priv = dev->dev_private;
3157 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3165 atomic_inc(&dev_priv->irq_received);
3176 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3193 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3217 notify_ring(dev, &dev_priv->ring[RCS]);
3257 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3260 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3281 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3284 atomic_set(&dev_priv->irq_received, 0);
3299 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3305 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3313 enable_mask = ~dev_priv->irq_mask;
3321 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3340 I915_WRITE(IMR, dev_priv->irq_mask);
3353 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3358 assert_spin_locked(&dev_priv->irq_lock);
3366 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3386 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3396 atomic_inc(&dev_priv->irq_received);
3409 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3428 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3456 notify_ring(dev, &dev_priv->ring[RCS]);
3458 notify_ring(dev, &dev_priv->ring[VCS]);
3494 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3497 if (!dev_priv)
3500 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3519 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3520 struct drm_device *dev = dev_priv->dev;
3525 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3529 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3532 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3547 if (dev_priv->display.hpd_irq_setup)
3548 dev_priv->display.hpd_irq_setup(dev);
3549 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
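The timer armed at 587 lands here: i915_reenable_hotplug_timer_func (3519-3549) walks every HPD pin under irq_lock, moves anything parked as HPD_DISABLED back to HPD_ENABLED, and re-runs the per-platform hpd_irq_setup hook so the hardware enables match again. Core of the callback, reconstructed; the HPD_NONE/HPD_NUM_PINS loop bounds are filled in from the driver of this vintage:

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
            if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
                    continue;                          /* only storm-parked pins */
            dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
            /* the real code also walks connectors to report which one recovered */
    }
    if (dev_priv->display.hpd_irq_setup)
            dev_priv->display.hpd_irq_setup(dev);      /* reprogram HPD enables   */
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);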
3554 struct drm_i915_private *dev_priv = dev->dev_private;
3556 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3557 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3558 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3559 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3561 init_timer(&dev_priv->gpu_error.hangcheck_timer);
3562 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3565 init_timer(&dev_priv->hotplug_reenable_timer);
3566 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3567 (void *) dev_priv);
3590 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3599 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3607 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3619 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3625 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3634 struct drm_i915_private *dev_priv = dev->dev_private;
3641 dev_priv->hpd_stats[i].hpd_cnt = 0;
3642 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3653 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3654 if (dev_priv->display.hpd_irq_setup)
3655 dev_priv->display.hpd_irq_setup(dev);
3656 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);