Lines Matching defs:seqno

237 		 * Ensure that any following seqno writes only happen
318 * Ensure that any following seqno writes only happen when the render
501 DRM_ERROR("Failed to allocate seqno page\n");
643 * @seqno - return seqno stuck into the ring
645 * Update the mailbox registers in the *other* rings with the current seqno.
679 u32 seqno)
682 return dev_priv->last_seqno < seqno;
686 * intel_ring_sync - sync the waiter to the signaller on seqno
690 * @seqno - seqno which the waiter will block on
695 u32 seqno)
702 /* Throughout all of the GEM code, seqno passed implies our current
703 * seqno is >= the last seqno executed. However for hardware the
706 seqno -= 1;
715 /* If seqno wrap happened, omit the wait with no-ops */
716 if (!i915_gem_has_seqno_wrapped(waiter->dev, seqno)) {
720 intel_ring_emit(waiter, seqno);
795 /* Workaround to force correct ordering between irq and seqno writes on
810 ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
812 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
823 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
826 pc->cpu_page[0] = seqno;
1398 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1402 ret = i915_wait_seqno(ring, seqno);
1412 u32 seqno = 0;
1435 seqno = request->seqno;
1447 if (seqno == 0)
1450 ret = intel_ring_wait_seqno(ring, seqno);
1537 u32 seqno;
1554 seqno = idle_req->seqno;
1556 return i915_wait_seqno(ring, seqno);
1608 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1619 ring->set_seqno(ring, seqno);
1620 ring->hangcheck.seqno = seqno;
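
Taken together, the matches around source lines 679-720 show the two seqno comparisons the ring code leans on: the wrap test "dev_priv->last_seqno < seqno" (line 682) and the "seqno -= 1" adjustment made before a semaphore wait (lines 702-706). The sketch below is not the driver code; it uses a hypothetical fake_ring struct and stand-in helper names purely to illustrate those two comparisons under that assumption.

/*
 * Hedged sketch, not i915 code: simplified stand-ins mirroring the
 * comparisons visible in the matches above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ring {
	uint32_t last_seqno;	/* last seqno handed out on this ring */
};

/* Mirrors the check at line 682: a target seqno newer than the last
 * one allocated can only mean the 32-bit counter wrapped. */
static bool seqno_has_wrapped(const struct fake_ring *ring, uint32_t seqno)
{
	return ring->last_seqno < seqno;
}

/* Mirrors lines 702-706: GEM treats "seqno passed" as "current seqno
 * >= seqno", so the hardware semaphore waits for seqno - 1. */
static uint32_t semaphore_wait_value(uint32_t seqno)
{
	return seqno - 1;
}

int main(void)
{
	struct fake_ring ring = { .last_seqno = 100 };
	uint32_t target = 99;

	if (!seqno_has_wrapped(&ring, target))
		printf("wait on semaphore value %u\n",
		       semaphore_wait_value(target));
	else
		printf("seqno wrapped, emit no-ops instead of waiting\n");

	return 0;
}

As in the hit at line 716, the wrapped case skips the semaphore wait entirely; only the non-wrapped path emits a wait on seqno - 1.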