Lines Matching defs:request

677  * Compare seqno against outstanding lazy request. Emit a request if they are
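The fragment at 677 is the lazy-request check: a waiter may need to emit the not-yet-submitted lazy request before it can block on its seqno. A minimal sketch of that helper, assuming the upstream i915_gem_check_olr() shape (the helper name and the ring->outstanding_lazy_request field are from Linux i915, not confirmed in this port):

    /* Sketch: if the seqno being waited on is the one the outstanding
     * lazy request would produce, emit that request now. */
    static int
    i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
    {
            int ret = 0;

            if (seqno == ring->outstanding_lazy_request)
                    ret = i915_add_request(ring, NULL);

            return ret;
    }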
795 * request and object lists appropriately for that event.
924 * domain, and only that read domain. Enforce that in the request.
1522 struct drm_i915_gem_request *request;
1531 * things up similar to emitting the lazy request. The difference here
1532 * is that the flush _must_ happen before the next request, no matter
1539 request = kmalloc(sizeof(*request), GFP_KERNEL);
1540 if (request == NULL)
1544 /* Record the position of the start of the request so that
1546 * GPU processing the request, we never over-estimate the
1553 kfree(request, sizeof(*request));
1557 request->seqno = intel_ring_get_seqno(ring);
1558 request->ring = ring;
1559 request->head = request_start;
1560 request->tail = request_ring_position;
1561 request->ctx = ring->last_context;
1562 request->batch_obj = obj;
1564 /* Whilst this request exists, batch_obj will be on the
1566 * request is retired will the batch_obj be moved onto the
1571 if (request->ctx)
1572 i915_gem_context_reference(request->ctx);
1574 request->emitted_jiffies = jiffies;
1576 list_add_tail(&request->list, &ring->request_list, (caddr_t)request);
1577 request->file_priv = NULL;
1583 request->file_priv = file_priv;
1584 list_add_tail(&request->client_list,
1585 &file_priv->mm.request_list, (caddr_t)request);
1606 *out_seqno = request->seqno;
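Taken together, the assignments at 1557-1562 and the list hookups at 1576-1585 imply the request bookkeeping below. This struct layout is reconstructed from those lines for illustration only; field types and order are assumptions, and the authoritative definition lives in the driver headers:

    struct drm_i915_gem_request {
            struct intel_ring_buffer *ring;          /* ring it was emitted on (1558) */
            uint32_t seqno;                          /* GPU sequence number (1557) */
            u32 head;                                /* ring head at request start (1559) */
            u32 tail;                                /* ring position after emit (1560) */
            struct i915_hw_context *ctx;             /* referenced while live (1561, 1571) */
            struct drm_i915_gem_object *batch_obj;   /* stays active until retire (1562) */
            unsigned long emitted_jiffies;           /* submit time, used by throttling (1574) */
            struct list_head list;                   /* link on ring->request_list (1576) */
            struct drm_i915_file_private *file_priv; /* owning client, may be NULL (1577) */
            struct list_head client_list;            /* link on file_priv->mm.request_list (1584) */
    };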
1612 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1614 struct drm_i915_file_private *file_priv = request->file_priv;
1620 if (request->file_priv) {
1621 list_del(&request->client_list);
1622 request->file_priv = NULL;
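Lines 1612-1622 unlink a request from its owning client. Assembled, with the locking the match lines elide (the mm.lock spinlock is assumed from the upstream driver; this port may guard the client list differently):

    static void
    i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
    {
            struct drm_i915_file_private *file_priv = request->file_priv;

            if (!file_priv)
                    return;

            spin_lock(&file_priv->mm.lock);
            if (request->file_priv) {
                    /* Re-check under the lock: the file-close path can
                     * race with us and clear file_priv first. */
                    list_del(&request->client_list);
                    request->file_priv = NULL;
            }
            spin_unlock(&file_priv->mm.lock);
    }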
1653 static bool i915_request_guilty(struct drm_i915_gem_request *request,
1661 if (request->batch_obj) {
1662 if (i915_head_inside_object(acthd, request->batch_obj)) {
1668 if (i915_head_inside_request(acthd, request->head, request->tail)) {
1677 struct drm_i915_gem_request *request,
1687 i915_request_guilty(request, acthd, &inside)) {
1691 request->batch_obj ?
1692 request->batch_obj->gtt_offset : 0,
1693 request->ctx ? request->ctx->id : 0,
1702 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
1703 hs = &request->ctx->hang_stats;
1704 else if (request->file_priv)
1705 hs = &request->file_priv->hang_stats;
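The guilty test at 1653-1668 decides whether the hardware head stopped inside the hung batch, and 1702-1705 pick whose statistics to charge. The listing stops before the accounting itself; a sketch of that tail, assuming the upstream i915_ctx_hang_stats counters:

    if (hs) {
            if (guilty)
                    hs->batch_active++;   /* this batch caused the hang */
            else
                    hs->batch_pending++;  /* innocent work queued behind it */
    }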
1715 static void i915_gem_free_request(struct drm_i915_gem_request *request)
1717 list_del(&request->list);
1718 i915_gem_request_remove_from_client(request);
1720 if (request->ctx)
1721 i915_gem_context_unreference(request->ctx);
1723 kfree(request, sizeof(*request));
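Lines 1715-1723 are the complete teardown path; assembled into one unit (note the two-argument sized kfree(), this port's wrapper rather than Linux's one-argument kfree()):

    static void
    i915_gem_free_request(struct drm_i915_gem_request *request)
    {
            /* Unlink from the ring's request list and from the client. */
            list_del(&request->list);
            i915_gem_request_remove_from_client(request);

            /* Drop the context reference taken at add-request time (1572). */
            if (request->ctx)
                    i915_gem_context_unreference(request->ctx);

            kfree(request, sizeof(*request));
    }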
1736 struct drm_i915_gem_request *request;
1738 request = list_first_entry(&ring->request_list,
1742 if (request->seqno > completed_seqno)
1743 i915_set_reset_status(ring, request, acthd);
1745 i915_gem_free_request(request);
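The fragments at 1736-1745 belong to the GPU-reset path: every request the hardware had not completed gets blamed via i915_set_reset_status(), then all of them are freed. A trimmed sketch of the surrounding loop, assuming the upstream reset helper's locals (completed_seqno and acthd read from the ring before the walk):

    /* Called with struct_mutex held while resetting the GPU. */
    while (!list_empty(&ring->request_list)) {
            struct drm_i915_gem_request *request;

            request = list_first_entry(&ring->request_list,
                                       struct drm_i915_gem_request,
                                       list);

            /* Unfinished at hang time: update hang statistics. */
            if (request->seqno > completed_seqno)
                    i915_set_reset_status(ring, request, acthd);

            i915_gem_free_request(request);
    }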
1804 * This function clears the request list as sequence numbers are passed.
1819 struct drm_i915_gem_request *request;
1821 request = list_first_entry(&ring->request_list,
1825 if (!i915_seqno_passed(seqno, request->seqno))
1828 /* We know the GPU must have read the request to have
1830 * of tail of the request to update the last known position
1833 ring->last_retired_head = request->tail;
1835 i915_gem_free_request(request);
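Lines 1804-1835 are the retire loop: walk the ring's request list oldest-first, stop at the first seqno the GPU has not yet passed, and record each retired request's tail as the last known GPU position. Assembled sketch (the get_seqno read is assumed from the upstream function):

    uint32_t seqno = ring->get_seqno(ring, true);

    while (!list_empty(&ring->request_list)) {
            struct drm_i915_gem_request *request;

            request = list_first_entry(&ring->request_list,
                                       struct drm_i915_gem_request,
                                       list);

            /* Requests complete in order, so the first unfinished
             * one ends the walk. */
            if (!i915_seqno_passed(seqno, request->seqno))
                    break;

            /* The GPU has read past this request, so its tail is a
             * safe value for the last retired head (1833). */
            ring->last_retired_head = request->tail;

            i915_gem_free_request(request);
    }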
1923 * write domains, emitting any outstanding lazy request and retiring and
1955 * -E?: The add request failed
3039 * relatively low latency when blocking on a particular request to finish.
3047 struct drm_i915_gem_request *request;
3061 list_for_each_entry(request, struct drm_i915_gem_request, &file_priv->mm.request_list, client_list) {
3062 if (time_after_eq(request->emitted_jiffies, recent_enough))
3065 ring = request->ring;
3066 seqno = request->seqno;
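Lines 3039-3066 are the throttle ioctl: a client may keep roughly 20 ms of its own work in flight, and blocks on the newest request older than that window. A sketch of the surrounding function under upstream assumptions (recent_enough, mm.lock, reset_counter, and the unlocked __wait_seqno() are from Linux i915; note this port's list_for_each_entry takes an extra type argument, as at 3061):

    static int
    i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            struct drm_i915_file_private *file_priv = file->driver_priv;
            unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
            struct drm_i915_gem_request *request;
            struct intel_ring_buffer *ring = NULL;
            unsigned reset_counter;
            u32 seqno = 0;

            spin_lock(&file_priv->mm.lock);
            list_for_each_entry(request, &file_priv->mm.request_list,
                                client_list) {
                    /* Stop at the first request young enough to keep. */
                    if (time_after_eq(request->emitted_jiffies, recent_enough))
                            break;

                    ring = request->ring;
                    seqno = request->seqno;
            }
            reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
            spin_unlock(&file_priv->mm.lock);

            if (seqno == 0)
                    return 0;   /* nothing old enough to wait for */

            /* Block outside struct_mutex for low latency (3039);
             * wedge checks elided. */
            return __wait_seqno(ring, seqno, reset_counter, true, NULL);
    }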
3975 /* Clean up our request list when the client is going away, so that
3981 struct drm_i915_gem_request *request;
3983 request = list_first_entry(&file_priv->mm.request_list,
3986 list_del(&request->client_list);
3987 request->file_priv = NULL;
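Lines 3975-3987 are the file-close half of the race handled in i915_gem_request_remove_from_client() above: when a client exits, its surviving requests are orphaned so later retiring never dereferences the freed file_priv. Assembled sketch, with the assumed client lock:

    void
    i915_gem_release(struct drm_device *dev, struct drm_file *file)
    {
            struct drm_i915_file_private *file_priv = file->driver_priv;

            /* Clean up our request list when the client is going away,
             * so retire_requests will not touch soon-to-be-freed state. */
            spin_lock(&file_priv->mm.lock);
            while (!list_empty(&file_priv->mm.request_list)) {
                    struct drm_i915_gem_request *request;

                    request = list_first_entry(&file_priv->mm.request_list,
                                               struct drm_i915_gem_request,
                                               client_list);
                    list_del(&request->client_list);
                    request->file_priv = NULL;
            }
            spin_unlock(&file_priv->mm.lock);
    }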