Lines Matching defs:request

1100 * Compare seqno against outstanding lazy request. Emit a request if they are
1257 * request and object lists appropriately for that event.
1387 * domain, and only that read domain. Enforce that in the request.
2333 struct drm_i915_gem_request *request;
2338 request = ring->preallocated_lazy_request;
2339 if (WARN_ON(request == NULL))
2343 struct intel_context *ctx = request->ctx;
2352 * things up similar to emitting the lazy request. The difference here
2353 * is that the flush _must_ happen before the next request, no matter
2366 /* Record the position of the start of the request so that
2368 * GPU processing the request, we never over-estimate the
2383 request->seqno = intel_ring_get_seqno(ring);
2384 request->ring = ring;
2385 request->head = request_start;
2386 request->tail = request_ring_position;
2388 /* Whilst this request exists, batch_obj will be on the
2390 * request is retired will the batch_obj be moved onto the
2394 request->batch_obj = obj;
2400 request->ctx = ring->last_context;
2401 if (request->ctx)
2402 i915_gem_context_reference(request->ctx);
2405 request->emitted_jiffies = jiffies;
2406 list_add_tail(&request->list, &ring->request_list);
2407 request->file_priv = NULL;
2413 request->file_priv = file_priv;
2414 list_add_tail(&request->client_list,
2419 trace_i915_gem_request_add(ring, request->seqno);
2434 *out_seqno = request->seqno;
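
Taken together, the matches from 2333 to 2434 are the bookkeeping half of emitting a request. A condensed sketch of that flow, pieced together from the matched lines; the flush before the request, error paths, the locking around the per-file list, and the retire-work scheduling are omitted, and request_start/request_ring_position are the ring offsets captured around command emission (not shown):

        struct drm_i915_gem_request *request;

        request = ring->preallocated_lazy_request;
        if (WARN_ON(request == NULL))
                return -ENOMEM;

        /* Record where in the ring the request's commands start and end. */
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->head = request_start;
        request->tail = request_ring_position;

        /* The batch stays on the active list, and keeps its active
         * reference, for as long as this request is outstanding. */
        request->batch_obj = obj;

        /* Pin the context that was current when the batch was submitted. */
        request->ctx = ring->last_context;
        if (request->ctx)
                i915_gem_context_reference(request->ctx);

        request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
        request->file_priv = NULL;

        if (file) {
                struct drm_i915_file_private *file_priv = file->driver_priv;

                /* Also thread the request onto the submitting client's
                 * list so the throttle path can find it. */
                request->file_priv = file_priv;
                list_add_tail(&request->client_list,
                              &file_priv->mm.request_list);
        }

        trace_i915_gem_request_add(ring, request->seqno);

        if (out_seqno)
                *out_seqno = request->seqno;
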
2439 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2441 struct drm_i915_file_private *file_priv = request->file_priv;
2447 list_del(&request->client_list);
2448 request->file_priv = NULL;
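
The match at 2439 is the helper that detaches a request from the client that submitted it. A minimal sketch, assuming the per-file list is guarded by a file_priv->mm.lock spinlock:

static void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        /* Unhook from the submitting client; the request itself stays
         * on its ring's request_list. */
        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}
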
2496 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2498 list_del(&request->list);
2499 i915_gem_request_remove_from_client(request);
2501 if (request->ctx)
2502 i915_gem_context_unreference(request->ctx);
2504 kfree(request);
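
The matches at 2496-2504 cover essentially the whole teardown helper; reassembled (with any unmatched lines omitted) it reads:

static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
        /* Drop the request from its ring's list and from its client. */
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);

        /* Release the context reference taken when the request was added. */
        if (request->ctx)
                i915_gem_context_unreference(request->ctx);

        kfree(request);
}
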
2510 struct drm_i915_gem_request *request;
2515 list_for_each_entry(request, &ring->request_list, list) {
2516 if (i915_seqno_passed(completed_seqno, request->seqno))
2519 return request;
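
The loop at 2515-2519 is the core of i915_gem_find_active_request: skip every request the GPU has already completed and return the first one still in flight. A sketch, with the engine type and the way completed_seqno is read assumed:

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
        struct drm_i915_gem_request *request;
        u32 completed_seqno;

        /* Last seqno the hardware reports as finished (accessor assumed). */
        completed_seqno = ring->get_seqno(ring, false);

        list_for_each_entry(request, &ring->request_list, list) {
                /* The list is in submission order; anything the completed
                 * seqno has passed is already done. */
                if (i915_seqno_passed(completed_seqno, request->seqno))
                        continue;

                /* First request the GPU has not finished: the one it was
                 * executing (or about to start). */
                return request;
        }

        return NULL;
}
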
2528 struct drm_i915_gem_request *request;
2531 request = i915_gem_find_active_request(ring);
2533 if (request == NULL)
2538 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2540 list_for_each_entry_continue(request, &ring->request_list, list)
2541 i915_set_reset_status(dev_priv, request->ctx, false);
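
The matches at 2528-2541 show how blame is assigned after a hang: only the context that owned the active request can be guilty, and every request queued behind it is marked innocent. A sketch of that logic; the function name, the hangcheck test and the engine type are assumptions:

static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
                                       struct intel_engine_cs *ring)
{
        struct drm_i915_gem_request *request;
        bool ring_hung;

        request = i915_gem_find_active_request(ring);
        if (request == NULL)
                return;

        /* Assumed: the hangcheck score decides whether this engine was
         * actually stuck on its active request. */
        ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

        /* The context that submitted the active batch takes the blame... */
        i915_set_reset_status(dev_priv, request->ctx, ring_hung);

        /* ...and every request queued after it is an innocent victim. */
        list_for_each_entry_continue(request, &ring->request_list, list)
                i915_set_reset_status(dev_priv, request->ctx, false);
}
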
2562 * the request.
2565 struct drm_i915_gem_request *request;
2567 request = list_first_entry(&ring->request_list,
2571 i915_gem_free_request(request);
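
The matches at 2565-2571 are a drain loop from the reset cleanup path: keep freeing the oldest request until the ring has none left. Roughly (the loop condition is assumed):

        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);

                i915_gem_free_request(request);
        }
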
2636 * This function clears the request list as sequence numbers are passed.
2669 struct drm_i915_gem_request *request;
2672 request = list_first_entry(&ring->request_list,
2676 if (!i915_seqno_passed(seqno, request->seqno))
2679 trace_i915_gem_request_retire(ring, request->seqno);
2684 * ringbuffer to which the request belongs.
2687 struct intel_context *ctx = request->ctx;
2692 /* We know the GPU must have read the request to have
2694 * of the tail of the request to update the last known position
2697 ringbuf->last_retired_head = request->tail;
2699 i915_gem_free_request(request);
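
The matches from 2669 to 2699 outline the retire loop: walk the ring's request list in order, stop at the first seqno the GPU has not reached, and for every completed request note how far the hardware has consumed the ringbuffer before freeing it. A sketch, with the execlists/legacy ringbuffer lookup and its field names assumed:

        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
                struct intel_ringbuffer *ringbuf;

                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);

                /* Requests retire in submission order; stop at the first
                 * one the GPU has not completed. */
                if (!i915_seqno_passed(seqno, request->seqno))
                        break;

                trace_i915_gem_request_retire(ring, request->seqno);

                /* Find the ringbuffer to which the request belongs: with
                 * execlists it lives in the request's context, otherwise
                 * it is the engine's single buffer (fields assumed). */
                if (i915.enable_execlists) {
                        struct intel_context *ctx = request->ctx;

                        ringbuf = ctx->engine[ring->id].ringbuf;
                } else {
                        ringbuf = ring->buffer;
                }

                /* The GPU must have read the request to have completed it,
                 * so its tail is a safe value for the last known position
                 * of the hardware's read head. */
                ringbuf->last_retired_head = request->tail;

                i915_gem_free_request(request);
        }
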
2762 * write domains, emitting any outstanding lazy request and retiring and
2794 * -E?: The add request failed
4034 * relatively low latency when blocking on a particular request to finish.
4042 struct drm_i915_gem_request *request;
4057 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4058 if (time_after_eq(request->emitted_jiffies, recent_enough))
4061 ring = request->ring;
4062 seqno = request->seqno;
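
The matches at 4042-4062 come from the throttle path: scan the client's own request list for the newest request older than a short window and remember its ring and seqno, so the caller can block on it and the client never runs too far ahead of the GPU. A sketch of the scan; the 20 ms window, the locking and the wait call itself are assumptions:

        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
        struct drm_i915_gem_request *request;
        struct intel_engine_cs *ring = NULL;
        u32 seqno = 0;

        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                /* Requests emitted inside the window are recent enough
                 * and do not need throttling. */
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;

                /* Keep overwriting: after the loop this is the newest
                 * request that falls outside the window. */
                ring = request->ring;
                seqno = request->seqno;
        }
        spin_unlock(&file_priv->mm.lock);

        if (seqno == 0)
                return 0;

        /* Wait for @seqno to complete on @ring (wait helper not shown). */
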
5069 /* Clean up our request list when the client is going away, so that
5075 struct drm_i915_gem_request *request;
5077 request = list_first_entry(&file_priv->mm.request_list,
5080 list_del(&request->client_list);
5081 request->file_priv = NULL;
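
Finally, the matches at 5069-5081 are the file-release path: when a client closes its DRM file, every request it still has outstanding is unhooked from the client list so later retirement never dereferences the soon-to-be-freed file_priv. A sketch, with the locking assumed:

        struct drm_i915_file_private *file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        while (!list_empty(&file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);

                /* Detach from the dying client; the request itself stays
                 * on its ring and will be retired normally. */
                list_del(&request->client_list);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
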