/*
 * Copyright © 2008, 2010, 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"
struct eb_objects {
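	/*
	 * Hash bucket mask: one less than the (power-of-two) bucket count,
	 * so an object handle is bucketed with "handle & eb->and".
	 */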
int and;
};
static struct eb_objects *
{
	return eb;
}
static void
{
}
static void
{
}
static struct drm_i915_gem_object *
{
return NULL;
}
static void
{
}
{
!obj->map_and_fenceable ||
}
static int
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
{
if (target_obj == NULL)
return -ENOENT;
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
}
/* Validate that the target is in a valid r/w GPU domain */
DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
goto err;
}
& ~I915_GEM_GPU_DOMAINS)) {
"obj %p target %d offset %d "
"read %08x write %08x",
goto err;
}
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
	if (target_offset == reloc->presumed_offset)
		goto out;
/* Check that the relocation address is valid... */
DRM_ERROR("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
goto err;
}
DRM_ERROR("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
goto err;
}
if (ret)
return ret;
if (ret)
return ret;
/* Map the page containing the relocation we're going to
* perform.
*/
*reloc_entry = reloc_val;
/* and update the user's relocation entry */
out:
ret = 0;
err:
return ret;
}
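/*
 * The (truncated) relocation helper above validates one relocation and then
 * patches the batch object: the value written is the target object's GTT
 * offset plus the relocation delta, stored at reloc->offset inside the
 * object being relocated, and the write is skipped entirely when userspace's
 * presumed_offset already matches.  The block below is an illustrative,
 * self-contained sketch of that arithmetic only -- the "sketch_" struct and
 * function are hypothetical stand-ins, not the driver's types -- and it is
 * kept under #if 0 like the other reference-only blocks in this file.
 */
#if 0
struct sketch_reloc {
	uint64_t offset;		/* byte offset to patch inside the object */
	uint64_t delta;			/* constant added to the target address */
	uint64_t presumed_offset;	/* userspace's guess of the target address */
};

static int
sketch_apply_reloc(char *cpu_map, size_t obj_size,
    uint64_t target_gtt_offset, const struct sketch_reloc *reloc)
{
	uint32_t value;

	/* The patched slot must lie fully inside the object... */
	if (obj_size < sizeof (uint32_t) ||
	    reloc->offset > obj_size - sizeof (uint32_t))
		return -EINVAL;
	/* ...and be 4-byte aligned (cpu_map itself is assumed aligned). */
	if (reloc->offset & 3)
		return -EINVAL;

	/* Nothing to do if userspace already wrote the final value. */
	if (target_gtt_offset == reloc->presumed_offset)
		return 0;

	value = (uint32_t)(target_gtt_offset + reloc->delta);
	*(uint32_t *)(cpu_map + reloc->offset) = value;
	return 0;
}
#endif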
static int
struct eb_objects *eb)
{
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
return -EFAULT;
do {
if (ret)
return ret;
if (r->presumed_offset != offset &&
&r->presumed_offset,
sizeof(r->presumed_offset))) {
return -EFAULT;
}
user_relocs++;
r++;
} while (--count);
}
return 0;
}
static int
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
if (ret)
return ret;
}
return 0;
}
static int
struct eb_objects *eb,
{
int ret = 0;
if (ret)
break;
}
return ret;
}
static int
{
}
static int
struct intel_ring_buffer *ring,
bool *need_reloc)
{
int ret;
	/* workaround for GEN5 gpu hang with ugnx */
if (ret)
return ret;
if (has_fenced_gpu_access) {
if (ret)
return ret;
if (i915_gem_object_pin_fence(obj))
obj->pending_fenced_gpu_access = true;
}
}
/* Ensure ppgtt mapping exists if needed */
}
*need_reloc = true;
}
}
return 0;
}
static void
{
return;
}
static int
bool *need_relocs)
{
int retry;
struct drm_i915_gem_object,
while (!list_empty(objects)) {
struct drm_i915_gem_object,
		/* workaround for GEN5 gpu hang with ugnx */
if (need_mappable)
else
obj->pending_fenced_gpu_access = false;
int err;
else
if (err)
obj->pending_fenced_gpu_access = true;
}
}
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
*
* 1a. Unbind all objects that do not match the GTT constraints for
* the execbuffer (fenceable, mappable, alignment etc).
* 1b. Increment pin count for already bound objects.
* 2. Bind new objects.
* 3. Decrement pin count.
*
 * This avoids unnecessary unbinding of later objects in order to make
 * room for the earlier objects *unless* we need to defragment.
 * (A standalone sketch of this retry scheme follows after this function.)
*/
retry = 0;
do {
int ret = 0;
/* Unbind any ill-fitting objects or pin. */
continue;
		/* workaround for GEN5 gpu hang with ugnx */
else
if (ret)
goto err;
}
/* Bind fresh objects */
continue;
if (ret)
goto err;
}
err: /* Decrement pin count for bound objects */
return ret;
if (ret)
return ret;
} while (1);
/* LINTED */
}
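/*
 * An illustrative, self-contained sketch of the three-phase reservation
 * described in the comment above: unbind ill-fitting objects and pin the
 * rest, bind whatever is left, drop the temporary pins, and retry once
 * after evicting if the GTT ran out of space.  Everything here (the
 * "sketch_" types, the slot-counting GTT model, the helpers) is a
 * hypothetical stand-in for the driver's real bookkeeping, kept under
 * #if 0 like the other reference-only blocks in this file.
 */
#if 0
struct sketch_obj {
	int bound;	/* currently occupies a GTT slot */
	int fits;	/* existing binding satisfies this execbuffer's needs */
	int pinned;	/* temporarily pinned by this reservation pass */
};

struct sketch_gtt {
	int free_slots;	/* toy model of free aperture space */
};

static int
sketch_bind(struct sketch_gtt *gtt, struct sketch_obj *o)
{
	if (gtt->free_slots == 0)
		return -ENOSPC;
	gtt->free_slots--;
	o->bound = 1;
	o->fits = 1;
	return 0;
}

static void
sketch_unbind(struct sketch_gtt *gtt, struct sketch_obj *o)
{
	gtt->free_slots++;
	o->bound = 0;
}

static int
sketch_reserve(struct sketch_gtt *gtt, struct sketch_obj *objs, int count)
{
	int i, ret, retried = 0;

	do {
		ret = 0;

		/* 1a/1b: unbind misfits, pin the already well-placed. */
		for (i = 0; i < count; i++) {
			if (!objs[i].bound)
				continue;
			if (!objs[i].fits)
				sketch_unbind(gtt, &objs[i]);
			else
				objs[i].pinned = 1;
		}

		/* 2: bind (and pin) everything still unbound. */
		for (i = 0; i < count && ret == 0; i++) {
			if (objs[i].bound)
				continue;
			ret = sketch_bind(gtt, &objs[i]);
			if (ret == 0)
				objs[i].pinned = 1;
		}

		/* 3: drop the temporary pin counts. */
		for (i = 0; i < count; i++)
			objs[i].pinned = 0;

		if (ret != -ENOSPC || retried++)
			return ret;

		/* Out of space: "evict" (unbind everything here) and retry. */
		for (i = 0; i < count; i++)
			if (objs[i].bound)
				sketch_unbind(gtt, &objs[i]);
	} while (1);
}
#endif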
static int
struct drm_i915_gem_execbuffer2 *args,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
{
bool need_relocs;
int *reloc_offset;
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
struct drm_i915_gem_object,
}
total = 0;
for (i = 0; i < count; i++)
return -ENOMEM;
}
total = 0;
for (i = 0; i < count; i++) {
goto err;
}
reloc_offset[i] = total;
}
if (ret) {
goto err;
}
/* reacquire the objects */
for (i = 0; i < count; i++) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
goto err;
}
}
if (ret)
goto err;
if (ret)
goto err;
}
/* Leave the user relocations as are, this is the painfully slow path,
* and we want to avoid the complication of dropping the lock whilst
* having buffers reserved in the aperture and so causing spurious
* ENOSPC for random operations.
*/
err:
return ret;
}
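/*
 * The (truncated) slow path above copies every object's relocation array
 * into a single contiguous kernel buffer before retaking the lock;
 * reloc_offset[i] records where object i's relocations begin inside that
 * buffer.  A self-contained sketch of just that layout arithmetic follows;
 * the "sketch_" names are hypothetical and the block is under #if 0 like
 * the other reference-only blocks in this file.
 */
#if 0
struct sketch_exec_entry {
	unsigned int relocation_count;
};

/*
 * Fill reloc_offset[] with per-object starting indices into one flat
 * relocation buffer and return the total entry count, or (unsigned int)-1
 * if the running total would overflow.
 */
static unsigned int
sketch_layout_relocs(const struct sketch_exec_entry *exec, int count,
    unsigned int *reloc_offset)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (exec[i].relocation_count > (unsigned int)-1 - total)
			return (unsigned int)-1;
		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}
	return total;
}
#endif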
static int
{
int ret;
if (ret)
return ret;
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
if (flush_domains & I915_GEM_DOMAIN_GTT)
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
return intel_ring_invalidate_all_caches(ring);
}
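/*
 * What survives of the function above still shows its shape: OR together
 * the write domains the objects are being flushed out of, do the CPU-side
 * work those domains require (a chipset flush for CPU-domain writes, a
 * write barrier for GTT-domain writes), then invalidate the GPU caches so
 * the new batch never sees stale data.  The sketch below is illustrative
 * only; the "sketch_" flags and helpers are hypothetical stand-ins for the
 * driver's domain bits and flush primitives, kept under #if 0.
 */
#if 0
#define	SKETCH_DOMAIN_CPU	(1u << 0)	/* stand-in for I915_GEM_DOMAIN_CPU */
#define	SKETCH_DOMAIN_GTT	(1u << 1)	/* stand-in for I915_GEM_DOMAIN_GTT */

struct sketch_flush_obj {
	unsigned int write_domain;	/* domain holding dirty data, if any */
};

static void sketch_chipset_flush(void) { /* flush CPU caches to memory */ }
static void sketch_write_barrier(void) { /* order GTT writes before GPU reads */ }
static void sketch_invalidate_gpu_caches(void) { /* drop stale GPU caches */ }

static void
sketch_move_to_gpu(const struct sketch_flush_obj *objs, int count)
{
	unsigned int flush_domains = 0;
	int i;

	for (i = 0; i < count; i++)
		flush_domains |= objs[i].write_domain;

	if (flush_domains & SKETCH_DOMAIN_CPU)
		sketch_chipset_flush();
	if (flush_domains & SKETCH_DOMAIN_GTT)
		sketch_write_barrier();

	/* Unconditional, matching the comment in the function above. */
	sketch_invalidate_gpu_caches();
}
#endif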
static bool
{
}
static int
int count)
{
int i;
int relocs_total = 0;
sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
#if 0 /* Should match the if a few lines below */
int length; /* limited by fault_in_pages_readable() */
#endif
/*
* First check for malicious input causing overflow in
* the worst case where we need to allocate the entire
* relocation tree as a single array.
*/
return -EINVAL;
#if 0
sizeof(struct drm_i915_gem_relocation_entry);
return -EFAULT;
/* we may also need to update the presumed offsets */
return -EFAULT;
return -EFAULT;
#endif
}
return 0;
}
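/*
 * The overflow concern described in the comment above: the per-object
 * relocation counts are later summed and multiplied by the size of a
 * relocation entry to size one big allocation, so the sum must be capped
 * before any arithmetic can wrap.  A self-contained sketch of that check
 * follows; "sketch_check_reloc_totals" and its parameters are hypothetical,
 * and the block is under #if 0 like the other reference-only blocks here.
 */
#if 0
static int
sketch_check_reloc_totals(const unsigned int *relocation_counts, int count,
    size_t entry_size)
{
	unsigned int relocs_max, relocs_total = 0;
	int i;

	if (entry_size == 0)
		return -EINVAL;
	/* Largest total that can still be multiplied by entry_size. */
	relocs_max = (unsigned int)(INT_MAX / entry_size);

	for (i = 0; i < count; i++) {
		if (relocation_counts[i] > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += relocation_counts[i];
	}
	return 0;
}
#endif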
static void
struct intel_ring_buffer *ring)
{
}
}
}
static void
struct intel_ring_buffer *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
}
static int
struct intel_ring_buffer *ring)
{
int ret, i;
return 0;
if (ret)
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, 0);
}
return 0;
}
static int
/* LINTED */
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
bool need_relocs;
if (!i915_gem_check_execbuffer(args)) {
return -EINVAL;
}
if (ret)
return ret;
flags = 0;
return -EPERM;
}
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
break;
case I915_EXEC_BSD:
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
return -EPERM;
}
break;
case I915_EXEC_BLT:
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
return -EPERM;
}
break;
case I915_EXEC_VEBOX:
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
return -EPERM;
}
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
return -EINVAL;
}
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
return -EINVAL;
}
switch (mode) {
return -EINVAL;
return -EINVAL;
/* The HW changed the meaning on this bit on gen6 */
}
break;
default:
return -EINVAL;
}
return -EINVAL;
}
if (args->num_cliprects != 0) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
DRM_DEBUG("execbuf with %u cliprects\n",
return -EINVAL;
}
goto pre_mutex_err;
}
(struct drm_clip_rect __user *)
goto pre_mutex_err;
}
}
if (ret)
goto pre_mutex_err;
goto pre_mutex_err;
}
goto pre_mutex_err;
}
if (MDB_TRACK_ENABLE) {
}
/* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
/* prevent error path from reading uninitialized data */
goto err;
}
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
goto err;
}
if (MDB_TRACK_ENABLE)
}
/* take note of the batch buffer before we might reorder the lists */
struct drm_i915_gem_object,
/* Move the objects en-masse into the GTT, evicting if necessary. */
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
if (ret) {
exec,
args->buffer_count);
}
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
goto err;
}
	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but let's be paranoid and do it
* unconditionally for now. */
if (ret)
goto err;
if (ret)
goto err;
if (ret)
goto err;
}
if (ret)
goto err;
}
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
if (ret)
goto err;
flags);
if (ret)
goto err;
}
} else {
flags);
if (ret)
goto err;
}
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object,
}
return ret;
}
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
/* LINTED */
{
int ret, i;
return -EINVAL;
}
/* Copy in the exec list from userland */
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
(struct drm_i915_relocation_entry __user *)
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
return -EFAULT;
}
	for (i = 0; i < args->buffer_count; i++) {
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
/* ... and back out to userspace */
if (ret) {
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
}
}
return ret;
}
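/*
 * The legacy path above converts each drm_i915_gem_exec_object into a
 * drm_i915_gem_exec_object2 before calling the common execbuffer code: the
 * shared fields are copied across and only the new flags word has to be
 * synthesized.  A sketch of the per-entry conversion follows; the helper
 * name and its "needs_fence" parameter (standing in for the hardware
 * generation check made by the real code) are hypothetical, and the block
 * is under #if 0 like the other reference-only blocks in this file.
 */
#if 0
static void
sketch_exec_to_exec2(const struct drm_i915_gem_exec_object *in,
    struct drm_i915_gem_exec_object2 *out, int needs_fence)
{
	out->handle = in->handle;
	out->relocation_count = in->relocation_count;
	out->relocs_ptr = in->relocs_ptr;
	out->alignment = in->alignment;
	out->offset = in->offset;
	out->flags = needs_fence ? EXEC_OBJECT_NEEDS_FENCE : 0;
	out->rsvd1 = 0;
	out->rsvd2 = 0;
}
#endif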
int
/* LINTED */
{
int ret;
return -EINVAL;
}
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
(struct drm_i915_relocation_entry __user *)
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
return -EFAULT;
}
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
if (ret) {
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
}
}
return ret;
}