/* BEGIN CSTYLED */
/*
* Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/x86_archext.h>
#include <sys/vfs_opreg.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#ifndef roundup
#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
#endif /* !roundup */
static void
int write);
int write);
static void
/*ARGSUSED*/
int
{
return ENODEV;
DRM_ERROR("i915_gem_init_ioctl invalid args 0x%lx gtt_start 0x%lx gtt_end 0x%lx", &args, args.gtt_start, args.gtt_end);
return EINVAL;
}
DRM_DEBUG("i915_gem_init_ioctl dev->gtt_total %x, dev_priv->mm.gtt_space 0x%x gtt_start 0x%lx", dev->gtt_total, dev_priv->mm.gtt_space, args.gtt_start);
return 0;
}
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
if ( ret != 0)
DRM_DEBUG("i915_gem_get_aperture_ioctl called sizeof %d, aper_size 0x%x, aper_available_size 0x%x\n", sizeof(args), dev->gtt_total, args.aper_available_size);
return 0;
}
/**
* Creates a new mm object and returns a handle to it.
*/
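/*
 * A minimal user-space sketch of this interface (hedged: variable names
 * are illustrative, not taken from this file), using the structures and
 * ioctl numbers from i915_drm.h. The driver rounds the size up to a page
 * multiple itself.
 *
 *	struct drm_i915_gem_create create;
 *	memset(&create, 0, sizeof (create));
 *	create.size = obj_size;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		handle = create.handle;
 */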
/*ARGSUSED*/
int
{
int handlep;
int ret;
return ENODEV;
return EINVAL;
}
/* Allocate the new object */
DRM_ERROR("Failed to alloc obj");
return ENOMEM;
}
if (ret)
return ret;
if ( ret != 0)
DRM_DEBUG("i915_gem_create_ioctl object name %d, size 0x%lx, list 0x%lx, obj 0x%lx", handlep, args.size, &fpriv->object_idr, obj);
return 0;
}
/**
* Reads data from the object referenced by handle.
*
* On error, the contents of *data are undefined.
*/
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
return EBADF;
/* Bounds check source.
*
* XXX: This could use review for overflow issues...
*/
DRM_ERROR("i915_gem_pread_ioctl invalid args");
return EINVAL;
}
if (ret != 0) {
return EFAULT;
}
unsigned long unwritten = 0;
if (unwritten) {
}
return ret;
}
/*ARGSUSED*/
static int
struct drm_i915_gem_pwrite *args,
{
int ret = 0;
unsigned long unwritten = 0;
if (ret) {
return ret;
}
if (ret)
goto err;
DRM_DEBUG("obj %d write domain 0x%x read domain 0x%x", obj->name, obj->write_domain, obj->read_domains);
if (unwritten) {
goto err;
}
err:
if (ret)
return ret;
}
/*ARGSUSED*/
int
struct drm_i915_gem_pwrite *args,
{
DRM_ERROR("i915_gem_shmem_pwrite not supported");
return -1;
}
/**
* Writes data to the object referenced by handle.
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
/*ARGSUSED*/
int
{
int ret = 0;
return ENODEV;
if (ret)
DRM_ERROR("i915_gem_pwrite_ioctl failed to copy from user");
return EBADF;
/* Bounds check destination.
*
* XXX: This could use review for overflow issues...
*/
DRM_ERROR("i915_gem_pwrite_ioctl invalid arg");
return EINVAL;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
else
if (ret)
return ret;
}
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
*/
/*ARGSUSED*/
int
{
int ret = 0;
return ENODEV;
/* Only handle setting domains to types used by the CPU. */
/* Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
if (ret) {
DRM_ERROR("set_domain invalid read or write");
return EINVAL;
}
return EBADF;
DRM_DEBUG("set_domain_ioctl %p(name %d size 0x%x), %08x %08x\n",
if (read_domains & I915_GEM_DOMAIN_GTT) {
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
*/
ret = 0;
} else {
}
if (ret)
return ret;
}
/**
* Called when user space has done writes to this buffer
*/
/*ARGSUSED*/
int
{
int ret = 0;
return ENODEV;
return EBADF;
}
DRM_DEBUG("%s: sw_finish %d (%p name %d size 0x%x)\n",
/* Pinned buffers may be scanout, so flush the cache */
{
}
return ret;
}
/**
* Maps the contents of an object, returning the address it is mapped
* into.
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*/
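/*
 * Expected user-space usage, as a hedged sketch (variable names are
 * illustrative, not taken from this file):
 *
 *	struct drm_i915_gem_mmap mmap_arg;
 *	memset(&mmap_arg, 0, sizeof (mmap_arg));
 *	mmap_arg.handle = handle;
 *	mmap_arg.offset = 0;
 *	mmap_arg.size = obj_size;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */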
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
sizeof (struct drm_i915_gem_mmap));
return EBADF;
if (ret)
return ret;
&args, sizeof (struct drm_i915_gem_mmap));
return 0;
}
static void
{
return;
}
static void
{
/* Add a reference if we're newly entering the active list. */
}
/* Move from whatever list we were on to the tail of execution. */
}
static void
{
obj_priv->last_rendering_seqno = 0;
}
static void
{
{
} else {
}
obj_priv->last_rendering_seqno = 0;
}
}
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
*
* Must be called with struct_lock held.
*
* Returned sequence numbers are nonzero on success.
*/
static uint32_t
{
int was_empty;
DRM_ERROR("Failed to alloc request");
return 0;
}
/* Grab the seqno we're going to make this request be, and bump the
* next (skipping 0 so it can be the reserved no-seqno value).
*/
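/*
 * The ring emission below follows the upstream i915_add_request flow:
 * an MI_STORE_DWORD_INDEX writes the new seqno into the hardware status
 * page, and an MI_USER_INTERRUPT raises an interrupt so that waiters in
 * i915_wait_request() can be woken once the request has executed.
 */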
BEGIN_LP_RING(4);
OUT_RING(0);
BEGIN_LP_RING(2);
OUT_RING(0);
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
if (flush_domains != 0) {
obj->write_domain) {
obj->write_domain = 0;
}
}
}
{
/* Instead of queueing onto a Linux-style workqueue, schedule the retire work to run after a delay of HZ ticks. */
DRM_DEBUG("i915_gem: schedule_delayed_work");
}
return seqno;
}
/**
* Command execution barrier
*
* Ensures that all commands in the ring are finished
* before signalling the CPU
*/
{
/* The sampler always gets flushed on i965 (sigh) */
BEGIN_LP_RING(2);
OUT_RING(0); /* noop */
return flush_domains;
}
/**
* Moves buffers associated only with the given active seqno from the active
* to inactive list, potentially freeing them.
*/
static void
struct drm_i915_gem_request *request)
{
/* Move any buffers on the active list that are no longer referenced
*/
struct drm_i915_gem_object,
list);
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
* this seqno.
*/
return;
DRM_DEBUG("%s: retire %d moves to inactive list %p\n",
if (obj->write_domain != 0) {
} else {
}
}
}
/**
* Returns true if seq1 is later than seq2.
*/
static int
{
}
{
}
/**
* This function clears the request list as sequence numbers are passed.
*/
void
{
} else
break;
}
}
void
{
/* Return early if GEM is idle */
if (worktimer_id == NULL) {
return;
}
{
DRM_DEBUG("i915_gem: schedule_delayed_work");
}
}
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
* @flags: reset domains
*
* Reset the chip. Useful if a hang is detected.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
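/*
 * Note (hedged): on 965-class hardware the reset itself is driven through
 * the GDRST register in PCI configuration space; GDRST_FULL requests a
 * full chip reset, which is why the GTT, ring buffer, interrupts and
 * display state all have to be reprogrammed afterwards.
 */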
{
int timeout = 0;
if (flags & GDRST_FULL)
DRM_ERROR("i915_reset: pci_config_setup failed");
return;
}
/*
* Set the reset bit, wait for reset, then clear it. Hardware
* will clear the status bit (bit 1) when it's actually ready
* for action again.
*/
drv_usecwait(50);
/* ...we don't want to loop forever though, 500ms should be plenty */
do {
drv_usecwait(100);
/* Ok now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
* there. Fortunately we don't need to do this unless we reset the
* chip at a PCI level.
*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
/* Stop the ring if it's running. */
I915_WRITE(PRB0_CTL, 0);
I915_WRITE(PRB0_TAIL, 0);
I915_WRITE(PRB0_HEAD, 0);
/* Initialize the ring. */
(void) drm_irq_install(dev);
}
/*
* Display needs restore too...
*/
if (flags & GDRST_FULL)
}
/**
* Waits for a sequence number to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
{
int ret = 0;
else
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
(void) i915_driver_irq_preinstall(dev);
}
}
}
/* GPU may be hung, reset needed */
DRM_ERROR("GPU hang detected, trying reset ... waiting on irq_queue for seqno %d, current seqno %d", seqno, i915_get_gem_seqno(dev));
}
else
DRM_ERROR("GPU hang detected, reboot required");
return 0;
}
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
*/
if (ret == 0)
return ret;
}
static void
{
if (flush_domains & I915_GEM_DOMAIN_CPU)
/*
* read/write caches:
*
* I915_GEM_DOMAIN_RENDER is always invalidated, but is
* only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
* also flushed at 2d versus 3d pipeline switches.
*
* read-only caches:
*
* I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
* MI_READ_FLUSH is set, and is always flushed on 965.
*
* I915_GEM_DOMAIN_COMMAND may not exist?
*
* I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
* invalidated when MI_EXE_FLUSH is set.
*
* I915_GEM_DOMAIN_VERTEX, which exists on 965, is
* invalidated with every MI_FLUSH.
*
* TLBs:
*
* On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
* and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
* I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
* are flushed at any MI_FLUSH.
*/
if ((invalidate_domains|flush_domains) &
cmd &= ~MI_NO_WRITE_FLUSH;
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
*/
cmd |= MI_READ_FLUSH;
}
cmd |= MI_EXE_FLUSH;
BEGIN_LP_RING(2);
OUT_RING(0); /* noop */
}
}
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
static int
{
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
return 0;
}
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
DRM_DEBUG("%s: object %d %p wait for seqno %08x\n",
if (seqno == 0) {
DRM_DEBUG("last rendering may already have finished");
return 0;
}
if (ret != 0) {
DRM_ERROR("%s: i915_wait_request request->seqno %d now %d\n", __func__, seqno, i915_get_gem_seqno(dev));
return ret;
}
}
return 0;
}
/**
* Unbinds an object from the GTT aperture.
*/
int
{
int ret = 0;
return 0;
DRM_ERROR("Attempting to unbind pinned buffer\n");
return EINVAL;
}
/* Wait for any rendering to complete
*/
if (ret) {
return ret;
}
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
if (ret) {
return ret;
}
}
}
/* Remove ourselves from the LRU list if present. */
return 0;
}
static int
{
int ret = 0;
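/*
 * Eviction strategy, summarized from the comments below: prefer an
 * already-inactive buffer; otherwise wait on the oldest outstanding
 * request so something becomes inactive; otherwise flush a buffer on
 * the flushing list and retry; only when all three lists are empty do
 * we give up and return ENOMEM.
 */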
for (;;) {
/* If there's an inactive buffer available now, grab it
* and be done.
*/
struct drm_i915_gem_object,
list);
/* Wait on the rendering and unbind the buffer. */
break;
}
/* If we didn't get anything, but the ring is still processing
* things, wait for one of those things to finish and hopefully
* leave us a buffer to evict.
*/
struct drm_i915_gem_request,
list);
if (ret) {
break;
}
/* if waiting caused an object to become inactive,
* then loop around and wait for it. Otherwise, we
* assume that waiting freed and unbound something,
* so there should now be some space in the GTT
*/
continue;
break;
}
/* If we didn't have anything on the request list but there
* are buffers awaiting a flush, emit one and try again.
* When we wait on it, those buffers waiting for that flush
* will get moved to inactive.
*/
struct drm_i915_gem_object,
list);
obj->write_domain);
continue;
}
DRM_ERROR("inactive empty %d request empty %d "
"flushing empty %d\n",
/* If we didn't do any of the above, there's nothing to be done
* and we just can't fit it in.
*/
return ENOMEM;
}
return ret;
}
static int
{
int ret;
for (;;) {
if (ret != 0)
break;
}
return 0;
else
return ret;
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
{
return EBUSY;
if (alignment == 0)
return EINVAL;
}
DRM_ERROR("Object already bound");
return 0;
}
if (free_space != NULL) {
}
}
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
DRM_ERROR("GTT full, but LRU list empty\n");
return ENOMEM;
}
if (ret != 0) {
return ret;
}
goto search_free;
}
if (ret) {
DRM_ERROR("bind to gtt failed to get page list");
return ret;
}
/* Create an AGP memory structure pointing at our pages, and bind it
* into the GTT.
*/
DRM_DEBUG("Binding object %d of page_count %d at gtt_offset 0x%x obj->pfnarray = 0x%lx",
return ENOMEM;
}
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
return 0;
}
void
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
return;
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
{
return;
/* Queue the GPU write cache flushing we need. */
obj->write_domain = 0;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
{
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
{
return;
obj->write_domain = 0;
}
/**
* Moves a single object to the GTT read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
{
int ret;
/* Not valid to be called on unbound objects. */
return EINVAL;
/* Wait on any GPU rendering and flushing to occur. */
if (ret != 0) {
return ret;
}
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
if (write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
if (write) {
}
return 0;
}
/**
* Moves a single object to the CPU read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
{
int ret;
/* Wait on any GPU rendering and flushing to occur. */
if (ret != 0)
return ret;
/* If we have a partially-valid cache of the object in the CPU,
* finish invalidating it and free the per-page flags.
*/
/* Flush the CPU cache if it's still invalid. */
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
}
return 0;
}
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invalidating though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped by GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read/written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
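/*
 * In outline (mirroring the upstream implementation this port follows),
 * the per-object step below computes:
 *
 *	if (write_domain == 0)
 *		read_domains |= obj->read_domains;
 *	if (obj->write_domain && obj->write_domain != read_domains) {
 *		flush_domains |= obj->write_domain;
 *		invalidate_domains |= read_domains & ~obj->write_domain;
 *	}
 *	invalidate_domains |= read_domains & ~obj->read_domains;
 *
 * and accumulates the results into dev->invalidate_domains and
 * dev->flush_domains so that execbuffer can issue a single flush.
 */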
static void
{
DRM_DEBUG("%s: object %p read %08x -> %08x write %08x -> %08x\n",
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (write_domain == 0)
else
/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
DRM_DEBUG("%s: CPU domain flush %08x invalidate %08x\n",
}
if ((write_domain | flush_domains) != 0)
DRM_DEBUG("%s: read %08x write %08x invalidate %08x flush %08x\n",
}
/**
* Moves the object from a partially CPU read to a full one.
*
* Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
{
if (!obj_priv->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
int i;
if (obj_priv->page_cpu_valid[i])
continue;
}
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
}
/**
* Set the CPU read domain on a range of the object.
*
* The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
* not entirely valid. The page_cpu_valid member of the object flags which
* pages have been flushed, and will be respected by
* i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
* of the whole object.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
{
int i, ret;
return i915_gem_object_set_to_cpu_domain(obj, 0);
/* Wait on any GPU rendering and flushing to occur. */
if (ret != 0)
return ret;
/* If we're already fully in the CPU read domain, we're done. */
return 0;
/* Otherwise, create/clear the per-page flag array, since we're
* newly adding I915_GEM_DOMAIN_CPU
*/
return ENOMEM;
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
i++) {
if (obj_priv->page_cpu_valid[i])
continue;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
return 0;
}
/**
* Pin an object to the GTT and evaluate the relocations landing in it.
*/
static int
struct drm_i915_gem_exec_object *entry)
{
int i, ret;
/* Choose the GTT offset for our buffer and put it there. */
if (ret) {
DRM_ERROR("failed to pin");
return ret;
}
/* Apply the relocations, using the GTT aperture to avoid cache
* flushing requirements.
*/
for (i = 0; i < entry->relocation_count; i++) {
if (ret != 0) {
DRM_ERROR("failed to copy from user");
return ret;
}
if (target_obj == NULL) {
return EBADF;
}
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
DRM_ERROR("No GTT space found for object %d\n",
return EINVAL;
}
DRM_ERROR("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
return EINVAL;
}
DRM_ERROR("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
return EINVAL;
}
"obj %p target %d offset %d "
"read %08x write %08x",
return EINVAL;
}
DRM_ERROR("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
return EINVAL;
}
DRM_DEBUG("%s: obj %p offset %08x target %d "
"read %08x write %08x gtt %08x "
"presumed %08x delta %08x\n",
obj,
(int) reloc.target_handle,
(int) reloc.read_domains,
(int) reloc.write_domain,
(int) target_obj_priv->gtt_offset,
(int) reloc.presumed_offset,
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
continue;
}
if (ret != 0) {
return EINVAL;
}
/* Map the page containing the relocation we're going to
* perform.
*/
*reloc_entry = reloc_val;
/* Write the updated presumed offset for this entry back out
* to the user.
*/
if (ret != 0) {
return ret;
}
}
return 0;
}
/** Dispatch a batchbuffer to the ring
*/
static int
struct drm_i915_gem_execbuffer *exec,
{
int i = 0, count;
DRM_ERROR("batchbuffer alignment error\n");
return EINVAL;
}
if (!exec_start) {
DRM_ERROR("invalid batchbuffer start offset\n");
return EINVAL;
}
for (i = 0; i < count; i++) {
if (i < nbox) {
if (ret) {
return ret;
}
}
BEGIN_LP_RING(4);
OUT_RING(0);
} else {
BEGIN_LP_RING(2);
(2 << 6) |
(3 << 9) |
} else {
(2 << 6));
}
}
}
/* XXX breadcrumb */
return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
* This should get us reasonable parallelism between CPU and GPU but also
* relatively low latency when blocking on a particular request to finish.
*/
static int
{
int ret = 0;
if (seqno) {
if (ret != 0)
DRM_ERROR("%s: i915_wait_request request->seqno %d now %d\n", __func__, seqno, i915_get_gem_seqno(dev));
}
return ret;
}
/*ARGSUSED*/
int
{
int pin_tries;
return ENODEV;
return EINVAL;
}
/* Copy in the exec list from userland */
DRM_ERROR("Failed to allocate exec or object list "
"for %d buffers\n",
goto pre_mutex_err;
}
(struct drm_i915_gem_exec_object __user *)
if (ret != 0) {
DRM_ERROR("copy %d exec entries failed %d\n",
goto pre_mutex_err;
}
DRM_ERROR("Execbuf while wedged\n");
return EIO;
}
DRM_ERROR("Execbuf while VT-switched.\n");
return EBUSY;
}
/* Look up object handles */
for (i = 0; i < args.buffer_count; i++) {
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
goto err;
}
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object[%d] (%d) %p appears more than once in object list, buffer_count %d\n",
goto err;
}
}
/* Pin and relocate */
ret = 0;
for (i = 0; i < args.buffer_count; i++) {
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
&exec_list[i]);
if (ret) {
DRM_ERROR("Not all objects pinned");
break;
}
pinned = i + 1;
}
/* success */
if (ret == 0)
{
DRM_DEBUG("gem_execbuffer pin_relocate success");
break;
}
/* error other than GTT full, or we've already tried again */
goto err;
}
/* unpin all of our buffers */
for (i = 0; i < pinned; i++)
pinned = 0;
/* evict everyone we can from the aperture */
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
/* Zero the global flush/invalidate flags. These
* will be modified as each object is bound to the
* gtt
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
for (i = 0; i < args.buffer_count; i++) {
/* Compute new gpu domains and update invalidate/flush */
}
DRM_DEBUG("%s: invalidate_domains %08x flush_domains %08x Then flush\n",
dev->flush_domains);
dev->flush_domains);
if (dev->flush_domains) {
}
}
for (i = 0; i < args.buffer_count; i++) {
}
/* Exec the batchbuffer */
if (ret) {
goto err;
}
/*
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
/*
* Get a seqno representing the execution of the current buffer,
* which we can wait on. We would like to mitigate these interrupts,
* likely by only creating seqnos occasionally (so that we have
* *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space).
*/
for (i = 0; i < args.buffer_count; i++) {
}
err:
if (object_list != NULL) {
for (i = 0; i < pinned; i++)
for (i = 0; i < args.buffer_count; i++) {
if (object_list[i]) {
obj_priv->in_execbuffer = 0;
}
}
}
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
if (ret)
DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
}
return ret;
}
int
{
int ret;
if (ret != 0) {
return ret;
}
}
/* If the object is not active and not pending a flush,
* remove it from the inactive list
*/
I915_GEM_DOMAIN_GTT)) == 0 &&
}
return 0;
}
void
{
/* If the object is no longer pinned, and is
* neither active nor being flushed, then stick it on
* the inactive list
*/
I915_GEM_DOMAIN_GTT)) == 0)
}
}
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
return EBADF;
}
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
return EINVAL;
}
if (ret != 0) {
return ret;
}
}
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
if ( ret != 0)
return 0;
}
/*ARGSUSED*/
int
{
return ENODEV;
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
return EBADF;
}
DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
return EINVAL;
}
if (obj_priv->user_pin_count == 0) {
}
return 0;
}
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
return EBADF;
}
/* Don't count being on the flushing list against the object being
* done. Otherwise, a buffer left on the flushing list but not getting
* flushed (because nobody's flushing that domain) won't ever return
* unbusy and get reused by libdrm's bo cache. The other expected
* consumer of this interface, OpenGL's occlusion queries, also specs
* that the objects get unbusy "eventually" without any interference.
*/
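/*
 * Hence the object is reported busy only while it remains on the active
 * list (hedged: this mirrors the upstream behaviour of setting args.busy
 * from obj_priv->active after retiring requests).
 */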
if ( ret != 0)
return 0;
}
/*ARGSUSED*/
int
{
return ENODEV;
}
static int
{
long i;
return 0;
DRM_ERROR("Failed to allocate page list\n");
return ENOMEM;
}
}
return 0;
}
{
return ENOMEM;
/*
* We've just allocated pages from the kernel,
* so they've just been written by the CPU with
* zeros. They'll need to be clflushed before we
* use them with the GPU.
*/
return 0;
}
{
}
/** Unbinds all objects that are on the given buffer list. */
static int
{
int ret;
while (!list_empty(head)) {
struct drm_i915_gem_object,
list);
DRM_ERROR("Pinned object in unbind list\n");
return EINVAL;
}
if (ret != 0) {
DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
ret);
return ret;
}
}
return 0;
}
static int
{
return 0;
}
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
*/
/* Cancel the retire work handler, wait for it to finish if running
*/
if (worktimer_id != NULL) {
(void) untimeout(worktimer_id);
worktimer_id = NULL;
}
/* Flush the GPU along with all non-CPU write domains
*/
if (seqno == 0) {
return ENOMEM;
}
last_seqno = 0;
stuck = 0;
for (;;) {
break;
if (last_seqno == cur_seqno) {
if (stuck++ > 100) {
DRM_ERROR("hardware wedged\n");
break;
}
}
DRM_UDELAY(10);
}
/* Empty the active and flushing lists to inactive. If there's
* anything left at this point, it means that we're wedged and
* nothing good's going to happen by leaving them there. So strip
* the GPU domains and just stuff them onto inactive.
*/
struct drm_i915_gem_object,
list);
}
struct drm_i915_gem_object,
list);
}
/* Move all inactive buffers out of the GTT. */
if (ret) {
return ret;
}
return 0;
}
static int
{
int ret;
/* If we need a physical address for the status page, it's already
* initialized at driver load time.
*/
if (!I915_NEED_GFX_HWS(dev))
return 0;
DRM_ERROR("Failed to allocate status page\n");
return ENOMEM;
}
if (ret != 0) {
return ret;
}
DRM_ERROR("Failed to map status page.\n");
return EINVAL;
}
return 0;
}
static void
{
return;
/* Write high address into HWS_PGA when disabling. */
}
int
{
int ret;
if (ret != 0)
return ret;
DRM_ERROR("Failed to allocate ringbuffer\n");
return ENOMEM;
}
if (ret != 0) {
return ret;
}
/* Set up the kernel mapping for the ring. */
DRM_ERROR("Failed to map ringbuffer.\n");
return EINVAL;
}
/* Stop the ring if it's running. */
I915_WRITE(PRB0_CTL, 0);
I915_WRITE(PRB0_HEAD, 0);
I915_WRITE(PRB0_TAIL, 0);
/* Initialize the ring. */
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
DRM_ERROR("Ring head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
I915_WRITE(PRB0_HEAD, 0);
DRM_ERROR("Ring head forced to zero "
"ctl %08x head %08x tail %08x start %08x\n",
}
/* If the head is still not zero, the ring is dead */
if (head != 0) {
DRM_ERROR("Ring initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
return EIO;
}
/* Update our cache of the ring state */
return 0;
}
static void
{
return;
}
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
DRM_ERROR("Reenabling wedged hardware, good luck\n");
}
/* Set up the kernel mapping for the ring. */
if (ret != 0)
return ret;
(void) drm_irq_install(dev);
return 0;
}
/*ARGSUSED*/
int
{
int ret;
return ENODEV;
(void) drm_irq_uninstall(dev);
return ret;
}
void
{
int ret;
if (ret)
}
void
{
}