/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
*/
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "drm_linux.h"
#include "drm_mm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_io32.h"
#define USE_PCI_DMA_API 0
#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))
#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {		\
	if (LP_RING(dev->dev_private)->obj == NULL)		\
		LOCK_TEST_WITH_RETURN(dev, file);		\
} while (__lintzero)
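/*
 * Illustrative sketch only (not from the original source): how a legacy DRI1
 * ring ioctl would typically use the macro above.  The function name and its
 * body are hypothetical placeholders.
 */
#if 0	/* example, not compiled */
static int example_ring_ioctl(struct drm_device *dev, struct drm_file *file)
{
	/* Once GEM owns the ring the lock test is skipped; otherwise it is
	 * enforced before any ring access. */
	RING_LOCK_TEST_WITH_RETURN(dev, file);

	/* ... touch the legacy ring here ... */
	return 0;
}
#endif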
static inline u32
{
} else
}
{
if (master_priv->sarea_priv)
}
}
{
}
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
{
if (dev_priv->status_page_dmah) {
}
}
/* Need to rewrite hardware status page */
}
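/*
 * Illustrative sketch only, not this port's implementation: the two cases
 * that the comment on freeing the hardware status page describes.  Field and
 * helper names follow the Linux i915 driver of this era and are assumptions.
 */
#if 0	/* example, not compiled */
static void example_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Case 1: status page allocated by the driver as a physical page. */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* Case 2: status page at a GFX address handed in by the X server. */
	if (ring->status_page.gfx_addr)
		ring->status_page.gfx_addr = 0;

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
#endif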
{
/*
* We should never lose context on the ring with modesetting
* as we don't expose it to userspace
*/
return;
return;
}
{
int i;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
(void) drm_irq_uninstall(dev);
for (i = 0; i < I915_NUM_RINGS; i++)
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
return 0;
}
{
int ret;
if (master_priv->sarea) {
} else {
DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
}
(void) i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
if (ret) {
(void) i915_dma_cleanup(dev);
return ret;
}
}
if (master_priv->sarea_priv)
/* Allow hardware batchbuffers unless told otherwise.
*/
return 0;
}
{
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
/* Program Hardware Status Page */
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
else
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
/* LINTED */
{
int retcode = 0;
return -ENODEV;
case I915_INIT_DMA:
break;
case I915_CLEANUP_DMA:
break;
case I915_RESUME_DMA:
break;
default:
break;
}
return retcode;
}
/* Implement basically the same security restrictions as hardware does
* for MI_BATCH_NON_SECURE. These can be made stricter at any time.
*
* Most of the calculations below involve calculating the size of a
* particular instruction. It's important to get the size right as
* that tells us where the next instruction to check is. Any illegal
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
#ifndef __SUNPRO_C
		break;
#endif
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 23) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

#ifndef __SUNPRO_C
	return 0;
#endif
}
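/*
 * Worked example (illustrative only, the value is hypothetical): decoding the
 * length of a class-0x2 (2-D) command the way validate_cmd() does, which is
 * how the caller's scan loop knows where the next instruction starts.
 */
#if 0	/* example, not compiled */
{
	int cmd = 0x54f00006;		/* (cmd >> 29) & 0x7 == 0x2: 2d command */
	int sz = (cmd & 0xff) + 2;	/* inline dword count + header = 8 dwords */
}
#endif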
static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords + 1) * sizeof (int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords + 1) & ~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
int
struct drm_clip_rect *box,
{
int ret;
DRM_ERROR("Bad box %d,%d..%d,%d\n",
return -EINVAL;
}
if (ret)
return ret;
} else {
if (ret)
return ret;
OUT_RING(0);
}
return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
* emit. For now, do it in both places:
*/
{
if (master_priv->sarea_priv)
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(0);
}
}
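/*
 * Illustrative sketch only, not necessarily this port's exact code: the
 * breadcrumb emit that the comment above refers to, writing the software
 * counter into the hardware status page via MI_STORE_DWORD_INDEX.  The
 * dri1.counter field name follows the Linux i915 driver and is an assumption.
 */
#if 0	/* example, not compiled */
if (BEGIN_LP_RING(4) == 0) {
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->dri1.counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
#endif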
struct drm_clip_rect *cliprects,
void *cmdbuf)
{
DRM_ERROR("alignment");
return -EINVAL;
}
for (i = 0; i < count; i++) {
if (i < nbox) {
if (ret)
return ret;
}
if (ret)
return ret;
}
return 0;
}
struct drm_clip_rect *cliprects)
{
DRM_ERROR("alignment");
return -EINVAL;
}
for (i = 0; i < count; i++) {
if (i < nbox) {
if (ret)
return ret;
}
if (ret)
return ret;
} else {
}
} else {
if (ret)
return ret;
OUT_RING(0);
}
}
if (BEGIN_LP_RING(2) == 0) {
}
}
return 0;
}
{
int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
if (ret)
return ret;
OUT_RING(0);
OUT_RING(0);
} else {
}
OUT_RING(0);
OUT_RING(0);
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(0);
}
return 0;
}
{
}
/* LINTED */
{
int ret;
return -ENODEV;
return ret;
}
/* LINTED */
{
int ret;
return -ENODEV;
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
if (batch->num_cliprects < 0)
return -EINVAL;
if (batch->num_cliprects) {
sizeof(struct drm_clip_rect),
return -ENOMEM;
sizeof(struct drm_clip_rect));
if (ret != 0) {
goto fail_free;
}
}
if (sarea_priv)
return ret;
}
/* LINTED */
{
void *batch_data;
int ret;
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
return -ENODEV;
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
if (batch_data == NULL)
return -ENOMEM;
if (ret != 0) {
goto fail_batch_free;
}
if (cmdbuf->num_cliprects) {
sizeof(struct drm_clip_rect), GFP_KERNEL);
goto fail_batch_free;
}
sizeof(struct drm_clip_rect));
if (ret != 0) {
goto fail_clip_free;
}
}
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
goto fail_clip_free;
}
if (sarea_priv)
return ret;
}
{
DRM_DEBUG_DRIVER("\n");
if (master_priv->sarea_priv)
if (BEGIN_LP_RING(4) == 0) {
}
}
{
int ret = 0;
if (master_priv->sarea_priv)
return 0;
}
if (master_priv->sarea_priv)
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
}
return ret;
}
/* Needs the lock as it touches the ring.
*/
/* LINTED */
{
int result;
return -ENODEV;
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
/* LINTED */
{
return -ENODEV;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
}
/* LINTED */
{
return -ENODEV;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
return 0;
}
/**
* Schedule buffer swap at given vertical blank.
*/
/* LINTED */
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
* from the kernel, then waited for vblank before continuing to perform
* rendering. The problem was that the kernel might wake the client
* up before it dispatched the vblank swap (since the lock has to be
* held while touching the ringbuffer), in which case the client would
* clear and start the next frame before the swap occurred, and
* flicker would occur in addition to likely missing the vblank.
*
* In the absence of this ioctl, userland falls back to a correct path
* of waiting for a vblank, then dispatching the swap on its own.
* Context switching to userland and back is plenty fast enough for
* meeting the requirements of vblank swapping.
*/
return -EINVAL;
}
/* LINTED */
{
int ret;
return -ENODEV;
return ret;
}
/* LINTED */
{
int value;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
case I915_PARAM_IRQ_ACTIVE:
break;
break;
case I915_PARAM_LAST_DISPATCH:
break;
case I915_PARAM_CHIPSET_ID:
break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
break;
case I915_PARAM_HAS_OVERLAY:
break;
value = 1;
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
value = 1;
break;
case I915_PARAM_HAS_BSD:
break;
case I915_PARAM_HAS_BLT:
break;
case I915_PARAM_HAS_VEBOX:
break;
value = 1;
break;
value = 1;
break;
break;
value = 1;
break;
value = 1;
break;
case I915_PARAM_HAS_LLC:
break;
break;
value = 1;
break;
break;
value = 1;
break;
		/* not supported yet */
value = 0;
break;
value = 1;
break;
value = 1;
break;
value = 1;
break;
/*
* These should be better supported in the next version, but
	 * are being requested in this one, so provide useful values.
*/
value = 1;
break;
value = 0;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
return -EINVAL;
}
DRM_ERROR("DRM_COPY_TO_USER failed\n");
return -EFAULT;
}
return 0;
}
/* LINTED */
{
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
break;
break;
break;
return -EINVAL;
/* Userspace can use first N regs */
break;
default:
DRM_DEBUG_DRIVER("unknown parameter %d\n",
return -EINVAL;
}
return 0;
}
/* LINTED */
{
return -ENODEV;
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_ERROR("tried to set status page when mode setting active\n");
return 0;
}
(void) i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
DRM_DEBUG_DRIVER("load hws at %p\n",
return 0;
}
{
/* OSOL_i915 Begin */
int i, err;
return 0;
if (bridge_dev->ldi_id) {
DRM_DEBUG("end");
return 0;
}
DRM_DEBUG("failed");
return -1;
};
/* Workaround here:
* agptarget0 is not always linked to the right device
* try agptarget1 if failed at agptarget0
*/
for (i = 0; i < 16; i++) {
if (err == 0) {
break;
}
DRM_INFO("can't open agptarget%d", i);
}
if (err) {
return -1;
}
/* OSOL_i915 End */
return 0;
}
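/*
 * Illustrative sketch only: the probe-and-retry pattern that the
 * "agptarget0 is not always linked to the right device" workaround above
 * describes.  The device path format and the open flags are assumptions,
 * not taken from this file.
 */
#if 0	/* example, not compiled */
{
	char buf[MAXPATHLEN];
	int i, err = -1;

	for (i = 0; i < 16; i++) {
		(void) snprintf(buf, sizeof (buf), "/dev/agp/agptarget%d", i);
		err = ldi_open_by_name(buf, FREAD | FWRITE, kcred,
		    &bridge_dev->bridge_dev_hdl, bridge_dev->ldi_id);
		if (err == 0)
			break;
		DRM_INFO("can't open agptarget%d", i);
	}
}
#endif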
{
return -1;
return 0;
}
{
return -1;
return 0;
}
/* true = enable decode, false = disable decoder */
/* LINTED */
{
if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		    VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
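/*
 * For context, a hedged sketch of how a VGA-decode callback like the one
 * above is normally registered with the VGA arbiter on Linux.  This port
 * carries that registration as a FIXME (see i915_driver_unload below), and
 * the callback name used here is a hypothetical placeholder.
 */
#if 0	/* example, not compiled */
ret = vga_client_register(dev->pdev, dev, NULL, example_vga_set_decode);
if (ret && ret != -ENODEV)
	goto out;
#endif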
{
int ret;
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
if (ret)
goto out;
/* clear interrupt related bits */
if (ret)
goto cleanup_gem_stolen;
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
if (ret)
goto cleanup_irq;
/* Always safe in the mode setting case. */
return 0;
}
if (ret)
goto cleanup_gem;
}
/* Only enable hotplug handling once the fbdev is fully set up. */
/*
* Some ports require correctly set-up hpd registers for detection to
* work properly (leading to ghost connected connector status), e.g. VGA
* on gm45. Hence we can only set up the initial fbdev config after hpd
* irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
*/
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
return 0;
out:
return ret;
}
/* LINTED */
{
if (!master_priv)
return -ENOMEM;
return 0;
}
/* LINTED */
{
if (!master_priv)
return;
}
/* OSOL_i915 Begin */
{
if (bridge_dev->bridge_dev_hdl) {
}
if (bridge_dev->ldi_id) {
}
}
/* OSOL_i915 End */
/**
* intel_early_sanitize_regs - clean up BIOS state
* @dev: DRM device
*
* This function must be called before we do any I915_READ or I915_WRITE. Its
 * purpose is to clean up any state left by the BIOS that may affect us when
 * reading and/or writing registers.
 */
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
{
/* Refuse to load on gen6+ without kms enabled. */
return -ENODEV;
/* i915 has 4 more counters */
return -ENOMEM;
goto put_bridge;
}
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
* It is also used for periodic low-priority events, such as
* idle-timers and hangcheck.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
* workqueue at any time: max_active = 1 and NON_REENTRANT.
*/
DRM_ERROR("Failed to create i915 workqueue.\n");
goto out_rmmap;
}
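/*
 * Hedged sketch (Linux-style API): an ordered, single-threaded workqueue
 * matching the max_active = 1 / NON_REENTRANT rationale in the comment
 * above.  The constructor actually used by this port may differ.
 */
#if 0	/* example, not compiled */
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
	DRM_ERROR("Failed to create i915 workqueue.\n");
	ret = -ENOMEM;
	goto out_rmmap;
}
#endif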
/* The i915 workqueue is primarily used for page_flip and fbc */
DRM_ERROR("Failed to create i915_other workqueue.\n");
goto out_mtrrfree;
}
/* This must be called before any calls to HAS_PCH_* */
if (intel_setup_gmbus(dev) != 0)
goto out_mtrrfree;
/* Make sure the bios did its job and set up vital registers */
	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
	/* FIXME: interrupts are not received after resume when MSI is enabled */
/*
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
*/
if (IS_VALLEYVIEW(dev))
if (ret)
goto out_gem_unload;
}
/* Start out suspended */
if (HAS_POWER_WELL(dev))
i915_try_reset = true;
(void *) dev);
if (MDB_TRACK_ENABLE)
return 0;
return ret;
}
{
int ret;
if (HAS_POWER_WELL(dev))
if (ret)
/* XXXX rebracket after this is tested */
/*
* Uninitialized GTT indicates that i915 never opens.
* So we should not try to release the resources
* which are only allocated in i915_driver_firstopen.
*/
(void) drm_irq_uninstall(dev);
/* XXX FIXME vga_client_register(dev->pdev, NULL, NULL, NULL); */
}
if (!I915_NEED_GFX_HWS(dev))
}
}
}
if (MDB_TRACK_ENABLE) {
}
}
return 0;
}
int
{
static bool first_call = true;
int ret = 0;
if (first_call) {
/* OSOL_i915: moved from i915_driver_load */
if (i915_get_bridge_dev(dev)) {
DRM_ERROR("i915_get_bridge_dev() failed.");
return -EIO;
}
/*
* AGP has been removed for GEN6+,
	 * so we read the AGP base and size here.
*/
} else {
}
if (ret) {
DRM_ERROR("Failed to initialize GTT\n");
return ret;
}
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
return ret;
}
}
}
first_call = false;
return ret;
}
/* LINTED */
{
DRM_DEBUG_DRIVER("\n");
	i915_file_priv = (struct drm_i915_file_private *)
	    kmalloc(sizeof (*i915_file_priv), GFP_KERNEL);
if (!i915_file_priv)
return -ENOMEM;
return 0;
}
/**
* i915_driver_lastclose - clean up after all DRM clients have exited
* @dev: DRM device
*
* Take care of cleaning up after all DRM clients have exited. In the
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
* Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
{
/* On gen6+ we refuse to init without kms enabled, but then the drm core
* goes right around and calls lastclose. Check for this and don't clean
* up anything. */
if (!dev_priv)
return;
return;
}
(void) i915_dma_cleanup(dev);
}
{
}
{
/* Do nothing when coming back from high-res mode (VESA)*/
return;
/* Need to do full modeset from VGA TEXT mode */
if (dev_priv->vt_holding > 0) {
(void) i915_restore_state(dev);
if (IS_HASWELL(dev))
intel_modeset_setup_hw_state(dev, false);
else
intel_modeset_setup_hw_state(dev, true);
}
dev_priv->vt_holding = 0;
}
{
return;
(void) i915_save_state(dev);
if (IS_HASWELL(dev))
}
/* LINTED */
{
}
I915_IOCTL_DEF(DRM_IOCTL_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH, copyin32_i915_batchbuffer, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED, NULL, NULL),
I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED, NULL, NULL),
};
/**
* Determine if the device really is AGP or not.
*
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCIe.
 */
/* LINTED */
int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}