/*
*/
/*
* Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
*/
/*
* Copyright © 2010 Daniel Vetter
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"
/* PPGTT stuff */
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define HSW_PTE_UNCACHED (0)
/* NOTE(review): truncated fragment -- the function signature and the
 * address/valid-bit setup that should precede this switch are missing
 * from this chunk.  What remains is the cache-level dispatch of a
 * gen6-style PTE encoder; confirm against the full source. */
enum i915_cache_level level)
{
switch (level) {
case I915_CACHE_LLC_MLC:
break;
case I915_CACHE_LLC:
break;
case I915_CACHE_NONE:
/* Uncached mapping: set the gen6 uncached PTE bit. */
pte |= GEN6_PTE_UNCACHED;
break;
default:
/* Unknown cache level is a programming error -- hard stop. */
BUG();
}
return pte;
}
/* NOTE(review): two truncated PTE-encode fragments (they resemble the
 * Valleyview/Baytrail and Haswell encoders); signatures and most of the
 * bodies are missing from this chunk. */
enum i915_cache_level level)
{
/* Mark the page as writeable. Other platforms don't have a
 * per-PTE read-only/writeable setting, so it matters here.
 * (NOTE(review): original comment was cut off mid-sentence.)
*/
pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
/* NOTE(review): the cacheable-case PTE bit set normally guarded by
 * this if is missing from this chunk. */
return pte;
}
enum i915_cache_level level)
{
if (level != I915_CACHE_NONE)
/* NOTE(review): guarded statement missing here as well. */
return pte;
}
/* NOTE(review): tail of a Solaris ddi_dma_attr_t initializer; the
 * opening lines (dma_attr_version and the variable name) are missing
 * from this chunk.  Field meanings are given by the existing trailing
 * comments. */
0xff000U, /* dma_attr_addr_lo */
0xffffffffU, /* dma_attr_addr_hi */
0xffffffffU, /* dma_attr_count_max */
4096, /* dma_attr_align */
0x1fffU, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0xffffffffU, /* dma_attr_maxxfer */
0xffffffffU, /* dma_attr_seg */
1, /* dma_attr_sgllen, variable */
4, /* dma_attr_granular */
DDI_DMA_FLAGERR, /* dma_attr_flags */
};
};
/* NOTE(review): truncated fragment of a PPGTT page-allocation helper
 * (the DRM_ERROR text names i915_ppgtt_page_alloc).  The DDI calls
 * themselves (ddi_dma_alloc_handle / ddi_dma_mem_alloc /
 * ddi_dma_addr_bind_handle) and the error-label cleanup bodies are
 * missing from this chunk; only the goto-based unwind skeleton remains.
 * Returns 0 on success, -1 on any allocation/bind failure. */
static int
int pgcnt)
{
int i, n;
DRM_ERROR("i915_ppgtt_page_alloc: "
"ddi_dma_alloc_handle failed");
goto err1;
}
DRM_ERROR("drm_gem_object_alloc: "
"ddi_dma_mem_alloc failed");
goto err2;
}
!= DDI_DMA_MAPPED) {
DRM_ERROR("drm_gem_object_alloc: "
"ddi_dma_addr_bind_handle failed");
goto err3;
}
DRM_DEBUG("pfnarray == NULL");
goto err4;
}
/* Walk the DMA cookies, collecting one pfn per page. */
for (n = 0, i = 1; ; i++) {
paddr < cookie_end;
if (n >= real_pgcnt)
return (0);
}
if (i >= cookie_cnt)
break;
}
/* NOTE(review): cleanup statements under each label are missing. */
err4:
err3:
err2:
err1:
return (-1);
}
/* NOTE(review): two truncated fragments.  The first loops over
 * ppgtt->num_pd_entries (presumably freeing per-PD-entry resources --
 * loop body missing).  The second looks like a gen6/gen7 PPGTT enable
 * routine: it shifts the PD offset into register position and branches
 * on IS_HASWELL; the register writes are missing from this chunk. */
{
int i;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
}
}
{
int i;
/* Position the page-directory offset in its register field. */
pd_offset <<= 16;
if (IS_HASWELL(dev)) {
} else {
}
/* GFX_MODE is per-ring on gen7+ */
}
}
return 0;
}
/* PPGTT support for Sandybridge/Gen6 and later */
/* NOTE(review): truncated clear-range and insert-entries fragments.
 * The clear loop writes scratch_pte over a span of page-table entries,
 * clamping each pass at I915_PPGTT_PT_ENTRIES per page table before
 * advancing act_pt; the insert loop advances act_pt/act_pte the same
 * way.  Signatures and most loop bodies are missing from this chunk. */
unsigned first_entry,
unsigned num_entries)
{
unsigned last_pte, i;
while (num_entries) {
if (last_pte > I915_PPGTT_PT_ENTRIES)
pt_vaddr[i] = scratch_pte;
/* Subsequent page tables are filled from entry 0. */
first_pte = 0;
act_pt++;
}
}
unsigned first_entry, unsigned num_entries,
{
unsigned i, j;
/* Roll over to the next page table when this one fills up. */
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
act_pt++;
act_pte = 0;
}
}
}
/* NOTE(review): an empty body plus a truncated PPGTT-init fragment;
 * signatures and the per-platform setup inside the IS_HASWELL /
 * IS_VALLEYVIEW branches are missing from this chunk. */
{
}
{
unsigned first_pd_entry_in_global_pt;
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
if (IS_HASWELL(dev)) {
} else if (IS_VALLEYVIEW(dev)) {
} else {
}
if (ret)
return (-ENOMEM);
return 0;
}
/* NOTE(review): truncated aliasing-PPGTT fragments: an init routine
 * (allocation check, unsupported-platform BUG path), a cleanup routine,
 * and empty bind/unbind bodies for GEM objects.  All signatures and
 * most statements are missing from this chunk. */
{
if (!ppgtt)
return -ENOMEM;
else {
/* Unsupported platform: this path should be unreachable. */
BUG();
DRM_ERROR("ppgtt is not supported");
}
if (ret)
else
return ret;
}
{
if (!ppgtt)
return;
}
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
}
struct drm_i915_gem_object *obj)
{
}
/* NOTE(review): truncated fragments of a restore-GTT-mappings routine
 * (the scratch-page refill call is commented out) and a
 * prepare-object routine whose DMA-mapping check is compiled out with
 * #if 0 -- it currently always returns 0. */
{
/* First fill our portion of the GTT with scratch pages */
/*
i915_ggtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
*/
}
}
{
#if 0
if (obj->has_dma_mapping)
return 0;
return -ENOSPC;
#endif
return 0;
}
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
/* NOTE(review): truncated -- the signature head and the PTE-write loop
 * body are missing from this chunk; only the sanity check, the posting
 * read, and the TLB-flush commentary remain. */
enum i915_cache_level level)
{
/* LINTED */
int i, j;
}
BUG_ON(i > max_entries);
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0) {
}
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
}
/* NOTE(review): truncated fragments: a GGTT clear-range routine with a
 * bounds check, an AGP-based bind path (drm_agp_bind_pages), and empty
 * unbind/cleanup bodies.  Signatures and most statements are missing
 * from this chunk. */
struct drm_i915_gem_object *obj,
{
int i;
/* Refuse to write past the end of the GTT. */
if (num_entries > max_entries) {
DRM_ERROR("First entry = %d; Num entries = %d (max=%d)\n",
}
}
}
enum i915_cache_level cache_level)
{
/* Best-effort AGP bind; return value deliberately ignored. */
(void) drm_agp_bind_pages(dev,
flags);
}
struct drm_i915_gem_object *obj,
{
}
return;
}
enum i915_cache_level cache_level)
{
}
{
/* Record that the object no longer has a global GTT mapping. */
obj->has_global_gtt_mapping = 0;
}
{
}
/* NOTE(review): truncated drm_mm color-adjust fragment -- it pads the
 * allocation range by one 4 KiB page on each side; the neighbour-node
 * lookups that decide when to pad are missing from this chunk. */
unsigned long color,
unsigned long *start,
unsigned long *end)
{
*start += 4096;
struct drm_mm_node,
*end -= 4096;
}
}
/* NOTE(review): truncated global-GTT setup fragment; the drm_mm init
 * and the preallocated-object reservation loop bodies are missing from
 * this chunk. */
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently prefetches
* past the end of the object, and we've seen multiple hangs with the
* GPU head pointer stuck in a batchbuffer bound at the last page of the
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
/* Subtract the guard page ... */
/* Mark any preallocated objects as occupied */
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
false);
}
}
/* NOTE(review): truncated fragments: the PPGTT enable heuristic (the
 * module parameter overrides; the SNB/VT-d check body is missing) and
 * the global-GTT init tail that shrinks the aperture for aliasing
 * PPGTT. */
static bool
{
/* Explicit module-parameter override wins. */
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
/* Disable ppgtt on SNB if VT-d is on. */
return false;
return true;
}
{
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
}
}
int ret;
if (!ret)
return;
}
}
/* NOTE(review): truncated fragments: scratch-page setup (allocation
 * calls missing; gen 33 appears to denote a 33-plus address-width
 * variant -- confirm), a teardown stub, and small GMCH-control
 * decoders (one shifts snb_gmch_ctl into a byte count). */
{
int gen;
/* setup scratch page */
return (-ENOMEM);
gen = 33;
else
return (-ENOMEM);
}
return 0;
}
{
}
{
return snb_gmch_ctl << 20;
}
{
}
};
/* NOTE(review): truncated fragments: a GTT register-mapping helper
 * (checks a DDI status) and a gen6-style probe that validates the
 * GMADR size and maps the GTT page table living at BAR0 + 2 MB.  The
 * mapping calls themselves are missing from this chunk. */
{
int ret;
if (ret != DDI_SUCCESS) {
DRM_ERROR("failed to map GTT");
}
}
{
unsigned int gtt_size;
* a coarse sanity check.
*/
DRM_ERROR("Unknown GMADR size (%lx)\n",
return -ENXIO;
}
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
DRM_ERROR("Failed to map the gtt page table");
return -ENOMEM;
}
return 0;
}
{
}
/* NOTE(review): truncated stolen-memory probe.  The GMCH control
 * register is decoded per generation (i830-style vs. later); the
 * stolen_size assignments under most case labels are missing from this
 * chunk, leaving bare break statements.  Returns the detected stolen
 * size in bytes, or 0 when none is present. */
static unsigned int
{
int local = 0;
unsigned int stolen_size = 0;
return 0; /* no stolen mem on i81x */
return 0;
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
break;
break;
break;
case I830_GMCH_GMS_LOCAL:
/* i830 local-memory variant. */
local = 1;
break;
default:
stolen_size = 0;
break;
}
} else {
switch (gmch_ctrl & INTEL_GMCH_GMS_MASK) {
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
default:
stolen_size = 0;
break;
}
}
if (stolen_size > 0) {
DRM_INFO("detected %dK %s memory\n",
} else {
DRM_INFO("no pre-allocated video memory detected\n");
stolen_size = 0;
}
return stolen_size;
}
/* NOTE(review): truncated fragments: an aperture-size probe (i810
 * SMRAM window check; 9xx reads the BAR length) returning the size in
 * pages, and a helper that resizes the GGTT via PGETBL_CTL after
 * disabling PPGTT.  The register reads/writes are missing from this
 * chunk. */
static unsigned int
{
unsigned int aperture_size;
if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
else
return 0;
else
} else {
/* 9xx supports large sizes, just look at the length */
}
return aperture_size >> PAGE_SHIFT;
}
static void
{
/* ensure that ppgtt is disabled */
/* write the new ggtt size */
pgetbl_ctl |= size_flag;
}
/* NOTE(review): truncated GTT-size decoders.  The first decodes
 * G4x/i965 GMCH and PGETBL_CTL size fields into a byte count (the size
 * assignments under each case are missing from this chunk) and returns
 * size/4, i.e. the entry count for 4-byte PTEs.  The second dispatches
 * to i965_gtt_total_entries on newer hardware; on older hardware the
 * GTT just covers the aperture. */
static unsigned int
{
int size;
return 0;
switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
case G4x_GMCH_SIZE_1M:
case G4x_GMCH_SIZE_VT_1M:
break;
case G4x_GMCH_SIZE_VT_1_5M:
break;
case G4x_GMCH_SIZE_2M:
case G4x_GMCH_SIZE_VT_2M:
break;
}
}
switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
case I965_PGETBL_SIZE_128KB:
break;
case I965_PGETBL_SIZE_256KB:
break;
case I965_PGETBL_SIZE_512KB:
break;
/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
case I965_PGETBL_SIZE_1MB:
break;
case I965_PGETBL_SIZE_2MB:
break;
case I965_PGETBL_SIZE_1_5MB:
break;
default:
DRM_DEBUG("unknown page table size, assuming 512KB");
}
/* 4 bytes per PTE -> entry count. */
return size/4;
}
static unsigned int
{
return i965_gtt_total_entries(dev);
else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
}
}
/* NOTE(review): truncated fragments: two probe stubs and the tail of
 * the top-level GTT init, which selects a per-platform setup path
 * (Haswell / Valleyview / other) and logs the usable, GMADR, and
 * stolen sizes.  The actual setup calls and log arguments are missing
 * from this chunk. */
{
return 0;
}
{
}
{
int ret;
} else {
if (IS_HASWELL(dev)) {
} else if (IS_VALLEYVIEW(dev)) {
} else {
}
}
if (ret)
return ret;
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %dM\n",
DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n",
return 0;
}
/* NOTE(review): truncated raw-GTT write helpers.  Both iterate entries
 * at "+ i * sizeof(...)" offsets from a mapped GTT base (gttp); one
 * writes object PTEs (branching on a type flag), the other clears a
 * range.  The register-write calls themselves are missing from this
 * chunk. */
void
void *gttp,
{
int i, j;
} else {
if (type) {
+ i * sizeof(uint32_t));
}
} else {
+ i * sizeof(uint32_t));
}
}
}
}
void
{
int i;
} else {
+ i * sizeof(gtt_pte_t));
}
}
}