/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2008-2010, 2013, Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

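/*
 * Illustrative sketch (not part of this header's API; assumes the ring
 * bookkeeping declared below and HEAD_ADDR from i915_reg.h): the slack
 * above keeps the tail from catching up to the head within one
 * cacheline. Free space is then computed with wrap-around, e.g.:
 *
 *	static int ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = (ring->head & HEAD_ADDR) -
 *			    (ring->tail + I915_RING_FREE_SPACE);
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */
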
struct intel_hw_status_page {
	void *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

enum intel_ring_hangcheck_action { wait, active, kick, hung };

struct intel_ring_hangcheck {
	bool deadlock;
	u32 seqno;
	u32 acthd;
	int score;
	enum intel_ring_hangcheck_action action;
};

struct intel_ring_buffer {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32 mmio_base;
	void *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	struct intel_hw_status_page status_page;

	/**
	 * We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements. (See the illustrative sketch
	 * after this struct for the consume-and-advance step.)
	 */
	u32 last_retired_head;

	struct {
		u32 gt; /* protected by dev_priv->irq_lock */
		u32 pm; /* protected by dev_priv->rps.lock (sucks) */
	} irq_refcount;
	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32 trace_irq_seqno;
	u32 sync_seqno[I915_NUM_RINGS-1];
	bool (*irq_get)(struct intel_ring_buffer *ring);
	void (*irq_put)(struct intel_ring_buffer *ring);

	int (*init)(struct intel_ring_buffer *ring);

	void (*write_tail)(struct intel_ring_buffer *ring,
			   u32 value);
	int (*flush)(struct intel_ring_buffer *ring,
		     u32 invalidate_domains,
		     u32 flush_domains);
	int (*add_request)(struct intel_ring_buffer *ring);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_ring_buffer *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_ring_buffer *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   u32 offset, u32 length,
				   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_ring_buffer *ring);
	int (*sync_to)(struct intel_ring_buffer *ring,
		       struct intel_ring_buffer *to,
		       u32 seqno);

	/* our mbox written by others */
	u32 semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32 signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	drm_local_map_t map;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	void *private;
};
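
/*
 * Illustrative sketch (not a declared API): how last_retired_head is
 * typically consumed when the driver needs more ring space. The stashed
 * tail of the last retired request becomes the new head, the stash is
 * reset to -1 so new retirements can be detected, and the wrap-aware
 * free space (see the ring_space() sketch above) is recomputed:
 *
 *	if (ring->last_retired_head != -1) {
 *		ring->head = ring->last_retired_head;
 *		ring->last_retired_head = -1;
 *		ring->space = ring_space(ring);
 *	}
 */
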
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs  -> 0 = vcs,  1 = bcs,  2 = vecs;
	 * vcs  -> 0 = bcs,  1 = vecs, 2 = rcs;
	 * bcs  -> 0 = vecs, 1 = rcs,  2 = vcs;
	 * vecs -> 0 = rcs,  1 = vcs,  2 = bcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
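
/*
 * Example (illustrative): assuming the rings are laid out contiguously
 * by id, as in dev_priv->ring[], making the blitter ring wait on the
 * render ring stores the awaited seqno at index
 * intel_ring_sync_index(bcs, rcs) == 1, i.e. in bcs->sync_seqno[1].
 */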
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	u32 *regs = ring->status_page.page_addr;

	return regs[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	u32 *regs = ring->status_page.page_addr;

	regs[reg] = value;
}

/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
*
* The following dwords have a reserved meaning:
* 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
* 0x04: ring 0 head pointer
* 0x05: ring 1 head pointer (915-class)
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
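
/*
 * Example (illustrative): the driver's breadcrumb writes land at
 * I915_GEM_HWS_INDEX, so the most recently completed seqno can be
 * sampled from the status page without an MMIO read:
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */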
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int intel_wait_ring_idle(struct intel_ring_buffer *ring);
int intel_ring_begin(struct intel_ring_buffer *ring, int n);
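/* Write one dword at the current tail; the caller must already have
 * reserved the space with intel_ring_begin() and is expected to close
 * the sequence with intel_ring_advance().
 */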
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	unsigned int *virt = (unsigned int *)((intptr_t)ring->virtual_start + ring->tail);
	*virt = data;
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
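
/*
 * Example (illustrative): the usual emit sequence. Reserve space for the
 * command dwords, write them, then advance the hardware tail:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */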
int intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
#endif /* _INTEL_RINGBUFFER_H_ */