Diffstat (limited to 'shared-core/i915_drv.h')
-rw-r--r--  shared-core/i915_drv.h  287
1 files changed, 258 insertions, 29 deletions
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 6d72c051..17829a56 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -39,7 +39,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080312"
+#define DRIVER_DATE "20080611"
#if defined(__linux__)
#define I915_HAVE_FENCE
@@ -63,7 +63,7 @@
*/
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR 13
+#define DRIVER_MINOR 14
#else
#define DRIVER_MINOR 6
#endif
@@ -81,14 +81,13 @@ struct drm_i915_validate_buffer;
struct drm_i915_ring_buffer {
int tail_mask;
- unsigned long Start;
- unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
+ struct drm_gem_object *ring_obj;
};
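
With Start and End dropped, the ring is now described by Size plus the head/tail/space bookkeeping that the LP_RING macros later in this file rely on. As a rough sketch of how that space figure is kept current (the driver's own logic lives in i915_wait_ring() in i915_dma.c; the helper name and the 8-byte slack below are illustrative assumptions):

/* Illustrative sketch only: refresh ring->space from a freshly read,
 * masked head pointer.  Assumes ring->head has already been updated
 * from the hardware head register. */
static void i915_refresh_ring_space(struct drm_i915_ring_buffer *ring)
{
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
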
struct mem_block {
@@ -114,7 +113,7 @@ struct drm_i915_master_private {
};
struct drm_i915_private {
- struct drm_buffer_object *ring_buffer;
+ struct drm_device *dev;
drm_local_map_t *mmio_map;
@@ -124,18 +123,17 @@ struct drm_i915_private {
struct drm_i915_ring_buffer ring;
struct drm_dma_handle *status_page_dmah;
- void *hw_status_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int status_gfx_addr;
+ uint32_t hws_agpoffset;
drm_local_map_t hws_map;
- struct drm_buffer_object *hws_bo;
+ void *hws_vaddr;
+ struct drm_memrange_node *hws;
unsigned int cpp;
wait_queue_head_t irq_queue;
atomic_t irq_received;
- atomic_t irq_emitted;
int tex_lru_log_granularity;
int allow_batchbuffer;
@@ -145,12 +143,14 @@ struct drm_i915_private {
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
- uint32_t irq_enable_reg;
+ uint32_t irq_mask_reg;
int irq_enabled;
struct workqueue_struct *wq;
bool cursor_needs_physical;
+ struct drm_memrange vram;
+
#ifdef I915_HAVE_FENCE
uint32_t flush_sequence;
uint32_t flush_flags;
@@ -161,7 +161,7 @@ struct drm_i915_private {
void *agp_iomap;
unsigned int max_validate_buffers;
struct mutex cmdbuf_mutex;
- size_t stolen_base;
+ u32 stolen_base;
struct drm_i915_validate_buffer *val_bufs;
#endif
@@ -175,17 +175,96 @@ struct drm_i915_private {
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *vbt_mode; /* if any */
-#if defined(I915_HAVE_BUFFER)
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
/* DRI2 sarea */
- struct drm_buffer_object *sarea_bo;
- struct drm_bo_kmap_obj sarea_kmap;
+ struct drm_gem_object *sarea_object;
+ struct drm_bo_kmap_obj sarea_kmap;
+#endif
/* Feature bits from the VBIOS */
int int_tv_support:1;
int lvds_dither:1;
int lvds_vbt:1;
int int_crt_support:1;
-#endif
+
+ struct {
+ struct drm_memrange gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Waiting sequence number, if any
+ */
+ uint32_t waiting_gem_seqno;
+
+ /**
+ * Last seq seen at irq time
+ */
+ uint32_t irq_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+	 * This is set when attempts to idle the device time out.
+	 * It prevents command submission from occurring and makes
+	 * every pending request fail.
+ */
+ int wedged;
+ } mm;
+
+ struct work_struct user_interrupt_task;
/* Register state */
u8 saveLBB;
@@ -277,6 +356,13 @@ struct drm_i915_private {
u8 saveCR[37];
};
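
The mm.retire_work member documented in the struct above is what keeps requests draining while the user interrupt stays masked. A minimal sketch of the shape such a handler takes, assuming it simply retires whatever has completed and re-arms itself while requests remain outstanding (the real body is in i915_gem.c and may differ; i915_gem_retire_work_handler() is declared further down in this header):

/* Sketch: periodically retire finished requests and re-arm the timer
 * only while work remains.  Uses the dev back-pointer added to
 * drm_i915_private above. */
void i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
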
+struct drm_i915_file_private {
+ struct {
+ uint32_t last_gem_seqno;
+ uint32_t last_gem_throttle_seqno;
+ } mm;
+};
+
enum intel_chip_family {
CHIP_I8XX = 0x01,
CHIP_I9XX = 0x02,
@@ -284,6 +370,74 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_gem_object *obj;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_memrange_node *gtt_space;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ int active;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ int dirty;
+
+ /** AGP memory structure for our GTT binding. */
+ DRM_AGP_MEM *agp_mem;
+
+ struct page **page_list;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** Cache domains that were flushed at the start of the request. */
+ uint32_t flush_domains;
+
+ struct list_head list;
+};
+
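
Taken together with mm.next_gem_seqno, the request struct is filled in at emission time: reserve a seqno, write it into the status page from the ring, then queue the record with a timestamp. The sketch below shows that flow; the function name is hypothetical, and it assumes OUT_RING, MI_USER_INTERRUPT, drm_calloc() and the list helpers defined elsewhere in the DRM headers (the driver's own version of this lives in i915_gem.c):

/* Sketch: allocate the next breadcrumb, write it to the status page
 * behind all prior rendering, and record when it was emitted. */
static uint32_t i915_emit_gem_request(struct drm_device *dev, uint32_t flush_domains)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* 0 is reserved as the "no seqno" value, so skip it on wrap. */
	seqno = dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno = 1;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	request->flush_domains = flush_domains;
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	return seqno;
}
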
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
@@ -294,8 +448,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *dev);
extern void i915_driver_lastclose(struct drm_device * dev);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -309,6 +466,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4);
+
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -325,6 +486,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
extern void i915_enable_interrupt (struct drm_device *dev);
+extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -353,7 +515,7 @@ extern void i915_invalidate_reported_sequence(struct drm_device *dev);
#endif
-#ifdef I915_HAVE_BUFFER
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
/* i915_buffer.c */
extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
@@ -365,10 +527,54 @@ extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
extern int i915_move(struct drm_buffer_object *bo, int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
void i915_flush_ttm(struct drm_ttm *ttm);
+#endif /* ttm */
+#ifdef I915_HAVE_BUFFER
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
+void i915_gem_retire_work_handler(struct work_struct *work);
#endif
extern unsigned int i915_fbpercrtc;
@@ -392,16 +598,25 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", \
(n)); \
+ I915_RING_DO_VALIDATE(dev); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
@@ -420,17 +635,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I915_WRITE(PRB0_TAIL, outring); \
} while(0)
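
In use, BEGIN_LP_RING() reserves space (waiting on the ring if necessary), OUT_RING() writes dwords, and ADVANCE_LP_RING() publishes the new tail to PRB0_TAIL. A minimal, hypothetical caller, assuming the MI_FLUSH and MI_NOOP defines from the instruction section of this header:

/* Sketch: emit a two-dword flush through the low-priority ring.  The
 * caller is assumed to hold the heavyweight lock, as with the driver's
 * other ring emitters. */
static void i915_emit_flush_example(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
}
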
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
@@ -532,17 +742,40 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) /* used to have 1<<22? */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hws_vaddr))[5])
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hws_vaddr))[reg])
+#define I915_GEM_HWS_INDEX 0x10
+
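
Dword 0x10 is where GEM parks its breadcrumb: each request stores its seqno there via MI_STORE_DWORD_INDEX, so a completion check is just a status-page read plus a wraparound-safe comparison. The read below is what i915_get_gem_seqno() amounts to; the comparison helper is a sketch of the usual signed-difference idiom rather than code quoted from i915_gem.c:

/* Sketch: the GEM breadcrumb read and a wraparound-safe
 * "has seq1 reached seq2 yet" test. */
static uint32_t i915_read_gem_seqno(struct drm_i915_private *dev_priv)
{
	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

static int i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
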
/*
* 3D instructions used by the kernel
*/
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -603,6 +836,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#define I965REG_ACTHD 0x02074
#define HWS_PGA 0x02080
#define IPEIR 0x02088
#define NOPID 0x02094
@@ -632,6 +866,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
+#define I915REG_ACTHD 0x020C8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
@@ -790,12 +1025,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define ADPA_DPMS_STANDBY (2<<10)
#define ADPA_DPMS_OFF (3<<10)
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-/* The binner has its own ring buffer:
- */
-#define HWB_RING 0x2400
-
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04