-rw-r--r--   Makefile.am                    2
-rw-r--r--   RELEASING                     66
-rw-r--r--   configure.ac                   2
-rw-r--r--   include/drm/drm.h              4
-rw-r--r--   include/drm/drm_mode.h        11
-rw-r--r--   include/drm/i915_drm.h         1
-rw-r--r--   intel/intel_bufmgr_gem.c     211
-rw-r--r--   tests/modetest/modetest.c    146
-rw-r--r--   xf86drm.h                      8
-rw-r--r--   xf86drmMode.c                 25
-rw-r--r--   xf86drmMode.h                  2
11 files changed, 407 insertions, 71 deletions
diff --git a/Makefile.am b/Makefile.am
index de8bfe09..41e5a342 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -20,6 +20,8 @@
AUTOMAKE_OPTIONS = foreign
+DISTCHECK_CONFIGURE_FLAGS = --enable-nouveau-experimental-api --enable-radeon-experimental-api
+
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm.pc
diff --git a/RELEASING b/RELEASING
new file mode 100644
index 00000000..3f07146d
--- /dev/null
+++ b/RELEASING
@@ -0,0 +1,66 @@
+The release criterion for libdrm is essentially "if you need a release,
+make one". There is no designated release engineer or maintainer.
+Anybody is free to make a release if there's a certain feature or bug
+fix they need in a released version of libdrm.
+
+When new ioctl definitions are merged into drm-next, we will add
+support to libdrm, at which point we typically create a new release.
+However, this is up to whoever is driving the feature in question.
+
+Follow these steps to release a new version of libdrm:
+
+ 1) Ensure that there are no local, uncommitted/unpushed
+ modifications. You're probably in a good state if both "git diff
+ HEAD" and "git log master..origin/master" give no output.
+
+ 2) Bump the version number in configure.ac. We seem to have settled
+ for 2.4.x as the versioning scheme for libdrm, so just bump the
+ micro version.
+
+ 3) Run autoconf and then re-run ./configure so the build system
+ picks up the new version number.
+
+ 4) Verify that the code passes "make distcheck". libdrm is tricky
+ to distcheck since the test suite will need to become drm master.
+ This means that you need to run it outside X, that is, in text
+ mode (KMS or no KMS doesn't matter).
+
+ Running "make distcheck" should result in no warnings or errors
+ and end with a message of the form:
+
+ =============================================
+ libdrm-X.Y.Z archives ready for distribution:
+ libdrm-X.Y.Z.tar.gz
+ libdrm-X.Y.Z.tar.bz2
+ =============================================
+
+ Make sure that the version number reported by distcheck and in
+ the tarball names matches the number you bumped to in configure.ac.
+
+ 5) Commit the configure.ac change and make an annotated tag for that
+ commit with the version number of the release as the name and a
+ message of "libdrm X.Y.Z". For example, for the 2.4.16 release
+ the command is:
+
+ git tag -a 2.4.16 -m "libdrm 2.4.16"
+
+ 6) Push the commit and tag by saying
+
+ git push --tags origin master
+
+ assuming the remote for the upstream libdrm repo is called origin.
+
+ 7) Use the release.sh script from the xorg/util/modular repo to
+ upload the tarballs to the freedesktop.org download area and
+ create an announce email template. The script takes three
+ arguments: a "section", the previous tag and the new tag we just
+ created. For 2.4.16 again, the command is:
+
+ ../modular/release.sh libdrm 2.4.15 2.4.16
+
+ This copies the two tarballs to freedesktop.org and creates
+ libdrm-2.4.16.announce which has a detailed summary of the
+ changes, links to the tarballs, MD5 and SHA1 sums and pre-filled
+ email headers. Fill in the blank between the email headers and
+ the list of changes with a brief message describing what changed or
+ what prompted this release. Send out the email and you're done!
diff --git a/configure.ac b/configure.ac
index 39bc81eb..b8842201 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.60)
-AC_INIT([libdrm], 2.4.15, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.16, [dri-devel@lists.sourceforge.net], libdrm)
AC_USE_SYSTEM_EXTENSIONS
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 225ab6ce..48221599 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -696,8 +696,11 @@ struct drm_gem_open {
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+/*@}*/
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
@@ -727,6 +730,7 @@ struct drm_event {
};
#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
struct drm_event_vblank {
struct drm_event base;
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 042da5c3..dfc390ea 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -304,4 +304,15 @@ struct drm_mode_crtc_lut {
__u64 blue;
};
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+struct drm_mode_crtc_page_flip {
+ uint32_t crtc_id;
+ uint32_t fb_id;
+ uint32_t flags;
+ uint32_t reserved;
+ uint64_t user_data;
+};
+
#endif
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 25ff7b79..75b0e1d5 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -271,6 +271,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
typedef struct drm_i915_getparam {
int param;
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 9db7bfeb..571ab5c8 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -154,6 +154,11 @@ struct _drm_intel_bo_gem {
char used_as_reloc_target;
/**
+ * Boolean of whether we have encountered an error whilst building the relocation tree.
+ */
+ char has_error;
+
+ /**
* Boolean of whether this buffer can be re-used
*/
char reusable;
@@ -187,7 +192,6 @@ static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
-static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
time_t time);
@@ -305,7 +309,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
}
}
-static void
+static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -357,13 +361,33 @@ drm_intel_add_validate_buffer(drm_intel_bo *bo)
bufmgr_gem->exec_objects[index].alignment = 0;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
- drm_intel_gem_bo_reference(bo);
bufmgr_gem->exec_count++;
}
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
sizeof(uint32_t))
+static void
+drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ int size;
+
+ assert(!bo_gem->used_as_reloc_target);
+
+ /* The older chipsets are far-less flexible in terms of tiling,
+ * and require tiled buffer to be size aligned in the aperture.
+ * This means that in the worst possible case we will need a hole
+ * twice as large as the object in order for it to fit into the
+ * aperture. Optimal packing is for wimps.
+ */
+ size = bo_gem->bo.size;
+ if (!IS_I965G(bufmgr_gem) && bo_gem->tiling_mode != I915_TILING_NONE)
+ size *= 2;
+
+ bo_gem->reloc_tree_size = size;
+}
+
static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
@@ -377,6 +401,17 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
bo_gem->relocs = malloc(max_relocs *
sizeof(struct drm_i915_gem_relocation_entry));
bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
+ if (bo_gem->relocs == NULL || bo_gem->reloc_target_bo == NULL) {
+ bo_gem->has_error = 1;
+
+ free (bo_gem->relocs);
+ bo_gem->relocs = NULL;
+
+ free (bo_gem->reloc_target_bo);
+ bo_gem->reloc_target_bo = NULL;
+
+ return 1;
+ }
return 0;
}
@@ -392,7 +427,9 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
memset(&busy, 0, sizeof(busy));
busy.handle = bo_gem->gem_handle;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ } while (ret == -1 && errno == EINTR);
return (ret == 0 && busy.busy);
}
@@ -524,7 +561,11 @@ retry:
memset(&create, 0, sizeof(create));
create.size = bo_size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ do {
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_CREATE,
+ &create);
+ } while (ret == -1 && errno == EINTR);
bo_gem->gem_handle = create.handle;
bo_gem->bo.handle = bo_gem->gem_handle;
if (ret != 0) {
@@ -537,13 +578,15 @@ retry:
bo_gem->name = name;
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
- bo_gem->reloc_tree_size = bo_gem->bo.size;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = 0;
+ bo_gem->has_error = 0;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
bo_gem->reusable = 1;
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
DBG("bo_create: buf %d (%s) %ldb\n",
bo_gem->gem_handle, bo_gem->name, size);
@@ -629,7 +672,11 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
memset(&open_arg, 0, sizeof(open_arg));
open_arg.name = handle;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ do {
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_GEM_OPEN,
+ &open_arg);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
@@ -660,6 +707,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
bo_gem->reloc_tree_fences = 0;
else
bo_gem->reloc_tree_fences = 1;
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
@@ -679,9 +727,6 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
if (bo_gem->gtt_virtual)
munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
- free(bo_gem->reloc_target_bo);
- free(bo_gem->relocs);
-
/* Close this object */
memset(&close, 0, sizeof(close));
close.handle = bo_gem->gem_handle;
@@ -734,44 +779,42 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
reloc_target_bo[i],
time);
}
+ bo_gem->reloc_count = 0;
+ bo_gem->used_as_reloc_target = 0;
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
+ /* release memory associated with this object */
+ if (bo_gem->reloc_target_bo) {
+ free(bo_gem->reloc_target_bo);
+ bo_gem->reloc_target_bo = NULL;
+ }
+ if (bo_gem->relocs) {
+ free(bo_gem->relocs);
+ bo_gem->relocs = NULL;
+ }
+
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
tiling_mode = I915_TILING_NONE;
if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
- drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) {
+ drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
+ drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
+ I915_MADV_DONTNEED)) {
bo_gem->free_time = time;
bo_gem->name = NULL;
bo_gem->validate_index = -1;
- bo_gem->reloc_count = 0;
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
- drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
- I915_MADV_DONTNEED);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
} else {
drm_intel_gem_bo_free(bo);
}
}
-static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
-{
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
- assert(atomic_read(&bo_gem->refcount) > 0);
- if (atomic_dec_and_test(&bo_gem->refcount)) {
- struct timespec time;
-
- clock_gettime(CLOCK_MONOTONIC, &time);
- drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
- }
-}
-
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
time_t time)
{
@@ -821,8 +864,13 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
mmap_arg.size = bo->size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ do {
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP,
+ &mmap_arg);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
+ ret = -errno;
fprintf(stderr,
"%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
@@ -843,10 +891,12 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
else
set_domain.write_domain = 0;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
+ ret = -errno;
fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
strerror(errno));
@@ -879,9 +929,13 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
mmap_arg.handle = bo_gem->gem_handle;
/* Get the fake offset back... */
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
- &mmap_arg);
+ do {
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP_GTT,
+ &mmap_arg);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
+ ret = -errno;
fprintf(stderr,
"%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__,
@@ -896,13 +950,14 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
MAP_SHARED, bufmgr_gem->fd,
mmap_arg.offset);
if (bo_gem->gtt_virtual == MAP_FAILED) {
+ ret = -errno;
fprintf(stderr,
"%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, bo_gem->name,
strerror(errno));
pthread_mutex_unlock(&bufmgr_gem->lock);
- return errno;
+ return ret;
}
}
@@ -916,11 +971,13 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = I915_GEM_DOMAIN_GTT;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
+ ret = -errno;
fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
strerror(errno));
@@ -928,7 +985,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
pthread_mutex_unlock(&bufmgr_gem->lock);
- return 0;
+ return ret;
}
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
@@ -968,7 +1025,8 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
*/
sw_finish.handle = bo_gem->gem_handle;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SW_FINISH,
&sw_finish);
} while (ret == -1 && errno == EINTR);
@@ -992,7 +1050,9 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
pwrite.size = size;
pwrite.data_ptr = (uint64_t) (uintptr_t) data;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_PWRITE,
+ &pwrite);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf(stderr,
@@ -1041,15 +1101,18 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
pread.size = size;
pread.data_ptr = (uint64_t) (uintptr_t) data;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_PREAD,
+ &pread);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
+ ret = -errno;
fprintf(stderr,
"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
}
- return 0;
+ return ret;
}
/** Waits for all GPU rendering to the object to have completed. */
@@ -1078,7 +1141,8 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
@@ -1138,10 +1202,22 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
pthread_mutex_lock(&bufmgr_gem->lock);
+ if (bo_gem->has_error) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return -ENOMEM;
+ }
+
+ if (target_bo_gem->has_error) {
+ bo_gem->has_error = 1;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return -ENOMEM;
+ }
/* Create a new relocation list if needed */
- if (bo_gem->relocs == NULL)
- drm_intel_setup_reloc_list(bo);
+ if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo)) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return -ENOMEM;
+ }
/* Check overflow */
assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
@@ -1228,9 +1304,13 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
+ if (bo_gem->has_error)
+ return -ENOMEM;
+
pthread_mutex_lock(&bufmgr_gem->lock);
/* Update indices and set up the validate list. */
drm_intel_gem_bo_process_reloc(bo);
@@ -1250,21 +1330,25 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
execbuf.DR4 = DR4;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER,
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER,
&execbuf);
- } while (ret != 0 && errno == EAGAIN);
+ } while (ret != 0 && errno == EINTR);
- if (ret != 0 && errno == ENOMEM) {
- fprintf(stderr,
- "Execbuffer fails to pin. "
- "Estimate: %u. Actual: %u. Available: %u\n",
- drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
- bufmgr_gem->
- exec_count),
- drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
- bufmgr_gem->
- exec_count),
- (unsigned int)bufmgr_gem->gtt_size);
+ if (ret != 0) {
+ ret = -errno;
+ if (errno == ENOSPC) {
+ fprintf(stderr,
+ "Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+ drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->
+ exec_count),
+ drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->
+ exec_count),
+ (unsigned int)bufmgr_gem->gtt_size);
+ }
}
drm_intel_update_buffer_offsets(bufmgr_gem);
@@ -1277,13 +1361,12 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
- drm_intel_gem_bo_unreference_locked(bo);
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
pthread_mutex_unlock(&bufmgr_gem->lock);
- return 0;
+ return ret;
}
static int
@@ -1299,7 +1382,9 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
pin.alignment = alignment;
do {
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_PIN,
+ &pin);
} while (ret == -1 && errno == EINTR);
if (ret != 0)
@@ -1348,7 +1433,11 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
set_tiling.tiling_mode = *tiling_mode;
set_tiling.stride = stride;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+ do {
+ ret = ioctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_TILING,
+ &set_tiling);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
*tiling_mode = bo_gem->tiling_mode;
return -errno;
@@ -1360,6 +1449,8 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences--;
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
*tiling_mode = bo_gem->tiling_mode;
return 0;
}
@@ -1564,7 +1655,7 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
if (bufmgr_gem->available_fences) {
total_fences = drm_intel_gem_total_fences(bo_array, count);
if (total_fences > bufmgr_gem->available_fences)
- return -1;
+ return -ENOSPC;
}
total = drm_intel_gem_estimate_batch_space(bo_array, count);
@@ -1576,7 +1667,7 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
DBG("check_space: overflowed available aperture, "
"%dkb vs %dkb\n",
total / 1024, (int)bufmgr_gem->gtt_size / 1024);
- return -1;
+ return -ENOSPC;
} else {
DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
(int)bufmgr_gem->gtt_size / 1024);
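
A recurring change in the hunks above is wrapping each GEM ioctl in a restart
loop so that a signal (EINTR) no longer surfaces as a spurious failure. Below
is a minimal sketch of that pattern, pulled out into a hypothetical helper
purely for illustration: the patch itself open-codes the loop at every call
site, and the drmIoctl() wrapper used by drmModePageFlip() further down serves
the same purpose.

    /* Illustrative only -- not part of the patch. */
    #include <errno.h>
    #include <sys/ioctl.h>

    static int restartable_ioctl(int fd, unsigned long request, void *arg)
    {
            int ret;

            /* Retry the ioctl whenever it is interrupted by a signal. */
            do {
                    ret = ioctl(fd, request, arg);
            } while (ret == -1 && errno == EINTR);

            return ret;     /* 0 on success, -1 with errno set on real errors */
    }
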
diff --git a/tests/modetest/modetest.c b/tests/modetest/modetest.c
index 6c69a570..4739a78b 100644
--- a/tests/modetest/modetest.c
+++ b/tests/modetest/modetest.c
@@ -46,6 +46,7 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
+#include <sys/poll.h>
#include "xf86drm.h"
#include "xf86drmMode.h"
@@ -172,7 +173,7 @@ void dump_connectors(void)
int i, j;
printf("Connectors:\n");
- printf("id\tencoder\tstatus\t\ttype\tsize (mm)\tmodes\n");
+ printf("id\tencoder\tstatus\t\ttype\tsize (mm)\tmodes\tencoders\n");
for (i = 0; i < resources->count_connectors; i++) {
connector = drmModeGetConnector(fd, resources->connectors[i]);
@@ -182,7 +183,7 @@ void dump_connectors(void)
continue;
}
- printf("%d\t%d\t%s\t%s\t%dx%d\t\t%d\n",
+ printf("%d\t%d\t%s\t%s\t%dx%d\t\t%d\t",
connector->connector_id,
connector->encoder_id,
connector_status_str(connector->connection),
@@ -190,6 +191,10 @@ void dump_connectors(void)
connector->mmWidth, connector->mmHeight,
connector->count_modes);
+ for (j = 0; j < connector->count_encoders; j++)
+ printf("%s%d", j > 0 ? ", " : "", connector->encoders[j]);
+ printf("\n");
+
if (!connector->count_modes)
continue;
@@ -271,6 +276,10 @@ struct connector {
drmModeModeInfo *mode;
drmModeEncoder *encoder;
int crtc;
+ unsigned int fb_id[2], current_fb_id;
+ struct timeval start;
+
+ int swap_count;
};
static void
@@ -457,16 +466,83 @@ create_test_buffer(drm_intel_bufmgr *bufmgr,
#endif
+static int
+create_grey_buffer(drm_intel_bufmgr *bufmgr,
+ int width, int height, int *stride_out, drm_intel_bo **bo_out)
+{
+ drm_intel_bo *bo;
+ unsigned int *fb_ptr;
+ int size, ret, i, stride;
+ div_t d;
+
+ /* Mode size at 32 bpp */
+ stride = width * 4;
+ size = stride * height;
+
+ bo = drm_intel_bo_alloc(bufmgr, "frontbuffer", size, 4096);
+ if (!bo) {
+ fprintf(stderr, "failed to alloc buffer: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ ret = drm_intel_gem_bo_map_gtt(bo);
+ if (ret) {
+ fprintf(stderr, "failed to GTT map buffer: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ memset(bo->virtual, 0x77, size);
+ drm_intel_gem_bo_unmap_gtt(bo);
+
+ *bo_out = bo;
+ *stride_out = stride;
+
+ return 0;
+}
+
+void
+page_flip_handler(int fd, unsigned int frame,
+ unsigned int sec, unsigned int usec, void *data)
+{
+ struct connector *c;
+ unsigned int new_fb_id;
+ int len, ms;
+ struct timeval end;
+ double t;
+
+ c = data;
+ if (c->current_fb_id == c->fb_id[0])
+ new_fb_id = c->fb_id[1];
+ else
+ new_fb_id = c->fb_id[0];
+
+ drmModePageFlip(fd, c->crtc, new_fb_id,
+ DRM_MODE_PAGE_FLIP_EVENT, c);
+ c->current_fb_id = new_fb_id;
+ c->swap_count++;
+ if (c->swap_count == 60) {
+ gettimeofday(&end, NULL);
+ t = end.tv_sec + end.tv_usec * 1e-6 -
+ (c->start.tv_sec + c->start.tv_usec * 1e-6);
+ fprintf(stderr, "freq: %.02fHz\n", c->swap_count / t);
+ c->swap_count = 0;
+ c->start = end;
+ }
+}
+
static void
-set_mode(struct connector *c, int count)
+set_mode(struct connector *c, int count, int page_flip)
{
drmModeConnector *connector;
drmModeEncoder *encoder = NULL;
struct drm_mode_modeinfo *mode = NULL;
drm_intel_bufmgr *bufmgr;
- drm_intel_bo *bo;
- unsigned int fb_id;
+ drm_intel_bo *bo, *other_bo;
+ unsigned int fb_id, other_fb_id;
int i, j, ret, width, height, x, stride;
+ drmEventContext evctx;
width = 0;
height = 0;
@@ -497,7 +573,6 @@ set_mode(struct connector *c, int count)
x = 0;
for (i = 0; i < count; i++) {
- int crtc_id;
if (c[i].mode == NULL)
continue;
@@ -513,11 +588,61 @@ set_mode(struct connector *c, int count)
return;
}
}
+
+ if (!page_flip)
+ return;
+
+ if (create_grey_buffer(bufmgr, width, height, &stride, &other_bo))
+ return;
+
+ ret = drmModeAddFB(fd, width, height, 32, 32, stride, other_bo->handle,
+ &other_fb_id);
+ if (ret) {
+ fprintf(stderr, "failed to add fb: %s\n", strerror(errno));
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (c[i].mode == NULL)
+ continue;
+
+ drmModePageFlip(fd, c[i].crtc, other_fb_id,
+ DRM_MODE_PAGE_FLIP_EVENT, &c[i]);
+ gettimeofday(&c[i].start, NULL);
+ c[i].swap_count = 0;
+ c[i].fb_id[0] = fb_id;
+ c[i].fb_id[1] = other_fb_id;
+ c[i].current_fb_id = fb_id;
+ }
+
+ memset(&evctx, 0, sizeof evctx);
+ evctx.version = DRM_EVENT_CONTEXT_VERSION;
+ evctx.vblank_handler = NULL;
+ evctx.page_flip_handler = page_flip_handler;
+
+ while (1) {
+ struct pollfd pfd[2];
+
+ pfd[0].fd = 0;
+ pfd[0].events = POLLIN;
+ pfd[1].fd = fd;
+ pfd[1].events = POLLIN;
+
+ if (poll(pfd, 2, -1) < 0) {
+ fprintf(stderr, "poll error\n");
+ break;
+ }
+
+ if (pfd[0].revents)
+ break;
+
+ drmHandleEvent(fd, &evctx);
+ }
}
extern char *optarg;
extern int optind, opterr, optopt;
-static char optstr[] = "ecpmfs:";
+static char optstr[] = "ecpmfs:v";
void usage(char *name)
{
@@ -527,6 +652,7 @@ void usage(char *name)
fprintf(stderr, "\t-p\tlist CRTCs (pipes)\n");
fprintf(stderr, "\t-m\tlist modes\n");
fprintf(stderr, "\t-f\tlist framebuffers\n");
+ fprintf(stderr, "\t-v\ttest vsynced page flipping\n");
fprintf(stderr, "\t-s <connector_id>:<mode>\tset a mode\n");
fprintf(stderr, "\t-s <connector_id>@<crtc_id>:<mode>\tset a mode\n");
fprintf(stderr, "\n\tDefault is to dump all info.\n");
@@ -539,6 +665,7 @@ int main(int argc, char **argv)
{
int c;
int encoders = 0, connectors = 0, crtcs = 0, framebuffers = 0;
+ int test_vsync = 0;
char *modules[] = { "i915", "radeon" };
char *modeset = NULL, *mode, *connector;
int i, connector_id, count = 0;
@@ -562,6 +689,9 @@ int main(int argc, char **argv)
case 'f':
framebuffers = 1;
break;
+ case 'v':
+ test_vsync = 1;
+ break;
case 's':
modeset = strdup(optarg);
con_args[count].crtc = -1;
@@ -614,7 +744,7 @@ int main(int argc, char **argv)
dump_resource(framebuffers);
if (count > 0) {
- set_mode(con_args, count);
+ set_mode(con_args, count, test_vsync);
getchar();
}
diff --git a/xf86drm.h b/xf86drm.h
index 496d95d8..9b89f562 100644
--- a/xf86drm.h
+++ b/xf86drm.h
@@ -693,7 +693,7 @@ extern void drmMsg(const char *format, ...);
extern int drmSetMaster(int fd);
extern int drmDropMaster(int fd);
-#define DRM_EVENT_CONTEXT_VERSION 1
+#define DRM_EVENT_CONTEXT_VERSION 2
typedef struct _drmEventContext {
@@ -707,6 +707,12 @@ typedef struct _drmEventContext {
unsigned int tv_usec,
void *user_data);
+ void (*page_flip_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
} drmEventContext, *drmEventContextPtr;
extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
diff --git a/xf86drmMode.c b/xf86drmMode.c
index 7481ad60..ca36b71b 100644
--- a/xf86drmMode.c
+++ b/xf86drmMode.c
@@ -712,7 +712,17 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
vblank->tv_usec,
U642VOID (vblank->user_data));
break;
-
+ case DRM_EVENT_FLIP_COMPLETE:
+ if (evctx->version < 2 ||
+ evctx->page_flip_handler == NULL)
+ break;
+ vblank = (struct drm_event_vblank *) e;
+ evctx->page_flip_handler(fd,
+ vblank->sequence,
+ vblank->tv_sec,
+ vblank->tv_usec,
+ U642VOID (vblank->user_data));
+ break;
default:
break;
}
@@ -722,3 +732,16 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
return 0;
}
+int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data)
+{
+ struct drm_mode_crtc_page_flip flip;
+
+ flip.fb_id = fb_id;
+ flip.crtc_id = crtc_id;
+ flip.user_data = VOID2U64(user_data);
+ flip.flags = flags;
+ flip.reserved = 0;
+
+ return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+}
diff --git a/xf86drmMode.h b/xf86drmMode.h
index 226d9467..5a4ce637 100644
--- a/xf86drmMode.h
+++ b/xf86drmMode.h
@@ -382,3 +382,5 @@ extern int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue);
extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data);