author | Eric Anholt <eric@anholt.net> | 2007-07-19 06:31:26 -0700 |
---|---|---|
committer | Eric Anholt <eric@anholt.net> | 2007-07-19 06:31:26 -0700 |
commit | 05204b9c8d021e019456a8dbd83c012e277c7aaf (patch) | |
tree | 5639c2821d47e92cc615059cbc12e9c050753230 | |
parent | e544286eae71a6b150af4d86096895c14e42c36e (diff) | |
parent | 0c95d489abd19efd2ba017e78a4b28cea0854e77 (diff) | |
Merge branch 'origin'
114 files changed, 3909 insertions, 2775 deletions
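Most of the libdrm churn below replaces the multiplexed fence and buffer-object ioctls (DRM_IOCTL_FENCE and DRM_IOCTL_BUFOBJ, each dispatching on an `op` field in the request) with a dedicated ioctl and argument struct per operation, and widens the buffer-object `flags`/`mask` fields to 64 bits. A minimal before/after sketch of the resulting call pattern, modeled on the drmBOUnmap() hunk in this diff; the `bo_unmap_old()`/`bo_unmap_new()` helper names are illustrative only, and the two halves compile against the pre- and post-merge headers respectively:

```c
/*
 * Illustrative only (helper names are not part of the tree): the same
 * drmBOUnmap() operation before and after this merge.  The "old" half
 * needs the pre-merge headers (drm_bo_arg_t, DRM_IOCTL_BUFOBJ), the
 * "new" half the post-merge ones (struct drm_bo_handle_arg,
 * DRM_IOCTL_BO_UNMAP).
 */
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "xf86mm.h"

static int bo_unmap_old(int fd, drmBO *buf)
{
	drm_bo_arg_t arg;

	memset(&arg, 0, sizeof(arg));
	arg.d.req.handle = buf->handle;
	arg.d.req.op = drm_bo_unmap;		/* operation selected by an op code */

	if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))	/* one ioctl for every BO call */
		return -errno;
	if (!arg.handled)			/* per-request error plumbing ... */
		return -EFAULT;
	return arg.d.rep.ret;			/* ... plus a reply-side error code */
}

static int bo_unmap_new(int fd, drmBO *buf)
{
	struct drm_bo_handle_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = buf->handle;		/* purpose-built argument struct */

	if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg))	/* dedicated ioctl per operation */
		return -errno;
	return 0;
}
```

The fence wrappers follow the same pattern (drm_fence_create and friends become DRM_IOCTL_FENCE_CREATE etc.), and drmBOCreate()/drmBOValidate() grow uint64_t flag and mask parameters to match the widened drmBO fields.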
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 93185512..8cee4fbc 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -49,7 +49,6 @@ #include <sys/mman.h> #include <sys/time.h> #include <stdarg.h> -#include "drm.h" /* Not all systems have MAP_FAILED defined */ #ifndef MAP_FAILED @@ -2355,8 +2354,7 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type, arg.flags = flags; arg.type = type; arg.class = class; - arg.op = drm_fence_create; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg)) return -errno; fence->handle = arg.handle; fence->class = arg.class; @@ -2378,8 +2376,8 @@ int drmFenceBuffers(int fd, unsigned flags, drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.flags = flags; - arg.op = drm_fence_buffers; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg)) return -errno; fence->handle = arg.handle; fence->class = arg.class; @@ -2395,8 +2393,8 @@ int drmFenceDestroy(int fd, const drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; - arg.op = drm_fence_destroy; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg)) return -errno; return 0; } @@ -2407,8 +2405,8 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = handle; - arg.op = drm_fence_reference; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg)) return -errno; fence->handle = arg.handle; fence->class = arg.class; @@ -2424,8 +2422,8 @@ int drmFenceUnreference(int fd, const drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; - arg.op = drm_fence_unreference; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg)) return -errno; return 0; } @@ -2437,8 +2435,8 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; arg.type = flush_type; - arg.op = drm_fence_flush; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg)) return -errno; fence->class = arg.class; fence->type = arg.type; @@ -2452,8 +2450,8 @@ int drmFenceUpdate(int fd, drmFence *fence) memset(&arg, 0, sizeof(arg)); arg.handle = fence->handle; - arg.op = drm_fence_signaled; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg)) return -errno; fence->class = arg.class; fence->type = arg.type; @@ -2492,8 +2490,8 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type) arg.flags = flags; arg.handle = fence->handle; arg.type = emit_type; - arg.op = drm_fence_emit; - if (ioctl(fd, DRM_IOCTL_FENCE, &arg)) + + if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg)) return -errno; fence->class = arg.class; fence->type = arg.type; @@ -2526,9 +2524,9 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type) arg.handle = fence->handle; arg.type = flush_type; arg.flags = flags; - arg.op = drm_fence_wait; + do { - ret = ioctl(fd, DRM_IOCTL_FENCE, &arg); + ret = ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg); } while (ret != 0 && errno == EAGAIN); if (ret) @@ -2678,7 +2676,7 @@ int drmBOCreateList(int numTarget, drmBOList *list) return drmAdjustListNodes(list); } -static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf) +static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf) { buf->handle = rep->handle; buf->flags = rep->flags; @@ -2690,16 +2688,21 @@ static void 
drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf) buf->fenceFlags = rep->fence_flags; buf->replyFlags = rep->rep_flags; buf->pageAlignment = rep->page_alignment; + buf->tileInfo = rep->tile_info; + buf->hwTileStride = rep->hw_tile_stride; + buf->desiredTileStride = rep->desired_tile_stride; } -int drmBOCreate(int fd, unsigned long start, unsigned long size, - unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, - unsigned mask, + + +int drmBOCreate(int fd, unsigned long start, unsigned long size, + unsigned pageAlignment, void *user_buffer, drm_bo_type_t type, + uint64_t mask, unsigned hint, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_create_arg arg; + struct drm_bo_create_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret; memset(buf, 0, sizeof(*buf)); @@ -2726,21 +2729,13 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, default: return -EINVAL; } - req->op = drm_bo_create; do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg); } while (ret != 0 && errno == EAGAIN); if (ret) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - fprintf(stderr, "Error %d\n", rep->ret); - return rep->ret; - } drmBOCopyReply(rep, buf); buf->mapVirtual = NULL; @@ -2751,9 +2746,7 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size, int drmBODestroy(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_handle_arg arg; if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) { (void) drmUnmap(buf->mapVirtual, buf->start + buf->size); @@ -2762,40 +2755,26 @@ int drmBODestroy(int fd, drmBO *buf) } memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->op = drm_bo_destroy; + arg.handle = buf->handle; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) + if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg)) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - return rep->ret; - } buf->handle = 0; return 0; } - + int drmBOReference(int fd, unsigned handle, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; memset(&arg, 0, sizeof(arg)); req->handle = handle; - req->op = drm_bo_reference; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) + if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg)) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - return rep->ret; - } drmBOCopyReply(rep, buf); buf->type = drm_bo_type_dc; @@ -2808,9 +2787,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf) int drmBOUnReference(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_handle_arg arg; if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) { (void) munmap(buf->mapVirtual, buf->start + buf->size); @@ -2819,22 +2796,16 @@ int drmBOUnReference(int fd, drmBO *buf) } memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->op = drm_bo_unreference; + arg.handle = buf->handle; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) + if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg)) return -errno; - if (!arg.handled) { - return -EFAULT; - } - if (rep->ret) { - return rep->ret; - } buf->handle = 0; return 0; } + /* * Flags can be 
DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE or'ed together * Hint currently be DRM_BO_HINT_DONT_BLOCK, which makes the @@ -2844,9 +2815,9 @@ int drmBOUnReference(int fd, drmBO *buf) int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; /* @@ -2871,7 +2842,6 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, req->handle = buf->handle; req->mask = mapFlags; req->hint = mapHint; - req->op = drm_bo_map; /* * May hang if the buffer object is busy. @@ -2879,15 +2849,11 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, */ do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_MAP, &arg); } while (ret != 0 && errno == EAGAIN); if (ret) return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; drmBOCopyReply(rep, buf); buf->mapFlags = mapFlags; @@ -2897,44 +2863,39 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, return 0; } + int drmBOUnmap(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_handle_arg arg; memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->op = drm_bo_unmap; + arg.handle = buf->handle; - if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) { + if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) { return -errno; } - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; - return 0; } - -int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, + +int drmBOValidate(int fd, drmBO *buf, + uint64_t flags, uint64_t mask, unsigned hint) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_arg_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->mask = flags; - req->hint = hint; - req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */ + req->bo_req.handle = buf->handle; + req->bo_req.flags = flags; + req->bo_req.mask = mask; + req->bo_req.hint = hint; + req->bo_req.fence_class = 0; /* Backwards compatibility. 
*/ req->op = drm_bo_validate; do{ - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg); } while (ret && errno == EAGAIN); if (ret) @@ -2944,26 +2905,25 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, if (rep->ret) return rep->ret; - drmBOCopyReply(rep, buf); + drmBOCopyReply(&rep->bo_info, buf); return 0; } int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_arg_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); - req->handle = buf->handle; - req->mask = flags; + req->bo_req.handle = buf->handle; + req->bo_req.flags = flags; req->arg_handle = fenceHandle; req->op = drm_bo_fence; - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); - + ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg); if (ret) return -errno; if (!arg.handled) @@ -2975,51 +2935,42 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle) int drmBOInfo(int fd, drmBO *buf) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; memset(&arg, 0, sizeof(arg)); req->handle = buf->handle; - req->op = drm_bo_info; - - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg); if (ret) return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; + drmBOCopyReply(rep, buf); return 0; } int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint) { - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; int ret = 0; if ((buf->flags & DRM_BO_FLAG_SHAREABLE) || (buf->replyFlags & DRM_BO_REP_BUSY)) { memset(&arg, 0, sizeof(arg)); req->handle = buf->handle; - req->op = drm_bo_wait_idle; req->hint = hint; do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg); + ret = ioctl(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg); } while (ret && errno == EAGAIN); if (ret) return -errno; - if (!arg.handled) - return -EFAULT; - if (rep->ret) - return rep->ret; + drmBOCopyReply(rep, buf); } return 0; @@ -3041,7 +2992,6 @@ int drmBOBusy(int fd, drmBO *buf, int *busy) } } - int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, unsigned mask, int *newItem) @@ -3103,74 +3053,74 @@ int drmBOValidateList(int fd, drmBOList *list) { drmBONode *node; drmMMListHead *l; - drm_bo_arg_t *arg, *first; - drm_bo_arg_request_t *req; - drm_bo_arg_reply_t *rep; - drm_u64_t *prevNext = NULL; + struct drm_bo_op_arg *arg, *first; + struct drm_bo_op_req *req; + struct drm_bo_arg_rep *rep; + uint64_t *prevNext = NULL; drmBO *buf; int ret; first = NULL; for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); + node = DRMLISTENTRY(drmBONode, l, head); - arg = &node->bo_arg; - req = &arg->d.req; + arg = &node->bo_arg; + req = &arg->d.req; - if (!first) - first = arg; + if (!first) + first = arg; if (prevNext) *prevNext = (unsigned long) arg; memset(arg, 0, sizeof(*arg)); prevNext = &arg->next; - req->handle = node->buf->handle; + req->bo_req.handle = node->buf->handle; req->op = drm_bo_validate; - req->mask = node->arg0; - req->hint = 0; - req->arg_handle = 
node->arg1; + req->bo_req.flags = node->arg0; + req->bo_req.hint = 0; + req->bo_req.mask = node->arg1; + req->bo_req.fence_class = 0; /* Backwards compat. */ } - - if (!first) + + if (!first) return 0; - do { - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first); + do{ + ret = ioctl(fd, DRM_IOCTL_BO_OP, first); } while (ret && errno == EAGAIN); if (ret) return -errno; - + for (l = list->list.next; l != &list->list; l = l->next) { node = DRMLISTENTRY(drmBONode, l, head); arg = &node->bo_arg; rep = &arg->d.rep; - + if (!arg->handled) { drmMsg("Unhandled request\n"); return -EFAULT; } if (rep->ret) return rep->ret; - + buf = node->buf; - drmBOCopyReply(rep, buf); + drmBOCopyReply(&rep->bo_info, buf); } - + return 0; } - int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) { drmBONode *node; drmMMListHead *l; - drm_bo_arg_t *arg, *first; - drm_bo_arg_request_t *req; - drm_bo_arg_reply_t *rep; - drm_u64_t *prevNext = NULL; + struct drm_bo_op_arg *arg, *first; + struct drm_bo_op_req *req; + struct drm_bo_arg_rep *rep; + uint64_t *prevNext = NULL; drmBO *buf; unsigned fence_flags; int ret; @@ -3178,8 +3128,8 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) first = NULL; for (l = list->list.next; l != &list->list; l = l->next) { - node = DRMLISTENTRY(drmBONode, l, head); - + node = DRMLISTENTRY(drmBONode, l, head); + arg = &node->bo_arg; req = &arg->d.req; @@ -3191,29 +3141,31 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) memset(arg, 0, sizeof(*arg)); prevNext = &arg->next; - req->handle = node->buf->handle; + req->bo_req.handle = node->buf->handle; req->op = drm_bo_fence; - req->mask = node->arg0; + req->bo_req.mask = node->arg0; req->arg_handle = fenceHandle; } if (!first) return 0; - ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first); + ret = ioctl(fd, DRM_IOCTL_BO_OP, first); if (ret) return -errno; for (l = list->list.next; l != &list->list; l = l->next) { node = DRMLISTENTRY(drmBONode, l, head); + arg = &node->bo_arg; rep = &arg->d.rep; + if (!arg->handled) return -EFAULT; if (rep->ret) return rep->ret; - drmBOCopyReply(rep, node->buf); + drmBOCopyReply(&rep->bo_info, node->buf); } return 0; @@ -3222,13 +3174,16 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle) int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_init_arg arg; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_init; - arg.req.p_offset = pOffset; - arg.req.p_size = pSize; - arg.req.mem_type = memType; + + arg.magic = DRM_BO_INIT_MAGIC; + arg.major = DRM_BO_INIT_MAJOR; + arg.minor = DRM_BO_INIT_MINOR; + arg.p_offset = pOffset; + arg.p_size = pSize; + arg.mem_type = memType; if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg)) return -errno; @@ -3237,28 +3192,26 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize, int drmMMTakedown(int fd, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_type_arg arg; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_takedown; - arg.req.mem_type = memType; + arg.mem_type = memType; - if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg)) + if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg)) return -errno; return 0; } int drmMMLock(int fd, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_type_arg arg; int ret; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_lock; - arg.req.mem_type = memType; + arg.mem_type = memType; do{ - ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg); + ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg); } while (ret && errno == EAGAIN); return (ret) ? 
-errno : 0; @@ -3266,15 +3219,15 @@ int drmMMLock(int fd, unsigned memType) int drmMMUnlock(int fd, unsigned memType) { - drm_mm_init_arg_t arg; + struct drm_mm_type_arg arg; int ret; memset(&arg, 0, sizeof(arg)); - arg.req.op = mm_unlock; - arg.req.mem_type = memType; + + arg.mem_type = memType; do{ - ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg); + ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg); } while (ret && errno == EAGAIN); return (ret) ? -errno : 0; diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h index d4260cc9..230f54ce 100644 --- a/libdrm/xf86drm.h +++ b/libdrm/xf86drm.h @@ -36,6 +36,7 @@ #include <stdarg.h> #include <sys/types.h> +#include <stdint.h> #include <drm.h> /* Defaults, if nothing set in xf86config */ diff --git a/libdrm/xf86drmHash.c b/libdrm/xf86drmHash.c index d1ade063..82cbc2a5 100644 --- a/libdrm/xf86drmHash.c +++ b/libdrm/xf86drmHash.c @@ -74,7 +74,6 @@ #define HASH_MAIN 0 #if !HASH_MAIN -# include "drm.h" # include "xf86drm.h" #endif diff --git a/libdrm/xf86drmRandom.c b/libdrm/xf86drmRandom.c index 61ffb078..ecab9e2d 100644 --- a/libdrm/xf86drmRandom.c +++ b/libdrm/xf86drmRandom.c @@ -77,7 +77,6 @@ #define RANDOM_MAIN 0 #if !RANDOM_MAIN -# include "drm.h" # include "xf86drm.h" #endif diff --git a/libdrm/xf86drmSL.c b/libdrm/xf86drmSL.c index ce60648d..58aefac7 100644 --- a/libdrm/xf86drmSL.c +++ b/libdrm/xf86drmSL.c @@ -44,7 +44,6 @@ #define SL_MAIN 0 #if !SL_MAIN -# include "drm.h" # include "xf86drm.h" #else # include <sys/time.h> diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index b3822d4d..d1e0b28f 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -107,9 +107,9 @@ typedef struct _drmBO { drm_bo_type_t type; unsigned handle; - drm_u64_t mapHandle; - unsigned flags; - unsigned mask; + uint64_t mapHandle; + uint64_t flags; + uint64_t mask; unsigned mapFlags; unsigned long size; unsigned long offset; @@ -117,6 +117,9 @@ typedef struct _drmBO unsigned replyFlags; unsigned fenceFlags; unsigned pageAlignment; + unsigned tileInfo; + unsigned hwTileStride; + unsigned desiredTileStride; void *virtual; void *mapVirtual; int mapCount; @@ -127,7 +130,7 @@ typedef struct _drmBONode { drmMMListHead head; drmBO *buf; - drm_bo_arg_t bo_arg; + struct drm_bo_op_arg bo_arg; unsigned long arg0; unsigned long arg1; } drmBONode; @@ -176,8 +179,8 @@ extern int drmBOCreateList(int numTarget, drmBOList *list); */ extern int drmBOCreate(int fd, unsigned long start, unsigned long size, - unsigned pageAlignment,void *user_buffer, - drm_bo_type_t type, unsigned mask, + unsigned pageAlignment,void *user_buffer, + drm_bo_type_t type, uint64_t mask, unsigned hint, drmBO *buf); extern int drmBODestroy(int fd, drmBO *buf); extern int drmBOReference(int fd, unsigned handle, drmBO *buf); @@ -185,14 +188,14 @@ extern int drmBOUnReference(int fd, drmBO *buf); extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint, void **address); extern int drmBOUnmap(int fd, drmBO *buf); -extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask, - unsigned hint); +extern int drmBOValidate(int fd, drmBO *buf, uint64_t flags, + uint64_t mask, unsigned hint); + extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle); extern int drmBOInfo(int fd, drmBO *buf); extern int drmBOBusy(int fd, drmBO *buf, int *busy); - -extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, +extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags, unsigned mask, int *newItem); extern int drmBOValidateList(int fd, drmBOList *list); diff 
--git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index be2641c8..5aa589cd 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,6 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_sgdma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c index 524618a8..7241c2a8 100644 --- a/linux-core/ati_pcigart.c +++ b/linux-core/ati_pcigart.c @@ -81,9 +81,9 @@ static void drm_ati_free_pcigart_table(void *address, int order) free_pages((unsigned long)address, order); } -int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; unsigned long pages; int i; int order; @@ -132,9 +132,9 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) } EXPORT_SYMBOL(drm_ati_pcigart_cleanup); -int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; void *address = NULL; unsigned long pages; u32 *pci_gart, page_base, bus_address = 0; diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 3b2176c9..575e6255 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -291,31 +291,27 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 -typedef struct drm_ioctl_desc { +struct drm_ioctl_desc { drm_ioctl_t *func; int flags; -} drm_ioctl_desc_t; - -typedef struct drm_devstate { - pid_t owner; /**< X server pid holding x_lock */ -} drm_devstate_t; +}; -typedef struct drm_magic_entry { +struct drm_magic_entry { struct list_head head; - drm_hash_item_t hash_item; + struct drm_hash_item hash_item; struct drm_file *priv; -} drm_magic_entry_t; +}; -typedef struct drm_vma_entry { +struct drm_vma_entry { struct list_head head; struct vm_area_struct *vma; pid_t pid; -} drm_vma_entry_t; +}; /** * DMA buffer. 
*/ -typedef struct drm_buf { +struct drm_buf { int idx; /**< Index into master buflist */ int total; /**< Buffer size */ int order; /**< log-base-2(total) */ @@ -341,30 +337,30 @@ typedef struct drm_buf { int dev_priv_size; /**< Size of buffer private storage */ void *dev_private; /**< Per-buffer private storage */ -} drm_buf_t; +}; /** bufs is one longer than it has to be */ -typedef struct drm_waitlist { +struct drm_waitlist { int count; /**< Number of possible buffers */ - drm_buf_t **bufs; /**< List of pointers to buffers */ - drm_buf_t **rp; /**< Read pointer */ - drm_buf_t **wp; /**< Write pointer */ - drm_buf_t **end; /**< End pointer */ + struct drm_buf **bufs; /**< List of pointers to buffers */ + struct drm_buf **rp; /**< Read pointer */ + struct drm_buf **wp; /**< Write pointer */ + struct drm_buf **end; /**< End pointer */ spinlock_t read_lock; spinlock_t write_lock; -} drm_waitlist_t; +}; -typedef struct drm_freelist { +struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ - drm_buf_t *next; /**< End pointer */ + struct drm_buf *next; /**< End pointer */ wait_queue_head_t waiting; /**< Processes waiting on free bufs */ int low_mark; /**< Low water mark */ int high_mark; /**< High water mark */ atomic_t wfh; /**< If waiting for high mark */ spinlock_t lock; -} drm_freelist_t; +}; typedef struct drm_dma_handle { dma_addr_t busaddr; @@ -375,15 +371,15 @@ typedef struct drm_dma_handle { /** * Buffer entry. There is one of this for each buffer size order. */ -typedef struct drm_buf_entry { +struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ - drm_buf_t *buflist; /**< buffer list */ + struct drm_buf *buflist; /**< buffer list */ int seg_count; int page_order; - drm_dma_handle_t **seglist; - drm_freelist_t freelist; -} drm_buf_entry_t; + struct drm_dma_handle **seglist; + struct drm_freelist freelist; +}; /* * This should be small enough to allow the use of kmalloc for hash tables @@ -391,15 +387,15 @@ typedef struct drm_buf_entry { */ #define DRM_FILE_HASH_ORDER 8 -typedef enum{ +enum drm_ref_type { _DRM_REF_USE=0, _DRM_REF_TYPE1, _DRM_NO_REF_TYPES -} drm_ref_t; +}; /** File private data */ -typedef struct drm_file { +struct drm_file { int authenticated; int master; int minor; @@ -422,12 +418,12 @@ typedef struct drm_file { struct list_head refd_objects; struct list_head user_objects; - drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES]; + struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; void *driver_priv; -} drm_file_t; +}; /** Wait queue */ -typedef struct drm_queue { +struct drm_queue { atomic_t use_count; /**< Outstanding uses (+1) */ atomic_t finalization; /**< Finalization in progress */ atomic_t block_count; /**< Count of processes waiting */ @@ -440,16 +436,16 @@ typedef struct drm_queue { atomic_t total_flushed; /**< Total flushes statistic */ atomic_t total_locks; /**< Total locks statistics */ #endif - drm_ctx_flags_t flags; /**< Context preserving and 2D-only */ - drm_waitlist_t waitlist; /**< Pending buffers */ + enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ + struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ -} drm_queue_t; +}; /** * Lock data. 
*/ -typedef struct drm_lock_data { - drm_hw_lock_t *hw_lock; /**< Hardware lock */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; /**< Hardware lock */ struct file *filp; /**< File descr of lock holder (0=kernel) */ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ @@ -457,16 +453,16 @@ typedef struct drm_lock_data { uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; -} drm_lock_data_t; +}; /** * DMA data. */ -typedef struct drm_device_dma { +struct drm_device_dma { - drm_buf_entry_t bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ - drm_buf_t **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ int seg_count; int page_count; /**< number of pages */ unsigned long *pagelist; /**< page list */ @@ -478,25 +474,25 @@ typedef struct drm_device_dma { _DRM_DMA_USE_PCI_RO = 0x08 } flags; -} drm_device_dma_t; +}; /** * AGP memory entry. Stored as a doubly linked list. */ -typedef struct drm_agp_mem { +struct drm_agp_mem { unsigned long handle; /**< handle */ DRM_AGP_MEM *memory; unsigned long bound; /**< address */ int pages; struct list_head head; -} drm_agp_mem_t; +}; /** * AGP data. * * \sa drm_agp_init)() and drm_device::agp. */ -typedef struct drm_agp_head { +struct drm_agp_head { DRM_AGP_KERN agp_info; /**< AGP device information */ struct list_head memory; unsigned long mode; /**< AGP mode */ @@ -509,30 +505,30 @@ typedef struct drm_agp_head { int agp_mtrr; int cant_use_aperture; unsigned long page_mask; -} drm_agp_head_t; +}; /** * Scatter-gather memory. 
*/ -typedef struct drm_sg_mem { +struct drm_sg_mem { unsigned long handle; void *virtual; int pages; struct page **pagelist; dma_addr_t *busaddr; -} drm_sg_mem_t; +}; -typedef struct drm_sigdata { +struct drm_sigdata { int context; - drm_hw_lock_t *lock; -} drm_sigdata_t; + struct drm_hw_lock *lock; +}; /* * Generic memory manager structs */ -typedef struct drm_mm_node { +struct drm_mm_node { struct list_head fl_entry; struct list_head ml_entry; int free; @@ -540,42 +536,42 @@ typedef struct drm_mm_node { unsigned long size; struct drm_mm *mm; void *private; -} drm_mm_node_t; +}; -typedef struct drm_mm { +struct drm_mm { struct list_head fl_entry; struct list_head ml_entry; -} drm_mm_t; +}; /** * Mappings list */ -typedef struct drm_map_list { +struct drm_map_list { struct list_head head; /**< list head */ - drm_hash_item_t hash; - drm_map_t *map; /**< mapping */ - drm_u64_t user_token; - drm_mm_node_t *file_offset_node; -} drm_map_list_t; + struct drm_hash_item hash; + struct drm_map *map; /**< mapping */ + uint64_t user_token; + struct drm_mm_node *file_offset_node; +}; -typedef drm_map_t drm_local_map_t; +typedef struct drm_map drm_local_map_t; /** * Context handle list */ -typedef struct drm_ctx_list { +struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ - drm_file_t *tag; /**< associated fd private data */ -} drm_ctx_list_t; + struct drm_file *tag; /**< associated fd private data */ +}; -typedef struct drm_vbl_sig { +struct drm_vbl_sig { struct list_head head; unsigned int sequence; struct siginfo info; struct task_struct *task; -} drm_vbl_sig_t; +}; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 @@ -585,14 +581,14 @@ typedef struct drm_vbl_sig { #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 -typedef struct ati_pcigart_info { +struct ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; drm_local_map_t mapping; int table_size; -} drm_ati_pcigart_info; +}; #include "drm_objects.h" @@ -606,9 +602,9 @@ struct drm_device; struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); - int (*open) (struct drm_device *, drm_file_t *); + int (*open) (struct drm_device *, struct drm_file *); void (*preclose) (struct drm_device *, struct file * filp); - void (*postclose) (struct drm_device *, drm_file_t *); + void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); int (*dma_ioctl) (DRM_IOCTL_ARGS); @@ -646,9 +642,9 @@ struct drm_driver { struct file * filp); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, struct file * filp); - unsigned long (*get_map_ofs) (drm_map_t * map); + unsigned long (*get_map_ofs) (struct drm_map * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); - void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); + void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); struct drm_fence_driver *fence_driver; struct drm_bo_driver *bo_driver; @@ -663,7 +659,7 @@ struct drm_driver { /* variables */ u32 driver_features; int dev_priv_size; - drm_ioctl_desc_t *ioctls; + struct drm_ioctl_desc *ioctls; int num_ioctls; struct file_operations fops; struct pci_driver pci_driver; @@ -674,20 +670,20 @@ struct drm_driver { * that may contain multiple heads. Embed one per head of these in the * private drm_device structure. 
*/ -typedef struct drm_head { +struct drm_head { int minor; /**< Minor device number */ struct drm_device *dev; struct proc_dir_entry *dev_root; /**< proc directory entry */ dev_t device; /**< Device number for mknod */ struct class_device *dev_class; -} drm_head_t; +}; /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. */ -typedef struct drm_device { +struct drm_device { char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ char *devname; /**< For /proc/interrupts */ @@ -713,14 +709,14 @@ typedef struct drm_device { /** \name Performance counters */ /*@{ */ unsigned long counters; - drm_stat_type_t types[15]; + enum drm_stat_type types[15]; atomic_t counts[15]; /*@} */ /** \name Authentication */ /*@{ */ struct list_head filelist; - drm_open_hash_t magiclist; + struct drm_open_hash magiclist; struct list_head magicfree; /*@} */ @@ -728,9 +724,9 @@ typedef struct drm_device { /*@{ */ struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ - drm_open_hash_t map_hash; /**< User token hash table for maps */ - drm_mm_t offset_manager; /**< User token manager */ - drm_open_hash_t object_hash; /**< User token hash table for objects */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + struct drm_mm offset_manager; /**< User token manager */ + struct drm_open_hash object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ struct page *ttm_dummy_page; @@ -743,7 +739,7 @@ typedef struct drm_device { struct idr ctx_idr; struct list_head vmalist; /**< List of vmas (for debugging) */ - drm_lock_data_t lock; /**< Information on hardware lock */ + struct drm_lock_data lock; /**< Information on hardware lock */ /*@} */ /** \name DMA queues (contexts) */ @@ -751,8 +747,8 @@ typedef struct drm_device { int queue_count; /**< Number of active DMA queues */ int queue_reserved; /**< Number of reserved DMA queues */ int queue_slots; /**< Actual length of queuelist */ - drm_queue_t **queuelist; /**< Vector of pointers to DMA queues */ - drm_device_dma_t *dma; /**< Optional pointer for DMA support */ + struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ /*@} */ /** \name Context support */ @@ -792,7 +788,7 @@ typedef struct drm_device { wait_queue_head_t buf_readers; /**< Processes waiting to read */ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ - drm_agp_head_t *agp; /**< AGP data */ + struct drm_agp_head *agp; /**< AGP data */ struct pci_dev *pdev; /**< PCI device structure */ int pci_vendor; /**< PCI vendor id */ @@ -800,33 +796,33 @@ typedef struct drm_device { #ifdef __alpha__ struct pci_controller *hose; #endif - drm_sg_mem_t *sg; /**< Scatter gather memory */ + struct drm_sg_mem *sg; /**< Scatter gather memory */ void *dev_private; /**< device private data */ - drm_sigdata_t sigdata; /**< For block_all_signals */ + struct drm_sigdata sigdata; /**< For block_all_signals */ sigset_t sigmask; struct drm_driver *driver; drm_local_map_t *agp_buffer_map; unsigned int agp_buffer_token; - drm_head_t primary; /**< primary screen head */ + struct drm_head primary; /**< primary screen head */ - drm_fence_manager_t fm; - drm_buffer_manager_t bm; + struct drm_fence_manager fm; + struct drm_buffer_manager bm; /** \name Drawable information */ /*@{ */ spinlock_t drw_lock; 
struct idr drw_idr; /*@} */ -} drm_device_t; +}; #if __OS_HAS_AGP -typedef struct drm_agp_ttm_backend { - drm_ttm_backend_t backend; +struct drm_agp_ttm_backend { + struct drm_ttm_backend backend; DRM_AGP_MEM *mem; struct agp_bridge_data *bridge; int populated; -} drm_agp_ttm_backend_t; +}; #endif @@ -904,7 +900,7 @@ extern int drm_ioctl(struct inode *inode, struct file *filp, extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lastclose(drm_device_t * dev); +extern int drm_lastclose(struct drm_device *dev); /* Device support (drm_fops.h) */ extern int drm_open(struct inode *inode, struct file *filp); @@ -915,7 +911,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Mapping support (drm_vm.h) */ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); -extern unsigned long drm_core_get_map_ofs(drm_map_t * map); +extern unsigned long drm_core_get_map_ofs(struct drm_map * map); extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma); @@ -928,16 +924,16 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); extern unsigned long drm_alloc_pages(int order, int area); extern void drm_free_pages(unsigned long address, int order, int area); -extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type); +extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern int drm_unbind_agp(DRM_AGP_MEM * handle); extern void drm_free_memctl(size_t size); extern int drm_alloc_memctl(size_t size); -extern void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold); +extern void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold); extern void drm_init_memctl(size_t low_threshold, size_t high_threshold, size_t unit_size); @@ -976,9 +972,9 @@ extern int drm_newctx(struct inode *inode, struct file *filp, extern int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_ctxbitmap_init(drm_device_t * dev); -extern void drm_ctxbitmap_cleanup(drm_device_t * dev); -extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle); +extern int drm_ctxbitmap_init(struct drm_device *dev); +extern void drm_ctxbitmap_cleanup(struct drm_device *dev); +extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); extern int drm_setsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -992,9 +988,9 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_update_drawable_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, - drm_drawable_t id); -extern void drm_drawable_free_all(drm_device_t *dev); +extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, + drm_drawable_t id); +extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ extern int drm_getmagic(struct inode *inode, struct file *filp, @@ -1007,10 +1003,10 @@ extern int drm_lock(struct inode *inode, struct file *filp, unsigned int cmd, 
unsigned long arg); extern int drm_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context); -extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context); -extern void drm_idlelock_take(drm_lock_data_t *lock_data); -extern void drm_idlelock_release(drm_lock_data_t *lock_data); +extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); +extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); +extern void drm_idlelock_take(struct drm_lock_data *lock_data); +extern void drm_idlelock_release(struct drm_lock_data *lock_data); /* * These are exported to drivers so that they can implement fencing using @@ -1021,16 +1017,16 @@ extern int drm_i_have_hw_lock(struct file *filp); extern int drm_kernel_take_hw_lock(struct file *filp); /* Buffer management support (drm_bufs.h) */ -extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_fb (drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr); +extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addmap(struct drm_device *dev, unsigned int offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr); extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); -extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map); +extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); +extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_addbufs(struct inode *inode, struct file *filp, @@ -1044,59 +1040,59 @@ extern int drm_freebufs(struct inode *inode, struct file *filp, extern int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_order(unsigned long size); -extern unsigned long drm_get_resource_start(drm_device_t *dev, +extern unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); -extern unsigned long drm_get_resource_len(drm_device_t *dev, +extern unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource); -extern drm_map_list_t *drm_find_matching_map(drm_device_t *dev, - drm_local_map_t *map); +extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, + drm_local_map_t *map); /* DMA support (drm_dma.h) */ -extern int drm_dma_setup(drm_device_t * dev); -extern void drm_dma_takedown(drm_device_t * dev); -extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf); -extern void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp); +extern int drm_dma_setup(struct drm_device *dev); +extern void drm_dma_takedown(struct drm_device *dev); +extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); +extern void drm_core_reclaim_buffers(struct drm_device *dev, 
struct file *filp); /* IRQ support (drm_irq.h) */ extern int drm_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); -extern int drm_irq_uninstall(drm_device_t *dev); -extern void drm_driver_irq_preinstall(drm_device_t * dev); -extern void drm_driver_irq_postinstall(drm_device_t * dev); -extern void drm_driver_irq_uninstall(drm_device_t * dev); +extern int drm_irq_uninstall(struct drm_device *dev); +extern void drm_driver_irq_preinstall(struct drm_device *dev); +extern void drm_driver_irq_postinstall(struct drm_device *dev); +extern void drm_driver_irq_uninstall(struct drm_device *dev); extern int drm_wait_vblank(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq); -extern void drm_vbl_send_signals(drm_device_t * dev); -extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*)); +extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); +extern void drm_vbl_send_signals(struct drm_device *dev); +extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); /* AGP/GART support (drm_agpsupport.h) */ -extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); -extern int drm_agp_acquire(drm_device_t * dev); +extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); +extern int drm_agp_acquire(struct drm_device *dev); extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_release(drm_device_t *dev); +extern int drm_agp_release(struct drm_device *dev); extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode); +extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info); +extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) @@ -1107,22 +1103,22 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size extern int 
drm_agp_free_memory(DRM_AGP_MEM * handle); extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); -extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev); +extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); -extern int drm_put_dev(drm_device_t * dev); -extern int drm_put_head(drm_head_t * head); +extern int drm_put_dev(struct drm_device *dev); +extern int drm_put_head(struct drm_head * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; -extern drm_head_t **drm_heads; +extern struct drm_head **drm_heads; extern struct drm_sysfs_class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); /* Proc support (drm_proc.h) */ -extern int drm_proc_init(drm_device_t * dev, +extern int drm_proc_init(struct drm_device *dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root); @@ -1131,21 +1127,21 @@ extern int drm_proc_cleanup(int minor, struct proc_dir_entry *dev_root); /* Scatter Gather Support (drm_scatter.h) */ -extern void drm_sg_cleanup(drm_sg_mem_t * entry); +extern void drm_sg_cleanup(struct drm_sg_mem * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); +extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ -extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); -extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info); -extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, +extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); -extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); -extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); +extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); +extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; @@ -1153,26 +1149,26 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name); extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head); + struct drm_head * head); extern void drm_sysfs_device_remove(struct class_device *class_dev); /* * Basic memory manager support (drm_mm.c) */ -extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size, +extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment); -extern void drm_mm_put_block(drm_mm_node_t *cur); -extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, +extern void drm_mm_put_block(struct drm_mm_node *cur); +extern struct drm_mm_node 
*drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match); -extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size); -extern void drm_mm_takedown(drm_mm_t *mm); -extern int drm_mm_clean(drm_mm_t *mm); -extern unsigned long drm_mm_tail_space(drm_mm_t *mm); -extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size); -extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size); - -static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block) +extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); +extern void drm_mm_takedown(struct drm_mm *mm); +extern int drm_mm_clean(struct drm_mm *mm); +extern unsigned long drm_mm_tail_space(struct drm_mm *mm); +extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); +extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); + +static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) { return block->mm; } @@ -1183,14 +1179,14 @@ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token) { - drm_map_list_t *_entry; + struct drm_map_list *_entry; list_for_each_entry(_entry, &dev->maplist, head) if (_entry->user_token == token) return _entry->map; return NULL; } -static __inline__ int drm_device_is_agp(drm_device_t *dev) +static __inline__ int drm_device_is_agp(struct drm_device *dev) { if ( dev->driver->device_is_agp != NULL ) { int err = (*dev->driver->device_is_agp)( dev ); @@ -1203,7 +1199,7 @@ static __inline__ int drm_device_is_agp(drm_device_t *dev) return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); } -static __inline__ int drm_device_is_pcie(drm_device_t *dev) +static __inline__ int drm_device_is_pcie(struct drm_device *dev) { return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); } diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index f134563a..57c88638 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -48,7 +48,7 @@ * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. */ -int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) +int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; @@ -73,16 +73,16 @@ EXPORT_SYMBOL(drm_agp_info); int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_info_t info; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_agp_info info; int err; err = drm_agp_info(dev, &info); if (err) return err; - if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info))) + if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info))) return -EFAULT; return 0; } @@ -96,7 +96,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. 
*/ -int drm_agp_acquire(drm_device_t * dev) +int drm_agp_acquire(struct drm_device * dev) { #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) int retcode; @@ -134,9 +134,9 @@ EXPORT_SYMBOL(drm_agp_acquire); int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; - return drm_agp_acquire( (drm_device_t *) priv->head->dev ); + return drm_agp_acquire( (struct drm_device *) priv->head->dev ); } /** @@ -147,7 +147,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, * * Verifies the AGP device has been acquired and calls \c agp_backend_release. */ -int drm_agp_release(drm_device_t *dev) +int drm_agp_release(struct drm_device *dev) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -165,8 +165,8 @@ EXPORT_SYMBOL(drm_agp_release); int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; return drm_agp_release(dev); } @@ -181,7 +181,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ -int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) +int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -201,12 +201,12 @@ EXPORT_SYMBOL(drm_agp_enable); int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_mode_t mode; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_agp_mode mode; - if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode))) + if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode))) return -EFAULT; return drm_agp_enable(dev, mode); @@ -224,9 +224,9 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. 
*/ -int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; @@ -262,10 +262,10 @@ EXPORT_SYMBOL(drm_agp_alloc); int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; - drm_agp_buffer_t __user *argp = (void __user *)arg; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_agp_buffer request; + struct drm_agp_buffer __user *argp = (void __user *)arg; int err; if (copy_from_user(&request, argp, sizeof(request))) @@ -276,7 +276,7 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, return err; if (copy_to_user(argp, &request, sizeof(request))) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == request.handle) break; @@ -299,10 +299,10 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, * * Walks through drm_agp_head::memory until finding a matching handle. */ -static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, +static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, unsigned long handle) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == handle) @@ -323,9 +323,9 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. */ -int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int ret; if (!dev->agp || !dev->agp->acquired) @@ -345,12 +345,12 @@ EXPORT_SYMBOL(drm_agp_unbind); int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_agp_binding request; if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_unbind(dev, &request); @@ -370,9 +370,9 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. 
*/ -int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int retcode; int page; @@ -396,12 +396,12 @@ EXPORT_SYMBOL(drm_agp_bind); int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_agp_binding request; if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_bind(dev, &request); @@ -422,9 +422,9 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ -int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -446,12 +446,12 @@ EXPORT_SYMBOL(drm_agp_free); int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_agp_buffer request; if (copy_from_user - (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_buffer __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_free(dev, &request); @@ -467,9 +467,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp, * via the inter_module_* functions. Creates and initializes a drm_agp_head * structure. */ -drm_agp_head_t *drm_agp_init(drm_device_t *dev) +struct drm_agp_head *drm_agp_init(struct drm_device *dev) { - drm_agp_head_t *head = NULL; + struct drm_agp_head *head = NULL; if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) return NULL; @@ -554,16 +554,16 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) #define AGP_REQUIRED_MAJOR 0 #define AGP_REQUIRED_MINOR 102 -static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) { return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); } -static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, +static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages, struct page **pages) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); struct page **cur_page, **last_page = pages + num_pages; DRM_AGP_MEM *mem; @@ -590,12 +590,12 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, return 0; } -static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, unsigned long offset, int cached) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; int ret; @@ -612,10 +612,10 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, return ret; } -static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_DEBUG("drm_agp_unbind_ttm\n"); if (agp_be->mem->is_bound) @@ -624,10 +624,10 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { return 0; } -static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; DRM_DEBUG("drm_agp_clear_ttm\n"); @@ -640,13 +640,13 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { agp_be->mem = NULL; } -static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; if (backend) { DRM_DEBUG("drm_agp_destroy_ttm\n"); - agp_be = container_of(backend, drm_agp_ttm_backend_t, backend); + agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); if (agp_be) { if (agp_be->mem) { backend->func->clear(backend); @@ -656,7 +656,7 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { } } -static drm_ttm_backend_func_t agp_ttm_backend = +static struct drm_ttm_backend_func agp_ttm_backend = { .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, .populate = drm_agp_populate, @@ -666,10 +666,10 @@ static drm_ttm_backend_func_t agp_ttm_backend = .destroy = drm_agp_destroy_ttm, }; -drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev) +struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; struct agp_kern_info *info; if (!dev->agp) { diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index 6948d858..4c48d872 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -45,15 +45,15 @@ * the one with matching magic number, while holding the drm_device::struct_mutex * lock. 
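[Editor's note] The TTM glue above is table-driven through struct drm_ttm_backend_func. The sketch below assumes a driver obtained the backend from drm_agp_init_ttm() and shows how the populate/clear/destroy hooks visible in this hunk are dispatched; the `bind` member name is an assumption, mirrored from drm_agp_bind_ttm().

    /* Illustrative only: drive the AGP TTM backend through its function table. */
    static int example_use_agp_backend(struct drm_device *dev,
                                       unsigned long num_pages,
                                       struct page **pages,
                                       unsigned long offset)
    {
            struct drm_ttm_backend *be = drm_agp_init_ttm(dev);
            int ret;

            if (!be)
                    return -EINVAL;

            ret = be->func->populate(be, num_pages, pages);
            if (ret) {
                    be->func->destroy(be);
                    return ret;
            }

            ret = be->func->bind(be, offset, 0 /* uncached */);  /* member name assumed */
            if (ret) {
                    be->func->clear(be);
                    be->func->destroy(be);
            }
            return ret;
    }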
*/ -static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) +static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) { - drm_file_t *retval = NULL; - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_file *retval = NULL; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; - mutex_lock(&dev->struct_mutex); - if (!drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) { - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + mutex_lock(&dev->struct_mutex); + if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); retval = pt->priv; } mutex_unlock(&dev->struct_mutex); @@ -71,10 +71,10 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) * associated the magic number hash key in drm_device::magiclist, while holding * the drm_device::struct_mutex lock. */ -static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, +static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, drm_magic_t magic) { - drm_magic_entry_t *entry; + struct drm_magic_entry *entry; DRM_DEBUG("%d\n", magic); @@ -101,10 +101,10 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. */ -static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) +static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) { - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; DRM_DEBUG("%d\n", magic); @@ -113,7 +113,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) mutex_unlock(&dev->struct_mutex); return -EINVAL; } - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_ht_remove_item(&dev->magiclist, hash); list_del(&pt->head); mutex_unlock(&dev->struct_mutex); @@ -141,9 +141,9 @@ int drm_getmagic(struct inode *inode, struct file *filp, { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_auth auth; /* Find unique magic */ if (priv->magic) { @@ -161,7 +161,7 @@ int drm_getmagic(struct inode *inode, struct file *filp, } DRM_DEBUG("%u\n", auth.magic); - if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth))) + if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth))) return -EFAULT; return 0; } @@ -180,12 +180,12 @@ int drm_getmagic(struct inode *inode, struct file *filp, int drm_authmagic(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; - drm_file_t *file; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_auth auth; + struct drm_file *file; - if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth))) + if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth))) return -EFAULT; DRM_DEBUG("%u\n", auth.magic); if ((file = drm_find_file(dev, auth.magic))) { diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index ab257825..374be04e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,10 
+49,10 @@ * */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object * bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -63,9 +63,9 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. */ -void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); DRM_ASSERT_LOCKED(&bo->mutex); @@ -74,9 +74,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) list_add_tail(&bo->pinned_lru, &man->pinned); } -void drm_bo_add_to_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -89,7 +89,7 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) } } -static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -133,9 +133,9 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) * Call bo->mutex locked. */ -static int drm_bo_add_ttm(drm_buffer_object_t * bo) +static int drm_bo_add_ttm(struct drm_buffer_object * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; bo->ttm = NULL; @@ -164,16 +164,16 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } -static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int evict, int no_wait) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); - drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; - drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci) @@ -201,9 +201,9 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { - drm_bo_mem_reg_t *old_mem = &bo->mem; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; *old_mem = *mem; mem->mm_node = NULL; @@ -266,7 +266,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, * Wait until the buffer is idle. 
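[Editor's note] Note the type change in this hunk: the saved flags and mask are widened to 64 bits. When such masks are logged, the patch casts them explicitly for the printk format, as in this short sketch:

    /* Minimal sketch: log a buffer object's 64-bit flags the way the patch does. */
    static void example_log_bo_flags(struct drm_buffer_object *bo)
    {
            /* The cast is needed because uint64_t is not necessarily
             * unsigned long long on every architecture. */
            DRM_DEBUG("Flags 0x%016llx, Mask 0x%016llx\n",
                      (unsigned long long) bo->mem.flags,
                      (unsigned long long) bo->mem.mask);
    }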
*/ -int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait) { int ret; @@ -292,10 +292,10 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; if (bo->fence) { if (bm->nice_mode) { @@ -327,10 +327,10 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) * fence object and removing from lru lists and memory managers. */ -static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -389,10 +389,10 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) * to the buffer object. Then destroy it. */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object * bo) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -438,19 +438,19 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) * Call dev->struct_mutex locked. */ -static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) +static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; struct list_head *list, *next; list_for_each_safe(list, next, &bm->ddestroy) { - entry = list_entry(list, drm_buffer_object_t, ddestroy); + entry = list_entry(list, struct drm_buffer_object, ddestroy); nentry = NULL; if (next != &bm->ddestroy) { - nentry = list_entry(next, drm_buffer_object_t, + nentry = list_entry(next, struct drm_buffer_object, ddestroy); atomic_inc(&nentry->usage); } @@ -470,12 +470,12 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) - drm_device_t *dev = (drm_device_t *) data; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = (struct drm_device *) data; + struct drm_buffer_manager *bm = &dev->bm; #else - drm_buffer_manager_t *bm = - container_of(work, drm_buffer_manager_t, wq.work); - drm_device_t *dev = container_of(bm, drm_device_t, bm); + struct drm_buffer_manager *bm = + container_of(work, struct drm_buffer_manager, wq.work); + struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif DRM_DEBUG("Delayed delete Worker\n"); @@ -493,7 +493,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; bo = NULL; @@ -505,10 +505,10 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) } } -static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * 
uo) +static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -516,10 +516,10 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) +static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; - drm_device_t *dev = tmp_bo->dev; + struct drm_device *dev = tmp_bo->dev; *bo = NULL; if (atomic_dec_and_test(&tmp_bo->usage)) { @@ -535,16 +535,16 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) * and deregister fence object usage. */ -int drm_fence_buffer_objects(drm_file_t * priv, +int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence) + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence) { - drm_device_t *dev = priv->head->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = priv->head->dev; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; uint32_t fence_type = 0; int count = 0; int ret = 0; @@ -602,7 +602,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, l = f_list.next; while (l != &f_list) { prefetch(l->next); - entry = list_entry(l, drm_buffer_object_t, lru); + entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -635,12 +635,12 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); * bo->mutex locked */ -static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, int no_wait) { int ret = 0; - drm_device_t *dev = bo->dev; - drm_bo_mem_reg_t evict_mem; + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. 
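[Editor's note] drm_bo_usage_deref_unlocked() takes a pointer to the caller's pointer and clears it, so a reference can only be dropped once. A minimal sketch of the take/release pattern used throughout this file:

    /* Sketch: temporarily hold a reference on a buffer object taken from a list. */
    static void example_take_and_drop_ref(struct drm_buffer_object *entry)
    {
            struct drm_buffer_object *bo = entry;

            atomic_inc(&bo->usage);            /* take a reference */

            /* ... operate on the buffer ... */

            drm_bo_usage_deref_unlocked(&bo);  /* drops the ref and sets bo = NULL */
    }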
@@ -705,14 +705,14 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, return ret; } -static int drm_bo_mem_force_space(drm_device_t * dev, - drm_bo_mem_reg_t * mem, +static int drm_bo_mem_force_space(struct drm_device * dev, + struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) { - drm_mm_node_t *node; - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *entry; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_mm_node *node; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + struct drm_mem_type_manager *man = &bm->man[mem_type]; struct list_head *lru; unsigned long num_pages = mem->num_pages; int ret; @@ -728,7 +728,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, if (lru->next == lru) break; - entry = list_entry(lru->next, drm_buffer_object_t, lru); + entry = list_entry(lru->next, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -754,7 +754,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, uint32_t mask, uint32_t * res_mask) { @@ -791,12 +791,12 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, return 1; } -int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait) +int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -806,7 +806,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, int type_found = 0; int type_ok = 0; int has_eagain = 0; - drm_mm_node_t *node = NULL; + struct drm_mm_node *node = NULL; int ret; mem->mm_node = NULL; @@ -883,8 +883,8 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(drm_buffer_object_t * bo, - uint32_t new_mask, uint32_t hint) +static int drm_bo_new_mask(struct drm_buffer_object * bo, + uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -921,11 +921,11 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. */ -drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { - drm_user_object_t *uo; - drm_buffer_object_t *bo; + struct drm_user_object *uo; + struct drm_buffer_object *bo; uo = drm_lookup_user_object(priv, handle); @@ -939,7 +939,7 @@ drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, return NULL; } - bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + bo = drm_user_object_entry(uo, struct drm_buffer_object, base); atomic_inc(&bo->usage); return bo; } @@ -950,9 +950,9 @@ drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, * Doesn't do any fence flushing as opposed to the drm_bo_busy function. 
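[Editor's note] drm_lookup_buffer_object() must be called with dev->struct_mutex held and returns the object with its usage count already raised, which is how the info and wait handlers later in this file use it. A condensed sketch of that lookup pattern:

    /* Sketch: resolve a user handle to a referenced buffer object. */
    static struct drm_buffer_object *
    example_lookup(struct drm_file *priv, uint32_t handle)
    {
            struct drm_device *dev = priv->head->dev;
            struct drm_buffer_object *bo;

            mutex_lock(&dev->struct_mutex);
            bo = drm_lookup_buffer_object(priv, handle, 1 /* check_owner */);
            mutex_unlock(&dev->struct_mutex);

            /* NULL means the handle was invalid or owned by another file;
             * otherwise the caller must drop the reference when done. */
            return bo;
    }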
*/ -static int drm_bo_quick_busy(drm_buffer_object_t * bo) +static int drm_bo_quick_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -970,9 +970,9 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. */ -static int drm_bo_busy(drm_buffer_object_t * bo) +static int drm_bo_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -990,7 +990,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo) return 0; } -static int drm_bo_read_cached(drm_buffer_object_t * bo) +static int drm_bo_read_cached(struct drm_buffer_object * bo) { int ret = 0; @@ -1004,7 +1004,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) * Wait until a buffer is unmapped. */ -static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) { int ret = 0; @@ -1020,7 +1020,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) return ret; } -static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object * bo) { int ret; @@ -1042,7 +1042,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo) * the buffer "unfenced" after validating, but before fencing. */ -static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, int eagain_if_wait) { int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1075,8 +1075,8 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, * Bo locked. */ -static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, - drm_bo_arg_reply_t * rep) +static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, + struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; rep->flags = bo->mem.flags; @@ -1102,12 +1102,12 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, * unregistered. */ -static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, +static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, uint32_t map_flags, unsigned hint, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { - drm_buffer_object_t *bo; - drm_device_t *dev = priv->head->dev; + struct drm_buffer_object *bo; + struct drm_device *dev = priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1183,11 +1183,11 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) +static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; - drm_buffer_object_t *bo; - drm_ref_object_t *ro; + struct drm_device *dev = priv->head->dev; + struct drm_buffer_object *bo; + struct drm_ref_object *ro; int ret = 0; mutex_lock(&dev->struct_mutex); @@ -1215,12 +1215,12 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) * Call struct-sem locked. 
*/ -static void drm_buffer_user_object_unmap(drm_file_t * priv, - drm_user_object_t * uo, - drm_ref_t action) +static void drm_buffer_user_object_unmap(struct drm_file * priv, + struct drm_user_object * uo, + enum drm_ref_type action) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); /* * We DON'T want to take the bo->lock here, because we want to @@ -1238,13 +1238,13 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. */ -int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; /* * Flush outstanding fences. */ @@ -1300,7 +1300,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; } -static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) { uint32_t flag_diff = (mem->mask ^ mem->flags); @@ -1318,10 +1318,10 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1351,7 +1351,8 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) return 0; } - DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + DRM_ERROR("Illegal fake buffer flags 0x%016llx\n", + (unsigned long long) mem->mask); return -EINVAL; } @@ -1359,23 +1360,46 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) * bo locked. */ -static int drm_buffer_object_validate(drm_buffer_object_t * bo, +static int drm_buffer_object_validate(struct drm_buffer_object * bo, + uint32_t fence_class, int move_unfenced, int no_wait) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + uint32_t ftype; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, - bo->mem.flags); - ret = - driver->fence_type(bo, &bo->fence_class, &bo->fence_type); + DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n", + (unsigned long long) bo->mem.mask, + (unsigned long long) bo->mem.flags); + + ret = driver->fence_type(bo, &ftype); + if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; } + /* + * We're switching command submission mechanism, + * or cannot simply rely on the hardware serializing for us. + * + * Wait for buffer idle. 
+ */ + + if ((fence_class != bo->fence_class) || + ((ftype ^ bo->fence_type) & bo->fence_type)) { + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret) + return ret; + + } + + bo->fence_class = fence_class; + bo->fence_type = ftype; ret = drm_bo_wait_unmapped(bo, no_wait); if (ret) return ret; @@ -1465,12 +1489,14 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return 0; } -static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, - uint32_t flags, uint32_t mask, uint32_t hint, - drm_bo_arg_reply_t * rep) +static int drm_bo_handle_validate(struct drm_file * priv, + uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, uint32_t hint, + struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1493,7 +1519,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, goto out; ret = - drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), + drm_buffer_object_validate(bo, fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), no_wait); drm_bo_fill_rep_arg(bo, rep); @@ -1505,11 +1532,11 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, - drm_bo_arg_reply_t * rep) +static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, + struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); @@ -1527,11 +1554,12 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, - uint32_t hint, drm_bo_arg_reply_t * rep) +static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, + uint32_t hint, + struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; @@ -1559,17 +1587,17 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, return ret; } -int drm_buffer_object_create(drm_device_t *dev, +int drm_buffer_object_create(struct drm_device *dev, unsigned long size, - drm_bo_type_t type, - uint32_t mask, + enum drm_bo_type type, + uint64_t mask, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, - drm_buffer_object_t ** buf_obj) + struct drm_buffer_object ** buf_obj) { - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *bo; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *bo; int ret = 0; unsigned long num_pages; @@ -1614,8 +1642,8 @@ int drm_buffer_object_create(drm_device_t *dev, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->mem.flags = 0; - bo->mem.mask = 0; + bo->mem.flags = 0ULL; + bo->mem.mask = 0ULL; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1629,7 +1657,7 @@ int drm_buffer_object_create(drm_device_t *dev, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); + ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1644,10 +1672,10 @@ int drm_buffer_object_create(drm_device_t *dev, return ret; } -static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, +static int drm_bo_add_user_object(struct drm_file * priv, 
struct drm_buffer_object * bo, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -1665,21 +1693,20 @@ static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, return ret; } -static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) +static int drm_bo_lock_test(struct drm_device * dev, struct file *filp) { LOCK_TEST_WITH_RETURN(dev, filp); return 0; } -int drm_bo_ioctl(DRM_IOCTL_ARGS) +int drm_bo_op_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_info_rep rep; unsigned long next; - drm_user_object_t *uo; - drm_buffer_object_t *entry; + int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1694,97 +1721,29 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) continue; } - rep.ret = 0; + ret = 0; switch (req->op) { - case drm_bo_create: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) - break; - rep.ret = - drm_buffer_object_create(priv->head->dev, - req->size, - req->type, - req->mask, - req->hint, - req->page_alignment, - req->buffer_start, &entry); - if (rep.ret) - break; - - rep.ret = - drm_bo_add_user_object(priv, entry, - req-> - mask & - DRM_BO_FLAG_SHAREABLE); - if (rep.ret) - drm_bo_usage_deref_unlocked(&entry); - - if (rep.ret) - break; - - mutex_lock(&entry->mutex); - drm_bo_fill_rep_arg(entry, &rep); - mutex_unlock(&entry->mutex); - break; - case drm_bo_unmap: - rep.ret = drm_buffer_object_unmap(priv, req->handle); - break; - case drm_bo_map: - rep.ret = drm_buffer_object_map(priv, req->handle, - req->mask, - req->hint, &rep); - break; - case drm_bo_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, req->handle); - if (!uo || (uo->type != drm_buffer_type) - || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - rep.ret = -EINVAL; - break; - } - rep.ret = drm_remove_user_object(priv, uo); - mutex_unlock(&dev->struct_mutex); - break; - case drm_bo_reference: - rep.ret = drm_user_object_ref(priv, req->handle, - drm_buffer_type, &uo); - if (rep.ret) - break; - - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_unreference: - rep.ret = drm_user_object_unref(priv, req->handle, - drm_buffer_type); - break; case drm_bo_validate: - rep.ret = drm_bo_lock_test(dev, filp); - - if (rep.ret) + ret = drm_bo_lock_test(dev, filp); + if (ret) break; - rep.ret = - drm_bo_handle_validate(priv, req->handle, req->mask, - req->arg_handle, req->hint, - &rep); + ret = drm_bo_handle_validate(priv, req->bo_req.handle, + req->bo_req.fence_class, + req->bo_req.flags, + req->bo_req.mask, + req->bo_req.hint, + &rep); break; case drm_bo_fence: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) - break; - /**/ break; - case drm_bo_info: - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_wait_idle: - rep.ret = drm_bo_handle_wait(priv, req->handle, - req->hint, &rep); + ret = -EINVAL; + DRM_ERROR("Function is not implemented yet.\n"); break; case drm_bo_ref_fence: - rep.ret = -EINVAL; + ret = -EINVAL; DRM_ERROR("Function is not implemented yet.\n"); + break; default: - rep.ret = -EINVAL; + ret = -EINVAL; } next = arg.next; @@ -1792,17 +1751,221 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) * A signal interrupted us. Make sure the ioctl is restartable. 
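[Editor's note] drm_bo_handle_validate() gained a fence_class argument and 64-bit flags/mask, and drm_bo_op_ioctl() now pulls those fields out of the new struct drm_bo_op_req. A sketch of a kernel-side call with the reordered arguments, mirroring the validate case above:

    /* Sketch: validate a buffer into the placement described by flags/mask. */
    static int example_validate(struct drm_file *priv, uint32_t handle,
                                uint32_t fence_class, uint64_t flags,
                                uint64_t mask, uint32_t hint)
    {
            struct drm_bo_info_rep rep;

            return drm_bo_handle_validate(priv, handle, fence_class,
                                          flags, mask, hint, &rep);
    }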
*/ - if (rep.ret == -EAGAIN) + if (ret == -EAGAIN) return -EAGAIN; arg.handled = 1; - arg.d.rep = rep; + arg.d.rep.ret = ret; + arg.d.rep.bo_info = rep; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); data = next; } while (data); return 0; } +int drm_bo_create_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_create_arg arg; + struct drm_bo_create_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_buffer_object *entry; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_lock_test(dev, filp); + if (ret) + goto out; + + ret = drm_buffer_object_create(priv->head->dev, + req->size, req->type, req->mask, + req->hint, req->page_alignment, + req->buffer_start, &entry); + if (ret) + goto out; + + ret = drm_bo_add_user_object(priv, entry, + req->mask & DRM_BO_FLAG_SHAREABLE); + if (ret) { + drm_bo_usage_deref_unlocked(&entry); + goto out; + } + + mutex_lock(&entry->mutex); + drm_bo_fill_rep_arg(entry, rep); + mutex_unlock(&entry->mutex); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); +out: + return ret; +} + + +int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_handle_arg arg; + struct drm_user_object *uo; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, arg.handle); + if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(priv, uo); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int drm_bo_map_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_buffer_object_map(priv, req->handle, req->mask, + req->hint, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_handle_arg arg; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_buffer_object_unmap(priv, arg.handle); + return ret; +} + + +int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_user_object *uo; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_user_object_ref(priv, req->handle, + drm_buffer_type, &uo); + if (ret) + return ret; + + ret = drm_bo_handle_info(priv, req->handle, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct 
drm_bo_handle_arg arg; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type); + return ret; +} + +int drm_bo_info_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_handle_info(priv, req->handle, rep); + if (ret) + return ret; + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_handle_wait(priv, req->handle, + req->hint, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + + + /** *Clean the unfenced list and put on regular LRU. *This is part of the memory manager cleanup and should only be @@ -1810,18 +1973,18 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) *Call dev->struct_sem locked. */ -static void drm_bo_clean_unfenced(drm_device_t *dev) +static void drm_bo_clean_unfenced(struct drm_device *dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; head = &bm->unfenced; list = head->next; while(list != head) { prefetch(list->next); - entry = list_entry(list, drm_buffer_object_t, lru); + entry = list_entry(list, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -1836,11 +1999,11 @@ static void drm_bo_clean_unfenced(drm_device_t *dev) } } -static int drm_bo_leave_list(drm_buffer_object_t * bo, +static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; mutex_lock(&bo->mutex); @@ -1887,20 +2050,20 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, } -static drm_buffer_object_t *drm_bo_entry(struct list_head *list, +static struct drm_buffer_object *drm_bo_entry(struct list_head *list, int pinned_list) { if (pinned_list) - return list_entry(list, drm_buffer_object_t, pinned_lru); + return list_entry(list, struct drm_buffer_object, pinned_lru); else - return list_entry(list, drm_buffer_object_t, lru); + return list_entry(list, struct drm_buffer_object, lru); } /* * dev->struct_mutex locked. 
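[Editor's note] The multiplexed buffer-object ioctl is replaced above by one ioctl per operation (create, destroy, map, unmap, reference, unreference, info, wait_idle). The create path reduces to the sequence below, shown as a condensed sketch of what drm_bo_create_ioctl() does once the argument struct has been copied in:

    /* Sketch: create a buffer object and expose it to the calling file. */
    static int example_create(struct drm_file *priv,
                              struct drm_bo_create_req *req,
                              struct drm_bo_info_rep *rep)
    {
            struct drm_buffer_object *entry;
            int ret;

            ret = drm_buffer_object_create(priv->head->dev,
                                           req->size, req->type, req->mask,
                                           req->hint, req->page_alignment,
                                           req->buffer_start, &entry);
            if (ret)
                    return ret;

            ret = drm_bo_add_user_object(priv, entry,
                                         req->mask & DRM_BO_FLAG_SHAREABLE);
            if (ret) {
                    drm_bo_usage_deref_unlocked(&entry);
                    return ret;
            }

            mutex_lock(&entry->mutex);
            drm_bo_fill_rep_arg(entry, rep);     /* report handle, flags, etc. */
            mutex_unlock(&entry->mutex);
            return 0;
    }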
*/ -static int drm_bo_force_list_clean(drm_device_t * dev, +static int drm_bo_force_list_clean(struct drm_device * dev, struct list_head *head, unsigned mem_type, int free_pinned, @@ -1908,7 +2071,7 @@ static int drm_bo_force_list_clean(drm_device_t * dev, int pinned_list) { struct list_head *list, *next, *prev; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; int ret; int do_restart; @@ -1965,10 +2128,10 @@ restart: return 0; } -int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) +int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -2007,11 +2170,11 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) *point since we have the hardware lock. */ -static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) +static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); @@ -2033,13 +2196,13 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return ret; } -int drm_bo_init_mm(drm_device_t * dev, +int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); @@ -2082,12 +2245,12 @@ EXPORT_SYMBOL(drm_bo_init_mm); * any clients still running when we set the initialized flag to zero. */ -int drm_bo_driver_finish(drm_device_t * dev) +int drm_bo_driver_finish(struct drm_device * dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2133,10 +2296,10 @@ int drm_bo_driver_finish(drm_device_t * dev) return ret; } -int drm_bo_driver_init(drm_device_t * dev) +int drm_bo_driver_init(struct drm_device * dev) { - drm_bo_driver_t *driver = dev->driver->bo_driver; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; mutex_lock(&dev->bm.init_mutex); @@ -2175,11 +2338,67 @@ EXPORT_SYMBOL(drm_bo_driver_init); int drm_mm_init_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; + struct drm_mm_init_arg arg; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; - int ret = 0; - drm_mm_init_arg_t arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = -EINVAL; + if (arg.magic != DRM_BO_INIT_MAGIC) { + DRM_ERROR("You are using an old libdrm that is not compatible with\n" + "\tthe kernel DRM module. 
Please upgrade your libdrm.\n"); + return -EINVAL; + } + if (arg.major != DRM_BO_INIT_MAJOR) { + DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" + "\tversion don't match. Got %d, expected %d,\n", + arg.major, DRM_BO_INIT_MAJOR); + return -EINVAL; + } + if (arg.minor > DRM_BO_INIT_MINOR) { + DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" + "\tlibdrm buffer object interface version is %d.%d.\n" + "\tkernel DRM buffer object interface version is %d.%d\n", + arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); + return -EINVAL; + } + + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized.\n"); + goto out; + } + if (arg.mem_type == 0) { + DRM_ERROR("System memory buffers already initialized.\n"); + goto out; + } + ret = drm_bo_init_mm(dev, arg.mem_type, + arg.p_offset, arg.p_size); + +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); @@ -2188,59 +2407,78 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - switch (arg.req.op) { - case mm_init: - ret = -EINVAL; - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized.\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR - ("System memory buffers already initialized.\n"); - break; - } - ret = drm_bo_init_mm(dev, arg.req.mem_type, - arg.req.p_offset, arg.req.p_size); - break; - case mm_takedown: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = -EINVAL; - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR("No takedown for System memory buffers.\n"); - break; - } - ret = 0; - if (drm_bo_clean_mm(dev, arg.req.mem_type)) { - DRM_ERROR("Memory manager type %d not clean. " - "Delaying takedown\n", arg.req.mem_type); - } - break; - case mm_lock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_mm(dev, arg.req.mem_type); - break; - case mm_unlock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = 0; - break; - default: - DRM_ERROR("Function not implemented yet\n"); + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = -EINVAL; + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized\n"); + goto out; + } + if (arg.mem_type == 0) { + DRM_ERROR("No takedown for System memory buffers.\n"); + goto out; + } + ret = 0; + if (drm_bo_clean_mm(dev, arg.mem_type)) { + DRM_ERROR("Memory manager type %d not clean. 
" + "Delaying takedown\n", arg.mem_type); + } +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; } + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_mm(dev, arg.mem_type); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = 0; + mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->bm.init_mutex); if (ret) @@ -2254,10 +2492,10 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) @@ -2288,13 +2526,13 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(drm_device_t * dev, - drm_bo_mem_reg_t * mem, +int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; *bus_size = 0; if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) @@ -2317,9 +2555,9 @@ int drm_bo_pci_offset(drm_device_t * dev, * Call bo->mutex locked. 
*/ -void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +void drm_bo_unmap_virtual(struct drm_buffer_object * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; @@ -2329,11 +2567,11 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); if (list->user_token) { @@ -2355,11 +2593,11 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_bo_usage_deref_locked(&bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); @@ -2391,7 +2629,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) return -ENOMEM; } - list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; return 0; } diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 8ef2a8ff..5e21173c 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -35,9 +35,9 @@ * have not been requested to free also pinned regions. */ -static void drm_bo_free_old_node(drm_buffer_object_t * bo) +static void drm_bo_free_old_node(struct drm_buffer_object * bo) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); @@ -48,11 +48,11 @@ static void drm_bo_free_old_node(drm_buffer_object_t * bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -102,11 +102,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void **virtual) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -137,11 +137,11 @@ int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm; + struct drm_mem_type_manager *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; @@ -164,7 +164,7 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -182,7 +182,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -200,14 +200,14 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) return 0; } -int drm_bo_move_memcpy(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_device_t *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; - drm_bo_mem_reg_t old_copy = *old_mem; + struct drm_device *dev = bo->dev; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + struct drm_bo_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -281,12 +281,12 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. */ -int drm_buffer_object_transfer(drm_buffer_object_t * bo, - drm_buffer_object_t ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object * bo, + struct drm_buffer_object ** new_obj) { - drm_buffer_object_t *fbo; - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_object *fbo; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) @@ -323,20 +323,20 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, * We cannot restart until it has finished. 
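[Editor's note] drm_mem_reg_ioremap() and drm_mem_reg_iounmap() must stay paired around any CPU access to a fixed memory region, as drm_bo_move_memcpy() does for both the old and the new placement. A stripped-down sketch:

    /* Sketch: map a memory region, touch it from the CPU, and unmap it again. */
    static int example_cpu_access(struct drm_device *dev,
                                  struct drm_bo_mem_reg *mem)
    {
            void *virtual;
            int ret;

            ret = drm_mem_reg_ioremap(dev, mem, &virtual);
            if (ret)
                    return ret;

            if (virtual)
                    memset(virtual, 0, mem->num_pages << PAGE_SHIFT);

            drm_mem_reg_iounmap(dev, mem, virtual);
            return 0;
    }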
*/ -int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, +int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, - uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) + uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) { - drm_device_t *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_device *dev = bo->dev; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; - drm_buffer_object_t *old_obj; + struct drm_buffer_object *old_obj; if (bo->fence) drm_fence_usage_deref_unlocked(&bo->fence); diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 2f3e4b2a..c1e23b5c 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -36,21 +36,21 @@ #include <linux/vmalloc.h> #include "drmP.h" -unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { return pci_resource_start(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_start); -unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { return pci_resource_len(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_len); -drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) +struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && map->type == entry->map->type && ((entry->map->offset == map->offset) || @@ -63,7 +63,7 @@ drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_find_matching_map); -static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, +static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, unsigned long user_token, int hashed_handle) { int use_hashed_handle; @@ -101,12 +101,13 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where * applicable and if supported by the kernel. 
*/ -static int drm_addmap_core(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_map_list_t ** maplist) +static int drm_addmap_core(struct drm_device *dev, unsigned int offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, + struct drm_map_list **maplist) { - drm_map_t *map; - drm_map_list_t *list; + struct drm_map *map; + struct drm_map_list *list; drm_dma_handle_t *dmah; unsigned long user_token; int ret; @@ -212,7 +213,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } break; case _DRM_AGP: { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int valid = 0; if (!drm_core_has_AGP(dev)) { @@ -310,11 +311,11 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, return 0; } -int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr) +int drm_addmap(struct drm_device *dev, unsigned int offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr) { - drm_map_list_t *list; + struct drm_map_list *list; int rc; rc = drm_addmap_core(dev, offset, size, type, flags, &list); @@ -328,11 +329,11 @@ EXPORT_SYMBOL(drm_addmap); int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t map; - drm_map_list_t *maplist; - drm_map_t __user *argp = (void __user *)arg; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map map; + struct drm_map_list *maplist; + struct drm_map __user *argp = (void __user *)arg; int err; if (!(filp->f_mode & 3)) @@ -351,7 +352,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, if (err) return err; - if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) + if (copy_to_user(argp, maplist->map, sizeof(struct drm_map))) return -EFAULT; /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ @@ -367,7 +368,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * \param inode device inode. * \param filp file pointer. * \param cmd command. - * \param arg pointer to a drm_map_t structure. + * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. 
* * Searches the map on drm_device::maplist, removes it from the list, see if @@ -376,9 +377,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * * \sa drm_addmap */ -int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *r_list = NULL, *list_t; + struct drm_map_list *r_list = NULL, *list_t; drm_dma_handle_t dmah; int found = 0; @@ -433,7 +434,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_rmmap_locked); -int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { int ret; @@ -457,14 +458,14 @@ EXPORT_SYMBOL(drm_rmmap); int drm_rmmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map request; drm_local_map_t *map = NULL; - drm_map_list_t *r_list; + struct drm_map_list *r_list; int ret; - if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) { + if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { return -EFAULT; } @@ -512,7 +513,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, * * Frees any pages and buffers associated with the given entry. */ -static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) +static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry) { int i; @@ -549,20 +550,20 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) /** * Add AGP buffers for DMA transfers * - * \param dev drm_device_t to which the buffers are to be added. - * \param request pointer to a drm_buf_desc_t describing the request. + * \param dev struct drm_device to which the buffers are to be added. + * \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * * After some sanity checks creates a drm_buf structure for each buffer and * reallocates the buffer list of the same size order to accommodate the new * buffers. 
*/ -int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; - drm_agp_mem_t *agp_entry; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct drm_agp_mem *agp_entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -573,7 +574,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) int total; int byte_count; int i, valid; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!dma) return -EINVAL; @@ -727,24 +728,24 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ -int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int count; int order; int size; int total; int page_order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_dma_handle_t *dmah; - drm_buf_t *buf; + struct drm_buf *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; @@ -953,11 +954,11 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) } EXPORT_SYMBOL(drm_addbufs_pci); -static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) +static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -968,7 +969,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; @@ -1115,11 +1116,11 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) return 0; } -int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) { - drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -1130,7 +1131,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) return -EINVAL; @@ -1283,7 +1284,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); * \param inode device inode. * \param filp file pointer. * \param cmd command. - * \param arg pointer to a drm_buf_desc_t request. + * \param arg pointer to a struct drm_buf_desc request. * \return zero on success or a negative number on failure. 
* * According with the memory type specified in drm_buf_desc::flags and the @@ -1294,15 +1295,15 @@ EXPORT_SYMBOL(drm_addbufs_fb); int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_buf_desc_t request; - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_buf_desc request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, + if (copy_from_user(&request, (struct drm_buf_desc __user *) arg, sizeof(request))) return -EFAULT; @@ -1347,11 +1348,11 @@ int drm_addbufs(struct inode *inode, struct file *filp, int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_info_t request; - drm_buf_info_t __user *argp = (void __user *)arg; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_info request; + struct drm_buf_info __user *argp = (void __user *)arg; int i; int count; @@ -1382,10 +1383,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, if (request.count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { - drm_buf_desc_t __user *to = + struct drm_buf_desc __user *to = &request.list[count]; - drm_buf_entry_t *from = &dma->bufs[i]; - drm_freelist_t *list = &dma->bufs[i].freelist; + struct drm_buf_entry *from = &dma->bufs[i]; + struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, sizeof(from->buf_count)) || @@ -1435,12 +1436,12 @@ int drm_infobufs(struct inode *inode, struct file *filp, int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_desc request; int order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1449,7 +1450,7 @@ int drm_markbufs(struct inode *inode, struct file *filp, return -EINVAL; if (copy_from_user(&request, - (drm_buf_desc_t __user *) arg, sizeof(request))) + (struct drm_buf_desc __user *) arg, sizeof(request))) return -EFAULT; DRM_DEBUG("%d, %d, %d\n", @@ -1485,13 +1486,13 @@ int drm_markbufs(struct inode *inode, struct file *filp, int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_free_t request; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_free request; int i; int idx; - drm_buf_t *buf; + struct drm_buf *buf; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1500,7 +1501,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, return -EINVAL; if (copy_from_user(&request, - (drm_buf_free_t __user *) arg, sizeof(request))) + (struct drm_buf_free __user *) arg, 
sizeof(request))) return -EFAULT; DRM_DEBUG("%d\n", request.count); @@ -1541,15 +1542,15 @@ int drm_freebufs(struct inode *inode, struct file *filp, int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_map_t __user *argp = (void __user *)arg; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; - drm_buf_map_t request; + struct drm_buf_map request; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1575,7 +1576,7 @@ int drm_mapbufs(struct inode *inode, struct file *filp, && (dma->flags & _DRM_DMA_USE_SG)) || (drm_core_check_feature(dev, DRIVER_FB_DMA) && (dma->flags & _DRM_DMA_USE_FB))) { - drm_map_t *map = dev->agp_buffer_map; + struct drm_map *map = dev->agp_buffer_map; unsigned long token = dev->agp_buffer_token; if (!map) { diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 08d20d06..9a6da7e9 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -196,15 +196,16 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, return ret; } -static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + +static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -261,7 +262,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -350,11 +351,11 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; mutex_lock(&bo->mutex); @@ -394,7 +395,7 @@ out_unlock: int drm_bo_map_bound(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; int ret = 0; unsigned long bus_base; unsigned long bus_offset; @@ -405,7 +406,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) BUG_ON(ret); if (bus_size) { - drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ret = io_remap_pfn_range(vma, vma->vm_start, pfn, @@ -417,7 +418,7 @@ int drm_bo_map_bound(struct 
vm_area_struct *vma) } -int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; @@ -453,7 +454,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) return 0; } -void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; @@ -485,7 +486,7 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) -int drm_bo_lock_kmm(drm_buffer_object_t * bo) +int drm_bo_lock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; int lock_ok = 1; @@ -517,7 +518,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo) return -EAGAIN; } -void drm_bo_unlock_kmm(drm_buffer_object_t * bo) +void drm_bo_unlock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; @@ -528,7 +529,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo) } } -int drm_bo_remap_bound(drm_buffer_object_t *bo) +int drm_bo_remap_bound(struct drm_buffer_object *bo) { vma_entry_t *v_entry; int ret = 0; @@ -544,7 +545,7 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo) return ret; } -void drm_bo_finish_unmap(drm_buffer_object_t *bo) +void drm_bo_finish_unmap(struct drm_buffer_object *bo) { vma_entry_t *v_entry; diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 024059ac..0b00ba47 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -306,8 +306,10 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma); #endif -/* fixme when functions are upstreamed */ +/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) #define DRM_IDR_COMPAT_FN +#endif #ifdef DRM_IDR_COMPAT_FN int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 195c7fb5..a0b1a7ec 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -56,7 +56,7 @@ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. */ -void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) +void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle) { mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); @@ -72,7 +72,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) * Allocate a new idr from drm_device::ctx_idr while holding the * drm_device::struct_mutex lock. */ -static int drm_ctxbitmap_next(drm_device_t * dev) +static int drm_ctxbitmap_next(struct drm_device *dev) { int new_id; int ret; @@ -101,7 +101,7 @@ again: * * Initialise the drm_device::ctx_idr */ -int drm_ctxbitmap_init(drm_device_t * dev) +int drm_ctxbitmap_init(struct drm_device *dev) { idr_init(&dev->ctx_idr); return 0; @@ -115,7 +115,7 @@ int drm_ctxbitmap_init(drm_device_t * dev) * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. 
*/ -void drm_ctxbitmap_cleanup(drm_device_t * dev) +void drm_ctxbitmap_cleanup(struct drm_device *dev) { mutex_lock(&dev->struct_mutex); idr_remove_all(&dev->ctx_idr); @@ -143,12 +143,12 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev) int drm_getsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t __user *argp = (void __user *)arg; - drm_ctx_priv_map_t request; - drm_map_t *map; - drm_map_list_t *_entry; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ctx_priv_map __user *argp = (void __user *)arg; + struct drm_ctx_priv_map request; + struct drm_map *map; + struct drm_map_list *_entry; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; @@ -194,14 +194,14 @@ int drm_getsareactx(struct inode *inode, struct file *filp, int drm_setsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t request; - drm_map_t *map = NULL; - drm_map_list_t *r_list = NULL; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ctx_priv_map request; + struct drm_map *map = NULL; + struct drm_map_list *r_list = NULL; if (copy_from_user(&request, - (drm_ctx_priv_map_t __user *) arg, sizeof(request))) + (struct drm_ctx_priv_map __user *) arg, sizeof(request))) return -EFAULT; mutex_lock(&dev->struct_mutex); @@ -243,7 +243,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp, * * Attempt to set drm_device::context_flag. */ -static int drm_context_switch(drm_device_t * dev, int old, int new) +static int drm_context_switch(struct drm_device *dev, int old, int new) { if (test_and_set_bit(0, &dev->context_flag)) { DRM_ERROR("Reentering -- FIXME\n"); @@ -271,7 +271,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new) * hardware lock is held, clears the drm_device::context_flag and wakes up * drm_device::context_wait. */ -static int drm_context_switch_complete(drm_device_t * dev, int new) +static int drm_context_switch_complete(struct drm_device *dev, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. 
*/ dev->last_switch = jiffies; @@ -301,9 +301,9 @@ static int drm_context_switch_complete(drm_device_t * dev, int new) int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_ctx_res_t res; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx_res res; + struct drm_ctx_res __user *argp = (void __user *)arg; + struct drm_ctx ctx; int i; if (copy_from_user(&res, argp, sizeof(res))) @@ -338,11 +338,11 @@ int drm_resctx(struct inode *inode, struct file *filp, int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_list_t *ctx_entry; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ctx_list *ctx_entry; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; if (copy_from_user(&ctx, argp, sizeof(ctx))) return -EFAULT; @@ -406,8 +406,8 @@ int drm_modctx(struct inode *inode, struct file *filp, int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; if (copy_from_user(&ctx, argp, sizeof(ctx))) return -EFAULT; @@ -434,11 +434,11 @@ int drm_getctx(struct inode *inode, struct file *filp, int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -459,11 +459,11 @@ int drm_switchctx(struct inode *inode, struct file *filp, int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -486,11 +486,11 @@ int drm_newctx(struct inode *inode, struct file *filp, int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -505,7 +505,7 @@ int drm_rmctx(struct inode *inode, struct file *filp, mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->handle == ctx.handle) { diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index a7eee1a4..d2a88d52 100644 --- a/linux-core/drm_dma.c +++ 
b/linux-core/drm_dma.c @@ -43,7 +43,7 @@ * * Allocate and initialize a drm_device_dma structure. */ -int drm_dma_setup(drm_device_t * dev) +int drm_dma_setup(struct drm_device * dev) { int i; @@ -67,9 +67,9 @@ int drm_dma_setup(drm_device_t * dev) * Free all pages associated with DMA buffers, the buffers and pages lists, and * finally the the drm_device::dma structure itself. */ -void drm_dma_takedown(drm_device_t * dev) +void drm_dma_takedown(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, j; if (!dma) @@ -129,7 +129,7 @@ void drm_dma_takedown(drm_device_t * dev) * * Resets the fields of \p buf. */ -void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) +void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) { if (!buf) return; @@ -152,9 +152,9 @@ void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) * * Frees each buffer associated with \p filp not already on the hardware. */ -void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp) +void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 7129980b..d6cdba56 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -44,7 +44,7 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - drm_draw_t draw; + struct drm_draw draw; int new_id = 0; int ret; @@ -67,7 +67,7 @@ again: DRM_DEBUG("%d\n", draw.handle); - DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); + DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw)); return 0; } @@ -78,10 +78,10 @@ again: int drm_rmdraw(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_draw_t draw; + struct drm_draw draw; unsigned long irqflags; - DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data, sizeof(draw)); spin_lock_irqsave(&dev->drw_lock, irqflags); @@ -99,13 +99,13 @@ int drm_rmdraw(DRM_IOCTL_ARGS) int drm_update_drawable_info(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_update_draw_t update; + struct drm_update_draw update; unsigned long irqflags; - drm_clip_rect_t *rects; + struct drm_clip_rect *rects; struct drm_drawable_info *info; int err; - DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, sizeof(update)); info = idr_find(&dev->drw_idr, update.handle); @@ -123,7 +123,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), + rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } else rects = info->rects; @@ -135,7 +135,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) } if (update.num && DRM_COPY_FROM_USER(rects, - (drm_clip_rect_t __user *) + (struct drm_clip_rect __user *) (unsigned long)update.data, update.num * sizeof(*rects))) { @@ -148,7 +148,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) if (rects != info->rects) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } info->rects = rects; @@ -168,7 +168,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) error: if (rects != info->rects) - drm_free(rects, update.num * sizeof(drm_clip_rect_t), + drm_free(rects, 
update.num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); return err; @@ -177,7 +177,7 @@ error: /** * Caller must hold the drawable spinlock! */ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) { return idr_find(&dev->drw_idr, id); } @@ -189,14 +189,14 @@ static int drm_drawable_free(int idr, void *p, void *data) if (info) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); drm_free(info, sizeof(*info), DRM_MEM_BUFS); } return 0; } -void drm_drawable_free_all(drm_device_t *dev) +void drm_drawable_free_all(struct drm_device *dev) { idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); idr_remove_all(&dev->drw_idr); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 0d446a12..84efbfe7 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -48,14 +48,14 @@ #include "drmP.h" #include "drm_core.h" -static void drm_cleanup(drm_device_t * dev); +static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /** Ioctl table */ -static drm_ioctl_desc_t drm_ioctls[] = { +static struct drm_ioctl_desc drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, @@ -117,12 +117,43 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + + // [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + + [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, + DRM_AUTH }, + + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, + + [DRM_IOCTL_NR(DRM_IOCTL_BO_CREATE)] = {drm_bo_create_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_DESTROY)] = {drm_bo_destroy_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_MAP)] = {drm_bo_map_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_UNMAP)] = {drm_bo_unmap_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_REFERENCE)] = {drm_bo_reference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_UNREFERENCE)] = {drm_bo_unreference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_OP)] 
= {drm_bo_op_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_INFO)] = {drm_bo_info_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_WAIT_IDLE)] = {drm_bo_wait_idle_ioctl, DRM_AUTH}, + + - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -137,11 +168,11 @@ static drm_ioctl_desc_t drm_ioctls[] = { * * \sa drm_device */ -int drm_lastclose(drm_device_t * dev) +int drm_lastclose(struct drm_device * dev) { - drm_magic_entry_t *pt, *next; - drm_map_list_t *r_list, *list_t; - drm_vma_entry_t *vma, *vma_temp; + struct drm_magic_entry *pt, *next; + struct drm_map_list *r_list, *list_t; + struct drm_vma_entry *vma, *vma_temp; int i; DRM_DEBUG("\n"); @@ -189,7 +220,7 @@ int drm_lastclose(drm_device_t * dev) /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp) { - drm_agp_mem_t *entry, *tempe; + struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. */ @@ -257,7 +288,7 @@ int drm_lastclose(drm_device_t * dev) void drm_cleanup_pci(struct pci_dev *pdev) { - drm_device_t *dev = pci_get_drvdata(pdev); + struct drm_device *dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); @@ -343,7 +374,7 @@ EXPORT_SYMBOL(drm_init); * * \sa drm_init */ -static void drm_cleanup(drm_device_t * dev) +static void drm_cleanup(struct drm_device * dev) { DRM_DEBUG("\n"); @@ -388,8 +419,8 @@ static void drm_cleanup(drm_device_t * dev) void drm_exit(struct drm_driver *driver) { int i; - drm_device_t *dev = NULL; - drm_head_t *head; + struct drm_device *dev = NULL; + struct drm_head *head; DRM_DEBUG("\n"); if (drm_fb_loaded) { @@ -517,10 +548,10 @@ module_exit(drm_core_exit); static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_version_t __user *argp = (void __user *)arg; - drm_version_t version; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_version __user *argp = (void __user *)arg; + struct drm_version version; int len; if (copy_from_user(&version, argp, sizeof(version))) @@ -553,9 +584,9 @@ static int drm_version(struct inode *inode, struct file *filp, int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ioctl_desc_t *ioctl; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; @@ -604,7 +635,7 @@ EXPORT_SYMBOL(drm_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 5215feb6..9b2fa405 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,17 +34,17 @@ * Typically called by the IRQ handler. 
*/ -void drm_fence_handler(drm_device_t * dev, uint32_t class, +void drm_fence_handler(struct drm_device * dev, uint32_t class, uint32_t sequence, uint32_t type) { int wake = 0; uint32_t diff; uint32_t relevant; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; - drm_fence_object_t *fence, *next; + struct drm_fence_object *fence, *next; int found = 0; int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; @@ -114,9 +114,9 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class, EXPORT_SYMBOL(drm_fence_handler); -static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) +static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -124,11 +124,11 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); *fence = NULL; @@ -142,11 +142,11 @@ void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) } } -void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; *fence = NULL; if (atomic_dec_and_test(&tmp_fence->usage)) { @@ -180,22 +180,22 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, } -static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base) +static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { - drm_fence_object_t *fence = - drm_user_object_entry(base, drm_fence_object_t, base); + struct drm_fence_object *fence = + drm_user_object_entry(base, struct drm_fence_object, base); drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_fence_object_t * fence, +int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t mask, int poke_flush) { unsigned long flags; int signaled; struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) driver->poke_flush(dev, fence->class); @@ -207,8 +207,8 @@ int drm_fence_object_signaled(drm_fence_object_t * fence, return signaled; } -static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, - drm_fence_driver_t * driver, uint32_t sequence) +static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, + struct drm_fence_driver * driver, uint32_t sequence) { uint32_t diff; @@ -224,13 +224,13 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_fence_object_t * fence, +int 
drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; if (type & ~fence->type) { @@ -262,14 +262,14 @@ int drm_fence_object_flush(drm_fence_object_t * fence, * wrapped around and reused. */ -void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t diff; write_lock_irqsave(&fm->lock, flags); @@ -290,7 +290,7 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) mutex_unlock(&dev->struct_mutex); return; } - fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); @@ -302,13 +302,13 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_fence_object_t *fence, +static int drm_fence_lazy_wait(struct drm_fence_object *fence, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -336,11 +336,11 @@ static int drm_fence_lazy_wait(drm_fence_object_t *fence, return 0; } -int drm_fence_object_wait(drm_fence_object_t * fence, +int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_driver *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; int signaled; @@ -403,13 +403,13 @@ int drm_fence_object_wait(drm_fence_object_t * fence, return 0; } -int drm_fence_object_emit(drm_fence_object_t * fence, +int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -435,14 +435,14 @@ int drm_fence_object_emit(drm_fence_object_t * fence, return 0; } 
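The fence helpers above keep their signatures through the typedef-to-struct rename, so callers only pick up the new type names. As a minimal illustration (not part of the patch), a driver-side caller of the renamed API might look like the sketch below; fence class 0, DRM_FENCE_TYPE_EXE and a lazy wait are assumed, the example_emit_and_wait wrapper is hypothetical, and hardware-lock handling is omitted for brevity:

static int example_emit_and_wait(struct drm_device *dev)
{
	struct drm_fence_object *fence;
	int ret;

	/* Create a fence object in class 0 covering the EXE type. */
	ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE, 0, &fence);
	if (ret)
		return ret;

	/* Emit it on the device, then wait lazily for it to signal. */
	ret = drm_fence_object_emit(fence, 0, 0, DRM_FENCE_TYPE_EXE);
	if (!ret)
		ret = drm_fence_object_wait(fence, 1, 0, DRM_FENCE_TYPE_EXE);

	/* Drop the reference taken at creation. */
	drm_fence_usage_deref_unlocked(&fence);
	return ret;
}

The per-operation ioctl handlers introduced in the hunks below wrap this same object API for user space, replacing the single multiplexed fence ioctl.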
-static int drm_fence_object_init(drm_device_t * dev, uint32_t class, +static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, - drm_fence_object_t * fence) + struct drm_fence_object * fence) { int ret = 0; unsigned long flags; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; mutex_lock(&dev->struct_mutex); atomic_set(&fence->usage, 1); @@ -471,10 +471,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -491,12 +491,12 @@ out: } EXPORT_SYMBOL(drm_fence_add_user_object); -int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, - unsigned flags, drm_fence_object_t ** c_fence) +int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, + unsigned flags, struct drm_fence_object ** c_fence) { - drm_fence_object_t *fence; + struct drm_fence_object *fence; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) @@ -514,11 +514,11 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, EXPORT_SYMBOL(drm_fence_object_create); -void drm_fence_manager_init(drm_device_t * dev) +void drm_fence_manager_init(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *class; - drm_fence_driver_t *fed = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *class; + struct drm_fence_driver *fed = dev->driver->fence_driver; int i; rwlock_init(&fm->lock); @@ -544,15 +544,15 @@ void drm_fence_manager_init(drm_device_t * dev) write_unlock(&fm->lock); } -void drm_fence_manager_takedown(drm_device_t * dev) +void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) +struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; - drm_user_object_t *uo; - drm_fence_object_t *fence; + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_fence_object *fence; mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, handle); @@ -560,19 +560,18 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); mutex_unlock(&dev->struct_mutex); return fence; } -int drm_fence_ioctl(DRM_IOCTL_ARGS) +int drm_fence_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; - drm_fence_object_t *fence; - drm_user_object_t *uo; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -582,92 +581,288 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - switch (arg.op) { - case drm_fence_create: - if (arg.flags & 
DRM_FENCE_FLAG_EMIT) - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_object_create(dev, arg.class, - arg.type, arg.flags, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) { - drm_fence_usage_deref_unlocked(&fence); - return ret; - } - arg.handle = fence->base.hash.key; + if (arg.flags & DRM_FENCE_FLAG_EMIT) + LOCK_TEST_WITH_RETURN(dev, filp); + ret = drm_fence_object_create(dev, arg.class, + arg.type, arg.flags, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(priv, fence, + arg.flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) { + drm_fence_usage_deref_unlocked(&fence); + return ret; + } + + /* + * usage > 0. No need to lock dev->struct_mutex; + */ + + atomic_inc(&fence->usage); + arg.handle = fence->base.hash.key; - break; - case drm_fence_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(priv, uo); + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_user_object *uo; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, arg.handle); + if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(priv, uo); + mutex_unlock(&dev->struct_mutex); + return ret; +} + + +int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; + struct drm_user_object *uo; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); + if (ret) return ret; - case drm_fence_reference: - ret = - drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); - if (ret) - return ret; - fence = drm_lookup_fence_object(priv, arg.handle); - break; - case drm_fence_unreference: - ret = drm_user_object_unref(priv, arg.handle, drm_fence_type); - return ret; - case drm_fence_signaled: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - break; - case drm_fence_flush: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_flush(fence, arg.type); - break; - case drm_fence_wait: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = - drm_fence_object_wait(fence, - arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, - 0, arg.type); - break; - case drm_fence_emit: - LOCK_TEST_WITH_RETURN(dev, filp); - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = 
drm_fence_object_emit(fence, arg.flags, arg.class, - arg.type); - break; - case drm_fence_buffers: - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized\n"); - return -EINVAL; - } - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_buffer_objects(priv, NULL, arg.flags, - NULL, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) - return ret; - arg.handle = fence->base.hash.key; - break; - default: + fence = drm_lookup_fence_object(priv, arg.handle); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + return drm_user_object_unref(priv, arg.handle, drm_fence_type); +} + +int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); return -EINVAL; } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_flush(fence, arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_wait(fence, + arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + 
arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + LOCK_TEST_WITH_RETURN(dev, filp); + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_emit(fence, arg.flags, arg.class, + arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg arg; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized\n"); + return -EINVAL; + } + LOCK_TEST_WITH_RETURN(dev, filp); + ret = drm_fence_buffer_objects(priv, NULL, arg.flags, + NULL, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(priv, fence, + arg.flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + atomic_inc(&fence->usage); + arg.handle = fence->base.hash.key; + read_lock_irqsave(&fm->lock, flags); arg.class = fence->class; arg.type = fence->type; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index e54d5079..d542d4e3 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -39,9 +39,9 @@ #include <linux/poll.h> static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev); + struct drm_device * dev); -static int drm_setup(drm_device_t * dev) +static int drm_setup(struct drm_device * dev) { drm_local_map_t *map; int i; @@ -128,7 +128,7 @@ static int drm_setup(drm_device_t * dev) */ int drm_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int retcode = 0; @@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_open); */ int drm_stub_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int err = -ENODEV; const struct file_operations *old_fops; @@ -232,10 +232,10 @@ static int drm_cpu_valid(void) * filp and add it into the double linked list in \p dev. */ static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev) + struct drm_device * dev) { int minor = iminor(inode); - drm_file_t *priv; + struct drm_file *priv; int ret; int i,j; @@ -320,8 +320,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, /** No-op. 
*/ int drm_fasync(int fd, struct file *filp, int on) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode; DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, @@ -335,10 +335,10 @@ EXPORT_SYMBOL(drm_fasync); static void drm_object_release(struct file *filp) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct list_head *head; - drm_user_object_t *user_object; - drm_ref_object_t *ref_object; + struct drm_user_object *user_object; + struct drm_ref_object *ref_object; int i; /* @@ -351,7 +351,7 @@ static void drm_object_release(struct file *filp) { head = &priv->refd_objects; while (head->next != head) { - ref_object = list_entry(head->next, drm_ref_object_t, list); + ref_object = list_entry(head->next, struct drm_ref_object, list); drm_remove_ref_object(priv, ref_object); head = &priv->refd_objects; } @@ -362,7 +362,7 @@ static void drm_object_release(struct file *filp) { head = &priv->user_objects; while (head->next != head) { - user_object = list_entry(head->next, drm_user_object_t, list); + user_object = list_entry(head->next, struct drm_user_object, list); drm_remove_user_object(priv, user_object); head = &priv->user_objects; } @@ -386,8 +386,8 @@ static void drm_object_release(struct file *filp) { */ int drm_release(struct inode *inode, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; int retcode = 0; lock_kernel(); @@ -466,7 +466,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->tag == priv && @@ -488,7 +488,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); drm_object_release(filp); if (priv->remove_auth_on_close == 1) { - drm_file_t *temp; + struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) temp->authenticated = 0; diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c index 6f17e114..a8ec8468 100644 --- a/linux-core/drm_hashtab.c +++ b/linux-core/drm_hashtab.c @@ -36,7 +36,7 @@ #include "drm_hashtab.h" #include <linux/hash.h> -int drm_ht_create(drm_open_hash_t * ht, unsigned int order) +int drm_ht_create(struct drm_open_hash * ht, unsigned int order) { unsigned int i; @@ -63,9 +63,9 @@ int drm_ht_create(drm_open_hash_t * ht, unsigned int order) return 0; } -void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) +void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -75,15 +75,15 @@ void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); } } -static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, +static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; 
struct hlist_node *list; unsigned int hashed_key; @@ -91,7 +91,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return list; if (entry->key > key) @@ -100,9 +100,9 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, return NULL; } -int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list, *parent; unsigned int hashed_key; @@ -112,7 +112,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) h_list = &ht->table[hashed_key]; parent = NULL; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return -EINVAL; if (entry->key > key) @@ -131,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) * Just insert an item and return any "bits" bit key that hasn't been * used before. */ -int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, +int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item, unsigned long seed, int bits, int shift, unsigned long add) { @@ -155,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, return 0; } -int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, - drm_hash_item_t ** item) +int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key, + struct drm_hash_item ** item) { struct hlist_node *list; @@ -164,11 +164,11 @@ int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, if (!list) return -EINVAL; - *item = hlist_entry(list, drm_hash_item_t, head); + *item = hlist_entry(list, struct drm_hash_item, head); return 0; } -int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) +int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key) { struct hlist_node *list; @@ -181,14 +181,14 @@ int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) return -EINVAL; } -int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item) { hlist_del_init(&item->head); ht->fill--; return 0; } -void drm_ht_remove(drm_open_hash_t * ht) +void drm_ht_remove(struct drm_open_hash * ht) { if (ht->table) { if (ht->use_vmalloc) diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h index 613091c9..0f137677 100644 --- a/linux-core/drm_hashtab.h +++ b/linux-core/drm_hashtab.h @@ -37,31 +37,31 @@ #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef struct drm_hash_item{ +struct drm_hash_item { struct hlist_node head; unsigned long key; -} drm_hash_item_t; +}; -typedef struct drm_open_hash{ +struct drm_open_hash { unsigned int size; unsigned int order; unsigned int fill; struct hlist_head *table; int use_vmalloc; -} drm_open_hash_t; +}; -extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order); -extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item, +extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); 
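
Note: the drm_hashtab hunks above and below only swap drm_open_hash_t/drm_hash_item_t for plain struct tags; the open-addressing hash API itself is unchanged. A small usage sketch follows, assuming the declarations from drm_hashtab.h shown here; my_object, its payload field, and the order-10 table size are made-up illustration details.

    struct my_object {
            struct drm_hash_item hash;   /* embedded; keyed lookups go through this */
            int payload;
    };

    static int hashtab_example(struct drm_open_hash *ht, struct my_object *obj,
                               unsigned long key)
    {
            struct drm_hash_item *item;
            int ret;

            ret = drm_ht_create(ht, 10);                /* order-10 table */
            if (ret)
                    return ret;

            obj->hash.key = key;
            ret = drm_ht_insert_item(ht, &obj->hash);   /* -EINVAL if key exists */
            if (ret)
                    goto out;

            if (drm_ht_find_item(ht, key, &item) == 0) {
                    struct my_object *found =
                            drm_hash_entry(item, struct my_object, hash);
                    found->payload++;
            }

            drm_ht_remove_item(ht, &obj->hash);
    out:
            drm_ht_remove(ht);
            return ret;
    }
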
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, unsigned long seed, int bits, int shift, unsigned long add); -extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item); +extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); -extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern void drm_ht_remove(drm_open_hash_t *ht); +extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern void drm_ht_remove(struct drm_open_hash *ht); #endif diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index bbab3ea2..b1162785 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -82,7 +82,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, unsigned long arg) { drm_version32_t v32; - drm_version_t __user *version; + struct drm_version __user *version; int err; if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) @@ -129,7 +129,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; int err; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) @@ -159,7 +159,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; @@ -179,8 +179,8 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, typedef struct drm_map32 { u32 offset; /**< Requested physical address (0 for SAREA)*/ u32 size; /**< Requested physical size (bytes) */ - drm_map_type_t type; /**< Type of memory to map */ - drm_map_flags_t flags; /**< Flags */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ u32 handle; /**< User-space: "Handle" to pass to mmap() */ int mtrr; /**< MTRR slot used */ } drm_map32_t; @@ -190,7 +190,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int idx, err; void *handle; @@ -228,7 +228,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int err; void *handle; @@ -270,7 +270,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, unsigned long arg) { drm_map32_t __user *argp = (void __user *)arg; - drm_map_t __user *map; + struct drm_map __user *map; u32 handle; if (get_user(handle, &argp->handle)) @@ -300,7 +300,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, { drm_client32_t c32; drm_client32_t __user *argp = (void __user *)arg; - drm_client_t __user *client; + struct drm_client __user *client; int idx, err; if (get_user(idx, &argp->idx)) @@ -333,7 +333,7 @@ typedef struct drm_stats32 { u32 count; struct { u32 value; - 
drm_stat_type_t type; + enum drm_stat_type type; } data[15]; } drm_stats32_t; @@ -342,7 +342,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, { drm_stats32_t s32; drm_stats32_t __user *argp = (void __user *)arg; - drm_stats_t __user *stats; + struct drm_stats __user *stats; int i, err; stats = compat_alloc_user_space(sizeof(*stats)); @@ -379,7 +379,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; int err; unsigned long agp_start; @@ -411,7 +411,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, { drm_buf_desc32_t b32; drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; if (copy_from_user(&b32, argp, sizeof(b32))) return -EFAULT; @@ -440,8 +440,8 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, drm_buf_info32_t req32; drm_buf_info32_t __user *argp = (void __user *)arg; drm_buf_desc32_t __user *to; - drm_buf_info_t __user *request; - drm_buf_desc_t __user *list; + struct drm_buf_info __user *request; + struct drm_buf_desc __user *list; size_t nbytes; int i, err; int count, actual; @@ -457,11 +457,11 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) return -EFAULT; - nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_desc_t *) (request + 1); + list = (struct drm_buf_desc *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -477,7 +477,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&to[i], &list[i], - offsetof(drm_buf_desc_t, flags))) + offsetof(struct drm_buf_desc, flags))) return -EFAULT; if (__put_user(actual, &argp->count)) @@ -505,8 +505,8 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, drm_buf_map32_t __user *argp = (void __user *)arg; drm_buf_map32_t req32; drm_buf_pub32_t __user *list32; - drm_buf_map_t __user *request; - drm_buf_pub_t __user *list; + struct drm_buf_map __user *request; + struct drm_buf_pub __user *list; int i, err; int count, actual; size_t nbytes; @@ -519,11 +519,11 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count < 0) return -EINVAL; - nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_pub_t *) (request + 1); + list = (struct drm_buf_pub *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -539,7 +539,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&list32[i], &list[i], - offsetof(drm_buf_pub_t, address)) + offsetof(struct drm_buf_pub, address)) || __get_user(addr, &list[i].address) || __put_user((unsigned long)addr, &list32[i].address)) @@ -562,7 +562,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_free32_t req32; - 
drm_buf_free_t __user *request; + struct drm_buf_free __user *request; drm_buf_free32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -589,7 +589,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, unsigned long arg) { drm_ctx_priv_map32_t req32; - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -610,7 +610,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, static int compat_drm_getsareactx(struct file *file, unsigned int cmd, unsigned long arg) { - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; int err; unsigned int ctx_id; @@ -648,7 +648,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, { drm_ctx_res32_t __user *argp = (void __user *)arg; drm_ctx_res32_t res32; - drm_ctx_res_t __user *res; + struct drm_ctx_res __user *res; int err; if (copy_from_user(&res32, argp, sizeof(res32))) @@ -658,7 +658,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) return -EFAULT; if (__put_user(res32.count, &res->count) - || __put_user((drm_ctx_t __user *)(unsigned long)res32.contexts, + || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, &res->contexts)) return -EFAULT; @@ -679,7 +679,7 @@ typedef struct drm_dma32 { int send_count; /**< Number of buffers to send */ u32 send_indices; /**< List of handles to buffers */ u32 send_sizes; /**< Lengths of data to send */ - drm_dma_flags_t flags; /**< Flags */ + enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ u32 request_indices; /**< Buffer information */ @@ -692,7 +692,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd, { drm_dma32_t d32; drm_dma32_t __user *argp = (void __user *)arg; - drm_dma_t __user *d; + struct drm_dma __user *d; int err; if (copy_from_user(&d32, argp, sizeof(d32))) @@ -740,7 +740,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd, { drm_agp_mode32_t __user *argp = (void __user *)arg; drm_agp_mode32_t m32; - drm_agp_mode_t __user *mode; + struct drm_agp_mode __user *mode; if (get_user(m32.mode, &argp->mode)) return -EFAULT; @@ -772,7 +772,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, { drm_agp_info32_t __user *argp = (void __user *)arg; drm_agp_info32_t i32; - drm_agp_info_t __user *info; + struct drm_agp_info __user *info; int err; info = compat_alloc_user_space(sizeof(*info)); @@ -813,7 +813,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, { drm_agp_buffer32_t __user *argp = (void __user *)arg; drm_agp_buffer32_t req32; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -845,7 +845,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -868,7 +868,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, { drm_agp_binding32_t __user *argp = (void __user *)arg; drm_agp_binding32_t req32; - 
drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; @@ -887,7 +887,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -910,7 +910,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; int err; unsigned long x; @@ -938,7 +938,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; unsigned long x; request = compat_alloc_user_space(sizeof(*request)); @@ -953,13 +953,13 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, } struct drm_wait_vblank_request32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; u32 signal; }; struct drm_wait_vblank_reply32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; s32 tval_sec; s32 tval_usec; @@ -975,7 +975,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, { drm_wait_vblank32_t __user *argp = (void __user *)arg; drm_wait_vblank32_t req32; - drm_wait_vblank_t __user *request; + union drm_wait_vblank __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 97df972f..a7bacbb8 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -52,10 +52,10 @@ int drm_getunique(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_unique_t __user *argp = (void __user *)arg; - drm_unique_t u; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_unique __user *argp = (void __user *)arg; + struct drm_unique u; if (copy_from_user(&u, argp, sizeof(u))) return -EFAULT; @@ -86,15 +86,15 @@ int drm_getunique(struct inode *inode, struct file *filp, int drm_setunique(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_unique_t u; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_unique u; int domain, bus, slot, func, ret; if (dev->unique_len || dev->unique) return -EBUSY; - if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u))) + if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) return -EFAULT; if (!u.unique_len || u.unique_len > 1024) @@ -134,7 +134,7 @@ int drm_setunique(struct inode *inode, struct file *filp, return 0; } -static int drm_set_busid(drm_device_t * dev) +static int drm_set_busid(struct drm_device * dev) { int len; if (dev->unique != NULL) @@ -179,11 +179,11 @@ static int drm_set_busid(drm_device_t * dev) int drm_getmap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t __user *argp = (void __user *)arg; 
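
Note: the drm_ioc32.c hunks above likewise only rename the typedef'd types to struct/enum/union tags; the 32-bit compat strategy is untouched: copy the 32-bit layout in from userspace, build a native-sized struct in the compat window with compat_alloc_user_space(), and hand that to the regular ioctl path. A minimal sketch of the translation step, assuming a hypothetical drm_thing32_t / struct drm_thing pair with a single size field; the forwarding call itself sits outside the hunks shown.

    struct drm_thing {                       /* hypothetical native layout */
            unsigned long size;
    };
    typedef struct drm_thing32 {             /* hypothetical 32-bit layout */
            u32 size;
    } drm_thing32_t;

    static int compat_drm_thing(struct file *file, unsigned int cmd,
                                unsigned long arg)
    {
            drm_thing32_t t32;
            drm_thing32_t __user *argp = (void __user *)arg;
            struct drm_thing __user *request;

            if (copy_from_user(&t32, argp, sizeof(t32)))
                    return -EFAULT;

            request = compat_alloc_user_space(sizeof(*request));
            if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
                    return -EFAULT;
            if (__put_user(t32.size, &request->size))
                    return -EFAULT;

            /* ...the native ioctl is then invoked with `request`, and any
             * results are copied back into *argp the same way... */
            return 0;
    }
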
- drm_map_t map; - drm_map_list_t *r_list = NULL; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map __user *argp = (void __user *)arg; + struct drm_map map; + struct drm_map_list *r_list = NULL; struct list_head *list; int idx; int i; @@ -201,7 +201,7 @@ int drm_getmap(struct inode *inode, struct file *filp, i = 0; list_for_each(list, &dev->maplist) { if (i == idx) { - r_list = list_entry(list, drm_map_list_t, head); + r_list = list_entry(list, struct drm_map_list, head); break; } i++; @@ -240,11 +240,11 @@ int drm_getmap(struct inode *inode, struct file *filp, int drm_getclient(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_client_t __user *argp = (drm_client_t __user *)arg; - drm_client_t client; - drm_file_t *pt; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_client __user *argp = (struct drm_client __user *)arg; + struct drm_client client; + struct drm_file *pt; int idx; int i; @@ -289,9 +289,9 @@ int drm_getclient(struct inode *inode, struct file *filp, int drm_getstats(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_stats_t stats; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_stats stats; int i; memset(&stats, 0, sizeof(stats)); @@ -311,7 +311,7 @@ int drm_getstats(struct inode *inode, struct file *filp, mutex_unlock(&dev->struct_mutex); - if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats))) + if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats))) return -EFAULT; return 0; } @@ -330,10 +330,10 @@ int drm_getstats(struct inode *inode, struct file *filp, int drm_setversion(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_set_version_t sv; - drm_set_version_t retv; + struct drm_set_version sv; + struct drm_set_version retv; int if_version; - drm_set_version_t __user *argp = (void __user *)data; + struct drm_set_version __user *argp = (void __user *)data; if (copy_from_user(&sv, argp, sizeof(sv))) return -EFAULT; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 88716712..140ceca6 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -53,10 +53,10 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_irq_busid_t __user *argp = (void __user *)arg; - drm_irq_busid_t p; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_irq_busid __user *argp = (void __user *)arg; + struct drm_irq_busid p; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; @@ -86,7 +86,7 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. */ -static int drm_irq_install(drm_device_t * dev) +static int drm_irq_install(struct drm_device * dev) { int ret; unsigned long sh_flags = 0; @@ -154,7 +154,7 @@ static int drm_irq_install(drm_device_t * dev) * * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. 
*/ -int drm_irq_uninstall(drm_device_t * dev) +int drm_irq_uninstall(struct drm_device * dev) { int irq_enabled; @@ -195,13 +195,13 @@ EXPORT_SYMBOL(drm_irq_uninstall); int drm_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_control_t ctl; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ - if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl))) + if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl))) return -EFAULT; switch (ctl.func) { @@ -242,10 +242,10 @@ int drm_control(struct inode *inode, struct file *filp, */ int drm_wait_vblank(DRM_IOCTL_ARGS) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_wait_vblank_t __user *argp = (void __user *)data; - drm_wait_vblank_t vblwait; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + union drm_wait_vblank __user *argp = (void __user *)data; + union drm_wait_vblank vblwait; struct timeval now; int ret = 0; unsigned int flags, seq; @@ -292,7 +292,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) unsigned long irqflags; struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_sigs2 : &dev->vbl_sigs; - drm_vbl_sig_t *vbl_sig; + struct drm_vbl_sig *vbl_sig; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -322,7 +322,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if (! (vbl_sig = - drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) { + drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { return -ENOMEM; } @@ -369,7 +369,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * * If a signal is not requested, then calls vblank_wait(). */ -void drm_vbl_send_signals(drm_device_t * dev) +void drm_vbl_send_signals(struct drm_device * dev) { unsigned long flags; int i; @@ -377,7 +377,7 @@ void drm_vbl_send_signals(drm_device_t * dev) spin_lock_irqsave(&dev->vbl_lock, flags); for (i = 0; i < 2; i++) { - drm_vbl_sig_t *vbl_sig, *tmp; + struct drm_vbl_sig *vbl_sig, *tmp; struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs; unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : &dev->vbl_received); @@ -413,7 +413,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals); */ static void drm_locked_tasklet_func(unsigned long data) { - drm_device_t *dev = (drm_device_t*)data; + struct drm_device *dev = (struct drm_device*)data; unsigned long irqflags; spin_lock_irqsave(&dev->tasklet_lock, irqflags); @@ -450,7 +450,7 @@ static void drm_locked_tasklet_func(unsigned long data) * context, it must not make any assumptions about this. Also, the HW lock will * be held with the kernel context or any client context. 
*/ -void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*)) +void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*)) { unsigned long irqflags; static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index f02df36b..1ba01aab 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -51,15 +51,15 @@ static int drm_notifier(void *priv); int drm_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; DECLARE_WAITQUEUE(entry, current); - drm_lock_t lock; + struct drm_lock lock; int ret = 0; ++priv->lock_count; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { @@ -152,12 +152,12 @@ int drm_lock(struct inode *inode, struct file *filp, int drm_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_lock_t lock; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_lock lock; unsigned long irqflags; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { @@ -202,7 +202,7 @@ int drm_unlock(struct inode *inode, struct file *filp, * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. */ -int drm_lock_take(drm_lock_data_t *lock_data, +int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -252,7 +252,7 @@ int drm_lock_take(drm_lock_data_t *lock_data, * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ -static int drm_lock_transfer(drm_lock_data_t *lock_data, +static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -278,7 +278,7 @@ static int drm_lock_transfer(drm_lock_data_t *lock_data, * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. */ -int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) +int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; @@ -320,7 +320,7 @@ int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) */ static int drm_notifier(void *priv) { - drm_sigdata_t *s = (drm_sigdata_t *) priv; + struct drm_sigdata *s = (struct drm_sigdata *) priv; unsigned int old, new, prev; /* Allow signal delivery if lock isn't held */ @@ -351,7 +351,7 @@ static int drm_notifier(void *priv) * having to worry about starvation. 
*/ -void drm_idlelock_take(drm_lock_data_t *lock_data) +void drm_idlelock_take(struct drm_lock_data *lock_data) { int ret = 0; @@ -370,7 +370,7 @@ void drm_idlelock_take(drm_lock_data_t *lock_data) } EXPORT_SYMBOL(drm_idlelock_take); -void drm_idlelock_release(drm_lock_data_t *lock_data) +void drm_idlelock_release(struct drm_lock_data *lock_data) { unsigned int old, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index b1423c12..f68a3a3e 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -38,9 +38,9 @@ static struct { spinlock_t lock; - drm_u64_t cur_used; - drm_u64_t low_threshold; - drm_u64_t high_threshold; + uint64_t cur_used; + uint64_t low_threshold; + uint64_t high_threshold; } drm_memctl = { .lock = SPIN_LOCK_UNLOCKED }; @@ -82,9 +82,9 @@ void drm_free_memctl(size_t size) } EXPORT_SYMBOL(drm_free_memctl); -void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold) +void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold) { spin_lock(&drm_memctl.lock); *cur_used = drm_memctl.cur_used; @@ -214,7 +214,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; @@ -258,12 +258,12 @@ static void *agp_remap(unsigned long offset, unsigned long size, /** Wrapper around agp_allocate_memory() */ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(pages, type); } #else -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(dev->agp->bridge, pages, type); } @@ -289,7 +289,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) #else /* __OS_HAS_AGP*/ static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { return NULL; } diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index 2caf596b..cf0d92fa 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -44,26 +44,26 @@ #include "drmP.h" #include <linux/slab.h> -unsigned long drm_mm_tail_space(drm_mm_t *mm) +unsigned long drm_mm_tail_space(struct drm_mm *mm) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return 0; return entry->size; } -int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return -ENOMEM; @@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) } -static int drm_mm_create_tail_node(drm_mm_t *mm, +static int drm_mm_create_tail_node(struct drm_mm *mm, unsigned long start, unsigned long size) { - drm_mm_node_t *child; + struct 
drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -98,13 +98,13 @@ static int drm_mm_create_tail_node(drm_mm_t *mm, } -int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) { return drm_mm_create_tail_node(mm, entry->start + entry->size, size); } @@ -112,12 +112,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) return 0; } -static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, +static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -137,12 +137,12 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, return child; } -drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, +struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment) { - drm_mm_node_t *align_splitoff = NULL; - drm_mm_node_t *child; + struct drm_mm_node *align_splitoff = NULL; + struct drm_mm_node *child; unsigned tmp = 0; if (alignment) @@ -173,26 +173,26 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, * Otherwise add to the free stack. */ -void drm_mm_put_block(drm_mm_node_t * cur) +void drm_mm_put_block(struct drm_mm_node * cur) { - drm_mm_t *mm = cur->mm; + struct drm_mm *mm = cur->mm; struct list_head *cur_head = &cur->ml_entry; struct list_head *root_head = &mm->ml_entry; - drm_mm_node_t *prev_node = NULL; - drm_mm_node_t *next_node; + struct drm_mm_node *prev_node = NULL; + struct drm_mm_node *next_node; int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry); + prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry); + next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; @@ -217,14 +217,14 @@ void drm_mm_put_block(drm_mm_node_t * cur) } EXPORT_SYMBOL(drm_mm_put_block); -drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, +struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, unsigned long size, unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; - drm_mm_node_t *entry; - drm_mm_node_t *best; + struct drm_mm_node *entry; + struct drm_mm_node *best; unsigned long best_size; unsigned wasted; @@ -232,7 +232,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, best_size = ~0UL; list_for_each(list, free_stack) { - entry = list_entry(list, drm_mm_node_t, fl_entry); + entry = list_entry(list, struct drm_mm_node, fl_entry); wasted = 0; if (entry->size < size) @@ -258,14 +258,14 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, return best; } -int drm_mm_clean(drm_mm_t * mm) +int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->ml_entry; 
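
Note: the drm_mm.c hunks around here rename drm_mm_t/drm_mm_node_t but leave the range allocator's behaviour alone: drm_mm_search_free() picks a free node large enough for the request, drm_mm_get_block() carves the allocation out of it, and drm_mm_put_block() merges the range back into its neighbours on free. A sketch of one allocate/free round trip, assuming the caller holds whatever lock the surrounding code requires; the 1024-unit range and 16-unit alignment are arbitrary illustration values (init and takedown appear in the next hunk).

    static int drm_mm_example(struct drm_mm *mm)
    {
            struct drm_mm_node *node;
            int ret;

            ret = drm_mm_init(mm, 0, 1024);            /* manage 1024 units at offset 0 */
            if (ret)
                    return ret;

            node = drm_mm_search_free(mm, 256, 16, 1); /* 256 units, 16-aligned, best fit */
            if (node)
                    node = drm_mm_get_block(node, 256, 16);
            if (node)
                    drm_mm_put_block(node);            /* give the range back */

            drm_mm_takedown(mm);
            return 0;
    }
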
return (head->next->next == head); } -int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) +int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); @@ -275,12 +275,12 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) EXPORT_SYMBOL(drm_mm_init); -void drm_mm_takedown(drm_mm_t * mm) +void drm_mm_takedown(struct drm_mm * mm) { struct list_head *bnode = mm->fl_entry.next; - drm_mm_node_t *entry; + struct drm_mm_node *entry; - entry = list_entry(bnode, drm_mm_node_t, fl_entry); + entry = list_entry(bnode, struct drm_mm_node, fl_entry); if (entry->ml_entry.next != &mm->ml_entry || entry->fl_entry.next != &mm->fl_entry) { diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 567a7d2b..3d866333 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,10 +30,10 @@ #include "drmP.h" -int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -51,12 +51,12 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, return 0; } -drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) +struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { - drm_device_t *dev = priv->head->dev; - drm_hash_item_t *hash; + struct drm_device *dev = priv->head->dev; + struct drm_hash_item *hash; int ret; - drm_user_object_t *item; + struct drm_user_object *item; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -64,10 +64,10 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) if (ret) { return NULL; } - item = drm_hash_entry(hash, drm_user_object_t, hash); + item = drm_hash_entry(hash, struct drm_user_object, hash); if (priv != item->owner) { - drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE]; + struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; ret = drm_ht_find_item(ht, (unsigned long)item, &hash); if (ret) { DRM_ERROR("Object not registered for usage\n"); @@ -77,9 +77,9 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) return item; } -static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) +static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; if (atomic_dec_and_test(&item->refcount)) { @@ -90,7 +90,7 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) } } -int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) +int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item) { DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -105,8 +105,8 @@ int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) return 0; } -static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, - drm_ref_t action) +static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, + enum drm_ref_type action) { int ret = 0; @@ -124,12 +124,12 @@ static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, return ret; } -int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, - drm_ref_t ref_action) +int 
drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, + enum drm_ref_type ref_action) { int ret = 0; - drm_ref_object_t *item; - drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + struct drm_ref_object *item; + struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); if (!referenced_object->shareable && priv != referenced_object->owner) { @@ -181,11 +181,11 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, return ret; } -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, - drm_user_object_t * referenced_object, - drm_ref_t ref_action) +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, + enum drm_ref_type ref_action) { - drm_hash_item_t *hash; + struct drm_hash_item *hash; int ret; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -194,32 +194,32 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, if (ret) return NULL; - return drm_hash_entry(hash, drm_ref_object_t, hash); + return drm_hash_entry(hash, struct drm_ref_object, hash); } -static void drm_remove_other_references(drm_file_t * priv, - drm_user_object_t * ro) +static void drm_remove_other_references(struct drm_file * priv, + struct drm_user_object * ro) { int i; - drm_open_hash_t *ht; - drm_hash_item_t *hash; - drm_ref_object_t *item; + struct drm_open_hash *ht; + struct drm_hash_item *hash; + struct drm_ref_object *item; for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { ht = &priv->refd_object_hash[i]; while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { - item = drm_hash_entry(hash, drm_ref_object_t, hash); + item = drm_hash_entry(hash, struct drm_ref_object, hash); drm_remove_ref_object(priv, item); } } } -void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) +void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) { int ret; - drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; - drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; - drm_ref_t unref_action; + struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; + struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; + enum drm_ref_type unref_action; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); unref_action = item->unref_action; @@ -244,12 +244,12 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) } -int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type, drm_user_object_t ** object) +int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type, struct drm_user_object ** object) { - drm_device_t *dev = priv->head->dev; - drm_user_object_t *uo; - drm_hash_item_t *hash; + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_hash_item *hash; int ret; mutex_lock(&dev->struct_mutex); @@ -258,7 +258,7 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, DRM_ERROR("Could not find user object to reference.\n"); goto out_err; } - uo = drm_hash_entry(hash, drm_user_object_t, hash); + uo = drm_hash_entry(hash, struct drm_user_object, hash); if (uo->type != type) { ret = -EINVAL; goto out_err; @@ -274,12 +274,12 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, return ret; } -int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type) +int 
drm_user_object_unref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type) { - drm_device_t *dev = priv->head->dev; - drm_user_object_t *uo; - drm_ref_object_t *ro; + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_ref_object *ro; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index f82d6628..f792dc84 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -39,14 +39,14 @@ struct drm_device; #define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef enum { +enum drm_object_type { drm_fence_type, drm_buffer_type, drm_ttm_type /* * Add other user space object types here. */ -} drm_object_type_t; +}; /* * A user object is a structure that helps the drm give out user handles @@ -55,20 +55,20 @@ typedef enum { * Designed to be accessible using a user space 32-bit handle. */ -typedef struct drm_user_object { - drm_hash_item_t hash; +struct drm_user_object { + struct drm_hash_item hash; struct list_head list; - drm_object_type_t type; + enum drm_object_type type; atomic_t refcount; int shareable; - drm_file_t *owner; - void (*ref_struct_locked) (drm_file_t * priv, + struct drm_file *owner; + void (*ref_struct_locked) (struct drm_file * priv, struct drm_user_object * obj, - drm_ref_t ref_action); - void (*unref) (drm_file_t * priv, struct drm_user_object * obj, - drm_ref_t unref_action); - void (*remove) (drm_file_t * priv, struct drm_user_object * obj); -} drm_user_object_t; + enum drm_ref_type ref_action); + void (*unref) (struct drm_file * priv, struct drm_user_object * obj, + enum drm_ref_type unref_action); + void (*remove) (struct drm_file * priv, struct drm_user_object * obj); +}; /* * A ref object is a structure which is used to @@ -77,24 +77,24 @@ typedef struct drm_user_object { * process exits. Designed to be accessible using a pointer to the _user_ object. */ -typedef struct drm_ref_object { - drm_hash_item_t hash; +struct drm_ref_object { + struct drm_hash_item hash; struct list_head list; atomic_t refcount; - drm_ref_t unref_action; -} drm_ref_object_t; + enum drm_ref_type unref_action; +}; /** * Must be called with the struct_mutex held. */ -extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable); /** * Must be called with the struct_mutex held. */ -extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, +extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key); /* @@ -104,23 +104,23 @@ extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. */ -extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item); +extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); /* * Must be called with the struct_mutex held. May temporarily release it. */ -extern int drm_add_ref_object(drm_file_t * priv, - drm_user_object_t * referenced_object, - drm_ref_t ref_action); +extern int drm_add_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. 
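
Note: these drm_objects.h declarations are what the fence ioctls earlier in the diff build on: a kernel structure embeds a struct drm_user_object, drm_add_user_object() hashes it, and the 32-bit handle returned to userspace is simply the hash key (as in drm_fence_buffers_ioctl above, which sets arg.handle = fence->base.hash.key). A sketch of that export step for a hypothetical object type, assuming the caller holds dev->struct_mutex as the comments require.

    struct my_exported_object {
            struct drm_user_object base;    /* embedded; the handle comes from this */
            int payload;
    };

    /* Hypothetical: hand a kernel object out to userspace as a 32-bit handle. */
    static int my_export(struct drm_file *priv, struct my_exported_object *obj,
                         int shareable, uint32_t *handle)
    {
            int ret;

            ret = drm_add_user_object(priv, &obj->base, shareable);
            if (ret)
                    return ret;

            *handle = obj->base.hash.key;   /* what the ioctls copy into arg.handle */
            return 0;
    }
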
*/ -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, - drm_user_object_t * referenced_object, - drm_ref_t ref_action); +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. * If "item" has been obtained by a call to drm_lookup_ref_object. You may not @@ -128,19 +128,19 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. */ -extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item); -extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type, - drm_user_object_t ** object); -extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type); +extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item); +extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type, + struct drm_user_object ** object); +extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type); /*************************************************** * Fence objects. (drm_fence.c) */ -typedef struct drm_fence_object { - drm_user_object_t base; +struct drm_fence_object { + struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -156,29 +156,29 @@ typedef struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; -} drm_fence_object_t; +}; #define _DRM_FENCE_CLASSES 8 #define _DRM_FENCE_TYPE_EXE 0x00 -typedef struct drm_fence_class_manager { +struct drm_fence_class_manager { struct list_head ring; uint32_t pending_flush; wait_queue_head_t fence_queue; int pending_exe_flush; uint32_t last_exe_flush; uint32_t exe_flush_sequence; -} drm_fence_class_manager_t; +}; -typedef struct drm_fence_manager { +struct drm_fence_manager { int initialized; rwlock_t lock; - drm_fence_class_manager_t class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; -} drm_fence_manager_t; +}; -typedef struct drm_fence_driver { +struct drm_fence_driver { uint32_t num_classes; uint32_t wrap_diff; uint32_t flush_diff; @@ -189,7 +189,7 @@ typedef struct drm_fence_driver { int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); void (*poke_flush) (struct drm_device * dev, uint32_t class); -} drm_fence_driver_t; +}; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, uint32_t sequence, uint32_t type); @@ -197,23 +197,31 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, +extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence); -extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** 
fence); extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, struct drm_fence_object *src); -extern int drm_fence_object_wait(drm_fence_object_t * fence, +extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, - drm_fence_object_t ** c_fence); -extern int drm_fence_add_user_object(drm_file_t * priv, - drm_fence_object_t * fence, int shareable); -extern int drm_fence_ioctl(DRM_IOCTL_ARGS); - + struct drm_fence_object ** c_fence); +extern int drm_fence_add_user_object(struct drm_file * priv, + struct drm_fence_object * fence, int shareable); + +extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); /************************************************** *TTMs */ @@ -235,7 +243,7 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARGS); #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 struct drm_ttm_backend; -typedef struct drm_ttm_backend_func { +struct drm_ttm_backend_func { int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); int (*populate) (struct drm_ttm_backend * backend, unsigned long num_pages, struct page ** pages); @@ -244,16 +252,16 @@ typedef struct drm_ttm_backend_func { unsigned long offset, int cached); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); -} drm_ttm_backend_func_t; +}; -typedef struct drm_ttm_backend { +struct drm_ttm_backend { uint32_t flags; int mem_type; - drm_ttm_backend_func_t *func; -} drm_ttm_backend_t; + struct drm_ttm_backend_func *func; +}; -typedef struct drm_ttm { +struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long num_pages; @@ -262,7 +270,7 @@ typedef struct drm_ttm { struct drm_device *dev; int destroy; uint32_t mapping_offset; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; enum { ttm_bound, ttm_evicted, @@ -270,14 +278,14 @@ typedef struct drm_ttm { ttm_unpopulated, } state; -} drm_ttm_t; +}; -extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); -extern void drm_ttm_unbind(drm_ttm_t * ttm); -extern void drm_ttm_evict(drm_ttm_t * ttm); -extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); -extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); +extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); +extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset); +extern void drm_ttm_unbind(struct drm_ttm * ttm); +extern void drm_ttm_evict(struct drm_ttm * ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); /* * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, @@ -285,7 +293,7 @@ extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); * when the last vma exits. 
*/ -extern int drm_destroy_ttm(drm_ttm_t * ttm); +extern int drm_destroy_ttm(struct drm_ttm * ttm); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -308,19 +316,19 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); * Buffer objects. (drm_bo.c, drm_bo_move.c) */ -typedef struct drm_bo_mem_reg { - drm_mm_node_t *mm_node; +struct drm_bo_mem_reg { + struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; uint32_t page_alignment; uint32_t mem_type; - uint32_t flags; - uint32_t mask; -} drm_bo_mem_reg_t; + uint64_t flags; + uint64_t mask; +}; -typedef struct drm_buffer_object { +struct drm_buffer_object { struct drm_device *dev; - drm_user_object_t base; + struct drm_user_object base; /* * If there is a possibility that the usage variable is zero, @@ -329,30 +337,30 @@ typedef struct drm_buffer_object { atomic_t usage; unsigned long buffer_start; - drm_bo_type_t type; + enum drm_bo_type type; unsigned long offset; atomic_t mapped; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; struct list_head lru; struct list_head ddestroy; uint32_t fence_type; uint32_t fence_class; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; /* For pinned buffers */ - drm_mm_node_t *pinned_node; + struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; /* For vm */ - drm_ttm_t *ttm; - drm_map_list_t map_list; + struct drm_ttm *ttm; + struct drm_map_list map_list; uint32_t memory_type; unsigned long bus_offset; uint32_t vm_flags; @@ -364,15 +372,15 @@ typedef struct drm_buffer_object { struct list_head p_mm_list; #endif -} drm_buffer_object_t; +}; #define _DRM_BO_FLAG_UNFENCED 0x00000001 #define _DRM_BO_FLAG_EVICTED 0x00000002 -typedef struct drm_mem_type_manager { +struct drm_mem_type_manager { int has_type; int use_type; - drm_mm_t manager; + struct drm_mm manager; struct list_head lru; struct list_head pinned; uint32_t flags; @@ -380,7 +388,7 @@ typedef struct drm_mem_type_manager { unsigned long io_offset; unsigned long io_size; void *io_addr; -} drm_mem_type_manager_t; +}; #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ @@ -390,13 +398,13 @@ typedef struct drm_mem_type_manager { #define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ -typedef struct drm_buffer_manager { +struct drm_buffer_manager { struct mutex init_mutex; struct mutex evict_mutex; int nice_mode; int initialized; - drm_file_t *last_to_validate; - drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; + struct drm_file *last_to_validate; + struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) @@ -407,51 +415,64 @@ typedef struct drm_buffer_manager { uint32_t fence_type; unsigned long cur_pages; atomic_t count; -} drm_buffer_manager_t; +}; -typedef struct drm_bo_driver { +struct drm_bo_driver { const uint32_t *mem_type_prio; const uint32_t *mem_busy_prio; uint32_t num_mem_type_prio; uint32_t num_mem_busy_prio; - drm_ttm_backend_t *(*create_ttm_backend_entry) + struct drm_ttm_backend *(*create_ttm_backend_entry) (struct drm_device * dev); - int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type); - int (*invalidate_caches) (struct drm_device * dev, uint32_t flags); + int (*fence_type) 
(struct drm_buffer_object *bo, uint32_t * type); + int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man); + struct drm_mem_type_manager * man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); int (*move) (struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem); -} drm_bo_driver_t; +}; /* * buffer objects (drm_bo.c) */ -extern int drm_bo_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_create_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_map_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_reference_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_info_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_op_ioctl(DRM_IOCTL_ARGS); + + extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size); -extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); -extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); -extern int drm_fence_buffer_objects(drm_file_t * priv, +extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); +extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence); -extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); -extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); -extern int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait); -extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +extern int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); /* @@ -459,18 +480,18 @@ extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * drm_bo_move.c */ -extern int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_memcpy(drm_buffer_object_t * bo, +extern int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object * bo, int evict, - int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, + int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, uint32_t 
fence_flags, - drm_bo_mem_reg_t * new_mem); + struct drm_bo_mem_reg * new_mem); #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 2ea105c5..9d0d3f69 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -52,8 +52,8 @@ /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() /** DRM device local declaration */ -#define DRM_DEVICE drm_file_t *priv = filp->private_data; \ - drm_device_t *dev = priv->head->dev +#define DRM_DEVICE struct drm_file *priv = filp->private_data; \ + struct drm_device *dev = priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c index 76252204..a608eed3 100644 --- a/linux-core/drm_pci.c +++ b/linux-core/drm_pci.c @@ -47,7 +47,7 @@ /** * \brief Allocate a PCI consistent memory block, for DMA. */ -drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, +drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, dma_addr_t maxaddr) { drm_dma_handle_t *dmah; @@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc); * * This function is for internal use in the Linux-specific DRM core code. */ -void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { unsigned long addr; size_t sz; @@ -167,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) /** * \brief Free a PCI consistent memory block */ -void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { __drm_pci_free(dev, dmah); kfree(dmah); diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index e93a0406..08bf99d6 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -90,7 +90,7 @@ static struct drm_proc_list { * "/proc/dri/%minor%/", and each entry in proc_list as * "/proc/dri/%minor%/%name%". 
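With drm_bo_mem_reg and the move helpers above now plain structs, a driver's move hook looks the same as before apart from the types. A sketch of the simplest possible hook for struct drm_bo_driver — the function name is invented and no accelerated path is implemented; a real driver would blit between the two placements and then call drm_bo_move_accel_cleanup() with the fence class/type/flags it emitted:

static int example_bo_move(struct drm_buffer_object *bo,
                           int evict, int no_wait,
                           struct drm_bo_mem_reg *new_mem)
{
        /*
         * bo->mem describes the current placement, *new_mem the target.
         * Without hardware acceleration, let the core copy the pages.
         */
        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}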
*/ -int drm_proc_init(drm_device_t * dev, int minor, +int drm_proc_init(struct drm_device * dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root) { struct proc_dir_entry *ent; @@ -165,7 +165,7 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root, static int drm_name_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; if (offset > DRM_PROC_LIMIT) { @@ -207,10 +207,10 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, static int drm__vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_map_t *map; - drm_map_list_t *r_list; + struct drm_map *map; + struct drm_map_list *r_list; /* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, @@ -264,7 +264,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, static int drm_vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -287,10 +287,10 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request, static int drm__queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; int i; - drm_queue_t *q; + struct drm_queue *q; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -337,7 +337,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset, static int drm_queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -360,9 +360,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request, static int drm__bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma || offset > DRM_PROC_LIMIT) { @@ -409,7 +409,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request, static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -432,13 +432,13 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request, static int drm__objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_buffer_manager_t *bm = &dev->bm; - drm_fence_manager_t *fm = &dev->fm; - drm_u64_t used_mem; - drm_u64_t low_mem; - drm_u64_t high_mem; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_fence_manager *fm = &dev->fm; + uint64_t used_mem; + uint64_t low_mem; + uint64_t high_mem; if (offset > DRM_PROC_LIMIT) { @@ -496,7 +496,7 @@ static int drm__objects_info(char 
*buf, char **start, off_t offset, int request, static int drm_objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -519,9 +519,9 @@ static int drm_objects_info(char *buf, char **start, off_t offset, int request, static int drm__clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_file_t *priv; + struct drm_file *priv; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -552,7 +552,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset, static int drm_clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -566,9 +566,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset, static int drm__vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_vma_entry_t *pt; + struct drm_vma_entry *pt; struct vm_area_struct *vma; #if defined(__i386__) unsigned int pgprot; @@ -625,7 +625,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index c0d6db24..7c13610d 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -36,7 +36,7 @@ #define DEBUG_SCATTER 0 -void drm_sg_cleanup(drm_sg_mem_t * entry) +void drm_sg_cleanup(struct drm_sg_mem *entry) { struct page *page; int i; @@ -63,9 +63,9 @@ EXPORT_SYMBOL(drm_sg_cleanup); # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) +int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) { - drm_sg_mem_t *entry; + struct drm_sg_mem *entry; unsigned long pages, i, j; DRM_DEBUG("%s\n", __FUNCTION__); @@ -190,9 +190,9 @@ EXPORT_SYMBOL(drm_sg_alloc); int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_scatter_gather_t __user *argp = (void __user *)arg; - drm_scatter_gather_t request; + struct drm_file *priv = filp->private_data; + struct drm_scatter_gather __user *argp = (void __user *)arg; + struct drm_scatter_gather request; int ret; if (copy_from_user(&request, argp, sizeof(request))) @@ -214,16 +214,16 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t request; - drm_sg_mem_t *entry; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_scatter_gather request; + struct drm_sg_mem *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; if (copy_from_user(&request, - (drm_scatter_gather_t __user *) 
arg, + (struct drm_scatter_gather __user *) arg, sizeof(request))) return -EFAULT; diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index e15db6d6..118e82ae 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -38,13 +38,13 @@ #include "drm_sman.h" -typedef struct drm_owner_item { - drm_hash_item_t owner_hash; +struct drm_owner_item { + struct drm_hash_item owner_hash; struct list_head sman_list; struct list_head mem_blocks; -} drm_owner_item_t; +}; -void drm_sman_takedown(drm_sman_t * sman) +void drm_sman_takedown(struct drm_sman * sman) { drm_ht_remove(&sman->user_hash_tab); drm_ht_remove(&sman->owner_hash_tab); @@ -56,12 +56,12 @@ void drm_sman_takedown(drm_sman_t * sman) EXPORT_SYMBOL(drm_sman_takedown); int -drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order) { int ret = 0; - sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm), + sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), DRM_MEM_MM); if (!sman->mm) { ret = -ENOMEM; @@ -88,8 +88,8 @@ EXPORT_SYMBOL(drm_sman_init); static void *drm_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) { - drm_mm_t *mm = (drm_mm_t *) private; - drm_mm_node_t *tmp; + struct drm_mm *mm = (struct drm_mm *) private; + struct drm_mm_node *tmp; tmp = drm_mm_search_free(mm, size, alignment, 1); if (!tmp) { @@ -101,30 +101,30 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size, static void drm_sman_mm_free(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; drm_mm_put_block(node); } static void drm_sman_mm_destroy(void *private) { - drm_mm_t *mm = (drm_mm_t *) private; + struct drm_mm *mm = (struct drm_mm *) private; drm_mm_takedown(mm); drm_free(mm, sizeof(*mm), DRM_MEM_MM); } static unsigned long drm_sman_mm_offset(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; return node->start; } int -drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size) { - drm_sman_mm_t *sman_mm; - drm_mm_t *mm; + struct drm_sman_mm *sman_mm; + struct drm_mm *mm; int ret; BUG_ON(manager >= sman->num_managers); @@ -153,8 +153,8 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager, EXPORT_SYMBOL(drm_sman_set_range); int -drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, - drm_sman_mm_t * allocator) +drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, + struct drm_sman_mm * allocator) { BUG_ON(manager >= sman->num_managers); sman->mm[manager] = *allocator; @@ -163,16 +163,16 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, } EXPORT_SYMBOL(drm_sman_set_manager); -static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, +static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, unsigned long owner) { int ret; - drm_hash_item_t *owner_hash_item; - drm_owner_item_t *owner_item; + struct drm_hash_item *owner_hash_item; + struct drm_owner_item *owner_item; ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); if (!ret) { - return drm_hash_entry(owner_hash_item, drm_owner_item_t, + return drm_hash_entry(owner_hash_item, struct drm_owner_item, owner_hash); } @@ -194,14 +194,14 @@ out: return 
NULL; } -drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager, +struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, unsigned long size, unsigned alignment, unsigned long owner) { void *tmp; - drm_sman_mm_t *sman_mm; - drm_owner_item_t *owner_item; - drm_memblock_item_t *memblock; + struct drm_sman_mm *sman_mm; + struct drm_owner_item *owner_item; + struct drm_memblock_item *memblock; BUG_ON(manager >= sman->num_managers); @@ -246,9 +246,9 @@ out: EXPORT_SYMBOL(drm_sman_alloc); -static void drm_sman_free(drm_memblock_item_t *item) +static void drm_sman_free(struct drm_memblock_item *item) { - drm_sman_t *sman = item->sman; + struct drm_sman *sman = item->sman; list_del(&item->owner_list); drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); @@ -256,40 +256,40 @@ static void drm_sman_free(drm_memblock_item_t *item) drm_free(item, sizeof(*item), DRM_MEM_MM); } -int drm_sman_free_key(drm_sman_t *sman, unsigned int key) +int drm_sman_free_key(struct drm_sman *sman, unsigned int key) { - drm_hash_item_t *hash_item; - drm_memblock_item_t *memblock_item; + struct drm_hash_item *hash_item; + struct drm_memblock_item *memblock_item; if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) return -EINVAL; - memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash); + memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash); drm_sman_free(memblock_item); return 0; } EXPORT_SYMBOL(drm_sman_free_key); -static void drm_sman_remove_owner(drm_sman_t *sman, - drm_owner_item_t *owner_item) +static void drm_sman_remove_owner(struct drm_sman *sman, + struct drm_owner_item *owner_item) { list_del(&owner_item->sman_list); drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); } -int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) +int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; - drm_owner_item_t *owner_item; + struct drm_hash_item *hash_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return -1; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { drm_sman_remove_owner(sman, owner_item); return -1; @@ -300,10 +300,10 @@ int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_clean); -static void drm_sman_do_owner_cleanup(drm_sman_t *sman, - drm_owner_item_t *owner_item) +static void drm_sman_do_owner_cleanup(struct drm_sman *sman, + struct drm_owner_item *owner_item) { - drm_memblock_item_t *entry, *next; + struct drm_memblock_item *entry, *next; list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, owner_list) { @@ -312,28 +312,28 @@ static void drm_sman_do_owner_cleanup(drm_sman_t *sman, drm_sman_remove_owner(sman, owner_item); } -void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner) +void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; - drm_owner_item_t *owner_item; + struct drm_hash_item *hash_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, 
owner_hash); drm_sman_do_owner_cleanup(sman, owner_item); } EXPORT_SYMBOL(drm_sman_owner_cleanup); -void drm_sman_cleanup(drm_sman_t *sman) +void drm_sman_cleanup(struct drm_sman *sman) { - drm_owner_item_t *entry, *next; + struct drm_owner_item *entry, *next; unsigned int i; - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { drm_sman_do_owner_cleanup(sman, entry); diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h index ddc732a1..39a39fef 100644 --- a/linux-core/drm_sman.h +++ b/linux-core/drm_sman.h @@ -50,7 +50,7 @@ * for memory management. */ -typedef struct drm_sman_mm { +struct drm_sman_mm { /* private info. If allocated, needs to be destroyed by the destroy function */ void *private; @@ -74,30 +74,30 @@ typedef struct drm_sman_mm { "alloc" function */ unsigned long (*offset) (void *private, void *ref); -} drm_sman_mm_t; +}; -typedef struct drm_memblock_item { +struct drm_memblock_item { struct list_head owner_list; - drm_hash_item_t user_hash; + struct drm_hash_item user_hash; void *mm_info; - drm_sman_mm_t *mm; + struct drm_sman_mm *mm; struct drm_sman *sman; -} drm_memblock_item_t; +}; -typedef struct drm_sman { - drm_sman_mm_t *mm; +struct drm_sman { + struct drm_sman_mm *mm; int num_managers; - drm_open_hash_t owner_hash_tab; - drm_open_hash_t user_hash_tab; + struct drm_open_hash owner_hash_tab; + struct drm_open_hash user_hash_tab; struct list_head owner_items; -} drm_sman_t; +}; /* * Take down a memory manager. This function should only be called after a * successful init and after a call to drm_sman_cleanup. */ -extern void drm_sman_takedown(drm_sman_t * sman); +extern void drm_sman_takedown(struct drm_sman * sman); /* * Allocate structures for a manager. @@ -112,7 +112,7 @@ extern void drm_sman_takedown(drm_sman_t * sman); * */ -extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order); /* @@ -120,7 +120,7 @@ extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, * manager unless a customized allogator is used. */ -extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size); /* @@ -129,23 +129,23 @@ extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, * so it can be destroyed after this call. */ -extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger, - drm_sman_mm_t * allocator); +extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, + struct drm_sman_mm * allocator); /* * Allocate a memory block. Aligment is not implemented yet. */ -extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman, - unsigned int manager, - unsigned long size, - unsigned alignment, - unsigned long owner); +extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, + unsigned int manager, + unsigned long size, + unsigned alignment, + unsigned long owner); /* * Free a memory block identified by its user hash key. */ -extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); +extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); /* * returns 1 iff there are no stale memory blocks associated with this owner. @@ -154,7 +154,7 @@ extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); * resources associated with owner. 
*/ -extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); +extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with this owner. Note that this @@ -164,13 +164,13 @@ extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); * is not going to be referenced anymore. */ -extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner); +extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with the memory manager. * See idling above. */ -extern void drm_sman_cleanup(drm_sman_t * sman); +extern void drm_sman_cleanup(struct drm_sman * sman); #endif diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index b96408ab..eba6deed 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -50,11 +50,11 @@ MODULE_PARM_DESC(debug, "Enable debug output"); module_param_named(cards_limit, drm_cards_limit, int, 0444); module_param_named(debug, drm_debug, int, 0600); -drm_head_t **drm_heads; +struct drm_head **drm_heads; struct drm_sysfs_class *drm_class; struct proc_dir_entry *drm_proc_root; -static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev, +static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { @@ -160,9 +160,9 @@ error_out_unreg: * create the proc init entry via proc_init(). This routines assigns * minor numbers to secondary heads of multi-headed cards */ -static int drm_get_head(drm_device_t * dev, drm_head_t * head) +static int drm_get_head(struct drm_device * dev, struct drm_head * head) { - drm_head_t **heads = drm_heads; + struct drm_head **heads = drm_heads; int ret; int minor; @@ -171,7 +171,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) for (minor = 0; minor < drm_cards_limit; minor++, heads++) { if (!*heads) { - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = dev, .device = MKDEV(DRM_MAJOR, minor), .minor = minor, @@ -202,7 +202,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) err_g2: drm_proc_cleanup(minor, drm_proc_root, head->dev_root); err_g1: - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = NULL}; return ret; } @@ -221,7 +221,7 @@ err_g1: int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { - drm_device_t *dev; + struct drm_device *dev; int ret; DRM_DEBUG("\n"); @@ -282,7 +282,7 @@ EXPORT_SYMBOL(drm_get_dev); * "drm" data, otherwise unregisters the "drm" data, frees the dev list and * unregisters the character device. */ -int drm_put_dev(drm_device_t * dev) +int drm_put_dev(struct drm_device * dev) { DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); @@ -310,7 +310,7 @@ int drm_put_dev(drm_device_t * dev) * last minor released. 
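The sman conversion above is purely mechanical, so the established sis/via usage pattern carries over unchanged. A sketch under the new struct names, with every example_* identifier, the heap index and the hash-table orders invented for illustration:

struct example_private {
        struct drm_sman sman;           /* embedded by value, as sis/via do */
};

#define EXAMPLE_HEAP 0                  /* driver-chosen manager index */

static int example_heap_init(struct example_private *priv,
                             unsigned long start, unsigned long size)
{
        /* one manager; the user/owner hash orders are tuning knobs */
        int ret = drm_sman_init(&priv->sman, 1, 12, 8);
        if (ret)
                return ret;

        ret = drm_sman_set_range(&priv->sman, EXAMPLE_HEAP, start, size);
        if (ret)
                drm_sman_takedown(&priv->sman);
        return ret;
}

static struct drm_memblock_item *
example_heap_alloc(struct example_private *priv, unsigned long size,
                   unsigned long owner)
{
        /* alignment is not implemented yet, so pass 0 */
        return drm_sman_alloc(&priv->sman, EXAMPLE_HEAP, size, 0, owner);
}

static void example_heap_release(struct example_private *priv,
                                 unsigned long owner)
{
        /* owner is typically the file pointer cast to unsigned long */
        if (!drm_sman_owner_clean(&priv->sman, owner))
                drm_sman_owner_cleanup(&priv->sman, owner);
}

Freeing a single block by its user key goes through drm_sman_free_key(), and full teardown is drm_sman_cleanup() followed by drm_sman_takedown(), as the header comments above describe.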
* */ -int drm_put_head(drm_head_t * head) +int drm_put_head(struct drm_head * head) { int minor = head->minor; @@ -319,7 +319,7 @@ int drm_put_head(drm_head_t * head) drm_proc_cleanup(minor, drm_proc_root, head->dev_root); drm_sysfs_device_remove(head->dev_class); - *head = (drm_head_t){.dev = NULL}; + *head = (struct drm_head){.dev = NULL}; drm_heads[minor] = NULL; return 0; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 9b2f5dce..1090e719 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -123,7 +123,7 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs) static ssize_t show_dri(struct class_device *class_device, char *buf) { - drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev; + struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev; if (dev->driver->dri_library_name) return dev->driver->dri_library_name(dev, buf); return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); @@ -148,7 +148,7 @@ static struct class_device_attribute class_device_attrs[] = { * created with a call to drm_sysfs_create(). */ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head) + struct drm_head * head) { struct simple_dev *s_dev = NULL; int i, retval; diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 31503c9c..60c64cba 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -45,7 +45,7 @@ static void drm_ttm_cache_flush(void) * Use kmalloc if possible. Otherwise fall back to vmalloc. */ -static void ttm_alloc_pages(drm_ttm_t * ttm) +static void ttm_alloc_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -66,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t * ttm) } } -static void ttm_free_pages(drm_ttm_t * ttm) +static void ttm_free_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -105,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void) * for range of pages in a ttm. */ -static int drm_set_caching(drm_ttm_t * ttm, int noncached) +static int drm_set_caching(struct drm_ttm * ttm, int noncached) { int i; struct page **cur_page; @@ -142,12 +142,12 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached) * Free all resources associated with a ttm. */ -int drm_destroy_ttm(drm_ttm_t * ttm) +int drm_destroy_ttm(struct drm_ttm * ttm) { int i; struct page **cur_page; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return 0; @@ -159,7 +159,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) } if (ttm->pages) { - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) drm_set_caching(ttm, 0); @@ -191,10 +191,10 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return 0; } -struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) +struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) { struct page *p; - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; p = ttm->pages[index]; if (!p) { @@ -207,11 +207,11 @@ struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) return p; } -static int drm_ttm_populate(drm_ttm_t * ttm) +static int drm_ttm_populate(struct drm_ttm * ttm) { struct page *page; unsigned long i; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (ttm->state != ttm_unpopulated) return 0; @@ -231,10 +231,10 @@ static int drm_ttm_populate(drm_ttm_t * ttm) * Initialize a ttm. 
*/ -drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) +struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) { - drm_bo_driver_t *bo_driver = dev->driver->bo_driver; - drm_ttm_t *ttm; + struct drm_bo_driver *bo_driver = dev->driver->bo_driver; + struct drm_ttm *ttm; if (!bo_driver) return NULL; @@ -275,9 +275,9 @@ drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) * Unbind a ttm region from the aperture. */ -void drm_ttm_evict(drm_ttm_t * ttm) +void drm_ttm_evict(struct drm_ttm * ttm) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; int ret; if (ttm->state == ttm_bound) { @@ -288,11 +288,11 @@ void drm_ttm_evict(drm_ttm_t * ttm) ttm->state = ttm_evicted; } -void drm_ttm_fixup_caching(drm_ttm_t * ttm) +void drm_ttm_fixup_caching(struct drm_ttm * ttm) { if (ttm->state == ttm_evicted) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; if (be->func->needs_ub_cache_adjust(be)) { drm_set_caching(ttm, 0); } @@ -300,7 +300,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm) } } -void drm_ttm_unbind(drm_ttm_t * ttm) +void drm_ttm_unbind(struct drm_ttm * ttm) { if (ttm->state == ttm_bound) drm_ttm_evict(ttm); @@ -308,11 +308,11 @@ void drm_ttm_unbind(drm_ttm_t * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) +int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) { int ret = 0; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return -EINVAL; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 72d63c10..265a59d8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -85,11 +85,11 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; - drm_map_list_t *r_list; - drm_hash_item_t *hash; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map *map = NULL; + struct drm_map_list *r_list; + struct drm_hash_item *hash; /* * Find the right map @@ -103,7 +103,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) goto vm_nopage_error; - r_list = drm_hash_entry(hash, drm_map_list_t, hash); + r_list = drm_hash_entry(hash, struct drm_map_list, hash); map = r_list->map; if (map && map->type == _DRM_AGP) { @@ -172,7 +172,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; unsigned long offset; unsigned long i; struct page *page; @@ -203,11 +203,11 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, */ static void drm_vm_shm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; - drm_map_t *map; - drm_map_list_t *r_list; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; + struct drm_map *map; + struct drm_map_list *r_list; int found_maps = 0; 
DRM_DEBUG("0x%08lx,0x%08lx\n", @@ -285,9 +285,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; unsigned long offset; unsigned long page_nr; struct page *page; @@ -321,10 +321,10 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_sg_mem_t *entry = dev->sg; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_sg_mem *entry = dev->sg; unsigned long offset; unsigned long map_offset; unsigned long page_offset; @@ -418,9 +418,9 @@ static struct vm_operations_struct drm_vm_sg_ops = { */ static void drm_vm_open_locked(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *vma_entry; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *vma_entry; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -436,8 +436,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma) static void drm_vm_open(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; mutex_lock(&dev->struct_mutex); drm_vm_open_locked(vma); @@ -454,9 +454,9 @@ static void drm_vm_open(struct vm_area_struct *vma) */ static void drm_vm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -486,9 +486,9 @@ static void drm_vm_close(struct vm_area_struct *vma) */ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; - drm_device_dma_t *dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; + struct drm_device_dma *dma; unsigned long length = vma->vm_end - vma->vm_start; dev = priv->head->dev; @@ -524,7 +524,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -unsigned long drm_core_get_map_ofs(drm_map_t * map) +unsigned long drm_core_get_map_ofs(struct drm_map * map) { return map->offset; } @@ -555,11 +555,11 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); */ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map *map = NULL; unsigned long offset = 0; - drm_hash_item_t 
*hash; + struct drm_hash_item *hash; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); @@ -585,7 +585,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } - map = drm_hash_entry(hash, drm_map_list_t, hash)->map; + map = drm_hash_entry(hash, struct drm_map_list, hash)->map; if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) return -EPERM; @@ -676,8 +676,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) int drm_mmap(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -713,11 +713,11 @@ EXPORT_SYMBOL(drm_mmap); static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long address) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -766,7 +766,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -798,7 +798,7 @@ out_unlock: static void drm_bo_vm_open_locked(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; drm_vm_open_locked(vma); atomic_inc(&bo->usage); @@ -815,8 +815,8 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) static void drm_bo_vm_open(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + struct drm_device *dev = bo->dev; mutex_lock(&dev->struct_mutex); drm_bo_vm_open_locked(vma); @@ -831,8 +831,8 @@ static void drm_bo_vm_open(struct vm_area_struct *vma) static void drm_bo_vm_close(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + struct drm_device *dev = bo->dev; drm_vm_close(vma); if (bo) { diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 49379434..1e6d8cd3 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -46,9 +46,9 @@ #define I810_BUF_UNMAPPED 0 #define I810_BUF_MAPPED 1 -static inline void i810_print_status_page(drm_device_t * dev) +static inline void i810_print_status_page(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = dev->dev_private; u32 *temp = dev_priv->hw_status_page; int i; @@ -64,16 +64,16 @@ static inline void i810_print_status_page(drm_device_t * dev) } } -static drm_buf_t *i810_freelist_get(drm_device_t * dev) +static struct drm_buf 
*i810_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; int used; /* Linear search might not be the best solution */ for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; /* In use is already a pointer */ used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, @@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t * dev) * yet, the hardware updates in use for us once its on the ring buffer. */ -static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) +static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -106,10 +106,10 @@ static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; drm_i810_private_t *dev_priv; - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; lock_kernel(); @@ -139,10 +139,10 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(drm_buf_t * buf, struct file *filp) +static int i810_map_buffer(struct drm_buf * buf, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; const struct file_operations *old_fops; @@ -171,7 +171,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp) return retcode; } -static int i810_unmap_buffer(drm_buf_t * buf) +static int i810_unmap_buffer(struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; @@ -191,10 +191,10 @@ static int i810_unmap_buffer(drm_buf_t * buf) return retcode; } -static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, +static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, struct file *filp) { - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; int retcode = 0; @@ -221,9 +221,9 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, return retcode; } -static int i810_dma_cleanup(drm_device_t * dev) +static int i810_dma_cleanup(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -252,7 +252,7 @@ static int i810_dma_cleanup(drm_device_t * dev) dev->dev_private = NULL; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; if (buf_priv->kernel_virtual && buf->total) @@ -262,7 +262,7 @@ static int i810_dma_cleanup(drm_device_t * dev) return 0; } -static int i810_wait_ring(drm_device_t * dev, int n) +static int i810_wait_ring(struct drm_device * dev, int n) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -295,7 +295,7 @@ static int i810_wait_ring(drm_device_t * dev, int n) return iters; } -static void i810_kernel_lost_context(drm_device_t * dev) +static void 
i810_kernel_lost_context(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -307,9 +307,9 @@ static void i810_kernel_lost_context(drm_device_t * dev) ring->space += ring->Size; } -static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) +static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int my_idx = 24; u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); int i; @@ -320,7 +320,7 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) } for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; buf_priv->in_use = hw_status++; @@ -342,11 +342,11 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) return 0; } -static int i810_dma_initialize(drm_device_t * dev, +static int i810_dma_initialize(struct drm_device * dev, drm_i810_private_t * dev_priv, drm_i810_init_t * init) { - drm_map_list_t *r_list; + struct drm_map_list *r_list; memset(dev_priv, 0, sizeof(drm_i810_private_t)); list_for_each_entry(r_list, &dev->maplist, head) { @@ -495,8 +495,8 @@ static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) static int i810_dma_init(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv; drm_i810_init_t init; int retcode = 0; @@ -553,7 +553,7 @@ static int i810_dma_init(struct inode *inode, struct file *filp, * Use 'volatile' & local var tmp to force the emitted values to be * identical to the verified ones. */ -static void i810EmitContextVerified(drm_device_t * dev, +static void i810EmitContextVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -586,7 +586,7 @@ static void i810EmitContextVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) +static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -619,7 +619,7 @@ static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) /* Need to do some additional checking when setting the dest buffer. 
*/ -static void i810EmitDestVerified(drm_device_t * dev, +static void i810EmitDestVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -654,7 +654,7 @@ static void i810EmitDestVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitState(drm_device_t * dev) +static void i810EmitState(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -685,14 +685,14 @@ static void i810EmitState(drm_device_t * dev) /* need to verify */ -static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, +static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, unsigned int clear_color, unsigned int clear_zval) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -760,12 +760,12 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, } } -static void i810_dma_dispatch_swap(drm_device_t * dev) +static void i810_dma_dispatch_swap(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -806,13 +806,13 @@ static void i810_dma_dispatch_swap(drm_device_t * dev) } } -static void i810_dma_dispatch_vertex(drm_device_t * dev, - drm_buf_t * buf, int discard, int used) +static void i810_dma_dispatch_vertex(struct drm_device * dev, + struct drm_buf * buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_clip_rect_t *box = sarea_priv->boxes; + struct drm_clip_rect *box = sarea_priv->boxes; int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; @@ -886,7 +886,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev, } } -static void i810_dma_dispatch_flip(drm_device_t * dev) +static void i810_dma_dispatch_flip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; int pitch = dev_priv->pitch; @@ -933,7 +933,7 @@ static void i810_dma_dispatch_flip(drm_device_t * dev) } -static void i810_dma_quiescent(drm_device_t * dev) +static void i810_dma_quiescent(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -952,10 +952,10 @@ static void i810_dma_quiescent(drm_device_t * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(drm_device_t * dev) +static int i810_flush_queue(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, ret = 0; RING_LOCALS; @@ -971,7 +971,7 @@ static int i810_flush_queue(drm_device_t * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, @@ -987,9 +987,9 @@ static int i810_flush_queue(drm_device_t * dev) } /* Must be called with the 
lock held */ -static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) +static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) @@ -1002,7 +1002,7 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) i810_flush_queue(dev); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; if (buf->filp == filp && buf_priv) { @@ -1020,8 +1020,8 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) static int i810_flush_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1032,9 +1032,9 @@ static int i810_flush_ioctl(struct inode *inode, struct file *filp, static int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1068,8 +1068,8 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, static int i810_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_clear_t clear; if (copy_from_user @@ -1091,8 +1091,8 @@ static int i810_clear_bufs(struct inode *inode, struct file *filp, static int i810_swap_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; DRM_DEBUG("i810_swap_bufs\n"); @@ -1105,8 +1105,8 @@ static int i810_swap_bufs(struct inode *inode, struct file *filp, static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1119,8 +1119,8 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode = 0; drm_i810_dma_t d; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; @@ -1140,7 +1140,7 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("i810_dma: %d returning %d, granted = 
%d\n", current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d))) + if (copy_to_user((void __user *) arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1161,7 +1161,7 @@ static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1224,9 +1224,9 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, static int i810_dma_mc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1255,8 +1255,8 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, static int i810_rstatus(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); @@ -1265,8 +1265,8 @@ static int i810_rstatus(struct inode *inode, struct file *filp, static int i810_ov0_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; drm_i810_overlay_t data; @@ -1281,8 +1281,8 @@ static int i810_ov0_info(struct inode *inode, struct file *filp, static int i810_fstatus(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1292,8 +1292,8 @@ static int i810_fstatus(struct inode *inode, struct file *filp, static int i810_ov0_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1305,7 +1305,7 @@ static int i810_ov0_flip(struct inode *inode, struct file *filp, /* Not sure why this isn't set all the time: */ -static void i810_do_init_pageflip(drm_device_t * dev) +static void i810_do_init_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1315,7 +1315,7 @@ static void i810_do_init_pageflip(drm_device_t * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static int 
i810_do_cleanup_pageflip(drm_device_t * dev) +static int i810_do_cleanup_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1330,8 +1330,8 @@ static int i810_do_cleanup_pageflip(drm_device_t * dev) static int i810_flip_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1345,7 +1345,7 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp, return 0; } -int i810_driver_load(drm_device_t *dev, unsigned long flags) +int i810_driver_load(struct drm_device *dev, unsigned long flags) { /* i810 has 4 more counters */ dev->counters += 4; @@ -1357,12 +1357,12 @@ int i810_driver_load(drm_device_t *dev, unsigned long flags) return 0; } -void i810_driver_lastclose(drm_device_t * dev) +void i810_driver_lastclose(struct drm_device * dev) { i810_dma_cleanup(dev); } -void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) +void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1372,18 +1372,18 @@ void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { i810_reclaim_buffers(dev, filp); } -int i810_driver_dma_quiescent(drm_device_t * dev) +int i810_driver_dma_quiescent(struct drm_device * dev) { i810_dma_quiescent(dev); return 0; } -drm_ioctl_desc_t i810_ioctls[] = { +struct drm_ioctl_desc i810_ioctls[] = { [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, @@ -1414,7 +1414,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); * \returns * A value of 1 is always retured to indictate every i810 is AGP. */ -int i810_driver_device_is_agp(drm_device_t * dev) +int i810_driver_device_is_agp(struct drm_device * dev) { return 1; } diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index beec4a2a..db59550d 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -163,7 +163,7 @@ typedef struct _drm_i810_sarea { unsigned int dirty; unsigned int nbox; - drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS]; /* Maintain an LRU of contiguous regions of texture space. 
If * you think you own a region of texture memory, and it has an diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 69d79499..3627d774 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -77,8 +77,8 @@ typedef struct _drm_i810_ring_buffer { } drm_i810_ring_buffer_t; typedef struct drm_i810_private { - drm_map_t *sarea_map; - drm_map_t *mmio_map; + struct drm_map *sarea_map; + struct drm_map *mmio_map; drm_i810_sarea_t *sarea_priv; drm_i810_ring_buffer_t ring; @@ -88,7 +88,7 @@ typedef struct drm_i810_private { dma_addr_t dma_status_page; - drm_buf_t *mmap_buffer; + struct drm_buf *mmap_buffer; u32 front_di1, back_di1, zi1; @@ -115,17 +115,17 @@ typedef struct drm_i810_private { } drm_i810_private_t; /* i810_dma.c */ -extern int i810_driver_dma_quiescent(drm_device_t * dev); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, +extern int i810_driver_dma_quiescent(struct drm_device * dev); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp); extern int i810_driver_load(struct drm_device *, unsigned long flags); -extern void i810_driver_lastclose(drm_device_t * dev); -extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, +extern void i810_driver_lastclose(struct drm_device * dev); +extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp); -extern int i810_driver_device_is_agp(drm_device_t * dev); +extern int i810_driver_device_is_agp(struct drm_device * dev); -extern drm_ioctl_desc_t i810_ioctls[]; +extern struct drm_ioctl_desc i810_ioctls[]; extern int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 8589f467..bf500cc6 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,14 +33,13 @@ #include "i915_drm.h" #include "i915_drv.h" -drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type) { - *class = 0; if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; else @@ -48,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) return 0; } -int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) +int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Only emit once per batchbuffer submission. 
@@ -64,8 +63,8 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) return i915_emit_mi_flush(dev, flush_cmd); } -int i915_init_mem_type(drm_device_t * dev, uint32_t type, - drm_mem_type_manager_t * man) +int i915_init_mem_type(struct drm_device * dev, uint32_t type, + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -106,7 +105,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type, return 0; } -uint32_t i915_evict_mask(drm_buffer_object_t *bo) +uint32_t i915_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: @@ -117,7 +116,7 @@ uint32_t i915_evict_mask(drm_buffer_object_t *bo) } } -static void i915_emit_copy_blit(drm_device_t * dev, +static void i915_emit_copy_blit(struct drm_device * dev, uint32_t src_offset, uint32_t dst_offset, uint32_t pages, int direction) @@ -151,10 +150,10 @@ static void i915_emit_copy_blit(drm_device_t * dev, return; } -static int i915_move_blit(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_blit(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; int dir = 0; if ((old_mem->mem_type == new_mem->mem_type) && @@ -181,11 +180,11 @@ static int i915_move_blit(drm_buffer_object_t * bo, * then blit and subsequently move out again. */ -static int i915_move_flip(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_flip(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_device_t *dev = bo->dev; - drm_bo_mem_reg_t tmp_mem; + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg tmp_mem; int ret; tmp_mem = *new_mem; @@ -217,10 +216,10 @@ out_cleanup: return ret; } -int i915_move(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int i915_move(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 49437066..e337e1d2 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -39,7 +39,7 @@ static struct pci_device_id pciidlist[] = { }; #ifdef I915_HAVE_FENCE -static drm_fence_driver_t i915_fence_driver = { +static struct drm_fence_driver i915_fence_driver = { .num_classes = 1, .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), .flush_diff = (1U << (BREADCRUMB_BITS - 2)), @@ -55,7 +55,7 @@ static drm_fence_driver_t i915_fence_driver = { static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; -static drm_bo_driver_t i915_bo_driver = { +static struct drm_bo_driver i915_bo_driver = { .mem_type_prio = i915_mem_prios, .mem_busy_prio = i915_busy_prios, .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 00873485..6f0de2ca 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -38,12 +38,12 @@ * Implements an intel sync flush operation. 
*/ -static void i915_perform_flush(drm_device_t * dev) +static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[0]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[0]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t flush_flags = 0; uint32_t flush_sequence = 0; uint32_t i_status; @@ -109,9 +109,9 @@ static void i915_perform_flush(drm_device_t * dev) } -void i915_poke_flush(drm_device_t * dev, uint32_t class) +void i915_poke_flush(struct drm_device * dev, uint32_t class) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -119,7 +119,7 @@ void i915_poke_flush(drm_device_t * dev, uint32_t class) write_unlock_irqrestore(&fm->lock, flags); } -int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -135,16 +135,16 @@ int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, return 0; } -void i915_fence_handler(drm_device_t * dev) +void i915_fence_handler(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; write_lock(&fm->lock); i915_perform_flush(dev); write_unlock(&fm->lock); } -int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) +int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) { /* * We have an irq that tells us when we have a new breadcrumb. diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c index ef6f1e44..1eb6d9e6 100644 --- a/linux-core/mga_drv.c +++ b/linux-core/mga_drv.c @@ -36,7 +36,7 @@ #include "drm_pciids.h" -static int mga_driver_device_is_agp(drm_device_t * dev); +static int mga_driver_device_is_agp(struct drm_device * dev); static struct pci_device_id pciidlist[] = { mga_PCI_IDS @@ -127,7 +127,7 @@ MODULE_LICENSE("GPL and additional rights"); * \returns * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. 
*/ -static int mga_driver_device_is_agp(drm_device_t * dev) +static int mga_driver_device_is_agp(struct drm_device * dev) { const struct pci_dev * const pdev = dev->pdev; diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index ac030d89..6c73b0d3 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -32,7 +32,7 @@ static struct pci_device_id pciidlist[] = { nouveau_PCI_IDS }; -extern drm_ioctl_desc_t nouveau_ioctls[]; +extern struct drm_ioctl_desc nouveau_ioctls[]; extern int nouveau_max_ioctl; static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c new file mode 100644 index 00000000..a65317cd --- /dev/null +++ b/linux-core/nouveau_sgdma.c @@ -0,0 +1,318 @@ +#include "drmP.h" +#include "nouveau_drv.h" + +#define NV_CTXDMA_PAGE_SHIFT 12 +#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) +#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) + +struct nouveau_sgdma_be { + struct drm_ttm_backend backend; + struct drm_device *dev; + + int pages; + int pages_populated; + dma_addr_t *pagelist; + int is_bound; + + unsigned int pte_start; +}; + +static int +nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) +{ + return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); +} + +static int +nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, + struct page **pages) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int p, d, o; + + DRM_DEBUG("num_pages = %ld\n", num_pages); + + if (nvbe->pagelist) + return DRM_ERR(EINVAL); + nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; + nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + + nvbe->pages_populated = d = 0; + for (p = 0; p < num_pages; p++) { + for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { + nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, + pages[p], o, + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(nvbe->pagelist[d])) { + be->func->clear(be); + DRM_ERROR("pci_map_page failed\n"); + return DRM_ERR(EINVAL); + } + nvbe->pages_populated = ++d; + } + } + + return 0; +} + +static void +nouveau_sgdma_clear(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int d; + + DRM_DEBUG("\n"); + + if (nvbe && nvbe->pagelist) { + if (nvbe->is_bound) + be->func->unbind(be); + + for (d = 0; d < nvbe->pages_populated; d--) { + pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + } + drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + } +} + +static int +nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, + int cached) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + uint64_t offset = (pg_start << PAGE_SHIFT); + uint32_t i; + + DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); + + if (offset & NV_CTXDMA_PAGE_MASK) + return DRM_ERR(EINVAL); + nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) + nvbe->pte_start += 2; /* skip ctxdma header */ + + for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { + uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; + + if (pteval & NV_CTXDMA_PAGE_MASK) { + DRM_ERROR("Bad pteval 0x%llx\n", pteval); + return 
DRM_ERR(EINVAL); + } + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, i, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); + INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000); + } + } + + nvbe->is_bound = 1; + return 0; +} + +static int +nouveau_sgdma_unbind(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + DRM_DEBUG("\n"); + + if (nvbe->is_bound) { + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + unsigned int pte; + + pte = nvbe->pte_start; + while (pte < (nvbe->pte_start + nvbe->pages)) { + uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, pte, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010); + INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004); + } + + pte++; + } + + nvbe->is_bound = 0; + } + + return 0; +} + +static void +nouveau_sgdma_destroy(struct drm_ttm_backend *be) +{ + DRM_DEBUG("\n"); + if (be) { + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + if (nvbe) { + if (nvbe->pagelist) + be->func->clear(be); + drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); + } + } +} + +static struct drm_ttm_backend_func nouveau_sgdma_backend = { + .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nouveau_sgdma_bind, + .unbind = nouveau_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +struct drm_ttm_backend * +nouveau_sgdma_init_ttm(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_sgdma_be *nvbe; + + if (!dev_priv->gart_info.sg_ctxdma) + return NULL; + + nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); + if (!nvbe) + return NULL; + + nvbe->dev = dev; + + nvbe->backend.func = &nouveau_sgdma_backend; + nvbe->backend.mem_type = DRM_BO_MEM_TT; + + return &nvbe->backend; +} + +int +nouveau_sgdma_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + uint32_t aper_size, obj_size; + int i, ret; + + if (dev_priv->card_type < NV_50) { + aper_size = (64 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; + obj_size += 8; /* ctxdma header */ + } else { + /* 1 entire VM page table */ + aper_size = (512 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; + } + + if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16, + NVOBJ_FLAG_ALLOW_NO_REFS | + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { + DRM_ERROR("Error creating sgdma object: %d\n", ret); + return ret; + } + + if (dev_priv->card_type < NV_50) { + dev_priv->gart_info.sg_dummy_page = + alloc_page(GFP_KERNEL|__GFP_DMA32); + SetPageLocked(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_bus = + pci_map_page(dev->pdev, + dev_priv->gart_info.sg_dummy_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and + * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE + * on those cards? 
*/ + INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + INSTANCE_WR(gpuobj, 1, aper_size - 1); + for (i=2; i<2+(aper_size>>12); i++) { + INSTANCE_WR(gpuobj, i, + dev_priv->gart_info.sg_dummy_bus | 3); + } + } else { + for (i=0; i<obj_size; i+=8) { + INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010); + INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004); + } + } + + dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + dev_priv->gart_info.sg_ctxdma = gpuobj; + return 0; +} + +void +nouveau_sgdma_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->gart_info.sg_dummy_page) { + pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, + NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + unlock_page(dev_priv->gart_info.sg_dummy_page); + __free_page(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_page = NULL; + dev_priv->gart_info.sg_dummy_bus = 0; + } + + nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); +} + +int +nouveau_sgdma_nottm_hack_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_ttm_backend *be; + struct drm_scatter_gather sgreq; + int ret; + + dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); + if (!dev_priv->gart_info.sg_be) + return DRM_ERR(ENOMEM); + be = dev_priv->gart_info.sg_be; + + /* Hack the aperture size down to the amount of system memory + * we're going to bind into it. + */ + if (dev_priv->gart_info.aper_size > 32*1024*1024) + dev_priv->gart_info.aper_size = 32*1024*1024; + + sgreq.size = dev_priv->gart_info.aper_size; + if ((ret = drm_sg_alloc(dev, &sgreq))) { + DRM_ERROR("drm_sg_alloc failed: %d\n", ret); + return ret; + } + dev_priv->gart_info.sg_handle = sgreq.handle; + + if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) { + DRM_ERROR("failed populate: %d\n", ret); + return ret; + } + + if ((ret = be->func->bind(be, 0, 0))) { + DRM_ERROR("failed bind: %d\n", ret); + return ret; + } + + return 0; +} + +void +nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) +{ +} + diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c index 114ec8f9..b4c3f93b 100644 --- a/linux-core/sis_drv.c +++ b/linux-core/sis_drv.c @@ -36,7 +36,7 @@ static struct pci_device_id pciidlist[] = { }; -static int sis_driver_load(drm_device_t *dev, unsigned long chipset) +static int sis_driver_load(struct drm_device *dev, unsigned long chipset) { drm_sis_private_t *dev_priv; int ret; @@ -55,7 +55,7 @@ static int sis_driver_load(drm_device_t *dev, unsigned long chipset) return ret; } -static int sis_driver_unload(drm_device_t *dev) +static int sis_driver_unload(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 21c1f2d7..edbf8bf4 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -93,7 +93,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) mutex_lock(&dev->struct_mutex); #if defined(__linux__) && defined(CONFIG_FB_SIS) { - drm_sman_mm_t sman_mm; + struct drm_sman_mm sman_mm; sman_mm.private = (void *)0xFFFFFFFF; sman_mm.allocate = sis_sman_mm_allocate; sman_mm.free = sis_sman_mm_free; @@ -122,14 +122,14 @@ static int sis_fb_init(DRM_IOCTL_ARGS) return 0; } -static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv, +static int sis_drm_alloc(struct 
drm_device * dev, struct drm_file * priv, unsigned long data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; drm_sis_mem_t mem; int retval = 0; - drm_memblock_item_t *item; + struct drm_memblock_item *item; DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem)); @@ -228,9 +228,9 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) return sis_drm_alloc(dev, priv, data, AGP_TYPE); } -static drm_local_map_t *sis_reg_init(drm_device_t *dev) +static drm_local_map_t *sis_reg_init(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; drm_local_map_t *map; list_for_each_entry(entry, &dev->maplist, head) { @@ -245,7 +245,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev) } int -sis_idle(drm_device_t *dev) +sis_idle(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; uint32_t idle_reg; @@ -314,10 +314,10 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { @@ -334,7 +334,7 @@ void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) return; } -drm_ioctl_desc_t sis_ioctls[] = { +struct drm_ioctl_desc sis_ioctls[] = { [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index ebc8c371..0461b3c7 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -32,19 +32,18 @@ #include "via_drm.h" #include "via_drv.h" -drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev) +struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int via_fence_types(struct drm_buffer_object *bo, uint32_t * type) { - *class = 0; *type = 3; return 0; } -int via_invalidate_caches(drm_device_t * dev, uint32_t flags) +int via_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Invalidate texture caches here. 
@@ -54,7 +53,7 @@ int via_invalidate_caches(drm_device_t * dev, uint32_t flags) } -static int via_vram_info(drm_device_t *dev, +static int via_vram_info(struct drm_device *dev, unsigned long *offset, unsigned long *size) { @@ -82,8 +81,8 @@ static int via_vram_info(drm_device_t *dev, return 0; } -int via_init_mem_type(drm_device_t * dev, uint32_t type, - drm_mem_type_manager_t * man) +int via_init_mem_type(struct drm_device * dev, uint32_t type, + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -144,7 +143,7 @@ int via_init_mem_type(drm_device_t * dev, uint32_t type, return 0; } -uint32_t via_evict_mask(drm_buffer_object_t *bo) +uint32_t via_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 2f508374..6422609c 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -206,7 +206,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) */ static void -via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) +via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) } static void -via_abort_dmablit(drm_device_t *dev, int engine) +via_abort_dmablit(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -294,7 +294,7 @@ via_abort_dmablit(drm_device_t *dev, int engine) } static void -via_dmablit_engine_off(drm_device_t *dev, int engine) +via_dmablit_engine_off(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -311,7 +311,7 @@ via_dmablit_engine_off(drm_device_t *dev, int engine) */ void -via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) +via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; @@ -432,7 +432,7 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que */ static int -via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) +via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -465,7 +465,7 @@ static void via_dmablit_timer(unsigned long data) { drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; int engine = (int) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); @@ -509,7 +509,7 @@ via_dmablit_workqueue(struct work_struct *work) #else drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); #endif - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; unsigned long irqsave; drm_via_sg_info_t *cur_sg; int cur_released; @@ -552,7 +552,7 @@ via_dmablit_workqueue(struct work_struct *work) void -via_init_dmablit(drm_device_t *dev) +via_init_dmablit(struct drm_device *dev) { int i,j; drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -568,7 +568,7 @@ via_init_dmablit(drm_device_t *dev) blitq->head = 0; blitq->cur = 0; blitq->serviced = 0; - blitq->num_free = VIA_NUM_BLIT_SLOTS; + blitq->num_free = VIA_NUM_BLIT_SLOTS - 1; blitq->num_outstanding = 0; blitq->is_active = 0; blitq->aborting = 0; @@ -594,7 +594,7 @@ 
via_init_dmablit(drm_device_t *dev) static int -via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) +via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int draw = xfer->to_fb; int ret = 0; @@ -740,7 +740,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) static int -via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) +via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_sg_info_t *vsg; diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h index f6ae03ec..726ad25d 100644 --- a/linux-core/via_dmablit.h +++ b/linux-core/via_dmablit.h @@ -59,7 +59,7 @@ typedef struct _drm_via_sg_info { } drm_via_sg_info_t; typedef struct _drm_via_blitq { - drm_device_t *dev; + struct drm_device *dev; uint32_t cur_blit_handle; uint32_t done_blit_handle; unsigned serviced; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index 02249939..a8db3d12 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -39,10 +39,10 @@ */ -static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) +static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_class_manager_t *fc = &dev->fm.class[class]; + struct drm_fence_class_manager *fc = &dev->fm.class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; uint32_t status; @@ -113,7 +113,7 @@ static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) * Emit a fence sequence. */ -int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -152,10 +152,10 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, * Manual poll (from the fence manager). 
*/ -void via_poke_flush(drm_device_t * dev, uint32_t class) +void via_poke_flush(struct drm_device * dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; uint32_t pending_flush; @@ -200,11 +200,11 @@ int via_fence_has_irq(struct drm_device * dev, uint32_t class, void via_fence_timer(unsigned long data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; uint32_t pending_flush; - drm_fence_class_manager_t *fc = &dev->fm.class[0]; + struct drm_fence_class_manager *fc = &dev->fm.class[0]; if (!dev_priv) return; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index d97269f5..1ac51050 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -129,7 +129,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) drm_via_mem_t mem; int retval = 0; - drm_memblock_item_t *item; + struct drm_memblock_item *item; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned long tmpSize; @@ -190,10 +190,10 @@ int via_mem_free(DRM_IOCTL_ARGS) } -void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { drm_via_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { diff --git a/shared-core/drm.h b/shared-core/drm.h index b4195419..db913b1f 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -127,16 +127,9 @@ #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) #if defined(__linux__) -#if defined(__KERNEL__) -typedef __u64 drm_u64_t; -#else -typedef unsigned long long drm_u64_t; -#endif - typedef unsigned int drm_handle_t; #else #include <sys/types.h> -typedef u_int64_t drm_u64_t; typedef unsigned long drm_handle_t; /**< To mapped regions */ #endif typedef unsigned int drm_context_t; /**< GLXContext handle */ @@ -152,31 +145,31 @@ typedef unsigned int drm_magic_t; /**< Magic for authentication */ * \note KW: Actually it's illegal to change either for * backwards-compatibility reasons. */ -typedef struct drm_clip_rect { +struct drm_clip_rect { unsigned short x1; unsigned short y1; unsigned short x2; unsigned short y2; -} drm_clip_rect_t; +}; /** * Drawable information. */ -typedef struct drm_drawable_info { +struct drm_drawable_info { unsigned int num_rects; - drm_clip_rect_t *rects; -} drm_drawable_info_t; + struct drm_clip_rect *rects; +}; /** * Texture region, */ -typedef struct drm_tex_region { +struct drm_tex_region { unsigned char next; unsigned char prev; unsigned char in_use; unsigned char padding; unsigned int age; -} drm_tex_region_t; +}; /** * Hardware lock. @@ -185,10 +178,10 @@ typedef struct drm_tex_region { * processor bus contention on a multiprocessor system, there should not be any * other data stored in the same cache line. */ -typedef struct drm_hw_lock { +struct drm_hw_lock { __volatile__ unsigned int lock; /**< lock variable */ char padding[60]; /**< Pad to cache line */ -} drm_hw_lock_t; +}; /* This is beyond ugly, and only works on GCC. However, it allows me to use * drm.h in places (i.e., in the X-server) where I can't use size_t. 
The real @@ -211,7 +204,7 @@ typedef struct drm_hw_lock { * * \sa drmGetVersion(). */ -typedef struct drm_version { +struct drm_version { int version_major; /**< Major version */ int version_minor; /**< Minor version */ int version_patchlevel; /**< Patch level */ @@ -221,35 +214,35 @@ typedef struct drm_version { char __user *date; /**< User-space buffer to hold date */ DRM_SIZE_T desc_len; /**< Length of desc buffer */ char __user *desc; /**< User-space buffer to hold desc */ -} drm_version_t; +}; /** * DRM_IOCTL_GET_UNIQUE ioctl argument type. * * \sa drmGetBusid() and drmSetBusId(). */ -typedef struct drm_unique { +struct drm_unique { DRM_SIZE_T unique_len; /**< Length of unique */ char __user *unique; /**< Unique name for driver instantiation */ -} drm_unique_t; +}; #undef DRM_SIZE_T -typedef struct drm_list { +struct drm_list { int count; /**< Length of user-space structures */ - drm_version_t __user *version; -} drm_list_t; + struct drm_version __user *version; +}; -typedef struct drm_block { +struct drm_block { int unused; -} drm_block_t; +}; /** * DRM_IOCTL_CONTROL ioctl argument type. * * \sa drmCtlInstHandler() and drmCtlUninstHandler(). */ -typedef struct drm_control { +struct drm_control { enum { DRM_ADD_COMMAND, DRM_RM_COMMAND, @@ -257,12 +250,12 @@ typedef struct drm_control { DRM_UNINST_HANDLER } func; int irq; -} drm_control_t; +}; /** * Type of memory to map. */ -typedef enum drm_map_type { +enum drm_map_type { _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ _DRM_REGISTERS = 1, /**< no caching, no core dump */ _DRM_SHM = 2, /**< shared, cached */ @@ -270,12 +263,12 @@ typedef enum drm_map_type { _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ _DRM_TTM = 6 -} drm_map_type_t; +}; /** * Memory mapping flags. */ -typedef enum drm_map_flags { +enum drm_map_flags { _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ _DRM_READ_ONLY = 0x02, _DRM_LOCKED = 0x04, /**< shared, cached, locked */ @@ -283,12 +276,12 @@ typedef enum drm_map_flags { _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ _DRM_REMOVABLE = 0x40 /**< Removable mapping */ -} drm_map_flags_t; +}; -typedef struct drm_ctx_priv_map { +struct drm_ctx_priv_map { unsigned int ctx_id; /**< Context requesting private mapping */ void *handle; /**< Handle of map */ -} drm_ctx_priv_map_t; +}; /** * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls @@ -296,30 +289,30 @@ typedef struct drm_ctx_priv_map { * * \sa drmAddMap(). */ -typedef struct drm_map { +struct drm_map { unsigned long offset; /**< Requested physical address (0 for SAREA)*/ unsigned long size; /**< Requested physical size (bytes) */ - drm_map_type_t type; /**< Type of memory to map */ - drm_map_flags_t flags; /**< Flags */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ void *handle; /**< User-space: "Handle" to pass to mmap() */ /**< Kernel-space: kernel-virtual address */ int mtrr; /**< MTRR slot used */ /* Private data */ -} drm_map_t; +}; /** * DRM_IOCTL_GET_CLIENT ioctl argument type. */ -typedef struct drm_client { +struct drm_client { int idx; /**< Which client desired? */ int auth; /**< Is client authenticated? 
*/ unsigned long pid; /**< Process ID */ unsigned long uid; /**< User ID */ unsigned long magic; /**< Magic */ unsigned long iocs; /**< Ioctl count */ -} drm_client_t; +}; -typedef enum { +enum drm_stat_type { _DRM_STAT_LOCK, _DRM_STAT_OPENS, _DRM_STAT_CLOSES, @@ -337,23 +330,23 @@ typedef enum { _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ _DRM_STAT_MISSED /**< Missed DMA opportunity */ /* Add to the *END* of the list */ -} drm_stat_type_t; +}; /** * DRM_IOCTL_GET_STATS ioctl argument type. */ -typedef struct drm_stats { +struct drm_stats { unsigned long count; struct { unsigned long value; - drm_stat_type_t type; + enum drm_stat_type type; } data[15]; -} drm_stats_t; +}; /** * Hardware locking flags. */ -typedef enum drm_lock_flags { +enum drm_lock_flags { _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ @@ -363,17 +356,17 @@ typedef enum drm_lock_flags { full-screen DGA-like mode. */ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ -} drm_lock_flags_t; +}; /** * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. * * \sa drmGetLock() and drmUnlock(). */ -typedef struct drm_lock { +struct drm_lock { int context; - drm_lock_flags_t flags; -} drm_lock_t; + enum drm_lock_flags flags; +}; /** * DMA flags @@ -383,7 +376,7 @@ typedef struct drm_lock { * * \sa drm_dma. */ -typedef enum drm_dma_flags { +enum drm_dma_flags { /* Flags for DMA buffer dispatch */ _DRM_DMA_BLOCK = 0x01, /**< * Block until buffer dispatched. @@ -402,14 +395,14 @@ typedef enum drm_dma_flags { _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ -} drm_dma_flags_t; +}; /** * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. * * \sa drmAddBufs(). */ -typedef struct drm_buf_desc { +struct drm_buf_desc { int count; /**< Number of buffers of this size */ int size; /**< Size in bytes */ int low_mark; /**< Low water mark */ @@ -425,48 +418,48 @@ typedef struct drm_buf_desc { * Start address of where the AGP buffers are * in the AGP aperture */ -} drm_buf_desc_t; +}; /** * DRM_IOCTL_INFO_BUFS ioctl argument type. */ -typedef struct drm_buf_info { +struct drm_buf_info { int count; /**< Number of buffers described in list */ - drm_buf_desc_t __user *list; /**< List of buffer descriptions */ -} drm_buf_info_t; + struct drm_buf_desc __user *list; /**< List of buffer descriptions */ +}; /** * DRM_IOCTL_FREE_BUFS ioctl argument type. */ -typedef struct drm_buf_free { +struct drm_buf_free { int count; int __user *list; -} drm_buf_free_t; +}; /** * Buffer information * * \sa drm_buf_map. */ -typedef struct drm_buf_pub { +struct drm_buf_pub { int idx; /**< Index into the master buffer list */ int total; /**< Buffer size */ int used; /**< Amount of buffer in use (for DMA) */ void __user *address; /**< Address of buffer */ -} drm_buf_pub_t; +}; /** * DRM_IOCTL_MAP_BUFS ioctl argument type. 
*/ -typedef struct drm_buf_map { +struct drm_buf_map { int count; /**< Length of the buffer list */ #if defined(__cplusplus) void __user *c_virtual; #else void __user *virtual; /**< Mmap'd area in user-virtual */ #endif - drm_buf_pub_t __user *list; /**< Buffer information */ -} drm_buf_map_t; + struct drm_buf_pub __user *list; /**< Buffer information */ +}; /** * DRM_IOCTL_DMA ioctl argument type. @@ -475,48 +468,48 @@ typedef struct drm_buf_map { * * \sa drmDMA(). */ -typedef struct drm_dma { +struct drm_dma { int context; /**< Context handle */ int send_count; /**< Number of buffers to send */ int __user *send_indices; /**< List of handles to buffers */ int __user *send_sizes; /**< Lengths of data to send */ - drm_dma_flags_t flags; /**< Flags */ + enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ int __user *request_indices; /**< Buffer information */ int __user *request_sizes; int granted_count; /**< Number of buffers granted */ -} drm_dma_t; +}; -typedef enum { +enum drm_ctx_flags { _DRM_CONTEXT_PRESERVED = 0x01, _DRM_CONTEXT_2DONLY = 0x02 -} drm_ctx_flags_t; +}; /** * DRM_IOCTL_ADD_CTX ioctl argument type. * * \sa drmCreateContext() and drmDestroyContext(). */ -typedef struct drm_ctx { +struct drm_ctx { drm_context_t handle; - drm_ctx_flags_t flags; -} drm_ctx_t; + enum drm_ctx_flags flags; +}; /** * DRM_IOCTL_RES_CTX ioctl argument type. */ -typedef struct drm_ctx_res { +struct drm_ctx_res { int count; - drm_ctx_t __user *contexts; -} drm_ctx_res_t; + struct drm_ctx __user *contexts; +}; /** * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. */ -typedef struct drm_draw { +struct drm_draw { drm_drawable_t handle; -} drm_draw_t; +}; /** * DRM_IOCTL_UPDATE_DRAW ioctl argument type. @@ -525,53 +518,53 @@ typedef enum { DRM_DRAWABLE_CLIPRECTS, } drm_drawable_info_type_t; -typedef struct drm_update_draw { +struct drm_update_draw { drm_drawable_t handle; unsigned int type; unsigned int num; unsigned long long data; -} drm_update_draw_t; +}; /** * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. */ -typedef struct drm_auth { +struct drm_auth { drm_magic_t magic; -} drm_auth_t; +}; /** * DRM_IOCTL_IRQ_BUSID ioctl argument type. * * \sa drmGetInterruptFromBusID(). 
*/ -typedef struct drm_irq_busid { +struct drm_irq_busid { int irq; /**< IRQ number */ int busnum; /**< bus number */ int devnum; /**< device number */ int funcnum; /**< function number */ -} drm_irq_busid_t; +}; -typedef enum { +enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ -} drm_vblank_seq_type_t; +}; #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ _DRM_VBLANK_NEXTONMISS) struct drm_wait_vblank_request { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; unsigned long signal; }; struct drm_wait_vblank_reply { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; long tval_sec; long tval_usec; @@ -582,41 +575,41 @@ struct drm_wait_vblank_reply { * * \sa drmWaitVBlank(). */ -typedef union drm_wait_vblank { +union drm_wait_vblank { struct drm_wait_vblank_request request; struct drm_wait_vblank_reply reply; -} drm_wait_vblank_t; +}; /** * DRM_IOCTL_AGP_ENABLE ioctl argument type. * * \sa drmAgpEnable(). */ -typedef struct drm_agp_mode { +struct drm_agp_mode { unsigned long mode; /**< AGP mode */ -} drm_agp_mode_t; +}; /** * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. * * \sa drmAgpAlloc() and drmAgpFree(). */ -typedef struct drm_agp_buffer { +struct drm_agp_buffer { unsigned long size; /**< In bytes -- will round to page boundary */ unsigned long handle; /**< Used for binding / unbinding */ unsigned long type; /**< Type of memory to allocate */ unsigned long physical; /**< Physical used by i810 */ -} drm_agp_buffer_t; +}; /** * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. * * \sa drmAgpBind() and drmAgpUnbind(). */ -typedef struct drm_agp_binding { +struct drm_agp_binding { unsigned long handle; /**< From drm_agp_buffer */ unsigned long offset; /**< In bytes -- will round to page boundary */ -} drm_agp_binding_t; +}; /** * DRM_IOCTL_AGP_INFO ioctl argument type. @@ -625,7 +618,7 @@ typedef struct drm_agp_binding { * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), * drmAgpVendorId() and drmAgpDeviceId(). */ -typedef struct drm_agp_info { +struct drm_agp_info { int agp_version_major; int agp_version_minor; unsigned long mode; @@ -639,25 +632,25 @@ typedef struct drm_agp_info { unsigned short id_vendor; unsigned short id_device; /*@} */ -} drm_agp_info_t; +}; /** * DRM_IOCTL_SG_ALLOC ioctl argument type. */ -typedef struct drm_scatter_gather { +struct drm_scatter_gather { unsigned long size; /**< In bytes -- will round to page boundary */ unsigned long handle; /**< Used for mapping / unmapping */ -} drm_scatter_gather_t; +}; /** * DRM_IOCTL_SET_VERSION ioctl argument type. 
*/ -typedef struct drm_set_version { +struct drm_set_version { int drm_di_major; int drm_di_minor; int drm_dd_major; int drm_dd_minor; -} drm_set_version_t; +}; #define DRM_FENCE_FLAG_EMIT 0x00000001 @@ -670,25 +663,15 @@ typedef struct drm_set_version { #define DRM_FENCE_TYPE_EXE 0x00000001 -typedef struct drm_fence_arg { - unsigned handle; - int class; - unsigned type; - unsigned flags; - unsigned signaled; - unsigned expand_pad[4]; /*Future expansion */ - enum { - drm_fence_create, - drm_fence_destroy, - drm_fence_reference, - drm_fence_unreference, - drm_fence_signaled, - drm_fence_flush, - drm_fence_wait, - drm_fence_emit, - drm_fence_buffers - } op; -} drm_fence_arg_t; +struct drm_fence_arg { + unsigned int handle; + unsigned int class; + unsigned int type; + unsigned int flags; + unsigned int signaled; + unsigned int pad64; + uint64_t expand_pad[3]; /*Future expansion */ +}; /* Buffer permissions, referring to how the GPU uses the buffers. * these translate to fence types used for the buffers. @@ -696,9 +679,9 @@ typedef struct drm_fence_arg { * a command (batch-) buffer is exe. Can be or-ed together. */ -#define DRM_BO_FLAG_READ 0x00000001 -#define DRM_BO_FLAG_WRITE 0x00000002 -#define DRM_BO_FLAG_EXE 0x00000004 +#define DRM_BO_FLAG_READ (1ULL << 0) +#define DRM_BO_FLAG_WRITE (1ULL << 1) +#define DRM_BO_FLAG_EXE (1ULL << 2) /* * Status flags. Can be read to determine the actual state of a buffer. @@ -711,25 +694,25 @@ typedef struct drm_fence_arg { * or lock. * Flags: Acknowledge */ -#define DRM_BO_FLAG_NO_EVICT 0x00000010 +#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) /* * Mask: Require that the buffer is placed in mappable memory when validated. * If not set the buffer may or may not be in mappable memory when validated. * Flags: If set, the buffer is in mappable memory. */ -#define DRM_BO_FLAG_MAPPABLE 0x00000020 +#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) /* Mask: The buffer should be shareable with other processes. * Flags: The buffer is shareable with other processes. */ -#define DRM_BO_FLAG_SHAREABLE 0x00000040 +#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) /* Mask: If set, place the buffer in cache-coherent memory if available. * If clear, never place the buffer in cache coherent memory if validated. * Flags: The buffer is currently in cache-coherent memory. */ -#define DRM_BO_FLAG_CACHED 0x00000080 +#define DRM_BO_FLAG_CACHED (1ULL << 7) /* Mask: Make sure that every time this buffer is validated, * it ends up on the same location provided that the memory mask is the same. @@ -738,23 +721,23 @@ typedef struct drm_fence_arg { * part of buffer manager shutdown or locking. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_NO_MOVE 0x00000100 +#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) /* Mask: Make sure the buffer is in cached memory when mapped for reading. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_READ_CACHED 0x00080000 +#define DRM_BO_FLAG_READ_CACHED (1ULL << 19) /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. * Flags: Acknowledge. */ -#define DRM_BO_FLAG_FORCE_CACHING 0x00002000 +#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) /* * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. * Flags: Acknowledge. 
*/ -#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000 +#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) /* * Memory type flags that can be or'ed together in the mask, but only @@ -762,21 +745,25 @@ typedef struct drm_fence_arg { */ /* System memory */ -#define DRM_BO_FLAG_MEM_LOCAL 0x01000000 +#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) /* Translation table memory */ -#define DRM_BO_FLAG_MEM_TT 0x02000000 +#define DRM_BO_FLAG_MEM_TT (1ULL << 25) /* Vram memory */ -#define DRM_BO_FLAG_MEM_VRAM 0x04000000 +#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) /* Up to the driver to define. */ -#define DRM_BO_FLAG_MEM_PRIV0 0x08000000 -#define DRM_BO_FLAG_MEM_PRIV1 0x10000000 -#define DRM_BO_FLAG_MEM_PRIV2 0x20000000 -#define DRM_BO_FLAG_MEM_PRIV3 0x40000000 -#define DRM_BO_FLAG_MEM_PRIV4 0x80000000 +#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) +#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) +#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) +#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) +#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) +/* We can add more of these now with a 64-bit flag type */ /* Memory flag mask */ -#define DRM_BO_MASK_MEM 0xFF000000 -#define DRM_BO_MASK_MEMTYPE 0xFF0000A0 +#define DRM_BO_MASK_MEM 0x00000000FF000000ULL +#define DRM_BO_MASK_MEMTYPE 0x00000000FF0000A0ULL + +/* Driver-private flags */ +#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL /* Don't block on validate and map */ #define DRM_BO_HINT_DONT_BLOCK 0x00000002 @@ -785,40 +772,46 @@ typedef struct drm_fence_arg { #define DRM_BO_HINT_WAIT_LAZY 0x00000008 #define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010 +#define DRM_BO_INIT_MAGIC 0xfe769812 +#define DRM_BO_INIT_MAJOR 0 +#define DRM_BO_INIT_MINOR 1 -typedef enum { + +enum drm_bo_type { drm_bo_type_dc, drm_bo_type_user, drm_bo_type_fake, drm_bo_type_kernel, /* for initial kernel allocations */ -}drm_bo_type_t; - - -typedef struct drm_bo_arg_request { - unsigned handle; /* User space handle */ - unsigned mask; - unsigned hint; - drm_u64_t size; - drm_bo_type_t type; - unsigned arg_handle; - drm_u64_t buffer_start; - unsigned page_alignment; - unsigned expand_pad[4]; /*Future expansion */ +}; + +struct drm_bo_info_req { + uint64_t mask; + uint64_t flags; + unsigned int handle; + unsigned int hint; + unsigned int fence_class; + unsigned int pad64; +}; + +struct drm_bo_create_req { + uint64_t mask; + uint64_t size; + uint64_t buffer_start; + unsigned int hint; + unsigned int page_alignment; + enum drm_bo_type type; + unsigned int pad64; +}; + +struct drm_bo_op_req { enum { - drm_bo_create, drm_bo_validate, - drm_bo_map, - drm_bo_unmap, drm_bo_fence, - drm_bo_destroy, - drm_bo_reference, - drm_bo_unreference, - drm_bo_info, - drm_bo_wait_idle, - drm_bo_ref_fence + drm_bo_ref_fence, } op; -} drm_bo_arg_request_t; - + unsigned int arg_handle; + struct drm_bo_info_req bo_req; +}; /* * Reply flags @@ -826,30 +819,64 @@ typedef struct drm_bo_arg_request { #define DRM_BO_REP_BUSY 0x00000001 -typedef struct drm_bo_arg_reply { +struct drm_bo_info_rep { + uint64_t flags; + uint64_t mask; + uint64_t size; + uint64_t offset; + uint64_t arg_handle; + uint64_t buffer_start; + unsigned int handle; + unsigned int fence_flags; + unsigned int rep_flags; + unsigned int page_alignment; + unsigned int desired_tile_stride; + unsigned int hw_tile_stride; + unsigned int tile_info; + unsigned int pad64; + uint64_t expand_pad[4]; /*Future expansion */ +}; + +struct drm_bo_arg_rep { + struct drm_bo_info_rep bo_info; int ret; - unsigned handle; - unsigned flags; - drm_u64_t size; - drm_u64_t offset; - drm_u64_t arg_handle; - 
unsigned mask; - drm_u64_t buffer_start; - unsigned fence_flags; - unsigned rep_flags; - unsigned page_alignment; - unsigned expand_pad[4]; /*Future expansion */ -}drm_bo_arg_reply_t; - - -typedef struct drm_bo_arg{ - int handled; - drm_u64_t next; + unsigned int pad64; +}; + +struct drm_bo_create_arg { + union { + struct drm_bo_create_req req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_handle_arg { + unsigned int handle; +}; + +struct drm_bo_reference_info_arg { + union { + struct drm_bo_handle_arg req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_map_wait_idle_arg { union { - drm_bo_arg_request_t req; - drm_bo_arg_reply_t rep; + struct drm_bo_info_req req; + struct drm_bo_info_rep rep; } d; -} drm_bo_arg_t; +}; + +struct drm_bo_op_arg { + uint64_t next; + union { + struct drm_bo_op_req req; + struct drm_bo_arg_rep rep; + } d; + int handled; + unsigned int pad64; +}; #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 @@ -862,25 +889,18 @@ typedef struct drm_bo_arg{ #define DRM_BO_MEM_TYPES 8 /* For now. */ -typedef union drm_mm_init_arg{ - struct { - enum { - mm_init, - mm_takedown, - mm_query, - mm_lock, - mm_unlock - } op; - drm_u64_t p_offset; - drm_u64_t p_size; - unsigned mem_type; - unsigned expand_pad[8]; /*Future expansion */ - } req; - struct { - drm_handle_t mm_sarea; - unsigned expand_pad[8]; /*Future expansion */ - } rep; -} drm_mm_init_arg_t; +struct drm_mm_type_arg { + unsigned int mem_type; +}; + +struct drm_mm_init_arg { + unsigned int magic; + unsigned int major; + unsigned int minor; + unsigned int mem_type; + uint64_t p_offset; + uint64_t p_size; +}; /** * \name Ioctls Definitions @@ -893,65 +913,87 @@ typedef union drm_mm_init_arg{ #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) -#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) -#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) -#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t) -#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t) -#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t) -#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t) -#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t) -#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t) - -#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) -#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t) -#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t) -#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t) -#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t) -#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t) -#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t) -#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t) -#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t) -#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t) -#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t) - -#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t) - -#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t) -#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t) - -#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t) -#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t) -#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t) -#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t) -#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t) -#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t) -#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t) -#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t) -#define 
DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t) -#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t) -#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t) -#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t) -#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t) +#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) +#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) +#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) +#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) +#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) +#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) +#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) +#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) + +#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) +#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) +#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) +#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) +#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) +#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) +#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) +#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) +#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) +#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) +#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) + +#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) + +#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) +#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) + +#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) +#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) +#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) +#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) +#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) +#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) +#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) +#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) +#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) +#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) +#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) +#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) +#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) #define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) #define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) -#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t) -#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t) -#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t) -#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t) -#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t) -#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t) +#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) +#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) +#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) +#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) +#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) +#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) + +#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather) +#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) + +#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) + +#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) + 
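The block of definitions that follows assigns the memory-manager, fence and buffer-object operations their own ioctl numbers in the 0xc0-0xd5 range, each with a small fixed-layout argument structure, in place of the multiplexed drm_fence_arg "op" field and drm_bo_arg request chain removed elsewhere in this header. A minimal user-space sketch of driving two of the new entry points, assuming an already-open DRM file descriptor and illustrative sizes (the helper names and error handling are not part of the patch):

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Initialise the TT memory manager; field names follow struct
 * drm_mm_init_arg as added by this patch. */
static int init_tt_manager(int fd, uint64_t p_offset, uint64_t p_size)
{
	struct drm_mm_init_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.magic = DRM_BO_INIT_MAGIC;	/* interface check introduced above */
	arg.major = DRM_BO_INIT_MAJOR;
	arg.minor = DRM_BO_INIT_MINOR;
	arg.mem_type = DRM_BO_MEM_TT;
	arg.p_offset = p_offset;	/* start of the managed region */
	arg.p_size = p_size;		/* extent of the managed region */

	if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
		return -errno;
	return 0;
}

/* Create a buffer object through the new request/reply union; the
 * 64-bit mask uses the widened DRM_BO_FLAG_* values defined earlier. */
static int create_bo(int fd, uint64_t size, unsigned int *handle)
{
	struct drm_bo_create_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.d.req.size = size;
	arg.d.req.mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
			 DRM_BO_FLAG_MEM_TT;
	arg.d.req.type = drm_bo_type_dc;

	if (ioctl(fd, DRM_IOCTL_BO_CREATE, &arg))
		return -errno;

	*handle = arg.d.rep.handle;	/* reply overlays the request */
	return 0;
}

Both sketches use only the structures visible in this header; the kernel-side handling of these arguments lies outside this hunk.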
+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) +#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) +#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) +#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) + +#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) + +#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) +#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) +#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) +#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg) +#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) +#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) -#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t) -#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t) - -#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) - -#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t) -#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t) -#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t) - -#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) /*@}*/ @@ -966,4 +1008,53 @@ typedef union drm_mm_init_arg{ #define DRM_COMMAND_BASE 0x40 #define DRM_COMMAND_END 0xA0 +/* typedef area */ +#if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) +typedef struct drm_clip_rect drm_clip_rect_t; +typedef struct drm_drawable_info drm_drawable_info_t; +typedef struct drm_tex_region drm_tex_region_t; +typedef struct drm_hw_lock drm_hw_lock_t; +typedef struct drm_version drm_version_t; +typedef struct drm_unique drm_unique_t; +typedef struct drm_list drm_list_t; +typedef struct drm_block drm_block_t; +typedef struct drm_control drm_control_t; +typedef enum drm_map_type drm_map_type_t; +typedef enum drm_map_flags drm_map_flags_t; +typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; +typedef struct drm_map drm_map_t; +typedef struct drm_client drm_client_t; +typedef enum drm_stat_type drm_stat_type_t; +typedef struct drm_stats drm_stats_t; +typedef enum drm_lock_flags drm_lock_flags_t; +typedef struct drm_lock drm_lock_t; +typedef enum drm_dma_flags drm_dma_flags_t; +typedef struct drm_buf_desc drm_buf_desc_t; +typedef struct drm_buf_info drm_buf_info_t; +typedef struct drm_buf_free drm_buf_free_t; +typedef struct drm_buf_pub drm_buf_pub_t; +typedef struct drm_buf_map drm_buf_map_t; +typedef struct drm_dma drm_dma_t; +typedef union drm_wait_vblank drm_wait_vblank_t; +typedef struct drm_agp_mode drm_agp_mode_t; +typedef enum drm_ctx_flags drm_ctx_flags_t; +typedef struct drm_ctx drm_ctx_t; +typedef struct drm_ctx_res drm_ctx_res_t; +typedef struct 
drm_draw drm_draw_t; +typedef struct drm_update_draw drm_update_draw_t; +typedef struct drm_auth drm_auth_t; +typedef struct drm_irq_busid drm_irq_busid_t; +typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; +typedef struct drm_agp_buffer drm_agp_buffer_t; +typedef struct drm_agp_binding drm_agp_binding_t; +typedef struct drm_agp_info drm_agp_info_t; +typedef struct drm_scatter_gather drm_scatter_gather_t; +typedef struct drm_set_version drm_set_version_t; + +typedef struct drm_fence_arg drm_fence_arg_t; +typedef struct drm_mm_type_arg drm_mm_type_arg_t; +typedef struct drm_mm_init_arg drm_mm_init_arg_t; +typedef enum drm_bo_type drm_bo_type_t; +#endif + #endif diff --git a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h index 43d1114f..34050a6d 100644 --- a/shared-core/drm_sarea.h +++ b/shared-core/drm_sarea.h @@ -50,29 +50,35 @@ #define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000 /** SAREA drawable */ -typedef struct drm_sarea_drawable { +struct drm_sarea_drawable { unsigned int stamp; unsigned int flags; -} drm_sarea_drawable_t; +}; /** SAREA frame */ -typedef struct drm_sarea_frame { +struct drm_sarea_frame { unsigned int x; unsigned int y; unsigned int width; unsigned int height; unsigned int fullscreen; -} drm_sarea_frame_t; +}; /** SAREA */ -typedef struct drm_sarea { +struct drm_sarea { /** first thing is always the DRM locking structure */ - drm_hw_lock_t lock; + struct drm_hw_lock lock; /** \todo Use readers/writer lock for drm_sarea::drawable_lock */ - drm_hw_lock_t drawable_lock; - drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ - drm_sarea_frame_t frame; /**< frame */ + struct drm_hw_lock drawable_lock; + struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ + struct drm_sarea_frame frame; /**< frame */ drm_context_t dummy_context; -} drm_sarea_t; +}; + +#ifndef __KERNEL__ +typedef struct drm_sarea_drawable drm_sarea_drawable_t; +typedef struct drm_sarea_frame drm_sarea_frame_t; +typedef struct drm_sarea drm_sarea_t; +#endif #endif /* _DRM_SAREA_H_ */ diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 535a061a..5fb9fcff 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -47,7 +47,7 @@ * the head pointer changes, so that EBUSY only happens if the ring * actually stalls for (eg) 3 seconds. 
*/ -int i915_wait_ring(drm_device_t * dev, int n, const char *caller) +int i915_wait_ring(struct drm_device * dev, int n, const char *caller) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); @@ -74,7 +74,7 @@ int i915_wait_ring(drm_device_t * dev, int n, const char *caller) return DRM_ERR(EBUSY); } -void i915_kernel_lost_context(drm_device_t * dev) +void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); @@ -89,7 +89,7 @@ void i915_kernel_lost_context(drm_device_t * dev) dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } -static int i915_dma_cleanup(drm_device_t * dev) +static int i915_dma_cleanup(struct drm_device * dev) { /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -125,7 +125,7 @@ static int i915_dma_cleanup(drm_device_t * dev) return 0; } -static int i915_initialize(drm_device_t * dev, +static int i915_initialize(struct drm_device * dev, drm_i915_private_t * dev_priv, drm_i915_init_t * init) { @@ -212,7 +212,7 @@ static int i915_initialize(drm_device_t * dev, return 0; } -static int i915_dma_resume(drm_device_t * dev) +static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -358,7 +358,7 @@ static int validate_cmd(int cmd) return ret; } -static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) +static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -397,12 +397,12 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) return 0; } -static int i915_emit_box(drm_device_t * dev, - drm_clip_rect_t __user * boxes, +static int i915_emit_box(struct drm_device * dev, + struct drm_clip_rect __user * boxes, int i, int DR1, int DR4) { drm_i915_private_t *dev_priv = dev->dev_private; - drm_clip_rect_t box; + struct drm_clip_rect box; RING_LOCALS; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { @@ -440,7 +440,7 @@ static int i915_emit_box(drm_device_t * dev, * emit. 
For now, do it in both places: */ -void i915_emit_breadcrumb(drm_device_t *dev) +void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -461,7 +461,7 @@ void i915_emit_breadcrumb(drm_device_t *dev) } -int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush) +int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t flush_cmd = CMD_MI_FLUSH; @@ -482,7 +482,7 @@ int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush) } -static int i915_dispatch_cmdbuffer(drm_device_t * dev, +static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t * cmd) { #ifdef I915_HAVE_FENCE @@ -520,11 +520,11 @@ static int i915_dispatch_cmdbuffer(drm_device_t * dev, return 0; } -static int i915_dispatch_batchbuffer(drm_device_t * dev, +static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch) { drm_i915_private_t *dev_priv = dev->dev_private; - drm_clip_rect_t __user *boxes = batch->cliprects; + struct drm_clip_rect __user *boxes = batch->cliprects; int nbox = batch->num_cliprects; int i = 0, count; RING_LOCALS; @@ -568,7 +568,7 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev, return 0; } -static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync) +static void i915_do_dispatch_flip(struct drm_device * dev, int pipe, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; u32 num_pages, current_page, next_page, dspbase; @@ -620,7 +620,7 @@ static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync) dev_priv->sarea_priv->pf_current_page |= next_page << shift; } -void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync) +void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -642,7 +642,7 @@ void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync) #endif } -static int i915_quiescent(drm_device_t * dev) +static int i915_quiescent(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -683,7 +683,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, batch.num_cliprects * - sizeof(drm_clip_rect_t))) + sizeof(struct drm_clip_rect))) return DRM_ERR(EFAULT); ret = i915_dispatch_batchbuffer(dev, &batch); @@ -712,7 +712,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) if (cmdbuf.num_cliprects && DRM_VERIFYAREA_READ(cmdbuf.cliprects, cmdbuf.num_cliprects * - sizeof(drm_clip_rect_t))) { + sizeof(struct drm_clip_rect))) { DRM_ERROR("Fault accessing cliprects\n"); return DRM_ERR(EFAULT); } @@ -727,7 +727,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) return 0; } -static int i915_do_cleanup_pageflip(drm_device_t * dev) +static int i915_do_cleanup_pageflip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; int i, pipes, num_pages = dev_priv->sarea_priv->third_handle ? 
3 : 2; @@ -876,7 +876,7 @@ static int i915_mmio(DRM_IOCTL_ARGS) e = &mmio_table[mmio.reg]; base = (u8 *) dev_priv->mmio_map->handle + e->offset; - switch (mmio.read_write) { + switch (mmio.read_write) { case I915_MMIO_READ: if (!(e->flag & I915_MMIO_MAY_READ)) return DRM_ERR(EINVAL); @@ -943,7 +943,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) return 0; } -int i915_driver_load(drm_device_t *dev, unsigned long flags) +int i915_driver_load(struct drm_device *dev, unsigned long flags) { /* i915 has 4 more counters */ dev->counters += 4; @@ -955,7 +955,7 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags) return 0; } -void i915_driver_lastclose(drm_device_t * dev) +void i915_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -965,7 +965,7 @@ void i915_driver_lastclose(drm_device_t * dev) i915_dma_cleanup(dev); } -void i915_driver_preclose(drm_device_t * dev, DRMFILE filp) +void i915_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -973,7 +973,7 @@ void i915_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -drm_ioctl_desc_t i915_ioctls[] = { +struct drm_ioctl_desc i915_ioctls[] = { [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, @@ -1007,7 +1007,7 @@ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); * \returns * A value of 1 is always retured to indictate every i9x5 is AGP. */ -int i915_driver_device_is_agp(drm_device_t * dev) +int i915_driver_device_is_agp(struct drm_device * dev) { return 1; } diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 1c6ff4d3..3a90df6e 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -64,7 +64,7 @@ typedef struct _drm_i915_init { } drm_i915_init_t; typedef struct _drm_i915_sarea { - drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1]; + struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; int last_upload; /* last time texture was uploaded */ int last_enqueue; /* last time a buffer was enqueued */ int last_dispatch; /* age of the most recently dispatched buffer */ @@ -194,7 +194,7 @@ typedef struct _drm_i915_batchbuffer { int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? */ - drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */ + struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ } drm_i915_batchbuffer_t; /* As above, but pass a pointer to userspace buffer which can be @@ -206,7 +206,7 @@ typedef struct _drm_i915_cmdbuffer { int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? 
*/ - drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */ + struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ } drm_i915_cmdbuffer_t; /* Userspace can request & wait on irq's: @@ -283,7 +283,7 @@ typedef struct drm_i915_vblank_pipe { */ typedef struct drm_i915_vblank_swap { drm_drawable_t drawable; - drm_vblank_seq_type_t seqtype; + enum drm_vblank_seq_type seqtype; unsigned int sequence; } drm_i915_vblank_swap_t; diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index e9447f2b..1a2220a5 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -145,35 +145,35 @@ enum intel_chip_family { CHIP_I965 = 0x08, }; -extern drm_ioctl_desc_t i915_ioctls[]; +extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; /* i915_dma.c */ -extern void i915_kernel_lost_context(drm_device_t * dev); +extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); -extern void i915_driver_lastclose(drm_device_t * dev); -extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern int i915_driver_device_is_agp(drm_device_t * dev); +extern void i915_driver_lastclose(struct drm_device * dev); +extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -extern void i915_emit_breadcrumb(drm_device_t *dev); -extern void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync); -extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush); +extern void i915_emit_breadcrumb(struct drm_device *dev); +extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync); +extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); extern int i915_driver_firstopen(struct drm_device *dev); /* i915_irq.c */ extern int i915_irq_emit(DRM_IOCTL_ARGS); extern int i915_irq_wait(DRM_IOCTL_ARGS); -extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); -extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence); +extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); +extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); -extern void i915_driver_irq_preinstall(drm_device_t * dev); -extern void i915_driver_irq_postinstall(drm_device_t * dev); -extern void i915_driver_irq_uninstall(drm_device_t * dev); +extern void i915_driver_irq_preinstall(struct drm_device * dev); +extern void i915_driver_irq_postinstall(struct drm_device * dev); +extern void i915_driver_irq_uninstall(struct drm_device * dev); extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); -extern int i915_emit_irq(drm_device_t * dev); +extern int i915_emit_irq(struct drm_device * dev); extern void i915_user_irq_on(drm_i915_private_t *dev_priv); extern void i915_user_irq_off(drm_i915_private_t *dev_priv); extern int i915_vblank_swap(DRM_IOCTL_ARGS); @@ -184,31 +184,31 @@ extern int i915_mem_free(DRM_IOCTL_ARGS); extern int i915_mem_init_heap(DRM_IOCTL_ARGS); extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); extern void i915_mem_takedown(struct mem_block **heap); -extern void i915_mem_release(drm_device_t * dev, +extern void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap); #ifdef 
I915_HAVE_FENCE /* i915_fence.c */ -extern void i915_fence_handler(drm_device_t *dev); -extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class, +extern void i915_fence_handler(struct drm_device *dev); +extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, uint32_t flags, uint32_t *sequence, uint32_t *native_type); -extern void i915_poke_flush(drm_device_t *dev, uint32_t class); -extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags); +extern void i915_poke_flush(struct drm_device *dev, uint32_t class); +extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags); #endif #ifdef I915_HAVE_BUFFER /* i915_buffer.c */ -extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev); -extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type); -extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); -extern int i915_init_mem_type(drm_device_t *dev, uint32_t type, - drm_mem_type_manager_t *man); -extern uint32_t i915_evict_mask(drm_buffer_object_t *bo); -extern int i915_move(drm_buffer_object_t *bo, int evict, - int no_wait, drm_bo_mem_reg_t *new_mem); +extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev); +extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type); +extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); +extern uint32_t i915_evict_mask(struct drm_buffer_object *bo); +extern int i915_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); #endif @@ -249,7 +249,7 @@ extern int i915_move(drm_buffer_object_t *bo, int evict, I915_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); +extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 40724fae..698ecced 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -43,7 +43,8 @@ * This function must be called with the drawable spinlock held. */ static void -i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) +i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, + int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -69,7 +70,7 @@ i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) if (x2 > 0 && y2 > 0) { int i, num_rects = drw->num_rects; - drm_clip_rect_t *rect = drw->rects; + struct drm_clip_rect *rect = drw->rects; for (i = 0; i < num_rects; i++) if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || @@ -88,14 +89,14 @@ i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe) * * This function will be called with the HW lock held. 
*/ -static void i915_vblank_tasklet(drm_device_t *dev) +static void i915_vblank_tasklet(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct list_head *list, *tmp, hits, *hit; int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; unsigned counter[2] = { atomic_read(&dev->vbl_received), atomic_read(&dev->vbl_received2) }; - drm_drawable_info_t *drw; + struct drm_drawable_info *drw; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 cpp = dev_priv->cpp, offsets[3]; u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | @@ -145,7 +146,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) list_for_each(hit, &hits) { drm_i915_vbl_swap_t *swap_cmp = list_entry(hit, drm_i915_vbl_swap_t, head); - drm_drawable_info_t *drw_cmp = + struct drm_drawable_info *drw_cmp = drm_get_drawable_info(dev, swap_cmp->drw_id); if (drw_cmp && @@ -203,7 +204,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) list_for_each(hit, &hits) { drm_i915_vbl_swap_t *swap_hit = list_entry(hit, drm_i915_vbl_swap_t, head); - drm_clip_rect_t *rect; + struct drm_clip_rect *rect; int num_rects, pipe, front, back; unsigned short top, bottom; @@ -281,7 +282,7 @@ static void i915_vblank_tasklet(drm_device_t *dev) irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u16 temp; u32 pipea_stats, pipeb_stats; @@ -343,7 +344,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_HANDLED; } -int i915_emit_irq(drm_device_t * dev) +int i915_emit_irq(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -387,7 +388,7 @@ void i915_user_irq_off(drm_i915_private_t *dev_priv) } -static int i915_wait_irq(drm_device_t * dev, int irq_nr) +static int i915_wait_irq(struct drm_device * dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = 0; @@ -415,7 +416,7 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr) return ret; } -static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence, +static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, atomic_t *counter) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -436,12 +437,12 @@ static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence, return ret; } -int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) { return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); } -int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence) +int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) { return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); } @@ -494,7 +495,7 @@ int i915_irq_wait(DRM_IOCTL_ARGS) return i915_wait_irq(dev, irqwait.irq_seq); } -static void i915_enable_interrupt (drm_device_t *dev) +static void i915_enable_interrupt (struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -620,7 +621,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) swap.sequence--; if ((curseq - swap.sequence) <= (1<<23)) { - drm_drawable_info_t *drw; + struct drm_drawable_info *drw; LOCK_TEST_WITH_RETURN(dev, filp); @@ -698,7 +699,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) /* drm_dma.h hooks */ -void i915_driver_irq_preinstall(drm_device_t * dev) 
+void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -707,7 +708,7 @@ void i915_driver_irq_preinstall(drm_device_t * dev) I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); } -void i915_driver_irq_postinstall(drm_device_t * dev) +void i915_driver_irq_postinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -728,7 +729,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev) I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21)); } -void i915_driver_irq_uninstall(drm_device_t * dev) +void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u16 temp; diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index 13f19f3a..582687ad 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -43,11 +43,11 @@ * block to allocate, and the ring is drained prior to allocations -- * in other words allocation is expensive. */ -static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use) +static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_tex_region_t *list; + struct drm_tex_region *list; unsigned shift, nr; unsigned start; unsigned end; @@ -208,7 +208,7 @@ static int init_heap(struct mem_block **heap, int start, int size) /* Free all blocks associated with the releasing file. */ -void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap) +void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap) { struct mem_block *p; diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index 60f55900..d833475f 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -418,7 +418,7 @@ void mach64_dump_engine_info(drm_mach64_private_t * dev_priv) * pointed by the ring head. */ static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv, - drm_buf_t * buf) + struct drm_buf * buf) { u32 addr = GETBUFADDR(buf); u32 used = buf->used >> 2; @@ -522,7 +522,7 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv) list_for_each(ptr, &dev_priv->pending) { drm_mach64_freelist_t *entry = list_entry(ptr, drm_mach64_freelist_t, list); - drm_buf_t *buf = entry->buf; + struct drm_buf *buf = entry->buf; u32 buf_addr = GETBUFADDR(buf); @@ -572,7 +572,7 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv) * DMA operation. It is left here since it so tricky to get DMA operating * properly in some architectures and hardware. */ -static int mach64_bm_dma_test(drm_device_t * dev) +static int mach64_bm_dma_test(struct drm_device * dev) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_dma_handle_t *cpu_addr_dmah; @@ -752,7 +752,7 @@ static int mach64_bm_dma_test(drm_device_t * dev) * Called during the DMA initialization ioctl to initialize all the necessary * software and hardware state for DMA operation. 
*/ -static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init) +static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) { drm_mach64_private_t *dev_priv; u32 tmp; @@ -974,7 +974,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv) volatile u32 *ring_read; struct list_head *ptr; drm_mach64_freelist_t *entry; - drm_buf_t *buf = NULL; + struct drm_buf *buf = NULL; u32 *buf_ptr; u32 used, reg, target; int fifo, count, found, ret, no_idle_wait; @@ -1117,7 +1117,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv) /** \name DMA cleanup */ /*@{*/ -int mach64_do_cleanup_dma(drm_device_t * dev) +int mach64_do_cleanup_dma(struct drm_device * dev) { DRM_DEBUG("%s\n", __FUNCTION__); @@ -1223,9 +1223,9 @@ int mach64_engine_reset(DRM_IOCTL_ARGS) /** \name Freelist management */ /*@{*/ -int mach64_init_freelist(drm_device_t * dev) +int mach64_init_freelist(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_freelist_t *entry; struct list_head *ptr; @@ -1249,7 +1249,7 @@ int mach64_init_freelist(drm_device_t * dev) return 0; } -void mach64_destroy_freelist(drm_device_t * dev) +void mach64_destroy_freelist(struct drm_device * dev) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_freelist_t *entry; @@ -1381,7 +1381,7 @@ static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv) return 1; } -drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) +struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv) { drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; drm_mach64_freelist_t *entry; @@ -1427,7 +1427,7 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) return entry->buf; } -int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf) +int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_buf) { struct list_head *ptr; drm_mach64_freelist_t *entry; @@ -1461,11 +1461,11 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf) /** \name DMA buffer request and submission IOCTL handler */ /*@{*/ -static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev, - drm_dma_t * d) +static int mach64_dma_get_buffers(DRMFILE filp, struct drm_device * dev, + struct drm_dma * d) { int i; - drm_buf_t *buf; + struct drm_buf *buf; drm_mach64_private_t *dev_priv = dev->dev_private; for (i = d->granted_count; i < d->request_count; i++) { @@ -1495,13 +1495,13 @@ static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev, int mach64_dma_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; - drm_dma_t d; + struct drm_device_dma *dma = dev->dma; + struct drm_dma d; int ret = 0; LOCK_TEST_WITH_RETURN(dev, filp); - DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *) data, sizeof(d)); + DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma *) data, sizeof(d)); /* Please don't send us buffers. 
*/ @@ -1525,12 +1525,12 @@ int mach64_dma_buffers(DRM_IOCTL_ARGS) ret = mach64_dma_get_buffers(filp, dev, &d); } - DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d)); + DRM_COPY_TO_USER_IOCTL((struct drm_dma *) data, d, sizeof(d)); return ret; } -void mach64_driver_lastclose(drm_device_t * dev) +void mach64_driver_lastclose(struct drm_device * dev) { mach64_do_cleanup_dma(dev); } diff --git a/shared-core/mach64_drm.h b/shared-core/mach64_drm.h index 083f959d..1f5fd842 100644 --- a/shared-core/mach64_drm.h +++ b/shared-core/mach64_drm.h @@ -130,7 +130,7 @@ typedef struct drm_mach64_sarea { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[MACH64_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Counters for client-side throttling of rendering clients. @@ -139,7 +139,7 @@ typedef struct drm_mach64_sarea { /* Texture memory LRU. */ - drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS + + struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS + 1]; unsigned int tex_age[MACH64_NR_TEX_HEAPS]; int ctx_owner; diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index bb8b309e..a1b36751 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -55,7 +55,7 @@ typedef struct drm_mach64_freelist { struct list_head list; /* List pointers for free_list, placeholders, or pending list */ - drm_buf_t *buf; /* Pointer to the buffer */ + struct drm_buf *buf; /* Pointer to the buffer */ int discard; /* This flag is set when we're done (re)using a buffer */ u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */ } drm_mach64_freelist_t; @@ -108,7 +108,7 @@ typedef struct drm_mach64_private { drm_local_map_t *agp_textures; } drm_mach64_private_t; -extern drm_ioctl_desc_t mach64_ioctls[]; +extern struct drm_ioctl_desc mach64_ioctls[]; extern int mach64_max_ioctl; /* mach64_dma.c */ @@ -117,13 +117,13 @@ extern int mach64_dma_idle(DRM_IOCTL_ARGS); extern int mach64_dma_flush(DRM_IOCTL_ARGS); extern int mach64_engine_reset(DRM_IOCTL_ARGS); extern int mach64_dma_buffers(DRM_IOCTL_ARGS); -extern void mach64_driver_lastclose(drm_device_t * dev); +extern void mach64_driver_lastclose(struct drm_device * dev); -extern int mach64_init_freelist(drm_device_t * dev); -extern void mach64_destroy_freelist(drm_device_t * dev); -extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv); +extern int mach64_init_freelist(struct drm_device * dev); +extern void mach64_destroy_freelist(struct drm_device * dev); +extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv); extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, - drm_buf_t * copy_buf); + struct drm_buf * copy_buf); extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries); @@ -137,7 +137,7 @@ extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv); extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv); extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv); -extern int mach64_do_cleanup_dma(drm_device_t * dev); +extern int mach64_do_cleanup_dma(struct drm_device * dev); /* mach64_state.c */ extern int mach64_dma_clear(DRM_IOCTL_ARGS); @@ -145,13 +145,13 @@ extern int mach64_dma_swap(DRM_IOCTL_ARGS); extern int mach64_dma_vertex(DRM_IOCTL_ARGS); extern int mach64_dma_blit(DRM_IOCTL_ARGS); extern int mach64_get_param(DRM_IOCTL_ARGS); -extern int mach64_driver_vblank_wait(drm_device_t * dev, +extern int 
mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS); -extern void mach64_driver_irq_preinstall(drm_device_t * dev); -extern void mach64_driver_irq_postinstall(drm_device_t * dev); -extern void mach64_driver_irq_uninstall(drm_device_t * dev); +extern void mach64_driver_irq_preinstall(struct drm_device * dev); +extern void mach64_driver_irq_postinstall(struct drm_device * dev); +extern void mach64_driver_irq_uninstall(struct drm_device * dev); /* ================================================================ * Registers @@ -798,7 +798,7 @@ do { \ #define DMALOCALS \ drm_mach64_freelist_t *_entry = NULL; \ - drm_buf_t *_buf = NULL; \ + struct drm_buf *_buf = NULL; \ u32 *_buf_wptr; int _outcount #define GETBUFPTR( __buf ) \ @@ -813,7 +813,7 @@ do { \ static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t * dev_priv, drm_mach64_freelist_t ** - entry, drm_buf_t * buf) + entry, struct drm_buf * buf) { struct list_head *ptr; #if MACH64_EXTRA_CHECKING diff --git a/shared-core/mach64_irq.c b/shared-core/mach64_irq.c index 663642db..4122dd91 100644 --- a/shared-core/mach64_irq.c +++ b/shared-core/mach64_irq.c @@ -42,7 +42,7 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; int status; @@ -70,7 +70,7 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { unsigned int cur_vblank; int ret = 0; @@ -90,7 +90,7 @@ int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) /* drm_dma.h hooks */ -void mach64_driver_irq_preinstall(drm_device_t * dev) +void mach64_driver_irq_preinstall(struct drm_device * dev) { drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; @@ -104,7 +104,7 @@ void mach64_driver_irq_preinstall(drm_device_t * dev) | MACH64_CRTC_VBLANK_INT); } -void mach64_driver_irq_postinstall(drm_device_t * dev) +void mach64_driver_irq_postinstall(struct drm_device * dev) { drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; @@ -118,7 +118,7 @@ void mach64_driver_irq_postinstall(drm_device_t * dev) } -void mach64_driver_irq_uninstall(drm_device_t * dev) +void mach64_driver_irq_uninstall(struct drm_device * dev) { drm_mach64_private_t *dev_priv = (drm_mach64_private_t *) dev->dev_private; diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index 38cefca9..95ad1ec3 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -40,7 +40,7 @@ * 1.0 - Initial mach64 DRM * */ -drm_ioctl_desc_t mach64_ioctls[] = { +struct drm_ioctl_desc mach64_ioctls[] = { [DRM_IOCTL_NR(DRM_MACH64_INIT)] = {mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = {mach64_dma_clear, DRM_AUTH}, [DRM_IOCTL_NR(DRM_MACH64_SWAP)] = {mach64_dma_swap, DRM_AUTH}, @@ -86,10 +86,10 @@ static void mach64_print_dirty(const char *msg, unsigned int flags) * negative for an error */ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv, - drm_clip_rect_t * box) + struct drm_clip_rect * box) { u32 sc_left_right, sc_top_bottom; - drm_clip_rect_t scissor; + struct drm_clip_rect scissor; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; 
drm_mach64_context_regs_t *regs = &sarea_priv->context_state; DMALOCALS; @@ -212,7 +212,7 @@ static __inline__ int mach64_emit_state(DRMFILE filp, * DMA command dispatch functions */ -static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_dispatch_clear(DRMFILE filp, struct drm_device * dev, unsigned int flags, int cx, int cy, int cw, int ch, unsigned int clear_color, @@ -222,7 +222,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev, drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_context_regs_t *ctx = &sarea_priv->context_state; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; u32 fb_bpp, depth_bpp; int i; DMALOCALS; @@ -355,12 +355,12 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev, return 0; } -static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev) +static int mach64_dma_dispatch_swap(DRMFILE filp, struct drm_device * dev) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; u32 fb_bpp; int i; DMALOCALS; @@ -545,12 +545,12 @@ static __inline__ int copy_from_user_vertex(u32 *to, } } -static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_dispatch_vertex(DRMFILE filp, struct drm_device * dev, drm_mach64_vertex_t * vertex) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_buf_t *copy_buf; + struct drm_buf *copy_buf; void *buf = vertex->buf; unsigned long used = vertex->used; int ret = 0; @@ -640,13 +640,13 @@ static __inline__ int copy_from_user_blit(u32 *to, return 0; } -static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev, +static int mach64_dma_dispatch_blit(DRMFILE filp, struct drm_device * dev, drm_mach64_blit_t * blit) { drm_mach64_private_t *dev_priv = dev->dev_private; int dword_shift, dwords; unsigned long used; - drm_buf_t *copy_buf; + struct drm_buf *copy_buf; int verify_ret = 0; DMALOCALS; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index d48313c7..9bed3b34 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -46,7 +46,7 @@ #define MINIMAL_CLEANUP 0 #define FULL_CLEANUP 1 -static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup); +static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup); /* ================================================================ * Engine control @@ -224,7 +224,7 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) #define MGA_BUFFER_FREE 0 #if MGA_FREELIST_DEBUG -static void mga_freelist_print(drm_device_t * dev) +static void mga_freelist_print(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; @@ -245,10 +245,10 @@ static void mga_freelist_print(drm_device_t * dev) } #endif -static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv) +static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) { - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_freelist_t *entry; int i; @@ -291,7 +291,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv) return 0; } -static void 
mga_freelist_cleanup(drm_device_t * dev) +static void mga_freelist_cleanup(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; @@ -311,10 +311,10 @@ static void mga_freelist_cleanup(drm_device_t * dev) #if 0 /* FIXME: Still needed? */ -static void mga_freelist_reset(drm_device_t * dev) +static void mga_freelist_reset(struct drm_device * dev) { drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; int i; @@ -326,7 +326,7 @@ static void mga_freelist_reset(drm_device_t * dev) } #endif -static drm_buf_t *mga_freelist_get(drm_device_t * dev) +static struct drm_buf *mga_freelist_get(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *next; @@ -359,7 +359,7 @@ static drm_buf_t *mga_freelist_get(drm_device_t * dev) return NULL; } -int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf) +int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -393,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf) * DMA initialization, cleanup */ -int mga_driver_load(drm_device_t *dev, unsigned long flags) +int mga_driver_load(struct drm_device *dev, unsigned long flags) { drm_mga_private_t * dev_priv; @@ -433,7 +433,7 @@ int mga_driver_load(drm_device_t *dev, unsigned long flags) * * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap */ -static int mga_do_agp_dma_bootstrap(drm_device_t * dev, +static int mga_do_agp_dma_bootstrap(struct drm_device *dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -443,11 +443,11 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, const unsigned secondary_size = dma_bs->secondary_bin_count * dma_bs->secondary_bin_size; const unsigned agp_size = (dma_bs->agp_size << 20); - drm_buf_desc_t req; - drm_agp_mode_t mode; - drm_agp_info_t info; - drm_agp_buffer_t agp_req; - drm_agp_binding_t bind_req; + struct drm_buf_desc req; + struct drm_agp_mode mode; + struct drm_agp_info info; + struct drm_agp_buffer agp_req; + struct drm_agp_binding bind_req; /* Acquire AGP. 
*/ err = drm_agp_acquire(dev); @@ -548,7 +548,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, #ifdef __linux__ { - drm_map_list_t *_entry; + struct drm_map_list *_entry; unsigned long agp_token = 0; list_for_each_entry(_entry, &dev->maplist, head) { @@ -603,7 +603,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, * * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap */ -static int mga_do_pci_dma_bootstrap(drm_device_t * dev, +static int mga_do_pci_dma_bootstrap(struct drm_device * dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -611,7 +611,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev, unsigned int primary_size; unsigned int bin_count; int err; - drm_buf_desc_t req; + struct drm_buf_desc req; if (dev->dma == NULL) { @@ -696,7 +696,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev, } -static int mga_do_dma_bootstrap(drm_device_t * dev, +static int mga_do_dma_bootstrap(struct drm_device * dev, drm_mga_dma_bootstrap_t * dma_bs) { const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); @@ -799,7 +799,7 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS) } -static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init) +static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) { drm_mga_private_t *dev_priv; int ret; @@ -939,7 +939,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init) return 0; } -static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup) +static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) { int err = 0; DRM_DEBUG("\n"); @@ -967,8 +967,8 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup) if (dev_priv->used_new_dma_init) { if (dev_priv->agp_handle != 0) { - drm_agp_binding_t unbind_req; - drm_agp_buffer_t free_req; + struct drm_agp_binding unbind_req; + struct drm_agp_buffer free_req; unbind_req.handle = dev_priv->agp_handle; drm_agp_unbind(dev, &unbind_req); @@ -1043,11 +1043,11 @@ int mga_dma_flush(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - drm_lock_t lock; + struct drm_lock lock; LOCK_TEST_WITH_RETURN(dev, filp); - DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, sizeof(lock)); DRM_DEBUG("%s%s%s\n", @@ -1089,9 +1089,9 @@ int mga_dma_reset(DRM_IOCTL_ARGS) * DMA buffer management */ -static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d) +static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { - drm_buf_t *buf; + struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { @@ -1116,10 +1116,10 @@ static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d) int mga_dma_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - drm_dma_t __user *argp = (void __user *)data; - drm_dma_t d; + struct drm_dma __user *argp = (void __user *)data; + struct drm_dma d; int ret = 0; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1158,7 +1158,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) /** * Called just before the module is unloaded. 
*/ -int mga_driver_unload(drm_device_t * dev) +int mga_driver_unload(struct drm_device * dev) { drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; @@ -1169,12 +1169,12 @@ int mga_driver_unload(drm_device_t * dev) /** * Called when the last opener of the device is closed. */ -void mga_driver_lastclose(drm_device_t * dev) +void mga_driver_lastclose(struct drm_device * dev) { mga_do_cleanup_dma(dev, FULL_CLEANUP); } -int mga_driver_dma_quiescent(drm_device_t * dev) +int mga_driver_dma_quiescent(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; return mga_do_wait_for_idle(dev_priv); diff --git a/shared-core/mga_drm.h b/shared-core/mga_drm.h index 5bcdbfab..15c2dea2 100644 --- a/shared-core/mga_drm.h +++ b/shared-core/mga_drm.h @@ -181,7 +181,7 @@ typedef struct _drm_mga_sarea { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Information about the most recently used 3d drawable. The @@ -202,7 +202,7 @@ typedef struct _drm_mga_sarea { unsigned int exported_nback; int exported_back_x, exported_front_x, exported_w; int exported_back_y, exported_front_y, exported_h; - drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS]; /* Counters for aging textures and for client-side throttling. */ @@ -216,7 +216,7 @@ typedef struct _drm_mga_sarea { /* LRU lists for texture memory in agp space and on the card. */ - drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1]; + struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1]; unsigned int texAge[MGA_NR_TEX_HEAPS]; /* Mechanism to validate card state. 
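The hunks above all follow the same pattern as the rest of this merge: kernel-side code switches from the old *_t typedefs (drm_clip_rect_t, drm_device_t, drm_buf_t, ...) to bare struct tags, while the typedef block added to drm.h keeps the old names visible outside the kernel. Below is a minimal, illustrative userspace sketch of what that compatibility layer means in practice; it only assumes the x1/y1/x2/y2 members of struct drm_clip_rect that the i915 and mga hunks already reference, and the clamp helpers are hypothetical, introduced purely for illustration.

#include "drm.h"	/* the reworked header from this merge */

/* New-style helper: takes the bare struct tag, matching the reworked
 * driver code in the hunks above. */
static void clamp_rect(struct drm_clip_rect *box,
		       unsigned short w, unsigned short h)
{
	if (box->x2 > w)
		box->x2 = w;
	if (box->y2 > h)
		box->y2 = h;
}

/* Old-style caller: still builds in userspace because drm.h now carries
 * "typedef struct drm_clip_rect drm_clip_rect_t;" when !__KERNEL__
 * (or on the BSDs). */
static void clamp_all(drm_clip_rect_t *boxes, int nbox,
		      unsigned short w, unsigned short h)
{
	int i;

	for (i = 0; i < nbox; i++)
		clamp_rect(&boxes[i], w, h);
}

The same renaming extends to the new per-operation memory-manager ioctls defined earlier in this diff, where DRM_IOCTL_MM_INIT and friends replace the old multiplexed drm_mm_init_arg union with its op field. The sketch below shows, in hedged form, how a userspace caller might drive them; it uses only the struct drm_mm_init_arg, struct drm_mm_type_arg and DRM_BO_MEM_TT definitions shown above, and the version/magic values expected in the first three fields (defined elsewhere in drm.h, not in these hunks) are left to the caller.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Bring up, then tear down, the TT memory type.  Illustrative only:
 * magic/major/minor and the offset/size of the managed area are
 * supplied by the caller, since their expected values are defined on
 * the kernel side and are not part of this diff. */
static int bring_up_tt(int fd, unsigned int magic, unsigned int major,
		       unsigned int minor, uint64_t p_offset, uint64_t p_size)
{
	struct drm_mm_init_arg init;
	struct drm_mm_type_arg type;

	memset(&init, 0, sizeof(init));
	init.magic = magic;
	init.major = major;
	init.minor = minor;
	init.mem_type = DRM_BO_MEM_TT;
	init.p_offset = p_offset;
	init.p_size = p_size;

	if (ioctl(fd, DRM_IOCTL_MM_INIT, &init))
		return -errno;

	memset(&type, 0, sizeof(type));
	type.mem_type = DRM_BO_MEM_TT;
	if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &type))
		return -errno;

	return 0;
}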
diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h index bce82135..10096a95 100644 --- a/shared-core/mga_drv.h +++ b/shared-core/mga_drv.h @@ -65,7 +65,7 @@ typedef struct drm_mga_freelist { struct drm_mga_freelist *next; struct drm_mga_freelist *prev; drm_mga_age_t age; - drm_buf_t *buf; + struct drm_buf *buf; } drm_mga_freelist_t; typedef struct { @@ -148,7 +148,7 @@ typedef struct drm_mga_private { unsigned int agp_size; } drm_mga_private_t; -extern drm_ioctl_desc_t mga_ioctls[]; +extern struct drm_ioctl_desc mga_ioctls[]; extern int mga_max_ioctl; /* mga_dma.c */ @@ -157,10 +157,10 @@ extern int mga_dma_init(DRM_IOCTL_ARGS); extern int mga_dma_flush(DRM_IOCTL_ARGS); extern int mga_dma_reset(DRM_IOCTL_ARGS); extern int mga_dma_buffers(DRM_IOCTL_ARGS); -extern int mga_driver_load(drm_device_t *dev, unsigned long flags); -extern int mga_driver_unload(drm_device_t * dev); -extern void mga_driver_lastclose(drm_device_t * dev); -extern int mga_driver_dma_quiescent(drm_device_t * dev); +extern int mga_driver_load(struct drm_device *dev, unsigned long flags); +extern int mga_driver_unload(struct drm_device * dev); +extern void mga_driver_lastclose(struct drm_device * dev); +extern int mga_driver_dma_quiescent(struct drm_device * dev); extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); @@ -168,7 +168,7 @@ extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); -extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf); +extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); /* mga_warp.c */ extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); @@ -176,12 +176,12 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); extern int mga_warp_init(drm_mga_private_t * dev_priv); /* mga_irq.c */ -extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence); -extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); +extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); -extern void mga_driver_irq_preinstall(drm_device_t * dev); -extern void mga_driver_irq_postinstall(drm_device_t * dev); -extern void mga_driver_irq_uninstall(drm_device_t * dev); +extern void mga_driver_irq_preinstall(struct drm_device * dev); +extern void mga_driver_irq_postinstall(struct drm_device * dev); +extern void mga_driver_irq_uninstall(struct drm_device * dev); extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/shared-core/mga_irq.c b/shared-core/mga_irq.c index 490d1fbb..8b555e2e 100644 --- a/shared-core/mga_irq.c +++ b/shared-core/mga_irq.c @@ -38,7 +38,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; int status; int handled = 0; @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { unsigned int cur_vblank; int ret = 0; @@ -98,7 +98,7 @@ int mga_driver_vblank_wait(drm_device_t * dev, unsigned int 
*sequence) return ret; } -int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence) +int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; unsigned int cur_fence; @@ -117,7 +117,7 @@ int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence) return ret; } -void mga_driver_irq_preinstall(drm_device_t * dev) +void mga_driver_irq_preinstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -127,7 +127,7 @@ void mga_driver_irq_preinstall(drm_device_t * dev) MGA_WRITE(MGA_ICLEAR, ~0); } -void mga_driver_irq_postinstall(drm_device_t * dev) +void mga_driver_irq_postinstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -137,7 +137,7 @@ void mga_driver_irq_postinstall(drm_device_t * dev) MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); } -void mga_driver_irq_uninstall(drm_device_t * dev) +void mga_driver_irq_uninstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; if (!dev_priv) diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 8e5cb334..6d93c9e4 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -43,7 +43,7 @@ */ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, - drm_clip_rect_t * box) + struct drm_clip_rect * box) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; @@ -504,12 +504,12 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv, * */ -static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) +static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; @@ -525,7 +525,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) ADVANCE_DMA(); for (i = 0; i < nbox; i++) { - drm_clip_rect_t *box = &pbox[i]; + struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; DRM_DEBUG(" from=%d,%d to=%d,%d\n", @@ -594,12 +594,12 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear) FLUSH_DMA(); } -static void mga_dma_dispatch_swap(drm_device_t * dev) +static void mga_dma_dispatch_swap(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; @@ -626,7 +626,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) MGA_DWGCTL, MGA_DWGCTL_COPY); for (i = 0; i < nbox; i++) { - drm_clip_rect_t *box = &pbox[i]; + struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; u32 start = box->y1 * dev_priv->front_pitch; @@ -651,7 +651,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev) DRM_DEBUG("%s... 
done.\n", __FUNCTION__); } -static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) +static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -698,7 +698,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) FLUSH_DMA(); } -static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf, +static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf, unsigned int start, unsigned int end) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -747,7 +747,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf, /* This copies a 64 byte aligned agp region to the frambuffer with a * standard blit, the ioctl needs to do checking. */ -static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf, +static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, unsigned int dstorg, unsigned int length) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -800,12 +800,12 @@ static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf, FLUSH_DMA(); } -static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit) +static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; @@ -917,8 +917,8 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_vertex_t vertex; @@ -957,8 +957,8 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_indices_t indices; @@ -996,9 +996,9 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) static int mga_dma_iload(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; + struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t iload; DRM_DEBUG("\n"); @@ -1158,7 +1158,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS) return 0; } -drm_ioctl_desc_t mga_ioctls[] = { +struct drm_ioctl_desc mga_ioctls[] = { [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index e2a9ea83..4016f004 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -114,8 +114,8 @@ enum nouveau_card_type { NV_04 =4, NV_05 =5, NV_10 =10, - NV_11 =10, - NV_15 =10, + NV_11 =11, + NV_15 =11, NV_17 =17, NV_20 =20, NV_25 =20, @@ -137,7 +137,7 @@ enum nouveau_bus_type { struct drm_nouveau_sarea { /* the cliprects */ - drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; unsigned int nbox; }; diff --git 
a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 4fa979e6..9e11f9b7 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -118,6 +118,10 @@ struct nouveau_fifo struct nouveau_gpuobj_ref *ramin_grctx; uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */ + /* NV50 VM */ + struct nouveau_gpuobj *vm_pd; + struct nouveau_gpuobj_ref *vm_gart_pt; + /* Objects */ struct nouveau_gpuobj_ref *ramin; /* Private instmem */ struct mem_block *ramin_heap; /* Private PRAMIN heap */ @@ -153,6 +157,7 @@ struct nouveau_engine_func { struct { int (*init)(struct drm_device *dev); + uint64_t (*read)(struct drm_device *dev); void (*takedown)(struct drm_device *dev); } timer; @@ -220,8 +225,24 @@ struct drm_nouveau_private { /* base physical adresses */ uint64_t fb_phys; uint64_t fb_available_size; - uint64_t agp_phys; - uint64_t agp_available_size; + + struct { + enum { + NOUVEAU_GART_NONE = 0, + NOUVEAU_GART_AGP, + NOUVEAU_GART_SGDMA + } type; + uint64_t aper_base; + uint64_t aper_size; + + struct nouveau_gpuobj *sg_ctxdma; + struct page *sg_dummy_page; + dma_addr_t sg_dummy_bus; + + /* nottm hack */ + struct drm_ttm_backend *sg_be; + unsigned long sg_handle; + } gart_info; /* the mtrr covering the FB */ int fb_mtrr; @@ -307,6 +328,10 @@ extern int nouveau_gpuobj_dma_new(struct drm_device *, int channel, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gart_dma_new(struct drm_device *, int channel, + uint64_t offset, uint64_t size, + int access, struct nouveau_gpuobj **, + uint32_t *o_ret); extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class, struct nouveau_gpuobj **); extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); @@ -317,6 +342,13 @@ extern void nouveau_irq_preinstall(struct drm_device*); extern void nouveau_irq_postinstall(struct drm_device*); extern void nouveau_irq_uninstall(struct drm_device*); +/* nouveau_sgdma.c */ +extern int nouveau_sgdma_init(struct drm_device *); +extern void nouveau_sgdma_takedown(struct drm_device *); +extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); +extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); +extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); + /* nv04_fb.c */ extern int nv04_fb_init(struct drm_device *dev); extern void nv04_fb_takedown(struct drm_device *dev); @@ -438,6 +470,7 @@ extern void nv50_mc_takedown(struct drm_device *dev); /* nv04_timer.c */ extern int nv04_timer_init(struct drm_device *dev); +extern uint64_t nv04_timer_read(struct drm_device *dev); extern void nv04_timer_takedown(struct drm_device *dev); extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index c769f58f..230c8298 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -99,6 +99,7 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev) (1 << 16) /* 64 Bytes entry*/); /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ break; + case NV_11: case NV_10: case NV_04: case NV_03: @@ -210,24 +211,27 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) } if (cb->flags & NOUVEAU_MEM_AGP) { - DRM_DEBUG("Creating CB in AGP memory\n"); + ret = nouveau_gpuobj_gart_dma_new(dev, channel, + cb->start, cb->size, + NV_DMA_ACCESS_RO, + &pushbuf, + &chan->pushbuf_base); + } else + if (cb->flags & NOUVEAU_MEM_PCI) { ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - 
cb->start, cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf); - } else if ( cb->flags & NOUVEAU_MEM_PCI) { - DRM_DEBUG("Creating CB in PCI memory\n"); - ret = nouveau_gpuobj_dma_new(dev, channel, - NV_CLASS_DMA_IN_MEMORY, - cb->start, - cb->size, - NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf); + NV_CLASS_DMA_IN_MEMORY, + cb->start, cb->size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_PCI_NONLINEAR, + &pushbuf); + chan->pushbuf_base = 0; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new (dev, channel, NV_CLASS_DMA_IN_MEMORY, cb->start, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); + chan->pushbuf_base = 0; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in @@ -238,6 +242,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) cb->start + drm_get_resource_start(dev, 1), cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); + chan->pushbuf_base = 0; } if (ret) { @@ -249,11 +254,12 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf, &chan->pushbuf))) { DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); + if (pushbuf != dev_priv->gart_info.sg_ctxdma) + nouveau_gpuobj_del(dev, &pushbuf); return ret; } - dev_priv->fifos[channel]->pushbuf_base = 0; - dev_priv->fifos[channel]->pushbuf_mem = cb; + chan->pushbuf_mem = cb; return 0; } @@ -468,7 +474,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) DRM_DEVICE; struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_fifo_alloc init; - drm_map_list_t *entry; + struct drm_map_list *entry; struct nouveau_fifo *chan; int res; @@ -525,7 +531,7 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) * finally, the ioctl table ***********************************/ -drm_ioctl_desc_t nouveau_ioctls[] = { +struct drm_ioctl_desc nouveau_ioctls[] = { [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH}, diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 451262a1..f7baf89e 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -246,6 +246,61 @@ static void nouveau_nv04_context_switch(struct drm_device *dev) } #endif + +struct nouveau_bitfield_names +{ + uint32_t mask; + const char * name; +}; + +static struct nouveau_bitfield_names nouveau_nstatus_names[] = +{ + { NV03_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV03_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV03_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV03_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +}; + +static struct nouveau_bitfield_names nouveau_nsource_names[] = +{ + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, 
"PATCH_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, +}; + +static void +nouveau_print_bitfield_names(uint32_t value, + const struct nouveau_bitfield_names *namelist, + const int namelist_len) +{ + int i; + for(i=0; i<namelist_len; ++i) { + uint32_t mask = namelist[i].mask; + if(value & mask) { + printk(" %s", namelist[i].name); + value &= ~mask; + } + } + if(value) + printk(" (unknown bits 0x%08x)", value); +} + static void nouveau_graph_dump_trap_info(struct drm_device *dev) { @@ -253,21 +308,30 @@ nouveau_graph_dump_trap_info(struct drm_device *dev) uint32_t address; uint32_t channel, class; uint32_t method, subc, data; + uint32_t nsource, nstatus; address = NV_READ(0x400704); channel = (address >> 20) & 0x1F; subc = (address >> 16) & 0x7; method = address & 0x1FFC; data = NV_READ(0x400708); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); if (dev_priv->card_type < NV_50) { class = NV_READ(0x400160 + subc*4) & 0xFFFF; } else { class = NV_READ(0x400814); } - DRM_ERROR("NV: nSource: 0x%08x, nStatus: 0x%08x\n", - NV_READ(0x400108), NV_READ(0x400104)); - DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -" + DRM_ERROR("nSource:"); + nouveau_print_bitfield_names(nsource, nouveau_nsource_names, + ARRAY_SIZE(nouveau_nsource_names)); + printk(", nStatus:"); + nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, + ARRAY_SIZE(nouveau_nstatus_names)); + printk("\n"); + + DRM_ERROR("NV: Channel %d/%d (class 0x%04x) - " "Method 0x%04x, Data 0x%08x\n", channel, subc, class, method, data ); @@ -286,8 +350,8 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) uint32_t nsource, nstatus, instance, notify; DRM_DEBUG("NV: PGRAPH notify interrupt\n"); - nstatus = NV_READ(0x00400104); - nsource = NV_READ(0x00400108); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); /* if this wasn't NOTIFICATION_PENDING, dump extra trap info */ @@ -308,8 +372,8 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) uint32_t nsource, nstatus, instance, notify; DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n"); - nstatus = NV_READ(0x00400104); - nsource = NV_READ(0x00400108); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); instance = NV_READ(0x00400158); @@ -332,8 +396,8 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) DRM_ERROR("NV: PGRAPH error interrupt\n"); - nstatus = NV_READ(0x00400104); - nsource = NV_READ(0x00400108); + nstatus = NV_READ(NV03_PGRAPH_NSTATUS); + nsource = NV_READ(NV03_PGRAPH_NSOURCE); DRM_ERROR("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); instance = NV_READ(0x00400158); @@ -355,6 +419,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev) nouveau_nv04_context_switch(dev); break; case NV_10: + case NV_11: case NV_17: nouveau_nv10_context_switch(dev); break; diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 2b2418fb..7a923e17 100644 --- 
a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -209,12 +209,11 @@ void nouveau_mem_takedown(struct mem_block **heap) void nouveau_mem_close(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nouveau_mem_takedown(&dev_priv->agp_heap); nouveau_mem_takedown(&dev_priv->fb_heap); - if ( dev_priv->pci_heap ) - { + if (dev_priv->pci_heap) nouveau_mem_takedown(&dev_priv->pci_heap); - } } /* returns the amount of FB ram in bytes */ @@ -253,6 +252,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) } break; case NV_10: + case NV_11: case NV_17: case NV_20: case NV_30: @@ -281,93 +281,68 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) return 0; } - - -int nouveau_mem_init(struct drm_device *dev) +static int +nouveau_mem_init_agp(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t fb_size; - drm_scatter_gather_t sgreq; - dev_priv->agp_phys=0; - dev_priv->fb_phys=0; - sgreq . size = 4 << 20; //4MB of PCI scatter-gather zone - - /* init AGP */ - dev_priv->agp_heap=NULL; - if (drm_device_is_agp(dev)) - { - int err; - drm_agp_info_t info; - drm_agp_mode_t mode; - drm_agp_buffer_t agp_req; - drm_agp_binding_t bind_req; - - err = drm_agp_acquire(dev); - if (err) { - DRM_ERROR("Unable to acquire AGP: %d\n", err); - goto no_agp; - } - - err = drm_agp_info(dev, &info); - if (err) { - DRM_ERROR("Unable to get AGP info: %d\n", err); - goto no_agp; - } - - /* see agp.h for the AGPSTAT_* modes available */ - mode.mode = info.mode; - err = drm_agp_enable(dev, mode); - if (err) { - DRM_ERROR("Unable to enable AGP: %d\n", err); - goto no_agp; - } - - agp_req.size = info.aperture_size; - agp_req.type = 0; - err = drm_agp_alloc(dev, &agp_req); - if (err) { - DRM_ERROR("Unable to alloc AGP: %d\n", err); - goto no_agp; - } + struct drm_agp_info info; + struct drm_agp_mode mode; + struct drm_agp_buffer agp_req; + struct drm_agp_binding bind_req; + int ret; + + ret = drm_agp_acquire(dev); + if (ret) { + DRM_ERROR("Unable to acquire AGP: %d\n", ret); + return ret; + } - bind_req.handle = agp_req.handle; - bind_req.offset = 0; - err = drm_agp_bind(dev, &bind_req); - if (err) { - DRM_ERROR("Unable to bind AGP: %d\n", err); - goto no_agp; - } + ret = drm_agp_info(dev, &info); + if (ret) { + DRM_ERROR("Unable to get AGP info: %d\n", ret); + return ret; + } - if (nouveau_mem_init_heap(&dev_priv->agp_heap, - 0, info.aperture_size)) - goto no_agp; + /* see agp.h for the AGPSTAT_* modes available */ + mode.mode = info.mode; + ret = drm_agp_enable(dev, mode); + if (ret) { + DRM_ERROR("Unable to enable AGP: %d\n", ret); + return ret; + } - dev_priv->agp_phys = info.aperture_base; - dev_priv->agp_available_size = info.aperture_size; - goto have_agp; + agp_req.size = info.aperture_size; + agp_req.type = 0; + ret = drm_agp_alloc(dev, &agp_req); + if (ret) { + DRM_ERROR("Unable to alloc AGP: %d\n", ret); + return ret; } -no_agp: + bind_req.handle = agp_req.handle; + bind_req.offset = 0; + ret = drm_agp_bind(dev, &bind_req); + if (ret) { + DRM_ERROR("Unable to bind AGP: %d\n", ret); + return ret; + } - if ( dev_priv->card_type >= NV_50 ) goto no_pci; + dev_priv->gart_info.type = NOUVEAU_GART_AGP; + dev_priv->gart_info.aper_base = info.aperture_base; + dev_priv->gart_info.aper_size = info.aperture_size; + return 0; +} - dev_priv->pci_heap = NULL; - DRM_DEBUG("Allocating sg memory for PCI DMA\n"); - if ( drm_sg_alloc(dev, &sgreq) ) - { - DRM_ERROR("Unable to allocate 4MB of scatter-gather pages for PCI DMA!"); - goto no_pci; - } 
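The rewritten nouveau_mem_init() that follows picks exactly one GART backend per device: AGP when the device has an AGP aperture, the new shared SG-DMA page table on NV50-class PCIE cards, and the old 4MB drm_sg_alloc() PCI heap as the NV04-NV40 fallback. A condensed, user-space model of that selection is sketched below; struct fake_dev, pick_gart_backend() and the bare card-type numbers are illustrative stand-ins for the real drm_device/card_type checks, not driver API.

    /*
     * Condensed, user-space model of the GART backend choice made by the
     * rewritten nouveau_mem_init().  Illustrative only: the real code calls
     * nouveau_mem_init_agp()/nouveau_sgdma_init() and keys off dev->agp and
     * dev_priv->card_type.
     */
    #include <stdio.h>

    enum nouveau_gart_type { NOUVEAU_GART_NONE = 0, NOUVEAU_GART_AGP, NOUVEAU_GART_SGDMA };

    struct fake_dev {
        int has_agp;    /* stands in for drm_device_is_agp(dev) && dev->agp */
        int card_type;  /* stands in for dev_priv->card_type, e.g. 40 or 50 */
    };

    static enum nouveau_gart_type pick_gart_backend(const struct fake_dev *dev)
    {
        if (dev->has_agp)
            return NOUVEAU_GART_AGP;    /* AGP aperture available */
        if (dev->card_type >= 50)
            return NOUVEAU_GART_SGDMA;  /* NV50 PCIE: shared SG-DMA page table */
        return NOUVEAU_GART_NONE;       /* NV04-NV40: fall back to the drm_sg_alloc() PCI heap */
    }

    int main(void)
    {
        struct fake_dev nv50_pcie = { .has_agp = 0, .card_type = 50 };
        printf("backend = %d\n", pick_gart_backend(&nv50_pcie)); /* prints 2 (SGDMA) */
        return 0;
    }
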
+int nouveau_mem_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fb_size; + int ret = 0; - if ( nouveau_mem_init_heap(&dev_priv->pci_heap, 0, - dev->sg->pages * PAGE_SIZE)) - { - DRM_ERROR("Unable to initialize pci_heap!"); - goto no_pci; - } + dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; + dev_priv->fb_phys = 0; + dev_priv->gart_info.type = NOUVEAU_GART_NONE; -no_pci: -have_agp: /* setup a mtrr over the FB */ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), nouveau_mem_fb_amount(dev), @@ -398,6 +373,54 @@ have_agp: dev_priv->fb_nomap_heap=NULL; } + /* Init AGP / NV50 PCIEGART */ + if (drm_device_is_agp(dev) && dev->agp) { + if ((ret = nouveau_mem_init_agp(dev))) + DRM_ERROR("Error initialising AGP: %d\n", ret); + } + + /*Note: this is *not* just NV50 code, but only used on NV50 for now */ + if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && + dev_priv->card_type >= NV_50) { + ret = nouveau_sgdma_init(dev); + if (!ret) { + ret = nouveau_sgdma_nottm_hack_init(dev); + if (ret) + nouveau_sgdma_takedown(dev); + } + + if (ret) + DRM_ERROR("Error initialising SG DMA: %d\n", ret); + } + + if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { + if (nouveau_mem_init_heap(&dev_priv->agp_heap, + 0, dev_priv->gart_info.aper_size)) { + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { + nouveau_sgdma_nottm_hack_takedown(dev); + nouveau_sgdma_takedown(dev); + } + } + } + + /* NV04-NV40 PCIEGART */ + if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { + struct drm_scatter_gather sgreq; + + DRM_DEBUG("Allocating sg memory for PCI DMA\n"); + sgreq.size = 4 << 20; //4MB of PCI scatter-gather zone + + if (drm_sg_alloc(dev, &sgreq)) { + DRM_ERROR("Unable to allocate 4MB of scatter-gather" + " pages for PCI DMA!"); + } else { + if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, + dev->sg->pages * PAGE_SIZE)) { + DRM_ERROR("Unable to initialize pci_heap!"); + } + } + } + return 0; } @@ -468,13 +491,18 @@ alloc_ok: if (flags&NOUVEAU_MEM_MAPPED) { - drm_map_list_t *entry; + struct drm_map_list *entry; int ret = 0; block->flags|=NOUVEAU_MEM_MAPPED; - if (type == NOUVEAU_MEM_AGP) + if (type == NOUVEAU_MEM_AGP) { + if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) ret = drm_addmap(dev, block->start, block->size, _DRM_AGP, 0, &block->map); + else + ret = drm_addmap(dev, block->start, block->size, + _DRM_SCATTER_GATHER, 0, &block->map); + } else if (type == NOUVEAU_MEM_FB) ret = drm_addmap(dev, block->start + dev_priv->fb_phys, block->size, _DRM_FRAME_BUFFER, diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 36dba654..238e3c8b 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -37,7 +37,8 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp) int flags, ret; /*TODO: PCI notifier blocks */ - if (dev_priv->agp_heap) + if (dev_priv->agp_heap && + dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; else flags = NOUVEAU_MEM_FB; diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 146c4f1c..f0025d7a 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -596,7 +596,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, switch (target) { case NV_DMA_TARGET_AGP: - offset += dev_priv->agp_phys; + offset += dev_priv->gart_info.aper_base; break; case NV_DMA_TARGET_PCI_NONLINEAR: /*assume the "offset" is a virtual 
memory address*/ @@ -672,10 +672,10 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, pci_map_page(dev->pdev, dev->sg->pagelist[idx], 0, - DMA_31BIT_MASK, + PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dev->sg->busaddr[idx] == 0) { + if (dma_mapping_error(dev->sg->busaddr[idx])) { return DRM_ERR(ENOMEM); } } @@ -689,10 +689,20 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, } } } else { - INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); + uint32_t flags0, flags5; + + if (target == NV_DMA_TARGET_VIDMEM) { + flags0 = 0x00190000; + flags5 = 0x00010000; + } else { + flags0 = 0x7fc00000; + flags5 = 0x00080000; + } + + INSTANCE_WR(*gpuobj, 0, flags0 | class); INSTANCE_WR(*gpuobj, 1, offset + size - 1); INSTANCE_WR(*gpuobj, 2, offset); - INSTANCE_WR(*gpuobj, 5, 0x00010000); + INSTANCE_WR(*gpuobj, 5, flags5); } (*gpuobj)->engine = NVOBJ_ENGINE_SW; @@ -700,6 +710,42 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, return 0; } +int +nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel, + uint64_t offset, uint64_t size, int access, + struct nouveau_gpuobj **gpuobj, + uint32_t *o_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || + (dev_priv->card_type >= NV_50 && + dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { + ret = nouveau_gpuobj_dma_new(dev, channel, + NV_CLASS_DMA_IN_MEMORY, + offset, size, access, + NV_DMA_TARGET_AGP, gpuobj); + if (o_ret) + *o_ret = 0; + } else + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { + *gpuobj = dev_priv->gart_info.sg_ctxdma; + if (offset & ~0xffffffffULL) { + DRM_ERROR("obj offset exceeds 32-bits\n"); + return DRM_ERR(EINVAL); + } + if (o_ret) + *o_ret = (uint32_t)offset; + ret = (*gpuobj != NULL) ? 0 : DRM_ERR(EINVAL); + } else { + DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); + return DRM_ERR(EINVAL); + } + + return ret; +} + /* Context objects in the instance RAM have the following structure. * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. @@ -857,7 +903,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_gpuobj *vram = NULL, *tt = NULL; - int ret; + int ret, i; DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h); @@ -870,6 +916,29 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, return ret; } + /* NV50 VM, point offset 0-512MiB at shared PCIEGART table */ + if (dev_priv->card_type >= NV_50) { + uint32_t vm_offset; + + vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; + vm_offset += chan->ramin->gpuobj->im_pramin->start; + if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, 0x4000, + 0, &chan->vm_pd, NULL))) + return ret; + for (i=0; i<0x4000; i+=8) { + INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000); + INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); + } + + if ((ret = nouveau_gpuobj_ref_add(dev, -1, 0, + dev_priv->gart_info.sg_ctxdma, + &chan->vm_gart_pt))) + return ret; + INSTANCE_WR(chan->vm_pd, (0+0)/4, + chan->vm_gart_pt->instance | 0x03); + INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000); + } + /* RAMHT */ if (dev_priv->card_type < NV_50) { ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht, @@ -899,40 +968,34 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, return ret; } - if (dev_priv->agp_heap) { - /* AGPGART ctxdma */ - if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->agp_available_size, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_AGP, &tt))) { - DRM_ERROR("Error creating AGP TT ctxdma: %d\n", DRM_ERR(ENOMEM)); - return DRM_ERR(ENOMEM); - } - - ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); - if (ret) { - DRM_ERROR("Error referencing AGP TT ctxdma: %d\n", ret); - return ret; - } + /* TT memory ctxdma */ + if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { + ret = nouveau_gpuobj_gart_dma_new(dev, channel, 0, + dev_priv->gart_info.aper_size, + NV_DMA_ACCESS_RW, &tt, NULL); + } else + if (dev_priv->pci_heap) { + ret = nouveau_gpuobj_dma_new(dev, channel, + NV_CLASS_DMA_IN_MEMORY, + 0, dev->sg->pages * PAGE_SIZE, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_PCI_NONLINEAR, &tt); + } else { + DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); + ret = DRM_ERR(EINVAL); } - else if ( dev_priv->pci_heap) { - if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ - /*PCI*/ - if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - 0, dev->sg->pages * PAGE_SIZE, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_PCI_NONLINEAR, &tt))) { - DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM)); - return 0; //this is noncritical - } - - ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); - if (ret) { - DRM_ERROR("Error referencing PCI TT ctxdma: %d\n", ret); - return ret; - } + if (ret) { + DRM_ERROR("Error creating TT ctxdma: %d\n", ret); + return ret; } + + ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + if (ret) { + DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); + return ret; + } + return 0; } @@ -951,6 +1014,9 @@ nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) } nouveau_gpuobj_ref_del(dev, &chan->ramht); + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); + if (chan->ramin_heap) nouveau_mem_takedown(&chan->ramin_heap); if (chan->ramin) diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index a66d2d34..47d54b2a 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -110,6 +110,31 @@ #define NV04_PGRAPH_DEBUG_3 0x0040008c #define NV10_PGRAPH_DEBUG_4 0x00400090 #define NV03_PGRAPH_INTR 0x00400100 +#define NV03_PGRAPH_NSTATUS 0x00400104 +# define NV03_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) +# define NV03_PGRAPH_NSTATUS_INVALID_STATE (1<<24) +# define NV03_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) +# define NV03_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) +#define NV03_PGRAPH_NSOURCE 0x00400108 +# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0) +# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1) +# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 
2) +# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3) +# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4) +# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5) +# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6) +# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7) +# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8) +# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9) +# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) +# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) +# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) +# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) +# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) +# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) +# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) #define NV03_PGRAPH_INTR_EN 0x00400140 #define NV40_PGRAPH_INTR_EN 0x0040013C # define NV_PGRAPH_INTR_NOTIFY (1<< 0) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 69e9c221..a26ecea3 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -88,6 +88,8 @@ static int nouveau_init_card_mappings(struct drm_device *dev) static int nouveau_stub_init(struct drm_device *dev) { return 0; } static void nouveau_stub_takedown(struct drm_device *dev) {} +static uint64_t nouveau_stub_timer_read(struct drm_device *dev) { return 0; } + static int nouveau_init_engine_ptrs(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -104,6 +106,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv04_fb_init; engine->fb.takedown = nv04_fb_takedown; @@ -130,6 +133,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; @@ -156,6 +160,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; @@ -182,6 +187,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; @@ -208,6 +214,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv40_mc_init; engine->mc.takedown = nv40_mc_takedown; engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv40_fb_init; engine->fb.takedown = nv40_fb_takedown; @@ -235,6 +242,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; engine->timer.init = nouveau_stub_init; + engine->timer.read = nouveau_stub_timer_read; engine->timer.takedown = 
nouveau_stub_takedown; engine->fb.init = nouveau_stub_init; engine->fb.takedown = nouveau_stub_takedown; @@ -332,7 +340,12 @@ static void nouveau_card_takedown(struct drm_device *dev) engine->fb.takedown(dev); engine->timer.takedown(dev); engine->mc.takedown(dev); + + nouveau_sgdma_nottm_hack_takedown(dev); + nouveau_sgdma_takedown(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_close(dev); engine->instmem.takedown(dev); @@ -442,7 +455,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) getparam.value=dev_priv->fb_phys; break; case NOUVEAU_GETPARAM_AGP_PHYSICAL: - getparam.value=dev_priv->agp_phys; + getparam.value=dev_priv->gart_info.aper_base; break; case NOUVEAU_GETPARAM_PCI_PHYSICAL: if ( dev -> sg ) @@ -457,7 +470,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) getparam.value=dev_priv->fb_available_size; break; case NOUVEAU_GETPARAM_AGP_SIZE: - getparam.value=dev_priv->agp_available_size; + getparam.value=dev_priv->gart_info.aper_size; break; default: DRM_ERROR("unknown parameter %lld\n", getparam.param); @@ -509,16 +522,31 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) void nouveau_wait_for_idle(struct drm_device *dev) { struct drm_nouveau_private *dev_priv=dev->dev_private; - switch(dev_priv->card_type) - { - case NV_03: - while(NV_READ(NV03_PGRAPH_STATUS)); - break; - case NV_50: - break; - default: - while(NV_READ(NV04_PGRAPH_STATUS)); - break; + switch(dev_priv->card_type) { + case NV_03: + while (NV_READ(NV03_PGRAPH_STATUS)); + break; + case NV_50: + break; + default: { + /* This stuff is more or less a copy of what is seen + * in nv28 kmmio dump. + */ + uint64_t started = dev_priv->Engine.timer.read(dev); + uint64_t stopped = started; + uint32_t status; + do { + uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); + status = NV_READ(NV04_PGRAPH_STATUS); + if (!status) + break; + stopped = dev_priv->Engine.timer.read(dev); + /* It'll never wrap anyway... */ + } while (stopped - started < 1000000000ULL); + if (status) + DRM_ERROR("timed out with status 0x%08x\n", + status); + } } } diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index fc3b116d..7cf06269 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -70,6 +70,7 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev) case NV_30: case NV_20: case NV_17: + case NV_11: case NV_10: case NV_04: case NV_03: diff --git a/shared-core/nv04_timer.c b/shared-core/nv04_timer.c index efe78da7..08a27f4f 100644 --- a/shared-core/nv04_timer.c +++ b/shared-core/nv04_timer.c @@ -17,6 +17,27 @@ nv04_timer_init(struct drm_device *dev) return 0; } +uint64_t +nv04_timer_read(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t low; + /* From kmmio dumps on nv28 this looks like how the blob does this. + * It reads the high dword twice, before and after. + * The only explanation seems to be that the 64-bit timer counter + * advances between high and low dword reads and may corrupt the + * result. Not confirmed. 
+ */ + uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1); + uint32_t high1; + do { + high1 = high2; + low = NV_READ(NV04_PTIMER_TIME_0); + high2 = NV_READ(NV04_PTIMER_TIME_1); + } while(high1 != high2); + return (((uint64_t)high2) << 32) | (uint64_t)low; +} + void nv04_timer_takedown(struct drm_device *dev) { diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index e6aa1e2a..8af3bd12 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -242,5 +242,8 @@ int nv20_graph_init(struct drm_device *dev) { void nv20_graph_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); } diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index 23e0f7f0..d7138772 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -281,5 +281,8 @@ int nv30_graph_init(struct drm_device *dev) void nv30_graph_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); } diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index feab24c4..4933bbf3 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -263,7 +263,7 @@ nv50_fifo_create_context(struct drm_device *dev, int channel) INSTANCE_WR(ramfc, 0x54/4, 0x000f0000); INSTANCE_WR(ramfc, 0x7c/4, 0x30000001); INSTANCE_WR(ramfc, 0x78/4, 0x00000000); - INSTANCE_WR(ramfc, 0x4c/4, 0x00007fff); + INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); if (!IS_G80) { INSTANCE_WR(chan->ramin->gpuobj, 0, channel); diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 54fe498b..6a04c158 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -271,7 +271,7 @@ nv50_graph_load_context(struct drm_device *dev, int channel) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo *chan = dev_priv->fifos[channel]; uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); - int ret; + int ret; (void)ret; DRM_DEBUG("ch%d\n", channel); diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index a2ee18b7..167fc070 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -81,7 +81,7 @@ static u32 r128_cce_microcode[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; -static int R128_READ_PLL(drm_device_t * dev, int addr) +static int R128_READ_PLL(struct drm_device * dev, int addr) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -271,7 +271,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv) /* Reset the engine. This will stop the CCE if it is running. 
*/ -static int r128_do_engine_reset(drm_device_t * dev) +static int r128_do_engine_reset(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; @@ -308,7 +308,7 @@ static int r128_do_engine_reset(drm_device_t * dev) return 0; } -static void r128_cce_init_ring_buffer(drm_device_t * dev, +static void r128_cce_init_ring_buffer(struct drm_device * dev, drm_r128_private_t * dev_priv) { u32 ring_start; @@ -347,7 +347,7 @@ static void r128_cce_init_ring_buffer(drm_device_t * dev, R128_WRITE(R128_BUS_CNTL, tmp); } -static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init) +static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) { drm_r128_private_t *dev_priv; @@ -584,7 +584,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init) return 0; } -int r128_do_cleanup_cce(drm_device_t * dev) +int r128_do_cleanup_cce(struct drm_device * dev) { /* Make sure interrupts are disabled here because the uninstall ioctl @@ -767,11 +767,11 @@ int r128_fullscreen(DRM_IOCTL_ARGS) #define R128_BUFFER_FREE 0 #if 0 -static int r128_freelist_init(drm_device_t * dev) +static int r128_freelist_init(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_freelist_t *entry; int i; @@ -813,12 +813,12 @@ static int r128_freelist_init(drm_device_t * dev) } #endif -static drm_buf_t *r128_freelist_get(drm_device_t * dev) +static struct drm_buf *r128_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv; - drm_buf_t *buf; + struct drm_buf *buf; int i, t; /* FIXME: Optimize -- use freelist code */ @@ -851,13 +851,13 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev) return NULL; } -void r128_freelist_reset(drm_device_t * dev) +void r128_freelist_reset(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_r128_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } @@ -884,10 +884,10 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) return DRM_ERR(EBUSY); } -static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d) +static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) { int i; - drm_buf_t *buf; + struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = r128_freelist_get(dev); @@ -911,10 +911,10 @@ static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d) int r128_cce_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int ret = 0; - drm_dma_t __user *argp = (void __user *)data; - drm_dma_t d; + struct drm_dma __user *argp = (void __user *)data; + struct drm_dma d; LOCK_TEST_WITH_RETURN(dev, filp); diff --git a/shared-core/r128_drm.h b/shared-core/r128_drm.h index 6e8af313..e94a39c6 100644 --- a/shared-core/r128_drm.h +++ b/shared-core/r128_drm.h @@ -153,7 +153,7 @@ typedef struct drm_r128_sarea { /* The current cliprects, or a subset thereof. 
*/ - drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Counters for client-side throttling of rendering clients. @@ -161,7 +161,7 @@ typedef struct drm_r128_sarea { unsigned int last_frame; unsigned int last_dispatch; - drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1]; + struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1]; unsigned int tex_age[R128_NR_TEX_HEAPS]; int ctx_owner; int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */ diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index 90868356..c9abd67b 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -57,7 +57,7 @@ typedef struct drm_r128_freelist { unsigned int age; - drm_buf_t *buf; + struct drm_buf *buf; struct drm_r128_freelist *next; struct drm_r128_freelist *prev; } drm_r128_freelist_t; @@ -118,7 +118,7 @@ typedef struct drm_r128_private { drm_local_map_t *cce_ring; drm_local_map_t *ring_rptr; drm_local_map_t *agp_textures; - drm_ati_pcigart_info gart_info; + struct ati_pcigart_info gart_info; } drm_r128_private_t; typedef struct drm_r128_buf_priv { @@ -129,7 +129,7 @@ typedef struct drm_r128_buf_priv { drm_r128_freelist_t *list_entry; } drm_r128_buf_priv_t; -extern drm_ioctl_desc_t r128_ioctls[]; +extern struct drm_ioctl_desc r128_ioctls[]; extern int r128_max_ioctl; /* r128_cce.c */ @@ -142,21 +142,21 @@ extern int r128_engine_reset(DRM_IOCTL_ARGS); extern int r128_fullscreen(DRM_IOCTL_ARGS); extern int r128_cce_buffers(DRM_IOCTL_ARGS); -extern void r128_freelist_reset(drm_device_t * dev); +extern void r128_freelist_reset(struct drm_device * dev); extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); -extern int r128_do_cleanup_cce(drm_device_t * dev); +extern int r128_do_cleanup_cce(struct drm_device * dev); -extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); -extern void r128_driver_irq_preinstall(drm_device_t * dev); -extern void r128_driver_irq_postinstall(drm_device_t * dev); -extern void r128_driver_irq_uninstall(drm_device_t * dev); -extern void r128_driver_lastclose(drm_device_t * dev); -extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp); +extern void r128_driver_irq_preinstall(struct drm_device * dev); +extern void r128_driver_irq_postinstall(struct drm_device * dev); +extern void r128_driver_irq_uninstall(struct drm_device * dev); +extern void r128_driver_lastclose(struct drm_device * dev); +extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp); extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/shared-core/r128_irq.c b/shared-core/r128_irq.c index 87f8ca2b..c76fdca7 100644 --- a/shared-core/r128_irq.c +++ b/shared-core/r128_irq.c @@ -37,7 +37,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; int status; @@ -54,7 +54,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { unsigned int cur_vblank; int 
ret = 0; @@ -72,7 +72,7 @@ int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) return ret; } -void r128_driver_irq_preinstall(drm_device_t * dev) +void r128_driver_irq_preinstall(struct drm_device * dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; @@ -82,7 +82,7 @@ void r128_driver_irq_preinstall(drm_device_t * dev) R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); } -void r128_driver_irq_postinstall(drm_device_t * dev) +void r128_driver_irq_postinstall(struct drm_device * dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; @@ -90,7 +90,7 @@ void r128_driver_irq_postinstall(drm_device_t * dev) R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); } -void r128_driver_irq_uninstall(drm_device_t * dev) +void r128_driver_irq_uninstall(struct drm_device * dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; if (!dev_priv) diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index 17b11e7d..b793d94b 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -38,7 +38,7 @@ */ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, - drm_clip_rect_t * boxes, int count) + struct drm_clip_rect * boxes, int count) { u32 aux_sc_cntl = 0x00000000; RING_LOCALS; @@ -352,13 +352,13 @@ static void r128_print_dirty(const char *msg, unsigned int flags) (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : ""); } -static void r128_cce_dispatch_clear(drm_device_t * dev, +static void r128_cce_dispatch_clear(struct drm_device * dev, drm_r128_clear_t * clear) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; int i; RING_LOCALS; @@ -458,12 +458,12 @@ static void r128_cce_dispatch_clear(drm_device_t * dev, } } -static void r128_cce_dispatch_swap(drm_device_t * dev) +static void r128_cce_dispatch_swap(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int i; RING_LOCALS; DRM_DEBUG("%s\n", __FUNCTION__); @@ -524,7 +524,7 @@ static void r128_cce_dispatch_swap(drm_device_t * dev) ADVANCE_RING(); } -static void r128_cce_dispatch_flip(drm_device_t * dev) +static void r128_cce_dispatch_flip(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -567,7 +567,7 @@ static void r128_cce_dispatch_flip(drm_device_t * dev) ADVANCE_RING(); } -static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) +static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; @@ -637,8 +637,8 @@ static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) sarea_priv->nbox = 0; } -static void r128_cce_dispatch_indirect(drm_device_t * dev, - drm_buf_t * buf, int start, int end) +static void r128_cce_dispatch_indirect(struct drm_device * dev, + struct drm_buf * buf, int start, int end) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; @@ -692,8 +692,8 @@ static void r128_cce_dispatch_indirect(drm_device_t * dev, dev_priv->sarea_priv->last_dispatch++; } -static 
void r128_cce_dispatch_indices(drm_device_t * dev, - drm_buf_t * buf, +static void r128_cce_dispatch_indices(struct drm_device * dev, + struct drm_buf * buf, int start, int end, int count) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -777,11 +777,11 @@ static void r128_cce_dispatch_indices(drm_device_t * dev, } static int r128_cce_dispatch_blit(DRMFILE filp, - drm_device_t * dev, drm_r128_blit_t * blit) + struct drm_device * dev, drm_r128_blit_t * blit) { drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; u32 *data; int dword_shift, dwords; @@ -887,7 +887,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp, * have hardware stencil support. */ -static int r128_cce_dispatch_write_span(drm_device_t * dev, +static int r128_cce_dispatch_write_span(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -983,7 +983,7 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev, return 0; } -static int r128_cce_dispatch_write_pixels(drm_device_t * dev, +static int r128_cce_dispatch_write_pixels(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1105,7 +1105,7 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev, return 0; } -static int r128_cce_dispatch_read_span(drm_device_t * dev, +static int r128_cce_dispatch_read_span(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1148,7 +1148,7 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev, return 0; } -static int r128_cce_dispatch_read_pixels(drm_device_t * dev, +static int r128_cce_dispatch_read_pixels(struct drm_device * dev, drm_r128_depth_t * depth) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1220,7 +1220,7 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev, * Polygon stipple */ -static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple) +static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) { drm_r128_private_t *dev_priv = dev->dev_private; int i; @@ -1269,7 +1269,7 @@ static int r128_cce_clear(DRM_IOCTL_ARGS) return 0; } -static int r128_do_init_pageflip(drm_device_t * dev) +static int r128_do_init_pageflip(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1288,7 +1288,7 @@ static int r128_do_init_pageflip(drm_device_t * dev) return 0; } -static int r128_do_cleanup_pageflip(drm_device_t * dev) +static int r128_do_cleanup_pageflip(struct drm_device * dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1354,8 +1354,8 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_vertex_t vertex; @@ -1413,8 +1413,8 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indices_t elts; int count; @@ -1483,7 +1483,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) static int r128_cce_blit(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma 
*dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_blit_t blit; int ret; @@ -1571,8 +1571,8 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_r128_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indirect_t indirect; #if 0 @@ -1675,7 +1675,7 @@ static int r128_getparam(DRM_IOCTL_ARGS) return 0; } -void r128_driver_preclose(drm_device_t * dev, DRMFILE filp) +void r128_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -1685,12 +1685,12 @@ void r128_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void r128_driver_lastclose(drm_device_t * dev) +void r128_driver_lastclose(struct drm_device * dev) { r128_do_cleanup_cce(dev); } -drm_ioctl_desc_t r128_ioctls[] = { +struct drm_ioctl_desc r128_ioctls[] = { [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index 0cd5d7e2..9cf352ae 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -55,7 +55,7 @@ static const int r300_cliprect_cntl[4] = { static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf, int n) { - drm_clip_rect_t box; + struct drm_clip_rect box; int nr; int i; RING_LOCALS; @@ -706,7 +706,7 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must * be careful about how this function is called. */ -static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf) +static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -778,14 +778,14 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, * commands on the DMA ring buffer. * Called by the ioctl handler function radeon_cp_cmdbuf. */ -int r300_do_cp_cmdbuf(drm_device_t *dev, +int r300_do_cp_cmdbuf(struct drm_device *dev, DRMFILE filp, - drm_file_t *filp_priv, + struct drm_file *filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf = NULL; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf = NULL; int emit_dispatch_age = 0; int ret = 0; diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index ec2e688b..40a20e6c 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -36,7 +36,7 @@ #define RADEON_FIFO_DEBUG 0 -static int radeon_do_cleanup_cp(drm_device_t * dev); +static int radeon_do_cleanup_cp(struct drm_device * dev); /* CP microcode (from ATI) */ static const u32 R200_cp_microcode[][2] = { @@ -816,7 +816,7 @@ static const u32 R300_cp_microcode[][2] = { { 0000000000, 0000000000 }, }; -static int RADEON_READ_PLL(drm_device_t * dev, int addr) +static int RADEON_READ_PLL(struct drm_device * dev, int addr) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1066,7 +1066,7 @@ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) /* Reset the engine. This will stop the CP if it is running. 
*/ -static int radeon_do_engine_reset(drm_device_t * dev) +static int radeon_do_engine_reset(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; @@ -1122,7 +1122,7 @@ static int radeon_do_engine_reset(drm_device_t * dev) return 0; } -static void radeon_cp_init_ring_buffer(drm_device_t * dev, +static void radeon_cp_init_ring_buffer(struct drm_device * dev, drm_radeon_private_t * dev_priv) { u32 ring_start, cur_read_ptr; @@ -1174,7 +1174,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, } else #endif { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; unsigned long tmp_ofs, page_ofs; tmp_ofs = dev_priv->ring_rptr->offset - @@ -1390,7 +1390,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) } } -static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) +static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1750,7 +1750,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) return 0; } -static int radeon_do_cleanup_cp(drm_device_t * dev) +static int radeon_do_cleanup_cp(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1806,7 +1806,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev) * * Charl P. Botha <http://cpbotha.net> */ -static int radeon_do_resume_cp(drm_device_t * dev) +static int radeon_do_resume_cp(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1933,7 +1933,7 @@ int radeon_cp_stop(DRM_IOCTL_ARGS) return 0; } -void radeon_do_release(drm_device_t * dev) +void radeon_do_release(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; int i, ret; @@ -2066,12 +2066,12 @@ int radeon_fullscreen(DRM_IOCTL_ARGS) * they can't get the lock. 
*/ -drm_buf_t *radeon_freelist_get(drm_device_t * dev) +struct drm_buf *radeon_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; - drm_buf_t *buf; + struct drm_buf *buf; int i, t; int start; @@ -2106,12 +2106,12 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev) } #if 0 -drm_buf_t *radeon_freelist_get(drm_device_t * dev) +struct drm_buf *radeon_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; - drm_buf_t *buf; + struct drm_buf *buf; int i, t; int start; u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)); @@ -2140,15 +2140,15 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev) } #endif -void radeon_freelist_reset(drm_device_t * dev) +void radeon_freelist_reset(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; int i; dev_priv->last_buf = 0; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; buf_priv->age = 0; } @@ -2190,11 +2190,11 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) return DRM_ERR(EBUSY); } -static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev, - drm_dma_t * d) +static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, + struct drm_dma * d) { int i; - drm_buf_t *buf; + struct drm_buf *buf; for (i = d->granted_count; i < d->request_count; i++) { buf = radeon_freelist_get(dev); @@ -2218,10 +2218,10 @@ static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev, int radeon_cp_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int ret = 0; - drm_dma_t __user *argp = (void __user *)data; - drm_dma_t d; + struct drm_dma __user *argp = (void __user *)data; + struct drm_dma d; LOCK_TEST_WITH_RETURN(dev, filp); diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h index 6a57b804..b0ef702b 100644 --- a/shared-core/radeon_drm.h +++ b/shared-core/radeon_drm.h @@ -417,7 +417,7 @@ typedef struct { /* The current cliprects, or a subset thereof. */ - drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS]; unsigned int nbox; /* Counters for client-side throttling of rendering clients. 
@@ -426,7 +426,7 @@ typedef struct { unsigned int last_dispatch; unsigned int last_clear; - drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + + struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + 1]; unsigned int tex_age[RADEON_NR_TEX_HEAPS]; int ctx_owner; @@ -604,7 +604,7 @@ typedef struct drm_radeon_cmd_buffer { int bufsz; char __user *buf; int nbox; - drm_clip_rect_t __user *boxes; + struct drm_clip_rect __user *boxes; } drm_radeon_cmd_buffer_t; typedef struct drm_radeon_tex_image { diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 92a9b65e..2dca1e70 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -156,7 +156,7 @@ enum radeon_chip_flags { typedef struct drm_radeon_freelist { unsigned int age; - drm_buf_t *buf; + struct drm_buf *buf; struct drm_radeon_freelist *next; struct drm_radeon_freelist *prev; } drm_radeon_freelist_t; @@ -295,7 +295,7 @@ typedef struct drm_radeon_private { unsigned long pcigart_offset; unsigned int pcigart_offset_set; - drm_ati_pcigart_info gart_info; + struct ati_pcigart_info gart_info; u32 scratch_ages[5]; @@ -312,11 +312,11 @@ typedef struct drm_radeon_kcmd_buffer { int bufsz; char *buf; int nbox; - drm_clip_rect_t __user *boxes; + struct drm_clip_rect __user *boxes; } drm_radeon_kcmd_buffer_t; extern int radeon_no_wb; -extern drm_ioctl_desc_t radeon_ioctls[]; +extern struct drm_ioctl_desc radeon_ioctls[]; extern int radeon_max_ioctl; /* Check whether the given hardware address is inside the framebuffer or the @@ -345,8 +345,8 @@ extern int radeon_engine_reset(DRM_IOCTL_ARGS); extern int radeon_fullscreen(DRM_IOCTL_ARGS); extern int radeon_cp_buffers(DRM_IOCTL_ARGS); -extern void radeon_freelist_reset(drm_device_t * dev); -extern drm_buf_t *radeon_freelist_get(drm_device_t * dev); +extern void radeon_freelist_reset(struct drm_device * dev); +extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); @@ -362,33 +362,33 @@ extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap); extern int radeon_irq_emit(DRM_IOCTL_ARGS); extern int radeon_irq_wait(DRM_IOCTL_ARGS); -extern void radeon_do_release(drm_device_t * dev); -extern int radeon_driver_vblank_wait(drm_device_t * dev, +extern void radeon_do_release(struct drm_device * dev); +extern int radeon_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); -extern int radeon_driver_vblank_wait2(drm_device_t * dev, +extern int radeon_driver_vblank_wait2(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); -extern void radeon_driver_irq_preinstall(drm_device_t * dev); -extern void radeon_driver_irq_postinstall(drm_device_t * dev); -extern void radeon_driver_irq_uninstall(drm_device_t * dev); -extern int radeon_vblank_crtc_get(drm_device_t *dev); -extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value); +extern void radeon_driver_irq_preinstall(struct drm_device * dev); +extern void radeon_driver_irq_postinstall(struct drm_device * dev); +extern void radeon_driver_irq_uninstall(struct drm_device * dev); +extern int radeon_vblank_crtc_get(struct drm_device *dev); +extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); extern int radeon_driver_firstopen(struct drm_device *dev); -extern void 
radeon_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp); -extern void radeon_driver_lastclose(drm_device_t * dev); -extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv); +extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); +extern void radeon_driver_lastclose(struct drm_device * dev); +extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* r300_cmdbuf.c */ extern void r300_init_reg_flags(void); -extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, - drm_file_t* filp_priv, +extern int r300_do_cp_cmdbuf(struct drm_device *dev, DRMFILE filp, + struct drm_file* filp_priv, drm_radeon_kcmd_buffer_t* cmdbuf); /* Flags for stats.boxes diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index a4be86e3..ad8a0ac7 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -64,7 +64,7 @@ static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; u32 stat; @@ -109,7 +109,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_HANDLED; } -static int radeon_emit_irq(drm_device_t * dev) +static int radeon_emit_irq(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; unsigned int ret; @@ -127,7 +127,7 @@ static int radeon_emit_irq(drm_device_t * dev) return ret; } -static int radeon_wait_irq(drm_device_t * dev, int swi_nr) +static int radeon_wait_irq(struct drm_device * dev, int swi_nr) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -144,7 +144,7 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr) return ret; } -int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence, +int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence, int crtc) { drm_radeon_private_t *dev_priv = @@ -184,12 +184,12 @@ int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence, return ret; } -int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) { return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1); } -int radeon_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence) +int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) { return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2); } @@ -242,7 +242,7 @@ int radeon_irq_wait(DRM_IOCTL_ARGS) return radeon_wait_irq(dev, irqwait.irq_seq); } -static void radeon_enable_interrupt(drm_device_t *dev) +static void radeon_enable_interrupt(struct drm_device *dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -259,7 +259,7 @@ static void radeon_enable_interrupt(drm_device_t *dev) /* drm_dma.h hooks */ -void radeon_driver_irq_preinstall(drm_device_t * dev) +void radeon_driver_irq_preinstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -273,7 +273,7 @@ void 
radeon_driver_irq_preinstall(drm_device_t * dev) RADEON_CRTC2_VBLANK_STAT)); } -void radeon_driver_irq_postinstall(drm_device_t * dev) +void radeon_driver_irq_postinstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -284,7 +284,7 @@ void radeon_driver_irq_postinstall(drm_device_t * dev) radeon_enable_interrupt(dev); } -void radeon_driver_irq_uninstall(drm_device_t * dev) +void radeon_driver_irq_uninstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; @@ -298,7 +298,7 @@ void radeon_driver_irq_uninstall(drm_device_t * dev) } -int radeon_vblank_crtc_get(drm_device_t *dev) +int radeon_vblank_crtc_get(struct drm_device *dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; u32 flag; @@ -315,7 +315,7 @@ int radeon_vblank_crtc_get(drm_device_t *dev) return value; } -int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value) +int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 8ccd0981..13b09d44 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -39,7 +39,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, u32 * offset) { u64 off = *offset; @@ -90,7 +90,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, int id, u32 *data) { switch (id) { @@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * dev_priv, - drm_file_t *filp_priv, + struct drm_file *filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf, unsigned int *cmdsz) @@ -421,7 +421,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * */ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, - drm_clip_rect_t * box) + struct drm_clip_rect * box) { RING_LOCALS; @@ -439,7 +439,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, /* Emit 1.1 state */ static int radeon_emit_state(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, drm_radeon_context_regs_t * ctx, drm_radeon_texture_regs_t * tex, unsigned int dirty) @@ -608,7 +608,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, /* Emit 1.2 state */ static int radeon_emit_state2(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, drm_radeon_state_t * state) { RING_LOCALS; @@ -844,7 +844,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) * CP command dispatch functions */ -static void radeon_cp_dispatch_clear(drm_device_t * dev, +static void radeon_cp_dispatch_clear(struct drm_device * dev, drm_radeon_clear_t * clear, drm_radeon_clear_rect_t * depth_boxes) { @@ -852,7 +852,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct 
drm_clip_rect *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0; int i; @@ -1335,12 +1335,12 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, ADVANCE_RING(); } -static void radeon_cp_dispatch_swap(drm_device_t * dev) +static void radeon_cp_dispatch_swap(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int i; RING_LOCALS; DRM_DEBUG("\n"); @@ -1412,10 +1412,10 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) ADVANCE_RING(); } -static void radeon_cp_dispatch_flip(drm_device_t * dev) +static void radeon_cp_dispatch_flip(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle; + struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) ? dev_priv->front_offset : dev_priv->back_offset; RING_LOCALS; @@ -1491,8 +1491,8 @@ typedef struct { unsigned int vc_format; } drm_radeon_tcl_prim_t; -static void radeon_cp_dispatch_vertex(drm_device_t * dev, - drm_buf_t * buf, +static void radeon_cp_dispatch_vertex(struct drm_device * dev, + struct drm_buf * buf, drm_radeon_tcl_prim_t * prim) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1537,7 +1537,7 @@ static void radeon_cp_dispatch_vertex(drm_device_t * dev, } while (i < nbox); } -static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf) +static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -1554,8 +1554,8 @@ static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf) buf->used = 0; } -static void radeon_cp_dispatch_indirect(drm_device_t * dev, - drm_buf_t * buf, int start, int end) +static void radeon_cp_dispatch_indirect(struct drm_device * dev, + struct drm_buf * buf, int start, int end) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1588,8 +1588,8 @@ static void radeon_cp_dispatch_indirect(drm_device_t * dev, } } -static void radeon_cp_dispatch_indices(drm_device_t * dev, - drm_buf_t * elt_buf, +static void radeon_cp_dispatch_indices(struct drm_device * dev, + struct drm_buf * elt_buf, drm_radeon_tcl_prim_t * prim) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1647,13 +1647,13 @@ static void radeon_cp_dispatch_indices(drm_device_t * dev, #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE static int radeon_cp_dispatch_texture(DRMFILE filp, - drm_device_t * dev, + struct drm_device * dev, drm_radeon_texture_t * tex, drm_radeon_tex_image_t * image) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; - drm_buf_t *buf; + struct drm_file *filp_priv; + struct drm_buf *buf; u32 format; u32 *buffer; const u8 __user *data; @@ -1881,7 +1881,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, return 0; } -static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple) +static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple) { drm_radeon_private_t *dev_priv = dev->dev_private; int i; @@ -2144,7 +2144,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS) /* Not sure why this isn't set all the time: */ -static int radeon_do_init_pageflip(drm_device_t * dev) +static int 
radeon_do_init_pageflip(struct drm_device * dev) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -2216,10 +2216,10 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_vertex_t vertex; drm_radeon_tcl_prim_t prim; @@ -2306,10 +2306,10 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_indices_t elts; drm_radeon_tcl_prim_t prim; int count; @@ -2461,8 +2461,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_indirect_t indirect; RING_LOCALS; @@ -2535,10 +2535,10 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_sarea_t *sarea_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_radeon_vertex2_t vertex; int i; unsigned char laststate; @@ -2638,7 +2638,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) } static int radeon_emit_packets(drm_radeon_private_t * dev_priv, - drm_file_t * filp_priv, + struct drm_file * filp_priv, drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf) { @@ -2763,8 +2763,8 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, return 0; } -static int radeon_emit_packet3(drm_device_t * dev, - drm_file_t * filp_priv, +static int radeon_emit_packet3(struct drm_device * dev, + struct drm_file * filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -2789,16 +2789,16 @@ static int radeon_emit_packet3(drm_device_t * dev, return 0; } -static int radeon_emit_packet3_cliprect(drm_device_t *dev, - drm_file_t *filp_priv, +static int radeon_emit_packet3_cliprect(struct drm_device *dev, + struct drm_file *filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf, int orig_nbox) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_clip_rect_t box; + struct drm_clip_rect box; unsigned int cmdsz; int ret; - drm_clip_rect_t __user *boxes = cmdbuf->boxes; + struct drm_clip_rect __user *boxes = cmdbuf->boxes; int i = 0; RING_LOCALS; @@ -2851,7 +2851,7 @@ static int radeon_emit_packet3_cliprect(drm_device_t *dev, return 0; } -static int radeon_emit_wait(drm_device_t * dev, int flags) +static int radeon_emit_wait(struct drm_device * dev, int flags) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -2884,9 +2884,9 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf = NULL; + struct drm_file *filp_priv; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf = NULL; int idx; drm_radeon_kcmd_buffer_t cmdbuf; drm_radeon_cmd_header_t header; @@ -3151,7 +3151,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) { 
DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_file_t *filp_priv; + struct drm_file *filp_priv; drm_radeon_setparam_t sp; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3213,7 +3213,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) * * DRM infrastructure takes care of reclaiming dma buffers. */ -void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp) +void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -3224,7 +3224,7 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void radeon_driver_lastclose(drm_device_t * dev) +void radeon_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -3237,7 +3237,7 @@ void radeon_driver_lastclose(drm_device_t * dev) radeon_do_release(dev); } -int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv) +int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3259,7 +3259,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv) return 0; } -void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv) +void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp_priv) { struct drm_radeon_driver_file_fields *radeon_priv = filp_priv->driver_priv; @@ -3267,7 +3267,7 @@ void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv) drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); } -drm_ioctl_desc_t radeon_ioctls[] = { +struct drm_ioctl_desc radeon_ioctls[] = { [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 9a3ae1f1..a3fd8994 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -32,7 +32,7 @@ #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ #define SAVAGE_FREELIST_DEBUG 0 -static int savage_do_cleanup_bci(drm_device_t *dev); +static int savage_do_cleanup_bci(struct drm_device *dev); static int savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) @@ -203,11 +203,11 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, /* * Freelist management */ -static int savage_freelist_init(drm_device_t *dev) +static int savage_freelist_init(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; drm_savage_buf_priv_t *entry; int i; DRM_DEBUG("count=%d\n", dma->buf_count); @@ -236,7 +236,7 @@ static int savage_freelist_init(drm_device_t *dev) return 0; } -static drm_buf_t *savage_freelist_get(drm_device_t *dev) +static struct drm_buf *savage_freelist_get(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *tail = dev_priv->tail.prev; @@ -269,7 +269,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t *dev) return NULL; } -void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf) +void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *entry = 
buf->dev_private, *prev, *next; @@ -535,7 +535,7 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv) dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } -int savage_driver_load(drm_device_t *dev, unsigned long chipset) +int savage_driver_load(struct drm_device *dev, unsigned long chipset) { drm_savage_private_t *dev_priv; @@ -557,7 +557,7 @@ int savage_driver_load(drm_device_t *dev, unsigned long chipset) * in drm_addmap. Therefore we add them manually before the maps are * initialized, and tear them down on last close. */ -int savage_driver_firstopen(drm_device_t *dev) +int savage_driver_firstopen(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; unsigned long mmio_base, fb_base, fb_size, aperture_base; @@ -654,7 +654,7 @@ int savage_driver_firstopen(drm_device_t *dev) /* * Delete MTRRs and free device-private data. */ -void savage_driver_lastclose(drm_device_t *dev) +void savage_driver_lastclose(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; int i; @@ -666,7 +666,7 @@ void savage_driver_lastclose(drm_device_t *dev) dev_priv->mtrr[i].size, DRM_MTRR_WC); } -int savage_driver_unload(drm_device_t *dev) +int savage_driver_unload(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -675,7 +675,7 @@ int savage_driver_unload(drm_device_t *dev) return 0; } -static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) +static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -897,7 +897,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) return 0; } -static int savage_do_cleanup_bci(drm_device_t *dev) +static int savage_do_cleanup_bci(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -1006,9 +1006,9 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS) * DMA buffer management */ -static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d) +static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d) { - drm_buf_t *buf; + struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { @@ -1033,13 +1033,13 @@ static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d) int savage_bci_buffers(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; - drm_dma_t d; + struct drm_device_dma *dma = dev->dma; + struct drm_dma d; int ret = 0; LOCK_TEST_WITH_RETURN(dev, filp); - DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d)); + DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *)data, sizeof(d)); /* Please don't send us buffers. 
*/ @@ -1063,14 +1063,14 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) ret = savage_bci_get_buffers(filp, dev, &d); } - DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d)); + DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *)data, d, sizeof(d)); return ret; } -void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) +void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_savage_private_t *dev_priv = dev->dev_private; int i; @@ -1084,7 +1084,7 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) /*i830_flush_queue(dev);*/ for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; if (buf->filp == filp && buf_priv && @@ -1100,7 +1100,7 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) drm_core_reclaim_buffers(dev, filp); } -drm_ioctl_desc_t savage_ioctls[] = { +struct drm_ioctl_desc savage_ioctls[] = { [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, diff --git a/shared-core/savage_drm.h b/shared-core/savage_drm.h index 6526c9aa..b960d557 100644 --- a/shared-core/savage_drm.h +++ b/shared-core/savage_drm.h @@ -47,7 +47,7 @@ typedef struct _drm_savage_sarea { /* LRU lists for texture memory in agp space and on the card. */ - drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; + struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; unsigned int texAge[SAVAGE_NR_TEX_HEAPS]; /* Mechanism to validate card state. @@ -112,7 +112,7 @@ typedef struct drm_savage_cmdbuf { unsigned int vb_size; /* size of client vertex buffer in bytes */ unsigned int vb_stride; /* stride of vertices in 32bit words */ /* boxes in client's address space */ - drm_clip_rect_t __user *box_addr; + struct drm_clip_rect __user *box_addr; unsigned int nbox; /* number of clipping boxes */ } drm_savage_cmdbuf_t; diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 88c571e1..e9e2231f 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -58,7 +58,7 @@ typedef struct drm_savage_buf_priv { struct drm_savage_buf_priv *next; struct drm_savage_buf_priv *prev; drm_savage_age_t age; - drm_buf_t *buf; + struct drm_buf *buf; } drm_savage_buf_priv_t; typedef struct drm_savage_dma_page { @@ -104,7 +104,7 @@ enum savage_family { S3_LAST }; -extern drm_ioctl_desc_t savage_ioctls[]; +extern struct drm_ioctl_desc savage_ioctls[]; extern int savage_max_ioctl; #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) @@ -192,7 +192,7 @@ typedef struct drm_savage_private { /* Err, there is a macro wait_event in include/linux/wait.h. * Avoid unwanted macro expansion. 
*/ void (*emit_clip_rect)(struct drm_savage_private *dev_priv, - const drm_clip_rect_t *pbox); + const struct drm_clip_rect *pbox); void (*dma_flush)(struct drm_savage_private *dev_priv); } drm_savage_private_t; @@ -203,22 +203,22 @@ extern int savage_bci_buffers(DRM_IOCTL_ARGS); /* BCI functions */ extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, unsigned int flags); -extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf); +extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf); extern void savage_dma_reset(drm_savage_private_t *dev_priv); extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n); -extern int savage_driver_load(drm_device_t *dev, unsigned long chipset); -extern int savage_driver_firstopen(drm_device_t *dev); -extern void savage_driver_lastclose(drm_device_t *dev); -extern int savage_driver_unload(drm_device_t *dev); -extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp); +extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); +extern int savage_driver_firstopen(struct drm_device *dev); +extern void savage_driver_lastclose(struct drm_device *dev); +extern int savage_driver_unload(struct drm_device *dev); +extern void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp); /* state functions */ extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox); + const struct drm_clip_rect *pbox); extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox); + const struct drm_clip_rect *pbox); #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index acc98f89..290796ee 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -27,7 +27,7 @@ #include "savage_drv.h" void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox) + const struct drm_clip_rect *pbox) { uint32_t scstart = dev_priv->state.s3d.new_scstart; uint32_t scend = dev_priv->state.s3d.new_scend; @@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, } void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, - const drm_clip_rect_t *pbox) + const struct drm_clip_rect *pbox) { uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; @@ -275,7 +275,7 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv, static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *cmd_header, - const drm_buf_t *dmabuf) + const struct drm_buf *dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->prim.prim; @@ -534,7 +534,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *cmd_header, const uint16_t *idx, - const drm_buf_t *dmabuf) + const struct drm_buf *dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->idx.prim; @@ -790,7 +790,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *cmd_header, const drm_savage_cmd_header_t *data, unsigned int nbox, - const drm_clip_rect_t *boxes) + const struct drm_clip_rect *boxes) { unsigned int flags = cmd_header->clear0.flags; 
unsigned int clear_cmd; @@ -860,7 +860,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv, } static int savage_dispatch_swap(drm_savage_private_t *dev_priv, - unsigned int nbox, const drm_clip_rect_t *boxes) + unsigned int nbox, const struct drm_clip_rect *boxes) { unsigned int swap_cmd; unsigned int i; @@ -891,11 +891,11 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv, static int savage_dispatch_draw(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *start, const drm_savage_cmd_header_t *end, - const drm_buf_t *dmabuf, + const struct drm_buf *dmabuf, const unsigned int *vtxbuf, unsigned int vb_size, unsigned int vb_stride, unsigned int nbox, - const drm_clip_rect_t *boxes) + const struct drm_clip_rect *boxes) { unsigned int i, j; int ret; @@ -956,13 +956,13 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_savage_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; - drm_buf_t *dmabuf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *dmabuf; drm_savage_cmdbuf_t cmdbuf; drm_savage_cmd_header_t *kcmd_addr = NULL; drm_savage_cmd_header_t *first_draw_cmd; unsigned int *kvb_addr = NULL; - drm_clip_rect_t *kbox_addr = NULL; + struct drm_clip_rect *kbox_addr = NULL; unsigned int i, j; int ret = 0; @@ -1018,7 +1018,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) cmdbuf.vb_addr = kvb_addr; } if (cmdbuf.nbox) { - kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t), + kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); if (kbox_addr == NULL) { ret = DRM_ERR(ENOMEM); @@ -1026,7 +1026,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) } if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, - cmdbuf.nbox * sizeof(drm_clip_rect_t))) { + cmdbuf.nbox * sizeof(struct drm_clip_rect))) { ret = DRM_ERR(EFAULT); goto done; } @@ -1157,7 +1157,7 @@ done: /* If we didn't need to allocate them, these'll be NULL */ drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); - drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t), + drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); return ret; diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index ec572ad4..57d60133 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -58,7 +58,7 @@ enum sis_family { typedef struct drm_sis_private { drm_local_map_t *mmio; unsigned int idle_fault; - drm_sman_t sman; + struct drm_sman sman; unsigned int chipset; int vram_initialized; int agp_initialized; @@ -66,9 +66,9 @@ typedef struct drm_sis_private { unsigned long agp_offset; } drm_sis_private_t; -extern int sis_idle(drm_device_t *dev); -extern void sis_reclaim_buffers_locked(drm_device_t *dev, struct file *filp); -extern void sis_lastclose(drm_device_t *dev); +extern int sis_idle(struct drm_device *dev); +extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); +extern void sis_lastclose(struct drm_device *dev); #else #include "sis_ds.h" @@ -78,14 +78,14 @@ typedef struct drm_sis_private { memHeap_t *FBHeap; } drm_sis_private_t; -extern int sis_init_context(drm_device_t * dev, int context); -extern int sis_final_context(drm_device_t * dev, int context); +extern int sis_init_context(struct drm_device * dev, int context); +extern int sis_final_context(struct drm_device * dev, int context); #endif -extern drm_ioctl_desc_t sis_ioctls[]; +extern struct drm_ioctl_desc sis_ioctls[]; extern int sis_max_ioctl; #endif diff --git 
a/shared-core/via_dma.c b/shared-core/via_dma.c index 333c4bcf..48f46938 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -139,7 +139,7 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv, return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); } -int via_dma_cleanup(drm_device_t * dev) +int via_dma_cleanup(struct drm_device * dev) { if (dev->dev_private) { drm_via_private_t *dev_priv = @@ -157,7 +157,7 @@ int via_dma_cleanup(drm_device_t * dev) return 0; } -static int via_initialize(drm_device_t * dev, +static int via_initialize(struct drm_device * dev, drm_via_private_t * dev_priv, drm_via_dma_init_t * init) { @@ -252,7 +252,7 @@ static int via_dma_init(DRM_IOCTL_ARGS) -static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd) +static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) { drm_via_private_t *dev_priv; uint32_t *vb; @@ -306,7 +306,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd) return 0; } -int via_driver_dma_quiescent(drm_device_t * dev) +int via_driver_dma_quiescent(struct drm_device * dev) { drm_via_private_t *dev_priv = dev->dev_private; @@ -346,7 +346,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS) return 0; } -static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, +static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) { drm_via_private_t *dev_priv = dev->dev_private; @@ -718,7 +718,7 @@ via_dma_blit( DRM_IOCTL_ARGS ) { } #endif -drm_ioctl_desc_t via_ioctls[] = { +struct drm_ioctl_desc via_ioctls[] = { [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h index 635e4637..b15785b3 100644 --- a/shared-core/via_drm.h +++ b/shared-core/via_drm.h @@ -54,7 +54,7 @@ #define VIA_NR_XVMC_LOCKS 5 #define VIA_MAX_CACHELINE_SIZE 64 #define XVMCLOCKPTR(saPriv,lockNo) \ - ((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \ + ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \ (VIA_MAX_CACHELINE_SIZE - 1)) & \ ~(VIA_MAX_CACHELINE_SIZE - 1)) + \ VIA_MAX_CACHELINE_SIZE*(lockNo))) @@ -187,7 +187,7 @@ typedef struct _drm_via_tex_region { typedef struct _drm_via_sarea { unsigned int dirty; unsigned int nbox; - drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS]; drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1]; int texAge; /* last time texture was uploaded */ int ctxOwner; /* last context to upload state */ diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c index 0a478fef..9f099555 100644 --- a/shared-core/via_drv.c +++ b/shared-core/via_drv.c @@ -40,7 +40,7 @@ static struct pci_device_id pciidlist[] = { #ifdef VIA_HAVE_FENCE -static drm_fence_driver_t via_fence_driver = { +static struct drm_fence_driver via_fence_driver = { .num_classes = 1, .wrap_diff = (1 << 30), .flush_diff = (1 << 20), @@ -65,7 +65,7 @@ static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL}; -static drm_bo_driver_t via_bo_driver = { +static struct drm_bo_driver via_bo_driver = { .mem_type_prio = via_mem_prios, .mem_busy_prio = via_busy_prios, .num_mem_type_prio = ARRAY_SIZE(via_mem_prios), diff --git a/shared-core/via_drv.h 
b/shared-core/via_drv.h index baafbbff..05935c81 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -116,7 +116,7 @@ typedef struct drm_via_private { /* Memory manager stuff */ #ifdef VIA_HAVE_CORE_MM unsigned int idle_fault; - drm_sman_t sman; + struct drm_sman sman; int vram_initialized; int agp_initialized; unsigned long vram_offset; @@ -148,7 +148,7 @@ enum via_family { #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) -extern drm_ioctl_desc_t via_ioctls[]; +extern struct drm_ioctl_desc via_ioctls[]; extern int via_max_ioctl; extern int via_fb_init(DRM_IOCTL_ARGS); @@ -161,41 +161,41 @@ extern int via_wait_irq(DRM_IOCTL_ARGS); extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); extern int via_dma_blit( DRM_IOCTL_ARGS ); -extern int via_driver_load(drm_device_t *dev, unsigned long chipset); -extern int via_driver_unload(drm_device_t *dev); -extern int via_final_context(drm_device_t * dev, int context); +extern int via_driver_load(struct drm_device *dev, unsigned long chipset); +extern int via_driver_unload(struct drm_device *dev); +extern int via_final_context(struct drm_device * dev, int context); -extern int via_do_cleanup_map(drm_device_t * dev); -extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int via_do_cleanup_map(struct drm_device * dev); +extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); -extern void via_driver_irq_preinstall(drm_device_t * dev); -extern void via_driver_irq_postinstall(drm_device_t * dev); -extern void via_driver_irq_uninstall(drm_device_t * dev); +extern void via_driver_irq_preinstall(struct drm_device * dev); +extern void via_driver_irq_postinstall(struct drm_device * dev); +extern void via_driver_irq_uninstall(struct drm_device * dev); -extern int via_dma_cleanup(drm_device_t * dev); +extern int via_dma_cleanup(struct drm_device * dev); extern void via_init_command_verifier(void); -extern int via_driver_dma_quiescent(drm_device_t * dev); +extern int via_driver_dma_quiescent(struct drm_device * dev); extern void via_init_futex(drm_via_private_t *dev_priv); extern void via_cleanup_futex(drm_via_private_t *dev_priv); extern void via_release_futex(drm_via_private_t *dev_priv, int context); #ifdef VIA_HAVE_CORE_MM -extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp); -extern void via_lastclose(drm_device_t *dev); +extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); +extern void via_lastclose(struct drm_device *dev); #else -extern int via_init_context(drm_device_t * dev, int context); +extern int via_init_context(struct drm_device * dev, int context); #endif #ifdef VIA_HAVE_DMABLIT -extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq); -extern void via_init_dmablit(drm_device_t *dev); +extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq); +extern void via_init_dmablit(struct drm_device *dev); #endif #ifdef VIA_HAVE_FENCE extern void via_fence_timer(unsigned long data); -extern void via_poke_flush(drm_device_t * dev, uint32_t class); -extern int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, +extern void via_poke_flush(struct drm_device * dev, uint32_t class); +extern int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type); @@ -204,14 +204,14 @@ extern int 
via_fence_has_irq(struct drm_device * dev, uint32_t class, #endif #ifdef VIA_HAVE_BUFFER -extern drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t *dev); -extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type); -extern int via_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); -extern int via_init_mem_type(drm_device_t *dev, uint32_t type, - drm_mem_type_manager_t *man); -extern uint32_t via_evict_mask(drm_buffer_object_t *bo); -extern int via_move(drm_buffer_object_t *bo, int evict, - int no_wait, drm_bo_mem_reg_t *new_mem); +extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev); +extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type); +extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int via_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); +extern uint32_t via_evict_mask(struct drm_buffer_object *bo); +extern int via_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); #endif #endif diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index 2ac86970..040df548 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -99,7 +99,7 @@ static unsigned time_diff(struct timeval *now,struct timeval *then) irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) { - drm_device_t *dev = (drm_device_t *) arg; + struct drm_device *dev = (struct drm_device *) arg; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; int handled = 0; @@ -171,7 +171,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv) } } -int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned int cur_vblank; @@ -199,7 +199,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) } static int -via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, +via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -253,7 +253,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, * drm_dma.h hooks */ -void via_driver_irq_preinstall(drm_device_t * dev) +void via_driver_irq_preinstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; @@ -302,7 +302,7 @@ void via_driver_irq_preinstall(drm_device_t * dev) } } -void via_driver_irq_postinstall(drm_device_t * dev) +void via_driver_irq_postinstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; @@ -321,7 +321,7 @@ void via_driver_irq_postinstall(drm_device_t * dev) } } -void via_driver_irq_uninstall(drm_device_t * dev) +void via_driver_irq_uninstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; diff --git a/shared-core/via_map.c b/shared-core/via_map.c index 037a1c2c..2381eaa9 100644 --- a/shared-core/via_map.c +++ b/shared-core/via_map.c @@ -25,7 +25,7 @@ #include "via_drm.h" #include "via_drv.h" -static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) +static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init) { drm_via_private_t *dev_priv = 
dev->dev_private; int ret = 0; @@ -83,7 +83,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) } -int via_do_cleanup_map(drm_device_t * dev) +int via_do_cleanup_map(struct drm_device * dev) { via_dma_cleanup(dev); @@ -111,7 +111,7 @@ int via_map_init(DRM_IOCTL_ARGS) return -EINVAL; } -int via_driver_load(drm_device_t *dev, unsigned long chipset) +int via_driver_load(struct drm_device *dev, unsigned long chipset) { drm_via_private_t *dev_priv; int ret = 0; @@ -133,7 +133,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset) return ret; } -int via_driver_unload(drm_device_t *dev) +int via_driver_unload(struct drm_device *dev) { drm_via_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index 4b844af0..038bea2f 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -252,10 +252,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { #ifdef __linux__ - drm_map_list_t *r_list; + struct drm_map_list *r_list; #endif drm_local_map_t *map = seq->map_cache; @@ -967,7 +967,7 @@ via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer, int via_verify_command_stream(const uint32_t * buf, unsigned int size, - drm_device_t * dev, int agp) + struct drm_device * dev, int agp) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -1042,7 +1042,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, } int -via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, +via_parse_command_stream(struct drm_device * dev, const uint32_t * buf, unsigned int size) { diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h index 84497c44..dac1db91 100644 --- a/shared-core/via_verifier.h +++ b/shared-core/via_verifier.h @@ -49,7 +49,7 @@ typedef struct { drm_via_sequence_t unfinished; int agp_texture; int multitex; - drm_device_t *dev; + struct drm_device *dev; drm_local_map_t *map_cache; uint32_t vertex_count; int agp; @@ -57,8 +57,8 @@ typedef struct { } drm_via_state_t; extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, - drm_device_t *dev, int agp); -extern int via_parse_command_stream(drm_device_t *dev, const uint32_t * buf, + struct drm_device *dev, int agp); +extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf, unsigned int size); #endif |
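The hunks above all perform one mechanical conversion: typedef'd DRM core types (drm_device_t, drm_buf_t, drm_device_dma_t, drm_clip_rect_t, drm_ioctl_desc_t, and so on) are rewritten to use their plain struct tags (struct drm_device, struct drm_buf, ...). The short sketch below illustrates that pattern in isolation; example_device, example_device_t and new_handler are made-up names for illustration only, not the real DRM definitions.

    /* Minimal sketch of the typedef-to-struct-tag conversion applied
     * throughout this patch.  "example_device" stands in for the DRM
     * structures; it is not the actual drm_device. */
    #include <stdio.h>

    /* Old style: the struct was hidden behind a _t typedef. */
    typedef struct example_device {
            int index;
    } example_device_t;

    /* Old-style signature, as the drivers used to declare it:
     *   static int handler(example_device_t *dev);
     * New style: name the struct tag directly, no typedef needed. */
    static int new_handler(struct example_device *dev)
    {
            return dev->index;
    }

    int main(void)
    {
            struct example_device dev = { .index = 3 };

            /* Both spellings name the same type, so code still using the
             * old typedef keeps compiling while it is phased out. */
            example_device_t *alias = &dev;

            printf("%d %d\n", new_handler(&dev), new_handler(alias));
            return 0;
    }

Because the typedef and the struct tag refer to the same type, drivers can be converted file by file without breaking callers that still use the old spelling.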