author     Gareth Hughes <gareth@users.sourceforge.net>   2000-09-06 20:56:34 +0000
committer  Gareth Hughes <gareth@users.sourceforge.net>   2000-09-06 20:56:34 +0000
commit     7db6449142ca24183f50e6f9dcf396b899b4113b (patch)
tree       351a460b510edd1b4236323e98d41892e843593c /linux
parent     e3e2d66131a3615379fe81ea106d7133da084683 (diff)
Sync with 2.4.0-test8-pre5 kernel.
Diffstat (limited to 'linux')
-rw-r--r--  linux/agpsupport.c    |  23
-rw-r--r--  linux/auth.c          |   6
-rw-r--r--  linux/bufs.c          |  85
-rw-r--r--  linux/context.c       |  36
-rw-r--r--  linux/dma.c           |  14
-rw-r--r--  linux/drawable.c      |   3
-rw-r--r--  linux/fops.c          |   3
-rw-r--r--  linux/gamma_dma.c     |  12
-rw-r--r--  linux/gamma_drv.c     |  70
-rw-r--r--  linux/i810_bufs.c     |  70
-rw-r--r--  linux/i810_context.c  |  33
-rw-r--r--  linux/i810_dma.c      |  30
-rw-r--r--  linux/i810_drv.c      |  74
-rw-r--r--  linux/ioctl.c         |  30
-rw-r--r--  linux/lock.c          |   3
-rw-r--r--  linux/mga_bufs.c      |  94
-rw-r--r--  linux/mga_context.c   |  33
-rw-r--r--  linux/mga_dma.c       | 247
-rw-r--r--  linux/mga_drv.c       |  74
-rw-r--r--  linux/mga_drv.h       |  54
-rw-r--r--  linux/mga_state.c     |  92
-rw-r--r--  linux/r128_bufs.c     |  30
-rw-r--r--  linux/r128_context.c  |  36
-rw-r--r--  linux/r128_dma.c      |  63
-rw-r--r--  linux/r128_drv.c      |  29
-rw-r--r--  linux/tdfx_context.c  |  38
-rw-r--r--  linux/tdfx_drv.c      |  82
27 files changed, 698 insertions(+), 666 deletions(-)
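Nearly every hunk below follows the same pattern: the old copy_from_user_ret()/copy_to_user_ret() convenience macros, which were dropped from the 2.4.0-test kernels, are replaced by direct copy_from_user()/copy_to_user() calls followed by an explicit -EFAULT return. The sketch below shows that conversion in isolation; drm_example_t, req, and example_ioctl() are placeholder names for illustration only, not symbols from this tree.

```c
/*
 * Sketch of the conversion applied throughout this commit.
 * Only copy_from_user(), copy_to_user(), and -EFAULT are real kernel
 * interfaces here; the types and function name are placeholders.
 */
#include <linux/errno.h>
#include <asm/uaccess.h>

typedef struct { int value; } drm_example_t;

static int example_ioctl(unsigned long arg)
{
	drm_example_t req;

	/* Old style (macro hid the early return):
	 *   copy_from_user_ret(&req, (drm_example_t *)arg, sizeof(req), -EFAULT);
	 */

	/* New style: copy_from_user() returns the number of bytes it could
	 * not copy, so any non-zero result means the user pointer was bad. */
	if (copy_from_user(&req, (drm_example_t *)arg, sizeof(req)))
		return -EFAULT;

	req.value++;

	if (copy_to_user((drm_example_t *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}
```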
diff --git a/linux/agpsupport.c b/linux/agpsupport.c index 42a1bc2f..24fd59cd 100644 --- a/linux/agpsupport.c +++ b/linux/agpsupport.c @@ -95,7 +95,8 @@ int drm_agp_info(struct inode *inode, struct file *filp, unsigned int cmd, info.id_vendor = kern->device->vendor; info.id_device = kern->device->device; - copy_to_user_ret((drm_agp_info_t *)arg, &info, sizeof(info), -EFAULT); + if (copy_to_user((drm_agp_info_t *)arg, &info, sizeof(info))) + return -EFAULT; return 0; } @@ -134,8 +135,8 @@ int drm_agp_enable(struct inode *inode, struct file *filp, unsigned int cmd, if (!dev->agp->acquired || !drm_agp.enable) return -EINVAL; - copy_from_user_ret(&mode, (drm_agp_mode_t *)arg, sizeof(mode), - -EFAULT); + if (copy_from_user(&mode, (drm_agp_mode_t *)arg, sizeof(mode))) + return -EFAULT; dev->agp->mode = mode.mode; (*drm_agp.enable)(mode.mode); @@ -155,8 +156,8 @@ int drm_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long pages; u32 type; if (!dev->agp->acquired) return -EINVAL; - copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request), - -EFAULT); + if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request))) + return -EFAULT; if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) return -ENOMEM; @@ -212,8 +213,8 @@ int drm_agp_unbind(struct inode *inode, struct file *filp, unsigned int cmd, drm_agp_mem_t *entry; if (!dev->agp->acquired) return -EINVAL; - copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request), - -EFAULT); + if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request))) + return -EFAULT; if (!(entry = drm_agp_lookup_entry(dev, request.handle))) return -EINVAL; if (!entry->bound) return -EINVAL; @@ -231,8 +232,8 @@ int drm_agp_bind(struct inode *inode, struct file *filp, unsigned int cmd, int page; if (!dev->agp->acquired || !drm_agp.bind_memory) return -EINVAL; - copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request), - -EFAULT); + if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request))) + return -EFAULT; if (!(entry = drm_agp_lookup_entry(dev, request.handle))) return -EINVAL; if (entry->bound) return -EINVAL; @@ -253,8 +254,8 @@ int drm_agp_free(struct inode *inode, struct file *filp, unsigned int cmd, drm_agp_mem_t *entry; if (!dev->agp->acquired) return -EINVAL; - copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request), - -EFAULT); + if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request))) + return -EFAULT; if (!(entry = drm_agp_lookup_entry(dev, request.handle))) return -EINVAL; if (entry->bound) drm_unbind_agp(entry->memory); diff --git a/linux/auth.c b/linux/auth.c index 4556bd96..80bb4b65 100644 --- a/linux/auth.c +++ b/linux/auth.c @@ -137,7 +137,8 @@ int drm_getmagic(struct inode *inode, struct file *filp, unsigned int cmd, } DRM_DEBUG("%u\n", auth.magic); - copy_to_user_ret((drm_auth_t *)arg, &auth, sizeof(auth), -EFAULT); + if (copy_to_user((drm_auth_t *)arg, &auth, sizeof(auth))) + return -EFAULT; return 0; } @@ -149,7 +150,8 @@ int drm_authmagic(struct inode *inode, struct file *filp, unsigned int cmd, drm_auth_t auth; drm_file_t *file; - copy_from_user_ret(&auth, (drm_auth_t *)arg, sizeof(auth), -EFAULT); + if (copy_from_user(&auth, (drm_auth_t *)arg, sizeof(auth))) + return -EFAULT; DRM_DEBUG("%u\n", auth.magic); if ((file = drm_find_file(dev, auth.magic))) { file->authenticated = 1; diff --git a/linux/bufs.c b/linux/bufs.c index c00f051a..28e0eb5f 100644 --- a/linux/bufs.c +++ b/linux/bufs.c @@ -133,12 +133,13 @@ int 
drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd, dev->maplist[dev->map_count-1] = map; up(&dev->struct_sem); - copy_to_user_ret((drm_map_t *)arg, map, sizeof(*map), -EFAULT); + if (copy_to_user((drm_map_t *)arg, map, sizeof(*map))) + return -EFAULT; if (map->type != _DRM_SHM) { - copy_to_user_ret(&((drm_map_t *)arg)->handle, + if (copy_to_user(&((drm_map_t *)arg)->handle, &map->offset, - sizeof(map->offset), - -EFAULT); + sizeof(map->offset))) + return -EFAULT; } return 0; } @@ -166,10 +167,10 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; count = request.count; order = drm_order(request.size); @@ -295,10 +296,10 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, request.count = entry->buf_count; request.size = size; - copy_to_user_ret((drm_buf_desc_t *)arg, + if (copy_to_user((drm_buf_desc_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; atomic_dec(&dev->buf_alloc); return 0; @@ -324,10 +325,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_info_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) { if (dma->bufs[i].buf_count) ++count; @@ -338,28 +339,26 @@ int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, if (request.count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) { if (dma->bufs[i].buf_count) { - copy_to_user_ret(&request.list[count].count, + if (copy_to_user(&request.list[count].count, &dma->bufs[i].buf_count, sizeof(dma->bufs[0] - .buf_count), - -EFAULT); - copy_to_user_ret(&request.list[count].size, + .buf_count)) || + copy_to_user(&request.list[count].size, &dma->bufs[i].buf_size, - sizeof(dma->bufs[0].buf_size), - -EFAULT); - copy_to_user_ret(&request.list[count].low_mark, + sizeof(dma->bufs[0].buf_size)) || + copy_to_user(&request.list[count].low_mark, &dma->bufs[i] .freelist.low_mark, sizeof(dma->bufs[0] - .freelist.low_mark), - -EFAULT); - copy_to_user_ret(&request.list[count] + .freelist.low_mark)) || + copy_to_user(&request.list[count] .high_mark, &dma->bufs[i] .freelist.high_mark, sizeof(dma->bufs[0] - .freelist.high_mark), - -EFAULT); + .freelist.high_mark))) + return -EFAULT; + DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, @@ -372,10 +371,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, } request.count = count; - copy_to_user_ret((drm_buf_info_t *)arg, + if (copy_to_user((drm_buf_info_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; return 0; } @@ -392,10 +391,10 @@ int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("%d, %d, %d\n", request.size, request.low_mark, request.high_mark); @@ -427,17 +426,17 @@ int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_free_t *)arg, - 
sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("%d\n", request.count); for (i = 0; i < request.count; i++) { - copy_from_user_ret(&idx, + if (copy_from_user(&idx, &request.list[i], - sizeof(idx), - -EFAULT); + sizeof(idx))) + return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); @@ -480,10 +479,10 @@ int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_map_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; if (request.count >= dma->buf_count) { down(¤t->mm->mmap_sem); @@ -529,10 +528,10 @@ done: request.count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - copy_to_user_ret((drm_buf_map_t *)arg, + if (copy_to_user((drm_buf_map_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; return retcode; } diff --git a/linux/context.c b/linux/context.c index ca491094..933fd0cd 100644 --- a/linux/context.c +++ b/linux/context.c @@ -129,19 +129,21 @@ int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd, int i; DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); - copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res))) + return -EFAULT; if (res.count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - copy_to_user_ret(&res.contexts[i], + if (copy_to_user(&res.contexts[i], &i, - sizeof(i), - -EFAULT); + sizeof(i))) + return -EFAULT; } } res.count = DRM_RESERVED_CONTEXTS; - copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res))) + return -EFAULT; return 0; } @@ -153,7 +155,8 @@ int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { /* Init kernel's context and get a new one. 
*/ drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx); @@ -161,7 +164,8 @@ int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, } drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx); DRM_DEBUG("%d\n", ctx.handle); - copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -173,7 +177,8 @@ int drm_modctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_ctx_t ctx; drm_queue_t *q; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -206,7 +211,8 @@ int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_ctx_t ctx; drm_queue_t *q; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -223,7 +229,8 @@ int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd, ctx.flags = q->flags; atomic_dec(&q->use_count); - copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -235,7 +242,8 @@ int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); return drm_context_switch(dev, dev->last_context, ctx.handle); } @@ -247,7 +255,8 @@ int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); drm_context_switch_complete(dev, ctx.handle); @@ -263,7 +272,8 @@ int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_queue_t *q; drm_buf_t *buf; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); if (ctx.handle >= dev->queue_count) return -EINVAL; diff --git a/linux/dma.c b/linux/dma.c index ac2d1bc5..62340170 100644 --- a/linux/dma.c +++ b/linux/dma.c @@ -486,14 +486,16 @@ static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d, buf->pending); } buf->pid = current->pid; - copy_to_user_ret(&d->request_indices[i], + if (copy_to_user(&d->request_indices[i], &buf->idx, - sizeof(buf->idx), - -EFAULT); - copy_to_user_ret(&d->request_sizes[i], + sizeof(buf->idx))) + return -EFAULT; + + if (copy_to_user(&d->request_sizes[i], &buf->total, - sizeof(buf->total), - -EFAULT); + sizeof(buf->total))) + return -EFAULT; + ++d->granted_count; } return 0; diff --git a/linux/drawable.c b/linux/drawable.c index 03839f5b..1328054e 100644 --- a/linux/drawable.c +++ b/linux/drawable.c @@ -39,7 +39,8 @@ int drm_adddraw(struct inode *inode, struct file *filp, unsigned int cmd, draw.handle = 0; /* NOOP */ DRM_DEBUG("%d\n", draw.handle); - copy_to_user_ret((drm_draw_t *)arg, &draw, sizeof(draw), -EFAULT); + if (copy_to_user((drm_draw_t *)arg, &draw, sizeof(draw))) + return -EFAULT; return 0; } diff --git a/linux/fops.c b/linux/fops.c index f3966d96..8e373e5a 100644 --- a/linux/fops.c +++ b/linux/fops.c @@ 
-176,7 +176,8 @@ ssize_t drm_read(struct file *filp, char *buf, size_t count, loff_t *off) } else { cur = DRM_MIN(send, dev->buf_end - dev->buf_rp); } - copy_to_user_ret(buf, dev->buf_rp, cur, -EINVAL); + if (copy_to_user(buf, dev->buf_rp, cur)) + return -EFAULT; dev->buf_rp += cur; if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf; send -= cur; diff --git a/linux/gamma_dma.c b/linux/gamma_dma.c index 96ea0455..79433387 100644 --- a/linux/gamma_dma.c +++ b/linux/gamma_dma.c @@ -586,7 +586,8 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd, int retcode = 0; drm_dma_t d; - copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT); + if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d))) + return -EFAULT; DRM_DEBUG("%d %d: %d send, %d req\n", current->pid, d.context, d.send_count, d.request_count); @@ -621,7 +622,8 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("%d returning, granted = %d\n", current->pid, d.granted_count); - copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); + if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d))) + return -EFAULT; return retcode; } @@ -710,7 +712,8 @@ int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd, drm_control_t ctl; int retcode; - copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT); + if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl))) + return -EFAULT; switch (ctl.func) { case DRM_INST_HANDLER: @@ -742,7 +745,8 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->lck_start = start = get_cycles(); #endif - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", diff --git a/linux/gamma_drv.c b/linux/gamma_drv.c index 4a353c6f..eeed2555 100644 --- a/linux/gamma_drv.c +++ b/linux/gamma_drv.c @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -23,7 +23,7 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * + * * Authors: * Rickard E. 
(Rik) Faith <faith@valinux.com> * @@ -42,7 +42,7 @@ #define GAMMA_NAME "gamma" #define GAMMA_DESC "3dlabs GMX 2000" -#define GAMMA_DATE "20000719" +#define GAMMA_DATE "20000906" #define GAMMA_MAJOR 1 #define GAMMA_MINOR 0 #define GAMMA_PATCHLEVEL 0 @@ -87,7 +87,7 @@ static drm_ioctl_desc_t gamma_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 }, - + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 }, @@ -120,7 +120,7 @@ MODULE_PARM_DESC(devices, * passed via the boot-loader (e.g., LILO). It calls the insmod option * routine, drm_parse_options. */ - + static int __init gamma_options(char *str) { @@ -134,7 +134,7 @@ __setup("gamma=", gamma_options); static int gamma_setup(drm_device_t *dev) { int i; - + atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); dev->buf_use = 0; @@ -179,22 +179,22 @@ static int gamma_setup(drm_device_t *dev) #endif dev->ctx_start = 0; dev->lck_start = 0; - + dev->buf_rp = dev->buf; dev->buf_wp = dev->buf; dev->buf_end = dev->buf + DRM_BSZ; dev->buf_async = NULL; init_waitqueue_head(&dev->buf_readers); init_waitqueue_head(&dev->buf_writers); - + DRM_DEBUG("\n"); - + /* The kernel's context could be created here, but is now created in drm_dma_enqueue. This is more resource-efficient for hardware that does not do DMA, but may mean that drm_select_queue fails between the time the interrupt is initialized and the time the queues are initialized. */ - + return 0; } @@ -209,15 +209,15 @@ static int gamma_takedown(drm_device_t *dev) DRM_DEBUG("\n"); if (dev->irq) gamma_irq_uninstall(dev); - + down(&dev->struct_sem); del_timer(&dev->timer); - + if (dev->devname) { drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); dev->devname = NULL; } - + if (dev->unique) { drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); dev->unique = NULL; @@ -231,7 +231,7 @@ static int gamma_takedown(drm_device_t *dev) } dev->magiclist[i].head = dev->magiclist[i].tail = NULL; } - + /* Clear vma list (only built for debugging) */ if (dev->vmalist) { for (vma = dev->vmalist; vma; vma = vma_next) { @@ -240,7 +240,7 @@ static int gamma_takedown(drm_device_t *dev) } dev->vmalist = NULL; } - + /* Clear map area and mtrr information */ if (dev->maplist) { for (i = 0; i < dev->map_count; i++) { @@ -278,7 +278,7 @@ static int gamma_takedown(drm_device_t *dev) dev->maplist = NULL; dev->map_count = 0; } - + if (dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { drm_waitlist_destroy(&dev->queuelist[i]->waitlist); @@ -304,7 +304,7 @@ static int gamma_takedown(drm_device_t *dev) wake_up_interruptible(&dev->lock.lock_queue); } up(&dev->struct_sem); - + return 0; } @@ -349,7 +349,7 @@ static int gamma_init(void) memset((void *)dev, 0, sizeof(*dev)); dev->count_lock = SPIN_LOCK_UNLOCKED; sema_init(&dev->struct_sem, 1); - + #ifdef MODULE drm_parse_options(gamma); #endif @@ -374,7 +374,7 @@ static int gamma_init(void) GAMMA_DATE, gamma_misc.minor, devices); - + return 0; } @@ -385,7 +385,7 @@ static void gamma_cleanup(void) drm_device_t *dev = &gamma_device; DRM_DEBUG("\n"); - + drm_proc_cleanup(); if (misc_deregister(&gamma_misc)) { DRM_ERROR("Cannot unload module\n"); @@ -405,17 +405,18 @@ int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd, drm_version_t version; int len; - 
copy_from_user_ret(&version, + if (copy_from_user(&version, (drm_version_t *)arg, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; #define DRM_COPY(name,value) \ len = strlen(value); \ if (len > name##_len) len = name##_len; \ name##_len = strlen(value); \ if (len && name) { \ - copy_to_user_ret(name, value, len, -EFAULT); \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = GAMMA_MAJOR; @@ -426,10 +427,10 @@ int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd, DRM_COPY(version.date, GAMMA_DATE); DRM_COPY(version.desc, GAMMA_DESC); - copy_to_user_ret((drm_version_t *)arg, + if (copy_to_user((drm_version_t *)arg, &version, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; return 0; } @@ -437,7 +438,7 @@ int gamma_open(struct inode *inode, struct file *filp) { drm_device_t *dev = &gamma_device; int retcode = 0; - + DRM_DEBUG("open_count = %d\n", dev->open_count); if (!(retcode = drm_open_helper(inode, filp, dev))) { #if LINUX_VERSION_CODE < 0x020333 @@ -504,7 +505,7 @@ int gamma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->ioctl_count); atomic_inc(&dev->total_ioctl); ++priv->ioctl_count; - + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", current->pid, cmd, nr, dev->device, priv->authenticated); @@ -524,7 +525,7 @@ int gamma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, retcode = (func)(inode, filp, cmd, arg); } } - + atomic_dec(&dev->ioctl_count); return retcode; } @@ -537,8 +538,9 @@ int gamma_unlock(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_lock_t lock; - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); - + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); @@ -563,7 +565,7 @@ int gamma_unlock(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles() - dev->lck_start)]); #endif - + unblock_all_signals(); return 0; } diff --git a/linux/i810_bufs.c b/linux/i810_bufs.c index fa1f84dc..a999e96b 100644 --- a/linux/i810_bufs.c +++ b/linux/i810_bufs.c @@ -56,10 +56,10 @@ int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; count = request.count; order = drm_order(request.size); @@ -155,10 +155,10 @@ int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, request.count = entry->buf_count; request.size = size; - copy_to_user_ret((drm_buf_desc_t *)arg, + if (copy_to_user((drm_buf_desc_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; atomic_dec(&dev->buf_alloc); dma->flags = _DRM_DMA_USE_AGP; @@ -170,10 +170,10 @@ int i810_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, { drm_buf_desc_t request; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; if(request.flags & _DRM_AGP_BUFFER) return i810_addbufs_agp(inode, filp, cmd, arg); @@ -201,10 +201,10 @@ int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, ++dev->buf_use; /* Can't allocate more after 
this call */ spin_unlock(&dev->count_lock); - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_info_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) { if (dma->bufs[i].buf_count) ++count; @@ -215,28 +215,26 @@ int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, if (request.count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) { if (dma->bufs[i].buf_count) { - copy_to_user_ret(&request.list[count].count, + if (copy_to_user(&request.list[count].count, &dma->bufs[i].buf_count, sizeof(dma->bufs[0] - .buf_count), - -EFAULT); - copy_to_user_ret(&request.list[count].size, + .buf_count)) || + copy_to_user(&request.list[count].size, &dma->bufs[i].buf_size, - sizeof(dma->bufs[0].buf_size), - -EFAULT); - copy_to_user_ret(&request.list[count].low_mark, + sizeof(dma->bufs[0].buf_size)) || + copy_to_user(&request.list[count].low_mark, &dma->bufs[i] .freelist.low_mark, sizeof(dma->bufs[0] - .freelist.low_mark), - -EFAULT); - copy_to_user_ret(&request.list[count] + .freelist.low_mark)) || + copy_to_user(&request.list[count] .high_mark, &dma->bufs[i] .freelist.high_mark, sizeof(dma->bufs[0] - .freelist.high_mark), - -EFAULT); + .freelist.high_mark))) + return -EFAULT; + DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, @@ -249,10 +247,10 @@ int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, } request.count = count; - copy_to_user_ret((drm_buf_info_t *)arg, + if (copy_to_user((drm_buf_info_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; return 0; } @@ -269,10 +267,10 @@ int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("%d, %d, %d\n", request.size, request.low_mark, request.high_mark); @@ -304,17 +302,17 @@ int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_free_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("%d\n", request.count); for (i = 0; i < request.count; i++) { - copy_from_user_ret(&idx, + if (copy_from_user(&idx, &request.list[i], - sizeof(idx), - -EFAULT); + sizeof(idx))) + return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); diff --git a/linux/i810_context.c b/linux/i810_context.c index 85c0877b..c331beed 100644 --- a/linux/i810_context.c +++ b/linux/i810_context.c @@ -103,19 +103,21 @@ int i810_resctx(struct inode *inode, struct file *filp, unsigned int cmd, int i; DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); - copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res))) + return -EFAULT; if (res.count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - copy_to_user_ret(&res.contexts[i], + if (copy_to_user(&res.contexts[i], &i, - sizeof(i), - -EFAULT); + sizeof(i))) + return -EFAULT; } } res.count = DRM_RESERVED_CONTEXTS; - copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res))) + return -EFAULT; return 0; } @@ -126,7 +128,8 @@ int 
i810_addctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; if ((ctx.handle = i810_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ ctx.handle = i810_alloc_queue(dev); @@ -137,7 +140,8 @@ int i810_addctx(struct inode *inode, struct file *filp, unsigned int cmd, return -ENOMEM; } DRM_DEBUG("%d\n", ctx.handle); - copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -153,10 +157,12 @@ int i810_getctx(struct inode *inode, struct file *filp, unsigned int cmd, { drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; /* This is 0, because we don't hanlde any context flags */ ctx.flags = 0; - copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -167,7 +173,8 @@ int i810_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); return i810_context_switch(dev, dev->last_context, ctx.handle); } @@ -179,7 +186,8 @@ int i810_newctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); i810_context_switch_complete(dev, ctx.handle); @@ -193,7 +201,8 @@ int i810_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); if(ctx.handle != DRM_KERNEL_CONTEXT) { drm_ctxbitmap_free(dev, ctx.handle); diff --git a/linux/i810_dma.c b/linux/i810_dma.c index 14c78071..e2a86606 100644 --- a/linux/i810_dma.c +++ b/linux/i810_dma.c @@ -490,8 +490,8 @@ int i810_dma_init(struct inode *inode, struct file *filp, drm_i810_init_t init; int retcode = 0; - copy_from_user_ret(&init, (drm_i810_init_t *)arg, - sizeof(init), -EFAULT); + if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init))) + return -EFAULT; switch(init.func) { case I810_INIT_DMA: @@ -1005,7 +1005,8 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG( "i810_control\n"); - copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT); + if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl))) + return -EFAULT; switch (ctl.func) { case DRM_INST_HANDLER: @@ -1178,7 +1179,8 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, int ret = 0; drm_lock_t lock; - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", @@ -1275,8 +1277,8 @@ int i810_dma_vertex(struct inode *inode, struct file *filp, 
dev_priv->sarea_priv; drm_i810_vertex_t vertex; - copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex), - -EFAULT); + if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex))) + return -EFAULT; if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_dma_vertex called without lock held\n"); @@ -1307,8 +1309,8 @@ int i810_clear_bufs(struct inode *inode, struct file *filp, drm_device_t *dev = priv->dev; drm_i810_clear_t clear; - copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear), - -EFAULT); + if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear))) + return -EFAULT; if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_clear_bufs called without lock held\n"); @@ -1365,7 +1367,8 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, dev_priv->sarea_priv; DRM_DEBUG("getbuf\n"); - copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT); + if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d))) + return -EFAULT; if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_dma called without lock held\n"); @@ -1379,7 +1382,8 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", current->pid, retcode, d.granted); - copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); + if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d))) + return -EFAULT; sarea_priv->last_dispatch = (int) hw_status[5]; return retcode; @@ -1404,14 +1408,16 @@ int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, return -EINVAL; } - copy_from_user_ret(&d, (drm_i810_copy_t *)arg, sizeof(d), -EFAULT); + if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d))) + return -EFAULT; if(d.idx > dma->buf_count) return -EINVAL; buf = dma->buflist[ d.idx ]; buf_priv = buf->dev_private; if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM; - copy_from_user_ret(buf_priv->virtual, d.address, d.used, -EFAULT); + if (copy_from_user(buf_priv->virtual, d.address, d.used)) + return -EFAULT; sarea_priv->last_dispatch = (int) hw_status[5]; diff --git a/linux/i810_drv.c b/linux/i810_drv.c index 183e7fd7..c0cd83db 100644 --- a/linux/i810_drv.c +++ b/linux/i810_drv.c @@ -1,6 +1,6 @@ /* i810_drv.c -- I810 driver -*- linux-c -*- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com - * + * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL @@ -35,7 +35,7 @@ #define I810_NAME "i810" #define I810_DESC "Intel I810" -#define I810_DATE "20000719" +#define I810_DATE "20000906" #define I810_MAJOR 1 #define I810_MINOR 1 #define I810_PATCHLEVEL 0 @@ -143,7 +143,7 @@ __setup("i810=", i810_options); static int i810_setup(drm_device_t *dev) { int i; - + atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); dev->buf_use = 0; @@ -188,22 +188,22 @@ static int i810_setup(drm_device_t *dev) #endif dev->ctx_start = 0; dev->lck_start = 0; - + dev->buf_rp = dev->buf; dev->buf_wp = dev->buf; dev->buf_end = dev->buf + DRM_BSZ; dev->buf_async = NULL; init_waitqueue_head(&dev->buf_readers); init_waitqueue_head(&dev->buf_writers); - + DRM_DEBUG("\n"); - + /* The kernel's context could be created here, but is now created in drm_dma_enqueue. This is more resource-efficient for hardware that does not do DMA, but may mean that drm_select_queue fails between the time the interrupt is initialized and the time the queues are initialized. */ - + return 0; } @@ -218,15 +218,15 @@ static int i810_takedown(drm_device_t *dev) DRM_DEBUG("\n"); if (dev->irq) i810_irq_uninstall(dev); - + down(&dev->struct_sem); del_timer(&dev->timer); - + if (dev->devname) { drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); dev->devname = NULL; } - + if (dev->unique) { drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); dev->unique = NULL; @@ -244,7 +244,7 @@ static int i810_takedown(drm_device_t *dev) if (dev->agp) { drm_agp_mem_t *entry; drm_agp_mem_t *nexte; - + /* Remove AGP resources, but leave dev->agp intact until r128_cleanup is called. */ for (entry = dev->agp->memory; entry; entry = nexte) { @@ -254,10 +254,10 @@ static int i810_takedown(drm_device_t *dev) drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); } dev->agp->memory = NULL; - + if (dev->agp->acquired && drm_agp.release) (*drm_agp.release)(); - + dev->agp->acquired = 0; dev->agp->enabled = 0; } @@ -269,7 +269,7 @@ static int i810_takedown(drm_device_t *dev) } dev->vmalist = NULL; } - + /* Clear map area and mtrr information */ if (dev->maplist) { for (i = 0; i < dev->map_count; i++) { @@ -305,7 +305,7 @@ static int i810_takedown(drm_device_t *dev) dev->maplist = NULL; dev->map_count = 0; } - + if (dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { drm_waitlist_destroy(&dev->queuelist[i]->waitlist); @@ -331,7 +331,7 @@ static int i810_takedown(drm_device_t *dev) wake_up_interruptible(&dev->lock.lock_queue); } up(&dev->struct_sem); - + return 0; } @@ -348,7 +348,7 @@ static int i810_init(void) memset((void *)dev, 0, sizeof(*dev)); dev->count_lock = SPIN_LOCK_UNLOCKED; sema_init(&dev->struct_sem, 1); - + #ifdef MODULE drm_parse_options(i810); #endif @@ -402,7 +402,7 @@ static void i810_cleanup(void) drm_device_t *dev = &i810_device; DRM_DEBUG("\n"); - + drm_proc_cleanup(); if (misc_deregister(&i810_misc)) { DRM_ERROR("Cannot unload module\n"); @@ -428,17 +428,18 @@ int i810_version(struct inode *inode, struct file *filp, unsigned int cmd, drm_version_t version; int len; - copy_from_user_ret(&version, + if (copy_from_user(&version, (drm_version_t *)arg, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; #define DRM_COPY(name,value) \ len = strlen(value); \ if (len > name##_len) len = name##_len; \ name##_len = strlen(value); \ if (len && name) { \ - copy_to_user_ret(name, value, len, -EFAULT); \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = I810_MAJOR; @@ -449,10 +450,10 @@ int i810_version(struct 
inode *inode, struct file *filp, unsigned int cmd, DRM_COPY(version.date, I810_DATE); DRM_COPY(version.desc, I810_DESC); - copy_to_user_ret((drm_version_t *)arg, + if (copy_to_user((drm_version_t *)arg, &version, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; return 0; } @@ -460,7 +461,7 @@ int i810_open(struct inode *inode, struct file *filp) { drm_device_t *dev = &i810_device; int retcode = 0; - + DRM_DEBUG("open_count = %d\n", dev->open_count); if (!(retcode = drm_open_helper(inode, filp, dev))) { #if LINUX_VERSION_CODE < 0x020333 @@ -497,7 +498,7 @@ int i810_release(struct inode *inode, struct file *filp) drm_lock_free(dev, &dev->lock.hw_lock->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); - + /* FIXME: may require heavy-handed reset of hardware at this point, possibly processed via a callback to the X @@ -518,7 +519,7 @@ int i810_release(struct inode *inode, struct file *filp) dev->lock.lock_time = jiffies; atomic_inc(&dev->total_locks); break; /* Got lock */ - } + } /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; @@ -544,7 +545,7 @@ int i810_release(struct inode *inode, struct file *filp) if (priv->next) priv->next->prev = priv->prev; else dev->file_last = priv->prev; up(&dev->struct_sem); - + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); #if LINUX_VERSION_CODE < 0x020333 MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */ @@ -584,7 +585,7 @@ int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->ioctl_count); atomic_inc(&dev->total_ioctl); ++priv->ioctl_count; - + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", current->pid, cmd, nr, dev->device, priv->authenticated); @@ -604,7 +605,7 @@ int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, retcode = (func)(inode, filp, cmd, arg); } } - + atomic_dec(&dev->ioctl_count); return retcode; } @@ -616,8 +617,9 @@ int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_lock_t lock; - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); - + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); @@ -641,7 +643,7 @@ int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles() - dev->lck_start)]); #endif - + unblock_all_signals(); return 0; } diff --git a/linux/ioctl.c b/linux/ioctl.c index b246f76e..2f4286b1 100644 --- a/linux/ioctl.c +++ b/linux/ioctl.c @@ -38,13 +38,15 @@ int drm_irq_busid(struct inode *inode, struct file *filp, unsigned int cmd, drm_irq_busid_t p; struct pci_dev *dev; - copy_from_user_ret(&p, (drm_irq_busid_t *)arg, sizeof(p), -EFAULT); + if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p))) + return -EFAULT; dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum)); if (dev) p.irq = dev->irq; else p.irq = 0; DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq); - copy_to_user_ret((drm_irq_busid_t *)arg, &p, sizeof(p), -EFAULT); + if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p))) + return -EFAULT; return 0; } @@ -55,13 +57,15 @@ int drm_getunique(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_unique_t u; - copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT); + if (copy_from_user(&u, (drm_unique_t *)arg, 
sizeof(u))) + return -EFAULT; if (u.unique_len >= dev->unique_len) { - copy_to_user_ret(u.unique, dev->unique, dev->unique_len, - -EFAULT); + if (copy_to_user(u.unique, dev->unique, dev->unique_len)) + return -EFAULT; } u.unique_len = dev->unique_len; - copy_to_user_ret((drm_unique_t *)arg, &u, sizeof(u), -EFAULT); + if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u))) + return -EFAULT; return 0; } @@ -72,15 +76,19 @@ int drm_setunique(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_unique_t u; - if (dev->unique_len || dev->unique) return -EBUSY; + if (dev->unique_len || dev->unique) + return -EBUSY; - copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT); - if (!u.unique_len) return -EINVAL; + if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u))) + return -EFAULT; + + if (!u.unique_len) + return -EINVAL; dev->unique_len = u.unique_len; dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); - copy_from_user_ret(dev->unique, u.unique, dev->unique_len, - -EFAULT); + if (copy_from_user(dev->unique, u.unique, dev->unique_len)) + return -EFAULT; dev->unique[dev->unique_len] = '\0'; dev->devname = drm_alloc(strlen(dev->name) + strlen(dev->unique) + 2, diff --git a/linux/lock.c b/linux/lock.c index 33b2cc03..e5fd435f 100644 --- a/linux/lock.c +++ b/linux/lock.c @@ -218,7 +218,8 @@ int drm_finish(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("\n"); - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; ret = drm_flush_block_and_flush(dev, lock.context, lock.flags); drm_flush_unblock(dev, lock.context, lock.flags); return ret; diff --git a/linux/mga_bufs.c b/linux/mga_bufs.c index b97eb495..3ec28a76 100644 --- a/linux/mga_bufs.c +++ b/linux/mga_bufs.c @@ -57,10 +57,10 @@ int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; count = request.count; order = drm_order(request.size); @@ -173,10 +173,10 @@ int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, request.count = entry->buf_count; request.size = size; - copy_to_user_ret((drm_buf_desc_t *)arg, + if (copy_to_user((drm_buf_desc_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; atomic_dec(&dev->buf_alloc); @@ -219,10 +219,10 @@ int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; count = request.count; order = drm_order(request.size); @@ -348,10 +348,10 @@ int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd, request.count = entry->buf_count; request.size = size; - copy_to_user_ret((drm_buf_desc_t *)arg, + if (copy_to_user((drm_buf_desc_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; atomic_dec(&dev->buf_alloc); return 0; @@ -362,10 +362,10 @@ int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, { drm_buf_desc_t request; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; if(request.flags & _DRM_AGP_BUFFER) return 
mga_addbufs_agp(inode, filp, cmd, arg); @@ -393,10 +393,10 @@ int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_info_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) { if (dma->bufs[i].buf_count) ++count; @@ -407,28 +407,26 @@ int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, if (request.count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) { if (dma->bufs[i].buf_count) { - copy_to_user_ret(&request.list[count].count, + if (copy_to_user(&request.list[count].count, &dma->bufs[i].buf_count, sizeof(dma->bufs[0] - .buf_count), - -EFAULT); - copy_to_user_ret(&request.list[count].size, + .buf_count)) || + copy_to_user(&request.list[count].size, &dma->bufs[i].buf_size, - sizeof(dma->bufs[0].buf_size), - -EFAULT); - copy_to_user_ret(&request.list[count].low_mark, + sizeof(dma->bufs[0].buf_size)) || + copy_to_user(&request.list[count].low_mark, &dma->bufs[i] .freelist.low_mark, sizeof(dma->bufs[0] - .freelist.low_mark), - -EFAULT); - copy_to_user_ret(&request.list[count] + .freelist.low_mark)) || + copy_to_user(&request.list[count] .high_mark, &dma->bufs[i] .freelist.high_mark, sizeof(dma->bufs[0] - .freelist.high_mark), - -EFAULT); + .freelist.high_mark))) + return -EFAULT; + DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, @@ -441,10 +439,10 @@ int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, } request.count = count; - copy_to_user_ret((drm_buf_info_t *)arg, + if (copy_to_user((drm_buf_info_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; return 0; } @@ -461,10 +459,10 @@ int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("%d, %d, %d\n", request.size, request.low_mark, request.high_mark); @@ -496,17 +494,17 @@ int mga_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_free_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("%d\n", request.count); for (i = 0; i < request.count; i++) { - copy_from_user_ret(&idx, + if (copy_from_user(&idx, &request.list[i], - sizeof(idx), - -EFAULT); + sizeof(idx))) + return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); @@ -550,10 +548,10 @@ int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_map_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; DRM_DEBUG("mga_mapbufs\n"); DRM_DEBUG("dma->flags : %x\n", dma->flags); @@ -628,10 +626,10 @@ int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, request.count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - copy_to_user_ret((drm_buf_map_t *)arg, + if (copy_to_user((drm_buf_map_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; 
DRM_DEBUG("retcode : %d\n", retcode); diff --git a/linux/mga_context.c b/linux/mga_context.c index 9a73e6c1..43733c14 100644 --- a/linux/mga_context.c +++ b/linux/mga_context.c @@ -103,19 +103,21 @@ int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd, int i; DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); - copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res))) + return -EFAULT; if (res.count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - copy_to_user_ret(&res.contexts[i], + if (copy_to_user(&res.contexts[i], &i, - sizeof(i), - -EFAULT); + sizeof(i))) + return -EFAULT; } } res.count = DRM_RESERVED_CONTEXTS; - copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res))) + return -EFAULT; return 0; } @@ -126,7 +128,8 @@ int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ ctx.handle = mga_alloc_queue(dev); @@ -137,7 +140,8 @@ int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd, return -ENOMEM; } DRM_DEBUG("%d\n", ctx.handle); - copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -153,10 +157,12 @@ int mga_getctx(struct inode *inode, struct file *filp, unsigned int cmd, { drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; /* This is 0, because we don't hanlde any context flags */ ctx.flags = 0; - copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -167,7 +173,8 @@ int mga_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); return mga_context_switch(dev, dev->last_context, ctx.handle); } @@ -179,7 +186,8 @@ int mga_newctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); mga_context_switch_complete(dev, ctx.handle); @@ -193,7 +201,8 @@ int mga_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); if(ctx.handle != DRM_KERNEL_CONTEXT) { drm_ctxbitmap_free(dev, ctx.handle); diff --git a/linux/mga_dma.c b/linux/mga_dma.c index 4b861220..f80fb489 100644 --- a/linux/mga_dma.c +++ b/linux/mga_dma.c @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit 
persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -51,7 +51,7 @@ static int mga_flush_queue(drm_device_t *dev); static unsigned long mga_alloc_page(drm_device_t *dev) { unsigned long address; - + DRM_DEBUG("%s\n", __FUNCTION__); address = __get_free_page(GFP_KERNEL); if(address == 0UL) { @@ -59,7 +59,7 @@ static unsigned long mga_alloc_page(drm_device_t *dev) } atomic_inc(&virt_to_page(address)->count); set_bit(PG_locked, &virt_to_page(address)->flags); - + return address; } @@ -82,19 +82,6 @@ static void mga_delay(void) return; } -#ifdef __i386__ -void mga_flush_write_combine(void) -{ - int xchangeDummy; - DRM_DEBUG("%s\n", __FUNCTION__); - - __asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy)); - __asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;" - " movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;" - " pop %%eax" : /* no outputs */ : /* no inputs */ ); -} -#endif - /* These are two age tags that will never be sent to * the hardware */ #define MGA_BUF_USED 0xffffffff @@ -115,7 +102,7 @@ static int mga_freelist_init(drm_device_t *dev) if(dev_priv->head == NULL) return -ENOMEM; memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); dev_priv->head->age = MGA_BUF_USED; - + for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[ i ]; buf_priv = buf->dev_private; @@ -135,7 +122,7 @@ static int mga_freelist_init(drm_device_t *dev) buf_priv->dispatched = 0; dev_priv->head->next = item; } - + return 0; } @@ -153,7 +140,7 @@ static void mga_freelist_cleanup(drm_device_t *dev) item = item->next; drm_free(prev, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); } - + dev_priv->head = dev_priv->tail = NULL; } @@ -169,15 +156,15 @@ static inline void mga_dma_quiescent(drm_device_t *dev) DRM_DEBUG("%s\n", __FUNCTION__); end = jiffies + (HZ*3); while(1) { - if(!test_and_set_bit(MGA_IN_DISPATCH, + if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) { break; } if((signed)(end - jiffies) <= 0) { - DRM_ERROR("irqs: %d wanted %d\n", - atomic_read(&dev->total_irq), + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), atomic_read(&dma->total_lost)); - DRM_ERROR("lockup\n"); + DRM_ERROR("lockup\n"); goto out_nolock; } for (i = 0 ; i < 2000 ; i++) mga_delay(); @@ -186,13 +173,13 @@ static inline void mga_dma_quiescent(drm_device_t *dev) DRM_DEBUG("quiescent status : %x\n", MGA_READ(MGAREG_STATUS)); while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) { if((signed)(end - jiffies) <= 0) { - DRM_ERROR("irqs: %d wanted %d\n", - atomic_read(&dev->total_irq), + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), atomic_read(&dma->total_lost)); - DRM_ERROR("lockup\n"); + DRM_ERROR("lockup\n"); goto out_status; } - for (i = 0 ; i < 2000 ; i++) mga_delay(); + for (i = 0 ; i < 2000 ; i++) mga_delay(); } sarea_priv->dirty |= MGA_DMA_FLUSH; @@ -216,13 +203,13 @@ static void mga_reset_freelist(drm_device_t *dev) } /* Least recently used : - * These operations are not atomic b/c they are protected by the + * These operations are not atomic b/c they are protected by the * 
hardware lock */ drm_buf_t *mga_freelist_get(drm_device_t *dev) { DECLARE_WAITQUEUE(entry, current); - drm_mga_private_t *dev_priv = + drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; drm_mga_freelist_t *prev; drm_mga_freelist_t *next; @@ -230,17 +217,17 @@ drm_buf_t *mga_freelist_get(drm_device_t *dev) DRM_DEBUG("%s : tail->age : %d last_prim_age : %d\n", __FUNCTION__, dev_priv->tail->age, dev_priv->last_prim_age); - + if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) { - DRM_DEBUG("I'm waiting on the freelist!!! %d\n", + DRM_DEBUG("I'm waiting on the freelist!!! %d\n", dev_priv->last_prim_age); set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status); current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev_priv->buf_queue, &entry); for (;;) { mga_dma_schedule(dev, 0); - if(!test_bit(MGA_IN_GETBUF, - &dev_priv->dispatch_status)) + if(!test_bit(MGA_IN_GETBUF, + &dev_priv->dispatch_status)) break; atomic_inc(&dev->total_sleeps); schedule(); @@ -253,7 +240,7 @@ drm_buf_t *mga_freelist_get(drm_device_t *dev) current->state = TASK_RUNNING; remove_wait_queue(&dev_priv->buf_queue, &entry); } - + if(dev_priv->tail->age < dev_priv->last_prim_age) { prev = dev_priv->tail->prev; next = dev_priv->tail; @@ -272,7 +259,7 @@ failed_getbuf: int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf) { - drm_mga_private_t *dev_priv = + drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_freelist_t *prev; @@ -303,7 +290,7 @@ int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf) next->prev = head; next->next = prev; } - + return 0; } @@ -315,41 +302,41 @@ static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init) int offset = init->reserved_map_agpstart; DRM_DEBUG("%s\n", __FUNCTION__); - dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) / + dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE; size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS; dev_priv->warp_ucode_size = init->warp_ucode_size; - dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) * - (MGA_NUM_PRIM_BUFS + 1), + dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) * + (MGA_NUM_PRIM_BUFS + 1), DRM_MEM_DRIVER); if(dev_priv->prim_bufs == NULL) { DRM_ERROR("Unable to allocate memory for prim_buf\n"); return -ENOMEM; } - memset(dev_priv->prim_bufs, + memset(dev_priv->prim_bufs, 0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1)); - + temp = init->warp_ucode_size + dev_priv->primary_size; temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE; - - dev_priv->ioremap = drm_ioremap(dev->agp->base + offset, + + dev_priv->ioremap = drm_ioremap(dev->agp->base + offset, temp); if(dev_priv->ioremap == NULL) { DRM_DEBUG("Ioremap failed\n"); return -ENOMEM; } init_waitqueue_head(&dev_priv->wait_queue); - + for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) { - prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t), + prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t), DRM_MEM_DRIVER); if(prim_buffer == NULL) return -ENOMEM; memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t)); prim_buffer->phys_head = offset + dev->agp->base; - prim_buffer->current_dma_ptr = - prim_buffer->head = - (u32 *) (dev_priv->ioremap + - offset - + prim_buffer->current_dma_ptr = + prim_buffer->head = + (u32 *) (dev_priv->ioremap + + offset - init->reserved_map_agpstart); prim_buffer->num_dwords = 0; prim_buffer->max_dwords = size_of_buf / sizeof(u32); @@ -361,11 +348,11 @@ static int 
mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init) dev_priv->prim_bufs[i] = prim_buffer; } dev_priv->current_prim_idx = 0; - dev_priv->next_prim = - dev_priv->last_prim = + dev_priv->next_prim = + dev_priv->last_prim = dev_priv->current_prim = dev_priv->prim_bufs[0]; - dev_priv->next_prim_age = 2; + dev_priv->next_prim_age = 2; dev_priv->last_prim_age = 1; set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status); return 0; @@ -384,7 +371,7 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim) DRM_DEBUG("%s\n", __FUNCTION__); dev_priv->last_prim = prim; - + /* We never check for overflow, b/c there is always room */ PRIMPTR(prim); if(num_dwords <= 0) { @@ -399,17 +386,17 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim) end = jiffies + (HZ*3); if(sarea_priv->dirty & MGA_DMA_FLUSH) { - DRM_DEBUG("Dma top flush\n"); + DRM_DEBUG("Dma top flush\n"); while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) { if((signed)(end - jiffies) <= 0) { - DRM_ERROR("irqs: %d wanted %d\n", - atomic_read(&dev->total_irq), + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), atomic_read(&dma->total_lost)); DRM_ERROR("lockup in fire primary " "(Dma Top Flush)\n"); goto out_prim_wait; } - + for (i = 0 ; i < 4096 ; i++) mga_delay(); } sarea_priv->dirty &= ~(MGA_DMA_FLUSH); @@ -417,29 +404,27 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim) DRM_DEBUG("Status wait\n"); while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) { if((signed)(end - jiffies) <= 0) { - DRM_ERROR("irqs: %d wanted %d\n", - atomic_read(&dev->total_irq), + DRM_ERROR("irqs: %d wanted %d\n", + atomic_read(&dev->total_irq), atomic_read(&dma->total_lost)); DRM_ERROR("lockup in fire primary " "(Status Wait)\n"); goto out_prim_wait; } - + for (i = 0 ; i < 4096 ; i++) mga_delay(); } } -#ifdef __i386__ mga_flush_write_combine(); -#endif atomic_inc(&dev_priv->pending_bufs); MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL); MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp); prim->num_dwords = 0; sarea_priv->last_enqueue = prim->prim_age; - + next_idx = prim->idx + 1; - if(next_idx >= MGA_NUM_PRIM_BUFS) + if(next_idx >= MGA_NUM_PRIM_BUFS) next_idx = 0; dev_priv->next_prim = dev_priv->prim_bufs[next_idx]; @@ -462,28 +447,28 @@ int mga_advance_primary(drm_device_t *dev) drm_device_dma_t *dma = dev->dma; int next_prim_idx; int ret = 0; - + /* This needs to reset the primary buffer if available, * we should collect stats on how many times it bites * it's tail */ DRM_DEBUG("%s\n", __FUNCTION__); - + next_prim_idx = dev_priv->current_prim_idx + 1; if(next_prim_idx >= MGA_NUM_PRIM_BUFS) next_prim_idx = 0; prim_buffer = dev_priv->prim_bufs[next_prim_idx]; set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status); - + /* In use is cleared in interrupt handler */ - + if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) { add_wait_queue(&dev_priv->wait_queue, &entry); current->state = TASK_INTERRUPTIBLE; for (;;) { mga_dma_schedule(dev, 0); - if(!test_and_set_bit(MGA_BUF_IN_USE, - &prim_buffer->buffer_status)) + if(!test_and_set_bit(MGA_BUF_IN_USE, + &prim_buffer->buffer_status)) break; atomic_inc(&dev->total_sleeps); atomic_inc(&dma->total_missed_sched); @@ -545,9 +530,9 @@ static inline int mga_decide_to_fire(drm_device_t *dev) atomic_inc(&dma->total_prio); return 1; } - + if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) { - if(test_bit(MGA_BUF_SWAP_PENDING, + if(test_bit(MGA_BUF_SWAP_PENDING, 
&dev_priv->next_prim->buffer_status)) { atomic_inc(&dma->total_dmas); return 1; @@ -583,16 +568,16 @@ int mga_dma_schedule(drm_device_t *dev, int locked) retval = -EBUSY; goto sch_out_wakeup; } - + DRM_DEBUG("%s\n", __FUNCTION__); - if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) || + if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) || test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) || test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) { locked = 1; } - - if (!locked && + + if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { atomic_inc(&dma->total_missed_lock); clear_bit(0, &dev->dma_flag); @@ -606,7 +591,7 @@ int mga_dma_schedule(drm_device_t *dev, int locked) /* Fire dma buffer */ if(mga_decide_to_fire(dev)) { DRM_DEBUG("idx :%d\n", dev_priv->next_prim->idx); - clear_bit(MGA_BUF_FORCE_FIRE, + clear_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status); if(dev_priv->current_prim == dev_priv->next_prim) { /* Schedule overflow for a later time */ @@ -620,7 +605,7 @@ int mga_dma_schedule(drm_device_t *dev, int locked) } else { DRM_DEBUG("I can't get the dispatch lock\n"); } - + if (!locked) { if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { @@ -642,8 +627,8 @@ sch_out_wakeup: DRM_DEBUG("Waking up buf queue\n"); wake_up_interruptible(&dev_priv->buf_queue); } else if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) { - DRM_DEBUG("Not waking buf_queue on %d %d\n", - atomic_read(&dev->total_irq), + DRM_DEBUG("Not waking buf_queue on %d %d\n", + atomic_read(&dev->total_irq), dev_priv->last_prim_age); } @@ -664,7 +649,7 @@ static void mga_dma_service(int irq, void *device, struct pt_regs *regs) last_prim_buffer = dev_priv->last_prim; last_prim_buffer->num_dwords = 0; last_prim_buffer->sec_used = 0; - dev_priv->sarea_priv->last_dispatch = + dev_priv->sarea_priv->last_dispatch = dev_priv->last_prim_age = last_prim_buffer->prim_age; clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status); wake_up_interruptible(&dev_priv->wait_queue); @@ -686,12 +671,12 @@ int mga_dma_cleanup(drm_device_t *dev) DRM_DEBUG("%s\n", __FUNCTION__); if(dev->dev_private) { - drm_mga_private_t *dev_priv = + drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - + if(dev_priv->ioremap) { - int temp = (dev_priv->warp_ucode_size + - dev_priv->primary_size + + int temp = (dev_priv->warp_ucode_size + + dev_priv->primary_size + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE; drm_ioremapfree((void *) dev_priv->ioremap, temp); @@ -712,7 +697,7 @@ int mga_dma_cleanup(drm_device_t *dev) } } drm_free(dev_priv->prim_bufs, sizeof(void *) * - (MGA_NUM_PRIM_BUFS + 1), + (MGA_NUM_PRIM_BUFS + 1), DRM_MEM_DRIVER); } if(dev_priv->head != NULL) { @@ -720,7 +705,7 @@ int mga_dma_cleanup(drm_device_t *dev) } - drm_free(dev->dev_private, sizeof(drm_mga_private_t), + drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; } @@ -747,12 +732,12 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) { DRM_DEBUG("reserved_map or buffer_map are invalid\n"); return -EINVAL; } - + dev_priv->reserved_map_idx = init->reserved_map_idx; dev_priv->buffer_map_idx = init->buffer_map_idx; sarea_map = dev->maplist[0]; - dev_priv->sarea_priv = (drm_mga_sarea_t *) - ((u8 *)sarea_map->handle + + dev_priv->sarea_priv = (drm_mga_sarea_t *) + ((u8 *)sarea_map->handle + init->sarea_priv_offset); /* Scale primary size to the next page */ @@ -773,18 +758,18 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t 
*init) { dev_priv->vertexsize = 0; DRM_DEBUG("chipset: %d ucode_size: %d backOffset: %x depthOffset: %x\n", - dev_priv->chipset, dev_priv->warp_ucode_size, + dev_priv->chipset, dev_priv->warp_ucode_size, dev_priv->backOffset, dev_priv->depthOffset); DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n", - dev_priv->cpp, dev_priv->sgram, dev_priv->stride, + dev_priv->cpp, dev_priv->sgram, dev_priv->stride, dev_priv->mAccess); - - memcpy(&dev_priv->WarpIndex, &init->WarpIndex, + + memcpy(&dev_priv->WarpIndex, &init->WarpIndex, sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES); - for (i = 0 ; i < MGA_MAX_WARP_PIPES ; i++) + for (i = 0 ; i < MGA_MAX_WARP_PIPES ; i++) DRM_DEBUG("warp pipe %d: installed: %d phys: %lx size: %x\n", - i, + i, dev_priv->WarpIndex[i].installed, dev_priv->WarpIndex[i].phys_addr, dev_priv->WarpIndex[i].size); @@ -801,7 +786,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) { return -ENOMEM; } - dev_priv->status_page = + dev_priv->status_page = ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page), PAGE_SIZE); @@ -812,15 +797,15 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) { } /* Write status page when secend or softrap occurs */ - MGA_WRITE(MGAREG_PRIMPTR, + MGA_WRITE(MGAREG_PRIMPTR, virt_to_bus((void *)dev_priv->real_status_page) | 0x00000003); - + /* Private is now filled in, initialize the hardware */ { PRIMLOCALS; PRIMGETPTR( dev_priv ); - + PRIMOUTREG(MGAREG_DMAPAD, 0); PRIMOUTREG(MGAREG_DMAPAD, 0); PRIMOUTREG(MGAREG_DWGSYNC, 0x0100); @@ -828,15 +813,13 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) { /* Poll for the first buffer to insure that * the status register will be correct */ - -#ifdef __i386__ + mga_flush_write_combine(); -#endif MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL); - MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) | + MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) | PDEA_pagpxfer_enable)); - + while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ; } @@ -854,11 +837,12 @@ int mga_dma_init(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_mga_init_t init; - + DRM_DEBUG("%s\n", __FUNCTION__); - copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT); - + if (copy_from_user(&init, (drm_mga_init_t *)arg, sizeof(init))) + return -EFAULT; + switch(init.func) { case MGA_INIT_DMA: return mga_dma_initialize(dev, &init); @@ -874,7 +858,7 @@ int mga_irq_install(drm_device_t *dev, int irq) int retcode; if (!irq) return -EINVAL; - + down(&dev->struct_sem); if (dev->irq) { up(&dev->struct_sem); @@ -882,7 +866,7 @@ int mga_irq_install(drm_device_t *dev, int irq) } dev->irq = irq; up(&dev->struct_sem); - + DRM_DEBUG("install irq handler %d\n", irq); dev->context_flag = 0; @@ -923,7 +907,7 @@ int mga_irq_uninstall(drm_device_t *dev) irq = dev->irq; dev->irq = 0; up(&dev->struct_sem); - + if (!irq) return -EINVAL; DRM_DEBUG("remove irq handler %d\n", irq); MGA_WRITE(MGAREG_ICLEAR, 0x00000001); @@ -938,8 +922,9 @@ int mga_control(struct inode *inode, struct file *filp, unsigned int cmd, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_control_t ctl; - - copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT); + + if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl))) + return -EFAULT; DRM_DEBUG("%s\n", __FUNCTION__); @@ -964,21 +949,21 @@ static int mga_flush_queue(drm_device_t *dev) if(dev_priv == NULL) { return 0; } - + 
if(dev_priv->next_prim->num_dwords != 0) { current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev_priv->flush_queue, &entry); set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status); mga_dma_schedule(dev, 0); for (;;) { - if (!test_bit(MGA_IN_FLUSH, - &dev_priv->dispatch_status)) + if (!test_bit(MGA_IN_FLUSH, + &dev_priv->dispatch_status)) break; atomic_inc(&dev->total_sleeps); schedule(); if (signal_pending(current)) { ret = -EINTR; /* Can't restart */ - clear_bit(MGA_IN_FLUSH, + clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status); break; } @@ -1006,11 +991,11 @@ void mga_reclaim_buffers(drm_device_t *dev, pid_t pid) drm_buf_t *buf = dma->buflist[ i ]; drm_mga_buf_priv_t *buf_priv = buf->dev_private; - /* Only buffers that need to get reclaimed ever - * get set to free + /* Only buffers that need to get reclaimed ever + * get set to free */ if (buf->pid == pid && buf_priv) { - if(buf_priv->my_freelist->age == MGA_BUF_USED) + if(buf_priv->my_freelist->age == MGA_BUF_USED) buf_priv->my_freelist->age = MGA_BUF_FREE; } } @@ -1026,14 +1011,15 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, drm_lock_t lock; DRM_DEBUG("%s\n", __FUNCTION__); - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); return -EINVAL; } - + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", lock.context, current->pid, dev->lock.hw_lock->lock, lock.flags); @@ -1041,7 +1027,7 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, if (lock.context < 0) { return -EINVAL; } - + /* Only one queue: */ @@ -1060,7 +1046,7 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->total_locks); break; /* Got lock */ } - + /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; @@ -1073,7 +1059,7 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, current->state = TASK_RUNNING; remove_wait_queue(&dev->lock.lock_queue, &entry); } - + if (!ret) { sigemptyset(&dev->sigmask); sigaddset(&dev->sigmask, SIGSTOP); @@ -1090,12 +1076,12 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd, mga_dma_quiescent(dev); } } - + DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock"); return ret; } - -int mga_flush_ioctl(struct inode *inode, struct file *filp, + +int mga_flush_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; @@ -1104,7 +1090,8 @@ int mga_flush_ioctl(struct inode *inode, struct file *filp, drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("mga_flush_ioctl called without lock held\n"); diff --git a/linux/mga_drv.c b/linux/mga_drv.c index e7547c06..c49cef58 100644 --- a/linux/mga_drv.c +++ b/linux/mga_drv.c @@ -1,6 +1,6 @@ /* mga_drv.c -- Matrox g200/g400 driver -*- linux-c -*- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com - * + * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. 
@@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -36,7 +36,7 @@ #define MGA_NAME "mga" #define MGA_DESC "Matrox g200/g400" -#define MGA_DATE "20000831" +#define MGA_DATE "20000906" #define MGA_MAJOR 2 #define MGA_MINOR 0 #define MGA_PATCHLEVEL 0 @@ -144,7 +144,7 @@ __setup("mga=", mga_options); static int mga_setup(drm_device_t *dev) { int i; - + atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); dev->buf_use = 0; @@ -187,22 +187,22 @@ static int mga_setup(drm_device_t *dev) dev->ctx_start = 0; dev->lck_start = 0; - + dev->buf_rp = dev->buf; dev->buf_wp = dev->buf; dev->buf_end = dev->buf + DRM_BSZ; dev->buf_async = NULL; init_waitqueue_head(&dev->buf_readers); init_waitqueue_head(&dev->buf_writers); - + DRM_DEBUG("\n"); - + /* The kernel's context could be created here, but is now created in drm_dma_enqueue. This is more resource-efficient for hardware that does not do DMA, but may mean that drm_select_queue fails between the time the interrupt is initialized and the time the queues are initialized. */ - + return 0; } @@ -217,15 +217,15 @@ static int mga_takedown(drm_device_t *dev) DRM_DEBUG("\n"); if (dev->irq) mga_irq_uninstall(dev); - + down(&dev->struct_sem); del_timer(&dev->timer); - + if (dev->devname) { drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); dev->devname = NULL; } - + if (dev->unique) { drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); dev->unique = NULL; @@ -243,7 +243,7 @@ static int mga_takedown(drm_device_t *dev) if (dev->agp) { drm_agp_mem_t *entry; drm_agp_mem_t *nexte; - + /* Remove AGP resources, but leave dev->agp intact until cleanup is called. 
*/ for (entry = dev->agp->memory; entry; entry = nexte) { @@ -253,10 +253,10 @@ static int mga_takedown(drm_device_t *dev) drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); } dev->agp->memory = NULL; - + if (dev->agp->acquired && drm_agp.release) (*drm_agp.release)(); - + dev->agp->acquired = 0; dev->agp->enabled = 0; } @@ -268,7 +268,7 @@ static int mga_takedown(drm_device_t *dev) } dev->vmalist = NULL; } - + /* Clear map area and mtrr information */ if (dev->maplist) { for (i = 0; i < dev->map_count; i++) { @@ -304,7 +304,7 @@ static int mga_takedown(drm_device_t *dev) dev->maplist = NULL; dev->map_count = 0; } - + if (dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { drm_waitlist_destroy(&dev->queuelist[i]->waitlist); @@ -330,7 +330,7 @@ static int mga_takedown(drm_device_t *dev) wake_up_interruptible(&dev->lock.lock_queue); } up(&dev->struct_sem); - + return 0; } @@ -347,7 +347,7 @@ static int mga_init(void) memset((void *)dev, 0, sizeof(*dev)); dev->count_lock = SPIN_LOCK_UNLOCKED; sema_init(&dev->struct_sem, 1); - + #ifdef MODULE drm_parse_options(mga); #endif @@ -407,7 +407,7 @@ static void mga_cleanup(void) drm_device_t *dev = &mga_device; DRM_DEBUG("\n"); - + drm_proc_cleanup(); if (misc_deregister(&mga_misc)) { DRM_ERROR("Cannot unload module\n"); @@ -419,7 +419,7 @@ static void mga_cleanup(void) #ifdef CONFIG_MTRR if(dev->agp && dev->agp->agp_mtrr) { int retval; - retval = mtrr_del(dev->agp->agp_mtrr, + retval = mtrr_del(dev->agp->agp_mtrr, dev->agp->agp_info.aper_base, dev->agp->agp_info.aper_size * 1024*1024); DRM_DEBUG("mtrr_del = %d\n", retval); @@ -444,17 +444,18 @@ int mga_version(struct inode *inode, struct file *filp, unsigned int cmd, drm_version_t version; int len; - copy_from_user_ret(&version, + if (copy_from_user(&version, (drm_version_t *)arg, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; #define DRM_COPY(name,value) \ len = strlen(value); \ if (len > name##_len) len = name##_len; \ name##_len = strlen(value); \ if (len && name) { \ - copy_to_user_ret(name, value, len, -EFAULT); \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = MGA_MAJOR; @@ -465,10 +466,10 @@ int mga_version(struct inode *inode, struct file *filp, unsigned int cmd, DRM_COPY(version.date, MGA_DATE); DRM_COPY(version.desc, MGA_DESC); - copy_to_user_ret((drm_version_t *)arg, + if (copy_to_user((drm_version_t *)arg, &version, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; return 0; } @@ -476,7 +477,7 @@ int mga_open(struct inode *inode, struct file *filp) { drm_device_t *dev = &mga_device; int retcode = 0; - + DRM_DEBUG("open_count = %d\n", dev->open_count); if (!(retcode = drm_open_helper(inode, filp, dev))) { #if LINUX_VERSION_CODE < 0x020333 @@ -513,7 +514,7 @@ int mga_release(struct inode *inode, struct file *filp) drm_lock_free(dev, &dev->lock.hw_lock->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); - + /* FIXME: may require heavy-handed reset of hardware at this point, possibly processed via a callback to the X @@ -534,7 +535,7 @@ int mga_release(struct inode *inode, struct file *filp) dev->lock.lock_time = jiffies; atomic_inc(&dev->total_locks); break; /* Got lock */ - } + } /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; @@ -560,7 +561,7 @@ int mga_release(struct inode *inode, struct file *filp) if (priv->next) priv->next->prev = priv->prev; else dev->file_last = priv->prev; up(&dev->struct_sem); - + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); #if 
LINUX_VERSION_CODE < 0x020333 MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */ @@ -601,7 +602,7 @@ int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->ioctl_count); atomic_inc(&dev->total_ioctl); ++priv->ioctl_count; - + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", current->pid, cmd, nr, dev->device, priv->authenticated); @@ -621,7 +622,7 @@ int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, retcode = (func)(inode, filp, cmd, arg); } } - + atomic_dec(&dev->ioctl_count); return retcode; } @@ -633,8 +634,9 @@ int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_lock_t lock; - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); - + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); diff --git a/linux/mga_drv.h b/linux/mga_drv.h index 0a6a13b6..1360cf63 100644 --- a/linux/mga_drv.h +++ b/linux/mga_drv.h @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -98,7 +98,7 @@ typedef struct _drm_mga_private { wait_queue_head_t wait_queue; /* Processes waiting until interrupt */ wait_queue_head_t buf_queue; /* Processes waiting for a free buf */ /* Some validated register values: - */ + */ u32 mAccess; } drm_mga_private_t; @@ -129,7 +129,6 @@ extern int mga_dma_init(struct inode *inode, struct file *filp, extern int mga_dma_cleanup(drm_device_t *dev); extern int mga_flush_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern void mga_flush_write_combine(void); extern unsigned int mga_create_sync_tag(drm_device_t *dev); extern drm_buf_t *mga_freelist_get(drm_device_t *dev); extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf); @@ -138,9 +137,9 @@ extern void mga_reclaim_buffers(drm_device_t *dev, pid_t pid); /* mga_bufs.c */ -extern int mga_addbufs(struct inode *inode, struct file *filp, +extern int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int mga_infobufs(struct inode *inode, struct file *filp, +extern int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -180,6 +179,7 @@ extern int mga_rmctx(struct inode *inode, struct file *filp, extern int mga_context_switch(drm_device_t *dev, int old, int new); extern int mga_context_switch_complete(drm_device_t *dev, int new); +#define mga_flush_write_combine() mb() typedef enum { TT_GENERAL, @@ -202,7 +202,7 @@ typedef struct { #define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END) #define ADRINDEX0(r) (u8)((r - DWGREG0) >> 2) #define ADRINDEX1(r) (u8)(((r - DWGREG1) >> 2) | 0x80) -#define ADRINDEX(r) (ISREG0(r) ? 
ADRINDEX0(r) : ADRINDEX1(r)) +#define ADRINDEX(r) (ISREG0(r) ? ADRINDEX0(r) : ADRINDEX1(r)) #define MGA_VERBOSE 0 #define MGA_NUM_PRIM_BUFS 8 @@ -396,8 +396,8 @@ drm_mga_prim_buf_t *tmp_buf = \ #define DC_atype_zi 0x30 #define DC_atype_blk 0x40 #define DC_atype_i 0x70 -#define DC_linear_xy 0x0 -#define DC_linear_linear 0x80 +#define DC_linear_xy 0x0 +#define DC_linear_linear 0x80 #define DC_zmode_nozcmp 0x0 #define DC_zmode_ze 0x200 #define DC_zmode_zne 0x300 @@ -405,16 +405,16 @@ drm_mga_prim_buf_t *tmp_buf = \ #define DC_zmode_zlte 0x500 #define DC_zmode_zgt 0x600 #define DC_zmode_zgte 0x700 -#define DC_solid_disable 0x0 -#define DC_solid_enable 0x800 -#define DC_arzero_disable 0x0 -#define DC_arzero_enable 0x1000 -#define DC_sgnzero_disable 0x0 -#define DC_sgnzero_enable 0x2000 -#define DC_shftzero_disable 0x0 -#define DC_shftzero_enable 0x4000 -#define DC_bop_SHIFT 16 -#define DC_trans_SHIFT 20 +#define DC_solid_disable 0x0 +#define DC_solid_enable 0x800 +#define DC_arzero_disable 0x0 +#define DC_arzero_enable 0x1000 +#define DC_sgnzero_disable 0x0 +#define DC_sgnzero_enable 0x2000 +#define DC_shftzero_disable 0x0 +#define DC_shftzero_enable 0x4000 +#define DC_bop_SHIFT 16 +#define DC_trans_SHIFT 20 #define DC_bltmod_bmonolef 0x0 #define DC_bltmod_bmonowf 0x8000000 #define DC_bltmod_bplan 0x2000000 @@ -423,21 +423,21 @@ drm_mga_prim_buf_t *tmp_buf = \ #define DC_bltmod_bu32rgb 0xe000000 #define DC_bltmod_bu24bgr 0x16000000 #define DC_bltmod_bu24rgb 0x1e000000 -#define DC_pattern_disable 0x0 -#define DC_pattern_enable 0x20000000 -#define DC_transc_disable 0x0 -#define DC_transc_enable 0x40000000 -#define DC_clipdis_disable 0x0 -#define DC_clipdis_enable 0x80000000 +#define DC_pattern_disable 0x0 +#define DC_pattern_enable 0x20000000 +#define DC_transc_disable 0x0 +#define DC_transc_enable 0x40000000 +#define DC_clipdis_disable 0x0 +#define DC_clipdis_enable 0x80000000 -#define SETADD_mode_vertlist 0x0 +#define SETADD_mode_vertlist 0x0 #define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \ DC_sgnzero_enable | DC_shftzero_enable | \ (0xC << DC_bop_SHIFT) | DC_clipdis_enable | \ DC_solid_enable | DC_transc_enable) - + #define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy | \ DC_solid_disable | DC_arzero_disable | \ diff --git a/linux/mga_state.c b/linux/mga_state.c index c8963964..0c2f5729 100644 --- a/linux/mga_state.c +++ b/linux/mga_state.c @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL @@ -38,13 +38,13 @@ * change these values */ -#define MGAEMITCLIP_SIZE 10 -#define MGAEMITCTX_SIZE 20 -#define MGAG200EMITTEX_SIZE 20 -#define MGAG400EMITTEX0_SIZE 30 -#define MGAG400EMITTEX1_SIZE 25 -#define MGAG400EMITPIPE_SIZE 55 -#define MGAG200EMITPIPE_SIZE 15 +#define MGAEMITCLIP_SIZE 10 +#define MGAEMITCTX_SIZE 20 +#define MGAG200EMITTEX_SIZE 20 +#define MGAG400EMITTEX0_SIZE 30 +#define MGAG400EMITTEX1_SIZE 25 +#define MGAG400EMITPIPE_SIZE 55 +#define MGAG200EMITPIPE_SIZE 15 #define MAX_STATE_SIZE ((MGAEMITCLIP_SIZE * MGA_NR_SAREA_CLIPRECTS) + \ MGAEMITCTX_SIZE + MGAG400EMITTEX0_SIZE + \ @@ -107,7 +107,7 @@ static void mgaEmitContext(drm_mga_private_t * dev_priv) PRIMOUTREG(MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0]); PRIMOUTREG(MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1]); PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]); - + PRIMOUTREG(MGAREG_STENCIL, regs[MGA_CTXREG_STENCIL]); PRIMOUTREG(MGAREG_STENCILCTL, regs[MGA_CTXREG_STENCILCTL]); PRIMOUTREG(MGAREG_DMAPAD, 0); @@ -156,7 +156,7 @@ static void mgaG200EmitTex(drm_mga_private_t * dev_priv) PRIMADVANCE(dev_priv); } -#define TMC_dualtex_enable 0x80 +#define TMC_dualtex_enable 0x80 static void mgaG400EmitTex0(drm_mga_private_t * dev_priv) { @@ -170,9 +170,7 @@ static void mgaG400EmitTex0(drm_mga_private_t * dev_priv) /* This takes a max of 30 dwords */ - PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] - | 0x00008000 - ); + PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000); PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]); PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]); PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]); @@ -220,8 +218,8 @@ static void mgaG400EmitTex1(drm_mga_private_t * dev_priv, int source ) /* This takes 25 dwords */ - PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable | - 0x00008000); + PRIMOUTREG(MGAREG_TEXCTL2, + regs[MGA_TEXREG_CTL2] | TMC_map1_enable | 0x00008000); PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]); PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]); PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]); @@ -254,7 +252,7 @@ static void mgaG400EmitPipe(drm_mga_private_t * dev_priv) drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->WarpPipe; float fParam = 12800.0f; - int multitex = (sarea_priv->TexState[0][MGA_TEXREG_CTL2] & + int multitex = (sarea_priv->TexState[0][MGA_TEXREG_CTL2] & TMC_dualtex_enable); PRIMLOCALS; DRM_DEBUG("%s\n", __FUNCTION__); @@ -263,7 +261,7 @@ static void mgaG400EmitPipe(drm_mga_private_t * dev_priv) /* This takes 50 dwords */ - /* Establish vertex size. + /* Establish vertex size. 
*/ PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend); PRIMOUTREG(MGAREG_DMAPAD, 0); @@ -280,11 +278,9 @@ static void mgaG400EmitPipe(drm_mga_private_t * dev_priv) PRIMOUTREG(MGAREG_DMAPAD, 0); PRIMOUTREG(MGAREG_DWGSYNC, 0x7000); PRIMOUTREG(MGAREG_DMAPAD, 0); - + if (multitex) { - PRIMOUTREG(MGAREG_TEXCTL2, 0 - | 0x00008000 - ); + PRIMOUTREG(MGAREG_TEXCTL2, 0 | 0x00008000); PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0); PRIMOUTREG(MGAREG_TEXCTL2, 0x80 | 0x00008000); PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0); @@ -399,10 +395,10 @@ static void mgaEmitState(drm_mga_private_t * dev_priv) DRM_DEBUG("%s\n", __FUNCTION__); if (dev_priv->chipset == MGA_CARD_TYPE_G400) { - int multitex = (sarea_priv->TexState[0][MGA_TEXREG_CTL2] & + int multitex = (sarea_priv->TexState[0][MGA_TEXREG_CTL2] & TMC_dualtex_enable); - dirty = ~0; + dirty = ~0; if (dirty & MGA_UPLOAD_PIPE /* && (sarea_priv->WarpPipe != dev_priv->WarpPipe || */ @@ -600,7 +596,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf) if (buf->used) { /* WARNING: if you change any of the state functions verify - * these numbers (Overestimating this doesn't hurt). + * these numbers (Overestimating this doesn't hurt). */ buf_priv->dispatched = 1; PRIM_OVERFLOW(dev, dev_priv, @@ -665,7 +661,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, if (start != end) { /* WARNING: if you change any of the state functions verify - * these numbers (Overestimating this doesn't hurt). + * these numbers (Overestimating this doesn't hurt). */ buf_priv->dispatched = 1; PRIM_OVERFLOW(dev, dev_priv, @@ -856,8 +852,8 @@ int mga_clear_bufs(struct inode *inode, struct file *filp, drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_clear_t clear; - copy_from_user_ret(&clear, (drm_mga_clear_t *) arg, sizeof(clear), - -EFAULT); + if (copy_from_user(&clear, (drm_mga_clear_t *) arg, sizeof(clear))) + return -EFAULT; DRM_DEBUG("%s\n", __FUNCTION__); if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { @@ -872,14 +868,12 @@ int mga_clear_bufs(struct inode *inode, struct file *filp, */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX; mga_dma_dispatch_clear(dev, clear.flags, - clear.clear_color, + clear.clear_color, clear.clear_depth, clear.clear_color_mask, clear.clear_depth_mask); PRIMUPDATE(dev_priv); -#ifdef __i386__ mga_flush_write_combine(); -#endif mga_dma_schedule(dev, 1); return 0; } @@ -909,9 +903,7 @@ int mga_swap_bufs(struct inode *inode, struct file *filp, PRIMUPDATE(dev_priv); set_bit(MGA_BUF_SWAP_PENDING, &dev_priv->current_prim->buffer_status); -#ifdef __i386__ mga_flush_write_combine(); -#endif mga_dma_schedule(dev, 1); return 0; } @@ -932,8 +924,8 @@ int mga_iload(struct inode *inode, struct file *filp, DRM_DEBUG("%s\n", __FUNCTION__); DRM_DEBUG("Starting Iload\n"); - copy_from_user_ret(&iload, (drm_mga_iload_t *) arg, sizeof(iload), - -EFAULT); + if (copy_from_user(&iload, (drm_mga_iload_t *) arg, sizeof(iload))) + return -EFAULT; if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("mga_iload called without lock held\n"); @@ -959,9 +951,7 @@ int mga_iload(struct inode *inode, struct file *filp, AGEBUF(dev_priv, buf_priv); buf_priv->discard = 1; mga_freelist_put(dev, buf); -#ifdef __i386__ mga_flush_write_combine(); -#endif mga_dma_schedule(dev, 1); return 0; } @@ -979,8 +969,8 @@ int mga_vertex(struct inode *inode, struct file *filp, drm_mga_vertex_t vertex; DRM_DEBUG("%s\n", __FUNCTION__); - copy_from_user_ret(&vertex, (drm_mga_vertex_t *) arg, - sizeof(vertex), -EFAULT); + if (copy_from_user(&vertex, 
(drm_mga_vertex_t *) arg, sizeof(vertex))) + return -EFAULT; if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("mga_vertex called without lock held\n"); @@ -1009,9 +999,7 @@ int mga_vertex(struct inode *inode, struct file *filp, mga_dma_dispatch_vertex(dev, buf); PRIMUPDATE(dev_priv); -#ifdef __i386__ mga_flush_write_combine(); -#endif mga_dma_schedule(dev, 1); return 0; } @@ -1030,8 +1018,8 @@ int mga_indices(struct inode *inode, struct file *filp, drm_mga_indices_t indices; DRM_DEBUG("%s\n", __FUNCTION__); - copy_from_user_ret(&indices, (drm_mga_indices_t *) arg, - sizeof(indices), -EFAULT); + if (copy_from_user(&indices, (drm_mga_indices_t *) arg, sizeof(indices))) + return -EFAULT; if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("mga_indices called without lock held\n"); @@ -1058,9 +1046,7 @@ int mga_indices(struct inode *inode, struct file *filp, mga_dma_dispatch_indices(dev, buf, indices.start, indices.end); PRIMUPDATE(dev_priv); -#ifdef __i386__ mga_flush_write_combine(); -#endif mga_dma_schedule(dev, 1); return 0; } @@ -1078,10 +1064,12 @@ static int mga_dma_get_buffers(drm_device_t * dev, drm_dma_t * d) if (!buf) break; buf->pid = current->pid; - copy_to_user_ret(&d->request_indices[i], - &buf->idx, sizeof(buf->idx), -EFAULT); - copy_to_user_ret(&d->request_sizes[i], - &buf->total, sizeof(buf->total), -EFAULT); + if (copy_to_user(&d->request_indices[i], + &buf->idx, sizeof(buf->idx))) + return -EFAULT; + if (copy_to_user(&d->request_sizes[i], + &buf->total, sizeof(buf->total))) + return -EFAULT; ++d->granted_count; } return 0; @@ -1097,7 +1085,8 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd, drm_dma_t d; DRM_DEBUG("%s\n", __FUNCTION__); - copy_from_user_ret(&d, (drm_dma_t *) arg, sizeof(d), -EFAULT); + if (copy_from_user(&d, (drm_dma_t *) arg, sizeof(d))) + return -EFAULT; DRM_DEBUG("%d %d: %d send, %d req\n", current->pid, d.context, d.send_count, d.request_count); @@ -1132,6 +1121,7 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("%d returning, granted = %d\n", current->pid, d.granted_count); - copy_to_user_ret((drm_dma_t *) arg, &d, sizeof(d), -EFAULT); + if (copy_to_user((drm_dma_t *) arg, &d, sizeof(d))) + return -EFAULT; return retcode; } diff --git a/linux/r128_bufs.c b/linux/r128_bufs.c index bd81dcdc..7e76441e 100644 --- a/linux/r128_bufs.c +++ b/linux/r128_bufs.c @@ -60,10 +60,10 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, if (!dma) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; count = request.count; order = drm_order(request.size); @@ -173,10 +173,10 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd, request.count = entry->buf_count; request.size = size; - copy_to_user_ret((drm_buf_desc_t *)arg, + if (copy_to_user((drm_buf_desc_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; dma->flags = _DRM_DMA_USE_AGP; @@ -195,10 +195,10 @@ int r128_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, if (!dev_priv || dev_priv->is_pci) return -EINVAL; - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_desc_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) if (request.flags & _DRM_AGP_BUFFER) @@ -234,10 +234,10 @@ int r128_mapbufs(struct inode *inode, struct 
file *filp, unsigned int cmd, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - copy_from_user_ret(&request, + if (copy_from_user(&request, (drm_buf_map_t *)arg, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; if (request.count >= dma->buf_count) { if (dma->flags & _DRM_DMA_USE_AGP) { @@ -300,10 +300,10 @@ int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, request.count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - copy_to_user_ret((drm_buf_map_t *)arg, + if (copy_to_user((drm_buf_map_t *)arg, &request, - sizeof(request), - -EFAULT); + sizeof(request))) + return -EFAULT; return retcode; } diff --git a/linux/r128_context.c b/linux/r128_context.c index 2dd716d4..9cadadba 100644 --- a/linux/r128_context.c +++ b/linux/r128_context.c @@ -103,19 +103,21 @@ int r128_resctx(struct inode *inode, struct file *filp, unsigned int cmd, int i; DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); - copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res))) + return -EFAULT; if (res.count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - copy_to_user_ret(&res.contexts[i], + if (copy_to_user(&res.contexts[i], &i, - sizeof(i), - -EFAULT); + sizeof(i))) + return -EFAULT; } } res.count = DRM_RESERVED_CONTEXTS; - copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res))) + return -EFAULT; return 0; } @@ -127,7 +129,8 @@ int r128_addctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; if ((ctx.handle = r128_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. 
*/ ctx.handle = r128_alloc_queue(dev); @@ -139,7 +142,8 @@ int r128_addctx(struct inode *inode, struct file *filp, unsigned int cmd, return -ENOMEM; } - copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -148,7 +152,8 @@ int r128_modctx(struct inode *inode, struct file *filp, unsigned int cmd, { drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; if (ctx.flags==_DRM_CONTEXT_PRESERVED) r128_res_ctx.handle=ctx.handle; return 0; @@ -159,10 +164,12 @@ int r128_getctx(struct inode *inode, struct file *filp, unsigned int cmd, { drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; /* This is 0, because we don't hanlde any context flags */ ctx.flags = 0; - copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -173,7 +180,8 @@ int r128_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); return r128_context_switch(dev, dev->last_context, ctx.handle); } @@ -185,7 +193,8 @@ int r128_newctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); r128_context_switch_complete(dev, ctx.handle); @@ -199,7 +208,8 @@ int r128_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); drm_ctxbitmap_free(dev, ctx.handle); diff --git a/linux/r128_dma.c b/linux/r128_dma.c index d4b75bed..bcba6782 100644 --- a/linux/r128_dma.c +++ b/linux/r128_dma.c @@ -68,26 +68,8 @@ int R128_READ_PLL(drm_device_t *dev, int addr) return R128_READ(R128_CLOCK_CNTL_DATA); } -#ifdef __i386__ -static void r128_flush_write_combine(void) -{ - int xchangeDummy; - - __asm__ volatile("push %%eax ;" - "xchg %%eax, %0 ;" - "pop %%eax" : : "m" (xchangeDummy)); - __asm__ volatile("push %%eax ;" - "push %%ebx ;" - "push %%ecx ;" - "push %%edx ;" - "movl $0,%%eax ;" - "cpuid ;" - "pop %%edx ;" - "pop %%ecx ;" - "pop %%ebx ;" - "pop %%eax" : /* no outputs */ : /* no inputs */ ); -} -#endif +#define r128_flush_write_combine() mb() + static void r128_status(drm_device_t *dev) { @@ -213,8 +195,8 @@ int r128_init_cce(struct inode *inode, struct file *filp, drm_device_t *dev = priv->dev; drm_r128_init_t init; - copy_from_user_ret(&init, (drm_r128_init_t *)arg, sizeof(init), - -EFAULT); + if (copy_from_user(&init, (drm_r128_init_t *)arg, sizeof(init))) + return -EFAULT; switch (init.func) { case R128_INIT_CCE: @@ -498,10 +480,8 @@ static int r128_submit_packets_ring_secure(drm_device_t *dev, dev_priv->ring_start, write * sizeof(u32)); -#ifdef __i386__ /* Make sure WC cache has been flushed */ r128_flush_write_combine(); -#endif dev_priv->sarea_priv->ring_write = write; 
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write); @@ -603,10 +583,8 @@ static int r128_submit_packets_ring(drm_device_t *dev, dev_priv->ring_start, write * sizeof(u32)); -#ifdef __i386__ /* Make sure WC cache has been flushed */ r128_flush_write_combine(); -#endif dev_priv->sarea_priv->ring_write = write; R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write); @@ -686,8 +664,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp, return -EINVAL; } - copy_from_user_ret(&packet, (drm_r128_packet_t *)arg, sizeof(packet), - -EFAULT); + if (copy_from_user(&packet, (drm_r128_packet_t *)arg, sizeof(packet))) + return -EFAULT; c = packet.count; size = c * sizeof(*buffer); @@ -702,7 +680,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp, } if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM; - copy_from_user_ret(buffer, packet.buffer, size, -EFAULT); + if (copy_from_user(buffer, packet.buffer, size)) + return -EFAULT; if (dev_priv->cce_secure) ret = r128_submit_packets_ring_secure(dev, buffer, &c); @@ -712,7 +691,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp, c += left; } else { if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM; - copy_from_user_ret(buffer, packet.buffer, size, -EFAULT); + if (copy_from_user(buffer, packet.buffer, size)) + return -EFAULT; if (dev_priv->cce_secure) ret = r128_submit_packets_pio_secure(dev, buffer, &c); @@ -723,8 +703,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp, kfree(buffer); packet.count = c; - copy_to_user_ret((drm_r128_packet_t *)arg, &packet, sizeof(packet), - -EFAULT); + if (copy_to_user((drm_r128_packet_t *)arg, &packet, sizeof(packet))) + return -EFAULT; if (ret) return ret; else if (c > 0) return -EAGAIN; @@ -772,10 +752,8 @@ static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v) r128_mark_vertbufs_done(dev); } -#ifdef __i386__ /* Make sure WC cache has been flushed (if in PIO mode) */ if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine(); -#endif /* FIXME: Add support for sending vertex buffer to the CCE here instead of in client code. 
The v->prim holds the primitive @@ -863,14 +841,13 @@ static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v) buf = r128_freelist_get(dev); if (!buf) break; buf->pid = current->pid; - copy_to_user_ret(&v->request_indices[i], + if (copy_to_user(&v->request_indices[i], &buf->idx, - sizeof(buf->idx), - -EFAULT); - copy_to_user_ret(&v->request_sizes[i], + sizeof(buf->idx)) || + copy_to_user(&v->request_sizes[i], &buf->total, - sizeof(buf->total), - -EFAULT); + sizeof(buf->total))) + return -EFAULT; ++v->granted_count; } return 0; @@ -897,7 +874,8 @@ int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd, return -EINVAL; } - copy_from_user_ret(&v, (drm_r128_vertex_t *)arg, sizeof(v), -EFAULT); + if (copy_from_user(&v, (drm_r128_vertex_t *)arg, sizeof(v))) + return -EFAULT; DRM_DEBUG("%d: %d send, %d req\n", current->pid, v.send_count, v.request_count); @@ -924,7 +902,8 @@ int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("%d returning, granted = %d\n", current->pid, v.granted_count); - copy_to_user_ret((drm_r128_vertex_t *)arg, &v, sizeof(v), -EFAULT); + if (copy_to_user((drm_r128_vertex_t *)arg, &v, sizeof(v))) + return -EFAULT; return retcode; } diff --git a/linux/r128_drv.c b/linux/r128_drv.c index bb24b13e..9a9b2d16 100644 --- a/linux/r128_drv.c +++ b/linux/r128_drv.c @@ -35,7 +35,7 @@ #define R128_NAME "r128" #define R128_DESC "ATI Rage 128" -#define R128_DATE "20000719" +#define R128_DATE "20000906" #define R128_MAJOR 1 #define R128_MINOR 0 #define R128_PATCHLEVEL 0 @@ -420,17 +420,18 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, drm_version_t version; int len; - copy_from_user_ret(&version, + if (copy_from_user(&version, (drm_version_t *)arg, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; #define DRM_COPY(name,value) \ len = strlen(value); \ if (len > name##_len) len = name##_len; \ name##_len = strlen(value); \ if (len && name) { \ - copy_to_user_ret(name, value, len, -EFAULT); \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = R128_MAJOR; @@ -441,10 +442,10 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd, DRM_COPY(version.date, R128_DATE); DRM_COPY(version.desc, R128_DESC); - copy_to_user_ret((drm_version_t *)arg, + if (copy_to_user((drm_version_t *)arg, &version, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; return 0; } @@ -466,7 +467,7 @@ int r128_open(struct inode *inode, struct file *filp) } spin_unlock(&dev->count_lock); } - + return retcode; } @@ -500,7 +501,7 @@ int r128_release(struct inode *inode, struct file *filp) } spin_unlock(&dev->count_lock); } - + unlock_kernel(); return retcode; } @@ -559,7 +560,8 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->lck_start = start = get_cycles(); #endif - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", @@ -664,7 +666,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->sigdata.context = lock.context; dev->sigdata.lock = dev->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); - if (lock.flags & _DRM_LOCK_READY) { /* Wait for space in DMA/FIFO */ } @@ -699,7 +700,8 @@ int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd, 
drm_device_t *dev = priv->dev; drm_lock_t lock; - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", @@ -728,7 +730,6 @@ int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd, current->priority = DEF_PRIORITY; } #endif - unblock_all_signals(); return 0; } diff --git a/linux/tdfx_context.c b/linux/tdfx_context.c index d6903c0a..1fd73310 100644 --- a/linux/tdfx_context.c +++ b/linux/tdfx_context.c @@ -105,19 +105,21 @@ int tdfx_resctx(struct inode *inode, struct file *filp, unsigned int cmd, int i; DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); - copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT); + if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res))) + return -EFAULT; if (res.count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - copy_to_user_ret(&res.contexts[i], + if (copy_to_user(&res.contexts[i], &i, - sizeof(i), - -EFAULT); + sizeof(i))) + return -EFAULT; } } res.count = DRM_RESERVED_CONTEXTS; - copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT); + if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res))) + return -EFAULT; return 0; } @@ -129,7 +131,8 @@ int tdfx_addctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; if ((ctx.handle = tdfx_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ ctx.handle = tdfx_alloc_queue(dev); @@ -141,7 +144,8 @@ int tdfx_addctx(struct inode *inode, struct file *filp, unsigned int cmd, return -ENOMEM; } - copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -150,7 +154,8 @@ int tdfx_modctx(struct inode *inode, struct file *filp, unsigned int cmd, { drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; if (ctx.flags==_DRM_CONTEXT_PRESERVED) tdfx_res_ctx.handle=ctx.handle; return 0; @@ -161,10 +166,12 @@ int tdfx_getctx(struct inode *inode, struct file *filp, unsigned int cmd, { drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT); - /* This is 0, because we don't hanlde any context flags */ + if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx))) + return -EFAULT; + /* This is 0, because we don't handle any context flags */ ctx.flags = 0; - copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT); + if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx))) + return -EFAULT; return 0; } @@ -175,7 +182,8 @@ int tdfx_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); return tdfx_context_switch(dev, dev->last_context, ctx.handle); } @@ -187,7 +195,8 @@ int tdfx_newctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if 
(copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); tdfx_context_switch_complete(dev, ctx.handle); @@ -201,7 +210,8 @@ int tdfx_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_ctx_t ctx; - copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT); + if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx))) + return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); drm_ctxbitmap_free(dev, ctx.handle); diff --git a/linux/tdfx_drv.c b/linux/tdfx_drv.c index 07febea1..811108e2 100644 --- a/linux/tdfx_drv.c +++ b/linux/tdfx_drv.c @@ -11,11 +11,11 @@ * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -23,7 +23,7 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. - * + * * Authors: * Rickard E. (Rik) Faith <faith@valinux.com> * Daryll Strauss <daryll@valinux.com> @@ -36,7 +36,7 @@ #define TDFX_NAME "tdfx" #define TDFX_DESC "3dfx Banshee/Voodoo3+" -#define TDFX_DATE "20000719" +#define TDFX_DATE "20000906" #define TDFX_MAJOR 1 #define TDFX_MINOR 0 #define TDFX_PATCHLEVEL 0 @@ -76,7 +76,7 @@ static drm_ioctl_desc_t tdfx_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, - + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 }, @@ -128,7 +128,7 @@ __setup("tdfx=", tdfx_options); static int tdfx_setup(drm_device_t *dev) { int i; - + atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); dev->buf_use = 0; @@ -170,7 +170,7 @@ static int tdfx_setup(drm_device_t *dev) dev->ctx_start = 0; dev->lck_start = 0; - + dev->buf_rp = dev->buf; dev->buf_wp = dev->buf; dev->buf_end = dev->buf + DRM_BSZ; @@ -179,15 +179,15 @@ static int tdfx_setup(drm_device_t *dev) init_waitqueue_head(&dev->buf_writers); tdfx_res_ctx.handle=-1; - + DRM_DEBUG("\n"); - + /* The kernel's context could be created here, but is now created in drm_dma_enqueue. This is more resource-efficient for hardware that does not do DMA, but may mean that drm_select_queue fails between the time the interrupt is initialized and the time the queues are initialized. 
*/ - + return 0; } @@ -203,12 +203,12 @@ static int tdfx_takedown(drm_device_t *dev) down(&dev->struct_sem); del_timer(&dev->timer); - + if (dev->devname) { drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); dev->devname = NULL; } - + if (dev->unique) { drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); dev->unique = NULL; @@ -227,7 +227,7 @@ static int tdfx_takedown(drm_device_t *dev) if (dev->agp) { drm_agp_mem_t *temp; drm_agp_mem_t *temp_next; - + temp = dev->agp->memory; while(temp != NULL) { temp_next = temp->next; @@ -246,7 +246,7 @@ static int tdfx_takedown(drm_device_t *dev) } dev->vmalist = NULL; } - + /* Clear map area and mtrr information */ if (dev->maplist) { for (i = 0; i < dev->map_count; i++) { @@ -284,14 +284,14 @@ static int tdfx_takedown(drm_device_t *dev) dev->maplist = NULL; dev->map_count = 0; } - + if (dev->lock.hw_lock) { dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.pid = 0; wake_up_interruptible(&dev->lock.lock_queue); } up(&dev->struct_sem); - + return 0; } @@ -308,7 +308,7 @@ static int tdfx_init(void) memset((void *)dev, 0, sizeof(*dev)); dev->count_lock = SPIN_LOCK_UNLOCKED; sema_init(&dev->struct_sem, 1); - + #ifdef MODULE drm_parse_options(tdfx); #endif @@ -340,7 +340,7 @@ static int tdfx_init(void) TDFX_PATCHLEVEL, TDFX_DATE, tdfx_misc.minor); - + return 0; } @@ -351,7 +351,7 @@ static void tdfx_cleanup(void) drm_device_t *dev = &tdfx_device; DRM_DEBUG("\n"); - + drm_proc_cleanup(); if (misc_deregister(&tdfx_misc)) { DRM_ERROR("Cannot unload module\n"); @@ -379,17 +379,18 @@ int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd, drm_version_t version; int len; - copy_from_user_ret(&version, + if (copy_from_user(&version, (drm_version_t *)arg, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; #define DRM_COPY(name,value) \ len = strlen(value); \ if (len > name##_len) len = name##_len; \ name##_len = strlen(value); \ if (len && name) { \ - copy_to_user_ret(name, value, len, -EFAULT); \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ } version.version_major = TDFX_MAJOR; @@ -400,10 +401,10 @@ int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd, DRM_COPY(version.date, TDFX_DATE); DRM_COPY(version.desc, TDFX_DESC); - copy_to_user_ret((drm_version_t *)arg, + if (copy_to_user((drm_version_t *)arg, &version, - sizeof(version), - -EFAULT); + sizeof(version))) + return -EFAULT; return 0; } @@ -411,7 +412,7 @@ int tdfx_open(struct inode *inode, struct file *filp) { drm_device_t *dev = &tdfx_device; int retcode = 0; - + DRM_DEBUG("open_count = %d\n", dev->open_count); if (!(retcode = drm_open_helper(inode, filp, dev))) { #if LINUX_VERSION_CODE < 0x020333 @@ -479,7 +480,7 @@ int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->ioctl_count); atomic_inc(&dev->total_ioctl); ++priv->ioctl_count; - + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", current->pid, cmd, nr, dev->device, priv->authenticated); @@ -499,7 +500,7 @@ int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, retcode = (func)(inode, filp, cmd, arg); } } - + atomic_dec(&dev->ioctl_count); return retcode; } @@ -518,7 +519,8 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->lck_start = start = get_cycles(); #endif - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; if (lock.context == 
DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", @@ -536,7 +538,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, if (lock.context < 0 || lock.context >= dev->queue_count) return -EINVAL; #endif - + if (!ret) { #if 0 if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) @@ -548,7 +550,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, /* Can't take lock if we just had it and there is contention. */ DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n", - lock.context, current->pid, j, + lock.context, current->pid, j, dev->lock.lock_time, jiffies); current->state = TASK_INTERRUPTIBLE; current->policy |= SCHED_YIELD; @@ -571,7 +573,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, atomic_inc(&dev->total_locks); break; /* Got lock */ } - + /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; @@ -615,7 +617,6 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, #endif if (!ret) { -#if LINUX_VERSION_CODE >= 0x020400 /* KERNEL_VERSION(2,4,0) */ sigemptyset(&dev->sigmask); sigaddset(&dev->sigmask, SIGSTOP); sigaddset(&dev->sigmask, SIGTSTP); @@ -624,7 +625,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, dev->sigdata.context = lock.context; dev->sigdata.lock = dev->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); -#endif + if (lock.flags & _DRM_LOCK_READY) { /* Wait for space in DMA/FIFO */ } @@ -647,7 +648,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd, #if DRM_DMA_HISTOGRAM atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); #endif - + return ret; } @@ -659,8 +660,9 @@ int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd, drm_device_t *dev = priv->dev; drm_lock_t lock; - copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); - + if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock))) + return -EFAULT; + if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); @@ -688,9 +690,7 @@ int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd, current->priority = DEF_PRIORITY; } #endif - -#if LINUX_VERSION_CODE >= 0x020400 /* KERNEL_VERSION(2,4,0) */ + unblock_all_signals(); -#endif return 0; } |
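
Editor's note on the recurring pattern above: almost every hunk in this sync converts the old copy_from_user_ret()/copy_to_user_ret() convenience macros into explicit calls to copy_from_user()/copy_to_user() followed by an open-coded -EFAULT return. The sketch below restates that pattern outside the diff; my_args_t and my_copy_ioctl() are hypothetical stand-ins for illustration only, not code from this tree.

    /*
     * Minimal sketch of the user-copy conversion, under the assumption of a
     * made-up ioctl argument struct (my_args_t).
     */
    #include <linux/errno.h>        /* -EFAULT */
    #include <asm/uaccess.h>        /* copy_from_user(), copy_to_user() */

    typedef struct { int func; int value; } my_args_t;

    static int my_copy_ioctl(unsigned long arg)
    {
            my_args_t args;

            /* Old form:
             *   copy_from_user_ret(&args, (my_args_t *)arg, sizeof(args), -EFAULT);
             * New form: open-code the check.  copy_from_user() returns the
             * number of bytes it could NOT copy, so nonzero means failure.
             */
            if (copy_from_user(&args, (my_args_t *)arg, sizeof(args)))
                    return -EFAULT;

            args.value++;           /* ... act on the request ... */

            if (copy_to_user((my_args_t *)arg, &args, sizeof(args)))
                    return -EFAULT;
            return 0;
    }

Spelling the check out at each call site keeps the error return visible in the driver code instead of hiding a return statement inside a macro, which is why the conversion touches so many ioctl handlers here.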
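
The other repeated change is the write-combine flush: the i386-only inline-assembly helpers (xchg plus a serializing cpuid) in mga_dma.c and r128_dma.c are dropped in favour of macros that expand to mb(), so the per-call-site #ifdef __i386__ guards disappear. A short sketch, assuming mb() comes from <asm/system.h> as on kernels of this vintage; the macro names are taken from the diff, the commented call site is only an illustration of where the barrier sits relative to the DMA kick-off:

    /* Sketch of the flush-write-combine replacement. */
    #include <asm/system.h>         /* mb() */

    #define mga_flush_write_combine()        mb()
    #define r128_flush_write_combine()       mb()

    /* Call sites lose their #ifdef __i386__ guards and simply do:
     *
     *        mga_flush_write_combine();
     *        MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
     *
     * i.e. force pending write-combined stores out to memory before
     * handing the primary buffer address to the hardware.
     */

Using a full memory barrier is a portable, architecture-independent way to get the same ordering guarantee the hand-rolled x86 sequence provided, which is what lets the flush be called unconditionally on every architecture.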