author    | Keith Whitwell <keith@tungstengraphics.com> | 2005-02-10 11:02:56 +0000
committer | Keith Whitwell <keith@tungstengraphics.com> | 2005-02-10 11:02:56 +0000
commit    | 08790293b13bb4562307309461400dad22c72eaf (patch)
tree      | 84d01184e5bcb7e9d77ecf7246562eda8ddde4d7 /shared
parent    | 2cdfb2cc8527a6a805b159006885268607383448 (diff)
Stephane's port of Eric's race fix
Diffstat (limited to 'shared')
-rw-r--r-- | shared/radeon_drv.h   |  29 |
-rw-r--r-- | shared/radeon_state.c | 154 |
2 files changed, 81 insertions, 102 deletions
diff --git a/shared/radeon_drv.h b/shared/radeon_drv.h
index f48a95c1..89e68833 100644
--- a/shared/radeon_drv.h
+++ b/shared/radeon_drv.h
@@ -976,25 +976,26 @@ do { \
 } while (0)
 
-#define OUT_RING_USER_TABLE( tab, sz ) do { \
+#define OUT_RING_TABLE( tab, sz ) do { \
         int _size = (sz); \
-        int __user *_tab = (tab); \
+        int *_tab = (int *)(tab); \
         \
         if (write + _size > mask) { \
-                int i = (mask+1) - write; \
-                if (DRM_COPY_FROM_USER_UNCHECKED( (int *)(ring+write), \
-                                                  _tab, i*4 )) \
-                        return DRM_ERR(EFAULT); \
+                int _i = (mask+1) - write; \
+                _size -= _i; \
+                while (_i > 0) { \
+                        *(int *)(ring + write) = *_tab++; \
+                        write++; \
+                        _i--; \
+                } \
                 write = 0; \
-                _size -= i; \
-                _tab += i; \
+                _tab += _i; \
+        } \
+        while (_size > 0) { \
+                *(ring + write) = *_tab++; \
+                write++; \
+                _size--; \
         } \
-        \
-        if (_size && DRM_COPY_FROM_USER_UNCHECKED( (int *)(ring+write), \
-                                                   _tab, _size*4 )) \
-                return DRM_ERR(EFAULT); \
-        \
-        write += _size; \
         write &= mask; \
 } while (0)
diff --git a/shared/radeon_state.c b/shared/radeon_state.c
index ba8e73a4..99c1899b 100644
--- a/shared/radeon_state.c
+++ b/shared/radeon_state.c
@@ -64,21 +64,6 @@ static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_p
         return 0;
 }
 
-static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
-                                                           drm_file_t *filp_priv,
-                                                           u32 __user *offset ) {
-        u32 off;
-
-        DRM_GET_USER_UNCHECKED( off, offset );
-
-        if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &off ) )
-                return DRM_ERR( EINVAL );
-
-        DRM_PUT_USER_UNCHECKED( offset, off );
-
-        return 0;
-}
-
 static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
                                                        drm_file_t *filp_priv,
                                                        int id,
@@ -86,18 +71,16 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
 
         switch ( id ) {
         case RADEON_EMIT_PP_MISC:
-                if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
-                                                          &data[( RADEON_RB3D_DEPTHOFFSET
-                                                                  - RADEON_PP_MISC ) / 4] ) ) {
+                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+                                &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
                         DRM_ERROR( "Invalid depth buffer offset\n" );
                         return DRM_ERR( EINVAL );
                 }
                 break;
 
         case RADEON_EMIT_PP_CNTL:
-                if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
-                                                          &data[( RADEON_RB3D_COLOROFFSET
-                                                                  - RADEON_PP_CNTL ) / 4] ) ) {
+                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+                                &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
                         DRM_ERROR( "Invalid colour buffer offset\n" );
                         return DRM_ERR( EINVAL );
                 }
@@ -109,8 +92,8 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
         case R200_EMIT_PP_TXOFFSET_3:
         case R200_EMIT_PP_TXOFFSET_4:
         case R200_EMIT_PP_TXOFFSET_5:
-                if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
-                                                          &data[0] ) ) {
+                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+                                                  &data[0])) {
                         DRM_ERROR( "Invalid R200 texture offset\n" );
                         return DRM_ERR( EINVAL );
                 }
@@ -119,9 +102,8 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
         case RADEON_EMIT_PP_TXFILTER_0:
         case RADEON_EMIT_PP_TXFILTER_1:
         case RADEON_EMIT_PP_TXFILTER_2:
-                if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
-                                                          &data[( RADEON_PP_TXOFFSET_0
-                                                                  - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
+                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+                                &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
                         DRM_ERROR( "Invalid R100 texture offset\n" );
                         return DRM_ERR( EINVAL );
                 }
@@ -135,9 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
         case R200_EMIT_PP_CUBIC_OFFSETS_5: {
                 int i;
                 for ( i = 0; i < 5; i++ ) {
-                        if ( radeon_check_and_fixup_offset_user( dev_priv,
-                                                                  filp_priv,
-                                                                  &data[i] ) ) {
+                        if (radeon_check_and_fixup_offset(dev_priv,
+                                                          filp_priv,
+                                                          &data[i])) {
                                 DRM_ERROR( "Invalid R200 cubic texture offset\n" );
                                 return DRM_ERR( EINVAL );
                         }
@@ -221,17 +203,11 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
                                                        drm_file_t *filp_priv,
                                                        drm_radeon_cmd_buffer_t *cmdbuf,
                                                        unsigned int *cmdsz ) {
-        u32 tmp[4];
-        u32 __user *cmd = (u32 __user *)cmdbuf->buf;
-
-        if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
-                DRM_ERROR( "Failed to copy data from user space\n" );
-                return DRM_ERR( EFAULT );
-        }
+        u32 *cmd = (u32 *) cmdbuf->buf;
 
-        *cmdsz = 2 + ( ( tmp[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
-
-        if ( ( tmp[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
+        *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+
+        if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
                 DRM_ERROR( "Not a type 3 packet\n" );
                 return DRM_ERR( EINVAL );
         }
@@ -242,32 +218,27 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
         }
 
         /* Check client state and fix it up if necessary */
-        if ( tmp[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
+        if (cmd[0] & 0x8000) { /* MSB of opcode: next DWORD GUI_CNTL */
                 u32 offset;
 
-                if ( tmp[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+                if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
                               | RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
-                        offset = tmp[2] << 10;
+                        offset = cmd[2] << 10;
                         if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
                                 DRM_ERROR( "Invalid first packet offset\n" );
                                 return DRM_ERR( EINVAL );
                         }
-                        tmp[2] = ( tmp[2] & 0xffc00000 ) | offset >> 10;
+                        cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
                 }
 
-                if ( ( tmp[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
-                     ( tmp[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
-                        offset = tmp[3] << 10;
+                if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+                    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+                        offset = cmd[3] << 10;
                         if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
                                 DRM_ERROR( "Invalid second packet offset\n" );
                                 return DRM_ERR( EINVAL );
                         }
-                        tmp[3] = ( tmp[3] & 0xffc00000 ) | offset >> 10;
-                }
-
-                if ( DRM_COPY_TO_USER_UNCHECKED( cmd, tmp, sizeof( tmp ) ) ) {
-                        DRM_ERROR( "Failed to copy data to user space\n" );
-                        return DRM_ERR( EFAULT );
+                        cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
                 }
         }
@@ -2451,7 +2422,7 @@ static int radeon_emit_packets(
 {
         int id = (int)header.packet.packet_id;
         int sz, reg;
-        int __user *data = (int __user *)cmdbuf->buf;
+        int *data = (int *)cmdbuf->buf;
         RING_LOCALS;
 
         if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2472,7 +2443,7 @@ static int radeon_emit_packets(
 
         BEGIN_RING(sz+1);
         OUT_RING( CP_PACKET0( reg, (sz-1) ) );
-        OUT_RING_USER_TABLE( data, sz );
+        OUT_RING_TABLE(data, sz);
         ADVANCE_RING();
 
         cmdbuf->buf += sz * sizeof(int);
@@ -2486,7 +2457,6 @@ static __inline__ int radeon_emit_scalars(
         drm_radeon_cmd_buffer_t *cmdbuf )
 {
         int sz = header.scalars.count;
-        int __user *data = (int __user *)cmdbuf->buf;
         int start = header.scalars.offset;
         int stride = header.scalars.stride;
         RING_LOCALS;
@@ -2495,7 +2465,7 @@ static __inline__ int radeon_emit_scalars(
         OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
         OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
         OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
-        OUT_RING_USER_TABLE( data, sz );
+        OUT_RING_TABLE(cmdbuf->buf, sz);
         ADVANCE_RING();
         cmdbuf->buf += sz * sizeof(int);
         cmdbuf->bufsz -= sz * sizeof(int);
@@ -2510,7 +2480,6 @@ static __inline__ int radeon_emit_scalars2(
         drm_radeon_cmd_buffer_t *cmdbuf )
 {
         int sz = header.scalars.count;
-        int __user *data = (int __user *)cmdbuf->buf;
         int start = ((unsigned int)header.scalars.offset) + 0x100;
         int stride = header.scalars.stride;
         RING_LOCALS;
@@ -2519,7 +2488,7 @@ static __inline__ int radeon_emit_scalars2(
         OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
         OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
         OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
-        OUT_RING_USER_TABLE( data, sz );
+        OUT_RING_TABLE(cmdbuf->buf, sz);
         ADVANCE_RING();
         cmdbuf->buf += sz * sizeof(int);
         cmdbuf->bufsz -= sz * sizeof(int);
@@ -2532,7 +2501,6 @@ static __inline__ int radeon_emit_vectors(
         drm_radeon_cmd_buffer_t *cmdbuf )
 {
         int sz = header.vectors.count;
-        int __user *data = (int __user *)cmdbuf->buf;
         int start = header.vectors.offset;
         int stride = header.vectors.stride;
         RING_LOCALS;
@@ -2541,7 +2509,7 @@ static __inline__ int radeon_emit_vectors(
         OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
         OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
         OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
-        OUT_RING_USER_TABLE( data, sz );
+        OUT_RING_TABLE(cmdbuf->buf, sz);
         ADVANCE_RING();
 
         cmdbuf->buf += sz * sizeof(int);
@@ -2556,7 +2524,6 @@ static int radeon_emit_packet3( drm_device_t *dev,
 {
         drm_radeon_private_t *dev_priv = dev->dev_private;
         unsigned int cmdsz;
-        int __user *cmd = (int __user *)cmdbuf->buf;
         int ret;
         RING_LOCALS;
 
@@ -2569,7 +2536,7 @@ static int radeon_emit_packet3( drm_device_t *dev,
         }
 
         BEGIN_RING( cmdsz );
-        OUT_RING_USER_TABLE( cmd, cmdsz );
+        OUT_RING_TABLE(cmdbuf->buf, cmdsz);
         ADVANCE_RING();
 
         cmdbuf->buf += cmdsz * 4;
@@ -2586,7 +2553,6 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
         drm_radeon_private_t *dev_priv = dev->dev_private;
         drm_clip_rect_t box;
         unsigned int cmdsz;
-        int __user *cmd = (int __user *)cmdbuf->buf;
         int ret;
         drm_clip_rect_t __user *boxes = cmdbuf->boxes;
         int i = 0;
@@ -2628,7 +2594,7 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
                 }
 
                 BEGIN_RING( cmdsz );
-                OUT_RING_USER_TABLE( cmd, cmdsz );
+                OUT_RING_TABLE(cmdbuf->buf, cmdsz);
                 ADVANCE_RING();
 
         } while ( ++i < cmdbuf->nbox );
@@ -2681,7 +2647,8 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
         int idx;
         drm_radeon_cmd_buffer_t cmdbuf;
         drm_radeon_cmd_header_t header;
-        int orig_nbox;
+        int orig_nbox, orig_bufsz;
+        char *kbuf;
 
         LOCK_TEST_WITH_RETURN( dev, filp );
 
@@ -2699,23 +2666,28 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 
         VB_AGE_TEST_WITH_RETURN( dev_priv );
 
-        if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
-                return DRM_ERR(EFAULT);
+        if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) {
+                return DRM_ERR(EINVAL);
+        }
 
-        if (cmdbuf.nbox &&
-            DRM_VERIFYAREA_READ(cmdbuf.boxes,
-                                cmdbuf.nbox * sizeof(drm_clip_rect_t)))
-                return DRM_ERR(EFAULT);
+        /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
+         * races between checking values and using those values in other code,
+         * and simply to avoid a lot of function calls to copy in data.
+         */
+        orig_bufsz = cmdbuf.bufsz;
+        if (orig_bufsz != 0) {
+                kbuf = DRM(alloc)(cmdbuf.bufsz, DRM_MEM_DRIVER);
+                if (kbuf == NULL)
+                        return DRM_ERR(ENOMEM);
+                if (DRM_COPY_FROM_USER(kbuf, cmdbuf.buf, cmdbuf.bufsz))
+                        return DRM_ERR(EFAULT);
+                cmdbuf.buf = kbuf;
+        }
 
         orig_nbox = cmdbuf.nbox;
 
         while ( cmdbuf.bufsz >= sizeof(header) ) {
-
-                if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
-                        DRM_ERROR("__get_user %p\n", cmdbuf.buf);
-                        return DRM_ERR(EFAULT);
-                }
-
+                header.i = *(int *)cmdbuf.buf;
                 cmdbuf.buf += sizeof(header);
                 cmdbuf.bufsz -= sizeof(header);
 
@@ -2724,7 +2696,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_PACKET\n");
                         if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
                                 DRM_ERROR("radeon_emit_packets failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
@@ -2732,7 +2704,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_SCALARS\n");
                         if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
                                 DRM_ERROR("radeon_emit_scalars failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
@@ -2740,7 +2712,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_VECTORS\n");
                         if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
                                 DRM_ERROR("radeon_emit_vectors failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
@@ -2750,14 +2722,14 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         if ( idx < 0 || idx >= dma->buf_count ) {
                                 DRM_ERROR( "buffer index %d (of %d max)\n",
                                            idx, dma->buf_count - 1 );
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
 
                         buf = dma->buflist[idx];
                         if ( buf->filp != filp || buf->pending ) {
                                 DRM_ERROR( "bad buffer %p %p %d\n",
                                            buf->filp, filp, buf->pending);
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
 
                         radeon_cp_discard_buffer( dev, buf );
@@ -2767,7 +2739,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_PACKET3\n");
                         if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
                                 DRM_ERROR("radeon_emit_packet3 failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
@@ -2775,7 +2747,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
                         if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
                                 DRM_ERROR("radeon_emit_packet3_clip failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
@@ -2783,7 +2755,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_SCALARS2\n");
                         if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
                                 DRM_ERROR("radeon_emit_scalars2 failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
@@ -2791,21 +2763,27 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
                         DRM_DEBUG("RADEON_CMD_WAIT\n");
                         if (radeon_emit_wait( dev, header.wait.flags )) {
                                 DRM_ERROR("radeon_emit_wait failed\n");
-                                return DRM_ERR(EINVAL);
+                                goto err;
                         }
                         break;
 
                 default:
                         DRM_ERROR("bad cmd_type %d at %p\n",
                                   header.header.cmd_type,
                                   cmdbuf.buf - sizeof(header));
-                        return DRM_ERR(EINVAL);
+                        goto err;
                 }
         }
-
+        if (orig_bufsz != 0)
+                DRM(free)(kbuf, orig_bufsz, DRM_MEM_DRIVER);
         DRM_DEBUG("DONE\n");
         COMMIT_RING();
         return 0;
+
+err:
+        if (orig_bufsz != 0)
+                DRM(free)(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+        return DRM_ERR(EINVAL);
 }
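The substance of the fix is easiest to see in radeon_cp_cmdbuf(): instead of validating packets directly in the user-space buffer (where another thread could rewrite a dword between the check and the ring emit), the ioctl now copies the whole command buffer into a kernel allocation up front, and every later check, fixup and OUT_RING_TABLE() emit operates on that private copy. The sketch below is a minimal, self-contained illustration of this copy-then-validate pattern, not the driver code itself: the names process_cmdbuf, validate_dword and MAX_CMDBUF_SIZE are invented for the example, and plain malloc()/memcpy() stand in for DRM(alloc)() and DRM_COPY_FROM_USER().

```c
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_CMDBUF_SIZE (64 * 1024)     /* mirrors the 64 KiB cap added by the patch */

/* Hypothetical per-dword check, standing in for radeon_check_and_fixup_offset(). */
static int validate_dword(uint32_t *dword)
{
        if (*dword & 0x3)               /* e.g. require 4-byte alignment */
                return -EINVAL;
        return 0;
}

/*
 * Copy-then-validate: snapshot the untrusted buffer into memory the caller
 * cannot touch *before* any checks run, so a concurrent writer can no longer
 * change a dword after it has been validated but before it is used.
 */
static int process_cmdbuf(const uint32_t *untrusted_buf, size_t bufsz)
{
        uint32_t *kbuf;
        size_t i, n;
        int ret = 0;

        if (bufsz > MAX_CMDBUF_SIZE || bufsz % sizeof(uint32_t))
                return -EINVAL;
        if (bufsz == 0)
                return 0;

        kbuf = malloc(bufsz);                   /* driver: DRM(alloc)(..., DRM_MEM_DRIVER) */
        if (kbuf == NULL)
                return -ENOMEM;
        memcpy(kbuf, untrusted_buf, bufsz);     /* driver: DRM_COPY_FROM_USER() */

        n = bufsz / sizeof(uint32_t);
        for (i = 0; i < n; i++) {
                ret = validate_dword(&kbuf[i]);
                if (ret)
                        goto out;
                /* ...emit kbuf[i] to the ring here (OUT_RING_TABLE in the driver)... */
        }

out:
        free(kbuf);     /* freed on both the success and the error path, as in the patch */
        return ret;
}
```

The up-front size check plays the same role as the new cmdbuf.bufsz test in the patch: it bounds the allocation an untrusted caller can trigger before any copying happens.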