/* radeon_state.c -- State support for Radeon -*- linux-c -*- * * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes * Kevin E. Martin */ #include "radeon.h" #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "radeon_drm.h" #include "radeon_drv.h" /* ================================================================ * Helper functions for client state checking and fixup */ static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_priv, drm_file_t *filp_priv, u32 *offset ) { u32 off = *offset; struct drm_radeon_driver_file_fields *radeon_priv; if ( off >= dev_priv->fb_location && off < ( dev_priv->gart_vm_start + dev_priv->gart_size ) ) return 0; radeon_priv = filp_priv->driver_priv; off += radeon_priv->radeon_fb_delta; DRM_DEBUG( "offset fixed up to 0x%x\n", off ); if ( off < dev_priv->fb_location || off >= ( dev_priv->gart_vm_start + dev_priv->gart_size ) ) return DRM_ERR( EINVAL ); *offset = off; return 0; } static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv, drm_file_t *filp_priv, int id, u32 __user *data ) { switch ( id ) { case RADEON_EMIT_PP_MISC: if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { DRM_ERROR( "Invalid depth buffer offset\n" ); return DRM_ERR( EINVAL ); } break; case RADEON_EMIT_PP_CNTL: if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { DRM_ERROR( "Invalid colour buffer offset\n" ); return DRM_ERR( EINVAL ); } break; case R200_EMIT_PP_TXOFFSET_0: case R200_EMIT_PP_TXOFFSET_1: case R200_EMIT_PP_TXOFFSET_2: case R200_EMIT_PP_TXOFFSET_3: case R200_EMIT_PP_TXOFFSET_4: case R200_EMIT_PP_TXOFFSET_5: if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[0])) { DRM_ERROR( "Invalid R200 texture offset\n" ); return DRM_ERR( EINVAL ); } break; case RADEON_EMIT_PP_TXFILTER_0: case RADEON_EMIT_PP_TXFILTER_1: case RADEON_EMIT_PP_TXFILTER_2: if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { DRM_ERROR( "Invalid R100 texture offset\n" ); return DRM_ERR( EINVAL ); } break; case R200_EMIT_PP_CUBIC_OFFSETS_0: case R200_EMIT_PP_CUBIC_OFFSETS_1: case R200_EMIT_PP_CUBIC_OFFSETS_2: case R200_EMIT_PP_CUBIC_OFFSETS_3: case R200_EMIT_PP_CUBIC_OFFSETS_4: case R200_EMIT_PP_CUBIC_OFFSETS_5: { int i; 
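                /* Each of these packets carries five cubic-map offsets
                 * (cf. the 5-dword R200_PP_CUBIC_OFFSET_F1_* entries in
                 * the packet table below), and every one of them has to
                 * pass the same range check.
                 */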
for ( i = 0; i < 5; i++ ) { if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[i])) { DRM_ERROR( "Invalid R200 cubic texture offset\n" ); return DRM_ERR( EINVAL ); } } break; } case RADEON_EMIT_PP_CUBIC_OFFSETS_T0: case RADEON_EMIT_PP_CUBIC_OFFSETS_T1: case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ int i; for (i = 0; i < 5; i++) { if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &data[i])) { DRM_ERROR ("Invalid R100 cubic texture offset\n"); return DRM_ERR(EINVAL); } } } break; case RADEON_EMIT_RB3D_COLORPITCH: case RADEON_EMIT_RE_LINE_PATTERN: case RADEON_EMIT_SE_LINE_WIDTH: case RADEON_EMIT_PP_LUM_MATRIX: case RADEON_EMIT_PP_ROT_MATRIX_0: case RADEON_EMIT_RB3D_STENCILREFMASK: case RADEON_EMIT_SE_VPORT_XSCALE: case RADEON_EMIT_SE_CNTL: case RADEON_EMIT_SE_CNTL_STATUS: case RADEON_EMIT_RE_MISC: case RADEON_EMIT_PP_BORDER_COLOR_0: case RADEON_EMIT_PP_BORDER_COLOR_1: case RADEON_EMIT_PP_BORDER_COLOR_2: case RADEON_EMIT_SE_ZBIAS_FACTOR: case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT: case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED: case R200_EMIT_PP_TXCBLEND_0: case R200_EMIT_PP_TXCBLEND_1: case R200_EMIT_PP_TXCBLEND_2: case R200_EMIT_PP_TXCBLEND_3: case R200_EMIT_PP_TXCBLEND_4: case R200_EMIT_PP_TXCBLEND_5: case R200_EMIT_PP_TXCBLEND_6: case R200_EMIT_PP_TXCBLEND_7: case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: case R200_EMIT_TFACTOR_0: case R200_EMIT_VTX_FMT_0: case R200_EMIT_VAP_CTL: case R200_EMIT_MATRIX_SELECT_0: case R200_EMIT_TEX_PROC_CTL_2: case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: case R200_EMIT_PP_TXFILTER_0: case R200_EMIT_PP_TXFILTER_1: case R200_EMIT_PP_TXFILTER_2: case R200_EMIT_PP_TXFILTER_3: case R200_EMIT_PP_TXFILTER_4: case R200_EMIT_PP_TXFILTER_5: case R200_EMIT_VTE_CNTL: case R200_EMIT_OUTPUT_VTX_COMP_SEL: case R200_EMIT_PP_TAM_DEBUG3: case R200_EMIT_PP_CNTL_X: case R200_EMIT_RB3D_DEPTHXY_OFFSET: case R200_EMIT_RE_AUX_SCISSOR_CNTL: case R200_EMIT_RE_SCISSOR_TL_0: case R200_EMIT_RE_SCISSOR_TL_1: case R200_EMIT_RE_SCISSOR_TL_2: case R200_EMIT_SE_VAP_CNTL_STATUS: case R200_EMIT_SE_VTX_STATE_CNTL: case R200_EMIT_RE_POINTSIZE: case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0: case R200_EMIT_PP_CUBIC_FACES_0: case R200_EMIT_PP_CUBIC_FACES_1: case R200_EMIT_PP_CUBIC_FACES_2: case R200_EMIT_PP_CUBIC_FACES_3: case R200_EMIT_PP_CUBIC_FACES_4: case R200_EMIT_PP_CUBIC_FACES_5: case RADEON_EMIT_PP_TEX_SIZE_0: case RADEON_EMIT_PP_TEX_SIZE_1: case RADEON_EMIT_PP_TEX_SIZE_2: case R200_EMIT_RB3D_BLENDCOLOR: case R200_EMIT_TCL_POINT_SPRITE_CNTL: case RADEON_EMIT_PP_CUBIC_FACES_0: case RADEON_EMIT_PP_CUBIC_FACES_1: case RADEON_EMIT_PP_CUBIC_FACES_2: case R200_EMIT_PP_TRI_PERF_CNTL: /* These packets don't contain memory offsets */ break; default: DRM_ERROR( "Unknown state packet ID %d\n", id ); return DRM_ERR( EINVAL ); } return 0; } static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_priv, drm_file_t *filp_priv, drm_radeon_cmd_buffer_t *cmdbuf, unsigned int *cmdsz ) { u32 *cmd = (u32 *) cmdbuf->buf; *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { DRM_ERROR( "Not a type 3 packet\n" ); return DRM_ERR( EINVAL ); } if ( 4 * *cmdsz > cmdbuf->bufsz ) { DRM_ERROR( "Packet size larger than size of data provided\n" ); return DRM_ERR( EINVAL ); } /* Check client state and fix it up if necessary */ if (cmd[0] & 0x8000) { /* MSB of opcode: next DWORD GUI_CNTL */ u32 offset; if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) { offset = cmd[2] << 10; if ( radeon_check_and_fixup_offset( dev_priv, 
                                                            filp_priv, &offset ) ) {
                                DRM_ERROR( "Invalid first packet offset\n" );
                                return DRM_ERR( EINVAL );
                        }
                        cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
                }

                if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
                    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
                        offset = cmd[3] << 10;
                        if ( radeon_check_and_fixup_offset( dev_priv,
                                                            filp_priv, &offset ) ) {
                                DRM_ERROR( "Invalid second packet offset\n" );
                                return DRM_ERR( EINVAL );
                        }
                        cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
                }
        }

        return 0;
}

/* ================================================================
 * CP hardware state programming functions
 */

static __inline__ void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
                                              drm_clip_rect_t *box )
{
        RING_LOCALS;

        DRM_DEBUG( " box: x1=%d y1=%d x2=%d y2=%d\n",
                   box->x1, box->y1, box->x2, box->y2 );

        BEGIN_RING( 4 );
        OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
        OUT_RING( (box->y1 << 16) | box->x1 );
        OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
        OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
        ADVANCE_RING();
}

/* Emit 1.1 state
 */
static int radeon_emit_state( drm_radeon_private_t *dev_priv,
                              drm_file_t *filp_priv,
                              drm_radeon_context_regs_t *ctx,
                              drm_radeon_texture_regs_t *tex,
                              unsigned int dirty )
{
        RING_LOCALS;
        DRM_DEBUG( "dirty=0x%08x\n", dirty );

        if ( dirty & RADEON_UPLOAD_CONTEXT ) {
                if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
                                                    &ctx->rb3d_depthoffset ) ) {
                        DRM_ERROR( "Invalid depth buffer offset\n" );
                        return DRM_ERR( EINVAL );
                }

                if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
                                                    &ctx->rb3d_coloroffset ) ) {
                        DRM_ERROR( "Invalid colour buffer offset\n" );
                        return DRM_ERR( EINVAL );
                }

                BEGIN_RING( 14 );
                OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
                OUT_RING( ctx->pp_misc );
                OUT_RING( ctx->pp_fog_color );
                OUT_RING( ctx->re_solid_color );
                OUT_RING( ctx->rb3d_blendcntl );
                OUT_RING( ctx->rb3d_depthoffset );
                OUT_RING( ctx->rb3d_depthpitch );
                OUT_RING( ctx->rb3d_zstencilcntl );
                OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
                OUT_RING( ctx->pp_cntl );
                OUT_RING( ctx->rb3d_cntl );
                OUT_RING( ctx->rb3d_coloroffset );
                OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
                OUT_RING( ctx->rb3d_colorpitch );
                ADVANCE_RING();
        }

        if ( dirty & RADEON_UPLOAD_VERTFMT ) {
                BEGIN_RING( 2 );
                OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
                OUT_RING( ctx->se_coord_fmt );
                ADVANCE_RING();
        }

        if ( dirty & RADEON_UPLOAD_LINE ) {
                BEGIN_RING( 5 );
                OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
                OUT_RING( ctx->re_line_pattern );
                OUT_RING( ctx->re_line_state );
                OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
                OUT_RING( ctx->se_line_width );
                ADVANCE_RING();
        }

        if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
                BEGIN_RING( 5 );
                OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
                OUT_RING( ctx->pp_lum_matrix );
                OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
                OUT_RING( ctx->pp_rot_matrix_0 );
                OUT_RING( ctx->pp_rot_matrix_1 );
                ADVANCE_RING();
        }

        if ( dirty & RADEON_UPLOAD_MASKS ) {
                BEGIN_RING( 4 );
                OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
                OUT_RING( ctx->rb3d_stencilrefmask );
                OUT_RING( ctx->rb3d_ropcntl );
                OUT_RING( ctx->rb3d_planemask );
                ADVANCE_RING();
        }

        if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
                BEGIN_RING( 7 );
                OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
                OUT_RING( ctx->se_vport_xscale );
                OUT_RING( ctx->se_vport_xoffset );
                OUT_RING( ctx->se_vport_yscale );
                OUT_RING( ctx->se_vport_yoffset );
                OUT_RING( ctx->se_vport_zscale );
                OUT_RING( ctx->se_vport_zoffset );
                ADVANCE_RING();
        }

        if ( dirty & RADEON_UPLOAD_SETUP ) {
                BEGIN_RING( 4 );
                OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
                OUT_RING( ctx->se_cntl );
OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); OUT_RING( ctx->se_cntl_status ); ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MISC ) { BEGIN_RING( 2 ); OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); OUT_RING( ctx->re_misc ); ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX0 ) { if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex[0].pp_txoffset ) ) { DRM_ERROR( "Invalid texture offset for unit 0\n" ); return DRM_ERR( EINVAL ); } BEGIN_RING( 9 ); OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); OUT_RING( tex[0].pp_txfilter ); OUT_RING( tex[0].pp_txformat ); OUT_RING( tex[0].pp_txoffset ); OUT_RING( tex[0].pp_txcblend ); OUT_RING( tex[0].pp_txablend ); OUT_RING( tex[0].pp_tfactor ); OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); OUT_RING( tex[0].pp_border_color ); ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX1 ) { if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex[1].pp_txoffset ) ) { DRM_ERROR( "Invalid texture offset for unit 1\n" ); return DRM_ERR( EINVAL ); } BEGIN_RING( 9 ); OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); OUT_RING( tex[1].pp_txfilter ); OUT_RING( tex[1].pp_txformat ); OUT_RING( tex[1].pp_txoffset ); OUT_RING( tex[1].pp_txcblend ); OUT_RING( tex[1].pp_txablend ); OUT_RING( tex[1].pp_tfactor ); OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); OUT_RING( tex[1].pp_border_color ); ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX2 ) { if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex[2].pp_txoffset ) ) { DRM_ERROR( "Invalid texture offset for unit 2\n" ); return DRM_ERR( EINVAL ); } BEGIN_RING( 9 ); OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); OUT_RING( tex[2].pp_txfilter ); OUT_RING( tex[2].pp_txformat ); OUT_RING( tex[2].pp_txoffset ); OUT_RING( tex[2].pp_txcblend ); OUT_RING( tex[2].pp_txablend ); OUT_RING( tex[2].pp_tfactor ); OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); OUT_RING( tex[2].pp_border_color ); ADVANCE_RING(); } return 0; } /* Emit 1.2 state */ static int radeon_emit_state2( drm_radeon_private_t *dev_priv, drm_file_t *filp_priv, drm_radeon_state_t *state ) { RING_LOCALS; if (state->dirty & RADEON_UPLOAD_ZBIAS) { BEGIN_RING( 3 ); OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) ); OUT_RING( state->context2.se_zbias_factor ); OUT_RING( state->context2.se_zbias_constant ); ADVANCE_RING(); } return radeon_emit_state( dev_priv, filp_priv, &state->context, state->tex, state->dirty ); } /* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in * 1.3 cmdbuffers allow all previous state to be updated as well as * the tcl scalar and vector areas. 
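 *
 * Each entry in the table below gives, for one RADEON_EMIT_*/R200_EMIT_*
 * packet id (the ids validated in radeon_check_and_fixup_packets() above),
 * the first register written and the number of dwords the packet carries.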
*/ static struct { int start; int len; const char *name; } packet[RADEON_MAX_STATE_PACKETS] = { { RADEON_PP_MISC,7,"RADEON_PP_MISC" }, { RADEON_PP_CNTL,3,"RADEON_PP_CNTL" }, { RADEON_RB3D_COLORPITCH,1,"RADEON_RB3D_COLORPITCH" }, { RADEON_RE_LINE_PATTERN,2,"RADEON_RE_LINE_PATTERN" }, { RADEON_SE_LINE_WIDTH,1,"RADEON_SE_LINE_WIDTH" }, { RADEON_PP_LUM_MATRIX,1,"RADEON_PP_LUM_MATRIX" }, { RADEON_PP_ROT_MATRIX_0,2,"RADEON_PP_ROT_MATRIX_0" }, { RADEON_RB3D_STENCILREFMASK,3,"RADEON_RB3D_STENCILREFMASK" }, { RADEON_SE_VPORT_XSCALE,6,"RADEON_SE_VPORT_XSCALE" }, { RADEON_SE_CNTL,2,"RADEON_SE_CNTL" }, { RADEON_SE_CNTL_STATUS,1,"RADEON_SE_CNTL_STATUS" }, { RADEON_RE_MISC,1,"RADEON_RE_MISC" }, { RADEON_PP_TXFILTER_0,6,"RADEON_PP_TXFILTER_0" }, { RADEON_PP_BORDER_COLOR_0,1,"RADEON_PP_BORDER_COLOR_0" }, { RADEON_PP_TXFILTER_1,6,"RADEON_PP_TXFILTER_1" }, { RADEON_PP_BORDER_COLOR_1,1,"RADEON_PP_BORDER_COLOR_1" }, { RADEON_PP_TXFILTER_2,6,"RADEON_PP_TXFILTER_2" }, { RADEON_PP_BORDER_COLOR_2,1,"RADEON_PP_BORDER_COLOR_2" }, { RADEON_SE_ZBIAS_FACTOR,2,"RADEON_SE_ZBIAS_FACTOR" }, { RADEON_SE_TCL_OUTPUT_VTX_FMT,11,"RADEON_SE_TCL_OUTPUT_VTX_FMT" }, { RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED,17,"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" }, { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" }, { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" }, { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" }, { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" }, { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" }, { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" }, { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" }, { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" }, { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" }, { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" }, { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" }, { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" }, { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" }, { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" }, { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" }, { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" }, { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" }, { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" }, { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" }, { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" }, { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" }, { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" }, { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" }, { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" }, { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" }, { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" }, { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" }, { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" }, { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" }, { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" }, { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" }, { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" }, { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" }, { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" }, { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" }, { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" }, { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" }, { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" }, { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" }, { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" }, { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */ { R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */ { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" }, { 
          R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
        { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
        { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
        { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
        { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
        { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
        { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
        { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
        { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
        { RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0" },
        { RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1" },
        { RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2" },
        { R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR" },
        { R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
        { RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
        { RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
        { RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
        { RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
        { RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
        { RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
        { R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
};

/* ================================================================
 * Performance monitoring functions
 */

static void radeon_clear_box( drm_radeon_private_t *dev_priv,
                              int x, int y, int w, int h,
                              int r, int g, int b )
{
        u32 color;
        RING_LOCALS;

        x += dev_priv->sarea_priv->boxes[0].x1;
        y += dev_priv->sarea_priv->boxes[0].y1;

        switch ( dev_priv->color_fmt ) {
        case RADEON_COLOR_FORMAT_RGB565:
                color = (((r & 0xf8) << 8) |
                         ((g & 0xfc) << 3) |
                         ((b & 0xf8) >> 3));
                break;
        case RADEON_COLOR_FORMAT_ARGB8888:
        default:
                color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
                break;
        }

        BEGIN_RING( 4 );
        RADEON_WAIT_UNTIL_3D_IDLE();

        OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
        OUT_RING( 0xffffffff );

        ADVANCE_RING();

        BEGIN_RING( 6 );

        OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
        OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                  RADEON_GMC_BRUSH_SOLID_COLOR |
                  (dev_priv->color_fmt << 8) |
                  RADEON_GMC_SRC_DATATYPE_COLOR |
                  RADEON_ROP3_P |
                  RADEON_GMC_CLR_CMP_CNTL_DIS );

        if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
                OUT_RING( dev_priv->front_pitch_offset );
        } else {
                OUT_RING( dev_priv->back_pitch_offset );
        }

        OUT_RING( color );

        OUT_RING( (x << 16) | y );
        OUT_RING( (w << 16) | h );

        ADVANCE_RING();
}

static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
{
        /* Collapse various things into a wait flag -- trying to
         * guess if userspace slept -- better just to have them tell us.
         */
        if (dev_priv->stats.last_frame_reads > 1 ||
            dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
                dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
        }

        if (dev_priv->stats.freelist_loops) {
                dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
        }

        /* Purple box for page flipping
         */
        if ( dev_priv->stats.boxes & RADEON_BOX_FLIP )
                radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 );

        /* Red box if we have to wait for idle at any point
         */
        if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE )
                radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 );

        /* Blue box: lost context?
         */

        /* Yellow box for texture swaps
         */
        if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD )
                radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 );

        /* Green box if hardware never idles (as far as we can tell)
         */
        if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) )
                radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );

        /* Draw bars indicating number of buffers allocated
         * (not a great measure, easily confused)
         */
        if (dev_priv->stats.requested_bufs) {
                if (dev_priv->stats.requested_bufs > 100)
                        dev_priv->stats.requested_bufs = 100;

                radeon_clear_box( dev_priv, 4, 16,
                                  dev_priv->stats.requested_bufs, 4,
                                  196, 128, 128 );
        }

        memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) );
}

/* ================================================================
 * CP command dispatch functions
 */

static void radeon_cp_dispatch_clear( drm_device_t *dev,
                                      drm_radeon_clear_t *clear,
                                      drm_radeon_clear_rect_t *depth_boxes )
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
        int nbox = sarea_priv->nbox;
        drm_clip_rect_t *pbox = sarea_priv->boxes;
        unsigned int flags = clear->flags;
        u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
        int i;
        RING_LOCALS;
        DRM_DEBUG( "flags = 0x%x\n", flags );

        dev_priv->stats.clears++;

        if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
                unsigned int tmp = flags;

                flags &= ~(RADEON_FRONT | RADEON_BACK);
                if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
                if ( tmp & RADEON_BACK )  flags |= RADEON_FRONT;
        }

        if ( flags & (RADEON_FRONT | RADEON_BACK) ) {

                BEGIN_RING( 4 );

                /* Ensure the 3D stream is idle before doing a
                 * 2D fill to clear the front or back buffer.
                 */
                RADEON_WAIT_UNTIL_3D_IDLE();

                OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
                OUT_RING( clear->color_mask );

                ADVANCE_RING();

                /* Make sure we restore the 3D state next time.
                 */
                dev_priv->sarea_priv->ctx_owner = 0;

                for ( i = 0 ; i < nbox ; i++ ) {
                        int x = pbox[i].x1;
                        int y = pbox[i].y1;
                        int w = pbox[i].x2 - x;
                        int h = pbox[i].y2 - y;

                        DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
                                   x, y, w, h, flags );

                        if ( flags & RADEON_FRONT ) {
                                BEGIN_RING( 6 );

                                OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
                                OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                                          RADEON_GMC_BRUSH_SOLID_COLOR |
                                          (dev_priv->color_fmt << 8) |
                                          RADEON_GMC_SRC_DATATYPE_COLOR |
                                          RADEON_ROP3_P |
                                          RADEON_GMC_CLR_CMP_CNTL_DIS );

                                OUT_RING( dev_priv->front_pitch_offset );
                                OUT_RING( clear->clear_color );

                                OUT_RING( (x << 16) | y );
                                OUT_RING( (w << 16) | h );

                                ADVANCE_RING();
                        }

                        if ( flags & RADEON_BACK ) {
                                BEGIN_RING( 6 );

                                OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
                                OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                                          RADEON_GMC_BRUSH_SOLID_COLOR |
                                          (dev_priv->color_fmt << 8) |
                                          RADEON_GMC_SRC_DATATYPE_COLOR |
                                          RADEON_ROP3_P |
                                          RADEON_GMC_CLR_CMP_CNTL_DIS );

                                OUT_RING( dev_priv->back_pitch_offset );
                                OUT_RING( clear->clear_color );

                                OUT_RING( (x << 16) | y );
                                OUT_RING( (w << 16) | h );

                                ADVANCE_RING();
                        }
                }
        }

/*
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

static void drm_fm_update_pointers(drm_fence_manager_t * fm,
                                   struct list_head *list, int no_types,
                                   uint32_t type)
{
        int i;

        for (i = 0; i < no_types; ++i) {
                if (type & (1 << i)) {
                        fm->fence_types[i] = list;
                }
        }
}

/*
 * Typically called by the IRQ handler.
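 *
 * Starting from the oldest fence affected by `type', walk the ring and
 * mark every fence whose sequence number has been reached as signaled,
 * fold any outstanding flush requests into fm->pending_flush, unlink
 * fences that are now completely signaled, and finally wake anything
 * sleeping on fm->fence_queue.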
*/ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type) { int i; int wake = 0; int largest = 0; uint32_t diff; uint32_t relevant; int index = 0; drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; struct list_head *list; struct list_head *fence_list; drm_fence_object_t *fence; int found = 0; for (i = 0; i < driver->no_types; ++i) { if (!(type & (1 << i))) continue; list = fm->fence_types[i]; fence_list = list->next; if (fence_list == &fm->ring) continue; fence = list_entry(fence_list, drm_fence_object_t, ring); diff = (sequence - fence->sequence) & driver->sequence_mask; if (diff < driver->wrap_diff) { if (diff >= largest) { largest = diff; index = i; found = 1; } } } if (!found) return; /* * Start with the fence object with the lowest sequence number, affected by * the type mask of this call. Update signaled fields, * Check if we need to wake sleeping processes */ list = fm->fence_types[index]->next; do { if (list == &fm->ring) { drm_fm_update_pointers(fm, list->prev, driver->no_types, type); break; } fence = list_entry(list, drm_fence_object_t, ring); diff = (sequence - fence->sequence) & driver->sequence_mask; if (diff >= driver->wrap_diff) { drm_fm_update_pointers(fm, fence->ring.prev, driver->no_types, type); break; } relevant = type & fence->type; if ((fence->signaled | relevant) != fence->signaled) { fence->signaled |= relevant; fence->submitted_flush |= relevant; wake = 1; } relevant = fence->flush_mask & ~(fence->signaled | fence->submitted_flush); if (relevant) { fm->pending_flush |= relevant; fence->submitted_flush = fence->flush_mask; } list = list->next; /* * Remove a completely signaled fence from the * fence manager ring. */ if (!(fence->type & ~fence->signaled)) { fence_list = &fence->ring; for (i = 0; i < driver->no_types; ++i) { if (fm->fence_types[i] == fence_list) fm->fence_types[i] = fence_list->prev; } list_del_init(fence_list); } } while (1); /* * Wake sleeping processes. 
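         * (anything blocked in drm_fence_object_wait() sleeps on fm->fence_queue).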
*/ if (wake) { DRM_WAKEUP(&fm->fence_queue); } } EXPORT_SYMBOL(drm_fence_handler); static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; unsigned long flags; int i; write_lock_irqsave(&fm->lock, flags); for (i = 0; i < driver->no_types; ++i) { if (fm->fence_types[i] == ring) fm->fence_types[i] = ring->prev; } list_del_init(ring); write_unlock_irqrestore(&fm->lock, flags); } void drm_fence_usage_deref_locked(drm_device_t * dev, drm_fence_object_t * fence) { if (atomic_dec_and_test(&fence->usage)) { drm_fence_unring(dev, &fence->ring); drm_free(fence, sizeof(*fence), DRM_MEM_FENCE); } } void drm_fence_usage_deref_unlocked(drm_device_t * dev, drm_fence_object_t * fence) { if (atomic_dec_and_test(&fence->usage)) { mutex_lock(&dev->struct_mutex); if (atomic_read(&fence->usage) == 0) { drm_fence_unring(dev, &fence->ring); drm_free(fence, sizeof(*fence), DRM_MEM_FENCE); } mutex_unlock(&dev->struct_mutex); } } static void drm_fence_object_destroy(drm_file_t * priv, drm_user_object_t * base) { drm_device_t *dev = priv->head->dev; drm_fence_object_t *fence = drm_user_object_entry(base, drm_fence_object_t, base); drm_fence_usage_deref_locked(dev, fence); } static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence, uint32_t mask, int poke_flush) { unsigned long flags; int signaled; drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; if (poke_flush) driver->poke_flush(dev); read_lock_irqsave(&fm->lock, flags); signaled = (fence->type & mask & fence->signaled) == (fence->type & mask); read_unlock_irqrestore(&fm->lock, flags); return signaled; } static void drm_fence_flush_exe(drm_fence_manager_t * fm, drm_fence_driver_t * driver, uint32_t sequence) { uint32_t diff; if (!fm->pending_exe_flush) { struct list_head *list; /* * Last_exe_flush is invalid. Find oldest sequence. */ list = fm->fence_types[_DRM_FENCE_TYPE_EXE]; if (list->next == &fm->ring) { return; } else { drm_fence_object_t *fence = list_entry(list->next, drm_fence_object_t, ring); fm->last_exe_flush = (fence->sequence - 1) & driver->sequence_mask; } diff = (sequence - fm->last_exe_flush) & driver->sequence_mask; if (diff >= driver->wrap_diff) return; fm->exe_flush_sequence = sequence; fm->pending_exe_flush = 1; } else { diff = (sequence - fm->exe_flush_sequence) & driver->sequence_mask; if (diff < driver->wrap_diff) { fm->exe_flush_sequence = sequence; } } } int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type) { return ((fence->signaled & type) == type); } /* * Make sure old fence objects are signaled before their fence sequences are * wrapped around and reused. 
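 *
 * Flush bookkeeping, as used below: fence->flush_mask collects the types
 * the caller wants flushed, fence->submitted_flush the types whose flush
 * has already been scheduled.  EXE flushes are batched through
 * drm_fence_flush_exe(); everything else is ORed into fm->pending_flush
 * for the driver's poke_flush() hook to pick up.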
*/ int drm_fence_object_flush(drm_device_t * dev, drm_fence_object_t * fence, uint32_t type) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; unsigned long flags; if (type & ~fence->type) { DRM_ERROR("Flush trying to extend fence type\n"); return -EINVAL; } write_lock_irqsave(&fm->lock, flags); fence->flush_mask |= type; if (fence->submitted_flush == fence->signaled) { if ((fence->type & DRM_FENCE_EXE) && !(fence->submitted_flush & DRM_FENCE_EXE)) { drm_fence_flush_exe(fm, driver, fence->sequence); fence->submitted_flush |= DRM_FENCE_EXE; } else { fm->pending_flush |= (fence->flush_mask & ~fence->submitted_flush); fence->submitted_flush = fence->flush_mask; } } write_unlock_irqrestore(&fm->lock, flags); driver->poke_flush(dev); return 0; } void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; drm_fence_object_t *fence; uint32_t diff; mutex_lock(&dev->struct_mutex); read_lock_irqsave(&fm->lock, flags); if (fm->ring.next == &fm->ring) { read_unlock_irqrestore(&fm->lock, flags); mutex_unlock(&dev->struct_mutex); return; } old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask; fence = list_entry(fm->ring.next, drm_fence_object_t, ring); atomic_inc(&fence->usage); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); if (diff < driver->wrap_diff) { drm_fence_object_flush(dev, fence, fence->type); } drm_fence_usage_deref_unlocked(dev, fence); } EXPORT_SYMBOL(drm_fence_flush_old); int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; if (mask & ~fence->type) { DRM_ERROR("Wait trying to extend fence type\n"); return -EINVAL; } if (fence_signaled(dev, fence, mask, 0)) return 0; _end = jiffies + 3 * DRM_HZ; drm_fence_object_flush(dev, fence, mask); if (lazy && driver->lazy_capable) { do { DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ, fence_signaled(dev, fence, mask, 1)); if (time_after_eq(jiffies, _end)) break; } while (ret == -EINTR && ignore_signals); if (time_after_eq(jiffies, _end) && (ret != 0)) ret = -EBUSY; return ret; } else { int signaled; do { signaled = fence_signaled(dev, fence, mask, 1); } while (!signaled && !time_after_eq(jiffies, _end)); if (!signaled) return -EBUSY; } return 0; } int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence, uint32_t type) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; unsigned long flags; uint32_t sequence; int ret; drm_fence_unring(dev, &fence->ring); ret = driver->emit(dev, &sequence); if (ret) return ret; write_lock_irqsave(&fm->lock, flags); fence->type = type; fence->flush_mask = 0x00; fence->submitted_flush = 0x00; fence->signaled = 0x00; fence->sequence = sequence; list_add_tail(&fence->ring, &fm->ring); write_unlock_irqrestore(&fm->lock, flags); return 0; } int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit, drm_fence_object_t * fence) { int ret = 0; unsigned long flags; drm_fence_manager_t *fm = &dev->fm; mutex_lock(&dev->struct_mutex); atomic_set(&fence->usage, 1); mutex_unlock(&dev->struct_mutex); write_lock_irqsave(&fm->lock, flags); INIT_LIST_HEAD(&fence->ring); fence->class = 0; 
fence->type = type; fence->flush_mask = 0; fence->submitted_flush = 0; fence->signaled = 0; fence->sequence = 0; write_unlock_irqrestore(&fm->lock, flags); if (emit) { ret = drm_fence_object_emit(dev, fence, type); } return ret; } EXPORT_SYMBOL(drm_fence_object_init); static int drm_fence_object_create(drm_file_t * priv, uint32_t type, int emit, int shareable, uint32_t * user_handle, drm_fence_object_t ** c_fence) { drm_device_t *dev = priv->head->dev; int ret; drm_fence_object_t *fence; fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) return -ENOMEM; ret = drm_fence_object_init(dev, type, emit, fence); if (ret) { drm_fence_usage_deref_unlocked(dev, fence); return ret; } mutex_lock(&dev->struct_mutex); ret = drm_add_user_object(priv, &fence->base, shareable); mutex_unlock(&dev->struct_mutex); if (ret) { drm_fence_usage_deref_unlocked(dev, fence); *c_fence = NULL; *user_handle = 0; return ret; } fence->base.type = drm_fence_type; fence->base.remove = &drm_fence_object_destroy; *user_handle = fence->base.hash.key; *c_fence = fence; return 0; } void drm_fence_manager_init(drm_device_t * dev) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *fed = dev->driver->fence_driver; int i; fm->lock = RW_LOCK_UNLOCKED; INIT_LIST_HEAD(&fm->ring); fm->pending_flush = 0; DRM_INIT_WAITQUEUE(&fm->fence_queue); fm->initialized = 0; if (fed) { fm->initialized = 1; for (i = 0; i < fed->no_types; ++i) { fm->fence_types[i] = &fm->ring; } } } void drm_fence_manager_takedown(drm_device_t * dev) { } drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) { drm_device_t *dev = priv->head->dev; drm_user_object_t *uo; drm_fence_object_t *fence; mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, handle); if (!uo || (uo->type != drm_fence_type)) { mutex_unlock(&dev->struct_mutex); return NULL; } fence = drm_user_object_entry(uo, drm_fence_object_t, base); atomic_inc(&fence->usage); mutex_unlock(&dev->struct_mutex); return fence; } int drm_fence_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; drm_fence_arg_t arg; drm_fence_object_t *fence; drm_user_object_t *uo; unsigned long flags; ret = 0; if (!fm->initialized) { DRM_ERROR("The DRM driver does not support fencing.\n"); return -EINVAL; } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); switch (arg.op) { case drm_fence_create:{ int emit = arg.flags & DRM_FENCE_FLAG_EMIT; if (emit) LOCK_TEST_WITH_RETURN(dev, filp); ret = drm_fence_object_create(priv, arg.type, emit, arg. 
flags & DRM_FENCE_FLAG_SHAREABLE, &arg.handle, &fence); if (ret) return ret; mutex_lock(&dev->struct_mutex); atomic_inc(&fence->usage); mutex_unlock(&dev->struct_mutex); break; } case drm_fence_destroy: mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, arg.handle); if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } ret = drm_remove_user_object(priv, uo); mutex_unlock(&dev->struct_mutex); return ret; case drm_fence_reference: ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); if (ret) return ret; fence = drm_lookup_fence_object(priv, arg.handle); break; case drm_fence_unreference: ret = drm_user_object_unref(priv, arg.handle, drm_fence_type); return ret; case drm_fence_signaled: fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; break; case drm_fence_flush: fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_flush(dev, fence, arg.type); break; case drm_fence_wait: fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_wait(dev, fence, arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, arg. flags & DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS, arg.type); break; case drm_fence_emit: LOCK_TEST_WITH_RETURN(dev, filp); fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_emit(dev, fence, arg.type); break; default: return -EINVAL; } read_lock_irqsave(&fm->lock, flags); arg.class = fence->class; arg.type = fence->type; arg.signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(dev, fence); DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; }
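
/*
 * Illustrative sketch only (not part of the original file): one way a
 * driver could use the helpers above to create an EXE-type fence for
 * work already submitted and then block until it has signaled.  The
 * function name is made up, error paths are abbreviated, and it assumes
 * the device's fence_driver implements emit()/poke_flush() and that any
 * locking emit() needs is already held by the caller.
 */
#if 0
static int example_wait_for_engine(drm_device_t * dev)
{
        drm_fence_object_t *fence;
        int ret;

        fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
        if (!fence)
                return -ENOMEM;

        /* Initialize the object and emit it right away (emit != 0),
         * which assigns the fence its hardware sequence number.
         */
        ret = drm_fence_object_init(dev, DRM_FENCE_EXE, 1, fence);
        if (ret) {
                drm_fence_usage_deref_unlocked(dev, fence);
                return ret;
        }

        /* Lazy, interruptible wait for the EXE type to signal. */
        ret = drm_fence_object_wait(dev, fence, 1, 0, DRM_FENCE_EXE);

        /* Drop the single usage reference taken in _init(). */
        drm_fence_usage_deref_unlocked(dev, fence);
        return ret;
}
#endif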