tr_cmds.c (10813B)
/*
===========================================================================
Copyright (C) 1999-2005 Id Software, Inc.

This file is part of Quake III Arena source code.

Quake III Arena source code is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.

Quake III Arena source code is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Quake III Arena source code; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
===========================================================================
*/
#include "tr_local.h"

volatile renderCommandList_t	*renderCommandList;

volatile qboolean	renderThreadActive;


/*
=====================
R_PerformanceCounters
=====================
*/
void R_PerformanceCounters( void ) {
	if ( !r_speeds->integer ) {
		// clear the counters even if we aren't printing
		Com_Memset( &tr.pc, 0, sizeof( tr.pc ) );
		Com_Memset( &backEnd.pc, 0, sizeof( backEnd.pc ) );
		return;
	}

	if (r_speeds->integer == 1) {
		ri.Printf (PRINT_ALL, "%i/%i shaders/surfs %i leafs %i verts %i/%i tris %.2f mtex %.2f dc\n",
			backEnd.pc.c_shaders, backEnd.pc.c_surfaces, tr.pc.c_leafs, backEnd.pc.c_vertexes,
			backEnd.pc.c_indexes/3, backEnd.pc.c_totalIndexes/3,
			R_SumOfUsedImages()/(1000000.0f), backEnd.pc.c_overDraw / (float)(glConfig.vidWidth * glConfig.vidHeight) );
	} else if (r_speeds->integer == 2) {
		ri.Printf (PRINT_ALL, "(patch) %i sin %i sclip %i sout %i bin %i bclip %i bout\n",
			tr.pc.c_sphere_cull_patch_in, tr.pc.c_sphere_cull_patch_clip, tr.pc.c_sphere_cull_patch_out,
			tr.pc.c_box_cull_patch_in, tr.pc.c_box_cull_patch_clip, tr.pc.c_box_cull_patch_out );
		ri.Printf (PRINT_ALL, "(md3) %i sin %i sclip %i sout %i bin %i bclip %i bout\n",
			tr.pc.c_sphere_cull_md3_in, tr.pc.c_sphere_cull_md3_clip, tr.pc.c_sphere_cull_md3_out,
			tr.pc.c_box_cull_md3_in, tr.pc.c_box_cull_md3_clip, tr.pc.c_box_cull_md3_out );
	} else if (r_speeds->integer == 3) {
		ri.Printf (PRINT_ALL, "viewcluster: %i\n", tr.viewCluster );
	} else if (r_speeds->integer == 4) {
		if ( backEnd.pc.c_dlightVertexes ) {
			ri.Printf (PRINT_ALL, "dlight srf:%i culled:%i verts:%i tris:%i\n",
				tr.pc.c_dlightSurfaces, tr.pc.c_dlightSurfacesCulled,
				backEnd.pc.c_dlightVertexes, backEnd.pc.c_dlightIndexes / 3 );
		}
	}
	else if (r_speeds->integer == 5 )
	{
		ri.Printf( PRINT_ALL, "zFar: %.0f\n", tr.viewParms.zFar );
	}
	else if (r_speeds->integer == 6 )
	{
		ri.Printf( PRINT_ALL, "flare adds:%i tests:%i renders:%i\n",
			backEnd.pc.c_flareAdds, backEnd.pc.c_flareTests, backEnd.pc.c_flareRenders );
	}

	Com_Memset( &tr.pc, 0, sizeof( tr.pc ) );
	Com_Memset( &backEnd.pc, 0, sizeof( backEnd.pc ) );
}


/*
====================
R_InitCommandBuffers
====================
*/
void R_InitCommandBuffers( void ) {
	glConfig.smpActive = qfalse;
	if ( r_smp->integer ) {
		ri.Printf( PRINT_ALL, "Trying SMP acceleration...\n" );
		if ( GLimp_SpawnRenderThread( RB_RenderThread ) ) {
			ri.Printf( PRINT_ALL, "...succeeded.\n" );
			glConfig.smpActive = qtrue;
		} else {
			ri.Printf( PRINT_ALL, "...failed.\n" );
		}
	}
}
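
/*
====================
Command queue overview

The front end never executes render work directly while building a
frame; it appends fixed-size command structs to
backEndData[tr.smpFrame]->commands, and the back end replays them later,
either inline (non-SMP) or on the render thread spawned above.  Every
command begins with an integer commandId, so enqueueing always follows
the same three steps.  A minimal sketch, assuming a hypothetical payload
type myCommand_t and enum value RC_MY_COMMAND (neither exists in the
real command set):

	myCommand_t	*cmd;

	cmd = R_GetCommandBuffer( sizeof( *cmd ) );
	if ( !cmd ) {
		return;		// buffer full, the command is silently dropped
	}
	cmd->commandId = RC_MY_COMMAND;
	// fill in the rest of the payload here

RE_SetColor and RE_StretchPic below are real instances of this pattern.
====================
*/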

/*
====================
R_ShutdownCommandBuffers
====================
*/
void R_ShutdownCommandBuffers( void ) {
	// kill the rendering thread
	if ( glConfig.smpActive ) {
		GLimp_WakeRenderer( NULL );
		glConfig.smpActive = qfalse;
	}
}

/*
====================
R_IssueRenderCommands
====================
*/
int	c_blockedOnRender;
int	c_blockedOnMain;

void R_IssueRenderCommands( qboolean runPerformanceCounters ) {
	renderCommandList_t	*cmdList;

	cmdList = &backEndData[tr.smpFrame]->commands;
	assert(cmdList); // bk001205
	// add an end-of-list command
	*(int *)(cmdList->cmds + cmdList->used) = RC_END_OF_LIST;

	// clear it out, in case this is a sync and not a buffer flip
	cmdList->used = 0;

	if ( glConfig.smpActive ) {
		// if the render thread is not idle, wait for it
		if ( renderThreadActive ) {
			c_blockedOnRender++;
			if ( r_showSmp->integer ) {
				ri.Printf( PRINT_ALL, "R" );
			}
		} else {
			c_blockedOnMain++;
			if ( r_showSmp->integer ) {
				ri.Printf( PRINT_ALL, "." );
			}
		}

		// sleep until the renderer has completed
		GLimp_FrontEndSleep();
	}

	// at this point, the back end thread is idle, so it is ok
	// to look at its performance counters
	if ( runPerformanceCounters ) {
		R_PerformanceCounters();
	}

	// actually start the commands going
	if ( !r_skipBackEnd->integer ) {
		// let it start on the new batch
		if ( !glConfig.smpActive ) {
			RB_ExecuteRenderCommands( cmdList->cmds );
		} else {
			GLimp_WakeRenderer( cmdList );
		}
	}
}


/*
====================
R_SyncRenderThread

Issue any pending commands and wait for them to complete.
After exiting, the render thread will have completed its work
and will remain idle, and the main thread is free to issue
OpenGL calls until R_IssueRenderCommands is called.
====================
*/
void R_SyncRenderThread( void ) {
	if ( !tr.registered ) {
		return;
	}
	R_IssueRenderCommands( qfalse );

	if ( !glConfig.smpActive ) {
		return;
	}
	GLimp_FrontEndSleep();
}
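
/*
====================
Front-end GL access

Direct OpenGL calls from the main thread are only safe once the render
thread has been drained with R_SyncRenderThread.  RE_BeginFrame below
follows this rule for every front-end GL state change; for example, its
r_textureMode handling looks like this (excerpted for illustration):

	if ( r_textureMode->modified ) {
		R_SyncRenderThread();				// back end is now idle
		GL_TextureMode( r_textureMode->string );	// safe to touch GL state
		r_textureMode->modified = qfalse;
	}
====================
*/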

/*
============
R_GetCommandBuffer

make sure there is enough command space, waiting on the
render thread if needed.
============
*/
void *R_GetCommandBuffer( int bytes ) {
	renderCommandList_t	*cmdList;

	cmdList = &backEndData[tr.smpFrame]->commands;

	// always leave room for the end of list command
	if ( cmdList->used + bytes + 4 > MAX_RENDER_COMMANDS ) {
		if ( bytes > MAX_RENDER_COMMANDS - 4 ) {
			ri.Error( ERR_FATAL, "R_GetCommandBuffer: bad size %i", bytes );
		}
		// if we run out of room, just start dropping commands
		return NULL;
	}

	cmdList->used += bytes;

	return cmdList->cmds + cmdList->used - bytes;
}


/*
=============
R_AddDrawSurfCmd

=============
*/
void R_AddDrawSurfCmd( drawSurf_t *drawSurfs, int numDrawSurfs ) {
	drawSurfsCommand_t	*cmd;

	cmd = R_GetCommandBuffer( sizeof( *cmd ) );
	if ( !cmd ) {
		return;
	}
	cmd->commandId = RC_DRAW_SURFS;

	cmd->drawSurfs = drawSurfs;
	cmd->numDrawSurfs = numDrawSurfs;

	cmd->refdef = tr.refdef;
	cmd->viewParms = tr.viewParms;
}


/*
=============
RE_SetColor

Passing NULL will set the color to white
=============
*/
void RE_SetColor( const float *rgba ) {
	setColorCommand_t	*cmd;

	if ( !tr.registered ) {
		return;
	}
	cmd = R_GetCommandBuffer( sizeof( *cmd ) );
	if ( !cmd ) {
		return;
	}
	cmd->commandId = RC_SET_COLOR;
	if ( !rgba ) {
		static float colorWhite[4] = { 1, 1, 1, 1 };

		rgba = colorWhite;
	}

	cmd->color[0] = rgba[0];
	cmd->color[1] = rgba[1];
	cmd->color[2] = rgba[2];
	cmd->color[3] = rgba[3];
}


/*
=============
RE_StretchPic
=============
*/
void RE_StretchPic ( float x, float y, float w, float h,
					 float s1, float t1, float s2, float t2, qhandle_t hShader ) {
	stretchPicCommand_t	*cmd;

	if (!tr.registered) {
		return;
	}
	cmd = R_GetCommandBuffer( sizeof( *cmd ) );
	if ( !cmd ) {
		return;
	}
	cmd->commandId = RC_STRETCH_PIC;
	cmd->shader = R_GetShaderByHandle( hShader );
	cmd->x = x;
	cmd->y = y;
	cmd->w = w;
	cmd->h = h;
	cmd->s1 = s1;
	cmd->t1 = t1;
	cmd->s2 = s2;
	cmd->t2 = t2;
}
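
/*
=============
Example 2D call

A caller-side sketch of the two commands above; the shader handle and
screen coordinates are illustrative, and in the full engine these entry
points are normally reached through the renderer's exported function
table rather than called directly:

	RE_SetColor( NULL );		// reset to opaque white
	RE_StretchPic( 0, 0, 640, 480, 0, 0, 1, 1, hBackgroundShader );
=============
*/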

/*
====================
RE_BeginFrame

If running in stereo, RE_BeginFrame will be called twice
for each RE_EndFrame
====================
*/
void RE_BeginFrame( stereoFrame_t stereoFrame ) {
	drawBufferCommand_t	*cmd;

	if ( !tr.registered ) {
		return;
	}
	glState.finishCalled = qfalse;

	tr.frameCount++;
	tr.frameSceneNum = 0;

	//
	// do overdraw measurement
	//
	if ( r_measureOverdraw->integer )
	{
		if ( glConfig.stencilBits < 4 )
		{
			ri.Printf( PRINT_ALL, "Warning: not enough stencil bits to measure overdraw: %d\n", glConfig.stencilBits );
			ri.Cvar_Set( "r_measureOverdraw", "0" );
			r_measureOverdraw->modified = qfalse;
		}
		else if ( r_shadows->integer == 2 )
		{
			ri.Printf( PRINT_ALL, "Warning: stencil shadows and overdraw measurement are mutually exclusive\n" );
			ri.Cvar_Set( "r_measureOverdraw", "0" );
			r_measureOverdraw->modified = qfalse;
		}
		else
		{
			R_SyncRenderThread();
			qglEnable( GL_STENCIL_TEST );
			qglStencilMask( ~0U );
			qglClearStencil( 0U );
			qglStencilFunc( GL_ALWAYS, 0U, ~0U );
			qglStencilOp( GL_KEEP, GL_INCR, GL_INCR );
		}
		r_measureOverdraw->modified = qfalse;
	}
	else
	{
		// this is only reached if it was on and is now off
		if ( r_measureOverdraw->modified ) {
			R_SyncRenderThread();
			qglDisable( GL_STENCIL_TEST );
		}
		r_measureOverdraw->modified = qfalse;
	}

	//
	// texturemode stuff
	//
	if ( r_textureMode->modified ) {
		R_SyncRenderThread();
		GL_TextureMode( r_textureMode->string );
		r_textureMode->modified = qfalse;
	}

	//
	// gamma stuff
	//
	if ( r_gamma->modified ) {
		r_gamma->modified = qfalse;

		R_SyncRenderThread();
		R_SetColorMappings();
	}

	// check for errors
	if ( !r_ignoreGLErrors->integer ) {
		int	err;

		R_SyncRenderThread();
		if ( ( err = qglGetError() ) != GL_NO_ERROR ) {
			ri.Error( ERR_FATAL, "RE_BeginFrame() - glGetError() failed (0x%x)!\n", err );
		}
	}

	//
	// draw buffer stuff
	//
	cmd = R_GetCommandBuffer( sizeof( *cmd ) );
	if ( !cmd ) {
		return;
	}
	cmd->commandId = RC_DRAW_BUFFER;

	if ( glConfig.stereoEnabled ) {
		if ( stereoFrame == STEREO_LEFT ) {
			cmd->buffer = (int)GL_BACK_LEFT;
		} else if ( stereoFrame == STEREO_RIGHT ) {
			cmd->buffer = (int)GL_BACK_RIGHT;
		} else {
			ri.Error( ERR_FATAL, "RE_BeginFrame: Stereo is enabled, but stereoFrame was %i", stereoFrame );
		}
	} else {
		if ( stereoFrame != STEREO_CENTER ) {
			ri.Error( ERR_FATAL, "RE_BeginFrame: Stereo is disabled, but stereoFrame was %i", stereoFrame );
		}
		if ( !Q_stricmp( r_drawBuffer->string, "GL_FRONT" ) ) {
			cmd->buffer = (int)GL_FRONT;
		} else {
			cmd->buffer = (int)GL_BACK;
		}
	}
}


/*
=============
RE_EndFrame

Returns the number of msec spent in the back end
=============
*/
void RE_EndFrame( int *frontEndMsec, int *backEndMsec ) {
	swapBuffersCommand_t	*cmd;

	if ( !tr.registered ) {
		return;
	}
	cmd = R_GetCommandBuffer( sizeof( *cmd ) );
	if ( !cmd ) {
		return;
	}
	cmd->commandId = RC_SWAP_BUFFERS;

	R_IssueRenderCommands( qtrue );

	// use the other buffers next frame, because another CPU
	// may still be rendering into the current ones
	R_ToggleSmpFrame();

	if ( frontEndMsec ) {
		*frontEndMsec = tr.frontEndMsec;
	}
	tr.frontEndMsec = 0;
	if ( backEndMsec ) {
		*backEndMsec = backEnd.pc.msec;
	}
	backEnd.pc.msec = 0;
}
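
/*
=============
Frame sequence

A sketch of how the entry points in this file are driven once per frame
by the surrounding engine (variable names are illustrative); in stereo,
RE_BeginFrame runs once per eye, as noted above:

	int	frontEndMsec, backEndMsec;

	RE_BeginFrame( STEREO_CENTER );
	// ... queue RE_SetColor / RE_StretchPic / scene commands ...
	RE_EndFrame( &frontEndMsec, &backEndMsec );
=============
*/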