/** * json_events - Read JSON event file from disk and call event callback. * @fn: File name to read or NULL for default. * @func: Callback to call for each event * @data: Abstract pointer to pass to func. * * The callback gets the data pointer, the event name, the event * in perf format and a description passed. * * Call func with each event in the json file * Return: -1 on failure, otherwise 0. */ int json_events(const char *fn, int (*func)(void *data, char *name, char *event, char *desc), void *data) { int err = -EIO; size_t size; jsmntok_t *tokens, *tok; int i, j, len; char *map; if (!fn) fn = json_default_name(); tokens = parse_json(fn, &map, &size, &len); if (!tokens) return -EIO; EXPECT(tokens->type == JSMN_ARRAY, tokens, "expected top level array"); tok = tokens + 1; for (i = 0; i < tokens->size; i++) { char *event = NULL, *desc = NULL, *name = NULL; struct msrmap *msr = NULL; jsmntok_t *msrval = NULL; jsmntok_t *precise = NULL; jsmntok_t *obj = tok++; EXPECT(obj->type == JSMN_OBJECT, obj, "expected object"); for (j = 0; j < obj->size; j += 2) { jsmntok_t *field, *val; int nz; field = tok + j; EXPECT(field->type == JSMN_STRING, tok + j, "Expected field name"); val = tok + j + 1; EXPECT(val->type == JSMN_STRING, tok + j + 1, "Expected string value"); nz = !json_streq(map, val, "0"); if (match_field(map, field, nz, &event, val)) { /* ok */ } else if (json_streq(map, field, "EventName")) { addfield(map, &name, "", "", val); } else if (json_streq(map, field, "BriefDescription")) { addfield(map, &desc, "", "", val); fixdesc(desc); } else if (json_streq(map, field, "PEBS") && nz && !strstr(desc, "(Precise Event)")) { precise = val; } else if (json_streq(map, field, "MSRIndex") && nz) { msr = lookup_msr(map, val); } else if (json_streq(map, field, "MSRValue")) { msrval = val; } else if (json_streq(map, field, "Errata") && !json_streq(map, val, "null")) { addfield(map, &desc, ". 
", " Spec update: ", val); } else if (json_streq(map, field, "Data_LA") && nz) { addfield(map, &desc, ". ", " Supports address when precise", NULL); } /* ignore unknown fields */ } if (precise) { if (json_streq(map, precise, "2")) addfield(map, &desc, " ", "(Must be precise)", NULL); else addfield(map, &desc, " ", "(Precise event)", NULL); } if (msr != NULL) addfield(map, &event, ",", msr->pname, msrval); fixname(name); err = func(data, name, event, desc); free(event); free(desc); free(name); if (err) break; tok += j; } EXPECT(tok - tokens == len, tok, "unexpected objects at end"); err = 0; out_free: free_json(map, size, tokens); return err; }
/*
 * execute - Walk and run a parsed command tree node.
 *
 * t       - command tree node (NULL is a no-op).
 * wanttty - whether the child should get the controlling tty; -1 means
 *           "never" and is propagated into recursive calls.
 * pipein/pipeout - fd pairs for pipeline plumbing (may be NULL at the
 *           top level).
 *
 * NOTE(review): this is classic csh semantics code; the exact ordering of
 * signal-mask manipulation around vfork() is load-bearing.  Do not reorder.
 */
void execute(struct command *t, int wanttty, int *pipein, int *pipeout)
{
    bool forked = 0;
    struct biltins *bifunc;
    int pid = 0;
    int pv[2];
    sigset_t sigset;
    /* These are static because the vfork() child shares the parent's
     * address space; saved copies must survive the child's writes. */
    static sigset_t csigset;
    static sigset_t ocsigset;
    static int onosigchld = 0;
    static int nosigchld = 0;

    UNREGISTER(forked);
    UNREGISTER(bifunc);
    UNREGISTER(wanttty);

    if (t == 0)
	return;

    /* Background commands never get the tty. */
    if (t->t_dflg & F_AMPERSAND)
	wanttty = 0;
    switch (t->t_dtyp) {

    case NODE_COMMAND:
	/* Strip a leading quote marker left by the parser. */
	if ((t->t_dcom[0][0] & (QUOTE | TRIM)) == QUOTE)
	    (void) memmove(t->t_dcom[0], t->t_dcom[0] + 1,
			   (Strlen(t->t_dcom[0] + 1) + 1) * sizeof(Char));
	if ((t->t_dflg & F_REPEAT) == 0)
	    Dfix(t);		/* $ " ' \ */
	if (t->t_dcom[0] == 0)
	    return;
	/* fall into... */

    case NODE_PAREN:
	if (t->t_dflg & F_PIPEOUT)
	    mypipe(pipeout);
	/*
	 * Must do << early so parent will know where input pointer should be.
	 * If noexec then this is all we do.
	 */
	if (t->t_dflg & F_READ) {
	    (void) close(0);
	    heredoc(t->t_dlef);
	    if (noexec)
		(void) close(0);
	}

	set(STRstatus, Strsave(STR0));

	/*
	 * This mess is the necessary kludge to handle the prefix builtins:
	 * nice, nohup, time. These commands can also be used by themselves,
	 * and this is not handled here. This will also work when loops are
	 * parsed.
	 */
	while (t->t_dtyp == NODE_COMMAND)
	    if (eq(t->t_dcom[0], STRnice))
		if (t->t_dcom[1])
		    if (strchr("+-", t->t_dcom[1][0]))
			if (t->t_dcom[2]) {
			    /* "nice +N cmd": explicit priority. */
			    setname("nice");
			    t->t_nice = getn(t->t_dcom[1]);
			    lshift(t->t_dcom, 2);
			    t->t_dflg |= F_NICE;
			}
			else
			    break;
		    else {
			/* "nice cmd": default priority of 4. */
			t->t_nice = 4;
			lshift(t->t_dcom, 1);
			t->t_dflg |= F_NICE;
		    }
		else
		    break;
	    else if (eq(t->t_dcom[0], STRnohup))
		if (t->t_dcom[1]) {
		    t->t_dflg |= F_NOHUP;
		    lshift(t->t_dcom, 1);
		}
		else
		    break;
	    else if (eq(t->t_dcom[0], STRtime))
		if (t->t_dcom[1]) {
		    t->t_dflg |= F_TIME;
		    lshift(t->t_dcom, 1);
		}
		else
		    break;
	    else
		break;

	/* is it a command */
	if (t->t_dtyp == NODE_COMMAND) {
	    /*
	     * Check if we have a builtin function and remember which one.
	     */
	    bifunc = isbfunc(t);
	    if (noexec) {
		/*
		 * Continue for builtins that are part of the scripting language
		 */
		if (bifunc && bifunc->bfunct != dobreak &&
		    bifunc->bfunct != docontin &&
		    bifunc->bfunct != doelse &&
		    bifunc->bfunct != doend &&
		    bifunc->bfunct != doforeach &&
		    bifunc->bfunct != dogoto &&
		    bifunc->bfunct != doif &&
		    bifunc->bfunct != dorepeat &&
		    bifunc->bfunct != doswbrk &&
		    bifunc->bfunct != doswitch &&
		    bifunc->bfunct != dowhile &&
		    bifunc->bfunct != dozip)
		    break;
	    }
	}
	else {			/* not a command */
	    bifunc = NULL;
	    if (noexec)
		break;
	}

	/*
	 * We fork only if we are timed, or are not the end of a parenthesized
	 * list and not a simple builtin function. Simple meaning one that is
	 * not pipedout, niced, nohupped, or &'d. It would be nice(?) to not
	 * fork in some of these cases.
	 */
	/*
	 * Prevent forking cd, pushd, popd, chdir cause this will cause the
	 * shell not to change dir!
	 */
	if (bifunc && (bifunc->bfunct == dochngd ||
		       bifunc->bfunct == dopushd ||
		       bifunc->bfunct == dopopd))
	    t->t_dflg &= ~(F_NICE);
	if (((t->t_dflg & F_TIME) || ((t->t_dflg & F_NOFORK) == 0 &&
	     (!bifunc || t->t_dflg &
	      (F_PIPEOUT | F_AMPERSAND | F_NICE | F_NOHUP)))) ||
	/*
	 * We have to fork for eval too.
	 */
	    (bifunc && (t->t_dflg & (F_PIPEIN | F_PIPEOUT)) != 0 &&
	     bifunc->bfunct == doeval)) {
	    if (t->t_dtyp == NODE_PAREN ||
		t->t_dflg & (F_REPEAT | F_AMPERSAND) || bifunc) {
		/* Heavyweight path: real fork() via pfork(). */
		forked++;
		/*
		 * We need to block SIGCHLD here, so that if the process does
		 * not die before we can set the process group
		 */
		if (wanttty >= 0 && !nosigchld) {
		    sigemptyset(&sigset);
		    sigaddset(&sigset, SIGCHLD);
		    sigprocmask(SIG_BLOCK, &sigset, &csigset);

		    nosigchld = 1;
		}

		pid = pfork(t, wanttty);
		if (pid == 0 && nosigchld) {
		    sigprocmask(SIG_SETMASK, &csigset, NULL);
		    nosigchld = 0;
		}
		else if (pid != 0 && (t->t_dflg & F_AMPERSAND))
		    backpid = pid;
	    }
	    else {
		/* Lightweight path: vfork(); parent globals must be saved
		 * because the child runs in the parent's address space. */
		int ochild, osetintr, ohaderr, odidfds;
		int oSHIN, oSHOUT, oSHERR, oOLDSTD, otpgrp;
		sigset_t osigset;

		/*
		 * Prepare for the vfork by saving everything that the child
		 * corrupts before it exec's. Note that in some signal
		 * implementations which keep the signal info in user space
		 * (e.g. Sun's) it will also be necessary to save and restore
		 * the current sigaction's for the signals the child touches
		 * before it exec's.
		 */
		if (wanttty >= 0 && !nosigchld && !noexec) {
		    sigemptyset(&sigset);
		    sigaddset(&sigset, SIGCHLD);
		    sigprocmask(SIG_BLOCK, &sigset, &csigset);

		    nosigchld = 1;
		}
		sigemptyset(&sigset);
		sigaddset(&sigset, SIGCHLD);
		sigaddset(&sigset, SIGINT);
		sigprocmask(SIG_BLOCK, &sigset, &osigset);
		ochild = child;
		osetintr = setintr;
		ohaderr = haderr;
		odidfds = didfds;
		oSHIN = SHIN;
		oSHOUT = SHOUT;
		oSHERR = SHERR;
		oOLDSTD = OLDSTD;
		otpgrp = tpgrp;
		ocsigset = csigset;
		onosigchld = nosigchld;
		Vsav = Vdp = NULL;
		Vexpath = 0;
		Vt = 0;
		pid = vfork();

		if (pid < 0) {
		    sigprocmask(SIG_SETMASK, &osigset, NULL);
		    stderror(ERR_NOPROC);
		}
		forked++;
		if (pid) {	/* parent */
		    /* Restore everything the vfork child may have clobbered. */
		    child = ochild;
		    setintr = osetintr;
		    haderr = ohaderr;
		    didfds = odidfds;
		    SHIN = oSHIN;
		    SHOUT = oSHOUT;
		    SHERR = oSHERR;
		    OLDSTD = oOLDSTD;
		    tpgrp = otpgrp;
		    csigset = ocsigset;
		    nosigchld = onosigchld;

		    xfree(Vsav);
		    Vsav = NULL;
		    xfree(Vdp);
		    Vdp = NULL;
		    xfree(Vexpath);
		    Vexpath = NULL;
		    blkfree((Char **) Vt);
		    Vt = NULL;
		    /* this is from pfork() */
		    palloc(pid, t);
		    sigprocmask(SIG_SETMASK, &osigset, NULL);
		}
		else {		/* child */
		    /* this is from pfork() */
		    int pgrp;
		    bool ignint = 0;

		    if (nosigchld) {
			sigprocmask(SIG_SETMASK, &csigset, NULL);
			nosigchld = 0;
		    }

		    if (setintr)
			ignint = (tpgrp == -1 &&
				  (t->t_dflg & F_NOINTERRUPT)) ||
			    (gointr && eq(gointr, STRminus));
		    pgrp = pcurrjob ? pcurrjob->p_jobid : getpid();
		    child++;
		    if (setintr) {
			setintr = 0;
			if (ignint) {
			    (void) signal(SIGINT, SIG_IGN);
			    (void) signal(SIGQUIT, SIG_IGN);
			}
			else {
			    (void) signal(SIGINT, vffree);
			    (void) signal(SIGQUIT, SIG_DFL);
			}

			if (wanttty >= 0) {
			    /* Job-control stops revert to default in child. */
			    (void) signal(SIGTSTP, SIG_DFL);
			    (void) signal(SIGTTIN, SIG_DFL);
			    (void) signal(SIGTTOU, SIG_DFL);
			}

			(void) signal(SIGTERM, parterm);
		    }
		    else if (tpgrp == -1 &&
			     (t->t_dflg & F_NOINTERRUPT)) {
			(void) signal(SIGINT, SIG_IGN);
			(void) signal(SIGQUIT, SIG_IGN);
		    }

		    pgetty(wanttty, pgrp);
		    if (t->t_dflg & F_NOHUP)
			(void) signal(SIGHUP, SIG_IGN);
		    if (t->t_dflg & F_NICE)
			(void) setpriority(PRIO_PROCESS, 0, t->t_nice);
		}
	    }
	}
	if (pid != 0) {
	    /*
	     * It would be better if we could wait for the whole job when we
	     * knew the last process had been started.  Pwait, in fact, does
	     * wait for the whole job anyway, but this test doesn't really
	     * express our intentions.
	     */
	    if (didfds == 0 && t->t_dflg & F_PIPEIN) {
		(void) close(pipein[0]);
		(void) close(pipein[1]);
	    }
	    if ((t->t_dflg & F_PIPEOUT) == 0) {
		if (nosigchld) {
		    sigprocmask(SIG_SETMASK, &csigset, NULL);
		    nosigchld = 0;
		}
		if ((t->t_dflg & F_AMPERSAND) == 0)
		    pwait();
	    }
	    break;
	}
	/* From here on we are the (non-)forked executor of the command. */
	doio(t, pipein, pipeout);
	if (t->t_dflg & F_PIPEOUT) {
	    (void) close(pipeout[0]);
	    (void) close(pipeout[1]);
	}
	/*
	 * Perform a builtin function. If we are not forked, arrange for
	 * possible stopping
	 */
	if (bifunc) {
	    func(t, bifunc);
	    if (forked)
		exitstat();
	    break;
	}
	if (t->t_dtyp != NODE_PAREN) {
	    doexec(NULL, t);
	    /* NOTREACHED */
	}
	/*
	 * For () commands must put new 0,1,2 in FSH* and recurse
	 */
	OLDSTD = dcopy(0, FOLDSTD);
	SHOUT = dcopy(1, FSHOUT);
	SHERR = dcopy(2, FSHERR);
	(void) close(SHIN);
	SHIN = -1;
	didfds = 0;
	wanttty = -1;
	t->t_dspr->t_dflg |= t->t_dflg & F_NOINTERRUPT;
	execute(t->t_dspr, wanttty, NULL, NULL);
	exitstat();

    case NODE_PIPE:
	/* Left side writes the pipe, right side reads it. */
	t->t_dcar->t_dflg |= F_PIPEOUT |
	    (t->t_dflg & (F_PIPEIN | F_AMPERSAND | F_STDERR | F_NOINTERRUPT));
	execute(t->t_dcar, wanttty, pipein, pv);
	t->t_dcdr->t_dflg |= F_PIPEIN | (t->t_dflg &
			(F_PIPEOUT | F_AMPERSAND | F_NOFORK | F_NOINTERRUPT));
	if (wanttty > 0)
	    wanttty = 0;	/* got tty already */
	execute(t->t_dcdr, wanttty, pv, pipeout);
	break;

    case NODE_LIST:
	if (t->t_dcar) {
	    t->t_dcar->t_dflg |= t->t_dflg & F_NOINTERRUPT;
	    execute(t->t_dcar, wanttty, NULL, NULL);
	    /*
	     * In strange case of A&B make a new job after A
	     */
	    if (t->t_dcar->t_dflg & F_AMPERSAND && t->t_dcdr &&
		(t->t_dcdr->t_dflg & F_AMPERSAND) == 0)
		pendjob();
	}
	if (t->t_dcdr) {
	    t->t_dcdr->t_dflg |= t->t_dflg & (F_NOFORK | F_NOINTERRUPT);
	    execute(t->t_dcdr, wanttty, NULL, NULL);
	}
	break;

    case NODE_OR:
    case NODE_AND:
	if (t->t_dcar) {
	    t->t_dcar->t_dflg |= t->t_dflg & F_NOINTERRUPT;
	    execute(t->t_dcar, wanttty, NULL, NULL);
	    /* Short-circuit: && stops on failure, || stops on success. */
	    if ((getn(value(STRstatus)) == 0) !=
		(t->t_dtyp == NODE_AND))
		return;
	}
	if (t->t_dcdr) {
	    t->t_dcdr->t_dflg |= t->t_dflg & (F_NOFORK | F_NOINTERRUPT);
	    execute(t->t_dcdr, wanttty, NULL, NULL);
	}
	break;
    }
    /*
     * Fall through for all breaks from switch
     *
     * If there will be no more executions of this command, flush all file
     * descriptors. Places that turn on the F_REPEAT bit are responsible for
     * doing donefds after the last re-execution
     */
    if (didfds && !(t->t_dflg & F_REPEAT))
	donefds();
}
/*
 * Handle a ClientMessage X event: XEMBED systray requests, root-window
 * WMFS function/command invocations, and a handful of EWMH messages.
 */
static void
event_clientmessageevent(XEvent *e)
{
     XClientMessageEvent *ev = &e->xclient;
     struct client *c;
     struct _systray *sy;
     int type = 0;

     /* Map the message atom onto our net_atom[] index (net_last = unknown). */
     while(type < net_last && W->net_atom[type] != ev->message_type)
          ++type;

     /*
      * Systray message
      * _NET_WM_SYSTRAY_TRAY_OPCODE
      */
     if(ev->window == W->systray.win && type == net_system_tray_opcode)
     {
          if(ev->data.l[1] == XEMBED_EMBEDDED_NOTIFY)
          {
               systray_add(ev->data.l[2]);
               systray_update();
          }
          else if(ev->data.l[1] == XEMBED_REQUEST_FOCUS)
          {
               if((sy = systray_find(ev->data.l[2])))
                    ewmh_send_message(sy->win, sy->win, "_XEMBED",
                              XEMBED_FOCUS_IN, XEMBED_FOCUS_CURRENT, 0, 0, 0);
          }
     }
     else if(ev->window == W->root)
     {
          /* WMFS message */
          if(ev->data.l[4])
          {
               /* Manage _WMFS_FUNCTION && _WMFS_CMD */
               if(type == wmfs_function || type == wmfs_cmd)
               {
                    /*
                     * BUG FIX: the out-parameters of XGetWindowProperty are
                     * Atom*, int* and unsigned long*.  The old code funnelled
                     * all of them through a single 32-bit 'int d'; on LP64
                     * platforms Atom and unsigned long are 8 bytes wide, so
                     * every call scribbled past 'd' on the stack (undefined
                     * behavior).  Use correctly typed variables instead.
                     */
                    Atom rtype;
                    int rformat;
                    long unsigned int len, nitems, bytes_after;
                    unsigned char *ret = NULL, *ret_cmd = NULL;
                    void (*func)(Uicb);

                    if(XGetWindowProperty(EVDPY(e), W->root, W->net_atom[wmfs_function], 0, 65536,
                                   False, W->net_atom[utf8_string], &rtype, &rformat,
                                   &nitems, &bytes_after,
                                   &ret) == Success && ret && ((func = uicb_name_func((char*)ret))))
                    {
                         if(XGetWindowProperty(EVDPY(e), W->root, W->net_atom[wmfs_cmd], 0, 65536,
                                        False, W->net_atom[utf8_string], &rtype, &rformat,
                                        &len, &bytes_after, &ret_cmd) == Success
                                   && len && ret_cmd)
                         {
                              func((Uicb)ret_cmd);
                              XFree(ret_cmd);
                         }
                         else
                              /* No command property: invoke with no argument. */
                              func(NULL);

                         XFree(ret);
                    }
               }
          }

          /* _NET_ACTIVE_WINDOW aimed at a systray icon: focus it. */
          if(type == net_active_window)
               if((sy = systray_find(ev->data.l[0])))
                    XSetInputFocus(W->dpy, sy->win, RevertToNone, CurrentTime);
     }

     switch(type)
     {
          /* _NET_WM_STATE */
          case net_wm_state:
               if((c = client_gb_win(ev->window)))
                    ewmh_manage_state(ev->data.l, c);
               break;
          /* _NET_CLOSE_WINDOW */
          case net_close_window:
               if((c = client_gb_win(ev->window)))
                    client_close(c);
               break;
          /* _NET_WM_DESKTOP */
          case net_wm_desktop:
               break;
     }
}
/*
 * UpdateConstantByIdentifier - Resolve one "environment constant" and upload
 * it to the GPU for the current draw call.
 *
 * pShader/pShaderAPI/params - standard Source shader-draw context.
 * pConst        - descriptor naming which env constant to bind and where.
 * pContext      - optional procedural context (used for lightmap scale).
 * bPS           - true: bind as pixel-shader constant, false: vertex shader.
 * iFirstMutable/iFirstStatic - base indices into params[] for VMT-driven
 *                values; negative means "not available".
 *
 * Most cases fill data[0..3][0..3] and fall out of the switch so the shared
 * upload at the bottom runs; cases that upload through a dedicated API (fog,
 * lighting, flashlight, morphing) 'return' early instead.
 */
void UpdateConstantByIdentifier( CBaseVSShader *pShader, IShaderDynamicAPI* pShaderAPI, IMaterialVar **params,
	SimpleEnvConstant *pConst, CProceduralContext *pContext,
	bool bPS, int iFirstMutable, int iFirstStatic )
{
	/* 4x4 scratch block, zero-initialized; one row per constant register. */
	float data[4][4] = {
		0, 0, 0, 0,
		0, 0, 0, 0,
		0, 0, 0, 0,
		0, 0, 0, 0
	};
	int _register = RemapEnvironmentConstant( bPS, pConst->iHLSLRegister );

	switch ( pConst->iEnvC_ID )
	{
	default:
		/* Unknown ID: assert in debug, then deliberately fall through
		 * to the TIME case (uploads current time as a benign value). */
		Assert(0);
	case HLSLENV_TIME:
		data[0][ 0 ] = pShaderAPI->CurrentTime();
		break;
	case HLSLENV_VIEW_ORIGIN:
	case HLSLENV_VIEW_FWD:
	case HLSLENV_VIEW_RIGHT:
	case HLSLENV_VIEW_UP:
	case HLSLENV_VIEW_WORLDDEPTH:
		/* One float4 cached by the controller per view-derived ID. */
		Q_memcpy( data, gProcShaderCTRL->AccessEnvConstant( pConst->iEnvC_ID ), sizeof(float)*4 );
		break;
	case HLSLENV_PIXEL_SIZE:
		{
			/* Reciprocal backbuffer size, optionally scaled
			 * (flSmartDefaultValues[0], clamped to >= 1). */
			int bx, by;
			pShaderAPI->GetBackBufferDimensions( bx, by );
			float scale = max( 1.0f, pConst->flSmartDefaultValues[0] );
			data[0][ 0 ] = ( 1.0f / bx ) * scale;
			data[0][ 1 ] = ( 1.0f / by ) * scale;
		}
		break;
	case HLSLENV_FOG_PARAMS:
		Assert( bPS );
		pShaderAPI->SetPixelShaderFogParams( _register );
		return;
	case HLSLENV_STUDIO_LIGHTING_VS:
		//if ( pShader->UsingFlashlight( params ) )
		//	return;
#ifndef SHADER_EDITOR_DLL_SWARM
		pShaderAPI->SetVertexShaderStateAmbientLightCube();
#else
		pShader->PI_SetVertexShaderAmbientLightCube();
		pShader->PI_SetVertexShaderLocalLighting();
#endif
		return;
	case HLSLENV_STUDIO_LIGHTING_PS:
		/* Flashlight passes use their own lighting setup. */
		if ( pShader->UsingFlashlight( params ) )
			return;
#ifndef SHADER_EDITOR_DLL_SWARM
		pShaderAPI->SetPixelShaderStateAmbientLightCube( SSEREG_AMBIENT_CUBE );
		pShaderAPI->CommitPixelShaderLighting( SSEREG_LIGHT_INFO_ARRAY );
#else
		pShader->PI_SetPixelShaderAmbientLightCube( SSEREG_AMBIENT_CUBE );
		pShader->PI_SetPixelShaderLocalLighting( SSEREG_LIGHT_INFO_ARRAY );
#endif
		return;
#ifndef SHADER_EDITOR_DLL_2006
	case HLSLENV_STUDIO_MORPHING:
		{
			pShader->SetHWMorphVertexShaderState_NoTex( VERTEX_SHADER_SHADER_SPECIFIC_CONST_10, VERTEX_SHADER_SHADER_SPECIFIC_CONST_11 );
			/* Third texcoord stream is unused unless HW morphing is on. */
			bool bUnusedTexCoords[3] = { false, false, !pShaderAPI->IsHWMorphingEnabled() };
			pShaderAPI->MarkUnusedVertexFields( 0, 3, bUnusedTexCoords );
		}
		return;
#endif
	case HLSLENV_FLASHLIGHT_VPMATRIX:
		{
			VMatrix worldToTexture;
			pShaderAPI->GetFlashlightState( worldToTexture );
			Q_memcpy( data, worldToTexture.Base(), sizeof(float)*16 );
			/* Pixel shaders read this matrix from a fixed register. */
			if ( bPS )
				_register = SSEREG_FLASHLIGHT_TO_WORLD_TEXTURE;
		}
		break;
	case HLSLENV_FLASHLIGHT_DATA:
		{
			if ( !pShader->UsingFlashlight( params ) )
				return;
			Assert( bPS );
			VMatrix dummy;
			const FlashlightState_t &flashlightState = pShaderAPI->GetFlashlightState( dummy );
#ifndef SHADER_EDITOR_DLL_2006
			/* Shadow filtering tweaks: filter size, attenuation, jitter. */
			float tweaks[4];
			tweaks[0] = flashlightState.m_flShadowFilterSize / flashlightState.m_flShadowMapResolution;
			tweaks[1] = ShadowAttenFromState( flashlightState );
			pShader->HashShadow2DJitter( flashlightState.m_flShadowJitterSeed, &tweaks[2], &tweaks[3] );
			pShaderAPI->SetPixelShaderConstant( SSEREG_LIGHT_INFO_ARRAY+1, tweaks, 1 ); // c07
#endif
			float vScreenScale[4] = {1280.0f / 32.0f, 768.0f / 32.0f, 0, 0};
			int nWidth, nHeight;
			pShaderAPI->GetBackBufferDimensions( nWidth, nHeight );
			vScreenScale[0] = (float) nWidth / 32.0f;
			vScreenScale[1] = (float) nHeight / 32.0f;
			pShaderAPI->SetPixelShaderConstant( SSEREG_LIGHT_INFO_ARRAY + 5, vScreenScale, 1 ); // c11
#ifdef SHADER_EDITOR_DLL_2006
			SetFlashLightColorFromState( flashlightState, pShaderAPI, SSEREG_LIGHT_INFO_ARRAY + 4 ); // c10
#else
			SetFlashLightColorFromState( flashlightState, pShaderAPI, SSEREG_LIGHT_INFO_ARRAY + 4, false ); // c10
#endif
			/* Attenuation terms + light origin packed as two float4s. */
			float atten_pos[8];
			atten_pos[0] = flashlightState.m_fConstantAtten; // c08
			atten_pos[1] = flashlightState.m_fLinearAtten;
			atten_pos[2] = flashlightState.m_fQuadraticAtten;
			atten_pos[3] = flashlightState.m_FarZ;
			atten_pos[4] = flashlightState.m_vecLightOrigin[0]; // c09
			atten_pos[5] = flashlightState.m_vecLightOrigin[1];
			atten_pos[6] = flashlightState.m_vecLightOrigin[2];
			atten_pos[7] = 1.0f;
			pShaderAPI->SetPixelShaderConstant( SSEREG_LIGHT_INFO_ARRAY+2, atten_pos, 2 ); // PSREG_LIGHT_INFO_ARRAY + 3 !!
		}
		return;
	case HLSLENV_SMART_CALLBACK:
		{
			/* Registered user callback fills the float4 in place. */
			int index = pConst->iFastLookup;
			if ( index < 0 )
				return;
			Assert( pConst->szSmartHelper );
			pFnClCallback( func ) = gProcShaderCTRL->GetCallback( index )->func;
			func( &data[0][0] );
		}
		break;
	case HLSLENV_SMART_VMT_MUTABLE:
		{
			if ( iFirstMutable < 0 )
				break;
			Assert( pConst->iFastLookup >= 0 && pConst->iFastLookup < AMT_VMT_MUTABLE );
			params[ iFirstMutable + pConst->iFastLookup ]->GetVecValue( &data[0][0], 4 );
		}
		break;
	case HLSLENV_SMART_VMT_STATIC:
		{
			if ( iFirstStatic < 0 )
				break;
			Assert( pConst->iFastLookup >= 0 && pConst->iFastLookup < AMT_VMT_STATIC );
			params[ iFirstStatic + pConst->iFastLookup ]->GetVecValue( &data[0][0], 4 );
		}
		break;
	case HLSLENV_SMART_RANDOM_FLOAT:
		{
			/* iSmartNumComps is the highest component index (0..3),
			 * hence the inclusive loop bound. */
			Assert( pConst->iSmartNumComps >= 0 && pConst->iSmartNumComps <= 3 );
			for ( int i = 0; i <= pConst->iSmartNumComps; i++ )
				data[0][i] = RandomFloat( pConst->flSmartDefaultValues[0], pConst->flSmartDefaultValues[1] );
		}
		break;
	case HLSLENV_CUSTOM_MATRIX:
		{
			/* iSmartNumComps is overloaded here as the matrix selector. */
			VMatrix matTmp, matTmpTranspose;
			switch ( pConst->iSmartNumComps )
			{
			case CMATRIX_VIEW:
				pShaderAPI->GetMatrix( MATERIAL_VIEW, matTmp.Base() );
				break;
			case CMATRIX_PROJ:
				pShaderAPI->GetMatrix( MATERIAL_PROJECTION, matTmp.Base() );
				break;
			case CMATRIX_VIEWPROJ:
				{
					VMatrix matV, matP;
					pShaderAPI->GetMatrix( MATERIAL_VIEW, matV.Base() );
					pShaderAPI->GetMatrix( MATERIAL_PROJECTION, matP.Base() );
					MatrixMultiply( matV, matP, matTmp );
				}
				break;
			case CMATRIX_VIEW_INV:
				{
					VMatrix matPre;
					pShaderAPI->GetMatrix( MATERIAL_VIEW, matPre.Base() );
					MatrixInverseGeneral( matPre, matTmp );
				}
				break;
			case CMATRIX_PROJ_INV:
				{
					VMatrix matPre;
					pShaderAPI->GetMatrix( MATERIAL_PROJECTION, matPre.Base() );
					MatrixInverseGeneral( matPre, matTmp );
				}
				break;
			case CMATRIX_VIEWPROJ_INV:
				{
					VMatrix matV, matP, matPre;
					pShaderAPI->GetMatrix( MATERIAL_VIEW, matV.Base() );
					pShaderAPI->GetMatrix( MATERIAL_PROJECTION, matP.Base() );
					MatrixMultiply( matV, matP, matPre );
					MatrixInverseGeneral( matPre, matTmp );
				}
				break;
			}
			/* HLSL consumes column-major data; transpose before upload. */
			MatrixTranspose( matTmp, matTmpTranspose );
			Q_memcpy( &data[0][0], matTmpTranspose.Base(), sizeof(float) * 16 );
		}
		break;
	case HLSLENV_LIGHTMAP_RGB:
#ifdef SHADER_EDITOR_DLL_SWARM
		if ( pContext )
			data[0][0] = pContext->flLightmapScaleFactor;
		else
			data[0][0] = 1.0f;
#elif SHADER_EDITOR_DLL_2006
		/* 2006 SDK has no lightmap scale API; leave the zero default.
		 * NOTE(review): '#elif SHADER_EDITOR_DLL_2006' tests the macro's
		 * value, not its definedness — confirm the macro is defined as a
		 * non-empty integer in the 2006 build configuration. */
#else
		data[0][0] = pShaderAPI->GetLightMapScaleFactor();
#endif
		break;
	}

	/* Shared upload path for every case that fell out of the switch. */
	Assert( pConst->iConstSize >= 1 && pConst->iConstSize <= 4 );
	Assert( _register >= 0 );
	if ( bPS )
		pShaderAPI->SetPixelShaderConstant( _register, &data[0][0], pConst->iConstSize );
	else
		pShaderAPI->SetVertexShaderConstant( _register, &data[0][0], pConst->iConstSize );
}
static Eina_Bool scale_rgba_in_to_out_clip_sample_internal(RGBA_Image *src, RGBA_Image *dst, RGBA_Draw_Context *dc, int src_region_x, int src_region_y, int src_region_w, int src_region_h, int dst_region_x, int dst_region_y, int dst_region_w, int dst_region_h) { int x, y; int *lin_ptr; DATA32 *buf = NULL, *dptr; DATA32 **row_ptr; DATA32 *ptr, *dst_ptr, *src_data, *dst_data; DATA8 *mask; int dst_clip_x, dst_clip_y, dst_clip_w, dst_clip_h; int src_w, src_h, dst_w, dst_h, mask_x, mask_y; RGBA_Gfx_Func func, func2 = NULL; RGBA_Image *mask_ie = dc->clip.mask; if (!(RECTS_INTERSECT(dst_region_x, dst_region_y, dst_region_w, dst_region_h, 0, 0, dst->cache_entry.w, dst->cache_entry.h))) return EINA_FALSE; if (!(RECTS_INTERSECT(src_region_x, src_region_y, src_region_w, src_region_h, 0, 0, src->cache_entry.w, src->cache_entry.h))) return EINA_FALSE; src_w = src->cache_entry.w; src_h = src->cache_entry.h; dst_w = dst->cache_entry.w; dst_h = dst->cache_entry.h; src_data = src->image.data; dst_data = dst->image.data; mask_x = dc->clip.mask_x; mask_y = dc->clip.mask_y; if (dc->clip.use) { dst_clip_x = dc->clip.x; dst_clip_y = dc->clip.y; dst_clip_w = dc->clip.w; dst_clip_h = dc->clip.h; if (dst_clip_x < 0) { dst_clip_w += dst_clip_x; dst_clip_x = 0; } if (dst_clip_y < 0) { dst_clip_h += dst_clip_y; dst_clip_y = 0; } if ((dst_clip_x + dst_clip_w) > dst_w) dst_clip_w = dst_w - dst_clip_x; if ((dst_clip_y + dst_clip_h) > dst_h) dst_clip_h = dst_h - dst_clip_y; } else { dst_clip_x = 0; dst_clip_y = 0; dst_clip_w = dst_w; dst_clip_h = dst_h; } if (dst_clip_x < dst_region_x) { dst_clip_w += dst_clip_x - dst_region_x; dst_clip_x = dst_region_x; } if ((dst_clip_x + dst_clip_w) > (dst_region_x + dst_region_w)) dst_clip_w = dst_region_x + dst_region_w - dst_clip_x; if (dst_clip_y < dst_region_y) { dst_clip_h += dst_clip_y - dst_region_y; dst_clip_y = dst_region_y; } if ((dst_clip_y + dst_clip_h) > (dst_region_y + dst_region_h)) dst_clip_h = dst_region_y + dst_region_h - dst_clip_y; if 
((src_region_w <= 0) || (src_region_h <= 0) || (dst_region_w <= 0) || (dst_region_h <= 0) || (dst_clip_w <= 0) || (dst_clip_h <= 0)) return EINA_FALSE; /* sanitise x */ if (src_region_x < 0) { dst_region_x -= (src_region_x * dst_region_w) / src_region_w; dst_region_w += (src_region_x * dst_region_w) / src_region_w; src_region_w += src_region_x; src_region_x = 0; } if (src_region_x >= src_w) return EINA_FALSE; if ((src_region_x + src_region_w) > src_w) { dst_region_w = (dst_region_w * (src_w - src_region_x)) / (src_region_w); src_region_w = src_w - src_region_x; } if (dst_region_w <= 0) return EINA_FALSE; if (src_region_w <= 0) return EINA_FALSE; if (dst_clip_w <= 0) return EINA_FALSE; if (dst_clip_x >= dst_w) return EINA_FALSE; if (dst_clip_x < dst_region_x) { dst_clip_w += (dst_clip_x - dst_region_x); dst_clip_x = dst_region_x; } if ((dst_clip_x + dst_clip_w) > dst_w) { dst_clip_w = dst_w - dst_clip_x; } if (dst_clip_w <= 0) return EINA_FALSE; /* sanitise y */ if (src_region_y < 0) { dst_region_y -= (src_region_y * dst_region_h) / src_region_h; dst_region_h += (src_region_y * dst_region_h) / src_region_h; src_region_h += src_region_y; src_region_y = 0; } if (src_region_y >= src_h) return EINA_FALSE; if ((src_region_y + src_region_h) > src_h) { dst_region_h = (dst_region_h * (src_h - src_region_y)) / (src_region_h); src_region_h = src_h - src_region_y; } if (dst_region_h <= 0) return EINA_FALSE; if (src_region_h <= 0) return EINA_FALSE; if (dst_clip_h <= 0) return EINA_FALSE; if (dst_clip_y >= dst_h) return EINA_FALSE; if (dst_clip_y < dst_region_y) { dst_clip_h += (dst_clip_y - dst_region_y); dst_clip_y = dst_region_y; } if ((dst_clip_y + dst_clip_h) > dst_h) { dst_clip_h = dst_h - dst_clip_y; } if (dst_clip_h <= 0) return EINA_FALSE; /* figure out dst jump */ //dst_jump = dst_w - dst_clip_w; /* figure out dest start ptr */ dst_ptr = dst_data + dst_clip_x + (dst_clip_y * dst_w); if (!mask_ie) { if (dc->mul.use) func = 
evas_common_gfx_func_composite_pixel_color_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dc->mul.col, dst->cache_entry.flags.alpha, dst_clip_w, dc->render_op); else func = evas_common_gfx_func_composite_pixel_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dst->cache_entry.flags.alpha, dst_clip_w, dc->render_op); } else { func = evas_common_gfx_func_composite_pixel_mask_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dst->cache_entry.flags.alpha, dst_clip_w, dc->render_op); if (dc->mul.use) func2 = evas_common_gfx_func_composite_pixel_color_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dc->mul.col, dst->cache_entry.flags.alpha, dst_clip_w, EVAS_RENDER_COPY); // Adjust clipping info if (EINA_UNLIKELY((dst_clip_x - mask_x) < 0)) dst_clip_x = mask_x; if (EINA_UNLIKELY((dst_clip_y - mask_y) < 0)) dst_clip_y = mask_y; if (EINA_UNLIKELY((dst_clip_x - mask_x + dst_clip_w) > (int)mask_ie->cache_entry.w)) dst_clip_w = mask_ie->cache_entry.w - dst_clip_x + mask_x; if (EINA_UNLIKELY((dst_clip_y - mask_y + dst_clip_h) > (int)mask_ie->cache_entry.h)) dst_clip_h = mask_ie->cache_entry.h - dst_clip_y + mask_y; } if ((dst_region_w == src_region_w) && (dst_region_h == src_region_h)) { #ifdef HAVE_PIXMAN # ifdef PIXMAN_IMAGE_SCALE_SAMPLE if ((src->pixman.im) && (dst->pixman.im) && (!dc->clip.mask) && ((!dc->mul.use) || ((dc->mul.use) && (dc->mul.col == 0xffffffff))) && ((dc->render_op == _EVAS_RENDER_COPY) || (dc->render_op == _EVAS_RENDER_BLEND))) { pixman_op_t op = PIXMAN_OP_SRC; // _EVAS_RENDER_COPY if (dc->render_op == _EVAS_RENDER_BLEND) op = PIXMAN_OP_OVER; pixman_image_composite(op, src->pixman.im, NULL, dst->pixman.im, (dst_clip_x - dst_region_x) + src_region_x, (dst_clip_y - dst_region_y) + src_region_y, 0, 0, dst_clip_x, dst_clip_y, dst_clip_w, dst_clip_h); } else # endif #endif { ptr = src_data + ((dst_clip_y - dst_region_y + src_region_y) * src_w) + 
(dst_clip_x - dst_region_x) + src_region_x; /* image masking */ if (mask_ie) { if (dc->mul.use) buf = alloca(dst_clip_w * sizeof(DATA32)); for (y = 0; y < dst_clip_h; y++) { mask = mask_ie->image.data8 + ((dst_clip_y - mask_y + y) * mask_ie->cache_entry.w) + (dst_clip_x - mask_x); /* * blend here [clip_w *] ptr -> dst_ptr * */ if (dc->mul.use) { func2(ptr, NULL, dc->mul.col, buf, dst_clip_w); func(buf, mask, 0, dst_ptr, dst_clip_w); } else func(ptr, mask, 0, dst_ptr, dst_clip_w); ptr += src_w; dst_ptr += dst_w; } } else { for (y = 0; y < dst_clip_h; y++) { /* * blend here [clip_w *] ptr -> dst_ptr * */ func(ptr, NULL, dc->mul.col, dst_ptr, dst_clip_w); ptr += src_w; dst_ptr += dst_w; } } } } else { /* allocate scale lookup tables */ lin_ptr = alloca(dst_clip_w * sizeof(int)); row_ptr = alloca(dst_clip_h * sizeof(DATA32 *)); /* fill scale tables */ for (x = 0; x < dst_clip_w; x++) lin_ptr[x] = (((x + dst_clip_x - dst_region_x) * src_region_w) / dst_region_w) + src_region_x; for (y = 0; y < dst_clip_h; y++) row_ptr[y] = src_data + (((((y + dst_clip_y - dst_region_y) * src_region_h) / dst_region_h) + src_region_y) * src_w); /* scale to dst */ dptr = dst_ptr; #ifdef DIRECT_SCALE if ((!src->cache_entry.flags.alpha) && (!dst->cache_entry.flags.alpha) && (!dc->mul.use) && (!dc->clip.mask)) { for (y = 0; y < dst_clip_h; y++) { dst_ptr = dptr; for (x = 0; x < dst_clip_w; x++) { ptr = row_ptr[y] + lin_ptr[x]; *dst_ptr = *ptr; dst_ptr++; } dptr += dst_w; } } else #endif { unsigned int mul_col; /* a scanline buffer */ buf = alloca(dst_clip_w * sizeof(DATA32)); mul_col = dc->mul.use ? dc->mul.col : 0xFFFFFFFF; /* do we have enough data to start some additional thread ? */ if (use_thread && dst_clip_h > 32 && dst_clip_w * dst_clip_h > 4096) { /* Yes, we do ! 
*/ Evas_Scale_Msg *msg; void *ref; Evas_Scale_Thread local; local.mask8 = dc->clip.mask; local.row_ptr = row_ptr; local.dptr = dptr; local.lin_ptr = lin_ptr; local.func = func; local.func2 = func2; local.dst_clip_x = dst_clip_x; local.dst_clip_y = dst_clip_y; local.dst_clip_h = dst_clip_h; local.dst_clip_w = dst_clip_w; local.dst_w = dst_w; local.mask_x = mask_x; local.mask_y = mask_y; local.mul_col = mul_col; msg = eina_thread_queue_send(thread_queue, sizeof (Evas_Scale_Msg), &ref); msg->task = &local; eina_thread_queue_send_done(thread_queue, ref); /* image masking */ if (dc->clip.mask) { _evas_common_scale_rgba_sample_scale_mask(0, dst_clip_x, dst_clip_y, dst_clip_w, dst_clip_h >> 1, dst_w, dc->clip.mask_x, dc->clip.mask_y, row_ptr, lin_ptr, dc->clip.mask, dptr, func, func2, mul_col); } else { _evas_common_scale_rgba_sample_scale_nomask(0, dst_clip_w, dst_clip_h >> 1, dst_w, row_ptr, lin_ptr, dptr, func, mul_col); } msg = eina_thread_queue_wait(main_queue, &ref); if (msg) eina_thread_queue_wait_done(main_queue, ref); }
/*
 * run_on_cpu - Invoke @func(@data) in the context of @env.
 *
 * Single-threaded stub: there is no cross-CPU dispatch here, so the
 * callback is simply executed inline and the CPU state handle is unused.
 */
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    /* Call straight through the function pointer. */
    (*func)(data);
}
/// Executes 'func' for each party member on the same map and in range (0:whole map) int party_foreachsamemap(int (*func)(struct block_list*,va_list),struct map_session_data *sd,int range,...) { struct party_data *p = NULL; struct battleground_data *bg = NULL; struct map_session_data *psd; int i,x0,y0,x1,y1; struct block_list *list[MAX_BG_MEMBERS]; int blockcount=0; int total = 0; //Return value. nullpo_ret(sd); if( map[sd->bl.m].flag.battleground && (bg = bg_team_search(sd->state.bg_id)) == NULL ) return 0; else if( !map[sd->bl.m].flag.battleground && (p = party_search(sd->status.party_id)) == NULL ) return 0; x0 = sd->bl.x-range; y0 = sd->bl.y-range; x1 = sd->bl.x+range; y1 = sd->bl.y+range; if( bg ) { for( i = 0; i < MAX_BG_MEMBERS; i++ ) { if( (psd = bg->members[i].sd) == NULL ) continue; if( psd->bl.m != sd->bl.m || !psd->bl.prev ) continue; if( range && (psd->bl.x < x0 || psd->bl.y < y0 || psd->bl.x > x1 || psd->bl.y > y1) ) continue; list[blockcount++] = &psd->bl; } } else if( p ) { for( i = 0; i < MAX_PARTY; i++ ) { if( (psd = p->data[i].sd) == NULL ) continue; if( psd->bl.m != sd->bl.m || !psd->bl.prev ) continue; if( range && (psd->bl.x < x0 || psd->bl.y < y0 || psd->bl.x > x1 || psd->bl.y > y1) ) continue; list[blockcount++] = &psd->bl; } } else return 0; map_freeblock_lock(); for( i = 0; i < blockcount; i++ ) { va_list ap; va_start(ap, range); total += func(list[i], ap); va_end(ap); } map_freeblock_unlock(); return total; }
/*
 * Test driver: call func() with one int and three doubles, report the
 * PASS marker, and flush stdout so the harness sees it immediately.
 *
 * BUG FIX: 'void main()' is not a conforming definition of main in hosted
 * C; main must return int. Changed to 'int main (void)' returning 0.
 */
int main (void)
{
  func (2, 1., 2., 3.);
  pass ("func [OKI002]");
  /* Flush so the result line is visible even if a later step aborts. */
  fflush (stdout);
  return 0;
}
/*
 * avl_insert - Insert NODE into TREE, keeping it height-balanced.
 *
 * tree - the AVL tree.
 * node - caller-allocated node to insert; its links/bf are initialized here.
 * func - three-way comparator: func(a, b, aux) > 0 when a sorts after b.
 *
 * Returns the inserted node, or the already-present node when an equal key
 * exists (in which case NODE is NOT linked into the tree).
 */
struct avl_node* avl_insert(struct avl_tree *tree,
                            struct avl_node *node,
                            avl_cmp_func *func)
{
    __AVL_DEBUG_INSERT(node);

    struct avl_node *p=NULL,*cur;
    int cmp, bf, bf_old;

    /* Standard BST descent to find the attachment point P. */
    cur = tree->root;
    while(cur)
    {
        cmp = func(cur, node, tree->aux);
        p = cur;

        if(cmp > 0) {
            cur = cur->left;
        }else if (cmp < 0){
            cur = cur->right;
        }else {
            // duplicated key -> return existing node without inserting
            return cur;
        }
    }

    /* Initialize the new leaf. */
    avl_set_parent(node, p);
    avl_set_bf(node, 0);
    node->left = node->right = NULL;
#ifdef _AVL_NEXT_POINTER
    node->prev = node->next = NULL;
#endif

    // P is parent node of CUR
    if(p)
    {
        if(func(p, node, tree->aux) > 0) {
            p->left = node;
#ifdef _AVL_NEXT_POINTER
            /* Splice NODE just before P in the threaded order. */
            node->next = p;
            node->prev = p->prev;
            if (p->prev)
                p->prev->next = node;
            p->prev = node;
#endif
        }else {
            p->right = node;
#ifdef _AVL_NEXT_POINTER
            /* Splice NODE just after P in the threaded order. */
            node->prev = p;
            node->next = p->next;
            if (p->next)
                p->next->prev = node;
            p->next = node;
#endif
        }
    } else {
        // no parent .. make NODE as root
        tree->root = node;
    }

    // recursive balancing process .. scan from leaf to root
    bf = 0;
    while(node)
    {
        p = avl_parent(node);

        if (p) {
            // if parent exists
            bf_old = avl_bf(node);

            /* Rebalance NODE and re-attach the (possibly new) subtree
             * root under P on the same side. */
            if (p->right == node) {
                node = _balance_tree(node, bf);
                p->right = node;
            }else {
                node = _balance_tree(node, bf);
                p->left = node;
            }

            // calculate balance factor BF for parent
            if (node->left == NULL && node->right == NULL) {
                // leaf node
                if (p->left == node) bf = -1;
                else bf = 1;
            } else {
                // interior node
                bf = 0;
                if (_abs(bf_old) < _abs(avl_bf(node))) {
                    // if ABS of balance factor increases
                    // cascade to parent
                    if (p->left == node) bf = -1;
                    else bf = 1;
                }
            }
        } else if(node == tree->root){
            tree->root = _balance_tree(tree->root, bf);
            break;
        }
        /* Height unchanged => no further rebalancing needed upward. */
        if (bf == 0) break;

        node = p;
    }

    __AVL_DEBUG_DISPLAY(tree);

    return node;
}
/*
 * Walk the dependency graph, running each recipe via FUNC once all of its
 * inputs are up to date, with at most NPROC recipe bodies in flight at once.
 * Honors single-threading flags on recipes, reaps finished child processes,
 * and returns the aggregate walk status (error / done / uptodate / ...).
 */
static graph_walk_status_ty graph_walk_inner(graph_ty *gp, graph_walk_status_ty (*func)(graph_recipe_ty *, graph_ty *), int nproc)
{
    graph_recipe_list_nrc_ty walk;
    graph_walk_status_ty status;
    graph_walk_status_ty status2;
    graph_recipe_ty *grp;
    size_t j;
    size_t walk_pos;
    itab_ty *itp;           /* pid -> in-flight recipe table */
    string_list_ty single_thread;   /* flags currently held by running recipes */

    trace(("graph_walk(gp = %8.8lX, nproc = %d)\n{\n", (long)gp, nproc));
    status = graph_walk_status_uptodate;
    itp = itab_alloc(nproc);

    /*
     * Reset the activity counter for each recipe.
     *
     * Recipes with no inputs are added to the list at this time,
     * all of their inputs are satisfied.
     */
    graph_recipe_list_nrc_constructor(&walk);
    for (j = 0; j < gp->already_recipe->nrecipes; ++j)
    {
        grp = gp->already_recipe->recipe[j];
        grp->input_satisfied = 0;
        grp->input_uptodate = 0;
        if (grp->output->nfiles != 0 && grp->input->nfiles == 0)
        {
            grp->input_uptodate = 1;
            graph_recipe_list_nrc_append(&walk, grp);
        }
    }

    /*
     * Turn the list upside down.  We want to find all of the files
     * with outputs but no inputs.  This is the initial list of file
     * nodes to walk.
     */
    symtab_walk(gp->already, is_it_a_leaf, &walk);

    /*
     * Keep chewing up graph recipe nodes until no more are left to
     * be processed.
     */
    string_list_constructor(&single_thread);
    walk_pos = 0;
    while (walk_pos < walk.nrecipes || itp->load > 0)
    {
        /*
         * Terminate elegantly, if asked to.
         */
        if (desist_requested())
        {
            desist:
            status = graph_walk_status_error;
            if (itp->load > 0 && !option_test(OPTION_SILENT))
            {
                sub_context_ty *scp;
                scp = sub_context_new();
                sub_var_set(scp, "Number", "%ld", (long)itp->load);
                sub_var_optional(scp, "Number");
                error_intl(scp, i18n("waiting for outstanding processes"));
                sub_context_delete(scp);
            }

            /*
             * No matter what, don't do another recipe.
             * However, we must still wait for the
             * unfinished actions to complete in an orderly
             * fashion.
             */
            no_persevere:
            assert(gp->already_recipe);
            /* Push walk_pos past the end so no further recipes start;
             * the outer loop then only drains in-flight children. */
            walk_pos = gp->already_recipe->nrecipes * 2;
            continue;
        }

        /*
         * If there is available processing resource, and
         * outstanding recipe instances, run another recipe.
         */
        trace(("itp->load = %ld;\n", (long)itp->load));
        trace(("nproc = %d;\n", nproc));
        trace(("walk_pos = %ld;\n", (long)walk_pos));
        trace(("walk.nrecipes = %ld;\n", (long)walk.nrecipes));
        while (itp->load < nproc && walk_pos < walk.nrecipes)
        {
            if (desist_requested())
                goto desist;
            fp_sync();

            /*
             * Extract a recipe from the list.  Order does
             * not matter, they are all valid candidates
             * with up-to-date ingredients.
             *
             * However: the users expect a mostly
             * left-to-right order of evaluation.  That
             * means taking the first one, NOT the last one.
             */
            grp = walk.recipe[walk_pos++];

            /*
             * Make sure there is no conflict with existing
             * single thread flags.  Go hunting for a recipe
             * not in conflict.  Come back later if we can't
             * find one.
             */
            if
            (
                grp->single_thread
            &&
                string_list_intersect(grp->single_thread, &single_thread)
            )
            {
                size_t k;
                graph_recipe_ty *kp;

                /*
                 * go hunting
                 */
                for (k = walk_pos; k < walk.nrecipes; ++k)
                {
                    kp = walk.recipe[k];
                    if
                    (
                        !kp->single_thread
                    ||
                        !string_list_intersect
                        (
                            kp->single_thread,
                            &single_thread
                        )
                    )
                        break;
                }

                /*
                 * Come back later if we find no alternative.
                 */
                if (k >= walk.nrecipes)
                {
                    --walk_pos;
                    break;
                }

                /*
                 * Have the conflicting recipe and the
                 * alternative change places.
                 */
                kp = walk.recipe[k];
                trace(("k = %ld\n", (long)k));
                trace(("kp = %08lX\n", (long)kp));
                walk.recipe[walk_pos - 1] = kp;
                walk.recipe[k] = grp;
                grp = kp;
            }
            trace(("grp = %08lX;\n", (long)grp));
            trace(("grp->input->nfiles = %ld;\n", (long)grp->input->nfiles));
            trace(("grp->output->nfiles = %ld;\n", (long)grp->output->nfiles));

            /*
             * Remember the single threading, so other
             * recipes avoid conflicting with *this* one.
             */
            if (grp->single_thread)
            {
                string_list_append_list(&single_thread, grp->single_thread);
            }

            /*
             * run the recipe body
             */
            run_a_recipe:
            status2 = func(grp, gp);

            /*
             * Look at what happened.
             */
            if (grp->single_thread && status2 != graph_walk_status_wait)
            {
                string_list_remove_list(&single_thread, grp->single_thread);
            }
            switch (status2)
            {
            case graph_walk_status_wait:
                /* Recipe forked a child: track it by pid until it exits. */
                assert(itp);
                trace(("pid = %d;\n", graph_recipe_getpid(grp)));
                itab_assign(itp, graph_recipe_getpid(grp), grp);
                trace(("itp->load = %ld;\n", (long)itp->load));
                break;

            case graph_walk_status_error:
                /*
                 * It failed.  Don't do anything with the
                 * outputs of the recipe.  Usually, we stop
                 * altogether.
                 */
                trace(("error\n"));
                status = graph_walk_status_error;
                if (!option_test(OPTION_PERSEVERE))
                    goto no_persevere;
                break;

            case graph_walk_status_done_stop:
                /*
                 * It worked, but we need to stop for
                 * some reason.  This is usually only
                 * used by isit_uptodate.
                 */
                trace(("done_stop\n"));
                if (status == graph_walk_status_uptodate)
                    status = graph_walk_status_done_stop;
                assert(itp->load == 0);
                goto done;

            case graph_walk_status_uptodate:
                star_as_specified('#');
                /* fall through... */

            case graph_walk_status_uptodate_done:
                /*
                 * It worked.  Now push all of the
                 * recipes which depend on the outputs
                 * of this recipe.
                 */
                implications_of_recipe(&walk, grp, 1);
                break;

            case graph_walk_status_done:
                /*
                 * It worked.  Now push all of the
                 * recipes which depend on the outputs
                 * of this recipe.
                 */
                implications_of_recipe(&walk, grp, 0);
                if (status == graph_walk_status_uptodate)
                    status = graph_walk_status_done;
                break;
            }

#ifdef HAVE_WAIT3
            /*
             * We want to see if any children have exited.
             * Do not block (that's why wait3 is used).
             * Only do one at a time.
             */
            if (itp->load > 0)
            {
                int pid;
                int exit_status;
                struct rusage ru;

                trace(("mark\n"));
                pid = os_wait3(&exit_status, WNOHANG, &ru);
                trace(("pid = %d\n", pid));
                if (pid < 0)
                {
                    sub_context_ty *scp;

                    if (errno == EINTR)
                        continue;
                    scp = sub_context_new();
                    sub_errno_set(scp);
                    fatal_intl(scp, i18n("wait(): $errno"));
                    /* NOTREACHED */
                }
                else if (pid > 0)
                {
                    trace(("es = 0x%04X\n", exit_status));
                    grp = itab_query(itp, pid);
                    if (grp)
                    {
                        /* if it's one of ours... */
                        trace(("...waited\n"));
                        assert(pid == graph_recipe_getpid(grp));
                        if (grp->ocp->meter_p)
                            grp->ocp->meter_p->ru = ru;
                        graph_recipe_waited(grp, exit_status);
                        itab_delete(itp, pid);
                        trace(("itp->load = %ld;\n", (long)itp->load));
                        /* Resume the recipe that was waiting on this child. */
                        goto run_a_recipe;
                    }
                }
            }
#endif /* HAVE_WAIT3 */
        }

        /*
         * Collect the results of execution, and kick off the
         * recipes that are blocked.  Only do one at a time.
         */
        if (itp->load > 0)
        {
            int pid;
            int exit_status;
#ifdef HAVE_WAIT3
            struct rusage ru;
#endif

            trace(("mark\n"));
#ifdef HAVE_WAIT3
            pid = os_wait3(&exit_status, 0, &ru);
#else
            pid = os_wait(&exit_status);
#endif
            trace(("pid = %d\n", pid));
            assert(pid != 0);
            if (pid == 0)
                errno = EINVAL;
            if (pid <= 0)
            {
                sub_context_ty *scp;

                if (errno == EINTR)
                    continue;
                scp = sub_context_new();
                sub_errno_set(scp);
                fatal_intl(scp, i18n("wait(): $errno"));
                /* NOTREACHED */
            }
            trace(("es = 0x%04X\n", exit_status));
            grp = itab_query(itp, pid);
            if (grp)
            {
                /* if it's one of ours... */
                trace(("...waited\n"));
                assert(pid == graph_recipe_getpid(grp));
#ifdef HAVE_WAIT3
                if (grp->ocp->meter_p)
                    grp->ocp->meter_p->ru = ru;
#endif
                graph_recipe_waited(grp, exit_status);
                itab_delete(itp, pid);
                trace(("itp->load = %ld;\n", (long)itp->load));
                goto run_a_recipe;
            }
        }
        trace(("mark\n"));
    }

    done:
    itab_free(itp);

    /*
     * Confirmation for the user when things go wrong.
     */
    if (status == graph_walk_status_error)
        symtab_walk(gp->already, excuse_me, gp);

    /*
     * Free up the list of recipes (which have been) / (to be) walked.
     */
    graph_recipe_list_nrc_destructor(&walk);
    string_list_destructor(&single_thread);
    trace(("return %s;\n", graph_walk_status_name(status)));
    trace(("}\n"));
    return status;
}
/*
 * uwerr - Gamma-method error analysis (Ulli Wolff style) for Monte Carlo data.
 *
 * Computes the mean, statistical error, error of the error, and integrated
 * autocorrelation time of either a primary observable (selected by the
 * file-scope 'ipo') or a derived quantity (evaluated by the file-scope
 * function pointer 'func'), using automatic windowing of the autocorrelation
 * function. Results are appended to "<obsname>_uwerr", the autocorrelation
 * function to "<obsname>_uwerr_Gamma", and tau_int to "<obsname>_uwerr_tauint".
 *
 * @append: fopen() mode string used for all output files (e.g. "a" or "w").
 *
 * Relies on file-scope globals: nalpha, nreplica, n_r, npara, para, ipo,
 * s_tau, obsname, data, func — presumably set up by the caller; confirm.
 *
 * Return: 0 on success, 1 on usage/IO/windowing failure, -5 if the data has
 * no fluctuations.
 */
int uwerr (char* append) {

  const double epsilon = 2.0e-16;   /* NOTE(review): unused in this function */
  int i, n, label;
  int ndata, Wmax, W, Wopt, k;
  double **a_b, *a_bb, *a_proj, a_bb_proj;
  double *F_b, *F_bb, *F_bw;
  double *Gamma_F, C_F, C_Fopt, v_Fbb, dv_Fbb, tau, *tau_int;
  double *f_alpha, *h_alpha, *m_alpha, *data_ptr, func_res;
  double value, dvalue, ddvalue, tau_intbb, dtau_intbb;
  double chisqr, Qval, *p_r, p_r_mean, p_r_var, delta, lobd, *bins;
  char filename[80], format[80];
  FILE *ofs;

  /* Echo the global configuration this run will use. */
  printf("[uwerr] The following arguments have been read:\n");
  printf("[uwerr] nalpha = %d\n", nalpha);
  printf("[uwerr] nreplica = %d\n", nreplica);
  for(i=0; i<nreplica; i++) {
    printf("[uwerr] n_r(%2d) = %d\n", i, n_r[i]);
  }
  printf("[uwerr] npara = %d\n", npara);
  for(i=0; i<npara; i++) {
    printf("[uwerr] para(%2d) = %e\n", i, para[i]);
  }
  printf("[uwerr] ipo = %d\n", ipo);
  printf("[uwerr] s_tau = %e\n", s_tau);
  printf("[uwerr] obsname = %s\n", obsname);
  printf("[uwerr] append = %s\n", append);
  fprintf(stdout, "[uwerr]: Starting ...\n");

  /*************************************************************
   * check if combination of values in ipo an func are allowed *
   *************************************************************/
  /* 'label' keeps the original ipo for the output file; when a derived
   * quantity is requested (func set), ipo is forced to 0. */
  label = ipo;
  if(ipo>0 && func!=NULL) {
    ipo = 0;
  }
  else if ( ipo==0 && func==NULL) {
    fprintf(stdout, "[uwerr] illegal values of func and ipo, return");
    return(1);
  }
  fprintf(stdout, "[uwerr]: checked ipo and func\n");

  /* ndata - total number of rows in data */
  for( i=1, ndata = *n_r; i<nreplica; ndata += *(n_r + i++) );
  /* Wmax - longest possible summation index + 1 */
  MIN_INT(n_r, nreplica, &Wmax);
  fprintf(stdout, "[uwerr]: have ndata and Wmax ready\n");

  /*******************
   * allocate memory *
   *******************/
  F_b = (double *)calloc(nreplica, sizeof(double));
  F_bb = (double *)calloc(1, sizeof(double));
  F_bw = (double *)calloc(1, sizeof(double));
  Gamma_F = (double *)calloc(Wmax, sizeof(double));
  tau_int = (double *)calloc(Wmax, sizeof(double));
  if (ipo==0 && func!=NULL) /* only necessary in case of derived quantity */
  {
    a_b = (double**)calloc(nreplica, sizeof(double*));
    a_bb = (double *)calloc(nalpha, sizeof(double));
    for(n=0; n<nreplica; n++) {
      *(a_b+n)=(double*)calloc(nalpha, sizeof(double));
    }
  }
  fprintf(stdout, "[uwerr]: allocated memory\n");

  /*****************************************************************
   * calculate estimators for primary observable/derived quantity  *
   *****************************************************************/
  if(ipo>0 && func==NULL) /* here estimators for one of the prim. observables */
  {
    data_ptr = *(data+ipo-1);   /* points to column of ipo in data */
    for(n=0; n<nreplica; n++) {
      ARITHMEAN(data_ptr, *(n_r+n), F_b+n);   /* arithmetic mean for replica */
      data_ptr = data_ptr + *(n_r+n);   /* pointer set to beginning of next replica */
      /* test */
      fprintf(stdout, "[uwerr] F_b(%d) = %18.16e\n", n, *(F_b+n));
    }
    ARITHMEAN(*(data+ipo-1), ndata, F_bb);   /* mean including all data for ipo */
    /* test */
    fprintf(stdout, "[uwerr] F_bn = %18.16e\n", *F_bb);
  }
  else if (ipo==0 && func!=NULL) {   /* estimators for derived quantity */
    /* calculate means per replica and total mean */
    for(i=0; i<nalpha; i++) {
      data_ptr = *(data+i);
      for(n=0; n<nreplica; n++) {
        ARITHMEAN(data_ptr, *(n_r+n), *(a_b+n)+i);
        data_ptr += *(n_r+n);
      }
      ARITHMEAN(*(data+i), ndata, a_bb+i);
    }
    /* calculate estimators per replica for derived quantity */
    for(n=0; n<nreplica; n++) {
      func(nalpha, *(a_b+n), npara, para, F_b+n);   /* est. for means per replicum */
    }
    func(nalpha, a_bb, npara, para, F_bb);   /* est. for total mean */
  }

  /* in case of more than one replica calculate weighed mean of F_b's with weights n_r */
  if(nreplica > 1) {
    WEIGHEDMEAN(F_b, nreplica, F_bw, n_r);
    /* test */
    fprintf(stdout, "[uwerr] F_bw = %18.16e\n", *F_bw);
  }
  fprintf(stdout, "[uwerr]: have estimators ready\n");

  /***********************************************
   * calculate projection of data and mean value *
   ***********************************************/
  if(ipo>0 && func==NULL) {
    a_proj = *(data + ipo - 1);   /* data is projected to itself in case of prim. observable */
    a_bb_proj = *F_bb;   /* projected mean is total mean */
  }
  else if (ipo==0 && func!=NULL) {
    f_alpha = (double *)calloc(nalpha, sizeof(double));
    h_alpha = (double *)calloc(nalpha, sizeof(double));
    m_alpha = (double *)calloc(ndata, sizeof(double));
    a_proj = (double *)calloc(ndata, sizeof(double));
    /* calculate derivatives of func with respect to A_alpha
     * via a symmetric difference quotient with step h_alpha = stddev */
    for(i=0; i<nalpha; i++) {   /* loop over all prim. observables */
      SET_TO(h_alpha, nalpha, 0.0);
      STDDEV(*(data+i), ndata, h_alpha+i);
      /* test */
      fprintf(stdout, "[uwerr] halpha = %18.16e\n", *(h_alpha+i));
      if(*(h_alpha+i)==0.0) {
        fprintf(stdout, "[uwerr] Warning: no fluctuation in primary observable %d\n", i);
        *(f_alpha + i) = 0.0;
      }
      else {
        ADD_ASSIGN(m_alpha, a_bb, h_alpha, nalpha);
        func(nalpha, m_alpha, npara, para, &func_res);
        *(f_alpha+i) = func_res;
        SUB_ASSIGN(m_alpha, a_bb, h_alpha, nalpha);
        func(nalpha, m_alpha, npara, para, &func_res);
        *(f_alpha+i) -= func_res;
        *(f_alpha+i) = *(f_alpha+i) / (2.0 * *(h_alpha+i));
      }
    }
    /* project the data onto the gradient direction */
    SET_TO(a_proj, ndata, 0.0);
    a_bb_proj = 0.0;
    for(i=0; i<nalpha; i++) {
      for(n=0; n<ndata; n++) {
        *(a_proj + n) = *(a_proj + n) + ( *(*(data+i)+n) ) * ( *(f_alpha+i) );
      }
      a_bb_proj = a_bb_proj + *(a_bb+i) * (*(f_alpha+i));
    }
    free(m_alpha); free(f_alpha); free(h_alpha);
    for(n=0; n<nreplica; n++) {
      free(*(a_b+n));
    }
    free(a_b); free(a_bb);
  }
  fprintf(stdout, "[uwerr]: have projected data ready\n");

  /**********************************************************************
   * calculate error, error of the error; automatic windowing condition *
   **********************************************************************/
  /* (1) Gamma_F(t), t=0,...,Wmax */
  SET_TO(Gamma_F, Wmax, 0.0);
  SET_TO(tau_int, Wmax, 0.0);
  for(i=0,v_Fbb=0.0; i<ndata; i++) {
    v_Fbb = v_Fbb + SQR( (*(a_proj+i) - a_bb_proj) );
  }
  v_Fbb /= (double)ndata;
  C_F = v_Fbb;
  *Gamma_F = v_Fbb;
  /* test */
  fprintf(stdout, "[uwerr] a_bb_proj = %18.16e\n", a_bb_proj);
  fprintf(stdout, "[uwerr] Gamma_F(%1d) = %18.16e\n", 0, *Gamma_F);
  if (*Gamma_F==0.0) {
    /* Degenerate data: write a sentinel result row and bail out. */
    fprintf(stderr, "[uwerr] ERROR, no fluctuations; return\n");
    strcpy(filename, obsname);
    strcat(filename,"_uwerr");
    ofs = fopen(filename, append);
    if ((void*)ofs==NULL) {
      fprintf(stderr, "[uwerr] Could not open file %s\n", filename);
      return(1);
    }
    fprintf(ofs, "%d\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t"
            "%18.16e\t%18.16e\n", label, *F_bb, 0.0, 0.0, 0.0,
            0.0, -1.0, 0.0, 0.0);
    if (fclose(ofs)!=0) {
      fprintf(stderr, "[uwerr] Could not close file %s\n", filename);
      return(1);
    }
    return(-5);
  }
  *tau_int = 0.5;
  for(W=1; W<Wmax-1; W++) {
    /* calculate Gamma_F(W): autocorrelation at lag W, summed per replica
     * so lag pairs never straddle a replica boundary */
    data_ptr = a_proj;
    for(n=0; n<nreplica; n++) {
      for(i=0; i<(*(n_r+n)-W); i++) {
        *(Gamma_F+W) += (*(data_ptr+i) - a_bb_proj) * (*(data_ptr+i+W) - a_bb_proj);
      }
      data_ptr = data_ptr + *(n_r+n);
    }
    *(Gamma_F+W) = *(Gamma_F+W) / (double)(ndata-nreplica*W);
    /* test */
    fprintf(stdout, "[uwerr] Gamma_F(%d) = %18.16e\n", W, *(Gamma_F+W));
    C_F = C_F + 2.0 * *(Gamma_F+W);
    *tau_int = C_F / (2.0*v_Fbb);
    if(*tau_int < 0.5) {
      fprintf(stdout, "[uwerr] Warning: tau_int < 0.5; tau set to %f\n", TINY);
      tau = TINY;
    }
    else {
      tau = s_tau / log( (*tau_int+0.5) / (*tau_int-0.5) );
    }
    /* test */
    fprintf(stdout, "[uwerr] tau(%d) = %18.16e\n", W, tau);
    /* automatic windowing condition: stop at the first W where the
     * statistical noise outweighs the remaining autocorrelation signal */
    if( exp(-(double)W / tau) - tau / sqrt((double)(W*ndata)) < 0.0 ) {
      Wopt = W;
      /* test */
      fprintf(stdout, "[uwerr] Wopt = %d\n", Wopt);
      break;
    }
  }
  if(W==Wmax-1) {
    fprintf(stdout, "[uwerr] windowing condition failed after W = %d\n", W);
    return(1);
  }
  else {
    /* C_Fopt = Gamma(0) + 2*sum_{W=1..Wopt} Gamma(W), then apply the
     * standard bias correction to Gamma and recompute */
    SUM(Gamma_F+1, Wopt, &C_Fopt);
    C_Fopt = 2.0 * C_Fopt + *Gamma_F;
    /* test */
    fprintf(stdout, "[uwerr] before: C_Fopt = %18.16e\n", C_Fopt);
    for(W=0; W<=Wopt; W++) {
      *(Gamma_F+W) = *(Gamma_F+W) + C_Fopt/((double)ndata);
    }
    SUM(Gamma_F+1, Wopt, &C_Fopt);
    C_Fopt = 2.0 * C_Fopt + *Gamma_F;
    /* test */
    fprintf(stdout, "[uwerr] after: C_Fopt = %18.16e\n", C_Fopt);
    v_Fbb = *Gamma_F;
    *tau_int = 0.5*v_Fbb;
    for(W=1; W<=Wopt; W++) *(tau_int+W) = *(tau_int+W-1) + *(Gamma_F+W);
    for(W=0; W<=Wopt; W++) *(tau_int+W) /= v_Fbb;
  }
  fprintf(stdout, "[uwerr]: perfomed automatic windowing\n");

  /***********************************
   * bias cancellation of mean value *
   ***********************************/
  if(nreplica > 1 ) {
    *F_bb = ( (double)nreplica * *F_bb - *F_bw ) / ((double)(nreplica-1));
  }
  fprintf(stdout, "[uwerr]: leading bias cancelled\n");

  /**************************
   * calculation of results *
   **************************/
  value = *F_bb;
  dvalue = sqrt(C_Fopt/((double)ndata));
  ddvalue = dvalue * sqrt((Wopt + 0.5)/ndata);
  tau_intbb = C_Fopt / (2.0 * v_Fbb);
  dtau_intbb = sqrt( 2.0 * ( 2.0*Wopt-3.0*tau_intbb + 1 +
      1/(4.0*tau_intbb))/((double)ndata) ) * tau_intbb;
  dv_Fbb = sqrt(2.0*(tau_intbb + 1/(4.0*tau_intbb)) / (double)ndata) * v_Fbb;

  /*******************************************
   * consistency checks in case nreplica > 0 *
   *******************************************/
  if(nreplica>1) {
    /* (1) calculate Q-value <---> determine goodness of the fit F_b(n) = F_bw = const. */
    chisqr = 0.0;
    for(n=0; n<nreplica; n++) {
      chisqr = chisqr + SQR( *(F_b+n) - *F_bw ) / (C_Fopt/(double)(*(n_r+n)));
    }
    /* test */
    fprintf(stdout, "[uwerr]: chisqr = %18.16e\n", chisqr);
    fprintf(stdout, "[uwerr]: n = %d \n", (nreplica-1)/2);
    Qval = 1.0 - incomp_gamma(chisqr/2.0, (nreplica-1)/2);
    /* (2) inspection of p_r's defined below in a histogramm */
    p_r = (double *)calloc(nreplica, sizeof(double));
    for(n=0; n<nreplica; n++) {
      *(p_r+n) = (*(F_b+n) - *F_bw) /
          (dvalue*sqrt(((double)ndata/(double)(*(n_r+n)))-1.0));
    }
    ARITHMEAN(p_r, nreplica, &p_r_mean);
    VAR(p_r, nreplica, &p_r_var);
    k = 1 + (int)rint(log((double)nreplica)/log(2.0));
    strcpy(filename, obsname);
    strcat(filename, "_uwerr_hist");
    ofs = fopen(filename, append);
    fprintf(ofs, "# mean of p_r's:\tp_r_mean = %8.6e\n"
            "# variance of p_r's:\tp_r_var = %8.6e\n",
            p_r_mean, p_r_var);
    strcpy(format, "%%dst p_r(%2d) = %18.16e\n");
    for(n=0; n<nreplica; n++) {
      fprintf(ofs, format, n, *(p_r+n));
    }
    if(k<3) /* not enough classes for a meaningful histogramm */
    {
      fprintf(ofs, "# [uwerr]: k = %d is to small\n", k);
    }
    else {
      ABS_MAX_DBL(p_r, nreplica, &lobd);   /* max{|p_r's|} */
      lobd = lobd *(1.0+TINY);
      delta = 2.0*lobd/(double)k;   /* expected distribution around mean=0 */
      lobd = -lobd;   /* lower boundary of abscissa */
      bins = (double *)calloc(k, sizeof(double));   /* contains number of entries */
      SET_TO(bins, k, 0.0);   /* for each class */
      for(n=0; n<nreplica; n++)   /* inc. bins(i) by 1, if p_r(n) is in class i */
      {
        i = (int)((*(p_r+n) - lobd)/delta);
        *(bins + i) = *(bins + i) + 1.0;
      }
      fprintf(ofs, "# number of entries:\tnreplica = %d\n"
              "# number of classes:\tk = %d\n"
              "# lower boundary:\tlobd = %8.6e\n"
              "# bin width:\tdelta = %8.6e\n",
              nreplica, k, lobd, delta);
      strcpy(format, "%%hst %18.16e\t%18.16e\n");
      for(i=0; i<k; i++) {
        fprintf(ofs, format, lobd+((double)i+0.5)*delta, *(bins+i));
      }
    }
    fclose(ofs);
  }

  /**************************
   * output                 *
   **************************/
  /* (1) value, dvalue, ...
   * NOTE(review): Qval is only assigned when nreplica > 1 but is printed
   * unconditionally below — uninitialized read for single-replica runs;
   * confirm and initialize. */
  strcpy(filename, obsname);
  strcat(filename,"_uwerr");
  ofs = fopen(filename, append);
  if ((void*)ofs==NULL) {
    fprintf(stderr, "[uwerr] Could not open file %s\n", filename);
    return(1);
  }
  strcpy(format, "%d\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t%18.16e\t%18.16e\n");
  fprintf(ofs, format, label, value, dvalue, ddvalue, tau_intbb, dtau_intbb, Qval, v_Fbb, dv_Fbb);
  if (fclose(ofs)!=0) {
    fprintf(stderr, "[uwerr] Could not close file %s\n", filename);
    return(1);
  }
  /* (2) Gamma_F */
  strcpy(filename, obsname);
  strcat(filename, "_uwerr_Gamma");
  ofs = fopen(filename, append);
  if ((void*)ofs==NULL) {
    fprintf(stderr, "[uwerr] Could not open file %s\n", filename);
    return(1);
  }
  strcpy(format, "%d\t%18.16e\n");
  fprintf(ofs, "# obsname = %s \t ipo = %d", obsname, ipo);
  for(W=0; W<=Wopt; W++) {
    fprintf(ofs, format, W, *(Gamma_F+W));
  }
  if (fclose(ofs)!=0) {
    fprintf(stderr, "[uwerr] Could not close file %s\n", filename);
    return(1);
  }
  /* (3) tau_int */
  strcpy(filename, obsname);
  strcat(filename, "_uwerr_tauint");
  ofs = fopen(filename, append);
  fprintf(ofs, "# obsname = %s \t ipo = %d", obsname, ipo);
  for(W=0; W<=Wopt; W++) {
    fprintf(ofs, format, W, *(tau_int+W));
  }
  fclose(ofs);
  fprintf(stdout, "[uwerr]: output written\n");

  /*****************************
   * free allocated disk space *
   *****************************/
  free(F_b); free(F_bb); free(F_bw);
  free(Gamma_F); free(tau_int);
  if(ipo==0 && func!=NULL) {
    free(a_proj);
  }
  return(0);
}
/*
 * Evaluate a token queue (RPN order: values precede their operators),
 * maintaining an operand stack. Operator results are written into 'token1',
 * whose radix/type are derived from the argument tokens. Returns the result
 * queue, or NULL on a missing/invalid argument.
 */
struct zpctoken * zpceval(struct zpctoken *srcqueue)
{
    struct zpctoken *token = srcqueue;
    struct zpctoken *queue = NULL;
    struct zpctoken *tail = NULL;
    struct zpctoken *stack = NULL;
    struct zpctoken *token1 = token;
    struct zpctoken *token2;
    struct zpctoken *arg1;
    struct zpctoken *arg2;
    int64_t dest;
    zpcop_t *func;
    long radix;

    while (token) {
        /* save the successor: the current token may be relinked below */
        token2 = token->next;
        if (zpcisvalue(token)) {
            zpcpushtoken(token, &stack);
        } else if (zpcisoper(token)) {
            if (!token1) {
                fprintf(stderr, "missing argument 1\n");

                return NULL;
            }
            arg2 = NULL;
            /* binary operators pop their second operand from the stack */
            if (zpcopnargtab[token->type] == 2) {
                arg2 = zpcpoptoken(&stack);
                if (!arg2) {
                    fprintf(stderr, "missing argument 2\n");

                    return NULL;
                }
            }
            arg1 = token1;
            fprintf(stderr, "ARGS:\n");
            if (arg1) {
                zpcprinttoken(arg1);
            }
            if (arg2) {
                zpcprinttoken(arg2);
            }
            /* validate arity: case 2 deliberately falls through to case 1 */
            switch (zpcopnargtab[token->type]) {
                case 2:
                    if (!arg2) {
                        fprintf(stderr, "invalid argument 2\n");

                        return NULL;
                    }
                case 1:
                    if (!arg1) {
                        fprintf(stderr, "invalid argument 1\n");

                        return NULL;
                    }

                    break;
            }
            func = zpcevaltab[token->type];
            if (func) {
                if (arg2) {
                    /* binary: note the (arg2, arg1) argument order */
                    dest = func(arg2, arg1);
                    /* result radix: widest explicit base wins (16 > 8 > 2 > 10) */
                    if (arg1->radix == 16 || arg2->radix == 16) {
                        token1->radix = 16;
                    } else if (arg1->radix == 8 || arg2->radix == 8) {
                        token1->radix = 8;
                    } else if (arg1->radix == 2 || arg2->radix == 2) {
                        token1->radix = 2;
                    } else {
                        token1->radix = 10;
                    }
#if (SMARTTYPES)
                    token1->type = arg1->type;
                    token1->flags = arg1->flags;
                    token1->sign = arg1->sign;
#endif
                } else if (arg1) {
                    /* unary: arg2 is NULL here by construction */
                    dest = func(arg1, arg2);
                    token1->radix = arg1->radix;
#if (SMARTTYPES)
                    /* NOTE(review): arg2 is NULL on this path, so these reads
                     * dereference a null pointer — confirm and fix upstream. */
                    token1->type = arg2->type;
                    token1->flags = arg2->flags;
                    token1->sign = arg2->sign;
#endif
                }
                token1->data.ui64.i64 = dest;
                if (arg1->type == ZPCINT64 || arg1->type == ZPCUINT64) {
                    radix = token1->radix;
                    if (!radix) {
                        radix = zpcradix;   /* fall back to the global default radix */
                    }
                    token1->radix = radix;
                    fprintf(stderr, "RADIX: %ld\n", radix);
                    zpcprintstr64(token1, token1->data.ui64.u64, radix);
                }
            }
        }
        token = token2;
    }
    /* the accumulated result token becomes the output queue */
    zpcqueuetoken(token1, &queue, &tail);

    return queue;
}
/* Entry point: hands a three-element int array to func() (the array decays
 * to int*) and propagates func()'s int result as the process exit status. */
int main()
{
    int samples[] = { 7, 8, 9 };
    const int status = func(samples);
    return status;
}
/* Piecewise cumulative Gauss-Legendre integral: the [0, val] range is split
 * at SEG1/SEG2/SEG3, with BASE1..BASE3 holding the precomputed integrals up
 * to each split point so only the final partial segment is integrated. */
double integrate(double val) {
	double out;

	if (val <= SEG1)
		out = gauss_legendre(0., val);
	else if (val <= SEG2)
		out = BASE1 + gauss_legendre(SEG1, val);
	else if (val <= SEG3)
		out = BASE2 + gauss_legendre(SEG2, val);
	else
		out = BASE3 + gauss_legendre(SEG3, val);
	return out;
}
/* Forward projection, spheroid. FORWARD() is presumably the PROJ macro that
 * opens the function and declares lp/xy/P — confirm against the macro. */
FORWARD(s_forward); /* spheroid */
	xy.x = lp.lam * func(lp.phi);
	xy.y = integrate(fabs(lp.phi));
	/* the integral is computed on |phi|; restore the sign afterwards */
	if (lp.phi < 0.) xy.y = -xy.y;
	return (xy);
}
#ifdef PROJ_HAVE_GSL
/* GSL-based variant: adaptive quadrature replaces the segment table. */
FORWARD(s_forwardg); /* numerical integration n <> 0.5 */
	double error;

	gsl_integration_qags(&P->func, 0., fabs(lp.phi), 1e-7, 1e-8, GSL_WORK, P->work, &xy.y, &error);
	xy.x = lp.lam * pow(cos(lp.phi), P->n[1]);
	if (lp.phi < 0.) xy.y = -xy.y;
	return (xy);
/*
 * Run FUNC once on OBJ. ARG is interpreted as a pointer to the mrb_value
 * passed as the callback's third argument; the recursion flag is always 0.
 */
mrb_value
mrb_exec_recursive(mrb_state *mrb, mrb_value (*func) (mrb_state *, mrb_value, mrb_value, int), mrb_value obj, void *arg)
{
  mrb_value extra;

  extra = *(mrb_value*)arg;
  return func(mrb, obj, extra, 0);
}
void OperationLaplacePrewavelet::upOpDim(SGPP::base::DataVector& alpha, SGPP::base::DataVector& result, size_t dim) {
  // Apply the prewavelet Laplace up-gradient operator along dimension `dim`,
  // sweeping `alpha` into `result` over this operation's grid storage.
  LaplaceUpGradientPrewavelet upGradient(this->storage);
  SGPP::base::sweep<LaplaceUpGradientPrewavelet> dimSweep(upGradient, this->storage);
  dimSweep.sweep1D(alpha, result, dim);
}
/* Entry point: delegates all work to func(); exits with status 0. */
int main()
{
    func();
    return 0;
}
// Forward to func(), translating the optional containers into raw double
// pointers: a null container becomes a NULL pointer, otherwise the address
// of its first element is passed through.
void operator()(int required, required2_type required2, opt1_type* opt1 = NULL, opt2_type* opt2 = NULL) const
{
    double* first_opt = NULL;
    if (opt1)
        first_opt = &opt1->front();

    double* second_opt = NULL;
    if (opt2)
        second_opt = &opt2->front();

    func(required, &required2[0], first_opt, second_opt);
}
/* Entry point: chains three func() calls, feeding each returned pointer
 * into the next call, then exits with the final pointee modulo 128. */
int main()
{
    int seed = 3;
    int *first = func(4, &seed);
    int *second = func(seed, first);
    return *func(5, second) % 128;
}
/**
 * Wrap an already-connected TCP socket in a TLSv1 client session.
 *
 * Performs the SSL handshake on @sk (retrying while OpenSSL reports
 * WANT_READ/WANT_WRITE), verifies the peer certificate, sets up buffered
 * read/write BIOs, and invokes @func once with the new connection.
 *
 * @sk:        connected socket file descriptor.
 * @func:      optional callback invoked after the connection is established.
 * @user_data: opaque pointer handed to @func.
 *
 * Return: the new HybridSslConnection, or NULL on failure.
 *
 * Fix: all failure paths now release the SSL_CTX and SSL objects that were
 * already created — the original leaked them on every error return.
 */
HybridSslConnection* hybrid_ssl_connect_with_fd(gint sk, ssl_callback func, gpointer user_data)
{
    gint l;
    SSL *ssl;
    BIO *sbio;
    BIO *buf_io;
    BIO *ssl_bio;
    SSL_CTX *ssl_ctx;
    HybridSslConnection *ssl_conn;

    SSL_load_error_strings();
    SSL_library_init();

    if (!(ssl_ctx = SSL_CTX_new(TLSv1_client_method()))) {
        hybrid_debug_error("ssl", "initialize SSL CTX: %s",
                           ERR_reason_error_string(ERR_get_error()));
        return NULL;
    }

    if (!(ssl = ssl_new_with_certs(ssl_ctx))) {
        /* fix: don't leak the context when SSL creation fails */
        SSL_CTX_free(ssl_ctx);
        return NULL;
    }

    if (!SSL_set_fd(ssl, sk)) {
        hybrid_debug_error("ssl", "add ssl to tcp socket:%s",
                           ERR_reason_error_string(ERR_get_error()));
        goto bad_ssl;
    }

    sbio = BIO_new(BIO_s_socket());
    BIO_set_fd(sbio, sk, BIO_NOCLOSE);
    /* SSL_set_bio transfers ownership of sbio to ssl; SSL_free releases it. */
    SSL_set_bio(ssl, sbio, sbio);
    SSL_set_connect_state(ssl);

reconnect:
    l = SSL_connect(ssl);

    switch (SSL_get_error(ssl, l)) {
    case SSL_ERROR_NONE:
        goto ssl_conn_sk_ok;
    case SSL_ERROR_WANT_WRITE:
    case SSL_ERROR_WANT_READ:
        /* non-blocking handshake in progress: back off briefly and retry */
        usleep(100);
        goto reconnect;
    case SSL_ERROR_SYSCALL:
    case SSL_ERROR_WANT_X509_LOOKUP:
    case SSL_ERROR_ZERO_RETURN:
    case SSL_ERROR_SSL:
    default:
        hybrid_debug_error("ssl", "ssl hand-shake error:%s",
                           ERR_reason_error_string(ERR_get_error()));
        goto bad_ssl;
    }

ssl_conn_sk_ok:
    if (HYBRID_OK != ssl_verify_certs(ssl)) {
        goto bad_ssl;
    }

    SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY);

    buf_io = BIO_new(BIO_f_buffer());
    ssl_bio = BIO_new(BIO_f_ssl());

    BIO_set_ssl(ssl_bio, ssl, BIO_NOCLOSE);
    BIO_push(buf_io, ssl_bio);

    ssl_conn = g_new0(HybridSslConnection, 1);

    ssl_conn->sk = sk;
    ssl_conn->ssl = ssl;
    ssl_conn->ssl_ctx = ssl_ctx;
    ssl_conn->conn_cb = func;
    ssl_conn->conn_data = user_data;
    ssl_conn->rbio = buf_io;
    ssl_conn->wbio = sbio;

    if (func) {
        func(ssl_conn, user_data);
    }

    return ssl_conn;

bad_ssl:
    /* fix: release the session (and any BIO it owns) plus the context */
    SSL_free(ssl);
    SSL_CTX_free(ssl_ctx);
    return NULL;
}
/* Entry point: invokes func() three times in sequence; argc/argv are
 * accepted but unused. Exits with status 0. */
int main(int argc, char *argv[])
{
    int i;

    for (i = 0; i < 3; ++i) {
        func();
    }
    return 0;
}
/**
 * func - Recursively sum the integers 1..n.
 * @n: upper bound of the sum.
 *
 * Fixes: declares the return type explicitly (the original relied on
 * K&R implicit int, invalid in C99/C++), and guards n < 1, which
 * previously recursed without termination.
 *
 * Return: n*(n+1)/2 for n >= 1, 0 for n < 1.
 */
int func(int n)
{
	if (n <= 0)
		return 0;
	if (n == 1)
		return 1;
	return n + func(n - 1);
}
/*
 * Nearest-neighbour ("sample") scaled blit of a source region onto a
 * destination region, clipped to dst_clip_*, with optional color
 * multiplication (mul_col), an optional 8-bit mask image (mask_ie at
 * mask_x/mask_y), and a configurable render operation. Degenerates to a
 * straight row-by-row blend when source and destination regions are the
 * same size.
 */
EAPI void
evas_common_scale_rgba_sample_draw(RGBA_Image *src, RGBA_Image *dst, int dst_clip_x, int dst_clip_y, int dst_clip_w, int dst_clip_h, DATA32 mul_col, int render_op, int src_region_x, int src_region_y, int src_region_w, int src_region_h, int dst_region_x, int dst_region_y, int dst_region_w, int dst_region_h, RGBA_Image *mask_ie, int mask_x, int mask_y)
{
   int x, y;
   int *lin_ptr;
   DATA32 *buf, *dptr;
   DATA32 **row_ptr;
   DATA32 *ptr, *dst_ptr, *src_data, *dst_data;
   DATA8 *mask;
   int src_w, src_h, dst_w, dst_h;
   RGBA_Gfx_Func func, func2 = NULL;

   /* Reject missing pixel data and regions that miss either image entirely. */
   if ((!src->image.data) || (!dst->image.data)) return;
   if (!(RECTS_INTERSECT(dst_region_x, dst_region_y, dst_region_w, dst_region_h, 0, 0, dst->cache_entry.w, dst->cache_entry.h)))
     return;
   if (!(RECTS_INTERSECT(src_region_x, src_region_y, src_region_w, src_region_h, 0, 0, src->cache_entry.w, src->cache_entry.h)))
     return;
   if ((src_region_w <= 0) || (src_region_h <= 0) ||
       (dst_region_w <= 0) || (dst_region_h <= 0))
     return;

   src_w = src->cache_entry.w;
   if (src_region_x >= src_w) return;
   src_h = src->cache_entry.h;
   if (src_region_y >= src_h) return;
   dst_w = dst->cache_entry.w;
   dst_h = dst->cache_entry.h;
   src_data = src->image.data;
   dst_data = dst->image.data;

   /* sanitise clip x: clamp the clip rect to the destination surface and to
    * the destination region */
   if (dst_clip_x < 0)
     {
        dst_clip_w += dst_clip_x;
        dst_clip_x = 0;
     }
   if ((dst_clip_x + dst_clip_w) > dst_w)
     dst_clip_w = dst_w - dst_clip_x;
   if (dst_clip_x < dst_region_x)
     {
        dst_clip_w += dst_clip_x - dst_region_x;
        dst_clip_x = dst_region_x;
     }
   if (dst_clip_x >= dst_w)
     return;
   if ((dst_clip_x + dst_clip_w) > (dst_region_x + dst_region_w))
     dst_clip_w = dst_region_x + dst_region_w - dst_clip_x;
   if (dst_clip_w <= 0)
     return;

   /* sanitise clip y: same treatment for the vertical axis */
   if (dst_clip_y < 0)
     {
        dst_clip_h += dst_clip_y;
        dst_clip_y = 0;
     }
   if ((dst_clip_y + dst_clip_h) > dst_h)
     dst_clip_h = dst_h - dst_clip_y;
   if (dst_clip_y < dst_region_y)
     {
        dst_clip_h += dst_clip_y - dst_region_y;
        dst_clip_y = dst_region_y;
     }
   if (dst_clip_y >= dst_h)
     return;
   if ((dst_clip_y + dst_clip_h) > (dst_region_y + dst_region_h))
     dst_clip_h = dst_region_y + dst_region_h - dst_clip_y;
   if (dst_clip_h <= 0)
     return;

   /* sanitise region x: fold a negative source x into the destination
    * region (scaled proportionally) and re-clip */
   if (src_region_x < 0)
     {
        dst_region_x -= (src_region_x * dst_region_w) / src_region_w;
        dst_region_w += (src_region_x * dst_region_w) / src_region_w;
        src_region_w += src_region_x;
        src_region_x = 0;
        if (dst_clip_x < dst_region_x)
          {
             dst_clip_w += (dst_clip_x - dst_region_x);
             dst_clip_x = dst_region_x;
          }
     }
   if ((dst_clip_x + dst_clip_w) > dst_w)
     dst_clip_w = dst_w - dst_clip_x;
   if (dst_clip_w <= 0)
     return;
   if ((src_region_x + src_region_w) > src_w)
     {
        dst_region_w = (dst_region_w * (src_w - src_region_x)) / (src_region_w);
        src_region_w = src_w - src_region_x;
     }
   if ((dst_region_w <= 0) || (src_region_w <= 0))
     return;

   /* sanitise region y: same folding for the vertical axis */
   if (src_region_y < 0)
     {
        dst_region_y -= (src_region_y * dst_region_h) / src_region_h;
        dst_region_h += (src_region_y * dst_region_h) / src_region_h;
        src_region_h += src_region_y;
        src_region_y = 0;
        if (dst_clip_y < dst_region_y)
          {
             dst_clip_h += (dst_clip_y - dst_region_y);
             dst_clip_y = dst_region_y;
          }
     }
   if ((dst_clip_y + dst_clip_h) > dst_h)
     dst_clip_h = dst_h - dst_clip_y;
   if (dst_clip_h <= 0)
     return;
   if ((src_region_y + src_region_h) > src_h)
     {
        dst_region_h = (dst_region_h * (src_h - src_region_y)) / (src_region_h);
        src_region_h = src_h - src_region_y;
     }
   if ((dst_region_h <= 0) || (src_region_h <= 0))
     return;

   /* figure out dst jump */
   //dst_jump = dst_w - dst_clip_w;

   /* figure out dest start ptr */
   dst_ptr = dst_data + dst_clip_x + (dst_clip_y * dst_w);

   /* Pick the span-blending function matching the surface flags, color
    * multiplier and render op; with a mask, func blends through the mask and
    * func2 (when mul_col is set) pre-multiplies the span into a scratch
    * buffer first. */
   if (!mask_ie)
     {
        if (mul_col != 0xffffffff)
          func = evas_common_gfx_func_composite_pixel_color_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, mul_col, dst->cache_entry.flags.alpha, dst_clip_w, render_op);
        else
          func = evas_common_gfx_func_composite_pixel_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dst->cache_entry.flags.alpha, dst_clip_w, render_op);
     }
   else
     {
        if (mul_col != 0xffffffff)
          {
             func = evas_common_gfx_func_composite_pixel_mask_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dst->cache_entry.flags.alpha, dst_clip_w, render_op);
             func2 = evas_common_gfx_func_composite_pixel_color_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, mul_col, dst->cache_entry.flags.alpha, dst_clip_w, EVAS_RENDER_COPY);
          }
        else
          func = evas_common_gfx_func_composite_pixel_mask_span_get(src->cache_entry.flags.alpha, src->cache_entry.flags.alpha_sparse, dst->cache_entry.flags.alpha, dst_clip_w, render_op);

        // Adjust clipping info so the clip rect stays inside the mask image
        if (EINA_UNLIKELY((dst_clip_x - mask_x) < 0))
          dst_clip_x = mask_x;
        if (EINA_UNLIKELY((dst_clip_y - mask_y) < 0))
          dst_clip_y = mask_y;
        if (EINA_UNLIKELY((dst_clip_x - mask_x + dst_clip_w) > (int)mask_ie->cache_entry.w))
          dst_clip_w = mask_ie->cache_entry.w - dst_clip_x + mask_x;
        if (EINA_UNLIKELY((dst_clip_y - mask_y + dst_clip_h) > (int)mask_ie->cache_entry.h))
          dst_clip_h = mask_ie->cache_entry.h - dst_clip_y + mask_y;
     }

   if ((dst_region_w == src_region_w) && (dst_region_h == src_region_h))
     {
        /* 1:1 path — no scaling, walk source and destination rows in step */
        ptr = src_data + (((dst_clip_y - dst_region_y) + src_region_y) * src_w) + ((dst_clip_x - dst_region_x) + src_region_x);

        /* image masking */
        if (mask_ie)
          {
             if (mul_col != 0xffffffff)
               buf = alloca(dst_clip_w * sizeof(DATA32));

             for (y = 0; y < dst_clip_h; y++)
               {
                  mask = mask_ie->image.data8
                     + ((dst_clip_y - mask_y + y) * mask_ie->cache_entry.w)
                     + (dst_clip_x - mask_x);

                  /* * blend here [clip_w *] ptr -> dst_ptr * */
                  if (mul_col != 0xffffffff)
                    {
                       /* colorize into the scratch span, then mask-blend it */
                       func2(ptr, NULL, mul_col, buf, dst_clip_w);
                       func(buf, mask, 0, dst_ptr, dst_clip_w);
                    }
                  else
                    func(ptr, mask, 0, dst_ptr, dst_clip_w);
                  ptr += src_w;
                  dst_ptr += dst_w;
               }
          }
        else
          {
             for (y = 0; y < dst_clip_h; y++)
               {
                  /* * blend here [clip_w *] ptr -> dst_ptr * */
                  func(ptr, NULL, mul_col, dst_ptr, dst_clip_w);
                  ptr += src_w;
                  dst_ptr += dst_w;
               }
          }
     }
   else
     {
        /* scaling path: precompute per-column source offsets and per-row
         * source row pointers, then gather each output span into a buffer */
        /* allocate scale lookup tables */
        lin_ptr = alloca(dst_clip_w * sizeof(int));
        row_ptr = alloca(dst_clip_h * sizeof(DATA32 *));

        /* fill scale tables */
        for (x = 0; x < dst_clip_w; x++)
          lin_ptr[x] = (((x + dst_clip_x - dst_region_x) * src_region_w) / dst_region_w) + src_region_x;
        for (y = 0; y < dst_clip_h; y++)
          row_ptr[y] = src_data + (((((y + dst_clip_y - dst_region_y) * src_region_h) / dst_region_h) + src_region_y) * src_w);
        /* scale to dst */
        dptr = dst_ptr;

        /* a scanline buffer */
        buf = alloca(dst_clip_w * sizeof(DATA32));

        /* image masking */
        if (mask_ie)
          {
             for (y = 0; y < dst_clip_h; y++)
               {
                  dst_ptr = buf;
                  mask = mask_ie->image.data8
                     + ((dst_clip_y - mask_y + y) * mask_ie->cache_entry.w)
                     + (dst_clip_x - mask_x);

                  /* gather the nearest-neighbour samples for this scanline */
                  for (x = 0; x < dst_clip_w; x++)
                    {
                       ptr = row_ptr[y] + lin_ptr[x];
                       *dst_ptr = *ptr;
                       dst_ptr++;
                    }

                  /* * blend here [clip_w *] buf -> dptr * */
                  if (mul_col != 0xffffffff) func2(buf, NULL, mul_col, buf, dst_clip_w);
                  func(buf, mask, 0, dptr, dst_clip_w);
                  dptr += dst_w;
               }
          }
        else
          {
             for (y = 0; y < dst_clip_h; y++)
               {
                  dst_ptr = buf;
                  for (x = 0; x < dst_clip_w; x++)
                    {
                       ptr = row_ptr[y] + lin_ptr[x];
                       *dst_ptr = *ptr;
                       dst_ptr++;
                    }

                  /* * blend here [clip_w *] buf -> dptr * */
                  func(buf, NULL, mul_col, dptr, dst_clip_w);
                  dptr += dst_w;
               }
          }
     }
}
int SDL_BlendFillRects(SDL_Surface * dst, const SDL_Rect * rects, int count, SDL_BlendMode blendMode, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { SDL_Rect rect; int i; int (*func)(SDL_Surface * dst, const SDL_Rect * rect, SDL_BlendMode blendMode, Uint8 r, Uint8 g, Uint8 b, Uint8 a) = NULL; int status = 0; if (!dst) { return SDL_SetError("Passed NULL destination surface"); } /* This function doesn't work on surfaces < 8 bpp */ if (dst->format->BitsPerPixel < 8) { return SDL_SetError("SDL_BlendFillRects(): Unsupported surface format"); } if (blendMode == SDL_BLENDMODE_BLEND || blendMode == SDL_BLENDMODE_ADD) { r = DRAW_MUL(r, a); g = DRAW_MUL(g, a); b = DRAW_MUL(b, a); } /* FIXME: Does this function pointer slow things down significantly? */ switch (dst->format->BitsPerPixel) { case 15: switch (dst->format->Rmask) { case 0x7C00: func = SDL_BlendFillRect_RGB555; } break; case 16: switch (dst->format->Rmask) { case 0xF800: func = SDL_BlendFillRect_RGB565; } break; case 32: switch (dst->format->Rmask) { case 0x00FF0000: if (!dst->format->Amask) { func = SDL_BlendFillRect_RGB888; } else { func = SDL_BlendFillRect_ARGB8888; } break; } break; default: break; } if (!func) { if (!dst->format->Amask) { func = SDL_BlendFillRect_RGB; } else { func = SDL_BlendFillRect_RGBA; } } for (i = 0; i < count; ++i) { /* Perform clipping */ if (!SDL_IntersectRect(&rects[i], &dst->clip_rect, &rect)) { continue; } status = func(dst, &rect, blendMode, r, g, b, a); } return status; }
// DmaExec - Handle a 32-bit write of 'value' into the CHCR register of the DMA
// channel whose register block lives at hardware address 'mem'.
//
// @func:  Channel-specific kick-off routine, invoked only when the write
//         actually starts a transfer and the DMAC is enabled.
// @mem:   Hardware register address of the channel's CHCR (used to derive the
//         channel number and to reach the DMACh register struct).
// @value: The raw 32-bit value being written to CHCR.
//
// If the channel is already running (STR set), only a write that clears STR is
// honored (a force-stop); everything else is ignored. Otherwise the register
// is updated and the transfer is either started, or queued if the DMAC is
// globally disabled/suspended.
static __ri void DmaExec( void (*func)(), u32 mem, u32 value )
{
	// Reinterpret the raw hardware register memory as the channel's register set.
	DMACh& reg = (DMACh&)psHu32(mem);
	// Decode the incoming value through the CHCR bitfield union.
	tDMA_CHCR chcr(value);

	//It's invalid for the hardware to write a DMA while it is active, not without Suspending the DMAC
	if (reg.chcr.STR)
	{
		const uint channel = ChannelNumber(mem);

		//As the manual states "Fields other than STR can only be written to when the DMA is stopped"
		//Also "The DMA may not stop properly just by writing 0 to STR"
		//So the presumption is that STR can be written to (ala force stop the DMA) but nothing else
		//If the developer wishes to alter any of the other fields, it must be done AFTER the STR has been written,
		//it will not work before or during this event.
		if(chcr.STR == 0)
		{
			//DevCon.Warning(L"32bit Force Stopping %s (Current CHCR %x) while DMA active", ChcrName(mem), reg.chcr._u32, chcr._u32);
			// Force-stop: clear only STR; all other fields keep their current values.
			reg.chcr.STR = 0;
			//We need to clear any existing DMA loops that are in progress else they will continue!
			// NOTE(review): channels 1 and 2 additionally clear interrupt/queue
			// bits 10 and 11 — presumably companion interrupt lines tied to
			// those channels; confirm against the DMAC interrupt mapping.
			if(channel == 1)
			{
				cpuClearInt( 10 );
				QueuedDMA._u16 &= ~(1 << 10); //Clear any queued DMA requests for this channel
			}
			else if(channel == 2)
			{
				cpuClearInt( 11 );
				QueuedDMA._u16 &= ~(1 << 11); //Clear any queued DMA requests for this channel
			}

			cpuClearInt( channel );
			QueuedDMA._u16 &= ~(1 << channel); //Clear any queued DMA requests for this channel
		}
		//else DevCon.Warning(L"32bit Attempted to change %s CHCR (Currently %x) with %x while DMA active, ignoring QWC = %x", ChcrName(mem), reg.chcr._u32, chcr._u32, reg.qwc);
		// Any other write while the channel is active is silently ignored.
		return;
	}

	//if(reg.chcr.TAG != chcr.TAG && chcr.MOD == CHAIN_MODE) DevCon.Warning(L"32bit CHCR Tag on %s changed to %x from %x QWC = %x Channel Not Active", ChcrName(mem), chcr.TAG, reg.chcr.TAG, reg.qwc);

	// Channel is stopped: commit the full register write.
	reg.chcr.set(value);

	//Final Fantasy XII sets the DMA Mode to 3 which doesn't exist. On some channels (like SPR) this will break logic completely. so lets assume they mean chain.
	if (reg.chcr.MOD == 0x3)
	{
		static bool warned; //Check if the warning has already been output to console, to prevent constant spam.
		if (!warned)
		{
			DevCon.Warning(L"%s CHCR.MOD set to 3, assuming 1 (chain)", ChcrName(mem));
			warned = true;
		}
		// Coerce the invalid mode 3 to chain mode (1).
		reg.chcr.MOD = 0x1;
	}

	// Start the transfer only if STR is set, the DMAC is globally enabled, and
	// the DMAC is not suspended via the ENABLER register.
	if (reg.chcr.STR && dmacRegs.ctrl.DMAE && !psHu8(DMAC_ENABLER+2))
	{
		func();
	}
	else if(reg.chcr.STR)
	{
		//DevCon.Warning(L"32bit %s DMA Start while DMAC Disabled\n", ChcrName(mem));
		QueuedDMA._u16 |= (1 << ChannelNumber(mem)); //Queue the DMA up to be started then the DMA's are Enabled and or the Suspend is lifted
	} //else QueuedDMA._u16 &~= (1 << ChannelNumber(mem)); //
}
/* test_genericq - generic consistency check for an MPFR operation that takes
 * an mpfr number and an mpq rational.
 *
 * For each precision in [p0, p1] and N random (arg1, arg2, rnd) triples, the
 * operation 'func' is computed once at precision prec+10 (dst_big) and once at
 * the target precision (dst_small). Whenever the high-precision result can be
 * rounded safely to prec, both results must agree, and the returned inexact
 * flag must have a sign consistent with the rounding direction. 'op' is only
 * used to label error messages.
 *
 * Fix: the random denominator passed to mpq_set_ui could be zero, and
 * mpq_canonicalize on a zero denominator is undefined behavior (division by
 * zero) per the GMP manual; a zero denominator is now replaced by 1.
 */
static void
test_genericq (mpfr_prec_t p0, mpfr_prec_t p1, unsigned int N,
               int (*func)(mpfr_ptr, mpfr_srcptr, mpq_srcptr, mpfr_rnd_t),
               const char *op)
{
  mpfr_prec_t prec;
  mpfr_t arg1, dst_big, dst_small, tmp;
  mpq_t arg2;
  mpfr_rnd_t rnd;
  int inexact, compare, compare2;
  unsigned int n;
  unsigned long den;

  mpfr_inits (arg1, dst_big, dst_small, tmp, (mpfr_ptr) 0);
  mpq_init (arg2);

  for (prec = p0; prec <= p1; prec++)
    {
      mpfr_set_prec (arg1, prec);
      mpfr_set_prec (tmp, prec);
      mpfr_set_prec (dst_small, prec);
      for (n = 0; n < N; n++)
        {
          mpfr_urandomb (arg1, RANDS);
          /* A zero denominator would make mpq_canonicalize undefined
             (GMP manual: "the denominator ... must be non-zero"). */
          den = randlimb ();
          if (den == 0)
            den = 1;
          mpq_set_ui (arg2, randlimb (), den);
          mpq_canonicalize (arg2);
          rnd = RND_RAND ();
          /* Reference result at 10 extra bits of precision. */
          mpfr_set_prec (dst_big, prec+10);
          compare = func(dst_big, arg1, arg2, rnd);
          /* Only check when the extra-precision result rounds unambiguously
             to the target precision. */
          if (mpfr_can_round (dst_big, prec+10, rnd, rnd, prec))
            {
              mpfr_set (tmp, dst_big, rnd);
              inexact = func(dst_small, arg1, arg2, rnd);
              if (mpfr_cmp (tmp, dst_small))
                {
                  printf ("Results differ for prec=%u rnd_mode=%s and %s_q:\n"
                          "arg1=",
                          (unsigned) prec, mpfr_print_rnd_mode (rnd), op);
                  mpfr_dump (arg1);
                  printf ("arg2=");
                  mpq_out_str(stdout, 2, arg2);
                  printf ("\ngot      ");
                  mpfr_dump (dst_small);
                  printf ("expected ");
                  mpfr_dump (tmp);
                  printf ("approx  ");
                  mpfr_dump (dst_big);
                  exit (1);
                }
              compare2 = mpfr_cmp (tmp, dst_big);
              /* if rounding to nearest, cannot know the sign of t - f(x)
                 because of composed rounding: y = o(f(x)) and t = o(y) */
              if (compare * compare2 >= 0)
                compare = compare + compare2;
              else
                compare = inexact; /* cannot determine sign(t-f(x)) */
              /* The inexact flag must be zero iff the result is exact, and
                 otherwise agree in sign with the rounding error. */
              if (((inexact == 0) && (compare != 0)) ||
                  ((inexact > 0) && (compare <= 0)) ||
                  ((inexact < 0) && (compare >= 0)))
                {
                  printf ("Wrong inexact flag for rnd=%s and %s_q:\n"
                          "expected %d, got %d",
                          mpfr_print_rnd_mode (rnd), op, compare, inexact);
                  printf ("arg1="); mpfr_dump (arg1);
                  printf ("arg2="); mpq_out_str(stdout, 2, arg2);
                  printf ("\ndstl="); mpfr_dump (dst_big);
                  printf ("dsts="); mpfr_dump (dst_small);
                  printf ("tmp ="); mpfr_dump (tmp);
                  exit (1);
                }
            }
        }
    }

  mpq_clear (arg2);
  mpfr_clears (arg1, dst_big, dst_small, tmp, (mpfr_ptr) 0);
}
/*
 * HTC_RxStuff - Receive bytes into the connection's rx buffer until the
 * completeness callback 'func' reports a full protocol unit, or a limit is
 * hit.
 *
 * @htc:      The HTTP connection; its rx buffer lives in the workspace and is
 *            released (trimmed or rolled back) on every return path.
 * @func:     Callback deciding whether the buffered bytes are complete, need
 *            more data, are junk, or the buffer is still empty.
 * @t1:       If non-NULL, set to the time the first bytes were seen
 *            (must arrive as NaN).
 * @t2:       If non-NULL, set to the completion time.
 * @ti:       Idle deadline, applied only while nothing has been received
 *            (may be NaN to disable).
 * @tn:       Hard deadline for the whole receive.
 * @maxbytes: Upper bound on bytes buffered; capped to the available workspace.
 *
 * Returns one of the HTC_S_* status codes: COMPLETE on success, or
 * OVERFLOW/JUNK/EOF/IDLE/TIMEOUT on the respective failure.
 */
enum htc_status_e
HTC_RxStuff(struct http_conn *htc, htc_complete_f *func,
    vtim_real *t1, vtim_real *t2, vtim_real ti, vtim_real tn,
    int maxbytes)
{
	vtim_dur tmo;
	vtim_real now;
	enum htc_status_e hs;
	ssize_t z;

	/* Structural invariants: valid connection, open fd, reserved
	 * workspace, and rxbuf_b <= rxbuf_e <= end of reservation. */
	CHECK_OBJ_NOTNULL(htc, HTTP_CONN_MAGIC);
	AN(htc->rfd);
	assert(*htc->rfd > 0);
	AN(htc->ws->r);
	AN(htc->rxbuf_b);
	assert(htc->rxbuf_b <= htc->rxbuf_e);
	assert(htc->rxbuf_e <= htc->ws->r);

	AZ(isnan(tn));
	if (t1 != NULL)
		assert(isnan(*t1));

	if (htc->rxbuf_e == htc->ws->r) {
		/* Can't work with a zero size buffer */
		WS_ReleaseP(htc->ws, htc->rxbuf_b);
		return (HTC_S_OVERFLOW);
	}
	z = (htc->ws->r - htc->rxbuf_b);
	if (z < maxbytes)
		maxbytes = z;	/* Cap maxbytes at available WS */

	while (1) {
		now = VTIM_real();
		AZ(htc->pipeline_b);
		AZ(htc->pipeline_e);
		assert(htc->rxbuf_e <= htc->ws->r);

		/* Ask the protocol layer whether what we have is complete. */
		hs = func(htc);
		if (hs == HTC_S_OVERFLOW || hs == HTC_S_JUNK) {
			WS_ReleaseP(htc->ws, htc->rxbuf_b);
			return (hs);
		}
		if (hs == HTC_S_COMPLETE) {
			/* Keep the received bytes: release up to rxbuf_e,
			 * not rxbuf_b as the failure paths do. */
			WS_ReleaseP(htc->ws, htc->rxbuf_e);
			/* Got it, run with it */
			if (t1 != NULL && isnan(*t1))
				*t1 = now;
			if (t2 != NULL)
				*t2 = now;
			return (HTC_S_COMPLETE);
		}
		if (hs == HTC_S_MORE) {
			/* Working on it */
			if (t1 != NULL && isnan(*t1))
				*t1 = now;
		} else if (hs == HTC_S_EMPTY)
			htc->rxbuf_e = htc->rxbuf_b;
		else
			WRONG("htc_status_e");

		/* Use the idle deadline while the buffer is still empty,
		 * otherwise the hard deadline. */
		tmo = tn - now;
		if (!isnan(ti) && ti < tn && hs == HTC_S_EMPTY)
			tmo = ti - now;
		z = maxbytes - (htc->rxbuf_e - htc->rxbuf_b);
		if (z <= 0) {
			/* maxbytes reached but not HTC_S_COMPLETE. Return
			 * overflow. */
			WS_ReleaseP(htc->ws, htc->rxbuf_b);
			return (HTC_S_OVERFLOW);
		}
		if (tmo <= 0.0)
			tmo = 1e-3;	/* deadline passed: one short poll */
		/* z > 0: read up to z bytes; -1 = error, -2 = timeout
		 * (per VTCP_read's convention). */
		z = VTCP_read(*htc->rfd, htc->rxbuf_e, z, tmo);
		if (z == 0 || z == -1) {
			WS_ReleaseP(htc->ws, htc->rxbuf_b);
			return (HTC_S_EOF);
		} else if (z > 0)
			htc->rxbuf_e += z;
		else if (z == -2) {
			if (hs == HTC_S_EMPTY && ti <= now) {
				WS_ReleaseP(htc->ws, htc->rxbuf_b);
				return (HTC_S_IDLE);
			}
			if (tn <= now) {
				WS_ReleaseP(htc->ws, htc->rxbuf_b);
				return (HTC_S_TIMEOUT);
			}
		}
	}
}
/// Thread entry adapter: invoke the wrapped callable.
/// Takes no arguments and produces no result; any effect comes from the
/// wrapped function itself.
void Thread::VoidWrapper::call(void)
{
    func();
}
// Function-call operator: forward the stored request and path to the
// bound handler. NOTE(review): req.get() presumably yields the raw
// pointer of a smart-pointer-held request — confirm against the
// enclosing class's member declarations.
void operator()()
{
    func(req.get(), path);
}
/*
 * WrapperPromiseCallback::Call - Run the wrapped JS promise-reaction callback
 * with aValue and settle the next promise with its outcome.
 *
 * Flow (PromiseReactionTask): wrap aValue into mGlobal's compartment, invoke
 * mCallback, then either (a) reject the next promise with the callback's
 * exception, (b) reject with a TypeError if the callback returned the very
 * promise being resolved, or (c) resolve the next promise with the return
 * value. The "next promise" is either mNextPromise or, when that is null,
 * the raw reflector object mNextPromiseObj settled via mResolveFunc /
 * mRejectFunc.
 *
 * Returns NS_OK on any settled outcome (including rejection);
 * NS_ERROR_FAILURE / NS_ERROR_OUT_OF_MEMORY only when JSAPI plumbing fails,
 * in which case the promise stays unresolved.
 */
nsresult
WrapperPromiseCallback::Call(JSContext* aCx,
                             JS::Handle<JS::Value> aValue)
{
  // Mark the GC things we are about to touch as live for the active JS heap.
  JS::ExposeObjectToActiveJS(mGlobal);
  JS::ExposeValueToActiveJS(aValue);

  // Enter the callback's global compartment; 'value' must be wrapped into it
  // before use.
  JSAutoCompartment ac(aCx, mGlobal);

  JS::Rooted<JS::Value> value(aCx, aValue);
  if (!JS_WrapValue(aCx, &value)) {
    NS_WARNING("Failed to wrap value into the right compartment.");
    return NS_ERROR_FAILURE;
  }

  ErrorResult rv;

  // PromiseReactionTask step 6
  JS::Rooted<JS::Value> retValue(aCx);
  // Compartment the callback's return value should end up in: taken from the
  // next promise (wrapped or raw reflector form).
  JSCompartment* compartment;
  if (mNextPromise) {
    compartment = mNextPromise->Compartment();
  } else {
    MOZ_ASSERT(mNextPromiseObj);
    compartment = js::GetObjectCompartment(mNextPromiseObj);
  }
  mCallback->Call(value, &retValue, rv, "promise callback",
                  CallbackObject::eRethrowExceptions, compartment);

  rv.WouldReportJSException();

  // PromiseReactionTask step 7
  if (rv.Failed()) {
    // The callback threw: reject the next promise with that exception.
    JS::Rooted<JS::Value> value(aCx);
    { // Scope for JSAutoCompartment
      // Convert the ErrorResult to a JS exception object that we can reject
      // ourselves with. This will be exactly the exception that would get
      // thrown from a binding method whose ErrorResult ended up with whatever
      // is on "rv" right now. Do this in the promise reflector compartment.
      Maybe<JSAutoCompartment> ac;
      if (mNextPromise) {
        ac.emplace(aCx, mNextPromise->GlobalJSObject());
      } else {
        ac.emplace(aCx, mNextPromiseObj);
      }
      DebugOnly<bool> conversionResult = ToJSValue(aCx, rv, &value);
      MOZ_ASSERT(conversionResult);
    }

    if (mNextPromise) {
      mNextPromise->RejectInternal(aCx, value);
    } else {
      JS::Rooted<JS::Value> ignored(aCx);
      ErrorResult rejectRv;
      mRejectFunc->Call(value, &ignored, rejectRv);
      // This reported any JS exceptions; we just have a pointless exception on
      // there now.
      rejectRv.SuppressException();
    }
    return NS_OK;
  }

  // If the return value is the same as the promise itself, throw TypeError.
  if (retValue.isObject()) {
    JS::Rooted<JSObject*> valueObj(aCx, &retValue.toObject());
    // Unwrap cross-compartment wrappers so identity comparison is meaningful.
    valueObj = js::CheckedUnwrap(valueObj);
    JS::Rooted<JSObject*> nextPromiseObj(aCx);
    if (mNextPromise) {
      nextPromiseObj = mNextPromise->GetWrapper();
    } else {
      MOZ_ASSERT(mNextPromiseObj);
      nextPromiseObj = mNextPromiseObj;
    }
    // XXXbz shouldn't this check be over in ResolveInternal anyway?
    if (valueObj == nextPromiseObj) {
      const char* fileName = nullptr;
      uint32_t lineNumber = 0;

      // Try to get some information about the callback to report a sane error,
      // but don't try too hard (only deals with scripted functions).
      JS::Rooted<JSObject*> unwrapped(aCx,
        js::CheckedUnwrap(mCallback->Callback()));
      if (unwrapped) {
        JSAutoCompartment ac(aCx, unwrapped);
        if (JS_ObjectIsFunction(aCx, unwrapped)) {
          JS::Rooted<JS::Value> asValue(aCx, JS::ObjectValue(*unwrapped));
          JS::Rooted<JSFunction*> func(aCx, JS_ValueToFunction(aCx, asValue));
          MOZ_ASSERT(func);
          JSScript* script = JS_GetFunctionScript(aCx, func);
          if (script) {
            fileName = JS_GetScriptFilename(script);
            lineNumber = JS_GetScriptBaseLineNumber(aCx, script);
          }
        }
      }

      // We're back in aValue's compartment here.
      JS::Rooted<JSString*> fn(aCx, JS_NewStringCopyZ(aCx, fileName));
      if (!fn) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return NS_ERROR_OUT_OF_MEMORY;
      }

      JS::Rooted<JSString*> message(aCx,
        JS_NewStringCopyZ(aCx,
          "then() cannot return same Promise that it resolves."));
      if (!message) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return NS_ERROR_OUT_OF_MEMORY;
      }

      JS::Rooted<JS::Value> typeError(aCx);
      if (!JS::CreateError(aCx, JSEXN_TYPEERR, nullptr, fn, lineNumber, 0,
                           nullptr, message, &typeError)) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return NS_ERROR_OUT_OF_MEMORY;
      }

      if (mNextPromise) {
        mNextPromise->RejectInternal(aCx, typeError);
      } else {
        JS::Rooted<JS::Value> ignored(aCx);
        ErrorResult rejectRv;
        mRejectFunc->Call(typeError, &ignored, rejectRv);
        // This reported any JS exceptions; we just have a pointless exception
        // on there now.
        rejectRv.SuppressException();
      }
      return NS_OK;
    }
  }

  // Otherwise, run resolver's resolve with value.
  if (!JS_WrapValue(aCx, &retValue)) {
    NS_WARNING("Failed to wrap value into the right compartment.");
    return NS_ERROR_FAILURE;
  }

  if (mNextPromise) {
    mNextPromise->ResolveInternal(aCx, retValue);
  } else {
    JS::Rooted<JS::Value> ignored(aCx);
    ErrorResult resolveRv;
    mResolveFunc->Call(retValue, &ignored, resolveRv);
    // This reported any JS exceptions; we just have a pointless exception
    // on there now.
    resolveRv.SuppressException();
  }

  return NS_OK;
}