// Construct a UDP DNS socket object around an already-open descriptor.
// Registers rcb() as the read-ready callback on fd so that incoming
// datagrams are delivered through the base-class callback `cb`.
dnssock_udp::dnssock_udp (int f, cb_t cb)
  : dnssock (false, cb),
    fd (f)
{
  // Arm the event loop: invoke our receive handler whenever fd is readable.
  fdcb (fd, selread, wrap (this, &dnssock_udp::rcb));
}
// Ruby-binding shim: wrap a const sf::View reference as a Ruby VALUE by
// delegating to the pointer overload. The const_cast is required because
// the underlying wrap() takes a mutable pointer; the object is not
// modified here.
VALUE wrap< sf::View >(const sf::View &image )
{
	sf::View *view_ptr = const_cast<sf::View*>(&image);
	return wrap(view_ptr);
}
// Per-tick update for the current plane g.p(): applies the joystick input
// (jx = left/right, jy = up/down, jb = fire button) according to the plane's
// state machine (state 0 = OK, 1 = stalling, 2 = dead/smoking, 3 = crashed,
// 4 = expunged drak), then integrates position, manages the engine sound
// channel, and runs collision detection for live planes.
void act(gamedata &g, int jx, int jy, bool jb){
  // Reset hidden/afterburner status
  g.p().hide = false;
  g.p().boost = false;
  g.p().gunthreat = 0;
  switch(g.p().state){
  case 0: // OK planes
    // Sub-state machine: land 0 = parked, 1 = rolling on runway,
    // 2 = airborne, 3 = landing approach.
    switch(g.p().land){
    case 0: // Stationary on runway
      // Check for mission 0 and mission 1 wins
      if (((g.mission == 0) || (g.mission == 1)) && (!g.p().drak) && (g.p().score >= g.targetscore)){
        g.winner = g.p().side;
      }
      // Start Engine
      if (jy == -1){
        g.p().s = 0.3*GAME_SPEED*GAME_SPEED;
        g.p().land = 1;
        // control > 0 means a human-controlled plane with a sound channel
        // at index control-1.
        if ((g.p().control) > 0){
          g.sound.volume(g.p().control-1, 0.0);
          g.sound.loop(g.p().control-1, g.p().enginesample);
        }
      }
      break;
    case 1: // Taking off plane
      // Throttle up while the stick is held forward, capped at cruise speed.
      if (jy == -1){
        g.p().s = dlimit(g.p().s + 0.3*GAME_SPEED*GAME_SPEED, 0.0, 6.0*GAME_SPEED);
      }
      // Take off plane
      // (pull up in the direction matching the runway heading, 13 or 5)
      if ((jx == -1) && (g.p().s > 2.0*GAME_SPEED) && (g.base[g.p().side].planed == 13)){
        g.p().d++;
        g.p().rotate = g.p().maxrotate;
        g.p().land = 2;
      }
      if ((jx == 1) && (g.p().s > 2.0*GAME_SPEED) && (g.base[g.p().side].planed == 5)){
        g.p().d--;
        g.p().rotate = g.p().maxrotate;
        g.p().land = 2;
      }
      // Off end of runway
      if (abs(int(g.p().x - g.base[g.p().side].planex)) > g.base[g.p().side].runwaylength){
        g.p().land = 2;
      }
      break;
    case 2: // flying
      // Navigate plane
      // (direction d wraps around the 16-step compass in [1,17);
      // rotate is a cooldown between turns)
      if ((g.p().rotate == 0) && (jx !=0)){
        g.p().d = wrap(g.p().d-jx,1,17);
        g.p().rotate = g.p().maxrotate;
      }else{
        if (g.p().rotate > 0){
          g.p().rotate--;
        }
      }
      // Acceleration / Afterburner Controls
      {
        // Base acceleration depends on heading (gravity component).
        double acceleration = g.accel[g.p().d] * GAME_SPEED * GAME_SPEED;
        if (g.p().burner){
          if (jy == -1){
            acceleration += 0.3*GAME_SPEED*GAME_SPEED;
            g.p().boost = true;
          }
          // Bleed off speed above cruise when the burner isn't engaged.
          if ((g.p().s > 6.0*GAME_SPEED) && (jy != -1)){
            acceleration -= 0.3*GAME_SPEED*GAME_SPEED;
          }
          g.p().s = dlimit(g.p().s + acceleration, 0.0, 12.0*GAME_SPEED);
        }else{
          g.p().s = dlimit(g.p().s + acceleration, 0.0, 6.0*GAME_SPEED);
        }
      }
      // Stealth Controls
      if ((jy == -1) && (jx == 0) && (!jb) && (g.p().stealth)){
        g.p().hide = true;
      }
      // Check for shotfire
      if (g.p().shotdelay == 0){
        if ((jb) && (g.p().ammo > 0)){
          fire_shot(g.p(), g.shot, g.sound, g.xmove, g.ymove);
        }
        // Check for bombdrop
        if ((jy == 1) && (g.p().bombs > 0)){
          drop_bomb(g.p(), g.fall, g.sound, g.bombimage);
        }
      }else{
        g.p().shotdelay--;
      }
      break;
    case 3: // Landing plane
      // Snap back onto the runway once close enough to the parking spot
      // (tolerance sign depends on runway heading), then rearm/reset.
      if ((((g.p().x - g.base[g.p().side].planex) < 2.0) && (g.base[g.p().side].planed == 13)) || (((g.p().x - g.base[g.p().side].planex) > -2.0) && (g.base[g.p().side].planed == 5))){
        g.p().land = 0;
        g.p().x = g.base[g.p().side].planex;
        g.p().y = g.base[g.p().side].planey;
        g.p().d = g.base[g.p().side].planed;
        g.p().xs = 0;
        g.p().ys = 0;
        g.p().s = 0;
        g.p().ammo = g.p().maxammo;
        g.p().bombs = g.p().maxbombs;
        g.p().coms = 0;
        g.p().targetx = 0;
        g.p().targety = 0;
        g.p().cruiseheight = 0;
      }
      break;
    }
    // Set speed for planes
    g.p().xs = g.p().s * g.xmove[g.p().d];
    g.p().ys = g.p().s * g.ymove[g.p().d];
    // Check for stall
    if ((g.p().s < 1.0*GAME_SPEED) && (g.p().land == 2)){
      g.p().state = 1;
      if ((g.p().control) > 0){
        g.sound.stop(g.p().control-1);
        g.sound.play(SOUND_STALL);
      }
    }
    // Flying above the screen top (y < 0) also forces a stall.
    if (g.p().y < 0){
      g.p().y = 0;
      g.p().ys = 0;
      g.p().state = 1;
      if ((g.p().control) > 0){
        g.sound.stop(g.p().control-1);
        g.sound.play(SOUND_STALL);
      }
    }
    break;
  case 1: // Stalling planes
    // Navigate plane
    if ((g.p().rotate == 0) && (jx !=0)){
      g.p().d = wrap(g.p().d-jx,1,17);
      g.p().rotate = g.p().maxrotate;
    }else{
      if (g.p().rotate > 0){
        g.p().rotate--;
      }
    }
    // Check for shotfire
    if (g.p().shotdelay == 0){
      if ((jb) && (g.p().ammo > 0)){
        fire_shot(g.p(), g.shot, g.sound, g.xmove, g.ymove);
      }
      // Check for bombdrop
      if ((jy == 1) && (g.p().bombs > 0)){
        drop_bomb(g.p(), g.fall, g.sound, g.bombimage);
      }
    }else{
      g.p().shotdelay--;
    }
    // Gravity and drag
    g.p().ys += 0.1 * GAME_SPEED * GAME_SPEED;
    if (fabs(g.p().xs) > 0.02 * GAME_SPEED * GAME_SPEED){
      g.p().xs -= g.p().xs / fabs(g.p().xs) * 0.02 * GAME_SPEED * GAME_SPEED;
    }
    // Recover from Stall
    // (requires nose-down heading d == 9 and enough downward speed)
    if ((g.p().ys > 3.0 * GAME_SPEED) && (g.p().d == 9)){
      g.p().s = dlimit(g.p().ys, 3.0*GAME_SPEED, 6.0*GAME_SPEED);
      g.p().state = 0;
      g.p().xs = g.p().s * g.xmove[g.p().d];
      g.p().ys = g.p().s * g.ymove[g.p().d];
      g.p().coms = 0;
      if ((g.p().control) > 0){
        double volume = g.p().s / (6.0*GAME_SPEED);
        g.sound.volume(g.p().control-1, volume * 0.5);
        g.sound.loop(g.p().control-1, g.p().enginesample);
      }
    }
    break;
  case 2: // Dead planes
    // Gravity and drag
    g.p().ys += 0.1 * GAME_SPEED * GAME_SPEED;
    if (fabs(g.p().xs) > 0.02 * GAME_SPEED * GAME_SPEED){
      g.p().xs -= g.p().xs/ fabs(g.p().xs) * 0.02 * GAME_SPEED * GAME_SPEED;
    }
    // Smoking plane
    // (crash doubles as a periodic timer for emitting smoke puffs)
    g.p().crash++;
    if (g.p().crash == int(5/GAME_SPEED)){
      g.p().crash = 0;
      smoketype newsmoke;
      newsmoke.x = int(g.p().x);
      newsmoke.y = g.p().y;
      newsmoke.time = 0;
      g.smoke.add(newsmoke);
    }
    break;
  case 3: // Crashed planes
    // After a fixed delay, respawn normal planes; drak fighters are removed.
    g.p().crash++;
    if (g.p().crash == int(70/GAME_SPEED)){
      if (!g.p().drak){
        // Respawn plane to runway
        g.p().land = 0;
        g.p().state = 0;
        g.p().rotate = 0;
        g.p().crash = 0;
        g.p().x = g.base[g.p().side].planex;
        g.p().y = g.base[g.p().side].planey;
        g.p().d = g.base[g.p().side].planed;
        g.p().xs = 0;
        g.p().ys = 0;
        g.p().s = 0;
        g.p().ammo = g.p().maxammo;
        g.p().bombs = g.p().maxbombs;
        g.p().coms = 0;
        g.p().targetx = 0;
        g.p().targety = 0;
        g.p().cruiseheight = 0;
      }else{
        // Expunge drak
        g.p().state = 4;
      }
    }
    break;
  case 4: // Expunged drak fighter (NB: shouldn't get here!)
    break;
  }
  // Move the planes
  g.p().x += g.p().xs;
  g.p().y += g.p().ys;
  // Control Engine Volume
  if ((g.p().control) > 0){
    if ((g.p().state == 0) && (g.p().land > 0)){
      double volume = g.p().s / (6.0*GAME_SPEED);
      g.sound.volume(g.p().control-1, volume * 0.5);
      // Swap the looping engine sample when the afterburner toggles.
      if ((g.p().boost) && (g.p().enginesample == SOUND_JET)){
        g.p().enginesample = SOUND_BURNER;
        g.sound.loop(g.p().control-1,SOUND_BURNER);
      }
      if ((!g.p().boost) && (g.p().enginesample == SOUND_BURNER)){
        g.p().enginesample = SOUND_JET;
        g.sound.loop(g.p().control-1,SOUND_JET);
      }
    }else{
      g.sound.stop(g.p().control-1);
    }
  }
  // Dead/crashed planes (state >= 3 is crashed/expunged) skip collisions.
  if (g.p().state < 3) detect_collisions(g);
}
int ReliSock::put_bytes(const void *data, int sz) { int tw=0, header_size = isOutgoing_MD5_on() ? MAX_HEADER_SIZE:NORMAL_HEADER_SIZE; int nw, l_out; unsigned char * dta = NULL; // Check to see if we need to encrypt // Okay, this is a bug! H.W. 9/25/2001 if (get_encryption()) { if (!wrap((unsigned char *)data, sz, dta , l_out)) { dprintf(D_SECURITY, "Encryption failed\n"); if (dta != NULL) { free(dta); dta = NULL; } return -1; // encryption failed! } } else { if((dta = (unsigned char *) malloc(sz)) != 0) memcpy(dta, data, sz); } ignore_next_encode_eom = FALSE; for(nw=0;;) { if (snd_msg.buf.full()) { if (!snd_msg.snd_packet(peer_description(), _sock, FALSE, _timeout)) { if (dta != NULL) { free(dta); dta = NULL; } return FALSE; } } if (snd_msg.buf.empty()) { snd_msg.buf.seek(header_size); } if (dta && (tw = snd_msg.buf.put_max(&((char *)dta)[nw], sz-nw)) < 0) { free(dta); dta = NULL; return -1; } nw += tw; if (nw >= sz) { break; } } if (nw > 0) { _bytes_sent += nw; } if (dta != NULL) { free(dta); dta = NULL; } return nw; }
/*
 * Build the JSON description of one HomeKit-style characteristic:
 * value (if readable), min/max/step constraints, permission array,
 * type, instance id (iid), unit, and format.
 *
 * Fixes over the previous version:
 *  - tempStr was only 4 bytes, so snprintf silently truncated any
 *    value outside -99..999 (and most type/iid values). 12 bytes holds
 *    "-2147483648" plus the terminator.
 *  - acclaim is unsigned short, so it must be printed with %hu, not %hd.
 *  - the trailing comma of the perms array is now only stripped when a
 *    permission was actually appended; before, with no permission bits
 *    set, the opening '[' itself was removed.
 */
inline string attribute(unsigned short type, unsigned short acclaim, int p, int value, int minVal, int maxVal, int step, unit valueUnit) {
    string result;

    // Large enough for any 32-bit decimal value, including the sign.
    char tempStr[12];

    snprintf(tempStr, sizeof(tempStr), "%d", value);
    if (p & premission_read) {
        result += wrap("value")+":"+tempStr;
        result += ",";
    }

    // Constraint fields are emitted only when they differ from the
    // "unbounded" sentinels.
    snprintf(tempStr, sizeof(tempStr), "%d", minVal);
    if (minVal != INT32_MIN)
        result += wrap("minValue")+":"+tempStr+",";
    snprintf(tempStr, sizeof(tempStr), "%d", maxVal);
    if (maxVal != INT32_MAX)
        result += wrap("maxValue")+":"+tempStr+",";
    snprintf(tempStr, sizeof(tempStr), "%d", step);
    if (step > 0)
        result += wrap("minStep")+":"+tempStr+",";

    result += wrap("perms")+":";
    result += "[";
    if (p & premission_read) result += wrap("pr")+",";
    if (p & premission_write) result += wrap("pw")+",";
    if (p & premission_notify) result += wrap("ev")+",";
    // Strip the trailing comma only if at least one permission was added.
    if (result.back() == ',')
        result = result.substr(0, result.size()-1);
    result += "]";
    result += ",";

    snprintf(tempStr, sizeof(tempStr), "%X", type);
    result += wrap("type")+":"+wrap(tempStr);
    result += ",";

    // %hu: acclaim is unsigned short.
    snprintf(tempStr, sizeof(tempStr), "%hu", acclaim);
    result += wrap("iid")+":"+tempStr;
    result += ",";

    switch (valueUnit) {
        case unit_arcDegree:
            result += wrap("unit")+":"+wrap("arcdegrees")+",";
            break;
        case unit_celsius:
            result += wrap("unit")+":"+wrap("celsius")+",";
            break;
        case unit_percentage:
            result += wrap("unit")+":"+wrap("percentage")+",";
            break;
    }

    result += "\"format\":\"int\"";
    return "{"+result+"}";
}
// Button handler: step the face preview forward to the next available
// head, wrapping back to the first entry past the end of the list.
void RaceDialog::onSelectNextFace(MyGUI::Widget*)
{
    mFaceIndex = wrap(mFaceIndex + 1, mAvailableHeads.size());

    // Refresh the character preview with the newly selected head.
    updatePreview();
}
/* Lock a writable region of the DirectSound ring buffer and hand it to the
 * caller via *pBuffer / *pBufferSize. Returns false when there is nothing
 * useful to do yet (not enough free space, cursors look bogus, or a
 * DirectSound call failed); returns true with the buffer locked and the
 * internal write cursor advanced. Caller must not re-enter while locked
 * (asserted below). */
bool DSoundBuf::get_output_buf( char **pBuffer, unsigned *pBufferSize, int iChunksize )
{
	ASSERT( !m_bBufferLocked );

	/* Convert the chunk size from frames to bytes. */
	iChunksize *= bytes_per_frame();

	DWORD iCursorStart, iCursorEnd;

	HRESULT result;

	/* It's easiest to think of the cursor as a block, starting and ending at
	 * the two values returned by GetCurrentPosition, that we can't write to. */
	result = m_pBuffer->GetCurrentPosition( &iCursorStart, &iCursorEnd );
#ifndef _XBOX
	if( result == DSERR_BUFFERLOST )
	{
		/* Buffer was lost (e.g. focus change); restore and retry once. */
		m_pBuffer->Restore();
		result = m_pBuffer->GetCurrentPosition( &iCursorStart, &iCursorEnd );
	}
	if( result != DS_OK )
	{
		LOG->Warn( hr_ssprintf(result, "DirectSound::GetCurrentPosition failed") );
		return false;
	}
#endif

	/* Keep a rolling history of the last four cursor pairs (for the
	 * diagnostic trace below); shift the old entries down and record the
	 * newest in slot 3. */
	memmove( &m_iLastCursors[0][0], &m_iLastCursors[1][0], sizeof(int)*6 );
	m_iLastCursors[3][0] = iCursorStart;
	m_iLastCursors[3][1] = iCursorEnd;

	/* Some cards (Creative AudioPCI) have a no-write area even when not playing. I'm not
	 * sure what that means, but it breaks the assumption that we can fill the whole writeahead
	 * when prebuffering. */
	if( !m_bPlaying )
		iCursorEnd = iCursorStart;

	/*
	 * Some cards (Game Theater XP 7.1 hercwdm.sys 5.12.01.4101 [466688b, 01-10-2003])
	 * have odd behavior when starting a sound: the start/end cursors go:
	 *
	 * 0,0          end cursor forced equal to start above (normal)
	 * 4608, 1764   end cursor trailing the write cursor; except with old emulated
	 *              WaveOut devices, this shouldn't happen; it indicates that the
	 *              driver expects almost the whole buffer to be filled.  Also, the
	 *              play cursor is too far ahead from the last call for the amount
	 *              of actual time passed.
	 * 704, XXX     start cursor moves back to where it should be.  I don't have an exact
	 *              end cursor position, but in general from now on it stays about 5kb
	 *              ahead of start (which is where it should be).
	 *
	 * The second call is completely wrong; both the start and end cursors are meaningless.
	 * Detect this: if the end cursor is close behind the start cursor, don't do anything.
	 * (We can't; we have no idea what the cursors actually are.)
	 */
	{
		int iPrefetch = iCursorEnd - iCursorStart;
		wrap( iPrefetch, m_iBufferSize );

		if( m_iBufferSize - iPrefetch < 1024*4 )
		{
			LOG->Trace( "Strange DirectSound cursor ignored: %i..%i", iCursorStart, iCursorEnd );
			return false;
		}
	}

	/* Update m_iBufferBytesFilled. */
	{
		int iFirstByteFilled = m_iWriteCursor - m_iBufferBytesFilled;
		wrap( iFirstByteFilled, m_iBufferSize );

		/* The number of bytes that have been played since the last time we got here: */
		int bytes_played = iCursorStart - iFirstByteFilled;
		wrap( bytes_played, m_iBufferSize );

		m_iBufferBytesFilled -= bytes_played;
		m_iBufferBytesFilled = max( 0, m_iBufferBytesFilled );

		/* Pay back any temporary extra writeahead as data drains. */
		if( m_iExtraWriteahead )
		{
			int used = min( m_iExtraWriteahead, bytes_played );
			CString s = ssprintf("used %i of %i (%i..%i)", used, m_iExtraWriteahead, iCursorStart, iCursorEnd );
			s += "; last: ";
			for( int i = 0; i < 4; ++i )
				s += ssprintf( "%i, %i; ", m_iLastCursors[i][0], m_iLastCursors[i][1] );
			LOG->Trace("%s", s.c_str());
			m_iWriteAhead -= used;
			m_iExtraWriteahead -= used;
		}
	}

	CheckWriteahead( iCursorStart, iCursorEnd );
	CheckUnderrun( iCursorStart, iCursorEnd );

	/* If we already have enough bytes written ahead, stop. */
	if( m_iBufferBytesFilled > m_iWriteAhead )
		return false;

	int iNumBytesEmpty = m_iWriteAhead - m_iBufferBytesFilled;

	/* num_bytes_empty is the amount of free buffer space. If it's
	 * too small, come back later. */
	if( iNumBytesEmpty < iChunksize )
		return false;

	// LOG->Trace("gave %i at %i (%i, %i) %i filled", iNumBytesEmpty, m_iWriteCursor, cursor, write, m_iBufferBytesFilled);

	/* Lock the audio buffer. */
	result = m_pBuffer->Lock( m_iWriteCursor, iNumBytesEmpty, (LPVOID *) &m_pLockedBuf1, (DWORD *) &m_iLockedSize1, (LPVOID *) &m_pLockedBuf2, (DWORD *) &m_iLockedSize2, 0 );
#ifndef _XBOX
	if( result == DSERR_BUFFERLOST )
	{
		m_pBuffer->Restore();
		result = m_pBuffer->Lock( m_iWriteCursor, iNumBytesEmpty, (LPVOID *) &m_pLockedBuf1, (DWORD *) &m_iLockedSize1, (LPVOID *) &m_pLockedBuf2, (DWORD *) &m_iLockedSize2, 0 );
	}
#endif
	if( result != DS_OK )
	{
		LOG->Warn( hr_ssprintf(result, "Couldn't lock the DirectSound buffer.") );
		return false;
	}

	/* The caller writes into the temp buffer; the locked regions are
	 * committed later. Total writable size spans both locked regions
	 * (the lock may wrap around the ring buffer). */
	*pBuffer = m_pTempBuffer;
	*pBufferSize = m_iLockedSize1 + m_iLockedSize2;

	m_iWriteCursor += iNumBytesEmpty;

	if( m_iWriteCursor >= m_iBufferSize )
		m_iWriteCursor -= m_iBufferSize;

	m_iBufferBytesFilled += iNumBytesEmpty;

	/* Track the absolute write position in frames. */
	m_iWriteCursorPos += iNumBytesEmpty / bytes_per_frame();

	m_bBufferLocked = true;
	return true;
}
/* Copy the relevant fields of GALAXY *g into the flat output record
 * GALAXY_OUTPUT *o, converting units (code units -> Msun/yr, years, Gyr)
 * where needed. `n` selects the output snapshot index for per-snapshot
 * arrays (luminosities, Sfr when not SAVE_MEMORY). The exact set of
 * fields written is controlled by a large lattice of compile-time
 * switches (NO_PROPS_OUTPUTS, COMPUTE_SPECPHOT_PROPERTIES, GALAXYTREE,
 * STAR_FORMATION_HISTORY, NORMALIZEDDB, ...).
 * NOTE(review): the bare #endif after the signature closes an #ifdef
 * that selects an alternate signature above this excerpt. */
void prepare_galaxy_for_output(int n, struct GALAXY *g, struct GALAXY_OUTPUT *o)
#endif
{
  int j,ibin;

#ifndef NO_PROPS_OUTPUTS
  o->Type = g->Type;
  o->SnapNum = g->SnapNum;
  /* Central-halo properties come from the FOF group's first halo. */
  o->CentralMvir = get_virial_mass(Halo[g->HaloNr].FirstHaloInFOFgroup);
  o->CentralRvir = get_virial_radius(Halo[g->HaloNr].FirstHaloInFOFgroup);
  o->Mvir = g->Mvir;
  o->Rvir = g->Rvir;
  o->Vvir = g->Vvir;

  for(j = 0; j < 3; j++)
    {
      o->Pos[j] = g->Pos[j];
      /* Periodic box: wrap the offset to the central galaxy into [-Box/2, Box/2]. */
      o->DistanceToCentralGal[j] = wrap(Halo[Halo[g->HaloNr].FirstHaloInFOFgroup].Pos[j] - g->Pos[j],BoxSize);;
    }

  o->ColdGas = g->ColdGas;
  o->DiskMass = g->DiskMass;
  o->BulgeMass = g->BulgeMass;
  o->HotGas = g->HotGas;
  o->BlackHoleMass = g->BlackHoleMass;
#endif

#ifdef COMPUTE_SPECPHOT_PROPERTIES
#ifndef POST_PROCESS_MAGS
#ifdef OUTPUT_REST_MAGS
  /* Luminosities are converted into Mags in various bands */
  for(j = 0; j < NMAG; j++)
    o->Mag[j] = lum_to_mag(g->Lum[j][n]);
#endif
#endif //ndef POST_PROCESS_MAGS
#endif //COMPUTE_SPECPHOT_PROPERTIES

#ifndef LIGHT_OUTPUT
#ifndef NO_PROPS_OUTPUTS
#ifdef GALAXYTREE
  o->HaloID = HaloIDs[g->HaloNr].HaloID;
  o->Redshift = ZZ[g->SnapNum];

  /* Spatial hash of the position for database indexing. */
  int ii = (int) floor(o->Pos[0] * ScaleFactor);
  int jj = (int) floor(o->Pos[1] * ScaleFactor);
  int kk = (int) floor(o->Pos[2] * ScaleFactor);

  o->PeanoKey = peano_hilbert_key(ii, jj, kk, Hashbits);

  o->SubID = calc_big_db_subid_index(g->SnapNum, Halo[g->HaloNr].FileNr, Halo[g->HaloNr].SubhaloIndex);

  /* Walk the FOF group to find its most massive (largest Len) subhalo. */
  int tmpfirst = Halo[g->HaloNr].FirstHaloInFOFgroup;
  int lenmax = 0;
  int next = tmpfirst;
  while(next != -1)
    {
      if(Halo[next].Len > lenmax)
	{
	  lenmax = Halo[next].Len;
	  tmpfirst = next;
	}
      next = Halo[next].NextHaloInFOFgroup;
    }

  o->MMSubID = calc_big_db_subid_index(g->SnapNum, Halo[tmpfirst].FileNr, Halo[tmpfirst].SubhaloIndex);
#endif

  /* Lookback time converted from code units to years. */
  o->LookBackTimeToSnap = NumToTime(g->SnapNum)*UnitTime_in_years/Hubble_h;
  o->InfallVmax = g->InfallVmax;
  o->InfallSnap = g->InfallSnap;
  o-> InfallHotGas = g-> InfallHotGas;
  o->HotRadius = g->HotRadius;
#ifdef HALOPROPERTIES
  o->HaloM_Mean200 = g->HaloM_Mean200;
  o->HaloM_Crit200 = g->HaloM_Crit200;
  o->HaloM_TopHat = g->HaloM_TopHat;
  o->HaloVelDisp = g->HaloVelDisp;
  o->HaloVmax = g->HaloVmax;
#endif
  o->Len = g->Len;
  o->Vmax = g->Vmax;
  o->BulgeSize = g->BulgeSize;
  o->EjectedMass = g->EjectedMass;
  o->BlackHoleGas = g->BlackHoleGas;

  for(j = 0; j < 3; j++)
    {
      o->Vel[j] = g->Vel[j];
#ifdef HALOSPIN
      o->HaloSpin[j] = g->HaloSpin[j];
#endif
#ifndef H2_AND_RINGS
      o->GasSpin[j] = g->GasSpin[j];
      o->StellarSpin[j] = g->StellarSpin[j];
#else
      o->DiskSpin[j] = g->DiskSpin[j];
#endif
#ifdef HALOPROPERTIES
      o->HaloPos[j] = g->HaloPos[j];
      o->HaloVel[j] = g->HaloVel[j];
      o->HaloSpin[j] = g->HaloSpin[j];
#endif
    }

  o->XrayLum = g->XrayLum;
  o->GasDiskRadius = g->GasDiskRadius;
  o->StellarDiskRadius = g->StellarDiskRadius;
  o->CoolingRadius = g->CoolingRadius;
  o->ICM = g->ICM;
  //o->MetalsICM = CORRECTDBFLOAT(g->MetalsICM);
  o->MetalsICM = g->MetalsICM;
  /* Accretion rates: code units -> Msun/yr. */
  o->QuasarAccretionRate = g->QuasarAccretionRate * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS;
  o->RadioAccretionRate = g->RadioAccretionRate * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS;
  o->CosInclination = g->CosInclination;

  /* Merger clock is only meaningful for satellites (type 2, or type 1 with
   * a running merger). */
  if(g->Type == 2 || (g->Type == 1 && g->MergeOn == 1))
    {
      o->OriMergTime=g->OriMergTime;
      o->MergTime = g->MergTime;
    }
  else
    {
      o->OriMergTime=0.0;
      o->MergTime = 0.0;
    }

#ifndef GALAXYTREE
  o->HaloIndex = g->HaloNr;
#endif
#ifdef MBPID
  o->MostBoundID = g->MostBoundID;
#endif
#ifdef GALAXYTREE
  o->DisruptOn = g->DisruptOn;
#endif
#ifdef MERGE01
  o->MergeOn = g->MergeOn;
#endif

//METALS
  /* (CORRECTDBFLOAT() wrapping of the metal fields was removed; the raw
   * values are copied straight through.) */
  o->MetalsColdGas = g->MetalsColdGas;
  o->MetalsDiskMass = g->MetalsDiskMass;
  o->MetalsBulgeMass = g->MetalsBulgeMass;
  o->MetalsHotGas = g->MetalsHotGas;
  o->MetalsEjectedMass = g->MetalsEjectedMass;
#ifdef METALS_SELF
  o->MetalsHotGasSelf = g->MetalsHotGasSelf;
#endif
#ifdef TRACK_BURST
  o->BurstMass=g->BurstMass;
#endif

#ifdef H2_AND_RINGS
  /* Per-ring (radial bin) quantities. */
  for(j=0; j<RNUM; j++)
    {
      o->H2fractionr[j] = g -> H2fractionr[j];
      o->ColdGasr[j] = g->ColdGasr[j];
      o->DiskMassr[j] = g->DiskMassr[j];
      o->MetalsColdGasr[j] = g->MetalsColdGasr[j];
      o->MetalsDiskMassr[j] = g->MetalsDiskMassr[j];
    }
#endif

//STAR FORMATION HISTORIES / RATES
#ifdef STAR_FORMATION_HISTORY
  o->sfh_ibin=g->sfh_ibin;
  ibin=0;
  /* Copy the populated SFH bins (0..sfh_ibin inclusive). */
  for (j=0;j<=o->sfh_ibin;j++)
    {
#ifndef NORMALIZEDDB
      // o->sfh_time[j]=(g->sfh_t[j]+g->sfh_dt[j]/2.-NumToTime(g->SnapNum))*UnitTime_in_years/Hubble_h; //Time from middle of this sfh bin to snapshot - converted from code units to years
      //o->sfh_time[j]=(g->sfh_t[j]+g->sfh_dt[j]/2.)*UnitTime_in_years/Hubble_h; //ROB: Lookback time to middle of SFH bin, in years
      //ROB: Now use LookBackTimeToSnap + sfh_time instead.
      // o->sfh_dt[j]=g->sfh_dt[j]*UnitTime_in_years/Hubble_h;
      o->sfh_DiskMass[j]=g->sfh_DiskMass[j];
      o->sfh_BulgeMass[j]=g->sfh_BulgeMass[j];
      o->sfh_ICM[j]=g->sfh_ICM[j];
      o->sfh_MetalsDiskMass[j]=g->sfh_MetalsDiskMass[j];
      o->sfh_MetalsBulgeMass[j]=g->sfh_MetalsBulgeMass[j];
      o->sfh_MetalsICM[j]=g->sfh_MetalsICM[j];
//#ifdef DETAILED_METALS_AND_MASS_RETURN
#ifdef INDIVIDUAL_ELEMENTS
      o->sfh_ElementsDiskMass[j]=g->sfh_ElementsDiskMass[j];
      o->sfh_ElementsBulgeMass[j]=g->sfh_ElementsBulgeMass[j];
      o->sfh_ElementsICM[j]=g->sfh_ElementsICM[j];
#endif
//#endif
#ifdef TRACK_BURST
      o->sfh_BurstMass[j]=g->sfh_BurstMass[j];
#endif
#else // NORMALIZEDDB
      /* Normalized-DB layout: SFH bins go to the side table sfh_bin. */
      sfh_bin[j].sfh_DiskMass = g->sfh_DiskMass[j];
      sfh_bin[j].sfh_BulgeMass = g->sfh_BulgeMass[j];
      sfh_bin[j].sfh_ICM = g->sfh_ICM[j];
      sfh_bin[j].sfh_MetalsDiskMass = g->sfh_MetalsDiskMass[j];
      sfh_bin[j].sfh_MetalsBulgeMass = g->sfh_MetalsBulgeMass[j];
      sfh_bin[j].sfh_MetalsICM = g->sfh_MetalsICM[j];
      sfh_bin[j].sfh_ibin = j;
      sfh_bin[j].snapnum = g->SnapNum;
      sfh_bin[j].GalID = -1; // TODO must be reset
#endif // NORMALIZEDDB
    }

  //Set all non-used array elements to zero:
  // important if we want to read files in database that all values are valid SQLServer floats
  for (j=o->sfh_ibin+1;j<SFH_NBIN;j++)
    {
#ifndef NORMALIZEDDB
      // o->sfh_time[j]=0.;
      // o->sfh_dt[j]=0.;
      o->sfh_DiskMass[j]=0.;
      o->sfh_BulgeMass[j]=0.;
      o->sfh_ICM[j]=0.;
      o->sfh_MetalsDiskMass[j]=metals_init();
      o->sfh_MetalsBulgeMass[j]=metals_init();
      o->sfh_MetalsICM[j]=metals_init();
#ifdef INDIVIDUAL_ELEMENTS
      o->sfh_ElementsDiskMass[j]=elements_init();
      o->sfh_ElementsBulgeMass[j]=elements_init();
      o->sfh_ElementsICM[j]=elements_init();
#endif
#ifdef TRACK_BURST
      o->sfh_BurstMass[j]=0.;
#endif
#else
      sfh_bin[j].sfh_DiskMass=0;
      sfh_bin[j].sfh_BulgeMass=0;
      sfh_bin[j].sfh_ICM=0;
      sfh_bin[j].sfh_ibin = 0;
      sfh_bin[j].snapnum = g->SnapNum;
      // TODO other elements not important, are not being written anyway. Or are they used elsewhere?
#endif
    }
#endif //STAR_FORMATION_HISTORY

#ifdef INDIVIDUAL_ELEMENTS
  /* (Older per-element loop and CORRECTDBFLOAT variants removed; the
   * element structs are copied whole.) */
  o->DiskMass_elements = g->DiskMass_elements;
  o->BulgeMass_elements = g->BulgeMass_elements;
  o->ColdGas_elements = g->ColdGas_elements;
  o->HotGas_elements = g->HotGas_elements;
  o->EjectedMass_elements = g->EjectedMass_elements;
  o->ICM_elements = g->ICM_elements;
#endif

  /* Rates converted to Msun/yr, clamped to valid DB floats. */
  o->PrimordialAccretionRate = CORRECTDBFLOAT(g->PrimordialAccretionRate * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
  o->CoolingRate = CORRECTDBFLOAT(g->CoolingRate * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
  o->CoolingRate_beforeAGN = CORRECTDBFLOAT(g->CoolingRate_beforeAGN * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);

  //NOTE: in Msun/yr
#ifdef SAVE_MEMORY
  o->Sfr = CORRECTDBFLOAT(g->Sfr * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
  o->SfrBulge = CORRECTDBFLOAT(g->SfrBulge * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
#ifdef H2_AND_RINGS
  for(j=0; j<RNUM; j++)
    o->Sfrr[j] = g->Sfrr[j] * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS;
#endif
#else
  o->Sfr = CORRECTDBFLOAT(g->Sfr[n] * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
  o->SfrBulge = CORRECTDBFLOAT(g->SfrBulge[n] * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
#endif
#endif //NO_PROPS_OUTPUTS

//MAGNITUDES
#ifdef COMPUTE_SPECPHOT_PROPERTIES
#ifdef POST_PROCESS_MAGS
  /* (A large commented-out debugging block that fed VESPA star formation
   * histories through post_process_spec_mags() and dumped the resulting
   * spectra was removed here; see repository history if needed.) */
  //Convert recorded star formation histories into mags
#ifdef NORMALIZEDDB
  post_process_spec_mags(o, &(sfh_bin[0]));
#else
  post_process_spec_mags(o);
#endif
#else //ndef POST_PROCESS_MAGS
#ifdef OUTPUT_REST_MAGS
  // Luminosities are converted into Mags in various bands
  for(j = 0; j < NMAG; j++)
    {
      //o->Mag[j] = lum_to_mag(g->Lum[j][n]); -> DONE ON TOP FOR LIGHT_OUTPUT AS WELL
      o->MagBulge[j] = lum_to_mag(g->LumBulge[j][n]);
      o->MagDust[j] = lum_to_mag(g->LumDust[j][n]);
#ifdef ICL
      o->MagICL[j] = lum_to_mag(g->ICLLum[j][n]);
#endif
    }
#ifdef REIONIZEPHOTON
  // printf("phot = %lg\n",g->ReionizePhot[n]);
  o->NPhotReion = log10(g->ReionizePhot[n]);
#endif
#if defined(READXFRAC) || defined(WITHRADIATIVETRANSFER)
  o->Xfrac3d = g->Xfrac3d;
#endif
#endif //OUTPUT_REST_MAGS

#ifdef OUTPUT_OBS_MAGS
#ifdef COMPUTE_OBS_MAGS
  // Luminosities in various bands
  for(j = 0; j < NMAG; j++)
    {
      o->ObsMag[j] = lum_to_mag(g->ObsLum[j][n]);
      o->ObsMagBulge[j] = lum_to_mag(g->ObsLumBulge[j][n]);
      o->ObsMagDust[j] = lum_to_mag(g->ObsLumDust[j][n]);
#ifdef ICL
      o->ObsMagICL[j] = lum_to_mag(g->ObsICL[j][n]);
#endif
#ifdef OUTPUT_MOMAF_INPUTS
      o->dObsMag[j] = lum_to_mag(g->dObsLum[j][n]);
      o->dObsMagBulge[j] = lum_to_mag(g->dObsLumBulge[j][n]);
      o->dObsMagDust[j] = lum_to_mag(g->dObsLumDust[j][n]);
#ifdef ICL
      o->dObsMagICL[j] = lum_to_mag(g->dObsICL[j][n]);
#endif
#endif
    }
#endif //COMPUTE_OBS_MAGS
#endif //OUTPUT_OBS_MAGS
#endif //ndef POST_PROCESS_MAGS
#endif //COMPUTE_SPECPHOT_PROPERTIES

#ifndef NO_PROPS_OUTPUTS
  /* Mass-weighted stellar age, normalized by total stellar mass. */
  if((g->DiskMass+g->BulgeMass)> 0.0)
    {
      o->MassWeightAge = g->MassWeightAge[n] / (g->DiskMass+g->BulgeMass);
      o->MassWeightAge = o->MassWeightAge / 1000. * UnitTime_in_Megayears / Hubble_h;	//Age in Gyr
    }
  else
    o->MassWeightAge = 0.;
#endif

#ifdef FIX_OUTPUT_UNITS
  fix_units_for_ouput(o);
#endif
#endif //ndef LIGHT_OUTPUT

  // DEBUG
  // printf(" EXIT sfh_bin %d %f\n",sfh_bin,sfh_bin[0].sfh_DiskMass);
}
/* Position every visible sub-actor via the transform function and, when
 * bDrawPrimitives is set, draw them (optionally sorted by Z). Also
 * positions/draws the clipping mask quad when it is visible. */
void ActorScroller::PositionItemsAndDrawPrimitives( bool bDrawPrimitives )
{
	if( m_SubActors.empty() )
		return;

	float fNumItemsToDraw = m_fNumItemsToDraw;
	if( m_quadMask.GetVisible() )
	{
		// write to z buffer so that top and bottom are clipped
		// Draw an extra item; this is the one that will be masked.
		fNumItemsToDraw++;

		float fPositionFullyOffScreenTop = -(fNumItemsToDraw)/2.f;
		float fPositionFullyOffScreenBottom = (fNumItemsToDraw)/2.f;

		m_exprTransformFunction.TransformItemCached( m_quadMask, fPositionFullyOffScreenTop, -1, m_iNumItems );
		if( bDrawPrimitives )
			m_quadMask.Draw();

		m_exprTransformFunction.TransformItemCached( m_quadMask, fPositionFullyOffScreenBottom, m_iNumItems, m_iNumItems );
		if( bDrawPrimitives )
			m_quadMask.Draw();
	}

	// The window of items centered on m_fCurrentItem that can be on screen.
	float fFirstItemToDraw = m_fCurrentItem - fNumItemsToDraw/2.f;
	float fLastItemToDraw = m_fCurrentItem + fNumItemsToDraw/2.f;
	int iFirstItemToDraw = (int) ceilf( fFirstItemToDraw );
	int iLastItemToDraw = (int) ceilf( fLastItemToDraw );
	if( !m_bLoop )
	{
		// Non-looping scrollers never show indexes outside [0, m_iNumItems].
		iFirstItemToDraw = clamp( iFirstItemToDraw, 0, m_iNumItems );
		iLastItemToDraw = clamp( iLastItemToDraw, 0, m_iNumItems );
	}

	bool bDelayedDraw = m_bDrawByZPosition && !m_bLoop;
	vector<Actor*> subs;

	{
		// Shift m_SubActors so iFirstItemToDraw is at the beginning.
		int iNewFirstIndex = iFirstItemToDraw;
		int iDist = iNewFirstIndex - m_iFirstSubActorIndex;
		m_iFirstSubActorIndex = iNewFirstIndex;
		ShiftSubActors( iDist );
	}

	int iNumToDraw = iLastItemToDraw - iFirstItemToDraw;
	for( int i = 0; i < iNumToDraw; ++i )
	{
		int iItem = i + iFirstItemToDraw;
		float fPosition = iItem - m_fCurrentItem;
		int iIndex = i; // index into m_SubActors
		if( m_bLoop )
			wrap( iIndex, m_SubActors.size() );
		else if( iIndex < 0 || iIndex >= (int)m_SubActors.size() )
			continue;

		// Optimization: Zero out unused parameters so that they don't create new,
		// unnecessary entries in the position cache. On scrollers with lots of
		// items, especially with Subdivisions > 1, m_exprTransformFunction uses
		// too much memory.
		if( !m_bFunctionDependsOnPositionOffset )
			fPosition = 0;
		if( !m_bFunctionDependsOnItemIndex )
			iItem = 0;

		m_exprTransformFunction.TransformItemCached( *m_SubActors[iIndex], fPosition, iItem, m_iNumItems );
		if( bDrawPrimitives )
		{
			if( bDelayedDraw )
				subs.push_back( m_SubActors[iIndex] );
			else
				m_SubActors[iIndex]->Draw();
		}
	}

	if( bDelayedDraw )
	{
		// Draw back-to-front by Z so alpha blending is correct.
		ActorUtil::SortByZPosition( subs );
		FOREACH( Actor*, subs, a )
			(*a)->Draw();
	}
}
// Ruby allocator hook: create a fresh Ogre::RotationalSpline and hand
// ownership to the binding layer via wrap().
VALUE _alloc(VALUE self)
{
	Ogre::RotationalSpline *spline = new Ogre::RotationalSpline;
	return wrap(spline);
}
/*
 * Ruby accessor: return the spline point at `index`, or nil when the
 * index is out of range.
 *
 * Fix: the index was previously converted twice with different macros
 * (NUM2UINT for the bounds check, NUM2ULONG for the lookup), so the
 * check and the access could disagree on large values. Convert once
 * with NUM2ULONG and reuse the result for both.
 */
VALUE _get(VALUE self,VALUE index)
{
	unsigned long i = NUM2ULONG(index);
	if(i < _self->getNumPoints())
		return wrap(_self->getPoint(i));
	return Qnil;
}
/** @brief Copies all the relevant properties from the Galaxy structure into
 *         the Galaxy output structure; some units are corrected.
 *
 *  @param n output-snapshot index used to select luminosities/SFRs
 *           (e.g. g->Lum[band][n]) — presumably the photometry output slot;
 *           TODO confirm against the caller.
 *  @param g source galaxy (internal representation)
 *  @param o destination record written to the output files
 *
 *  Accretion rates and SFRs are converted from internal code units to
 *  Msun/yr; luminosities are converted to magnitudes via lum_to_mag().
 */
void prepare_galaxy_for_output(int n, struct GALAXY *g, struct GALAXY_OUTPUT *o)
{
  int j;

  /* Plain copies of structural/halo properties. */
  o->Type = g->Type;
  o->SnapNum = g->SnapNum;
  /* Mvir of the FOF group's central subhalo, not of this galaxy's own halo. */
  o->CentralMvir = get_virial_mass(Halo[g->HaloNr].FirstHaloInFOFgroup);
  o->Mvir = g->Mvir;
  o->Rvir = g->Rvir;
  o->Vvir = g->Vvir;

  for(j = 0; j < 3; j++)
    {
      o->Pos[j] = g->Pos[j];
      /* Offset from the FOF central, wrapped into the periodic box.
       * (Note: stray second semicolon kept byte-identical.) */
      o->DistanceToCentralGal[j] = wrap(Halo[Halo[g->HaloNr].FirstHaloInFOFgroup].Pos[j] - g->Pos[j],BoxSize);;
    }

  /* Baryonic components (code units). */
  o->ColdGas = g->ColdGas;
  o->DiskMass = g->DiskMass;
  o->BulgeMass = g->BulgeMass;
  o->HotGas = g->HotGas;
  o->BlackHoleMass = g->BlackHoleMass;

#ifndef POST_PROCESS_MAGS
#ifdef OUTPUT_REST_MAGS
  /* Luminosities are converted into Mags in various bands.
   * Done here (before the LIGHT_OUTPUT block) so that Mag[] is also
   * available in light-output mode. */
  for(j = 0; j < NMAG; j++)
    o->Mag[j] = lum_to_mag(g->Lum[j][n]);
#endif
#endif

#ifndef LIGHT_OUTPUT
#ifdef GALAXYTREE
  /* Tree/database identifiers. */
  o->HaloID = HaloIDs[g->HaloNr].HaloID;
  o->Redshift = ZZ[g->SnapNum];

  /* Spatial hash of the position for database indexing. */
  int ii = (int) floor(o->Pos[0] * ScaleFactor);
  int jj = (int) floor(o->Pos[1] * ScaleFactor);
  int kk = (int) floor(o->Pos[2] * ScaleFactor);
  o->PeanoKey = peano_hilbert_key(ii, jj, kk, Hashbits);

  o->SubID = calc_big_db_subid_index(g->SnapNum, Halo[g->HaloNr].FileNr, Halo[g->HaloNr].SubhaloIndex);

  /* Find the most massive (largest Len) subhalo in this FOF group by
   * walking the NextHaloInFOFgroup linked list. */
  int tmpfirst = Halo[g->HaloNr].FirstHaloInFOFgroup;
  int lenmax = 0;
  int next = tmpfirst;
  while(next != -1)
    {
      if(Halo[next].Len > lenmax)
        {
          lenmax = Halo[next].Len;
          tmpfirst = next;
        }
      next = Halo[next].NextHaloInFOFgroup;
    }
  o->MMSubID = calc_big_db_subid_index(g->SnapNum, Halo[tmpfirst].FileNr, Halo[tmpfirst].SubhaloIndex);
#endif

  /* Lookback time converted from code units to years. */
  o->LookBackTimeToSnap = NumToTime(g->SnapNum)*UnitTime_in_years/Hubble_h;
  o->InfallVmax = g->InfallVmax;
  o->InfallSnap = g->InfallSnap;
  o->HotRadius = g->HotRadius;
#ifdef HALOPROPERTIES
  o->HaloM_Mean200 = g->HaloM_Mean200;
  o->HaloM_Crit200 = g->HaloM_Crit200;
  o->HaloM_TopHat = g->HaloM_TopHat;
  o->HaloVelDisp = g->HaloVelDisp;
  o->HaloVmax = g->HaloVmax;
#endif
  o->Len = g->Len;
  o->Vmax = g->Vmax;
  o->BulgeSize = g->BulgeSize;
  o->EjectedMass = g->EjectedMass;
  o->BlackHoleGas = g->BlackHoleGas;

  for(j = 0; j < 3; j++)
    {
      o->Vel[j] = g->Vel[j];
#ifdef HALOSPIN
      o->HaloSpin[j] = g->HaloSpin[j];
#endif
      o->GasSpin[j] = g->GasSpin[j];
      o->StellarSpin[j] = g->StellarSpin[j];
#ifdef HALOPROPERTIES
      o->HaloPos[j] = g->HaloPos[j];
      o->HaloVel[j] = g->HaloVel[j];
      o->HaloSpin[j] = g->HaloSpin[j];
#endif
    }

  o->XrayLum = g->XrayLum;
  o->GasDiskRadius = g->GasDiskRadius;
  o->StellarDiskRadius = g->StellarDiskRadius;
  o->CoolingRadius = g->CoolingRadius;
  o->ICM = g->ICM;
  //o->MetalsICM = CORRECTDBFLOAT(g->MetalsICM);
  o->MetalsICM = g->MetalsICM;

  /* Accretion rates converted from code mass/time units to Msun/yr. */
  o->QuasarAccretionRate = g->QuasarAccretionRate * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS;
  o->RadioAccretionRate = g->RadioAccretionRate * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS;
  o->CosInclination = g->CosInclination;

  /* Merger clocks are only meaningful for satellites (Type 2, or Type 1
   * with a merger already triggered); zero them otherwise. */
  if(g->Type == 2 || (g->Type == 1 && g->MergeOn == 1))
    {
      o->OriMergTime=g->OriMergTime;
      o->MergTime = g->MergTime;
    }
  else
    {
      o->OriMergTime=0.0;
      o->MergTime = 0.0;
    }

#ifndef GALAXYTREE
  o->HaloIndex = g->HaloNr;
#endif
#ifdef MBPID
  o->MostBoundID = g->MostBoundID;
#endif
#ifdef GALAXYTREE
  o->DisruptOn = g->DisruptOn;
#endif
#ifdef MERGE01
  o->MergeOn = g->MergeOn;
#endif

  //METALS
  /*o->MetalsColdGas = CORRECTDBFLOAT(g->MetalsColdGas);
    o->MetalsDiskMass = CORRECTDBFLOAT(g->MetalsDiskMass);
    o->MetalsBulgeMass = CORRECTDBFLOAT(g->MetalsBulgeMass);
    o->MetalsHotGas = CORRECTDBFLOAT(g->MetalsHotGas);
    o->MetalsEjectedMass = CORRECTDBFLOAT(g->MetalsEjectedMass);
    #ifdef METALS_SELF
    o->MetalsHotGasSelf = CORRECTDBFLOAT(g->MetalsHotGasSelf);
    #endif*/
  o->MetalsColdGas = g->MetalsColdGas;
  o->MetalsDiskMass = g->MetalsDiskMass;
  o->MetalsBulgeMass = g->MetalsBulgeMass;
  o->MetalsHotGas = g->MetalsHotGas;
  o->MetalsEjectedMass = g->MetalsEjectedMass;
#ifdef METALS_SELF
  o->MetalsHotGasSelf = g->MetalsHotGasSelf;
#endif

  //STAR FORMATION HISTORIES / RATES
#ifdef STAR_FORMATION_HISTORY
  o->sfh_ibin=g->sfh_ibin;
  /* Copy the populated SFH bins, converting times to years. */
  for (j=0;j<=o->sfh_ibin;j++)
    {
      // Lookback time from output snap to middle of SFh bin, in years
      o->sfh_time[j]=(g->sfh_t[j]+g->sfh_dt[j]/2.-NumToTime(g->SnapNum))*UnitTime_in_years/Hubble_h;
      //o->sfh_time[j]=(g->sfh_t[j]+g->sfh_dt[j]/2.)*UnitTime_in_years/Hubble_h; //ROB: Lookback time to middle of SFH bin, in years
      o->sfh_dt[j]=g->sfh_dt[j]*UnitTime_in_years/Hubble_h;
      o->sfh_DiskMass[j]=g->sfh_DiskMass[j];
      o->sfh_BulgeMass[j]=g->sfh_BulgeMass[j];
      o->sfh_ICM[j]=g->sfh_ICM[j];
      o->sfh_MetalsDiskMass[j]=g->sfh_MetalsDiskMass[j];
      o->sfh_MetalsBulgeMass[j]=g->sfh_MetalsBulgeMass[j];
      o->sfh_MetalsICM[j]=g->sfh_MetalsICM[j];
#ifdef YIELDS
      o->sfh_ElementsDiskMass[j]=g->sfh_ElementsDiskMass[j];
      o->sfh_ElementsBulgeMass[j]=g->sfh_ElementsBulgeMass[j];
      o->sfh_ElementsICM[j]=g->sfh_ElementsICM[j];
#endif
    }
  /* Zero the unused SFH bins so the output record is fully initialized. */
  for (j=o->sfh_ibin+1;j<SFH_NBIN;j++)
    {
      o->sfh_time[j]=0.;
      o->sfh_DiskMass[j]=0.;
      o->sfh_BulgeMass[j]=0.;
      o->sfh_ICM[j]=0.;
      o->sfh_MetalsDiskMass[j]=metals_init();
      o->sfh_MetalsBulgeMass[j]=metals_init();
      o->sfh_MetalsICM[j]=metals_init();
#ifdef YIELDS
      o->sfh_ElementsDiskMass[j]=elements_init();
      o->sfh_ElementsBulgeMass[j]=elements_init();
      o->sfh_ElementsICM[j]=elements_init();
#endif
    }
#endif

#ifdef YIELDS
  /*for (j=0;j<ELEMENT_NUM;j++)
    {
      o->DiskMass_elements[j]=g->DiskMass_elements[j];
      o->BulgeMass_elements[j]=g->BulgeMass_elements[j];
      o->ColdGas_elements[j]=g->ColdGas_elements[j];
      o->HotGas_elements[j]=g->HotGas_elements[j];
      o->EjectedMass_elements[j]=g->EjectedMass_elements[j];
      o->ICM_elements[j]=g->ICM_elements[j];
    }*/
  /*o->DiskMass_elements = CORRECTDBFLOAT(g->DiskMass_elements);
    o->BulgeMass_elements = CORRECTDBFLOAT(g->BulgeMass_elements);
    o->ColdGas_elements = CORRECTDBFLOAT(g->ColdGas_elements);
    o->HotGas_elements = CORRECTDBFLOAT(g->HotGas_elements);
    o->ICM_elements = CORRECTDBFLOAT(g->ICM_elements);*/
  o->DiskMass_elements = g->DiskMass_elements;
  o->BulgeMass_elements = g->BulgeMass_elements;
  o->ColdGas_elements = g->ColdGas_elements;
  o->HotGas_elements = g->HotGas_elements;
  o->EjectedMass_elements = g->EjectedMass_elements;
  o->ICM_elements = g->ICM_elements;
#endif

  //NOTE: in Msun/yr
#ifdef SAVE_MEMORY
  /* With SAVE_MEMORY, Sfr is a scalar; otherwise it is per-output-snap. */
  o->Sfr = CORRECTDBFLOAT(g->Sfr * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
  o->SfrBulge = CORRECTDBFLOAT(g->SfrBulge * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
#else
  o->Sfr = CORRECTDBFLOAT(g->Sfr[n] * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
  o->SfrBulge = CORRECTDBFLOAT(g->SfrBulge[n] * UnitMass_in_g / UnitTime_in_s * SEC_PER_YEAR / SOLAR_MASS);
#endif

  //MAGNITUDES
#ifdef POST_PROCESS_MAGS
  //Convert recorded star formation histories into mags
  post_process_mags(o);
#else //POST_PROCESS_MAGS
#ifdef OUTPUT_REST_MAGS
  // Luminosities are converted into Mags in various bands
  for(j = 0; j < NMAG; j++)
    {
      //o->Mag[j] = lum_to_mag(g->Lum[j][n]); -> DONE ON TOP FOR LIGHT_OUTPUT AS WELL
      o->MagBulge[j] = lum_to_mag(g->LumBulge[j][n]);
      o->MagDust[j] = lum_to_mag(g->LumDust[j][n]);
#ifdef ICL
      o->MagICL[j] = lum_to_mag(g->ICLLum[j][n]);
#endif
    }

  /* Mass-weighted age, normalized by total stellar mass; guarded against
   * division by zero for galaxies with no stars. */
  if((g->DiskMass+g->BulgeMass)> 0.0)
    {
      o->MassWeightAge = g->MassWeightAge[n] / (g->DiskMass+g->BulgeMass);
      o->MassWeightAge = o->MassWeightAge / 1000. * UnitTime_in_Megayears / Hubble_h;	//Age in Gyr
    }
  else
    o->MassWeightAge = 0.;
#endif //OUTPUT_REST_MAGS

#ifdef OUTPUT_OBS_MAGS
#ifdef COMPUTE_OBS_MAGS
  // Luminosities in various bands
  for(j = 0; j < NMAG; j++)
    {
      o->ObsMag[j] = lum_to_mag(g->ObsLum[j][n]);
      o->ObsMagBulge[j] = lum_to_mag(g->ObsLumBulge[j][n]);
      o->ObsMagDust[j] = lum_to_mag(g->ObsLumDust[j][n]);
#ifdef ICL
      o->ObsMagICL[j] = lum_to_mag(g->ObsICL[j][n]);
#endif
#ifdef OUTPUT_MOMAF_INPUTS
      o->dObsMag[j] = lum_to_mag(g->dObsLum[j][n]);
      o->dObsMagBulge[j] = lum_to_mag(g->dObsLumBulge[j][n]);
      o->dObsMagDust[j] = lum_to_mag(g->dObsLumDust[j][n]);
#endif
    }
#endif //COMPUTE_OBS_MAGS
#endif //OUTPUT_OBS_MAGS
#endif //POST_PROCESS_MAGS

#ifdef FIX_OUTPUT_UNITS
  fix_units_for_ouput(o);
#endif
#endif //ndef LIGHT_OUTPUT
}
// Ruby binding: unwrap the Ruby page object, run the wizard from that
// page, and wrap the result back into a Ruby VALUE.
VALUE _runWizard(VALUE self, VALUE page)
{
  wxWizardPage *startPage = wrap<wxWizardPage*>(page);
  return wrap(_self->RunWizard(startPage));
}
// TCP DNS socket.  Takes ownership of an already-connected descriptor and
// registers read/write callbacks with the event loop.  write_ok starts
// false — presumably set once the socket is writable; confirm in wcb.
dnssock_tcp::dnssock_tcp (int f, cb_t cb)
  : dnssock (true, cb), fd (f), write_ok (false)
{
  // Fire rcb when data arrives on the socket.
  fdcb (fd, selread, wrap (this, &dnssock_tcp::rcb));
  // Fire wcb (with arg true) when the socket becomes writable.
  fdcb (fd, selwrite, wrap (this, &dnssock_tcp::wcb, true));
}
/* rpcc driver: parses the command line, runs the C preprocessor over the
 * input .x file (with a mode-selecting -D macro), parses the result, and
 * emits a header (-h), C++ stubs (-c), or Python bindings (-python).
 *
 * Recognized flags: -D*/-I* (forwarded to cpp), -h/-c/-python (mode,
 * mutually exclusive), -o FILE / -oFILE (output), -P PREFIX / -PPREFIX
 * (identifier prefix).  Exactly one input file ending in ".x" is required.
 */
int main (int argc, char **argv)
{
  pid_t child;
  int an;
  vec<char *> av;                 // argv for the cpp child process
  char *fname = NULL;             // the input .x file
  char *basename;
  enum { BAD, HEADER, CFILE, PYTHON } mode = BAD;
  void (*fn) (str) = NULL;        // code generator selected by mode
  int len;

  av.push_back (PATH_CPP);
  av.push_back ("-DRPCC");
  av.push_back (NULL);            // placeholder, replaced by mode macro below

  for (an = 1; an < argc; an++) {
    char *arg = argv[an];
    int arglen = strlen (arg);
    if (arg[0] == '-' && (arg[1] == 'D' || arg[1] == 'I'))
      av.push_back (arg);         // forward preprocessor flags verbatim
    else if (!fname && arglen > 2 && arg[0] != '-'
	     && arg[arglen-1] == 'x' && arg[arglen-2] == '.')
      fname = arg;                // first non-flag "*.x" is the input
    else if (!strcmp (arg, "-h") && mode == BAD)
      mode = HEADER;
    else if (!strcmp (arg, "-c") && mode == BAD)
      mode = CFILE;
    else if (!strcmp (arg, "-python") && mode == BAD)
      mode = PYTHON;
    else if (!strcmp (arg, "-o") && !outfile && ++an < argc)
      outfile = argv[an];
    else if (!strncmp (arg, "-o", 2) && !outfile && arg[2])
      outfile = arg + 2;          // -oFILE form
    else if (!strcmp (arg, "-P") && !idprefix && ++an < argc)
      idprefix = argv[an];
    else if (!strncmp (arg, "-P", 2) && !idprefix && arg[2])
      idprefix = arg + 2;         // -PPREFIX form
    else
      usage ();
  }
  if (!fname)
    usage ();
  if (idprefix)
    idprefix = idprefix << "_";   // generated identifiers become PREFIX_name

  av.push_back (fname);
  av.push_back (NULL);            // execv-style terminator

  // Strip any directory component to build the default output file name.
  if ((basename = strrchr (fname, '/')))
    basename++;
  else
    basename = fname;
  len = strlen (basename);

  // Select the mode macro, generator function, and default output name
  // ("foo.x" -> "foo.h" / "foo.C" / "foo.py": len-1 drops the trailing 'x').
  switch (mode) {
  case HEADER:
    av[2] = "-DRPCC_H";
    fn = genheader;
    if (!outfile)
      outfile = strbuf ("%.*sh", len - 1, basename);
    break;
  case CFILE:
    av[2] = "-DRPCC_C";
    fn = gencfile;
    if (!outfile)
      outfile = strbuf ("%.*sC", len - 1, basename);
    break;
  case PYTHON:
    av[2] = "-DRPCC_P";
    fn = genpython;
    if (!outfile)
      outfile = strbuf ("%.*spy", len - 1, basename);
    break;
  default:
    usage ();
    break;
  }

  child = runcpp (av.base ());    // spawn cpp; its stdout becomes our input

  if (outfile != "-") {
    if (outfile[0] != '|')
      atexit (cleanup);           // remove partial output on abnormal exit
    setstdout ();
  }
  make_sync (0);
  yyparse ();                     // parse the preprocessed .x input
  checkliterals ();

  // Emit the banner naming the real file ("-"/pipe output keeps fname).
  if (outfile != "-" && outfile[0] != '|')
    fn (outfile);
  else
    fn (fname);

#if 0
  chldcb (child, wrap (reapcpp));
  amain ();
#else
  // Synchronous variant: wait for cpp to finish and check its status.
  int status;
  if (waitpid (child, &status, 0) < 0)
    fatal ("waitpid: %m\n");
  reapcpp (status);
#endif
  return 0;
}
/*
 * do_one_pass: walk the journal once for the given recovery pass
 * (PASS_SCAN, PASS_REVOKE or PASS_REPLAY), starting from the position
 * recorded in the journal superblock.
 *
 * PASS_SCAN finds the end of the valid log (and, when the CHECKSUM
 * compat feature is set, verifies per-transaction CRC32 checksums);
 * PASS_REVOKE collects revoke records; PASS_REPLAY copies journaled
 * blocks back to their home locations.
 *
 * Returns 0 on success, or a negative errno.  A partial replay due to
 * IO errors yields the last IO error (or -EIO on pass-end mismatch).
 */
static int do_one_pass(journal_t *journal,
			struct recovery_info *info, enum passtype pass)
{
	unsigned int		first_commit_ID, next_commit_ID;
	unsigned long long	next_log_block;
	int			err, success = 0;
	journal_superblock_t *	sb;
	journal_header_t *	tmp;
	struct buffer_head *	bh;
	unsigned int		sequence;
	int			blocktype;
	int			tag_bytes = journal_tag_bytes(journal);
	__u32			crc32_sum = ~0; /* Transactional Checksums */

	/*
	 * First thing is to establish what we expect to find in the log
	 * (in terms of transaction IDs), and where (in terms of log
	 * block offsets): query the superblock.
	 */

	sb = journal->j_superblock;
	next_commit_ID = be32_to_cpu(sb->s_sequence);
	next_log_block = be32_to_cpu(sb->s_start);

	first_commit_ID = next_commit_ID;
	if (pass == PASS_SCAN)
		info->start_transaction = first_commit_ID;

	jbd_debug(1, "Starting recovery pass %d\n", pass);

	/*
	 * Now we walk through the log, transaction by transaction,
	 * making sure that each transaction has a commit block in the
	 * expected place.  Each complete transaction gets replayed back
	 * into the main filesystem.
	 */

	while (1) {
		int			flags;
		char *			tagp;
		journal_block_tag_t *	tag;
		struct buffer_head *	obh;
		struct buffer_head *	nbh;

		cond_resched();

		/* If we already know where to stop the log traversal,
		 * check right now that we haven't gone past the end of
		 * the log. */

		if (pass != PASS_SCAN)
			if (tid_geq(next_commit_ID, info->end_transaction))
				break;

		jbd_debug(2, "Scanning for sequence ID %u at %llu/%lu\n",
			  next_commit_ID, next_log_block, journal->j_last);

		/* Skip over each chunk of the transaction looking
		 * either the next descriptor block or the final commit
		 * record. */

		jbd_debug(3, "JBD: checking block %llu\n", next_log_block);
		err = jread(&bh, journal, next_log_block);
		if (err)
			goto failed;

		/* Advance, wrapping around the circular log if needed. */
		next_log_block++;
		wrap(journal, next_log_block);

		/* What kind of buffer is it?
		 *
		 * If it is a descriptor block, check that it has the
		 * expected sequence number.  Otherwise, we're all done
		 * here. */

		tmp = (journal_header_t *)bh->b_data;

		if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
			brelse(bh);
			break;
		}

		blocktype = be32_to_cpu(tmp->h_blocktype);
		sequence = be32_to_cpu(tmp->h_sequence);
		jbd_debug(3, "Found magic %d, sequence %d\n",
			  blocktype, sequence);

		if (sequence != next_commit_ID) {
			brelse(bh);
			break;
		}

		/* OK, we have a valid descriptor block which matches
		 * all of the sequence number checks.  What are we going
		 * to do with it?  That depends on the pass... */

		switch(blocktype) {
		case JFS_DESCRIPTOR_BLOCK:
			/* If it is a valid descriptor block, replay it
			 * in pass REPLAY; if journal_checksums enabled, then
			 * calculate checksums in PASS_SCAN, otherwise,
			 * just skip over the blocks it describes. */
			if (pass != PASS_REPLAY) {
				if (pass == PASS_SCAN &&
				    JFS_HAS_COMPAT_FEATURE(journal,
						JFS_FEATURE_COMPAT_CHECKSUM) &&
				    !info->end_transaction) {
					/* Fold the described blocks into the
					 * running transaction checksum. */
					if (calc_chksums(journal, bh,
							&next_log_block,
							&crc32_sum)) {
						brelse(bh);
						break;
					}
					brelse(bh);
					continue;
				}
				next_log_block += count_tags(journal, bh);
				wrap(journal, next_log_block);
				brelse(bh);
				continue;
			}

			/* A descriptor block: we can now write all of
			 * the data blocks.  Yay, useful work is finally
			 * getting done here! */

			tagp = &bh->b_data[sizeof(journal_header_t)];
			while ((tagp - bh->b_data + tag_bytes)
			       <= journal->j_blocksize) {
				unsigned long long io_block;

				tag = (journal_block_tag_t *) tagp;
				flags = be32_to_cpu(tag->t_flags);

				io_block = next_log_block++;
				wrap(journal, next_log_block);
				err = jread(&obh, journal, io_block);
				if (err) {
					/* Recover what we can, but
					 * report failure at the end. */
					success = err;
					printk (KERN_ERR
						"JBD: IO error %d recovering "
						"block %llu in log\n",
						err, io_block);
				} else {
					unsigned long long blocknr;

					J_ASSERT(obh != NULL);
					blocknr = read_tag_block(tag_bytes,
								 tag);

					/* If the block has been
					 * revoked, then we're all done
					 * here. */
					if (journal_test_revoke
					    (journal, blocknr,
					     next_commit_ID)) {
						brelse(obh);
						++info->nr_revoke_hits;
						goto skip_write;
					}

					/* Find a buffer for the new
					 * data being restored */
					nbh = __getblk(journal->j_fs_dev,
							blocknr,
							journal->j_blocksize);
					if (nbh == NULL) {
						printk(KERN_ERR
						       "JBD: Out of memory "
						       "during recovery.\n");
						err = -ENOMEM;
						brelse(bh);
						brelse(obh);
						goto failed;
					}

					lock_buffer(nbh);
					memcpy(nbh->b_data, obh->b_data,
							journal->j_blocksize);
					/* Escaped blocks had their magic
					 * replaced on write; restore it. */
					if (flags & JFS_FLAG_ESCAPE) {
						journal_header_t *header;

						header = (journal_header_t *) &nbh->b_data[0];
						header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
					}

					BUFFER_TRACE(nbh, "marking dirty");
					set_buffer_uptodate(nbh);
					mark_buffer_dirty(nbh);
					BUFFER_TRACE(nbh, "marking uptodate");
					++info->nr_replays;
					/* ll_rw_block(WRITE, 1, &nbh); */
					unlock_buffer(nbh);
					brelse(obh);
					brelse(nbh);
				}

			skip_write:
				tagp += tag_bytes;
				/* A same-UUID tag omits the 16-byte UUID. */
				if (!(flags & JFS_FLAG_SAME_UUID))
					tagp += 16;

				if (flags & JFS_FLAG_LAST_TAG)
					break;
			}

			brelse(bh);
			continue;

		case JFS_COMMIT_BLOCK:
			jbd_debug(3, "Commit block for #%u found\n",
				  next_commit_ID);
			/* Distinguishing an interrupted commit from journal
			 * corruption on a checksum mismatch:
			 *
			 * With async commit, a bad checksum on transaction n
			 * may simply mean n was interrupted mid-write — so we
			 * keep scanning; if transaction n+1's commit block is
			 * then found, n really was corrupt.  With sync commit
			 * a bad checksum is immediately treated as corruption.
			 * When the commit block for n+1 is absent we cannot
			 * tell "n corrupt" from "n (or n+1) interrupted", so
			 * we assume an interrupted commit. */

			/* Found an expected commit block: if checksums
			 * are present verify them in PASS_SCAN; else not
			 * much to do other than move on to the next sequence
			 * number. */
			if (pass == PASS_SCAN &&
			    JFS_HAS_COMPAT_FEATURE(journal,
				    JFS_FEATURE_COMPAT_CHECKSUM)) {
				int chksum_err, chksum_seen;
				struct commit_header *cbh =
					(struct commit_header *)bh->b_data;
				unsigned found_chksum =
					be32_to_cpu(cbh->h_chksum[0]);

				chksum_err = chksum_seen = 0;

				jbd_debug(3, "Checksums %x %x\n",
					  crc32_sum, found_chksum);
				if (info->end_transaction) {
					journal->j_failed_commit =
						info->end_transaction;
					brelse(bh);
					break;
				}

				if (crc32_sum == found_chksum &&
				    cbh->h_chksum_type == JBD2_CRC32_CHKSUM &&
				    cbh->h_chksum_size ==
						JBD2_CRC32_CHKSUM_SIZE)
					chksum_seen = 1;
				else if (!(cbh->h_chksum_type == 0 &&
					     cbh->h_chksum_size == 0 &&
					     found_chksum == 0 &&
					     !chksum_seen))
					/*
					 * If fs is mounted using an old kernel and then
					 * kernel with journal_chksum is used then we
					 * get a situation where the journal flag has
					 * checksum flag set but checksums are not
					 * present i.e chksum = 0, in the individual
					 * commit blocks.
					 * Hence to avoid checksum failures, in this
					 * situation, this extra check is added.
					 */
					chksum_err = 1;

				if (chksum_err) {
					info->end_transaction = next_commit_ID;
					jbd_debug(1, "Checksum_err %x %x\n",
						  crc32_sum, found_chksum);
					if (!JFS_HAS_INCOMPAT_FEATURE(journal,
					   JFS_FEATURE_INCOMPAT_ASYNC_COMMIT)){
						journal->j_failed_commit =
							next_commit_ID;
						brelse(bh);
						break;
					}
				}
				/* Reset the running checksum for the next
				 * transaction. */
				crc32_sum = ~0;
			}
			brelse(bh);
			next_commit_ID++;
			continue;

		case JFS_REVOKE_BLOCK:
			/* If we aren't in the REVOKE pass, then we can
			 * just skip over this block. */
			if (pass != PASS_REVOKE) {
				brelse(bh);
				continue;
			}

			err = scan_revoke_records(journal, bh,
						  next_commit_ID, info);
			brelse(bh);
			if (err)
				goto failed;
			continue;

		default:
			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
				  blocktype);
			brelse(bh);
			goto done;
		}
	}

 done:
	/*
	 * We broke out of the log scan loop: either we came to the
	 * known end of the log or we found an unexpected block in the
	 * log.  If the latter happened, then we know that the "current"
	 * transaction marks the end of the valid log.
	 */
	if (pass == PASS_SCAN) {
		if (!info->end_transaction)
			info->end_transaction = next_commit_ID;
	} else {
		/* It's really bad news if different passes end up at
		 * different places (but possible due to IO errors). */
		if (info->end_transaction != next_commit_ID) {
			printk (KERN_ERR "JBD: recovery pass %d ended at "
				"transaction %u, expected %u\n",
				pass, next_commit_ID, info->end_transaction);
			if (!success)
				success = -EIO;
		}
	}

	return success;

 failed:
	return err;
}
/*
 * do_one_pass: walk the journal once for the given recovery pass
 * (PASS_SCAN, PASS_REVOKE or PASS_REPLAY), starting from the position
 * recorded in the journal superblock.
 *
 * PASS_SCAN finds the end of the valid log; PASS_REVOKE collects revoke
 * records; PASS_REPLAY copies journaled blocks back to their home
 * locations, honoring revokes collected earlier.
 *
 * Returns 0 on success, or a negative errno.  A partial replay due to
 * IO errors yields the last IO error (or -EIO on pass-end mismatch).
 *
 * Fix: removed MAX_BLOCKS_PER_DESC — it was computed but never used
 * anywhere in the function (dead local, triggers unused-variable
 * warnings).
 */
static int do_one_pass(journal_t *journal,
			struct recovery_info *info, enum passtype pass)
{
	unsigned int		first_commit_ID, next_commit_ID;
	unsigned long		next_log_block;
	int			err, success = 0;
	journal_superblock_t *	sb;
	journal_header_t *	tmp;
	struct buffer_head *	bh;
	unsigned int		sequence;
	int			blocktype;

	/*
	 * First thing is to establish what we expect to find in the log
	 * (in terms of transaction IDs), and where (in terms of log
	 * block offsets): query the superblock.
	 */

	sb = journal->j_superblock;
	next_commit_ID = be32_to_cpu(sb->s_sequence);
	next_log_block = be32_to_cpu(sb->s_start);

	first_commit_ID = next_commit_ID;
	if (pass == PASS_SCAN)
		info->start_transaction = first_commit_ID;

	jbd_debug(1, "Starting recovery pass %d\n", pass);

	/*
	 * Now we walk through the log, transaction by transaction,
	 * making sure that each transaction has a commit block in the
	 * expected place.  Each complete transaction gets replayed back
	 * into the main filesystem.
	 */

	while (1) {
		int			flags;
		char *			tagp;
		journal_block_tag_t *	tag;
		struct buffer_head *	obh;
		struct buffer_head *	nbh;

		cond_resched();

		/* If we already know where to stop the log traversal,
		 * check right now that we haven't gone past the end of
		 * the log. */

		if (pass != PASS_SCAN)
			if (tid_geq(next_commit_ID, info->end_transaction))
				break;

		jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
			  next_commit_ID, next_log_block, journal->j_last);

		/* Skip over each chunk of the transaction looking
		 * either the next descriptor block or the final commit
		 * record. */

		jbd_debug(3, "JBD: checking block %ld\n", next_log_block);
		err = jread(&bh, journal, next_log_block);
		if (err)
			goto failed;

		/* Advance, wrapping around the circular log if needed. */
		next_log_block++;
		wrap(journal, next_log_block);

		/* What kind of buffer is it?
		 *
		 * If it is a descriptor block, check that it has the
		 * expected sequence number.  Otherwise, we're all done
		 * here. */

		tmp = (journal_header_t *)bh->b_data;

		if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
			brelse(bh);
			break;
		}

		blocktype = be32_to_cpu(tmp->h_blocktype);
		sequence = be32_to_cpu(tmp->h_sequence);
		jbd_debug(3, "Found magic %d, sequence %d\n",
			  blocktype, sequence);

		if (sequence != next_commit_ID) {
			brelse(bh);
			break;
		}

		/* OK, we have a valid descriptor block which matches
		 * all of the sequence number checks.  What are we going
		 * to do with it?  That depends on the pass... */

		switch(blocktype) {
		case JFS_DESCRIPTOR_BLOCK:
			/* If it is a valid descriptor block, replay it
			 * in pass REPLAY; otherwise, just skip over the
			 * blocks it describes. */
			if (pass != PASS_REPLAY) {
				next_log_block +=
					count_tags(bh, journal->j_blocksize);
				wrap(journal, next_log_block);
				brelse(bh);
				continue;
			}

			/* A descriptor block: we can now write all of
			 * the data blocks.  Yay, useful work is finally
			 * getting done here! */

			tagp = &bh->b_data[sizeof(journal_header_t)];
			while ((tagp - bh->b_data +sizeof(journal_block_tag_t))
			       <= journal->j_blocksize) {
				unsigned long io_block;

				tag = (journal_block_tag_t *) tagp;
				flags = be32_to_cpu(tag->t_flags);

				io_block = next_log_block++;
				wrap(journal, next_log_block);
				err = jread(&obh, journal, io_block);
				if (err) {
					/* Recover what we can, but
					 * report failure at the end. */
					success = err;
					printk (KERN_ERR
						"JBD: IO error %d recovering "
						"block %ld in log\n",
						err, io_block);
				} else {
					unsigned long blocknr;

					J_ASSERT(obh != NULL);
					blocknr = be32_to_cpu(tag->t_blocknr);

					/* If the block has been
					 * revoked, then we're all done
					 * here. */
					if (journal_test_revoke
					    (journal, blocknr,
					     next_commit_ID)) {
						brelse(obh);
						++info->nr_revoke_hits;
						goto skip_write;
					}

					/* Find a buffer for the new
					 * data being restored */
					nbh = __getblk(journal->j_fs_dev,
							blocknr,
							journal->j_blocksize);
					if (nbh == NULL) {
						printk(KERN_ERR
						       "JBD: Out of memory "
						       "during recovery.\n");
						err = -ENOMEM;
						brelse(bh);
						brelse(obh);
						goto failed;
					}

					lock_buffer(nbh);
					memcpy(nbh->b_data, obh->b_data,
							journal->j_blocksize);
					/* Escaped blocks had their magic
					 * replaced on write; restore it. */
					if (flags & JFS_FLAG_ESCAPE) {
						*((__be32 *)nbh->b_data) =
						cpu_to_be32(JFS_MAGIC_NUMBER);
					}

					BUFFER_TRACE(nbh, "marking dirty");
					set_buffer_uptodate(nbh);
					mark_buffer_dirty(nbh);
					BUFFER_TRACE(nbh, "marking uptodate");
					++info->nr_replays;
					/* ll_rw_block(WRITE, 1, &nbh); */
					unlock_buffer(nbh);
					brelse(obh);
					brelse(nbh);
				}

			skip_write:
				tagp += sizeof(journal_block_tag_t);
				/* A same-UUID tag omits the 16-byte UUID. */
				if (!(flags & JFS_FLAG_SAME_UUID))
					tagp += 16;

				if (flags & JFS_FLAG_LAST_TAG)
					break;
			}

			brelse(bh);
			continue;

		case JFS_COMMIT_BLOCK:
			/* Found an expected commit block: not much to
			 * do other than move on to the next sequence
			 * number. */
			brelse(bh);
			next_commit_ID++;
			continue;

		case JFS_REVOKE_BLOCK:
			/* If we aren't in the REVOKE pass, then we can
			 * just skip over this block. */
			if (pass != PASS_REVOKE) {
				brelse(bh);
				continue;
			}

			err = scan_revoke_records(journal, bh,
						  next_commit_ID, info);
			brelse(bh);
			if (err)
				goto failed;
			continue;

		default:
			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
				  blocktype);
			brelse(bh);
			goto done;
		}
	}

 done:
	/*
	 * We broke out of the log scan loop: either we came to the
	 * known end of the log or we found an unexpected block in the
	 * log.  If the latter happened, then we know that the "current"
	 * transaction marks the end of the valid log.
	 */
	if (pass == PASS_SCAN)
		info->end_transaction = next_commit_ID;
	else {
		/* It's really bad news if different passes end up at
		 * different places (but possible due to IO errors). */
		if (info->end_transaction != next_commit_ID) {
			printk (KERN_ERR "JBD: recovery pass %d ended at "
				"transaction %u, expected %u\n",
				pass, next_commit_ID, info->end_transaction);
			if (!success)
				success = -EIO;
		}
	}

	return success;

 failed:
	return err;
}
// Arm the read-side of the connection: record that the read callback is
// registered and ask the event loop to fire canread() when fd is readable.
void
hclient_t::sched_read ()
{
  selread_on = true;   // flag consulted elsewhere to avoid double-registration — TODO confirm
  fdcb (fd, selread, wrap (this, &hclient_t::canread));
}
/// Advance the preview to the next available hair style, wrapping back to
/// the first entry after the last, and refresh the character preview.
/// The widget argument (the clicked button) is unused.
void RaceDialog::onSelectNextHair(MyGUI::Widget*)
{
    // Guard: with no hair records, wrap() would take a modulo by zero.
    if (mAvailableHairs.empty())
        return;

    mHairIndex = wrap(mHairIndex + 1, mAvailableHairs.size());
    updatePreview();
}
// Schedule the next throughput sample after the configured period
// (tpt_sample_period_secs/_nsecs).  The `false` argument presumably marks
// a periodic (non-final) sample — confirm in tpt_do_sample.
static void
sched_tpt_measurement ()
{
  delaycb (tpt_sample_period_secs, tpt_sample_period_nsecs,
	   wrap (tpt_do_sample, false));
}
int ReliSock::put_bytes_nobuffer( char *buffer, int length, int send_size ) { int i, result, l_out; int pagesize = 65536; // Optimize large writes to be page sized. unsigned char * cur; unsigned char * buf = NULL; // First, encrypt the data if necessary if (get_encryption()) { if (!wrap((unsigned char *) buffer, length, buf , l_out)) { dprintf(D_SECURITY, "Encryption failed\n"); goto error; } } else { buf = (unsigned char *) malloc(length); memcpy(buf, buffer, length); } cur = buf; // Tell peer how big the transfer is going to be, if requested. // Note: send_size param is 1 (true) by default. this->encode(); if ( send_size ) { ASSERT( this->code(length) != FALSE ); ASSERT( this->end_of_message() != FALSE ); } // First drain outgoing buffers if ( !prepare_for_nobuffering(stream_encode) ) { // error flushing buffers; error message already printed goto error; } // Optimize transfer by writing in pagesized chunks. for(i = 0; i < length;) { // If there is less then a page left. if( (length - i) < pagesize ) { result = condor_write(peer_description(), _sock, (char *)cur, (length - i), _timeout); if( result < 0 ) { goto error; } cur += (length - i); i += (length - i); } else { // Send another page... result = condor_write(peer_description(), _sock, (char *)cur, pagesize, _timeout); if( result < 0 ) { goto error; } cur += pagesize; i += pagesize; } } if (i > 0) { _bytes_sent += i; } free(buf); return i; error: dprintf(D_ALWAYS, "ReliSock::put_bytes_nobuffer: Send failed.\n"); free(buf); return -1; }
// Arrange for lose_patience_cb to run one second (1s, 0ns) from now.
void
schedule_lose_patience_timer ()
{
  delaycb (1, 0, wrap (lose_patience_cb));
}
// Bind a JSON introspection RPC server to the given transport: incoming
// calls for s_prog on x are routed to dispatch().  The final `false`
// argument's meaning is defined by asrv::alloc — TODO confirm.
json_introspection_server_t::json_introspection_server_t (ptr<axprt> x)
  : m_x (x),
    m_srv (asrv::alloc (x, s_prog,
			wrap (this, &json_introspection_server_t::dispatch),
			false)) {}
int main (int argc, char *argv[]) { timeout = 120; noisy = false; zippity = false; srandom(time(0)); setprogname (argv[0]); int ch; int n = 1000; nconcur = 500; bool delay = false; timespec startat; startat.tv_nsec = 0; startat.tv_sec = 0; exited = false; hclient_id = 1; use_latencies = false; num_services = 1; tpt_sample_period_secs = 1; tpt_sample_period_nsecs = 0; int lat_stddv = 25; int lat_mean = 75; lose_patience_after = 0; id_cycler_t *svc_cycler = NULL; id_cycler_t *req_cycler = NULL; mode = NONE; bool no_pub = false; int tmp = 0; static rxx lose_patience_rxx ("(\\d+),(\\d+)"); while ((ch = getopt (argc, argv, "c:dlm:n:pr:t:v:zM:P:S:R:T:V:")) != -1) { switch (ch) { case 'c': if (!convertint (optarg, &nconcur)) usage (); if (noisy) warn << "Concurrency factor: " << nconcur << "\n"; break; case 'd': noisy = true; break; case 'l': use_latencies = true; if (noisy) warn << "Using Latencies\n"; break; case 'm': { switch (optarg[0]) { case 's': case 'S': mode = SEDA; if (noisy) warn << "In SEDA mode\n"; break; case 'o': case 'O': mode = OKWS; if (noisy) warn << "In OKWS mode\n"; break; case 'P': case 'p': mode = PHP; if (noisy) warn << "In PHP mode\n"; break; case 'f': case 'F': mode = FLASH; if (noisy) warn << "In FLASH mode\n"; break; default: usage (); break; } break; } case 'n': if (!convertint (optarg, &n)) usage (); if (noisy) warn << "Number of requests: " << n << "\n"; break; case 'p': no_pub = true; break; case 'r': if (!convertint (optarg, &tmp)) usage (); req_cycler = New id_cycler_t (true, tmp, 1); if (noisy) warn << "Ranging ids from 1 to " << tmp << " (randomly)\n"; break; case 't': { if (!convertint (optarg, &startat.tv_sec)) usage (); delay = true; if (noisy) warn << "Delaying start until time=" << startat.tv_sec << "\n"; time_t mytm = time (NULL); tmp = startat.tv_sec - mytm; if (tmp < 0) { warn << "time stamp alreached (it's " << mytm << " right now)!\n"; usage (); } if (noisy) { warn << "Starting in T minus " << tmp << " seconds\n"; } break; } 
case 'v': if (!convertint (optarg, &tmp)) usage (); svc_cycler = New id_cycler_t (true, tmp, 1); if (noisy) warn << "Randing services from 1 to " << tmp << " (randomly)\n"; break; case 'z': zippity = true; break; case 'M': if (!convertint (optarg, &lat_mean)) usage (); if (noisy) warn << "Mean of latencies: " << lat_mean << "\n"; break; case 'P': if (!convertint (optarg, &tmp)) usage (); tpt_sample_period_secs = tmp / THOUSAND; tpt_sample_period_nsecs = (tmp % THOUSAND) * MILLION; if (noisy) warn ("Sample throughput period=%d.%03d secs\n", tpt_sample_period_secs, tpt_sample_period_nsecs / MILLION); break; case 'R': req_cycler = New id_cycler_t (); if (!req_cycler->init (optarg)) usage (); break; case 'S': if (!convertint (optarg, &lat_stddv)) usage (); if (noisy) warn << "Standard dev. of latency: " << lat_stddv << "\n"; break; case 'T': if (!lose_patience_rxx.match (optarg) || !convertint (lose_patience_rxx[1], &n_still_patient) || !convertint (lose_patience_rxx[2], &lose_patience_after)) usage (); break; case 'V': svc_cycler = New id_cycler_t (); if (!svc_cycler->init (optarg)) usage (); break; default: usage (); } } argc -= optind; argv += optind; if (argc == 0) usage (); str dest = argv[0]; argc --; argv ++; // make the appropriate cyclers... if (argc > 0) { // in this case, the user supplied extra arguments after the hostname // and port; therefore, they're going to be making their own URL // by alternating static parts and cyclers. 
if (req_cycler) { warn << "Don't provide -r if you're going to make your own URI\n"; usage (); } if (svc_cycler) { warn << "Don't provide -v if you're going to make your own URI\n"; usage (); } for (int i = 0; i < argc; i++) { if (i % 2 == 0) { uri_parts.push_back (argv[i]); } else { id_cycler_t *tmp = New id_cycler_t (); if (!tmp->init (argv[i])) { warn << "Cannot parse ID cycler: " << argv[i] << "\n"; usage (); } id_cyclers.push_back (tmp); } } } else if (mode != NONE) { // no manual URL building required; just specify some defaults // though if none were specified if (!req_cycler) // roughly a million, but this way all reqs will have the same // number of digits req_cycler = New id_cycler_t (true, 900000, 100000); if (!svc_cycler) // don't cycle --- just always return 1 svc_cycler = New id_cycler_t (false, 1, 1); id_cyclers.push_back (svc_cycler); id_cyclers.push_back (req_cycler); switch (mode) { case SEDA: uri_parts.push_back ("mt"); uri_parts.push_back ("?id="); break; case OKWS: { uri_parts.push_back ("mt"); strbuf b ("?"); if (no_pub) b << "nopub=1&"; b << "id="; uri_parts.push_back (b); break; } case PHP: uri_parts.push_back ("mt"); uri_parts.push_back (".php?id="); break; case FLASH: uri_parts.push_back ("cgi-bin/mt"); uri_parts.push_back ("?"); break; default: break; } } // normdist (mean, std-dev, "precision") if (use_latencies) dist = New normdist_t (200,25); if (!hostport.match (dest)) usage (); host = hostport[1]; str port_s = hostport[3]; if (port_s) { if (!convertint (port_s, &port)) usage (); } else { port = 80; } struct timespec tsnow = sfs_get_tsnow (); // unless we don this, shit won't be initialized, and i'll // starting ripping my hair out as to why all of the timestamps // are negative clock_gettime (CLOCK_REALTIME, &tsnow); nrunning = 0; sdflag = true; nreq = n; nreq_fixed = n; tpt_last_nreq = nreq; if (delay) { timecb (startat, wrap (main2, n)); } else { main2 (n); } amain (); }
/* Note: halonr is here the FOF-background subhalo (i.e. main halo) */
/*
 * evolve_galaxies: advance all galaxies of one FOF halo by one snapshot.
 *
 *   halonr  - index of the FOF-background subhalo (main halo) being evolved
 *   ngal    - number of entries of Gal[] belonging to this halo
 *   treenr  - merger-tree number, passed through to the output routines
 *   cenngal - galaxy that satellites are re-pointed to when their type-1
 *             central merges (MERGE01 bookkeeping)
 *
 * The snapshot interval is split into STEPS mini-steps; infall, cooling,
 * star formation, mergers, AGN accretion and enrichment are applied on each
 * mini-step.  Afterwards type-2 positions are interpolated, disruption and
 * dust models are applied, progenitor galaxies are written out, and the
 * surviving galaxies are copied into the HaloGal heap.
 */
void evolve_galaxies(int halonr, int ngal, int treenr, int cenngal)
{
  int jj, p, q, nstep, centralgal, merger_centralgal, currenthalo, prevgal, start, i;
  double infallingGas, coolingGas, deltaT, Zcurr;
  double time, previoustime, newtime;   /* "time" shadows libc time(); times-to-present in code units */
  double AGNaccreted, t_Edd;
#ifdef STAR_FORMATION_HISTORY
  double age_in_years;
#endif
#ifdef HT09_DISRUPTION
  double CentralRadius, CentralMass, SatelliteRadius, SatelliteMass;
#endif

  // Eddington time in code units
  // Bizarrely, code units are UnitTime_in_s/Hubble_h
  t_Edd=1.42e16*Hubble_h/UnitTime_in_s;

  //previoustime = NumToTime(Gal[0].SnapNum);
  previoustime = NumToTime(Halo[halonr].SnapNum-1);
  newtime = NumToTime(Halo[halonr].SnapNum);

  /* Time between snapshots */
  deltaT = previoustime - newtime;
  /* Redshift of current Snapnum */
  Zcurr = ZZ[Halo[halonr].SnapNum];

  //if(halonr == 83)
  //  for(p=0;p<ngal;p++)
  //    printf("check halonr=%d id=%d type=%d\n", halonr, p,Gal[p].Type);

  /* Gal[0] is the first galaxy of this halo; its CentralGal field points at
   * the FOF central, which must be a type 0 living in this very halo. */
  centralgal = Gal[0].CentralGal;

  for (p =0;p<ngal;p++)
    mass_checks("Evolve_galaxies #0",p);

  if(Gal[centralgal].Type != 0 || Gal[centralgal].HaloNr != halonr)
    terminate("Something wrong here ..... \n");

  /* Update all galaxies to same star-formation history time-bins.
   * Needed in case some galaxy has skipped a snapshot. */
#ifdef STAR_FORMATION_HISTORY
  age_in_years=(Age[0]-previoustime)*UnitTime_in_years/Hubble_h; //ROB: age_in_years is in units of "real years"!
  nstep=0;
  for (p=0; p<ngal; p++)
    sfh_update_bins(p,Halo[halonr].SnapNum-1,nstep,age_in_years);
#endif

  //if(halonr == 84)
  //  print_galaxy("check00", centralgal, halonr);

  //for(p=0;p<ngal;p++)
  //  printf("prog=%d\n",Halo[halonr].FirstProgenitor);

  /* Handle the transfer of mass between satellites and central galaxies */
  deal_with_satellites(centralgal, ngal);

  /* Delete inconsequential galaxies: type-2 satellites with essentially no
   * baryons left are flagged type 3 (dead) and skipped from now on. */
  for (p =0;p<ngal;p++)
    if (Gal[p].Type ==2 && Gal[p].ColdGas+Gal[p].DiskMass+Gal[p].BulgeMass <1.e-8)
      Gal[p].Type = 3;
    else
      mass_checks("Evolve_galaxies #0.1",p);

  /* Calculate how much hot gas needs to be accreted to give the correct baryon fraction
   * in the main halo. This is the universal fraction, less any reduction due to reionization. */
  infallingGas = infall_recipe(centralgal, ngal, Zcurr);
  Gal[centralgal].PrimordialAccretionRate=infallingGas/deltaT;

  //if(halonr > 35 && halonr < 40)
  //  print_galaxy("check02", centralgal, halonr);

  /* All the physics are computed in a number of intervals between snapshots
   * equal to STEPS */
  for (nstep = 0; nstep < STEPS; nstep++)
    {
      //printf("step=%d\n",nstep);
      /* time to present of the current step (mid-point of the mini-step) */
      time = previoustime - (nstep + 0.5) * (deltaT / STEPS);

      /* Update all galaxies to the star-formation history time-bins of current step*/
#ifdef STAR_FORMATION_HISTORY
      age_in_years=(Age[0]-time)*UnitTime_in_years/Hubble_h;
      for (p=0; p<ngal; p++)
        sfh_update_bins(p,Halo[halonr].SnapNum-1,nstep,age_in_years);
#endif

      //if(halonr > 35 && halonr < 40)
      //  print_galaxy("check02.1", centralgal, halonr);

      /* Infall onto central galaxy only, if required to make up a baryon deficit */
#ifndef GUO13
#ifndef GUO10
      if (infallingGas > 0.)
#endif
#endif
        add_infall_to_hot(centralgal, infallingGas / STEPS);

      //if(halonr == 84)
      //  print_galaxy("check02.5", centralgal, halonr);

      mass_checks("Evolve_galaxies #0.5",centralgal);

      /* Per-galaxy physics for this mini-step: reincorporation, cooling,
       * (optionally) ring gas inflow, and star formation. */
      for (p = 0; p < ngal; p++)
        {
          //if((halonr > 28 && halonr < 32) || Gal[p].HaloNr==52)
          //if(Gal[p].SnapNum==31 || (halonr > 28 && halonr < 31))
          // if(halonr ==140)
          //   print_galaxy("check03", p, halonr);

          /* don't treat galaxies that have already merged */
          if(Gal[p].Type == 3)
            continue;

          mass_checks("Evolve_galaxies #1",p);

          /* Only centrals (type 0) and type-1 satellites keep a hot halo
           * to reincorporate into / cool from. */
          if (Gal[p].Type == 0 || Gal[p].Type == 1)
            {
              if((ReIncorporationRecipe == 0 && Gal[p].Type==0) || ReIncorporationRecipe > 0)
                reincorporate_gas(p, deltaT / STEPS);
              //if(halonr > 28 && halonr < 31)
              //  print_galaxy("check04", p, halonr);
              /* determine cooling gas given halo properties and add it to the cold phase*/
              mass_checks("Evolve_galaxies #1.5",p);
              coolingGas = cooling_recipe(p, deltaT / STEPS);
              cool_gas_onto_galaxy(p, coolingGas);
              //if(halonr > 28 && halonr < 31)
              //if(halonr ==140)
              //print_galaxy("check05", p, halonr);
            }

          mass_checks("Evolve_galaxies #2",p);

#ifdef H2_AND_RINGS
          gas_inflow(p, deltaT / STEPS);
          //if(halonr > 38 && halonr < 40)
          //print_galaxy("check06", p, halonr);
#endif

          /* stars form*/
          starformation(p, centralgal, time, deltaT / STEPS, nstep);
          //int ii;
          //for (ii = 0; ii < ngal; ii++)
          // if(halonr > 28 && halonr < 31)
          //if(halonr ==140)
          //  print_galaxy("check07", ii, halonr);
          mass_checks("Evolve_galaxies #3",p);
        } //for (p = 0; p < ngal; p++)

      /* Check for merger events */
      //if(Gal[p].Type == 1)
      //for(p = 0; p < -1; p++)
      for(p = 0; p < ngal; p++)
        {
          //if(halonr == 84)
          //  print_galaxy("check07.01", p, halonr);
#ifdef MERGE01
          if(Gal[p].Type == 2 || (Gal[p].Type == 1 && Gal[p].MergeOn == 1)) /* satellite galaxy */
#else
          if(Gal[p].Type == 2)
#endif
            {
              /* count down the dynamical-friction clock for this satellite */
              Gal[p].MergTime -= deltaT / STEPS;
#ifdef HT09_DISRUPTION
              Gal[p].MergRadius -= get_deltar(p, deltaT/STEPS );
              if(Gal[p].MergRadius<0.)
                Gal[p].MergRadius=0.;
              //printf("merge radius=%f detlar=%f\n",Gal[p].MergRadius, 100.*get_deltar(p, deltaT/STEPS ));
              disruption_code (p, time);
              /* a merger has occured! */
              //MergRadius is tracked for type 2's subject to disruption while MergTime is tracked for type 1's
              // if( ( Gal[p].Type == 2 && (Gal[p].MergRadius < Gal[centralgal].StellarDiskRadius+Gal[centralgal].BulgeSize || Gal[p].BulgeMass+Gal[p].DiskMass == 0) )
              //     || (Gal[p].Type == 1 && Gal[p].MergTime < 0.0))
              if( Gal[p].MergRadius < Gal[centralgal].StellarDiskRadius+Gal[centralgal].BulgeSize || Gal[p].BulgeMass+Gal[p].DiskMass == 0 )
              //if(Gal[p].MergTime < 0.0 || Gal[p].BulgeMass+Gal[p].DiskMass == 0)
#else
              if(Gal[p].MergTime < 0.0)
#endif
                {
                  NumMergers++;

#ifdef MERGE01
                  /* A merging type 1 hands its type-2 satellites over to cenngal.
                   * NOTE(review): the condition reads Gal[p].CentralGal == p, which is
                   * independent of q; the re-pointing loop suggests
                   * Gal[q].CentralGal == p was intended — confirm against upstream. */
                  if(Gal[p].Type == 1)
                    for(q = 0; q < ngal; q++)
                      if(Gal[q].Type == 2 && Gal[p].CentralGal == p)
                        Gal[q].CentralGal = cenngal;

                  if(Gal[p].Type == 2)
                    merger_centralgal = Gal[p].CentralGal;
                  else
                    merger_centralgal = cenngal;
#else
                  merger_centralgal = Gal[p].CentralGal;
#endif
                  mass_checks("Evolve_galaxies #4",p);
                  mass_checks("Evolve_galaxies #4",merger_centralgal);
                  mass_checks("Evolve_galaxies #4",centralgal);

                  //if(halonr == 140)
                  //print_galaxy("check08", p, halonr);
                  //if(halonr == 140)
                  //print_galaxy("check09", merger_centralgal, halonr);

                  /* transfer stars/gas, grow bulge & BH, trigger starburst */
                  deal_with_galaxy_merger(p, merger_centralgal, centralgal, time, deltaT, nstep);

                  //if(halonr == 140)
                  //print_galaxy("check10", p, halonr);
                  //if(halonr == 140)
                  //print_galaxy("check11", merger_centralgal, halonr);

                  mass_checks("Evolve_galaxies #5",p);
                  mass_checks("Evolve_galaxies #5",merger_centralgal);
                  mass_checks("Evolve_galaxies #5",centralgal);
                }
            }// if(Gal[p].Type == 2)
        }//loop on all galaxies to detect mergers

      /* Cool gas onto AGN: Eddington-limited accretion of BlackHoleGas */
      if (BlackHoleGrowth == 1)
        {
          for (p = 0; p < ngal; p++)
            {
              AGNaccreted=min(Gal[p].BlackHoleGas, Gal[p].BlackHoleMass*BlackHoleAccretionRate*deltaT/(STEPS*t_Edd));
              if (AGNaccreted > 0.)
                {
                  Gal[p].BlackHoleMass += AGNaccreted;
                  Gal[p].BlackHoleGas -= AGNaccreted;
                  // Instantaneous accretion rate. This will get overwritten on each mini-step but that's OK
                  Gal[p].QuasarAccretionRate = AGNaccreted*STEPS/deltaT;
                }
            }
        }

      //DELAYED ENRICHMENT AND MASS RETURN + FEEDBACK: No fixed yield or recycling fraction anymore. FB synced with enrichment
      for (p = 0; p < ngal; p++)
        {
#ifdef DETAILED_METALS_AND_MASS_RETURN
          update_yields_and_return_mass(p, centralgal, deltaT/STEPS, nstep);
#endif
        }

#ifdef ALL_SKY_LIGHTCONE
      /* NOTE(review): Gal[p].SnapNum is written here while p still holds ngal
       * from the preceding loop — looks out of bounds / misplaced; presumably
       * it belongs inside the for(p...) loop below. Verify before relying on
       * this code path. */
      int nr, istep, ix, iy, iz;
      istep = Halo[halonr].SnapNum*STEPS + nstep;
      Gal[p].SnapNum = Halo[halonr].SnapNum;
      for (p = 0; p < ngal; p++)
        for (nr = 0; nr < NCONES; nr++)
          for (ix = 0; ix < NREPLICA; ix++)
            for (iy = 0; iy < NREPLICA; iy++)
              for (iz = 0; iz < NREPLICA; iz++)
                inside_lightcone(p, istep, nr, ix, iy, iz);
#endif
    }/* end move forward in interval STEPS */

  /* check the bulge size*/
  //checkbulgesize_main(ngal);

  /* Interpolate type-2 positions between their original orbit and the new
   * central, shrinking with the remaining fraction of the merger clock;
   * then wrap back into the periodic box. */
  for(p = 0; p < ngal; p++)
    {
      if(Gal[p].Type == 2)
        {
          //if(halonr == 140)
          //print_galaxy("check12", p, halonr);
          /*#ifdef UPDATETYPETWO
            update_type_two_coordinate_and_velocity(treenr, p, centralgal);
            #else*/
#ifndef UPDATETYPETWO
          int jj;
          float tmppos;
          for(jj = 0; jj < 3; jj++)
            {
              tmppos = wrap(Gal[p].DistanceToCentralGal[jj],BoxSize);
              tmppos *= (Gal[p].MergTime/Gal[p].OriMergTime);
              Gal[p].Pos[jj] = Gal[p].MergCentralPos[jj] + tmppos;

              if(Gal[p].Pos[jj] < 0)
                Gal[p].Pos[jj] = BoxSize + Gal[p].Pos[jj];
              if(Gal[p].Pos[jj] > BoxSize)
                Gal[p].Pos[jj] = Gal[p].Pos[jj] - BoxSize;
            }
#endif
          /* Disruption of type 2 galaxies. Type 1 galaxies are not disrupted since usually
           * bayonic component is more compact than dark matter.*/
#ifdef DISRUPTION
          //if(halonr == 84)
          //print_galaxy("check13", p, halonr);
          disrupt(p, Gal[p].CentralGal);
          //if(halonr == 84)
          //print_galaxy("check014", p, halonr);
#endif
        }
      //if(halonr > 20 && halonr < 31)
      //if(halonr ==140)
      //  print_galaxy("check015", p, halonr);
    }

  for (p =0;p<ngal;p++)
    mass_checks("Evolve_galaxies #6",p);

#ifdef COMPUTE_SPECPHOT_PROPERTIES
#ifndef POST_PROCESS_MAGS
  int n;
  /* If this is an output snapshot apply the dust model to each galaxy */
  for(n = 0; n < NOUT; n++)
    {
      if(Halo[halonr].SnapNum == ListOutputSnaps[n])
        {
          for(p = 0; p < ngal; p++)
            dust_model(p, n, halonr);
          break;
        }
    }
#endif //POST_PROCESS_MAGS
#endif //COMPUTE_SPECPHOT_PROPERTIES

  /* now save the galaxies of all the progenitors (and free the associated storage) */
  int prog = Halo[halonr].FirstProgenitor;

  while(prog >= 0)
    {
      int currentgal;
      for(i = 0, currentgal = HaloAux[prog].FirstGalaxy; i < HaloAux[prog].NGalaxies; i++)
        {
          int nextgal = HaloGal[currentgal].NextGalaxy;
          /* this will write this galaxy to an output file and free the storage associate with it */
          output_galaxy(treenr, HaloGal[currentgal].HeapIndex);
          currentgal = nextgal;
        }
      prog = Halo[prog].NextProgenitor;
    }

  /* Copy surviving (non type-3) galaxies into the HaloGal heap, stitching up
   * the per-halo linked list (FirstGalaxy/NextGalaxy) as we go. */
  for(p = 0, prevgal = -1, currenthalo = -1, centralgal = -1, start = NGalTree; p < ngal; p++)
    {
      if(Gal[p].HaloNr != currenthalo)
        {
          currenthalo = Gal[p].HaloNr;
          HaloAux[currenthalo].FirstGalaxy = -1;
          HaloAux[currenthalo].NGalaxies = 0;
        }

      mass_checks("Evolve_galaxies #7",p);

      /* may be wrong (what/why?) */
      if(Gal[p].Type != 3)
        {
          /* grow the heap geometrically when full */
          if(NHaloGal >= MaxHaloGal)
            {
              int oldmax = MaxHaloGal;
              AllocValue_MaxHaloGal *= ALLOC_INCREASE_FACTOR;
              MaxHaloGal = AllocValue_MaxHaloGal;
              if(MaxHaloGal<NHaloGal+1)
                MaxHaloGal=NHaloGal+1;
              HaloGal = myrealloc_movable(HaloGal, sizeof(struct GALAXY) * MaxHaloGal);
              HaloGalHeap = myrealloc_movable(HaloGalHeap, sizeof(int) * MaxHaloGal);
              for(i = oldmax; i < MaxHaloGal; i++)
                HaloGalHeap[i] = i;
            }

          Gal[p].SnapNum = Halo[currenthalo].SnapNum;

#ifndef GUO10
#ifdef UPDATETYPETWO
          update_type_two_coordinate_and_velocity(treenr, p, Gal[0].CentralGal);
#endif
#endif

          /* when galaxies are outputed, the slot is filled with the
           * last galaxy in the heap. New galaxies always take the last spot */
          int nextgal = HaloGalHeap[NHaloGal];
          HaloGal[nextgal] = Gal[p];
          HaloGal[nextgal].HeapIndex = NHaloGal;

          if(HaloAux[currenthalo].FirstGalaxy < 0)
            HaloAux[currenthalo].FirstGalaxy = nextgal;

          if(prevgal >= 0)
            HaloGal[prevgal].NextGalaxy = nextgal;
          prevgal = nextgal;

          HaloAux[currenthalo].NGalaxies++;
          NHaloGal++;

#ifdef GALAXYTREE
          if(NGalTree >= MaxGalTree)
            {
              AllocValue_MaxGalTree *= ALLOC_INCREASE_FACTOR;
              MaxGalTree = AllocValue_MaxGalTree;
              if(MaxGalTree<NGalTree+1) MaxGalTree=NGalTree+1;
              GalTree = myrealloc_movable(GalTree, sizeof(struct galaxy_tree_data) * MaxGalTree);
            }

          /* register this galaxy in the galaxy merger tree */
          HaloGal[nextgal].GalTreeIndex = NGalTree;

          memset(&GalTree[NGalTree], 0, sizeof(struct galaxy_tree_data));
          GalTree[NGalTree].HaloGalIndex = nextgal;
          GalTree[NGalTree].SnapNum = Halo[currenthalo].SnapNum;
          GalTree[NGalTree].NextProgGal = -1;
          GalTree[NGalTree].DescendantGal = -1;
          GalTree[NGalTree].FirstProgGal = Gal[p].FirstProgGal;
          if(Gal[p].Type == 0)
            centralgal = NGalTree;
          NGalTree++;
#endif
        }
    }

#ifdef GALAXYTREE
  /* every tree entry added above must know its FOF central */
  for(p = start; p < NGalTree; p++)
    {
      if(centralgal < 0)
        terminate("centralgal < 0");
      GalTree[p].FOFCentralGal = centralgal;
    }
#endif

  report_memory_usage(&HighMark, "evolve_galaxies");
}
/*
 * journal_find_head: locate the head (end) of the journal's log.
 *
 * Scans forward from s_start, transaction by transaction, checking that
 * every block carries the JBD magic and the expected sequence number.
 * The scan stops at the first block that breaks the chain; at that point
 * the next expected commit ID and the head block are recorded in the
 * journal structure.
 *
 * Returns 0 on success, ENOMEM if the scratch buffer cannot be
 * allocated, or an error from the block layer / -EINVAL on a corrupt
 * block type.
 */
errcode_t journal_find_head(journal_t *journal)
{
	journal_superblock_t *sb;
	journal_header_t *hdr;
	struct buffer_head *buf;
	blk64_t log_block, head_block;
	unsigned int expected_seq, seq;
	int kind;
	int rc;

	/*
	 * The superblock tells us where the log starts and which
	 * transaction ID we expect to find there first.
	 */
	sb = journal->j_superblock;
	expected_seq = ext2fs_be32_to_cpu(sb->s_sequence);
	log_block = ext2fs_be32_to_cpu(sb->s_start);
	head_block = log_block;
	if (log_block == 0)
		return 0;	/* empty log — nothing to scan */

	/* one scratch buffer, re-pointed at each log block via b_blocknr */
	buf = getblk(journal->j_dev, 0, journal->j_blocksize);
	if (buf == NULL)
		return ENOMEM;

	while (1) {
		dbg_printf("Scanning for sequence ID %u at %lu/%lu\n",
			   expected_seq, (unsigned long)log_block,
			   journal->j_last);

		/* read the next log block into the scratch buffer */
		rc = journal_bmap(journal, log_block, &buf->b_blocknr);
		if (rc)
			goto done;
		mark_buffer_uptodate(buf, 0);
		ll_rw_block(READ, 1, &buf);
		rc = buf->b_err;
		if (rc)
			goto done;
		log_block++;
		wrap(journal, log_block);

		/* No magic means we have run past the end of the log:
		 * this is the normal termination of the scan (rc == 0). */
		hdr = (journal_header_t *) buf->b_data;
		if (hdr->h_magic != ext2fs_cpu_to_be32(JFS_MAGIC_NUMBER)) {
			dbg_printf("JBD2: wrong magic 0x%x\n", hdr->h_magic);
			goto done;
		}

		kind = ext2fs_be32_to_cpu(hdr->h_blocktype);
		seq = ext2fs_be32_to_cpu(hdr->h_sequence);
		dbg_printf("Found magic %d, sequence %d\n", kind, seq);

		/* sequence break also ends the scan normally (rc == 0) */
		if (seq != expected_seq) {
			dbg_printf("JBD2: Wrong sequence %d (wanted %d)\n",
				   seq, expected_seq);
			goto done;
		}

		if (kind == JFS_DESCRIPTOR_BLOCK) {
			/* skip the data blocks this descriptor covers */
			log_block += count_tags(journal, buf->b_data);
			wrap(journal, log_block);
		} else if (kind == JFS_COMMIT_BLOCK) {
			/* transaction complete — head moves just past it */
			head_block = log_block;
			expected_seq++;
		} else if (kind == JFS_REVOKE_BLOCK) {
			/* revoke blocks carry nothing this scan needs */
		} else {
			dbg_printf("Unrecognised magic %d, end of scan.\n",
				   kind);
			rc = -EINVAL;
			goto done;
		}
	}

done:
	if (rc == 0) {
		dbg_printf("head seq=%d blk=%llu\n", expected_seq,
			   head_block);
		journal->j_transaction_sequence = expected_seq;
		journal->j_head = head_block;
	}
	brelse(buf);
	return rc;
}
List BlockLNLP::get_smap_coefficients() { return(wrap(smap_coefficients)); }
/**
 * Gathers the collision context (jumper body, dominant canopy) for the given
 * actor and runs the forest instance-batch AABB queries against it, then
 * clears the transient interaction state again. Non-jumper actors are ignored.
 */
void Forest::simulateInteraction(Actor* actor)
{
    _debugBoxes.clear();
    _currentJumper = actor;

    Jumper* asJumper = dynamic_cast<Jumper*>( actor );
    if( !asJumper ) return;

    CanopySimulator* dominantCanopy = asJumper->getDominantCanopy();
    _currentCanopy        = dominantCanopy;
    _currentCanopyInfo    = dominantCanopy->getGearRecord();
    _jumperCanopyIsOpened = dominantCanopy->isOpened();

    // canopy collision data is only meaningful once the canopy has opened
    if( !_jumperCanopyIsOpened )
    {
        _currentCanopyCollision = NULL;
        _currentCanopyActor     = NULL;
    }
    else
    {
        _currentCanopyCollision = CanopySimulator::getCollisionGeometry( dominantCanopy->getClump() );
        _currentCanopyActor     = dominantCanopy->getNxActor();
    }

    // pick the collision atomic & physics actor matching the jumper's phase
    _currentJumperCollision = NULL;
    _currentJumperActor     = NULL;
    switch( asJumper->getPhase() )
    {
    case ::jpFreeFalling:
        _currentJumperActor     = asJumper->getFreefallActor();
        _currentJumperCollision = Jumper::getCollisionFF( asJumper->getClump() );
        assert( _currentJumperCollision );
        break;
    case ::jpFlight:
        // while the canopy is still closed the freefall body is in charge
        if( !_jumperCanopyIsOpened )
        {
            _currentJumperActor     = asJumper->getFreefallActor();
            _currentJumperCollision = Jumper::getCollisionFF( asJumper->getClump() );
        }
        else
        {
            _currentJumperActor     = asJumper->getFlightActor();
            _currentJumperCollision = Jumper::getCollisionFC( asJumper->getClump() );
        }
        assert( _currentJumperCollision );
        break;
    }

    // collide jumper with forest
    if( _currentJumperCollision )
    {
        _jumperOBB = calculateOBB( _currentJumperCollision->getGeometry(), _currentJumperCollision->getFrame()->getLTM(), 1.0f );
        _jumperOBB.center = _currentJumperActor->getGlobalPosition();
        _debugBoxes.push_back( _jumperOBB );

        float jumperQueryExtent = 250;
        Vector3f jumperQueryCenter = wrap( _jumperOBB.center );
        Vector3f jumperHalfBox( jumperQueryExtent, jumperQueryExtent, jumperQueryExtent );
        _canopyBatch->forAllInstancesInAABB(
            jumperQueryCenter - jumperHalfBox,
            jumperQueryCenter + jumperHalfBox,
            onCollideJumper, this
        );
    }

    // collide canopy with forest (skipped while the canopy is in cohesion)
    if( _currentCanopyCollision && !dominantCanopy->isCohesionState() )
    {
        _canopyOBB = calculateOBB( _currentCanopyCollision->getGeometry(), _currentCanopyCollision->getFrame()->getLTM(), 1.0f );
        _canopyOBB.center = _currentCanopyActor->getGlobalPosition();
        _debugBoxes.push_back( _canopyOBB );

        float canopyQueryExtent = 750;
        Vector3f canopyQueryCenter = wrap( _canopyOBB.center );
        Vector3f canopyHalfBox( canopyQueryExtent, canopyQueryExtent, canopyQueryExtent );
        _canopyBatch->forAllInstancesInAABB(
            canopyQueryCenter - canopyHalfBox,
            canopyQueryCenter + canopyHalfBox,
            onCollideCanopy, this
        );
    }

    // drop the transient interaction state
    _currentJumper          = NULL;
    _currentJumperCollision = NULL;
    _currentJumperActor     = NULL;
    _jumperCanopyIsOpened   = false;
    _currentCanopyCollision = NULL;
    _currentCanopyActor     = NULL;
}
JSValue toJS(ExecState* state, JSDOMGlobalObject* globalObject, Blob& blob) { return wrap(state, globalObject, blob); }
void CXBMCRenderManager::WaitPresentTime(double presenttime) { double frametime; int fps = g_VideoReferenceClock.GetRefreshRate(&frametime); if(fps <= 0) { /* smooth video not enabled */ CDVDClock::WaitAbsoluteClock(presenttime * DVD_TIME_BASE); return; } bool ismaster = CDVDClock::IsMasterClock(); //the videoreferenceclock updates its clock on every vertical blank //we want every frame's presenttime to end up in the middle of two vblanks //if CDVDPlayerAudio is the master clock, we add a correction to the presenttime if (ismaster) presenttime += m_presentcorr * frametime; double clock = CDVDClock::WaitAbsoluteClock(presenttime * DVD_TIME_BASE) / DVD_TIME_BASE; double target = 0.5; double error = ( clock - presenttime ) / frametime - target; m_presenterr = error; // correct error so it targets the closest vblank error = wrap(error, 0.0 - target, 1.0 - target); // scale the error used for correction, // based on how much buffer we have on // that side of the target if(error > 0) error /= 2.0 * (1.0 - target); if(error < 0) error /= 2.0 * (0.0 + target); //save error in the buffer m_errorindex = (m_errorindex + 1) % ERRORBUFFSIZE; m_errorbuff[m_errorindex] = error; //get the average error from the buffer double avgerror = 0.0; for (int i = 0; i < ERRORBUFFSIZE; i++) avgerror += m_errorbuff[i]; avgerror /= ERRORBUFFSIZE; //if CDVDPlayerAudio is not the master clock, we change the clock speed slightly //to make every frame's presenttime end up in the middle of two vblanks if (!ismaster) { //integral correction, clamp to -0.5:0.5 range m_presentcorr = std::max(std::min(m_presentcorr + avgerror * 0.01, 0.1), -0.1); g_VideoReferenceClock.SetFineAdjust(1.0 - avgerror * 0.01 - m_presentcorr * 0.01); } else { //integral correction, wrap to -0.5:0.5 range m_presentcorr = wrap(m_presentcorr + avgerror * 0.01, target - 1.0, target); g_VideoReferenceClock.SetFineAdjust(1.0); } //printf("%f %f % 2.0f%% % f % f\n", presenttime, clock, m_presentcorr * 100, error, error_org); }