// Bloom post-process chain: bright-pass extraction, half-resolution separable
// blur, then composite of the blurred result onto the light accumulation
// buffer. The 'cam' parameter is unused in this pass.
void CBloom::DoPostProcess(const CCamera* cam) {
    Renderer* renderer = Renderer::getInstance();
    // FX constant buffer layout: x/y = render-target dimensions,
    // z = bloom strength, w = unused.
    Vector4 bufferData(renderer->GetOutputFXBuffer()->GetRenderTargetWidth(), renderer->GetOutputFXBuffer()->GetRenderTargetHeight(), mBloomFactor, 0);
    ConstantBufferManager::BindFXBuffer(bufferData);
    // Bright-pass/bloom extraction into the output FX buffer.
    // NOTE(review): the original comment labelled this "Blur Vert Pass" but
    // the shader bound is FX_BLOOM — confirm which description is intended.
    renderer->PostProcess(renderer->GetInputFXBuffer(), NULL, renderer->GetOutputFXBuffer(), Shader::FX_BLOOM);
    renderer->SwapTempFXBuffers();
    // Down sample - set viewport to half screen size, then blur horizontally
    // into the half-resolution colour target.
    renderer->SetViewport(renderer->mHalfScreenVP);
    renderer->PostProcess(renderer->GetInputFXBuffer(), NULL, &renderer->mColorHalf0, Shader::FX_BLURX);
    // Up sample - set viewport back to our (Oculus) screen resolution.
    renderer->SetViewport(renderer->mOculusVP);
    // Rebind the FX constants with the half-res source dimensions so the
    // vertical blur samples with the correct texel size.
    bufferData.x = renderer->mColorHalf0.GetRenderTargetWidth();
    bufferData.y = renderer->mColorHalf0.GetRenderTargetHeight();
    ConstantBufferManager::BindFXBuffer(bufferData);
    renderer->PostProcess(&renderer->mColorHalf0, NULL, renderer->GetOutputFXBuffer(), Shader::FX_BLURY);
    renderer->SwapTempFXBuffers();
    // Add blurred bloom to light buffer.
    renderer->PostProcess(&renderer->mLightAccumBuffer, renderer->GetInputFXBuffer(), renderer->GetOutputFXBuffer(), Shader::POSTPROCESS);
    renderer->SwapTempFXBuffers();
}
// Handles CMSG_LF_GUILD_GET_APPLICATIONS: sends the client the list of guild
// finder applications this player currently has pending, plus how many more
// applications the player may still submit (out of 10).
void WorldSession::HandleGuildFinderGetApplications(WorldPacket& /*recvPacket*/)
{
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Received CMSG_LF_GUILD_GET_APPLICATIONS"); // Empty opcode
    std::list<MembershipRequest> applicatedGuilds = sGuildFinderMgr->GetAllMembershipRequestsForPlayer(GetPlayer()->GetGUIDLow());
    uint32 applicationsCount = applicatedGuilds.size();
    // 7 header bytes plus an estimated 54 bytes per application entry.
    WorldPacket data(SMSG_LF_GUILD_MEMBERSHIP_LIST_UPDATED, 7 + 54 * applicationsCount);
    data.WriteBits(applicationsCount, 20);
    if (applicationsCount > 0)
    {
        // The byte-aligned payload is accumulated separately and appended
        // after the bit-packed section. The exact interleaving of GUID bits,
        // byte-sequenced GUID parts and payload fields below matches the
        // client's expected wire format — do not reorder these writes.
        ByteBuffer bufferData(54 * applicationsCount);
        for (std::list<MembershipRequest>::const_iterator itr = applicatedGuilds.begin(); itr != applicatedGuilds.end(); ++itr)
        {
            Guild* guild = sGuildMgr->GetGuildById(itr->GetGuildId());
            LFGuildSettings guildSettings = sGuildFinderMgr->GetGuildSettings(itr->GetGuildId());
            MembershipRequest request = *itr;
            ObjectGuid guildGuid = ObjectGuid(guild->GetGUID());
            // Bit-packed GUID mask and string lengths.
            data.WriteBit(guildGuid[1]);
            data.WriteBit(guildGuid[0]);
            data.WriteBit(guildGuid[5]);
            data.WriteBits(request.GetComment().size(), 11);
            data.WriteBit(guildGuid[3]);
            data.WriteBit(guildGuid[7]);
            data.WriteBit(guildGuid[4]);
            data.WriteBit(guildGuid[6]);
            data.WriteBit(guildGuid[2]);
            data.WriteBits(guild->GetName().size(), 8);
            // Byte-sequenced GUID parts interleaved with the payload fields.
            bufferData.WriteByteSeq(guildGuid[2]);
            bufferData.WriteString(request.GetComment());
            bufferData.WriteByteSeq(guildGuid[5]);
            bufferData.WriteString(guild->GetName());
            bufferData << uint32(guildSettings.GetAvailability());
            bufferData << uint32(request.GetExpiryTime() - time(NULL)); // Time left to application expiry (seconds)
            bufferData.WriteByteSeq(guildGuid[0]);
            bufferData.WriteByteSeq(guildGuid[6]);
            bufferData.WriteByteSeq(guildGuid[3]);
            bufferData.WriteByteSeq(guildGuid[7]);
            bufferData << uint32(guildSettings.GetClassRoles());
            bufferData.WriteByteSeq(guildGuid[4]);
            bufferData.WriteByteSeq(guildGuid[1]);
            bufferData << uint32(time(NULL) - request.GetSubmitTime()); // Time since application (seconds)
            bufferData << uint32(guildSettings.GetInterests());
        }
        data.FlushBits();
        data.append(bufferData);
    }
    data << uint32(10 - sGuildFinderMgr->CountRequestsFromPlayer(GetPlayer()->GetGUIDLow())); // Applications count left
    GetPlayer()->SendDirectMessage(&data);
}
// Builds wireframe geometry for a pyramid whose square base (side length x)
// is centred on the origin in the XY plane and whose apex sits at height z
// on the Z axis. Vertices are emitted in line-segment pairs.
Pyramid::Pyramid(float x, float z) {
    const float half = x / 2;
    // Corners 0-3 are the base (counter-clockwise), corner 4 is the apex.
    model_vertices.push_back(glm::vec3(half, half, 0.0f));
    model_vertices.push_back(glm::vec3(half, -half, 0.0f));
    model_vertices.push_back(glm::vec3(-half, -half, 0.0f));
    model_vertices.push_back(glm::vec3(-half, half, 0.0f));
    model_vertices.push_back(glm::vec3(0.0f, 0.0f, z));
    // Each index pair is one line segment: the four base edges first, then
    // the four edges joining each base corner to the apex.
    static const int edges[8][2] = {
        {0, 1}, {1, 2}, {2, 3}, {3, 0},
        {0, 4}, {1, 4}, {2, 4}, {3, 4}
    };
    for (int e = 0; e < 8; ++e) {
        vertex_buffer_data.push_back(model_vertices[edges[e][0]]);
        vertex_buffer_data.push_back(model_vertices[edges[e][1]]);
    }
    bufferData();
}
// Pumps the Ogg stream until the video track has decoded one more frame (or
// the stream ends), queuing decoded audio along the way, then makes sure the
// audio queue is sufficiently full.
void TheoraDecoder::readNextPacket() {
    // First, let's get our frame
    if (_hasVideo) {
        while (!_videoTrack->endOfTrack()) {
            // theora is one in, one out...
            if (ogg_stream_packetout(&_theoraOut, &_oggPacket) > 0) {
                // A successful decode yields the next frame; stop pumping.
                if (_videoTrack->decodePacket(_oggPacket))
                    break;
            } else if (_theoraOut.e_o_s || _fileStream->eos()) {
                // If we can't get any more frames, we're done.
                _videoTrack->setEndOfVideo();
            } else {
                // Queue more data: read from the file into the sync layer,
                // then route complete pages to their logical streams.
                bufferData();
                while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
                    queuePage(&_oggPage);
            }
            // Update audio if we can
            queueAudio();
        }
    }
    // Then make sure we have enough audio buffered
    ensureAudioBufferSize();
}
// Serializes the chain of log buffers into a single mlog::Buffer message with
// the layout: [group:uint32][level:LogLevel][payload size:size_t][payload][NUL].
// NOTE(review): header fields are written through raw pointer casts, so the
// layout assumes native endianness and permissive alignment — confirm the
// consumer makes the same assumptions.
mlog::Buffer buffer(uint32_t group, mlog::LogLevel level)
{
    // Count the chained buffers after the head; each holds bufferSize bytes.
    size_t size = 0;
    for(Buffer * c = buffer_.next; c; c = c->next)
        ++size;
    // Bytes written into the current (tail) buffer, taken from the streambuf
    // put pointer.
    size_t pos = pptr() - &cur_->data[0];
    // Total payload: all full buffers plus the partial tail.
    size = size * bufferSize + pos;
    size_t resultSize = size + sizeof(size_t) + sizeof(group) + sizeof(level) + 1;
#if MLOG_USE_BUFFERS
    mstd::pbuffer result = mstd::buffers::instance().take(resultSize);
#else
    mstd::rc_buffer result(resultSize);
#endif
    char * p = bufferData(result);
    // Write the fixed header: group id, log level, payload size.
    *mstd::pointer_cast<uint32_t*>(p) = group;
    p += sizeof(group);
    *mstd::pointer_cast<LogLevel*>(p) = level;
    p += sizeof(level);
    *mstd::pointer_cast<size_t*>(p) = size;
    p += sizeof(size);
    // Copy each buffer in the chain in full; the last one (no next pointer)
    // contributes only the `pos` bytes actually written.
    for(Buffer * c = &buffer_; ; c = c->next)
    {
        if(!c->next)
        {
            memcpy(p, &c->data[0], pos);
            p += pos;
            break;
        } else {
            memcpy(p, &c->data[0], bufferSize);
            p += bufferSize;
        }
    }
    // Trailing NUL terminator.
    *p = 0;
    return result;
}
// One-time GL and scene setup: global GL state, the 3x3 grid of terrain
// meshes, shaders, vertex/index buffers, the ground texture, and shader
// attribute/uniform locations. Always returns 1.
int init( void )
{
    // general settings
    srand( time( NULL ) );
    glClearColor( skyColor.x, skyColor.y, skyColor.z, 1.0 );
    glEnable( GL_DEPTH_TEST );
    glEnable( GL_TEXTURE_3D );
    glPolygonMode( GL_FRONT_AND_BACK, GL_FILL );
    // Byte-tight pixel packing so texture rows need no row padding.
    glPixelStorei( GL_PACK_ALIGNMENT, 1 );
    glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
    // set up data and shaders: build all nine meshes first...
    for( int y = 0; y < 3; ++y )
    {
        for( int x = 0; x < 3; ++x )
        {
            newMesh( x, y, LEFT | DOWN );
        }
    }
    for( int y = 0; y < 3; ++y ) // normals must be calculated AFTER all vertices are set
    {
        for( int x = 0; x < 3; ++x )
        {
            createMeshNormals( x, y );
        }
    }
    resetView();
    shaderProgram = InitShader( "vert.glsl", "frag.glsl" );
    // buffer vertex and index data: one buffer name per mesh in the 3x3 grid
    glGenBuffers( 9, (GLuint*)pointBuffer );
    glGenBuffers( 9, (GLuint*)indexBuffer );
    for( int x = 0; x < 3; ++x )
    {
        for( int y = 0; y < 3; ++y )
        {
            bufferData( x, y );
        }
    }
    // create ground texture
    createAndBufferTexture();
    // connect vertex attributes
    vPositionLoc = glGetAttribLocation( shaderProgram, "a_vPosition" );
    glEnableVertexAttribArray( vPositionLoc );
    vNormalLoc = glGetAttribLocation( shaderProgram, "a_vNormal" );
    glEnableVertexAttribArray( vNormalLoc );
    // get location of uniforms
    maxHeightLoc = glGetUniformLocation( shaderProgram, "u_fMaxHeight" );
    mvMatrixLoc = glGetUniformLocation( shaderProgram, "u_mv_Matrix" );
    glUniform1f( maxHeightLoc, maxHeight );
    return 1;
}
// Creates platform font data from raw font-file bytes. On modern macOS the
// font is built directly through CoreGraphics; on Tiger/Leopard it is
// activated privately through ATS instead. Returns 0 on any failure.
FontCustomPlatformData* createFontCustomPlatformData(SharedBuffer* buffer)
{
    ASSERT_ARG(buffer, buffer);
    ATSFontContainerRef containerRef = 0;
    ATSFontRef fontRef = 0;
    RetainPtr<CGFontRef> cgFontRef;
#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
    // Wrap the buffer in a CFData-backed data provider and let CG parse it.
    RetainPtr<CFDataRef> bufferData(AdoptCF, buffer->createCFData());
    RetainPtr<CGDataProviderRef> dataProvider(AdoptCF, CGDataProviderCreateWithCFData(bufferData.get()));
    cgFontRef.adoptCF(CGFontCreateWithDataProvider(dataProvider.get()));
    if (!cgFontRef)
        return 0;
#else
    // Use ATS to activate the font.
    // The value "3" means that the font is private and can't be seen by anyone else.
    ATSFontActivateFromMemory((void*)buffer->data(), buffer->size(), 3, kATSFontFormatUnspecified, NULL, kATSOptionFlagsDefault, &containerRef);
    if (!containerRef)
        return 0;
    ItemCount fontCount;
    ATSFontFindFromContainer(containerRef, kATSOptionFlagsDefault, 0, NULL, &fontCount);
    // We just support the first font in the list.
    if (fontCount == 0) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
    ATSFontFindFromContainer(containerRef, kATSOptionFlagsDefault, 1, &fontRef, NULL);
    if (!fontRef) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
    cgFontRef.adoptCF(CGFontCreateWithPlatformFont(&fontRef));
#ifndef BUILDING_ON_TIGER
    // Workaround for <rdar://problem/5675504>: reject fonts CG reports as
    // having no glyphs.
    if (cgFontRef && !CGFontGetNumberOfGlyphs(cgFontRef.get()))
        cgFontRef = 0;
#endif
    if (!cgFontRef) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
#endif // !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
    return new FontCustomPlatformData(containerRef, fontRef, cgFontRef.releaseRef());
}
// Builds a wire grid of line segments covering an x-by-y plane centred on the
// origin in the XY plane, with `res` subdivisions along each axis.
//
// Bug fix: the line endpoints previously extended to +/-x and +/-y — twice
// the plane's half-extents — while the loop variables only sweep
// [-x/2, x/2) and [-y/2, y/2). Every grid line therefore overshot the plane
// bounds by a factor of two. Endpoints now use the half-extents so the grid
// exactly covers the plane.
Plane::Plane(float x, float y, int res) {
    float step_x = x / res;
    float step_y = y / res;
    // Horizontal lines: one per row, spanning the plane's full width.
    for (float j = -y / 2; j < y / 2; j += step_y) {
        vertex_buffer_data.push_back(glm::vec3(-x / 2, j, 0.0f));
        vertex_buffer_data.push_back(glm::vec3(x / 2, j, 0.0f));
    }
    // Vertical lines: one per column, spanning the plane's full height.
    for (float i = -x / 2; i < x / 2; i += step_x) {
        vertex_buffer_data.push_back(glm::vec3(i, y / 2, 0.0f));
        vertex_buffer_data.push_back(glm::vec3(i, -y / 2, 0.0f));
    }
    bufferData();
}
void SoundManager::update() { Common::StackLock lock(_mutex); for (int i = 1; i < kChannelCount; i++) { if (!_channels[i]) continue; // Free the channel if it is no longer playing if (!isPlaying(i)) { freeChannel(i); continue; } // Try to buffer some more data bufferData(i); } }
void TheoraDecoder::ensureAudioBufferSize() { if (!_hasAudio) return; // Force at least some audio to be buffered while (_audioTrack->needsAudio()) { bufferData(); while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) queuePage(&_oggPage); bool queuedAudio = queueAudio(); if ((_vorbisOut.e_o_s || _fileStream->eos()) && !queuedAudio) { _audioTrack->setEndOfAudio(); break; } } }
// Feeds one ADC sample into the peak detector. The first two samples only
// prime the circular buffer; after that every sample is buffered and checked
// for a peak. Always returns 0.
int peakDetection(int input){
    // Sample clock runs at 250 Hz.
    // NOTE(review): integer division truncates currentTime to whole seconds —
    // confirm a fractional time is not needed here.
    currentTime = sampleCounter/250;
    sampleCounter++;
    if(init<2){
        // Warm-up: store the first two samples into the circular buffer
        // (loopCheck wraps the index within the 4-slot buffer).
        buffer[loopCheck(init,4)] = input;
        init++;
        counter = init;
    }else{
        // Normal operation: buffer the sample and run peak detection on it.
        bufferData(input);
        checkData(input);
        counter++;
        // Report and reset after five consecutive missed detections.
        if(misscounter==5){
            printf("Missed 5 times in a row.\n");
            misscounter = 0;
        }
    }
    return 0;
}
// High-priority interrupt service routine: handles the millisecond tick from
// Timer0 rollover and buffers incoming CAN messages from receive buffer 1.
void high_isr(void)
{
    // check for timer0 rollover indicating a millisecond has passed
    if (INTCONbits.TMR0IF)
    {
        INTCONbits.TMR0IF = FALSE;
        // Reload the timer so the next rollover lands one millisecond later.
        // NOTE(review): the value loaded is 0x85 but the original comment's
        // arithmetic (0xFF - 0x7D (125) = 0x82) gives 0x82 — the two disagree;
        // confirm which reload count is intended.
        WriteTimer0(0x85); // load timer registers
        millis++;
    }
    // check for received CAN message
    if(PIR5bits.RXB1IF)
    {
        PIR5bits.RXB1IF = FALSE; // reset the flag
        // get data from receive buffer
        ECANReceiveMessage(&id, data, &dataLen, &flags);
        bufferData(); // put data in an array
    }
    return;
}
// Renders the current scene: the axes (always unlit), then the active shape,
// re-uploading and rebinding the vertex data first when it has been flagged
// dirty.
void drawScene()
{
    // Axes are drawn without lighting even when lighting is enabled.
    if (options[OPT_LIGHTENING])
        glDisable(GL_LIGHTING);
    drawAxes();
    if (options[OPT_LIGHTENING])
        glEnable(GL_LIGHTING);
    /* update buffer: re-upload geometry and rebind array pointers */
    if (updateBuffer == TRUE)
    {
        bufferData();
        glBindBuffer(GL_ARRAY_BUFFER,buffers[NORMALS]);
        glNormalPointer(GL_FLOAT, 0 , 0);
        glBindBuffer(GL_ARRAY_BUFFER,buffers[VERTICES]);
        glVertexPointer(3, GL_FLOAT, 0 , 0);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,buffers[INDICES]);
        updateBuffer = FALSE;
    }
    /* select drawing method */
    switch (currentShape)
    {
    // Immediate-mode shapes bypass the buffers entirely.
    case SHAPE_IMM_GRID:
    case SHAPE_IMM_SPHERE:
    case SHAPE_IMM_TORUS:
        drawImmediateShape(&geometry);
        break;
    // Buffered shapes draw as indexed quads: 4 indices per grid cell.
    case SHAPE_GRID:
    case SHAPE_SPHERE:
    case SHAPE_TORUS:
    case SHAPE_INNER_GRID:
    case SHAPE_INNER_SHPERE:
    case SHAPE_INNER_TORUS:
        glDrawElements(GL_QUADS, geometry.rows * geometry.cols * 4, GL_UNSIGNED_INT, 0);
        break;
    }
}
// Copies up to MaxOut buffered bytes into dataOut, first refilling the
// internal buffer from the socket when it is empty.
// Throws the (negative) bufferData() error code if the refill failed and left
// the buffer empty, or 0 if the refill simply produced no data.
// Returns the number of bytes copied out.
uint32_t SocketReader::getAvailableData(uint8_t * dataOut, const uint32_t MaxOut) throw (int)
{
    if (isBufferEmpty()) {
        const int refillResult = bufferData();
        if (_BufferIndex == 0U) {
            // Nothing buffered after the refill: propagate the error code if
            // there was one, otherwise signal "no data" with 0.
            throw (refillResult < 0) ? refillResult : 0;
        }
    }
    const uint32_t available = _BufferIndex - _BufferReadIndex;
    const uint32_t copyCount = MIN(available, MaxOut);
    memcpy(dataOut, &_BufferedData[_BufferReadIndex], copyCount);
    _BufferReadIndex += copyCount;
    return copyCount;
}
// Builds an icosphere of radius r: each of the 20 faces of the base
// icosahedron is recursively subdivided `ndiv` times (via drawtri, which
// appends the resulting vertices), then every vertex is given a flat white
// colour.
Sphere::Sphere(int ndiv, float r) {
    // Emit the subdivided triangles for all 20 icosahedron faces.
    for (int i = 0; i < 20; i++) {
        drawtri( vdata[tindices[i][0]], vdata[tindices[i][1]], vdata[tindices[i][2]], ndiv, r );
    }
    // Colour every generated vertex white. size_t avoids the
    // signed/unsigned comparison the original `int` counter produced
    // against vertex_buffer_data.size().
    for (size_t i = 0; i < vertex_buffer_data.size(); i++) {
        glm::vec3 v = glm::vec3(1.0f, 1.0f, 1.0f);
        color_buffer_data.push_back(v);
        // Alternative colouring kept from the original (disabled):
        // glm::vec3 v = vertex_buffer_data[i] + glm::vec3(r / 2);
        // color_buffer_data.push_back(glm::normalize(v));
    }
    bufferData();
}
void init() { glClearColor(0.0, 0.0, 0.0, 0.0); memset(options, 0, sizeof(bool) * OPTSIZE); options[OPT_STDOUT_TEXT] = TRUE; options[OPT_OSP_SHORT_LONG] = TRUE; uTessellation = WELL_FORM_TESS; /* set starting default setting */ currentShape = SHAPE_GRID; sprintf(tessString, "Tessellation: %d x %d",uTessellation,uTessellation); sprintf(bumpSizeString,"Number Of Bumps: %d x %d",uNumOfBumps,uNumOfBumps); strcpy(localViewString,"LocalView: Off "); strcpy(infoString, "Shader: Fixed pipeline"); strcpy(drawMethodString,currentDrawingMethod[currentShape][0]); createShape(currentShape); generateBuffers(); enableVertexArrays(); generateBuffers(); bufferData(); }
// Draws the point cloud as textured, distance-attenuated point sprites,
// re-uploading the point data first when it has changed since the last draw.
void PointBuffer::render()
{
    bindBuffer(GL_ARRAY_BUFFER, m_glBuffer);
    if (m_dirty) {
        // upload point data
        bufferData(GL_ARRAY_BUFFER, size() * sizeof(Point), m_pointList.data(), GL_STATIC_DRAW);
        m_dirty = false;
    }
    // Blending setup.
    // NOTE(review): the original comment said "additive blending", but the
    // blend function used (SRC_ALPHA, ONE_MINUS_SRC_ALPHA) is standard alpha
    // blending — confirm which was intended.
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    // Texture the sprites with the cloud texture; COORD_REPLACE makes GL
    // generate per-sprite texture coordinates.
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, m_cloudTexture);
    glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    // Distance attenuation: only the quadratic coefficient is non-zero.
    float quadratic[] = { 0.0f, 0.0f, 0.01f };
    pointParameterFV(GL_POINT_DISTANCE_ATTENUATION_ARB, quadratic);
    pointParameterF(GL_POINT_SIZE_MIN_ARB, 0.0);
    pointParameterF(GL_POINT_SIZE_MAX_ARB, 64.0);
    glEnable(GL_POINT_SPRITE_ARB);
    glEnable(GL_POINT_SMOOTH );
    glPointSize(64.0);
    // Interleaved Point layout: 3 position floats then 4 colour floats.
    glVertexPointer(3, GL_FLOAT, sizeof(Point), vertexOffset(0));
    glColorPointer(4, GL_FLOAT, sizeof(Point), vertexOffset(sizeof(float) * 3));
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glDrawArrays(GL_POINTS, 0, size());
}
// Appends `size` bytes from `buffer` to the pending UDP packet.
// Returns the number of bytes the transmit buffer actually accepted.
size_t EthernetUDP::write(const uint8_t *buffer, size_t size)
{
    return bufferData(buffer, size);
}
// Builds a solid box of dimensions x * y * z centred on the origin.
// The eight corners are computed once into model_vertices, then each face is
// emitted as two triangles (6 vertices) with a per-face solid colour.
Box::Box(float x, float y, float z)
{
    float d_x = x / 2;
    float d_y = y / 2;
    float d_z = z / 2;
    // Corners: indices 0-3 on the -z side, 4-7 on the +z side.
    model_vertices.push_back(glm::vec3(d_x, d_y, -d_z));
    model_vertices.push_back(glm::vec3(d_x, -d_y, -d_z));
    model_vertices.push_back(glm::vec3(-d_x, -d_y, -d_z));
    model_vertices.push_back(glm::vec3(-d_x, d_y, -d_z));
    model_vertices.push_back(glm::vec3(d_x, d_y, d_z));
    model_vertices.push_back(glm::vec3(d_x, -d_y, d_z));
    model_vertices.push_back(glm::vec3(-d_x, -d_y, d_z));
    model_vertices.push_back(glm::vec3(-d_x, d_y, d_z));
    // +X face (red).
    vertex_buffer_data.push_back(model_vertices[0]);
    vertex_buffer_data.push_back(model_vertices[1]);
    vertex_buffer_data.push_back(model_vertices[5]);
    vertex_buffer_data.push_back(model_vertices[0]);
    vertex_buffer_data.push_back(model_vertices[4]);
    vertex_buffer_data.push_back(model_vertices[5]);
    for (int i = 0; i < 6; i++) {
        glm::vec3 v = glm::vec3(1.0f, 0.0f, 0.0f);
        color_buffer_data.push_back(v);
    }
    // -Y face (green).
    vertex_buffer_data.push_back(model_vertices[2]);
    vertex_buffer_data.push_back(model_vertices[1]);
    vertex_buffer_data.push_back(model_vertices[5]);
    vertex_buffer_data.push_back(model_vertices[2]);
    vertex_buffer_data.push_back(model_vertices[6]);
    vertex_buffer_data.push_back(model_vertices[5]);
    for (int i = 0; i < 6; i++) {
        glm::vec3 v = glm::vec3(0.0f, 1.0f, 0.0f);
        color_buffer_data.push_back(v);
    }
    // -X face (blue).
    vertex_buffer_data.push_back(model_vertices[2]);
    vertex_buffer_data.push_back(model_vertices[3]);
    vertex_buffer_data.push_back(model_vertices[7]);
    vertex_buffer_data.push_back(model_vertices[2]);
    vertex_buffer_data.push_back(model_vertices[6]);
    vertex_buffer_data.push_back(model_vertices[7]);
    for (int i = 0; i < 6; i++) {
        glm::vec3 v = glm::vec3(0.0f, 0.0f, 1.0f);
        color_buffer_data.push_back(v);
    }
    // +Y face (yellow).
    vertex_buffer_data.push_back(model_vertices[0]);
    vertex_buffer_data.push_back(model_vertices[3]);
    vertex_buffer_data.push_back(model_vertices[7]);
    vertex_buffer_data.push_back(model_vertices[0]);
    vertex_buffer_data.push_back(model_vertices[4]);
    vertex_buffer_data.push_back(model_vertices[7]);
    for (int i = 0; i < 6; i++) {
        glm::vec3 v = glm::vec3(1.0f, 1.0f, 0.0f);
        color_buffer_data.push_back(v);
    }
    // -Z face (black).
    vertex_buffer_data.push_back(model_vertices[0]);
    vertex_buffer_data.push_back(model_vertices[1]);
    vertex_buffer_data.push_back(model_vertices[2]);
    vertex_buffer_data.push_back(model_vertices[2]);
    vertex_buffer_data.push_back(model_vertices[3]);
    vertex_buffer_data.push_back(model_vertices[0]);
    for (int i = 0; i < 6; i++) {
        glm::vec3 v = glm::vec3(0.0f, 0.0f, 0.0f);
        color_buffer_data.push_back(v);
    }
    // +Z face (magenta).
    vertex_buffer_data.push_back(model_vertices[6]);
    vertex_buffer_data.push_back(model_vertices[5]);
    vertex_buffer_data.push_back(model_vertices[4]);
    vertex_buffer_data.push_back(model_vertices[4]);
    vertex_buffer_data.push_back(model_vertices[7]);
    vertex_buffer_data.push_back(model_vertices[6]);
    for (int i = 0; i < 6; i++) {
        glm::vec3 v = glm::vec3(1.0f, 0.0f, 1.0f);
        color_buffer_data.push_back(v);
    }
    bufferData();
}
// Appends `size` bytes from `buffer` to the socket's transmit buffer at the
// current packet offset, advancing the offset by the amount accepted.
// Returns the number of bytes actually buffered.
size_t EthernetUDP::write(const uint8_t *buffer, size_t size)
{
    const uint16_t accepted = bufferData(_sock, _offset, buffer, size);
    _offset += accepted;
    return accepted;
}
//////////////////////////////////////////////////////////////////////////////// // on init cb void on_init() { #ifndef _WIN32 // Configure debug output glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB); glDebugMessageCallbackARB( reinterpret_cast<GLDEBUGPROCARB>(&gl_debug_message_callback), NULL ); #endif // gen names glGenBuffers(1, &buffer); glGenTextures(1, &texture); glGenVertexArrays(1, &vertexArray); program = glCreateProgram(); // build buffer std::vector<GLint> bufferData(ELEM_CNT,-1); glBindBuffer(GL_TEXTURE_BUFFER, buffer); glBufferData(GL_TEXTURE_BUFFER, sizeof(GLint)*bufferData.size(), &bufferData[0], GL_STATIC_DRAW); glBindBuffer(GL_TEXTURE_BUFFER, 0); // build texture glBindTexture(GL_TEXTURE_BUFFER, texture); glTexBuffer(GL_TEXTURE_BUFFER, GL_R32I, buffer); // bind as image glBindImageTexture(0, texture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32I); // build vao glBindVertexArray(vertexArray); // empty glBindVertexArray(0); // build program GLuint vertex = glCreateShader(GL_VERTEX_SHADER); glShaderSource(vertex, 5, vertexSrc, NULL); glCompileShader(vertex); glAttachShader(program, vertex); glDeleteShader(vertex); glLinkProgram(program); // set uniforms glProgramUniform1i(program, glGetUniformLocation(program, "imgData"), 0); // draw data glUseProgram(program); glBindVertexArray(vertexArray); glDrawArrays(GL_POINTS, 0, ELEM_CNT); // map buffer and check data validity glBindVertexArray(0); glBindBuffer(GL_TEXTURE_BUFFER, buffer); GLint *dataPtr = (GLint*) glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY); std::cout << "buffer content : "; for(GLint i = 0; i<ELEM_CNT; ++i) std::cout << dataPtr[i] << ' '; std::cout << std::endl; glUnmapBuffer(GL_TEXTURE_BUFFER); #ifdef _WIN32 GLenum error = glGetError(); if(error!=GL_NO_ERROR) std::cerr << "caught " << gl_error_to_string(error) << '\n'; #endif }
// Creates platform font data from raw font-file bytes, optionally running the
// bytes through the OpenType sanitizer first. On modern macOS the font is
// built through CoreGraphics; on Tiger/Leopard it is activated privately via
// ATS. Returns 0 on any failure.
FontCustomPlatformData* createFontCustomPlatformData(SharedBuffer* buffer)
{
    ASSERT_ARG(buffer, buffer);
#if ENABLE(OPENTYPE_SANITIZER)
    // Validate/transcode the font data before trusting it.
    OpenTypeSanitizer sanitizer(buffer);
    RefPtr<SharedBuffer> transcodeBuffer = sanitizer.sanitize();
    if (!transcodeBuffer)
        return 0; // validation failed.
    buffer = transcodeBuffer.get();
#endif
    ATSFontContainerRef containerRef = 0;
    ATSFontRef fontRef = 0;
    RetainPtr<CGFontRef> cgFontRef;
#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
    // Wrap the buffer in a CFData-backed data provider and let CG parse it.
    RetainPtr<CFDataRef> bufferData(AdoptCF, buffer->createCFData());
    RetainPtr<CGDataProviderRef> dataProvider(AdoptCF, CGDataProviderCreateWithCFData(bufferData.get()));
    cgFontRef.adoptCF(CGFontCreateWithDataProvider(dataProvider.get()));
    if (!cgFontRef)
        return 0;
#else
    // Use ATS to activate the font.
    // The value "3" means that the font is private and can't be seen by anyone else.
    ATSFontActivateFromMemory((void*)buffer->data(), buffer->size(), 3, kATSFontFormatUnspecified, NULL, kATSOptionFlagsDefault, &containerRef);
    if (!containerRef)
        return 0;
    ItemCount fontCount;
    ATSFontFindFromContainer(containerRef, kATSOptionFlagsDefault, 0, NULL, &fontCount);
    // We just support the first font in the list.
    if (fontCount == 0) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
    ATSFontFindFromContainer(containerRef, kATSOptionFlagsDefault, 1, &fontRef, NULL);
    if (!fontRef) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
    cgFontRef.adoptCF(CGFontCreateWithPlatformFont(&fontRef));
#ifndef BUILDING_ON_TIGER
    // Workaround for <rdar://problem/5675504>: reject fonts CG reports as
    // having no glyphs.
    if (cgFontRef && !CGFontGetNumberOfGlyphs(cgFontRef.get()))
        cgFontRef = 0;
#endif
#if PLATFORM(APOLLO)
    // On Leopard the CGFontGetNumberOfGlyphs call is necessary to reject invalid fonts
    // On Snow Leopard this happens at ATSFontActivateFromMemory
    if (isOsLeopardOrGreater()) {
        static _CGFontGetNumberOfGlyphsFunc CGFontGetNumberOfGlyphsFunc = getCGFontGetNumberOfGlyphsFunc();
        if (CGFontGetNumberOfGlyphsFunc && cgFontRef && !CGFontGetNumberOfGlyphsFunc(cgFontRef.get())) {
            cgFontRef = 0;
        }
    }
#endif
    if (!cgFontRef) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
#endif // !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
    return new FontCustomPlatformData(containerRef, fontRef, cgFontRef.releaseRef());
}
// Opens an Ogg container and prepares the Theora video and Vorbis audio
// tracks: parses the BOS headers to identify the streams, reads the three
// required header packets for each codec, then creates the tracks and
// pre-buffers enough audio to start playback. Returns true on success;
// malformed header streams abort via error().
bool TheoraDecoder::loadStream(Common::SeekableReadStream *stream) {
    close();
    _fileStream = stream;
    // start up Ogg stream synchronization layer
    ogg_sync_init(&_oggSync);
    // init supporting Vorbis structures needed in header parsing
    vorbis_info_init(&_vorbisInfo);
    vorbis_comment vorbisComment;
    vorbis_comment_init(&vorbisComment);
    // init supporting Theora structures needed in header parsing
    th_info theoraInfo;
    th_info_init(&theoraInfo);
    th_comment theoraComment;
    th_comment_init(&theoraComment);
    th_setup_info *theoraSetup = 0;
    uint theoraPackets = 0, vorbisPackets = 0;
    // Ogg file open; parse the headers
    // Only interested in Vorbis/Theora streams
    bool foundHeader = false;
    while (!foundHeader) {
        int ret = bufferData();
        if (ret == 0)
            break; // FIXME: Shouldn't this error out?
        while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
            ogg_stream_state test;
            // is this a mandated initial header? If not, stop parsing
            if (!ogg_page_bos(&_oggPage)) {
                // don't leak the page; get it into the appropriate stream
                queuePage(&_oggPage);
                foundHeader = true;
                break;
            }
            ogg_stream_init(&test, ogg_page_serialno(&_oggPage));
            ogg_stream_pagein(&test, &_oggPage);
            ogg_stream_packetout(&test, &_oggPacket);
            // identify the codec: try theora
            if (theoraPackets == 0 && th_decode_headerin(&theoraInfo, &theoraComment, &theoraSetup, &_oggPacket) >= 0) {
                // it is theora — adopt this logical stream as the video stream
                memcpy(&_theoraOut, &test, sizeof(test));
                theoraPackets = 1;
                _hasVideo = true;
            } else if (vorbisPackets == 0 && vorbis_synthesis_headerin(&_vorbisInfo, &vorbisComment, &_oggPacket) >= 0) {
                // it is vorbis — adopt this logical stream as the audio stream
                memcpy(&_vorbisOut, &test, sizeof(test));
                vorbisPackets = 1;
                _hasAudio = true;
            } else {
                // whatever it is, we don't care about it
                ogg_stream_clear(&test);
            }
        }
        // fall through to non-bos page parsing
    }
    // we're expecting more header packets: each codec needs 3 in total.
    while ((theoraPackets && theoraPackets < 3) || (vorbisPackets && vorbisPackets < 3)) {
        int ret;
        // look for further theora headers
        while (theoraPackets && (theoraPackets < 3) && (ret = ogg_stream_packetout(&_theoraOut, &_oggPacket))) {
            if (ret < 0)
                error("Error parsing Theora stream headers; corrupt stream?");
            if (!th_decode_headerin(&theoraInfo, &theoraComment, &theoraSetup, &_oggPacket))
                error("Error parsing Theora stream headers; corrupt stream?");
            theoraPackets++;
        }
        // look for more vorbis header packets
        while (vorbisPackets && (vorbisPackets < 3) && (ret = ogg_stream_packetout(&_vorbisOut, &_oggPacket))) {
            if (ret < 0)
                error("Error parsing Vorbis stream headers; corrupt stream?");
            if (vorbis_synthesis_headerin(&_vorbisInfo, &vorbisComment, &_oggPacket))
                error("Error parsing Vorbis stream headers; corrupt stream?");
            vorbisPackets++;
            if (vorbisPackets == 3)
                break;
        }
        // The header pages/packets will arrive before anything else we
        // care about, or the stream is not obeying spec
        if (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
            queuePage(&_oggPage); // demux into the appropriate stream
        } else {
            ret = bufferData(); // someone needs more data
            if (ret == 0)
                error("End of file while searching for codec headers.");
        }
    }
    // And now we have it all. Initialize decoders next
    if (_hasVideo) {
        _videoTrack = new TheoraVideoTrack(getDefaultHighColorFormat(), theoraInfo, theoraSetup);
        addTrack(_videoTrack);
    }
    // Header-parsing scratch state is no longer needed.
    th_info_clear(&theoraInfo);
    th_comment_clear(&theoraComment);
    th_setup_free(theoraSetup);
    if (_hasAudio) {
        _audioTrack = new VorbisAudioTrack(_soundType, _vorbisInfo);
        // Get enough audio data to start us off
        while (!_audioTrack->hasAudio()) {
            // Queue more data
            bufferData();
            while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
                queuePage(&_oggPage);
            queueAudio();
        }
        addTrack(_audioTrack);
    }
    vorbis_comment_clear(&vorbisComment);
    return true;
}
// Creates platform font data from raw font-file bytes. The data is first
// either sanitized (OpenType sanitizer) or converted from WOFF to sfnt as
// needed; the font is then built through CoreGraphics, or on Leopard
// activated privately via ATS. Returns 0 on any failure.
FontCustomPlatformData* createFontCustomPlatformData(SharedBuffer* buffer)
{
    ASSERT_ARG(buffer, buffer);
#if USE(OPENTYPE_SANITIZER)
    // Validate/transcode the font data before trusting it.
    OpenTypeSanitizer sanitizer(buffer);
    RefPtr<SharedBuffer> transcodeBuffer = sanitizer.sanitize();
    if (!transcodeBuffer)
        return 0; // validation failed.
    buffer = transcodeBuffer.get();
#else
    // Without the sanitizer, WOFF containers must be unwrapped to raw sfnt.
    RefPtr<SharedBuffer> sfntBuffer;
    if (isWOFF(buffer)) {
        Vector<char> sfnt;
        if (!convertWOFFToSfnt(buffer, sfnt))
            return 0;
        sfntBuffer = SharedBuffer::adoptVector(sfnt);
        buffer = sfntBuffer.get();
    }
#endif
    ATSFontContainerRef containerRef = 0;
    RetainPtr<CGFontRef> cgFontRef;
#ifndef BUILDING_ON_LEOPARD
    // Wrap the buffer in a CFData-backed data provider and let CG parse it.
    RetainPtr<CFDataRef> bufferData(AdoptCF, buffer->createCFData());
    RetainPtr<CGDataProviderRef> dataProvider(AdoptCF, CGDataProviderCreateWithCFData(bufferData.get()));
    cgFontRef.adoptCF(CGFontCreateWithDataProvider(dataProvider.get()));
    if (!cgFontRef)
        return 0;
#else
    // Use ATS to activate the font.
    // The value "3" means that the font is private and can't be seen by anyone else.
    ATSFontActivateFromMemory((void*)buffer->data(), buffer->size(), 3, kATSFontFormatUnspecified, NULL, kATSOptionFlagsDefault, &containerRef);
    if (!containerRef)
        return 0;
    ItemCount fontCount;
    ATSFontFindFromContainer(containerRef, kATSOptionFlagsDefault, 0, NULL, &fontCount);
    // We just support the first font in the list.
    if (fontCount == 0) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
    ATSFontRef fontRef = 0;
    ATSFontFindFromContainer(containerRef, kATSOptionFlagsDefault, 1, &fontRef, NULL);
    if (!fontRef) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
    cgFontRef.adoptCF(CGFontCreateWithPlatformFont(&fontRef));
    // Workaround for <rdar://problem/5675504>: reject fonts CG reports as
    // having no glyphs.
    if (cgFontRef && !CGFontGetNumberOfGlyphs(cgFontRef.get()))
        cgFontRef = 0;
    if (!cgFontRef) {
        ATSFontDeactivate(containerRef, NULL, kATSOptionFlagsDefault);
        return 0;
    }
#endif // !defined(BUILDING_ON_LEOPARD)
    FontCustomPlatformData* fontCustomPlatformData = new FontCustomPlatformData(containerRef, cgFontRef.releaseRef());
#if USE(SKIA_ON_MAC_CHROMIUM)
    // Skia needs its own typeface built from the same bytes.
    RemoteFontStream* stream = new RemoteFontStream(buffer);
    fontCustomPlatformData->m_typeface = SkTypeface::CreateFromStream(stream);
    stream->unref();
#endif
    return fontCustomPlatformData;
}
/**
 * @brief Initializes buffers for trackball's representation.
 * Allocates the buffer objects first, then uploads their initial contents.
 */
void initializeBuffers (void)
{
    createBuffers();
    bufferData();
}
// Handles CMSG_LF_GUILD_BROWSE: validates the client's search filters, finds
// guilds matching them for the player's team, and replies with
// SMSG_LF_GUILD_BROWSE_UPDATED containing one entry per matching guild.
void WorldSession::HandleGuildFinderBrowse(WorldPacket& recvPacket)
{
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Received CMSG_LF_GUILD_BROWSE");
    uint32 classRoles = 0;
    uint32 availability = 0;
    uint32 guildInterests = 0;
    uint32 playerLevel = 0; // Raw player level (1-85), do they use MAX_FINDER_LEVEL when on level 85 ?
    recvPacket >> classRoles >> availability >> guildInterests >> playerLevel;
    // Reject out-of-range filter masks and levels outright.
    if (!(classRoles & GUILDFINDER_ALL_ROLES) || classRoles > GUILDFINDER_ALL_ROLES)
        return;
    if (!(availability & AVAILABILITY_ALWAYS) || availability > AVAILABILITY_ALWAYS)
        return;
    if (!(guildInterests & ALL_INTERESTS) || guildInterests > ALL_INTERESTS)
        return;
    if (playerLevel > sWorld->getIntConfig(CONFIG_MAX_PLAYER_LEVEL) || playerLevel < 1)
        return;
    Player* player = GetPlayer();
    LFGuildPlayer settings(player->GetGUIDLow(), classRoles, availability, guildInterests, ANY_FINDER_LEVEL);
    LFGuildStore guildList = sGuildFinderMgr->GetGuildsMatchingSetting(settings, player->GetTeamId());
    uint32 guildCount = guildList.size();
    // No matches: send an empty update and bail.
    if (guildCount == 0)
    {
        WorldPacket packet(SMSG_LF_GUILD_BROWSE_UPDATED, 0);
        player->SendDirectMessage(&packet);
        return;
    }
    // The byte-aligned payload is accumulated separately and appended after
    // the bit-packed section. The exact interleaving below matches the
    // client's expected wire format — do not reorder these writes.
    ByteBuffer bufferData(65 * guildCount);
    WorldPacket data(SMSG_LF_GUILD_BROWSE_UPDATED, 3 + guildCount * 65); // Estimated size
    data.WriteBits(guildCount, 19);
    for (LFGuildStore::const_iterator itr = guildList.begin(); itr != guildList.end(); ++itr)
    {
        LFGuildSettings guildSettings = itr->second;
        Guild* guild = sGuildMgr->GetGuildById(itr->first);
        ObjectGuid guildGUID = ObjectGuid(guild->GetGUID());
        // Bit-packed GUID mask and string lengths.
        data.WriteBit(guildGUID[7]);
        data.WriteBit(guildGUID[5]);
        data.WriteBits(guild->GetName().size(), 8);
        data.WriteBit(guildGUID[0]);
        data.WriteBits(guildSettings.GetComment().size(), 11);
        data.WriteBit(guildGUID[4]);
        data.WriteBit(guildGUID[1]);
        data.WriteBit(guildGUID[2]);
        data.WriteBit(guildGUID[6]);
        data.WriteBit(guildGUID[3]);
        // Byte-aligned payload: emblem data, settings, and GUID byte sequence.
        bufferData << uint32(guild->GetEmblemInfo().GetColor());
        bufferData << uint32(guild->GetEmblemInfo().GetBorderStyle()); // Guessed
        bufferData << uint32(guild->GetEmblemInfo().GetStyle());
        bufferData.WriteString(guildSettings.GetComment());
        bufferData << uint8(0); // Unk
        bufferData.WriteByteSeq(guildGUID[5]);
        bufferData << uint32(guildSettings.GetInterests());
        bufferData.WriteByteSeq(guildGUID[6]);
        bufferData.WriteByteSeq(guildGUID[4]);
        bufferData << uint32(guild->GetLevel());
        bufferData.WriteString(guild->GetName());
        bufferData << uint32(guild->GetAchievementMgr().GetAchievementPoints());
        bufferData.WriteByteSeq(guildGUID[7]);
        bufferData << uint8(sGuildFinderMgr->HasRequest(player->GetGUIDLow(), guild->GetGUID())); // Request pending
        bufferData.WriteByteSeq(guildGUID[2]);
        bufferData.WriteByteSeq(guildGUID[0]);
        bufferData << uint32(guildSettings.GetAvailability());
        bufferData.WriteByteSeq(guildGUID[1]);
        bufferData << uint32(guild->GetEmblemInfo().GetBackgroundColor());
        bufferData << uint32(0); // Unk Int 2 (+ 128) // Always 0 or 1
        bufferData << uint32(guild->GetEmblemInfo().GetBorderColor());
        bufferData << uint32(guildSettings.GetClassRoles());
        bufferData.WriteByteSeq(guildGUID[3]);
        bufferData << uint32(guild->GetMembersCount());
    }
    data.FlushBits();
    data.append(bufferData);
    player->SendDirectMessage(&data);
}
// Buffers more data for the given channel index. Channel 0 is reserved and
// empty slots are ignored; otherwise delegates to the per-channel overload.
void SoundManager::bufferData(uint16 channel) {
    if (channel != 0 && _channels[channel])
        bufferData(*_channels[channel]);
}
// Decodes and returns the next video frame, converting it to RGBA and
// computing when it should be displayed, then tops up the audio queue.
// Returns a pointer to the decoder-owned display surface.
const Graphics::Surface *TheoraDecoder::decodeNextFrame() {
    // First, let's get our frame
    while (_theoraPacket) {
        // theora is one in, one out...
        if (ogg_stream_packetout(&_theoraOut, &_oggPacket) > 0) {
            // Apply any pending post-processing level change before decoding.
            if (_ppInc) {
                _ppLevel += _ppInc;
                th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel));
                _ppInc = 0;
            }
            if (th_decode_packetin(_theoraDecode, &_oggPacket, NULL) == 0) {
                _curFrame++;
                // Convert YUV data to RGB data
                th_ycbcr_buffer yuv;
                th_decode_ycbcr_out(_theoraDecode, yuv);
                translateYUVtoRGBA(yuv);
                if (_curFrame == 0)
                    _startTime = g_system->getMillis();
                double time = th_granule_time(_theoraDecode, _oggPacket.granulepos);
                // We need to calculate when the next frame should be shown
                // This is all in floating point because that's what the Ogg code gives us
                // Ogg is a lossy container format, so it doesn't always list the time to the
                // next frame. In such cases, we need to calculate it ourselves.
                if (time == -1.0)
                    _nextFrameStartTime += _frameRate.getInverse().toDouble();
                else
                    _nextFrameStartTime = time;
                // break out
                break;
            }
        } else {
            // If we can't get any more frames, we're done.
            if (_theoraOut.e_o_s || _fileStream->eos()) {
                _endOfVideo = true;
                break;
            }
            // Queue more data: read from the file, route complete pages.
            bufferData();
            while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
                queuePage(&_oggPage);
        }
        // Update audio if we can
        queueAudio();
    }
    // Force at least some audio to be buffered
    // TODO: 5 is very arbitrary. We probably should do something like QuickTime does.
    while (!_endOfAudio && _audStream->numQueuedStreams() < 5) {
        bufferData();
        while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
            queuePage(&_oggPage);
        bool queuedAudio = queueAudio();
        if ((_vorbisOut.e_o_s || _fileStream->eos()) && !queuedAudio) {
            _endOfAudio = true;
            break;
        }
    }
    return &_displaySurface;
}
// Open an Ogg container from the given stream and initialize the Theora
// video and/or Vorbis audio decoders found in it.
// Takes ownership of nothing it doesn't already manage; `stream` is kept in
// _fileStream for subsequent buffering. Returns true on success; calls
// error() (fatal) on corrupt or truncated header data.
//
// Parsing proceeds in three phases, mirroring the reference player_example:
//   1) read beginning-of-stream pages and identify Theora/Vorbis streams,
//   2) collect the remaining header packets (3 each per codec spec),
//   3) allocate decoders and set up the output surface / audio stream.
bool TheoraDecoder::loadStream(Common::SeekableReadStream *stream) {
	close();

	_endOfAudio = false;
	_endOfVideo = false;
	_fileStream = stream;

	// start up Ogg stream synchronization layer
	ogg_sync_init(&_oggSync);

	// init supporting Vorbis structures needed in header parsing
	vorbis_info_init(&_vorbisInfo);
	vorbis_comment_init(&_vorbisComment);

	// init supporting Theora structures needed in header parsing
	th_comment_init(&_theoraComment);
	th_info_init(&_theoraInfo);

	// Ogg file open; parse the headers
	// Only interested in Vorbis/Theora streams
	bool foundHeader = false;
	while (!foundHeader) {
		int ret = bufferData();
		if (ret == 0)
			break;

		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
			ogg_stream_state test;

			// is this a mandated initial header? If not, stop parsing
			if (!ogg_page_bos(&_oggPage)) {
				// don't leak the page; get it into the appropriate stream
				queuePage(&_oggPage);
				foundHeader = true;
				break;
			}

			ogg_stream_init(&test, ogg_page_serialno(&_oggPage));
			ogg_stream_pagein(&test, &_oggPage);
			ogg_stream_packetout(&test, &_oggPacket);

			// identify the codec: try theora
			if (!_theoraPacket && th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket) >= 0) {
				// it is theora -- take over the stream state we probed with.
				// NOTE: _theoraPacket doubles as a header-packet counter below.
				memcpy(&_theoraOut, &test, sizeof(test));
				_theoraPacket = 1;
			} else if (!_vorbisPacket && vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket) >= 0) {
				// it is vorbis
				memcpy(&_vorbisOut, &test, sizeof(test));
				_vorbisPacket = 1;
			} else {
				// whatever it is, we don't care about it
				ogg_stream_clear(&test);
			}
		}
		// fall through to non-bos page parsing
	}

	// we're expecting more header packets.
	// Each codec requires exactly 3 header packets (ident, comment, setup).
	while ((_theoraPacket && _theoraPacket < 3) || (_vorbisPacket && _vorbisPacket < 3)) {
		int ret;

		// look for further theora headers
		while (_theoraPacket && (_theoraPacket < 3) && (ret = ogg_stream_packetout(&_theoraOut, &_oggPacket))) {
			if (ret < 0)
				error("Error parsing Theora stream headers; corrupt stream?");

			if (!th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket))
				error("Error parsing Theora stream headers; corrupt stream?");

			_theoraPacket++;
		}

		// look for more vorbis header packets
		while (_vorbisPacket && (_vorbisPacket < 3) && (ret = ogg_stream_packetout(&_vorbisOut, &_oggPacket))) {
			if (ret < 0)
				error("Error parsing Vorbis stream headers; corrupt stream?");

			// NB: vorbis_synthesis_headerin returns nonzero on failure,
			// the opposite sense of th_decode_headerin above.
			if (vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket))
				error("Error parsing Vorbis stream headers; corrupt stream?");

			_vorbisPacket++;

			if (_vorbisPacket == 3)
				break;
		}

		// The header pages/packets will arrive before anything else we
		// care about, or the stream is not obeying spec
		if (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
			queuePage(&_oggPage); // demux into the appropriate stream
		} else {
			ret = bufferData(); // someone needs more data

			if (ret == 0)
				error("End of file while searching for codec headers.");
		}
	}

	// and now we have it all. initialize decoders
	if (_theoraPacket) {
		_theoraDecode = th_decode_alloc(&_theoraInfo, _theoraSetup);
		debugN(1, "Ogg logical stream %lx is Theora %dx%d %.02f fps",
		       _theoraOut.serialno, _theoraInfo.pic_width, _theoraInfo.pic_height,
		       (double)_theoraInfo.fps_numerator / _theoraInfo.fps_denominator);

		// Report the chroma subsampling scheme (diagnostic only).
		switch (_theoraInfo.pixel_fmt) {
		case TH_PF_420:
			debug(1, " 4:2:0 video");
			break;
		case TH_PF_422:
			debug(1, " 4:2:2 video");
			break;
		case TH_PF_444:
			debug(1, " 4:4:4 video");
			break;
		case TH_PF_RSVD:
		default:
			debug(1, " video\n (UNKNOWN Chroma sampling!)");
			break;
		}

		// The visible picture region may be a cropped window into the
		// (macroblock-aligned) coded frame.
		if (_theoraInfo.pic_width != _theoraInfo.frame_width || _theoraInfo.pic_height != _theoraInfo.frame_height)
			debug(1, " Frame content is %dx%d with offset (%d,%d).",
			      _theoraInfo.frame_width, _theoraInfo.frame_height, _theoraInfo.pic_x, _theoraInfo.pic_y);

		switch (_theoraInfo.colorspace){
		case TH_CS_UNSPECIFIED:
			/* nothing to report */
			break;
		case TH_CS_ITU_REC_470M:
			debug(1, "  encoder specified ITU Rec 470M (NTSC) color.");
			break;
		case TH_CS_ITU_REC_470BG:
			debug(1, "  encoder specified ITU Rec 470BG (PAL) color.");
			break;
		default:
			debug(1, "warning: encoder specified unknown colorspace (%d).", _theoraInfo.colorspace);
			break;
		}

		debug(1, "Encoded by %s", _theoraComment.vendor);
		if (_theoraComment.comments) {
			debug(1, "theora comment header:");
			for (int i = 0; i < _theoraComment.comments; i++) {
				if (_theoraComment.user_comments[i]) {
					int len = _theoraComment.comment_lengths[i];
					// Comments are not NUL-terminated; copy to add one.
					char *value = (char *)malloc(len + 1);
					if (value) {
						memcpy(value, _theoraComment.user_comments[i], len);
						value[len] = '\0';
						debug(1, "\t%s", value);
						free(value);
					}
				}
			}
		}

		// Default to the decoder's maximum post-processing level.
		th_decode_ctl(_theoraDecode, TH_DECCTL_GET_PPLEVEL_MAX, &_ppLevelMax, sizeof(_ppLevelMax));
		_ppLevel = _ppLevelMax;
		th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel));
		_ppInc = 0;
	} else {
		// tear down the partial theora setup
		th_info_clear(&_theoraInfo);
		th_comment_clear(&_theoraComment);
	}

	// Setup data is only needed during header parsing; free it either way.
	th_setup_free(_theoraSetup);
	_theoraSetup = 0;

	if (_vorbisPacket) {
		vorbis_synthesis_init(&_vorbisDSP, &_vorbisInfo);
		vorbis_block_init(&_vorbisDSP, &_vorbisBlock);
		debug(3, "Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.",
		      _vorbisOut.serialno, _vorbisInfo.channels, _vorbisInfo.rate);

		_audStream = Audio::makeQueuingAudioStream(_vorbisInfo.rate, _vorbisInfo.channels);

		// Get enough audio data to start us off
		while (_audStream->numQueuedStreams() == 0) {
			// Queue more data
			bufferData();
			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
				queuePage(&_oggPage);

			queueAudio();
		}

		if (_audStream)
			g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, _audHandle, _audStream, -1, getVolume(), getBalance());
	} else {
		// tear down the partial vorbis setup
		vorbis_info_clear(&_vorbisInfo);
		vorbis_comment_clear(&_vorbisComment);

		_endOfAudio = true;
	}

	// Backing store is the full coded frame; the display surface is a
	// zero-copy view windowed to the visible picture region.
	// NOTE(review): this uses _theoraInfo even when no Theora stream was
	// found (the info struct was cleared above) — presumably audio-only
	// files still yield a degenerate surface; confirm with callers.
	_surface.create(_theoraInfo.frame_width, _theoraInfo.frame_height, g_system->getScreenFormat());

	// Set up a display surface
	_displaySurface.pixels = _surface.getBasePtr(_theoraInfo.pic_x, _theoraInfo.pic_y);
	_displaySurface.w = _theoraInfo.pic_width;
	_displaySurface.h = _theoraInfo.pic_height;
	_displaySurface.format = _surface.format;
	_displaySurface.pitch = _surface.pitch;

	// Set the frame rate
	_frameRate = Common::Rational(_theoraInfo.fps_numerator, _theoraInfo.fps_denominator);

	return true;
}
// Begin a shader debugging session from a serialized network payload.
//
// The payload (bufferSize bytes at buffer) is parsed sequentially:
//   shader UID + GL ID, shader source, then a counted list of uniforms
//   (name/type/element-count/float-flag + value array), then a counted list
//   of frame buffers (type/draw-buffer/dimensions/pixel-size + pre/post
//   pixel arrays).
// On success the debug state moves to DBS_Init, the UI is refreshed and the
// debugged source is opened. Returns false (leaving no partially-added
// uniform/frame-buffer entries behind for the failing item) on any parse or
// validation error, or if a session is already active.
//
// NOTE: the payload comes off the network and is untrusted — every count and
// size read from it is validated before use.
bool GLIShaderDebug::DebugBegin(uint bufferSize, void *buffer) {
	uint i;

	//Check the debug state
	if(debugState != DBS_None) {
		scite->OutputAppendString("== Unable to switch to debug mode. Already debugging? ==\n");
		return false;
	}

	//Check the input data
	if(bufferSize == 0 || buffer == NULL) {
		return false;
	}

	//Clear all existing uniforms/frame buffer data
	uniformValues.clear();
	frameBufferArray.clear();

	//Read the input data
	NetworkBufferReceive bufferData(bufferSize, buffer);

	//Get the debug shader UID/GL ID
	if(!bufferData.Get(debugUID) ||
	   !bufferData.Get(debugGLID)) {
		return false;
	}

	//Get the shader source
	if(!bufferData.Get(debugSource)) {
		return false;
	}

	//Get the number of uniforms
	uint numUniforms = 0;
	if(!bufferData.Get(numUniforms)) {
		return false;
	}

	//Loop and get each uniform
	for(i=0; i<numUniforms; i++) {
		UniformData newUniform;
		uint isFloat;

		//Get the main uniform data
		if(!bufferData.Get(newUniform.name) ||
		   !bufferData.Get(newUniform.type) ||
		   !bufferData.Get(newUniform.numTypeElements) ||
		   !bufferData.Get(isFloat)) {
			return false;
		}

		//Reject a zero element count up front: the divisibility checks below
		//compute "% numTypeElements", which is undefined behavior for zero,
		//and a malformed/hostile packet could supply exactly that.
		if(newUniform.numTypeElements == 0) {
			return false;
		}

		//Assign if it is a float or not
		if(isFloat > 0) {
			newUniform.isFloatType = true;
			if(!bufferData.Get(newUniform.floatUniformData)) {
				return false;
			}

			//Return now if there is an uneven number of variables
			if(newUniform.floatUniformData.size() % newUniform.numTypeElements != 0) {
				return false;
			}
		} else {
			newUniform.isFloatType = false;
			if(!bufferData.Get(newUniform.intUniformData)) {
				return false;
			}

			//Return now if there is an uneven number of variables
			if(newUniform.intUniformData.size() % newUniform.numTypeElements != 0) {
				return false;
			}
		}

		//Add the uniform data
		uniformValues.push_back(newUniform);
	}

	//Get the number of frame buffers
	uint numFrameBuffers = 0;
	if(!bufferData.Get(numFrameBuffers)) {
		return false;
	}

	//Loop and get each frame buffer
	for(i=0; i<numFrameBuffers; i++) {
		//Add a new item
		frameBufferArray.push_back(FrameBufferData());

		//Get a reference to it (faster than copying all this buffer data twice)
		FrameBufferData &newFrameBuffer = frameBufferArray.back();

		//Get the main frame buffer data; on failure drop the half-filled entry
		if(!bufferData.Get(newFrameBuffer.bufferType) ||
		   !bufferData.Get(newFrameBuffer.drawBuffer) ||
		   !bufferData.Get(newFrameBuffer.bufferWidth) ||
		   !bufferData.Get(newFrameBuffer.bufferHeight) ||
		   !bufferData.Get(newFrameBuffer.numPixelValues) ||
		   !bufferData.Get(newFrameBuffer.preBuffer) ||
		   !bufferData.Get(newFrameBuffer.postBuffer)) {
			frameBufferArray.pop_back();
			return false;
		}

		//Check the size of the data: pre/post captures must match each other
		//and the advertised width * height * values-per-pixel
		if(newFrameBuffer.preBuffer.size() != newFrameBuffer.postBuffer.size() ||
		   newFrameBuffer.preBuffer.size() != (newFrameBuffer.bufferWidth * newFrameBuffer.bufferHeight * newFrameBuffer.numPixelValues)) {
			scite->OutputAppendString("== Invalid frame buffer data ==\n");
			frameBufferArray.pop_back();
			return false;
		}
	}

	//Check for any remaining network data (non-fatal; just warn)
	if(bufferData.GetBufferSizeLeft() > 0) {
		scite->OutputAppendString("== Extra debug start data found in network buffer ==\n");
	}

	//Flag that debug mode has started
	debugState = DBS_Init;

	//Update the watch/uniform display
	UpdateVariableDisplay();

	//Show the window
	ShowWindow(dialogWindow, SW_SHOW);

	//Ensure the source that is being debugged is open
	OpenDebugSource();

	return true;
}