// Swaps the byte order (endianness) of every 32-bit word of the input
// image, writing the result to the output buffer.
void FilterSwap4::process()
{
  unsigned int const * src = reinterpret_cast<unsigned int const *>(inputBuffer());
  unsigned int const * end = reinterpret_cast<unsigned int const *>(inputBuffer() + imageSize_);
  unsigned int * out = reinterpret_cast<unsigned int *>(outputBuffer());

  for (; src < end; ++src, ++out)
    *out = __bswap_32(*src);
}
// Reverses the order of the three colour channels of every pixel
// (e.g. BGR -> RGB), writing the result to the output buffer.
void FilterSwap3::process()
{
  unsigned char const * src = inputBuffer();
  unsigned char const * end = inputBuffer() + imageSize_;
  unsigned char * dst = outputBuffer();

  while (src < end) {
    // Read one 3-byte pixel, then emit its channels in reverse order.
    char c0 = *src++;
    char c1 = *src++;
    char c2 = *src++;
    *dst++ = c2;
    *dst++ = c1;
    *dst++ = c0;
  }
}
// Pairwise-merges n sorted runs of `input` on the GPU until one sorted run
// remains.  `positions` has n+1 entries: run i occupies
// [positions[i], positions[i+1]) and positions[n] is the total length.
// The merged result is written back into `input`.
void GPU::merge(uint32_t *input, size *positions, size n) const
{
    cl::make_kernel<cl::Buffer&,cl::Buffer&,cl::Buffer&> merge(this->kmerge);
    cl::make_kernel<cl::Buffer&,size,size> updatePositions(this->kupdatePositions);
    cl::CommandQueue queue(context,dev);
    size len=positions[n];
    cl::Buffer inputBuffer(context, CL_MEM_READ_WRITE, sizeof(uint32_t)*len);
    queue.enqueueWriteBuffer(inputBuffer, false, 0, sizeof(uint32_t)*len, input);
    cl::Buffer outputBuffer(context, CL_MEM_READ_WRITE, sizeof(uint32_t)*len);
    cl::Buffer positionsBuffer(context, CL_MEM_READ_WRITE, sizeof(size)*(n+1));
    queue.enqueueWriteBuffer(positionsBuffer, false, 0, (n+1)*sizeof(size), positions);
    if(n%2)
        queue.enqueueCopyBuffer(inputBuffer, outputBuffer,
                                sizeof(uint32_t)*positions[n-1],
                                sizeof(uint32_t)*positions[n-1],
                                (positions[n]-positions[n-1])*sizeof(uint32_t));
    // Pre-copy the last run: with an odd run count it has no merge partner
    // and may stay unmerged for several rounds (as long as the count is odd).
    while(n>1)
    {
        // std::clog<<n<<": ";
        // std::copy(positions,positions+n+1,std::ostream_iterator<size>(std::clog," "));
        // std::clog<<std::endl;
        // Each round merges run pairs (n/2 work-items), halving (rounding
        // up) the run count, then rewrites the run boundary table.
        merge(cl::EnqueueArgs(queue,n/2),inputBuffer,outputBuffer,positionsBuffer);
        size o=n;
        n-=n/2;
        updatePositions(cl::EnqueueArgs(queue,1),positionsBuffer,o,n);
        // Ping-pong: last round's output becomes next round's input.
        std::swap(inputBuffer,outputBuffer);
    }
    queue.enqueueReadBuffer(inputBuffer, false, 0, sizeof(uint32_t)*len, input);
    queue.finish();
}
//--------------------------------------------------------------- void FilterYUV411toYUV::process() { unsigned char const * src_img = inputBuffer(); unsigned char * tgt_img = outputBuffer(); int srcImgSize = getImageSize(inputFormat_); for (int i = 0; i < srcImgSize; i += 6) { register int u = src_img[i]; register int y0 = src_img[i+1]; register int y1 = src_img[i+2]; register int v = src_img[i+3]; register int y2 = src_img[i+4]; register int y3 = src_img[i+5]; *(tgt_img++) = y0; *(tgt_img++) = u; *(tgt_img++) = v; *(tgt_img++) = y1; *(tgt_img++) = u; *(tgt_img++) = v; *(tgt_img++) = y2; *(tgt_img++) = u; *(tgt_img++) = v; *(tgt_img++) = y3; *(tgt_img++) = u; *(tgt_img++) = v; } }
// Delivers the next AMR frame: serves it from the deinterleaving buffer if
// one is ready, otherwise requests more data from the upstream source.
void AMRDeinterleaver::doGetNextFrame() {
  // First, try getting a frame from the deinterleaving buffer:
  if (fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize,
                                           fFrameSize, fNumTruncatedBytes,
                                           fLastFrameHeader, fPresentationTime,
                                           fInputSource->isSynchronized())) {
    // Success!
    fNeedAFrame = False;

    fDurationInMicroseconds = uSecsPerFrame;

    // Call our own 'after getting' function.  Because we're not a 'leaf'
    // source, we can call this directly, without risking
    // infinite recursion
    afterGetting(this);
    return;
  }

  // No luck, so ask our source for help:
  fNeedAFrame = True;
  // Only issue a new read if one is not already pending.
  if (!fInputSource->isCurrentlyAwaitingData()) {
    fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(),
                               fDeinterleavingBuffer->inputBufferSize(),
                               afterGettingFrame, this,
                               FramedSource::handleClosure, this);
  }
}
/*
 * Adds data from the socket to the receive queue.
 *
 * FIX: the original returned NETWORK_PROTOCOL_OK for *every* socket result
 * other than OUT_OF_BUFFER (including real errors), which also made the
 * size-sync code below unreachable.  Now only genuine failures return
 * NETWORK_PROTOCOL_ERROR, and successful reads fall through to sync the
 * queue size.
 */
NETWORK_PROTOCOL_RESULT QuickProtocol::updateProtocol()
{
    // Read directly into the free tail of the receive queue buffer.
    Buffer inputBuffer(receiveQueueBuffer.getBuffer()+receiveQueueBuffer.getSize(),
                       RECEIVEQUEUESIZE-receiveQueueBuffer.getSize());

    NETWORK_SOCKET_RESULT result = socket->readIn(&inputBuffer);

    // Anything other than a clean read or a full buffer is an error.
    if(result != NETWORK_SOCKET_OK && result != NETWORK_SOCKET_OUT_OF_BUFFER)
    {
        return NETWORK_PROTOCOL_ERROR;
    }

    if(inputBuffer.getSize() <= 0)
    {
        printf("BUG: Data but no data !\n");
        return NETWORK_PROTOCOL_ERROR;
    }

    // Manually sync the new size of the queue with what was just appended.
    receiveQueueBuffer.setSize(receiveQueueBuffer.getSize()+inputBuffer.getSize());

    return NETWORK_PROTOCOL_OK;
}
// Creates an InputStream backed by a compressed-file stream buffer
// reading from `fileName`.
InputStream createInputCompressedFileStream(const std::string &fileName)
{
    boost::shared_ptr<InputCompressedFileStreamBuffer> streamBuffer(
        new InputCompressedFileStreamBuffer(fileName));

    InputStream stream;
    stream.setBuffer(streamBuffer);
    return stream;
}
// Mirrors the image vertically by copying rows from the top of the input
// to the bottom of the output.
void FilterFlip::process()
{
  unsigned char const * srcRow = inputBuffer();
  unsigned char * dstRow = outputBuffer() + rowSize_ * (inputFormat_.height - 1);

  for (unsigned int row = 0; row < inputFormat_.height; ++row) {
    memcpy(dstRow, srcRow, rowSize_);
    srcRow += rowSize_;
    dstRow -= rowSize_;
  }
}
// Produces a half-height image by copying every other input row
// (starting at offset_) into consecutive output rows.
void FilterHalfImage::process()
{
  unsigned char const * srcRow = inputBuffer() + offset_;
  unsigned char * dstRow = outputBuffer();

  for (unsigned int row = 0;
       row < inputFormat_.height;
       row += 2, srcRow += rowSize2_, dstRow += rowSize_) {
    memcpy(dstRow, srcRow, rowSize_);
  }
}
// Copies pixels in reverse order: the first input pixel lands at the
// start of the last output row, walking backwards one pixel at a time.
void FilterReverse::process()
{
  unsigned char const * src = inputBuffer();
  unsigned char * dst = outputBuffer() + rowSize_ * (inputFormat_.height - 1);

  int numberOfPixel = inputFormat_.height * inputFormat_.width;
  unsigned int remaining = numberOfPixel;
  while (remaining != 0) {
    memcpy(dst, src, bytesPerPixel);
    src += bytesPerPixel;
    dst -= bytesPerPixel;
    --remaining;
  }
}
int main(int argc, char** argv) { /* Get number of devices */ int numberOfDevices; std::string input_filename = FILENAME_TESTFILE; std::string scope_filename = SCOPE_PARAMETERFILE; std::string output_filename = OUTPUT_FILENAME; if(argc > 1) { input_filename = argv[1]; scope_filename = argv[1]; } if(argc > 2) { output_filename = argv[2]; } //std::cout << "Args read (" << input_filename << ", " << output_filename << ")" << std::endl; InputBuffer inputBuffer(CHUNK_BUFFER_COUNT, 1); /* Initialize input buffer (with dynamic elements) */ ScopeReader::ScopeParameter parameter(scope_filename); //int nSegments = parameter.nbrSegments; //int nWaveforms = parameter.nbrWaveforms; int nSample = parameter.nbrSamples; ScopeReader reader(parameter, &inputBuffer, CHUNK_COUNT); GrayBatStream<Chunk> os(1,masterUri, fitterUri); std::thread sendingThread([&inputBuffer, &os](){ while(inputBuffer.isFinished()) { inputBuffer.popTry([&os](Chunk& t){ os.send(t); }); } os.quit(); }); //std::cout << "Buffer created." << std::endl; reader.readToBuffer(); //Make sure all results are written back sendingThread.join(); return 0; }
int main (void) { std::cout << "Initializing..."; boost::circular_buffer<Frame> outputBuffer(5); boost::circular_buffer<Frame> inputBuffer(5); PhysicalLayer physicalLayer(paFloat32, 2, 1000, 8000, paFloat32, 2, 500, 8000); physicalLayer.startDataStream(); std::cout << "\nReady for playback...\n\n"; std::system("PAUSE"); outputBuffer.push_back(Frame(255,255,255)); outputBuffer.push_back(Frame(0,0,0)); outputBuffer.push_back(Frame(1,2,3)); outputBuffer.push_back(Frame(4,5,6)); outputBuffer.push_back(Frame(7,8,9)); physicalLayer.send(&outputBuffer); std::system("PAUSE"); std::cout << "\n\n----------------------------------------------------------------------------\n"; std::cout << "Recorded frames:\n"; physicalLayer.receive(&inputBuffer); for (int i = 0; i < inputBuffer.size(); i++) { Frame funFrame = inputBuffer[i]; funFrame.coutHeader(); } std::cout << "\n\n----------------------------------------------------------------------------\n"; std::system("PAUSE"); std::cout << "\nStopping data streams...\n\n"; physicalLayer.stopDataStream(); return 0; }
/** * Image conversion. */ void FilterGray::process() { // Pointer to the input image. unsigned char const * src = inputBuffer(); // Pointer to the output image. unsigned char * dest = outputBuffer(); // Past the end pointer of the output image for termination condition. unsigned char * last = dest + outputSize_; while (dest < last) { *(dest++) = (unsigned char) ( ( *(src++) * params_->weightRed + *(src++) * params_->weightGreen + *(src++) * params_->weightBlue) / sum_) ; } }
// Sorts `p_input` (arraySize elements) in place on the GPU with a bitonic
// sorting network.  The working buffer is padded up to the next power of
// two with UINT32_MAX sentinels, which sort to the (discarded) tail.
void GPU::sort(uint32_t *p_input, size arraySize) const
{
    cl::make_kernel<cl::Buffer&,size,size,size> sort(this->ksort);
    cl::CommandQueue queue(context,dev);
    // Bitonic sort requires a power-of-two element count.
    size ceiling=1;
    while(ceiling<arraySize)
        ceiling<<=1;
    cl::Buffer inputBuffer(context, CL_MEM_READ_WRITE, sizeof(uint32_t)*ceiling);
    queue.enqueueWriteBuffer(inputBuffer, false, 0, sizeof(uint32_t)*arraySize, p_input);
    if(ceiling!=arraySize)
    {
        // For some reason this segfaults, even on pocl:
        // queue.enqueueFillBuffer(inputBuffer, std::numeric_limits<uint32_t>::max(), 0, sizeof(uint32_t)*ceiling);
        // So pad with an explicit host-side sentinel array instead.
        size rest=ceiling-arraySize;
        uint32_t *pattern=new uint32_t[rest];
        std::fill(pattern, pattern+rest, std::numeric_limits<uint32_t>::max());
        queue.enqueueWriteBuffer(inputBuffer, false, sizeof(uint32_t)*arraySize, rest*sizeof(uint32_t), pattern);
        delete []pattern;
        // Extremely slow alternative (one write per element):
        // uint32_t pattern=std::numeric_limits<uint32_t>::max();
        // for(size n=arraySize;n<ceiling;++n)
        //     queue.enqueueWriteBuffer(inputBuffer, false, n*sizeof(uint32_t), sizeof(pattern), &pattern);
    }
    // Standard bitonic schedule: stage n, sub-stages k = n..1,
    // ceiling/2 compare-exchange work-items per launch.
    for(size n=1;(1<<n)<=ceiling;n++)
    {
        for(size k=n; k>=1; k--)
        {
            sort(cl::EnqueueArgs(queue,ceiling/2), inputBuffer, arraySize, n, k);
#if 0
            // Debug dump of the buffer after each sub-stage.
            uint32_t *tmp=new uint32_t[ceiling];
            queue.enqueueReadBuffer(inputBuffer, false, 0, sizeof(uint32_t)*ceiling, tmp);
            queue.finish();
            std::cout<<"n="<<n<<" k="<<k<<": ";
            for(int j=0;j<ceiling;++j)
                std::cout<<tmp[j]<<" ";
            std::cout<<"\n";
            delete []tmp;
#endif
        }
    }
    queue.enqueueReadBuffer(inputBuffer, false, 0, sizeof(uint32_t)*arraySize, p_input);
    queue.finish();
}
// Handles drag-move events for PHI drag&drop payloads
// (MIME type "application/x-phi-dd").
void PHIAScene::dragMoveEvent( QGraphicsSceneDragDropEvent *e )
{
    if ( !e->mimeData()->hasFormat( "application/x-phi-dd" ) ) return e->ignore();
    QByteArray input=e->mimeData()->data( "application/x-phi-dd" );
    QBuffer inputBuffer( &input );
    inputBuffer.open( QIODevice::ReadOnly );
    QDataStream ds( &inputBuffer );
    ds.setVersion( PHI_DSV );
    quint8 version;
    QString id;
    qint32 opts;
    // Payload layout: serialization version, element id, drag options.
    ds >> version >> id >> opts;
    PHIBaseItem *item=_view->page()->getElementById( id );
    if ( !item ) return e->ignore(); // should never be executed
    item->setData( PHIItem::DDragStartPos, e->scenePos() );
    QGraphicsScene::dragMoveEvent( e );
    // Accept only items that neither revert on ignore nor on accept and
    // that define a drag-move action.
    if ( !item->dragRevertOnIgnore() && !item->dragRevertOnAccept() && item->dragMoveAction() )
        return e->accept();
}
int main(int argc, char* argv[]) { std::string input_filename = FILENAME_TESTFILE; std::string scope_filename = SCOPE_PARAMETERFILE; std::string output_filename = OUTPUT_FILENAME; if(argc > 1) { input_filename = argv[1]; scope_filename = argv[1]; } if(argc > 2) { output_filename = argv[2]; } std::cout << "Args read (" << input_filename << ", " << output_filename << ")" << std::endl; OutputBuffer inputBuffer(CHUNK_BUFFER_COUNT, 1); GrayBatReader<Output> gbReader(masterUri, onlineDspUri); std::cout << "Buffer created." << std::endl; std::thread writerThread([&inputBuffer](){ std::fstream out; out.open("results.txt"); while(!inputBuffer.isFinished()) { auto elem = inputBuffer.pop(); out << elem.status << " " << elem.woffset << " "; std::cout << elem.status << " " << elem.woffset << " "; for(auto p : elem.param) out << p << " "; out << std::endl; std::cout << std::endl; } out.close(); }); gbReader.readToBuffer(); std::cout << "Data read." << std::endl; //Make sure all results are written back writerThread.join(); return 0; }
// Creates an InputStream backed by a plain file stream buffer reading
// from `fileName`.
// NOTE: there is no separate existence check here — EOF is not the same as
// "file does not exist", and zero-length files would be indistinguishable
// from missing ones.  An earlier eof-based error log was removed because it
// spammed when callers merely probed for a file's existence.
InputStream createInputFileStream(const std::string &fileName)
{
    boost::shared_ptr<InputFileStreamBuffer> streamBuffer(
        new InputFileStreamBuffer(fileName));

    InputStream stream;
    stream.setBuffer(streamBuffer);
    return stream;
}
//--------------------------------------------------------------- void FilterOmni2Pan::process() { unsigned char const * srcImg = inputBuffer(); unsigned char * tgtImg = outputBuffer(); int * srcOffset = srcOffset_; if (outputFormat_.palette == Miro::RGB_24) { unsigned char * tgtImgEnd = tgtImg + IMAGE_WIDTH * IMAGE_HEIGHT * 3; for (; tgtImg != tgtImgEnd; ++srcOffset) { *tgtImg++ = *(srcImg + *srcOffset); *tgtImg++ = *(srcImg + *srcOffset + 1); *tgtImg++ = *(srcImg + *srcOffset + 2); } } else { unsigned char * tgtImgEnd = tgtImg + IMAGE_WIDTH * IMAGE_HEIGHT; for (; tgtImg != tgtImgEnd; ++srcOffset) { *tgtImg++ = *(srcImg + *srcOffset); } } }
//--------------------------------------------------------------- void FilterYUV422toRGB::process() { unsigned char const * src_img = inputBuffer(); unsigned char * tgt_img = outputBuffer(); unsigned int srcImgSize = getImageSize(inputFormat_); for (unsigned int i = 0; i < srcImgSize; i += 4) { register int u = src_img[i]; register int y0 = src_img[i+1]; register int v = src_img[i+2]; register int y1 = src_img[i+3]; *(tgt_img++) = t_r[(y0<<8)|v]; *(tgt_img++) = t_g2[(y0<<8)|t_g1[(u<<8)|v]]; *(tgt_img++) = t_b[(y0<<8)|u]; *(tgt_img++) = t_r[(y1<<8)|v]; *(tgt_img++) = t_g2[(y1<<8)|t_g1[(u<<8)|v]]; *(tgt_img++) = t_b[(y1<<8)|u]; } }
void motor(int speed1, int direction) { int i; int motortable[4]; if ( direction == FORWARD) for (int i = 0; i < 4; i++) motortable[i] = FORWARD[i]; else if ( direction == BACKWARD) for (int i = 0; i < 4; i++) motortable[i] = BACKWARD[i]; else if ( direction == TURNLEFT) for (int i = 0; i < 4; i++) motortable[i] = TURNLEFT[i]; else if ( direction == TURNRIGHT) for (int i = 0; i < 4; i++) motortable[i] = TURNRIGHT[i]; for ( ; ; ) { index1 = (++index1)%4; // read table as normal MotorPort.Motor = motortable[index1]; // output by portb Delay_ms(SPEEDTABLE[speed1]); // delay per step inputBuffer(); } }
// Writes the subset of changed properties (pCheckProps, ascending indices)
// from the serialized state `pState` into `pOut` as a delta-encoded
// property list.  Properties present in the state but not in pCheckProps
// are skipped over in the input bit stream.
void SendTable_WritePropList( const SendTable *pTable, const void *pState, const int nBits, bf_write *pOut, const int objectID, const int *pCheckProps, const int nCheckProps )
{
    if ( nCheckProps == 0 )
    {
        // Write single final zero bit, signifying that there are no changed properties
        pOut->WriteOneBit( 0 );
        return;
    }
    bool bDebugWatch = Sendprop_UsingDebugWatch();
    s_debug_info_shown = false;
    s_debug_bits_start = pOut->GetNumBitsWritten();
    CSendTablePrecalc *pPrecalc = pTable->m_pPrecalc;
    CDeltaBitsWriter deltaBitsWriter( pOut );
    // Wrap the serialized state so we can walk it property by property.
    bf_read inputBuffer( "SendTable_WritePropList->inputBuffer", pState, BitByte( nBits ), nBits );
    CDeltaBitsReader inputBitsReader( &inputBuffer );
    // Ok, they want to specify a small list of properties to check.
    int iToProp = NextProp( &inputBitsReader );
    int i = 0;
    while ( i < nCheckProps )
    {
        // Seek the 'to' state to the current property we want to check.
        while ( iToProp < pCheckProps[i] )
        {
            SkipPropData( &inputBuffer, pPrecalc->GetProp( iToProp ) );
            iToProp = NextProp( &inputBitsReader );
        }
        if ( iToProp == PROP_SENTINEL )
        {
            break;
        }
        else if ( iToProp == pCheckProps[i] )
        {
            const SendProp *pProp = pPrecalc->GetProp( iToProp );
            // Show debug stuff.
            if ( bDebugWatch )
            {
                ShowEncodeDeltaWatchInfo( pTable, pProp, inputBuffer, objectID, iToProp );
            }
            // See how many bits the data for this property takes up.
            int iStartBit = inputBuffer.GetNumBitsRead();
            SkipPropData( &inputBuffer, pProp );
            int nToStateBits = inputBuffer.GetNumBitsRead() - iStartBit;
            TRACE_PACKET( ( " Send Field (%s) = %d (%d bytes)\n", pProp->GetName(), nToStateBits, ( nToStateBits + 7 ) / 8 ) );
            // Write the data into the output: prop index first, then the
            // raw property bits re-read from the measured start position.
            deltaBitsWriter.WritePropIndex( iToProp );
            inputBuffer.Seek( iStartBit );
            pOut->WriteBitsFromBuffer( &inputBuffer, nToStateBits );
            // Seek to the next prop.
            iToProp = NextProp( &inputBitsReader );
        }
        ++i;
    }
    if ( s_debug_info_shown )
    {
        int bits = pOut->GetNumBitsWritten() - s_debug_bits_start;
        ConDMsg( "= %i bits (%i bytes)\n", bits, Bits2Bytes(bits) );
    }
    inputBitsReader.ForceFinished(); // avoid a benign assert
}
// Copies the input image verbatim to the output buffer.
//
// FIX: removed the leftover `std::cout << __PRETTY_FUNCTION__` debug trace
// — it spammed stdout on every frame and is inconsistent with the other
// FilterCopy::process definition in this codebase, which has no trace.
void FilterCopy::process()
{
  memcpy(outputBuffer(), inputBuffer(), imageSize_);
}
// Serializes `inputImage` into the intermediate 'ImageCodec' wire format —
// TImageInfo header, then per-frame TImageFrameInfo + per-mipmap
// TImageMipMapLevelInfo records, then the raw pixel data — and hands the
// resulting buffer to the buffer-based Encode() overload.
// Returns false if the image has no frames.
bool
CIMGCodec::Encode( const CImage& inputImage       ,
                   CORE::CIOAccess& encodedOutput )
{GUCEF_TRACE;

    if ( inputImage.HasFrames() )
    {
        CORE::CDynamicBuffer inputBuffer( 102400, true );
        CORE::CDynamicBufferAccess bufferAccess( &inputBuffer, false );

        // Fill our header
        TImageInfo imageInfo;
        imageInfo.version = GUCEF_IMAGE_TIMAGEINFO_VERSION;
        imageInfo.nrOfFramesInImage = inputImage.GetFrameCount();

        // write header
        bufferAccess.Write( &imageInfo, sizeof( TImageInfo ), 1 );

        // Now we add each frame's info + mipmap info
        TImageFrameInfo frameInfo;
        frameInfo.version = GUCEF_IMAGE_TIMAGEFRAMEINFO_VERSION;
        TImageMipMapLevelInfo mipMapInfo;
        mipMapInfo.version = GUCEF_IMAGE_TIMAGEMIPMAPLEVELINFO_VERSION;
        const CImage::TMipMapList* mipMapList = NULL;
        const TPixelMapPtr* pixelMap = NULL;
        for ( UInt32 frameNr=0; frameNr<imageInfo.nrOfFramesInImage; ++frameNr )
        {
            mipMapList = &inputImage.GetFrame( frameNr );
            frameInfo.nrOfMipmapLevels = static_cast< UInt32 >( mipMapList->size() );
            bufferAccess.Write( &frameInfo, sizeof( TImageFrameInfo ), 1 );

            // Now we add the info for each mipmap level
            for ( UInt32 mipLvl=0; mipLvl<frameInfo.nrOfMipmapLevels; ++mipLvl )
            {
                pixelMap = &(*mipMapList)[ mipLvl ];
                mipMapInfo.frameWidth = (*pixelMap)->GetWidthInPixels();
                mipMapInfo.frameHeight = (*pixelMap)->GetHeightInPixels();
                mipMapInfo.pixelStorageFormat = (*pixelMap)->GetPixelStorageFormat();
                mipMapInfo.pixelComponentDataType = (*pixelMap)->GetPixelComponentDataType();
                bufferAccess.Write( &mipMapInfo, sizeof( TImageMipMapLevelInfo ), 1 );
            }
        }

        // Now it is time to add the pixel data itself
        for ( UInt32 frameNr=0; frameNr<imageInfo.nrOfFramesInImage; ++frameNr )
        {
            mipMapList = &inputImage.GetFrame( frameNr );
            for ( UInt32 mipLvl=0; mipLvl<mipMapList->size(); ++mipLvl )
            {
                pixelMap = &(*mipMapList)[ mipLvl ];
                bufferAccess.Write( (*pixelMap)->GetDataPtr(), (*pixelMap)->GetTotalSizeInBytes(), 1 );
            }
        }

        // We have now merged all the image object data into a single buffer using the decoded 'ImageCodec' format
        // It is time to perform the actual Encode()
        return Encode( bufferAccess  ,
                       encodedOutput );
    }
    return false;
}
// Copies the input image verbatim to the output buffer.
void FilterCopy::process()
{
  memcpy(outputBuffer(), inputBuffer(), imageSize_);
}
// render markdown to HTML -- assumes UTF-8 encoding Error markdownToHTML(const std::string& markdownInput, const Extensions& extensions, const HTMLOptions& options, std::string* pHTMLOutput) { std::string input = markdownInput; boost::scoped_ptr<MathFilter> pMathFilter; if (extensions.ignoreMath) pMathFilter.reset(new MathFilter(&input, pHTMLOutput)); // setup input buffer SundownBuffer inputBuffer(input); if (!inputBuffer.allocated()) return allocationError(ERROR_LOCATION); // render table of contents if requested if (options.toc) { struct sd_callbacks htmlCallbacks; struct html_renderopt htmlOptions; ::sdhtml_toc_renderer(&htmlCallbacks, &htmlOptions); std::string tocOutput; Error error = renderMarkdown(inputBuffer, extensions, options.smartypants, &htmlCallbacks, &htmlOptions, &tocOutput); if (error) return error; pHTMLOutput->append("<div id=\"toc\">\n"); pHTMLOutput->append("<div id=\"toc_header\">Table of Contents</div>\n"); pHTMLOutput->append(tocOutput); pHTMLOutput->append("</div>\n"); pHTMLOutput->append("\n"); } // setup html renderer struct sd_callbacks htmlCallbacks; struct html_renderopt htmlOptions; int htmlRenderMode = 0; if (options.useXHTML) htmlRenderMode |= HTML_USE_XHTML; if (options.hardWrap) htmlRenderMode |= HTML_HARD_WRAP; if (options.toc) htmlRenderMode |= HTML_TOC; if (options.safelink) htmlRenderMode |= HTML_SAFELINK; if (options.skipHTML) htmlRenderMode |= HTML_SKIP_HTML; if (options.skipStyle) htmlRenderMode |= HTML_SKIP_STYLE; if (options.skipImages) htmlRenderMode |= HTML_SKIP_IMAGES; if (options.skipLinks) htmlRenderMode |= HTML_SKIP_LINKS; if (options.escape) htmlRenderMode |= HTML_ESCAPE; ::sdhtml_renderer(&htmlCallbacks, &htmlOptions, htmlRenderMode); // render page std::string output; Error error = renderMarkdown(inputBuffer, extensions, options.smartypants, &htmlCallbacks, &htmlOptions, &output); if (error) return error; // append output and return success pHTMLOutput->append(output); return Success(); }
// Constructs the LTC (linear timecode) clock reader: opens an audio
// listener on `deviceName` and spawns a thread that decodes incoming LTC
// frames into the internal clock state.  If masterClock is true the
// decoded clock is also pushed to the global Timer.
//
// FIX: the calls `ltc_decoder_read(ltcDecoder, <cFrame)` and
// `ltc_frame_to_time(&stime, <cFrame.ltc, ...)` contained "<cFrame" — a
// mangled HTML entity for "&ltcFrame" ("&lt;" garbling) — and did not
// compile.  The address-of expressions are restored.
LtcClock::LtcClock(bool masterClock, const string& deviceName)
{
    registerAttributes();

    _listener = unique_ptr<Listener>(new Listener());
    _listener->setParameters(1, 0, Listener::SAMPLE_FMT_U8, deviceName);
    if (!_listener)
    {
        _listener.reset();
        return;
    }

    _masterClock = masterClock;
    _continue = true;

    Log::get() << Log::MESSAGE << "LtcClock::" << __FUNCTION__ << " - Input clock enabled" << Log::endl;

    _ltcThread = thread([&]() {
        LTCDecoder* ltcDecoder = ltc_decoder_create(1920, 32);
        LTCFrameExt ltcFrame;

        vector<uint8_t> inputBuffer(256);
        long int total = 0;

        while (_continue)
        {
            if (!_listener->readFromQueue(inputBuffer))
            {
                this_thread::sleep_for(chrono::milliseconds(5));
                continue;
            }

            // Check all values to check whether the clock is paused or not
            bool paused = true;
            for (auto& v : inputBuffer)
            {
                if (v < 126 || v > 129) // This is for noise handling. There is not enough room for a clock in between.
                {
                    paused = false;
                    break;
                }
            }
            _clock.paused = paused;

            ltc_decoder_write(ltcDecoder, (ltcsnd_sample_t*)inputBuffer.data(), inputBuffer.size(), total);
            total += inputBuffer.size();

            // Drain every fully decoded LTC frame from the decoder.
            while (ltc_decoder_read(ltcDecoder, &ltcFrame))
            {
                _ready = true;

                SMPTETimecode stime;
                ltc_frame_to_time(&stime, &ltcFrame.ltc, LTC_TC_CLOCK);
                Clock clock;
                clock.years = stime.years;
                clock.months = stime.months;
                clock.days = stime.days;
                clock.hours = stime.hours;
                clock.mins = stime.mins;
                clock.secs = stime.secs;

                // This updates the maximum frames per second, to be able to handle any framerate
                if (stime.frame == 0)
                {
                    // Only accept some specific values
                    if (_previousFrame == 24 || _previousFrame == 25 || _previousFrame == 30 || _previousFrame == 60)
                    {
                        // Small trick to handle errors
                        if (_framerateChanged)
                        {
                            _maximumFramePerSec = _previousFrame + 1;
                            _framerateChanged = false;
                        }
                        else
                        {
                            _framerateChanged = true;
                        }
                    }
                }

                _previousFrame = stime.frame;
                clock.frame = stime.frame * 120 / _maximumFramePerSec;

                _clock = clock;
            }

            if (_masterClock)
            {
                Values v;
                getClock(v);
                Timer::get().setMasterClock(v);
            }
        }
        ltc_decoder_free(ltcDecoder);
    });
}
// Benchmarks raw OpenCL enqueueCopyBuffer throughput via the Bolt control
// object.  Parses command-line options (platform/device selection, vector
// length, iteration count, host- vs device-memory allocation), runs the
// timing loop, prunes outliers, and prints size/time/bandwidth.
int _tmain( int argc, _TCHAR* argv[] )
{
    cl_uint userPlatform = 0;
    cl_uint userDevice = 0;
    size_t iterations = 0;
    size_t length = 0;
    size_t algo = 1;
    cl_device_type deviceType = CL_DEVICE_TYPE_DEFAULT;
    bool defaultDevice = true;
    bool print_clInfo = false;
    bool systemMemory = false;

    /******************************************************************************
    * Parameter parsing                                                           *
    ******************************************************************************/
    try
    {
        // Declare the supported options.
        po::options_description desc( "OpenCL CopyBuffer command line options" );
        desc.add_options()
            ( "help,h", "produces this help message" )
            ( "version,v", "Print queryable version information from the Bolt CL library" )
            ( "queryOpenCL,q", "Print queryable platform and device info and return" )
            ( "gpu,g", "Report only OpenCL GPU devices" )
            ( "cpu,c", "Report only OpenCL CPU devices" )
            ( "all,a", "Report all OpenCL devices" )
            ( "systemMemory,s", "Allocate vectors in system memory, otherwise device memory" )
            ( "platform,p", po::value< cl_uint >( &userPlatform )->default_value( 0 ), "Specify the platform under test using the index reported by -q flag" )
            ( "device,d", po::value< cl_uint >( &userDevice )->default_value( 0 ), "Specify the device under test using the index reported by the -q flag. " "Index is relative with respect to -g, -c or -a flags" )
            ( "length,l", po::value< size_t >( &length )->default_value( 1048576 ), "Specify the length of scan array" )
            ( "iterations,i", po::value< size_t >( &iterations )->default_value( 50 ), "Number of samples in timing loop" )
            //( "algo,a", po::value< size_t >( &algo )->default_value( 1 ), "Algorithm used [1,2] 1:SCAN_BOLT, 2:XYZ" )//Not used in this file
            ;

        po::variables_map vm;
        po::store( po::parse_command_line( argc, argv, desc ), vm );
        po::notify( vm );

        if( vm.count( "version" ) )
        {
            cl_uint libMajor, libMinor, libPatch;
            bolt::cl::getVersion( libMajor, libMinor, libPatch );

            const int indent = countOf( "Bolt version: " );
            bolt::tout << std::left << std::setw( indent ) << _T( "Bolt version: " )
                << libMajor << _T( "." )
                << libMinor << _T( "." )
                << libPatch << std::endl;
        }

        if( vm.count( "help" ) )
        {
            // This needs to be 'cout' as program-options does not support wcout yet
            std::cout << desc << std::endl;
            return 0;
        }

        if( vm.count( "queryOpenCL" ) )
        {
            print_clInfo = true;
        }

        if( vm.count( "gpu" ) )
        {
            deviceType = CL_DEVICE_TYPE_GPU;
        }

        if( vm.count( "cpu" ) )
        {
            deviceType = CL_DEVICE_TYPE_CPU;
        }

        if( vm.count( "all" ) )
        {
            deviceType = CL_DEVICE_TYPE_ALL;
        }

        if( vm.count( "systemMemory" ) )
        {
            systemMemory = true;
        }
    }
    catch( std::exception& e )
    {
        std::cout << _T( "Scan Benchmark error condition reported:" ) << std::endl << e.what() << std::endl;
        return 1;
    }

    /******************************************************************************
    * Initialize platforms and devices                                            *
    * /todo we should move this logic inside of the control class                 *
    ******************************************************************************/
    // Query OpenCL for available platforms
    cl_int err = CL_SUCCESS;

    // Platform vector contains all available platforms on system
    std::vector< cl::Platform > platforms;
    bolt::cl::V_OPENCL( cl::Platform::get( &platforms ), "Platform::get() failed" );

    if( print_clInfo )
    {
        // /todo: port the printing code from test/scan to control class
        //std::for_each( platforms.begin( ), platforms.end( ), printPlatformFunctor( 0 ) );
        return 0;
    }

    // Device info
    std::vector< cl::Device > devices;
    bolt::cl::V_OPENCL( platforms.at( userPlatform ).getDevices( deviceType, &devices ), "Platform::getDevices() failed" );

    cl::Context myContext( devices.at( userDevice ) );
    cl::CommandQueue myQueue( myContext, devices.at( userDevice ) );

    // Now that the device we want is selected and we have created our own cl::CommandQueue, set it as the
    // default cl::CommandQueue for the Bolt API
    bolt::cl::control::getDefault( ).setCommandQueue( myQueue );

    std::string strDeviceName = bolt::cl::control::getDefault( ).getDevice( ).getInfo< CL_DEVICE_NAME >( &err );
    bolt::cl::V_OPENCL( err, "Device::getInfo< CL_DEVICE_NAME > failed" );

    std::cout << "Device under test : " << strDeviceName << std::endl;

    /******************************************************************************
    * Benchmark logic                                                             *
    ******************************************************************************/
    bolt::statTimer& myTimer = bolt::statTimer::getInstance( );
    myTimer.Reserve( 1, iterations );
    size_t scanId = myTimer.getUniqueID( _T( "copybuffer" ), 0 );

    size_t pruned = 0;
    double scanTime = std::numeric_limits< double >::max( );
    double scanGB = ( length * sizeof( int ) ) / (1024.0 * 1024.0 * 1024.0);

    ::cl::CommandQueue& boltQueue = bolt::cl::control::getDefault( ).getCommandQueue( );

    // ::cl::Buffer can not handle buffers of size 0
    if( length > 0 )
    {
        if( systemMemory )
        {
            // Host-resident vectors mapped into OpenCL buffers; the copy is
            // timed together with a map/unmap of the result.
            std::vector< int > input( length, 1 );
            std::vector< int > output( length );

            ::cl::Buffer inputBuffer( bolt::cl::control::getDefault( ).getContext( ), CL_MEM_USE_HOST_PTR|CL_MEM_READ_ONLY, length * sizeof( int ), input.data( ) );
            ::cl::Buffer outputBuffer( bolt::cl::control::getDefault( ).getContext( ), CL_MEM_USE_HOST_PTR|CL_MEM_WRITE_ONLY, length * sizeof( int ), output.data( ) );

            for( unsigned i = 0; i < iterations; ++i )
            {
                myTimer.Start( scanId );
                boltQueue.enqueueCopyBuffer( inputBuffer, outputBuffer, 0, 0, length * sizeof( int ) );
                void* tmpPtr = boltQueue.enqueueMapBuffer( outputBuffer, true, CL_MAP_READ, 0, length * sizeof( int ) );
                boltQueue.enqueueUnmapMemObject( outputBuffer, tmpPtr );
                boltQueue.finish( );
                myTimer.Stop( scanId );
            }
        }
        else
        {
            // Pure device-memory copy, no host transfer in the timed region.
            ::cl::Buffer inputBuffer( bolt::cl::control::getDefault( ).getContext( ), CL_MEM_READ_ONLY, length * sizeof( int ) );
            ::cl::Buffer outputBuffer( bolt::cl::control::getDefault( ).getContext( ), CL_MEM_WRITE_ONLY, length * sizeof( int ) );

            for( unsigned i = 0; i < iterations; ++i )
            {
                myTimer.Start( scanId );
                boltQueue.enqueueCopyBuffer( inputBuffer, outputBuffer, 0, 0, length * sizeof( int ) );
                boltQueue.finish( );
                myTimer.Stop( scanId );
            }
        }

        // Remove all timings that are outside of 2 stddev (keep 65% of samples); we ignore outliers to get a more consistent result
        pruned = myTimer.pruneOutliers( 1.0 );
        scanTime = myTimer.getAverageTime( scanId );
    }
    else
    {
        iterations = 0;
    }

    bolt::tout << std::left;
    bolt::tout << std::setw( colWidth ) << _T( "CopyBuffer profile: " ) << _T( "[" ) << iterations-pruned << _T( "] samples" ) << std::endl;
    bolt::tout << std::setw( colWidth ) << _T( "    Size (GB): " ) << scanGB << std::endl;
    bolt::tout << std::setw( colWidth ) << _T( "    Time (s): " ) << scanTime << std::endl;
    bolt::tout << std::setw( colWidth ) << _T( "    Speed (GB/s): " ) << scanGB / scanTime << std::endl;
    bolt::tout << std::endl;

    // bolt::tout << myTimer;

    return 0;
}
// Streams the zlib-compressed capture file "captured.zdr" through inflate
// in fixed-size sample batches.  Each fully inflated batch of `samples`
// bytes is either decoded via NTSCCaptureDecoder (doDecode) or written raw
// to the output file; the zlib stream is reset between batches.
void run()
{
    bool doDecode = false;

    static const int samples = 450*1024;
    static const int inputBufferSize = samples;
    static const int sampleSpaceBefore = 256;
    static const int sampleSpaceAfter = 256;

    FileStream in = File("captured.zdr", true).openRead();
    UInt64 inputFileSizeRemaining = in.size();
    Array<Byte> inputBuffer(inputBufferSize);
    int inputBufferRemaining = 0;
    Byte* inputPointer = 0;

    z_stream zs;
    memset(&zs, 0, sizeof(z_stream));
    if (inflateInit(&zs) != Z_OK)
        throw Exception("inflateInit failed");

    // Sample buffer with zeroed guard margins before and after, since the
    // decoder may read slightly outside the sample window.
    Array<Byte> buffer(sampleSpaceBefore + samples + sampleSpaceAfter);
    Byte* b = &buffer[0] + sampleSpaceBefore;
    for (int i = 0; i < sampleSpaceBefore; ++i)
        b[i - sampleSpaceBefore] = 0;
    for (int i = 0; i < sampleSpaceAfter; ++i)
        b[i + samples] = 0;
    int outputBytesRemaining = samples;

    Vector outputSize;
    NTSCCaptureDecoder<UInt32> decoder;
    if (doDecode)
        outputSize = Vector(960, 240);
    else
        outputSize = Vector(1824, 253);
    Bitmap<UInt32> decoded(outputSize);
    decoder.setOutputBuffer(decoded);
    decoded.fill(0);
    decoder.setInputBuffer(b);
    decoder.setOutputPixelsPerLine(1140);
    decoder.setYScale(1);
    decoder.setDoDecode(doDecode);

    FileStream outputStream = File("u:\\captured.bin", true).openWrite();
    do {
        // Refill the compressed-input buffer when it runs dry.
        if (inputBufferRemaining == 0) {
            int bytesToRead = inputBufferSize;
            if (bytesToRead > inputFileSizeRemaining)
                bytesToRead = inputFileSizeRemaining;
            inputPointer = &inputBuffer[0];
            in.read(inputPointer, bytesToRead);
            inputBufferRemaining = bytesToRead;
            inputFileSizeRemaining -= bytesToRead;
        }
        // Inflate as much as fits into the rest of the sample batch.
        zs.avail_in = inputBufferRemaining;
        zs.next_in = inputPointer;
        zs.avail_out = outputBytesRemaining;
        zs.next_out = b + samples - outputBytesRemaining;
        int r = inflate(&zs, Z_SYNC_FLUSH);
        if (r != Z_STREAM_END && r != Z_OK)
            throw Exception("inflate failed");
        outputBytesRemaining = zs.avail_out;
        inputPointer = zs.next_in;
        inputBufferRemaining = zs.avail_in;
        // A full batch has been inflated: emit it and reset the stream
        // for the next batch.
        if (outputBytesRemaining == 0) {
            if (inflateReset(&zs) != Z_OK)
                throw Exception("inflateReset failed");
            outputBytesRemaining = samples;
            console.write(".");
            if (doDecode) {
                decoder.decode();
                outputStream.write(decoded.data(), decoded.stride()*outputSize.y);
            }
            else
                outputStream.write(b, 1824*253);
        }
    } while (inputFileSizeRemaining != 0);
    if (inflateEnd(&zs) != Z_OK)
        throw Exception("inflateEnd failed");
}
// Decompresses zlib-compressed NTSC capture data from "captured.zdr",
// decodes it one field at a time, and writes the result as a ZMBV-encoded
// AVI to "u:\captured2.avi" (encoder logic derived from DOSBox's capture
// code — note the _VectorTable/FrameBlock/KeyframeHeader machinery).
void run()
{
    bool doDecode = false;
    static const int samples = 450*1024;
    static const int inputBufferSize = samples;
    static const int sampleSpaceBefore = 256;
    static const int sampleSpaceAfter = 256;
    FileHandle in = File("captured.zdr", true).openRead();
    UInt64 inputFileSizeRemaining = in.size();
    Array<Byte> inputBuffer(inputBufferSize);
    int inputBufferRemaining = 0;
    Byte* inputPointer = 0;
    z_stream zs;
    memset(&zs, 0, sizeof(z_stream));
    if (inflateInit(&zs) != Z_OK)
        throw Exception("inflateInit failed");
    // Sample buffer with zeroed guard space on both sides for the decoder.
    Array<Byte> buffer(sampleSpaceBefore + samples + sampleSpaceAfter);
    Byte* b = &buffer[0] + sampleSpaceBefore;
    for (int i = 0; i < sampleSpaceBefore; ++i)
        b[i - sampleSpaceBefore] = 0;
    for (int i = 0; i < sampleSpaceAfter; ++i)
        b[i + samples] = 0;
    int outputBytesRemaining = samples;
    Vector outputSize;
    NTSCCaptureDecoder<UInt32> decoder;
    if (doDecode)
        outputSize = Vector(1280, 720);
    else
        outputSize = Vector(1824, 253);
    Bitmap<UInt32> decoded(outputSize);
    if (doDecode)
        decoder.setOutputBuffer(
            decoded.subBitmap(Vector(160, 0), Vector(960, 720)));
    else
        decoder.setOutputBuffer(decoded);
    decoded.fill(0);
    decoder.setInputBuffer(b);
    decoder.setOutputPixelsPerLine(1140);
    decoder.setYScale(3);
    decoder.setDoDecode(doDecode);
    _handle = fopen("u:\\captured2.avi","wb");
    if (!_handle)
        throw Exception("Can't open file");
    // Build the motion-vector search table: (0,0) first, then vectors in
    // expanding square rings of radius 1..10.
    _VectorCount = 1;
    _VectorTable[0].x = _VectorTable[0].y = 0;
    for (int s = 1; s <= 10; ++s) {
        for (int y = -s; y <= s; ++y)
            for (int x = -s; x <= s; ++x) {
                if (abs(x) == s || abs(y) == s) {
                    _VectorTable[_VectorCount].x = x;
                    _VectorTable[_VectorCount].y = y;
                    ++_VectorCount;
                }
            }
    }
    memset(&_zstream, 0, sizeof(_zstream));
    // Frame rows are padded by MAX_VECTOR pixels on each side so block
    // compares with any table vector stay in bounds.
    _pitch = outputSize.x + 2*MAX_VECTOR;
    if (deflateInit(&_zstream, 4) != Z_OK)
        throw Exception("deflateInit failed");
    // _bufSize is the compressed-chunk scratch buffer; note it is distinct
    // from _bufsize (lower case), the per-frame pixel buffer size below.
    _bufSize = 4*outputSize.x*outputSize.y + 2*(1+(outputSize.x/8)) *
        (1+(outputSize.y/8))+1024;
    _bufSize += _bufSize / 1000;
    _buf = malloc(_bufSize);
    if (!_buf)
        throw Exception("Out of memory");
    _index = (UInt8*)malloc(16*4096);
    if (!_index)  // BUGFIX: was "if (!_buf)", re-checking the wrong pointer
        throw Exception("Out of memory");
    _indexsize = 16*4096;
    _indexused = 8;
    // Reserve space for the AVI header; it is rewritten at the end once the
    // frame count and data sizes are known.
    for (int i = 0; i < AVI_HEADER_SIZE; ++i)
        fputc(0, _handle);
    _frames = 0;
    _written = 0;
    _audioused = 0;
    _audiowritten = 0;
    int blockwidth = 16;
    int blockheight = 16;
    _pixelsize = 4;
    _bufsize = (outputSize.y + 2*MAX_VECTOR)*_pitch*_pixelsize+2048;
    _buf1.allocate(_bufsize);
    _buf2.allocate(_bufsize);
    _work.allocate(_bufsize);
    // Partition the frame into 16x16 blocks (edge blocks may be smaller).
    int xblocks = (outputSize.x/blockwidth);
    int xleft = outputSize.x % blockwidth;
    if (xleft)
        ++xblocks;
    int yblocks = (outputSize.y/blockheight);
    int yleft = outputSize.y % blockheight;
    if (yleft)
        ++yblocks;
    _blockcount = yblocks*xblocks;
    _blocks = new FrameBlock[_blockcount];
    int i = 0;
    for (int y = 0; y < yblocks; ++y) {
        for (int x = 0; x < xblocks; ++x) {
            _blocks[i].start = ((y*blockheight) + MAX_VECTOR)*_pitch +
                x*blockwidth + MAX_VECTOR;
            if (xleft && x == xblocks - 1)
                _blocks[i].dx = xleft;
            else
                _blocks[i].dx = blockwidth;
            if (yleft && y == yblocks - 1)
                _blocks[i].dy = yleft;
            else
                _blocks[i].dy = blockheight;
            ++i;
        }
    }
    memset(&_buf1[0], 0, _bufsize);
    memset(&_buf2[0], 0, _bufsize);
    memset(&_work[0], 0, _bufsize);
    _oldframe = &_buf1[0];
    _newframe = &_buf2[0];
    do {
        // Refill the compressed-input buffer from the file when exhausted.
        if (inputBufferRemaining == 0) {
            int bytesToRead = inputBufferSize;
            if (bytesToRead > inputFileSizeRemaining)
                bytesToRead = inputFileSizeRemaining;
            inputPointer = &inputBuffer[0];
            in.read(inputPointer, bytesToRead);
            inputBufferRemaining = bytesToRead;
            inputFileSizeRemaining -= bytesToRead;
        }
        // Inflate directly into the sample buffer, continuing from wherever
        // the previous call left off.
        zs.avail_in = inputBufferRemaining;
        zs.next_in = inputPointer;
        zs.avail_out = outputBytesRemaining;
        zs.next_out = b + samples - outputBytesRemaining;
        int r = inflate(&zs, Z_SYNC_FLUSH);
        if (r != Z_STREAM_END && r != Z_OK)
            throw Exception("inflate failed");
        outputBytesRemaining = zs.avail_out;
        inputPointer = zs.next_in;
        inputBufferRemaining = zs.avail_in;
        if (outputBytesRemaining == 0) {
            // One complete field of samples decompressed — emit one frame.
            // Each field is a separate zlib stream, so reset before the next.
            if (inflateReset(&zs) != Z_OK)
                throw Exception("inflateReset failed");
            outputBytesRemaining = samples;
            console.write(".");
            decoder.decode();
            bool keyFrame = false;
            if (_frames % 300 == 0)
                keyFrame = true;
            keyFrame = true;  // NOTE: currently forces every frame to be a keyframe
            /* replace oldframe with new frame */
            unsigned char* copyFrame = _newframe;
            _newframe = _oldframe;
            _oldframe = copyFrame;
            compress.linesDone = 0;
            compress.writeSize = _bufSize;
            compress.writeDone = 1;
            compress.writeBuf = (unsigned char *)_buf;
            /* Set a pointer to the first byte which will contain info about
               this frame */
            unsigned char* firstByte = compress.writeBuf;
            *firstByte = 0;
            // Reset the work buffer
            _workUsed = 0;
            _workPos = 0;
            if (keyFrame) {
                /* Make a keyframe */
                *firstByte |= Mask_KeyFrame;
                KeyframeHeader* header =
                    (KeyframeHeader *)(compress.writeBuf + compress.writeDone);
                header->high_version = 0; // DBZV_VERSION_HIGH;
                header->low_version = 1; // DBZV_VERSION_LOW;
                header->compression = 1; // COMPRESSION_ZLIB
                header->format = 8; // ZMBV_FORMAT_32BPP
                header->blockwidth = 16;
                header->blockheight = 16;
                compress.writeDone += sizeof(KeyframeHeader);
                /* Restart deflate so keyframes decode independently */
                deflateReset(&_zstream);
            }
            // Copy the decoded bitmap rows into the bordered new-frame
            // buffer; the destination row is derived from linesDone each
            // iteration (a dead "destStart += _pitch*_pixelsize" removed).
            for (int i = 0; i < outputSize.y; ++i) {
                void* rowPointer = decoded.data() + decoded.stride()*i;
                unsigned char* destStart = _newframe + _pixelsize *
                    (MAX_VECTOR+(compress.linesDone+MAX_VECTOR)*_pitch);
                memcpy(destStart, rowPointer, outputSize.x * _pixelsize);
                compress.linesDone++;
            }
            if ((*compress.writeBuf) & Mask_KeyFrame) {
                /* Add the full frame data */
                unsigned char* readFrame =
                    _newframe + _pixelsize*(MAX_VECTOR+MAX_VECTOR*_pitch);
                for (int i = 0; i < outputSize.y; ++i) {
                    memcpy(&_work[_workUsed], readFrame,
                        outputSize.x*_pixelsize);
                    readFrame += _pitch*_pixelsize;
                    _workUsed += outputSize.x*_pixelsize;
                }
            }
            else {
                /* Add the delta frame data: per-block motion vectors
                   followed by XOR residuals for blocks that changed */
                signed char* vectors = (signed char*)&_work[_workUsed];
                /* Align the following xor data on 4 byte boundary*/
                _workUsed = (_workUsed + _blockcount*2 + 3) & ~3;
                for (int b = 0; b < _blockcount; ++b) {
                    FrameBlock* block = &_blocks[b];
                    int bestvx = 0;
                    int bestvy = 0;
                    int bestchange = CompareBlock(0, 0, block);
                    int possibles = 64;
                    // Try table vectors until the block matches well enough
                    // or the candidate budget is spent.
                    for (int v = 0; v < _VectorCount && possibles; ++v) {
                        if (bestchange < 4)
                            break;
                        int vx = _VectorTable[v].x;
                        int vy = _VectorTable[v].y;
                        if (PossibleBlock(vx, vy, block) < 4) {
                            --possibles;
                            int testchange = CompareBlock(vx, vy, block);
                            if (testchange < bestchange) {
                                bestchange = testchange;
                                bestvx = vx;
                                bestvy = vy;
                            }
                        }
                    }
                    vectors[b*2+0] = (bestvx << 1);
                    vectors[b*2+1] = (bestvy << 1);
                    if (bestchange) {
                        vectors[b*2+0] |= 1;  // low bit: XOR data follows
                        long* pold = ((long*)_oldframe) + block->start +
                            bestvy*_pitch + bestvx;
                        long* pnew = ((long*)_newframe) + block->start;
                        for (int y = 0; y < block->dy; ++y) {
                            for (int x = 0; x < block->dx; ++x) {
                                *((long*)&_work[_workUsed]) =
                                    pnew[x] ^ pold[x];
                                _workUsed += sizeof(long);
                            }
                            pold += _pitch;
                            pnew += _pitch;
                        }
                    }
                }
            }
            /* Create the actual frame with compression */
            _zstream.next_in = (Bytef *)&_work[0];
            _zstream.avail_in = _workUsed;
            _zstream.total_in = 0;
            _zstream.next_out =
                (Bytef *)(compress.writeBuf + compress.writeDone);
            _zstream.avail_out = compress.writeSize - compress.writeDone;
            _zstream.total_out = 0;
            int res = deflate(&_zstream, Z_SYNC_FLUSH);
            if (res != Z_OK)  // was computed but never checked
                throw Exception("deflate failed");
            int written = compress.writeDone + _zstream.total_out;
            CAPTURE_AddAviChunk("00dc", written, _buf,
                keyFrame ? 0x10 : 0x0);
            ++_frames;
            // (audio capture support, currently disabled)
            //void CAPTURE_AddWave(UInt32 freq, UInt32 len, SInt16 * data)
            //{
            //    UInt left = WAVE_BUF - _audioused;
            //    if (left > len)
            //        left = len;
            //    memcpy( &_audiobuf[_audioused], data, left*4);
            //    _audioused += left;
            //    _audiorate = freq;
            //}
            //if ( capture.video.audioused ) {
            //    CAPTURE_AddAviChunk( "01wb", _audioused * 4, _audiobuf, 0);
            //    _audiowritten = _audioused*4;
            //    _audioused = 0;
            //}
        }
    } while (inputFileSizeRemaining != 0);
    if (inflateEnd(&zs) != Z_OK)
        throw Exception("inflateEnd failed");
    // Now that frame count and data sizes are known, build the real AVI
    // header and overwrite the zero placeholder written earlier.
    int main_list;
    _header_pos = 0;
    /* Try and write an avi header */
    AVIOUT4("RIFF");                    // Riff header
    AVIOUTd(AVI_HEADER_SIZE + _written - 8 + _indexused);
    AVIOUT4("AVI ");
    AVIOUT4("LIST");                    // List header
    main_list = _header_pos;
    AVIOUTd(0);                         // TODO size of list
    AVIOUT4("hdrl");
    AVIOUT4("avih");
    AVIOUTd(56);                        /* # of bytes to follow */
    AVIOUTd((11*912*262*2)/315);        /* Microseconds per frame */
                                        // 1752256/105 ~= 16688
    AVIOUTd(0);
    AVIOUTd(0);       /* PaddingGranularity (whatever that might be) */
    AVIOUTd(0x110);   /* Flags,0x10 has index, 0x100 interleaved */
    AVIOUTd(_frames); /* TotalFrames */
    AVIOUTd(0);       /* InitialFrames */
    AVIOUTd(2);       /* Stream count */
    AVIOUTd(0);       /* SuggestedBufferSize */
    AVIOUTd(outputSize.x);  /* Width */
    AVIOUTd(outputSize.y);  /* Height */
    AVIOUTd(0);       /* TimeScale: Unit used to measure time */
    AVIOUTd(0);       /* DataRate: Data rate of playback */
    AVIOUTd(0);       /* StartTime: Starting time of AVI data */
    AVIOUTd(0);       /* DataLength: Size of AVI data chunk */
    /* Video stream list */
    AVIOUT4("LIST");
    AVIOUTd(4 + 8 + 56 + 8 + 40);  /* Size of the list */
    AVIOUT4("strl");
    /* video stream header */
    AVIOUT4("strh");
    AVIOUTd(56);         /* # of bytes to follow */
    AVIOUT4("vids");     /* Type */
    AVIOUT4(CODEC_4CC);  /* Handler */
    AVIOUTd(0);          /* Flags */
    AVIOUTd(0);          /* Reserved, MS says: wPriority, wLanguage */
    AVIOUTd(0);          /* InitialFrames */
    AVIOUTd(82137);      /* Scale */  // 11*912*262
    AVIOUTd(4921875);    /* Rate: Rate/Scale == samples/second */  // 157500000
    AVIOUTd(0);          /* Start */
    AVIOUTd(_frames);    /* Length */
    AVIOUTd(0);          /* SuggestedBufferSize */
    AVIOUTd(~0);         /* Quality */
    AVIOUTd(0);          /* SampleSize */
    AVIOUTd(0);          /* Frame */
    AVIOUTd(0);          /* Frame */
    /* The video stream format */
    AVIOUT4("strf");
    AVIOUTd(40);            /* # of bytes to follow */
    AVIOUTd(40);            /* Size */
    AVIOUTd(outputSize.x);  /* Width */
    AVIOUTd(outputSize.y);  /* Height */
    // OUTSHRT(1); OUTSHRT(24); /* Planes, Count */
    AVIOUTd(0);
    AVIOUT4(CODEC_4CC);     /* Compression */
    AVIOUTd(outputSize.x*outputSize.y*4);  /* SizeImage (in bytes?) */
    AVIOUTd(0);  /* XPelsPerMeter */
    AVIOUTd(0);  /* YPelsPerMeter */
    AVIOUTd(0);  /* ClrUsed: Number of colors used */
    AVIOUTd(0);  /* ClrImportant: Number of colors important */
    /* Audio stream list */
    AVIOUT4("LIST");
    AVIOUTd(4 + 8 + 56 + 8 + 16);  /* Length of list in bytes */
    AVIOUT4("strl");
    /* The audio stream header */
    AVIOUT4("strh");
    AVIOUTd(56);  /* # of bytes to follow */
    AVIOUT4("auds");
    AVIOUTd(0);   /* Format (Optionally) */
    AVIOUTd(0);   /* Flags */
    AVIOUTd(0);   /* Reserved, MS says: wPriority, wLanguage */
    AVIOUTd(0);   /* InitialFrames */
    AVIOUTd(4);   /* Scale */
    AVIOUTd(_audiorate*4);  /* Rate, actual rate is scale/rate */
    AVIOUTd(0);   /* Start */
    if (!_audiorate)
        _audiorate = 1;
    AVIOUTd(_audiowritten/4);  /* Length */
    AVIOUTd(0);   /* SuggestedBufferSize */
    AVIOUTd(~0);  /* Quality */
    AVIOUTd(4);   /* SampleSize */
    AVIOUTd(0);   /* Frame */
    AVIOUTd(0);   /* Frame */
    /* The audio stream format */
    AVIOUT4("strf");
    AVIOUTd(16);  /* # of bytes to follow */
    AVIOUTw(1);   /* Format, WAVE_ZMBV_FORMAT_PCM */
    AVIOUTw(2);   /* Number of channels */
    AVIOUTd(_audiorate);    /* SamplesPerSec */
    AVIOUTd(_audiorate*4);  /* AvgBytesPerSec*/
    AVIOUTw(4);   /* BlockAlign */
    AVIOUTw(16);  /* BitsPerSample */
    int nmain = _header_pos - main_list - 4;
    /* Finish stream list, i.e. put number of bytes in the list to proper
       pos */
    int njunk = AVI_HEADER_SIZE - 8 - 12 - _header_pos;
    AVIOUT4("JUNK");
    AVIOUTd(njunk);
    /* Fix the size of the main list */
    _header_pos = main_list;
    AVIOUTd(nmain);
    _header_pos = AVI_HEADER_SIZE - 12;
    AVIOUT4("LIST");
    AVIOUTd(_written + 4);  /* Length of list in bytes */
    AVIOUT4("movi");
    /* First add the index table to the end */
    memcpy(_index, "idx1", 4);
    host_writed(_index+4, _indexused - 8 );
    fwrite(_index, 1, _indexused, _handle);
    fseek(_handle, 0, SEEK_SET);
    fwrite(&_avi_header, 1, AVI_HEADER_SIZE, _handle);
    fclose(_handle);
    free(_index);
    free(_buf);
    _handle = 0;
}
// render markdown to HTML -- assumes UTF-8 encoding Error markdownToHTML(const std::string& markdownInput, const Extensions& extensions, const HTMLOptions& options, std::string* pHTMLOutput) { // exclude fenced code blocks std::vector<ExcludePattern> excludePatterns; excludePatterns.push_back(ExcludePattern(boost::regex("^`{3,}[^\\n]*?$"), boost::regex("^`{3,}\\s*$"))); // exclude inline verbatim code excludePatterns.push_back(ExcludePattern(boost::regex("`[^\\n]+?`"))); // exclude indented code blocks excludePatterns.push_back(ExcludePattern( boost::regex("(\\A|\\A\\s*\\n|\\n\\s*\\n)(( {4}|\\t)[^\\n]*\\n)*(( {4}|\\t)[^\\n]*)"))); std::string input = markdownInput; boost::scoped_ptr<MathJaxFilter> pMathFilter; if (extensions.ignoreMath) { pMathFilter.reset(new MathJaxFilter(excludePatterns, &input, pHTMLOutput)); } // setup input buffer SundownBuffer inputBuffer(input); if (!inputBuffer.allocated()) return allocationError(ERROR_LOCATION); // render table of contents if requested if (options.toc) { struct sd_callbacks htmlCallbacks; struct html_renderopt htmlOptions; ::sdhtml_toc_renderer(&htmlCallbacks, &htmlOptions); std::string tocOutput; Error error = renderMarkdown(inputBuffer, extensions, options.smartypants, &htmlCallbacks, &htmlOptions, &tocOutput); if (error) return error; pHTMLOutput->append("<div id=\"toc\">\n"); pHTMLOutput->append("<div id=\"toc_header\">Table of Contents</div>\n"); pHTMLOutput->append(tocOutput); pHTMLOutput->append("</div>\n"); pHTMLOutput->append("\n"); } // setup html renderer struct sd_callbacks htmlCallbacks; struct html_renderopt htmlOptions; int htmlRenderMode = 0; if (options.useXHTML) htmlRenderMode |= HTML_USE_XHTML; if (options.hardWrap) htmlRenderMode |= HTML_HARD_WRAP; if (options.toc) htmlRenderMode |= HTML_TOC; if (options.safelink) htmlRenderMode |= HTML_SAFELINK; if (options.skipHTML) htmlRenderMode |= HTML_SKIP_HTML; if (options.skipStyle) htmlRenderMode |= HTML_SKIP_STYLE; if (options.skipImages) htmlRenderMode |= 
HTML_SKIP_IMAGES; if (options.skipLinks) htmlRenderMode |= HTML_SKIP_LINKS; if (options.escape) htmlRenderMode |= HTML_ESCAPE; ::sdhtml_renderer(&htmlCallbacks, &htmlOptions, htmlRenderMode); // render page std::string output; Error error = renderMarkdown(inputBuffer, extensions, options.smartypants, &htmlCallbacks, &htmlOptions, &output); if (error) return error; // append output and return success pHTMLOutput->append(output); return Success(); }