void Ruleset::loadDb() { if(mBeforeReplacementsPtr) delete mBeforeReplacementsPtr; if(mAfterReplacementsPtr) delete mAfterReplacementsPtr; mBeforeReplacementsPtr = new Replacements(mDb, "rulesetBefore", MAGIC_OWNER_ID); mAfterReplacementsPtr = new Replacements(mDb, "rulesetAfter", MAGIC_OWNER_ID); for(map<sqlite_int64, OutputFormat*>::iterator it = mChildren.begin(); it != mChildren.end(); it++) { delete it->second; } mChildren.clear(); //get outputFormats vector<string> rowids; stringstream strSql; strSql << "SELECT rowid FROM outputFormats"; exec(strSql, mDb, onAppendFirstColumnToVector, &rowids); for (vector<string>::iterator it = rowids.begin(); it != rowids.end(); it++) { sqlite_int64 rowid = cSqlInFormated<sqlite_int64>(*it); OutputFormat* newFormatPtr = new OutputFormat(mDb, rowid); mChildren[newFormatPtr->getRowId()] = newFormatPtr; } }
/// Look up the default encoder for the given container format.
/// @param format  output container whose default codec ids are consulted
/// @param isVideo true selects the default video codec id, false the audio one
/// @return Codec wrapping whatever avcodec_find_encoder() returned
Codec findEncodingCodec(const OutputFormat &format, bool isVideo)
{
    const auto codecId = isVideo ? format.defaultVideoCodecId()
                                 : format.defaultAudioCodecId();
    return Codec { avcodec_find_encoder(codecId) };
}
// Remove an output format (and all of its input rules) from both the
// database and the in-memory child cache.
//
// @param outputFormatId rowid of the outputFormats row to remove; unknown
//                       ids are ignored.
void Ruleset::removeOutputFormat(sqlite_int64 outputFormatId) {
    // Bugfix: the previous code used mChildren[outputFormatId], which
    // default-inserts a null pointer for an unknown id and then dereferences
    // it. Look the child up without inserting and bail out when absent.
    auto found = mChildren.find(outputFormatId);
    if (found == mChildren.end()) {
        return;
    }
    OutputFormat* outputFormat = found->second;

    // Detach every input rule first. getInputRules() is captured into a
    // local vector (by value), so removing rules while iterating is safe.
    vector<InputRule*> rules = outputFormat->getInputRules();
    for (vector<InputRule*>::iterator it = rules.begin(); it != rules.end(); ++it) {
        outputFormat->removeInputRule((*it)->getRowId());
    }

    // Delete the backing database row...
    stringstream strSql;
    strSql << "DELETE FROM outputFormats WHERE rowid = " << cSqlOutFormated(outputFormatId);
    exec(strSql, mDb);

    // ...then drop and free the cached object.
    mChildren.erase(found);
    delete outputFormat;
}
void FormatContext::setFormat(const OutputFormat &format) { if (isOpened()) { cerr << "Can't set format for opened container\n"; return; } m_raw->oformat = const_cast<AVOutputFormat*>(format.raw()); m_raw->iformat = nullptr; }
// Open the container for writing through a caller-supplied CustomIO object.
// An output format must already be set (there is no URI to guess it from);
// the actual work is delegated to openCustomIO().
void FormatContext::openCustomIOOutput(CustomIO *io, size_t internalBufferSize, OptionalErrorCode ec)
{
    if (isOpened()) {
        throws_if(ec, Errors::FormatAlreadyOpened);
        return;
    }

    // With custom IO the muxer cannot be guessed, so the format must already
    // be present on the context.
    if (m_raw && outputFormat().isNull()) {
        fflog(AV_LOG_ERROR, "You must set output format for use with custom IO\n");
        throws_if(ec, Errors::FormatNullOutputFormat);
        return;
    }

    openCustomIO(io, internalBufferSize, true, ec);
}
// Close the container and re-arm the object with a fresh, empty context so
// it can be reused for another open. No-op when the context was never
// allocated or never opened.
void FormatContext::close()
{
    if (!m_raw)
        return;
    if (isOpened()) {
        closeCodecContexts();
        // Keep the AVIO pointer: when the IO is user-supplied it must be
        // freed manually after the format context itself is gone (see the
        // m_customIO branch below).
        AVIOContext *avio = m_raw->pb;
        if (isOutput()) {
            OutputFormat fmt = outputFormat();
            // Only close the file when the muxer actually opened one
            // (no AVFMT_NOFILE) and the IO was not supplied by the caller.
            if (!(fmt.flags() & AVFMT_NOFILE) && !(m_raw->flags & AVFMT_FLAG_CUSTOM_IO)) {
                avio_close(m_raw->pb);
            }
            avformat_free_context(m_raw);
        } else {
            // Input path: avformat_close_input() frees and nulls m_raw.
            avformat_close_input(&m_raw);
        }
        // Replace the freed context with a fresh allocation and reset all
        // open-state flags so the object is immediately reusable.
        m_raw = avformat_alloc_context();
        m_monitor.reset(new char);
        m_isOpened = false;
        m_streamsInfoFound = false;
        m_headerWriten = false;
        // To prevent free not out custom IO, e.g. setted via raw pointer access
        if (m_customIO) {
            // Close custom IO: free the internal buffer first, then the
            // AVIOContext wrapper itself.
            av_freep(&avio->buffer);
            av_freep(&avio);
            m_customIO = false;
        }
    }
}
// Example/demo: transcode the first video stream of the input file to FLV,
// doubling its resolution via VideoRescaler.
// Usage: prog <input-uri> <output-file>. Processing stops after ~200 packets.
int main(int argc, char **argv)
{
    if (argc < 3)
        return 1;

    av::init();
    av::setFFmpegLoggingLevel(AV_LOG_DEBUG);

    string uri {argv[1]};
    string out {argv[2]};

    error_code ec;

    //
    // INPUT
    //
    FormatContext ictx;
    ssize_t videoStream = -1;
    VideoDecoderContext vdec;
    Stream vst;
    int count = 0;

    ictx.openInput(uri, ec);
    if (ec) {
        cerr << "Can't open input\n";
        return 1;
    }

    ictx.findStreamInfo(ec);
    if (ec) {
        cerr << "Can't find streams: " << ec << ", " << ec.message() << endl;
        return 1;
    }

    // Pick the first video stream.
    for (size_t i = 0; i < ictx.streamsCount(); ++i) {
        auto st = ictx.stream(i);
        if (st.mediaType() == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            vst = st;
            break;
        }
    }

    if (vst.isNull()) {
        cerr << "Video stream not found\n";
        return 1;
    }

    if (vst.isValid()) {
        vdec = VideoDecoderContext(vst);
        vdec.setRefCountedFrames(true);
        cerr << "PTR: " << (void*)vdec.raw()->codec << endl;
        // Codec() means: use the codec already attached to the stream.
        vdec.open(Codec(), ec);
        if (ec) {
            cerr << "Can't open decoder\n";
            return 1;
        }
    }

    //
    // OUTPUT
    //
    OutputFormat ofrmt;
    FormatContext octx;

    ofrmt.setFormat("flv", out);
    octx.setFormat(ofrmt);

    // NOTE(review): single-argument overload — presumably defaults to the
    // format's video codec; confirm against the avcpp helper declarations.
    Codec ocodec = findEncodingCodec(ofrmt);
    Stream ost = octx.addStream(ocodec);
    VideoEncoderContext encoder {ost};

    // Settings: output is the input scaled 2x in both dimensions.
    encoder.setWidth(vdec.width() * 2);
    encoder.setHeight(vdec.height() * 2);
    encoder.setPixelFormat(vdec.pixelFormat());
    encoder.setTimeBase(Rational{1, 1000});
    encoder.setBitRate(vdec.bitRate());
    // Muxers that need global headers require the matching encoder flag.
    encoder.addFlags(octx.outputFormat().isFlags(AVFMT_GLOBALHEADER) ? CODEC_FLAG_GLOBAL_HEADER : 0);

    ost.setFrameRate(vst.frameRate());
    ost.setTimeBase(encoder.timeBase());

    octx.openOutput(out, ec);
    if (ec) {
        cerr << "Can't open output\n";
        return 1;
    }

    encoder.open(ec);
    if (ec) {
        cerr << "Can't opent encoder\n";
        return 1;
    }

    octx.dump();
    octx.writeHeader();
    octx.flush();

    //
    // RESCALER
    //
    VideoRescaler rescaler; // Rescaler will be inited on demaind

    //
    // PROCESS
    //
    while (true) {
        // READING
        Packet pkt = ictx.readPacket(ec);
        if (ec) {
            clog << "Packet reading error: " << ec << ", " << ec.message() << endl;
            break;
        }

        // EOF
        if (!pkt) {
            break;
        }

        if (pkt.streamIndex() != videoStream) {
            continue;
        }

        clog << "Read packet: pts=" << pkt.pts() << ", dts=" << pkt.dts() << " / " << pkt.pts().seconds() << " / " << pkt.timeBase() << " / st: " << pkt.streamIndex() << endl;

        // DECODING
        auto inpFrame = vdec.decode(pkt, ec);

        // Hard stop after 200 packets — demo-length limit.
        count++;
        if (count > 200)
            break;

        if (ec) {
            cerr << "Decoding error: " << ec << endl;
            return 1;
        } else if (!inpFrame) {
            cerr << "Empty frame\n";
            continue;
        }

        clog << "inpFrame: pts=" << inpFrame.pts() << " / " << inpFrame.pts().seconds() << " / " << inpFrame.timeBase() << ", " << inpFrame.width() << "x" << inpFrame.height() << ", size=" << inpFrame.size() << ", ref=" << inpFrame.isReferenced() << ":" << inpFrame.refCount() << " / type: " << inpFrame.pictureType() << endl;

        // Change timebase
        inpFrame.setTimeBase(encoder.timeBase());
        inpFrame.setStreamIndex(0);
        // No-arg call — presumably resets the picture type so the encoder
        // chooses; TODO confirm against VideoFrame::setPictureType default.
        inpFrame.setPictureType();

        clog << "inpFrame: pts=" << inpFrame.pts() << " / " << inpFrame.pts().seconds() << " / " << inpFrame.timeBase() << ", " << inpFrame.width() << "x" << inpFrame.height() << ", size=" << inpFrame.size() << ", ref=" << inpFrame.isReferenced() << ":" << inpFrame.refCount() << " / type: " << inpFrame.pictureType() << endl;

        // SCALE
        //VideoFrame outFrame {encoder.pixelFormat(), encoder.width(), encoder.height()};
        //rescaler.rescale(outFrame, inpFrame, ec);
        auto outFrame = rescaler.rescale(inpFrame, ec);
        if (ec) {
            cerr << "Can't rescale frame: " << ec << ", " << ec.message() << endl;
            return 1;
        }

        clog << "outFrame: pts=" << outFrame.pts() << " / " << outFrame.pts().seconds() << " / " << outFrame.timeBase() << ", " << outFrame.width() << "x" << outFrame.height() << ", size=" << outFrame.size() << ", ref=" << outFrame.isReferenced() << ":" << outFrame.refCount() << " / type: " << outFrame.pictureType() << endl;

        // ENCODE
        Packet opkt = encoder.encode(outFrame, ec);
        if (ec) {
            cerr << "Encoding error: " << ec << endl;
            return 1;
        } else if (!opkt) {
            cerr << "Empty packet\n";
            continue;
        }

        // Only one output stream
        opkt.setStreamIndex(0);

        clog << "Write packet: pts=" << opkt.pts() << ", dts=" << opkt.dts() << " / " << opkt.pts().seconds() << " / " << opkt.timeBase() << " / st: " << opkt.streamIndex() << endl;

        octx.writePacket(opkt, ec);
        if (ec) {
            cerr << "Error write packet: " << ec << ", " << ec.message() << endl;
            return 1;
        }
    }

    octx.writeTrailer();
    ictx.close();
}
// Example/demo: transcode the first audio stream of the input to an output
// whose container is guessed from the file name, resampling to 48 kHz stereo
// in the encoder's first supported sample format.
// Usage: prog <input-uri> <output-file>.
int main(int argc, char **argv)
{
    if (argc < 3)
        return 1;

    av::init();
    av::setFFmpegLoggingLevel(AV_LOG_TRACE);

    string uri (argv[1]);
    string out (argv[2]);

    ssize_t audioStream = -1;
    AudioDecoderContext adec;
    Stream ast;
    error_code ec;

    int count = 0;

    // Extra scope so all av:: objects are destroyed before main returns.
    {
        //
        // INPUT
        //
        FormatContext ictx;

        ictx.openInput(uri, ec);
        if (ec) {
            cerr << "Can't open input\n";
            return 1;
        }

        ictx.findStreamInfo();

        // Pick the first audio stream.
        for (size_t i = 0; i < ictx.streamsCount(); ++i) {
            auto st = ictx.stream(i);
            if (st.isAudio()) {
                audioStream = i;
                ast = st;
                break;
            }
        }

        cerr << audioStream << endl;

        if (ast.isNull()) {
            cerr << "Audio stream not found\n";
            return 1;
        }

        if (ast.isValid()) {
            adec = AudioDecoderContext(ast);

            //Codec codec = findDecodingCodec(adec.raw()->codec_id);
            //adec.setCodec(codec);
            //adec.setRefCountedFrames(true);

            adec.open(ec);
            if (ec) {
                cerr << "Can't open codec\n";
                return 1;
            }
        }

        //
        // OUTPUT
        //
        OutputFormat ofmt;
        FormatContext octx;

        // Guess the muxer from the output name/URI.
        ofmt = av::guessOutputFormat(out, out);
        clog << "Output format: " << ofmt.name() << " / " << ofmt.longName() << '\n';
        octx.setFormat(ofmt);

        // false => audio encoder for this container's default audio codec.
        Codec ocodec = av::findEncodingCodec(ofmt, false);
        Stream ost = octx.addStream(ocodec);
        AudioEncoderContext enc (ost);

        clog << ocodec.name() << " / " << ocodec.longName() << ", audio: " << (ocodec.type()==AVMEDIA_TYPE_AUDIO) << '\n';

        // Dump the encoder's capabilities for reference.
        auto sampleFmts = ocodec.supportedSampleFormats();
        auto sampleRates = ocodec.supportedSamplerates();
        auto layouts = ocodec.supportedChannelLayouts();

        clog << "Supported sample formats:\n";
        for (const auto &fmt : sampleFmts) {
            clog << " " << av_get_sample_fmt_name(fmt) << '\n';
        }
        clog << "Supported sample rates:\n";
        for (const auto &rate : sampleRates) {
            clog << " " << rate << '\n';
        }
        clog << "Supported sample layouts:\n";
        for (const auto &lay : layouts) {
            char buf[128] = {0};
            av_get_channel_layout_string(buf, sizeof(buf), av_get_channel_layout_nb_channels(lay), lay);
            clog << " " << buf << '\n';
        }

        //return 0;

        // Settings: #if 1 branch forces 48 kHz stereo in the encoder's first
        // supported sample format; the #else branch would mirror the input.
#if 1
        enc.setSampleRate(48000);
        // NOTE(review): sampleFmts[0] assumes the codec reports at least one
        // supported sample format — empty list would be out-of-bounds.
        enc.setSampleFormat(sampleFmts[0]);
        // Layout
        //enc.setChannelLayout(adec.channelLayout());
        enc.setChannelLayout(AV_CH_LAYOUT_STEREO);
        //enc.setChannelLayout(AV_CH_LAYOUT_MONO);
        enc.setTimeBase(Rational(1, enc.sampleRate()));
        enc.setBitRate(adec.bitRate());
#else
        enc.setSampleRate(adec.sampleRate());
        enc.setSampleFormat(adec.sampleFormat());
        enc.setChannelLayout(adec.channelLayout());
        enc.setTimeBase(adec.timeBase());
        enc.setBitRate(adec.bitRate());
#endif

        octx.openOutput(out, ec);
        if (ec) {
            cerr << "Can't open output\n";
            return 1;
        }

        enc.open(ec);
        if (ec) {
            cerr << "Can't open encoder\n";
            return 1;
        }

        clog << "Encoder frame size: " << enc.frameSize() << '\n';

        octx.dump();
        octx.writeHeader();
        octx.flush();

        //
        // RESAMPLER
        //
        // Converts decoded samples (input layout/rate/format) to what the
        // encoder was configured for above.
        AudioResampler resampler(enc.channelLayout(), enc.sampleRate(), enc.sampleFormat(), adec.channelLayout(), adec.sampleRate(), adec.sampleFormat());

        //
        // PROCESS
        //
        while (true) {
            Packet pkt = ictx.readPacket(ec);
            if (ec) {
                clog << "Packet reading error: " << ec << ", " << ec.message() << endl;
                break;
            }

            // Null packets (EOF) still flow through to flush the decoder.
            if (pkt.streamIndex() != audioStream) {
                continue;
            }

            clog << "Read packet: isNull=" << (bool)!pkt << ", " << pkt.pts() << "(nopts:" << pkt.pts().isNoPts() << ")" << " / " << pkt.pts().seconds() << " / " << pkt.timeBase() << " / st: " << pkt.streamIndex() << endl;

#if 0
            if (pkt.pts() == av::NoPts && pkt.timeBase() == Rational()) {
                clog << "Skip invalid timestamp packet: data=" << (void*)pkt.data() << ", size=" << pkt.size() << ", flags=" << pkt.flags() << " (corrupt:" << (pkt.flags() & AV_PKT_FLAG_CORRUPT) << ";key:" << (pkt.flags() & AV_PKT_FLAG_KEY) << ")" << ", side_data=" << (void*)pkt.raw()->side_data << ", side_data_count=" << pkt.raw()->side_data_elems << endl;
                //continue;
            }
#endif

            auto samples = adec.decode(pkt, ec);

            count++;
            //if (count > 200)
            //    break;

            if (ec) {
                cerr << "Decode error: " << ec << ", " << ec.message() << endl;
                return 1;
            } else if (!samples) {
                cerr << "Empty samples set\n";
                //if (!pkt) // decoder flushed here
                //    break;
                //continue;
            }

            clog << " Samples [in]: " << samples.samplesCount() << ", ch: " << samples.channelsCount() << ", freq: " << samples.sampleRate() << ", name: " << samples.channelsLayoutString() << ", pts: " << samples.pts().seconds() << ", ref=" << samples.isReferenced() << ":" << samples.refCount() << endl;

            // Empty samples set should not be pushed to the resampler, but it is valid case for the
            // end of reading: during samples empty, some cached data can be stored at the resampler
            // internal buffer, so we should consume it.
            if (samples) {
                resampler.push(samples, ec);
                if (ec) {
                    clog << "Resampler push error: " << ec << ", text: " << ec.message() << endl;
                    continue;
                }
            }

            // Pop resampler data
            // On EOF (empty samples) drain everything the resampler buffered.
            bool getAll = !samples;
            while (true) {
                AudioSamples ouSamples(enc.sampleFormat(), enc.frameSize(), enc.channelLayout(), enc.sampleRate());

                // Resample:
                bool hasFrame = resampler.pop(ouSamples, getAll, ec);
                if (ec) {
                    clog << "Resampling status: " << ec << ", text: " << ec.message() << endl;
                    break;
                } else if (!hasFrame) {
                    break;
                } else
                    clog << " Samples [ou]: " << ouSamples.samplesCount() << ", ch: " << ouSamples.channelsCount() << ", freq: " << ouSamples.sampleRate() << ", name: " << ouSamples.channelsLayoutString() << ", pts: " << ouSamples.pts().seconds() << ", ref=" << ouSamples.isReferenced() << ":" << ouSamples.refCount() << endl;

                // ENCODE
                ouSamples.setStreamIndex(0);
                ouSamples.setTimeBase(enc.timeBase());

                Packet opkt = enc.encode(ouSamples, ec);
                if (ec) {
                    cerr << "Encoding error: " << ec << ", " << ec.message() << endl;
                    return 1;
                } else if (!opkt) {
                    //cerr << "Empty packet\n";
                    continue;
                }

                opkt.setStreamIndex(0);

                clog << "Write packet: pts=" << opkt.pts() << ", dts=" << opkt.dts() << " / " << opkt.pts().seconds() << " / " << opkt.timeBase() << " / st: " << opkt.streamIndex() << endl;

                octx.writePacket(opkt, ec);
                if (ec) {
                    cerr << "Error write packet: " << ec << ", " << ec.message() << endl;
                    return 1;
                }
            }

            // For the first packets samples can be empty: decoder caching
            if (!pkt && !samples)
                break;
        }

        //
        // Is resampler flushed?
        //
        cerr << "Delay: " << resampler.delay() << endl;

        //
        // Flush encoder queue
        //
        clog << "Flush encoder:\n";
        while (true) {
            // Null samples set signals the encoder to emit buffered packets.
            AudioSamples null(nullptr);
            Packet opkt = enc.encode(null, ec);
            if (ec || !opkt)
                break;

            opkt.setStreamIndex(0);

            clog << "Write packet: pts=" << opkt.pts() << ", dts=" << opkt.dts() << " / " << opkt.pts().seconds() << " / " << opkt.timeBase() << " / st: " << opkt.streamIndex() << endl;

            octx.writePacket(opkt, ec);
            if (ec) {
                cerr << "Error write packet: " << ec << ", " << ec.message() << endl;
                return 1;
            }
        }

        octx.flush();
        octx.writeTrailer();
    }
}
// Open the container for writing to `uri`.
//
// Format resolution order: the explicit `format` argument, then a format
// previously set on the context, then a guess from the URI. Fails with
// FormatNullOutputFormat when none of these yields a muxer.
// The file itself is only opened for muxers that need one (no AVFMT_NOFILE).
void FormatContext::openOutput(const string &uri, OutputFormat format, AVDictionary **options, OptionalErrorCode ec)
{
    clear_if(ec);

    if (!m_raw) {
        throws_if(ec, Errors::Unallocated);
        return;
    }

    if (isOpened()) {
        throws_if(ec, Errors::FormatAlreadyOpened);
        return;
    }

    // Prefer the caller's format; otherwise fall back to one already set.
    if (format.isNull())
        format = outputFormat();
    else
        setFormat(format);

    if (format.isNull()) {
        // Guess format
        format = guessOutputFormat(string(), uri);
        if (format.isNull()) {
            fflog(AV_LOG_ERROR, "Can't guess output format");
            throws_if(ec, Errors::FormatNullOutputFormat);
            return;
        }
        setFormat(format);
    }

    // Fix stream flags
    // Legacy (pre-codecpar) builds: propagate GLOBAL_HEADER to each stream's
    // deprecated AVStream::codec context.
#if !USE_CODECPAR
    FF_DISABLE_DEPRECATION_WARNINGS
    for (size_t i = 0; i < streamsCount(); ++i) {
        auto st = stream(i);
        if (st.raw()->codec) {
            if (outputFormat().isFlags(AVFMT_GLOBALHEADER)) {
                st.raw()->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            }
        }
    }
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    resetSocketAccess();

    // Only open a real file/stream when the muxer needs one.
    if (!(format.flags() & AVFMT_NOFILE)) {
        int sts = avio_open2(&m_raw->pb, uri.c_str(), AVIO_FLAG_WRITE, nullptr, options);
        if (sts < 0) {
            throws_if(ec, sts, ffmpeg_category());
            return;
        }
    }

    m_uri = uri;
    m_isOpened = true;
}
// Create a fresh OutputFormat backed by this ruleset's database, register it
// in the child map under its rowid, and return a reference for configuration.
// Ownership stays with the ruleset (freed in loadDb()/removeOutputFormat()).
OutputFormat& Ruleset::addOutputFormat() {
    OutputFormat* created = new OutputFormat (mDb);
    sqlite_int64 rowid = created->getRowId();
    mChildren[rowid] = created;
    return *created;
}
/// Ask FFmpeg which codec id best fits the given container plus the
/// name/url/mime hints and media type, then resolve it to an encoder.
Codec guessEncodingCodec(OutputFormat format, const char *name, const char *url, const char *mime, AVMediaType mediaType)
{
    const auto codecId = av_guess_codec(format.raw(), name, url, mime, mediaType);
    return findEncodingCodec(codecId);
}
/** This function reads a token, calls tokenType(...) to parse it to an
    integer, and then acts appropriately, calling the individual class methods
    to read a certain object. */
void Input::read()
{
  char token[64];
  // Tail pointer into the output-format list; advanced on each 'output'
  // token. NOTE(review): only this local is updated — presumably the list
  // nodes themselves are linked by getOutFmts(); confirm in OutputFormat.
  OutputFormat* outList = outListHead;

  // Outer loop: one iteration per input stream on the include stack.
  while (input != NULL) {
    // Inner loop: one token at a time until this stream is exhausted.
    while (!(*input).eof()) {
      token[0] = '\0';
      clearIncludeComment();
      *input >> token;
      if (strlen(token)>0) {
        debug(1,"Token %s = %d",token,tokenType(token));
        switch (tokenType(token)) {
        case INTOK_GEOM:
          // Next token is the geometry type name.
          *input >> token;
          debug(1,"Creating new geometry object: %s.", token);
          inGeom = new Geometry(token);
          memCheck(inGeom,"Input::Input() constructor: inGeom");
          break;
        case INTOK_MIX:
          debug(1,"Creating new Mixture object.");
          mixList = mixList->getMixture(*input);
          break;
        case INTOK_FLUX:
          debug(1,"Creating new Flux object.");
          fluxList = fluxList->getFlux(*input);
          break;
        case INTOK_PULSE:
          debug(1,"Creating new History object.");
          historyList = historyList->getHistory(*input);
          break;
        case INTOK_SCHED:
          debug(1,"Creating new Schedule object.");
          schedList = schedList->getSchedule(*input);
          break;
        case INTOK_DIM:
          debug(1,"Creating new Dimension object.");
          dimList = dimList->getDimension(*input);
          break;
        case INTOK_MINR:
          // NOTE(review): dereferences inGeom — assumes a 'geometry' token
          // appeared earlier in the input; null deref otherwise. Confirm
          // whether input ordering is validated elsewhere.
          debug(1,"Reading Minor Radius.");
          *input >> token;
          inGeom->setMinorR(atof(token));
          verbose(2,"Set torus minor radius = %g",atof(token));
          break;
        case INTOK_MAJR:
          // Same inGeom ordering assumption as INTOK_MINR above.
          debug(1,"Reading Major Radius.");
          *input >> token;
          inGeom->setMajorR(atof(token));
          verbose(2,"Set torus major radius = %g",atof(token));
          break;
        case INTOK_COOL:
          debug(1,"Creating new CoolingTime object.");
          coolList->getCoolingTimes(*input);
          break;
        case INTOK_MAT:
          debug(1,"Creating new Loading object.");
          loadList->getMatLoading(*input);
          break;
        case INTOK_VOL:
          debug(1,"Reading Volume List.");
          volList->getVolumes(*input);
          break;
        case INTOK_MATLIB:
          debug(1,"Opening material library");
          Component::getMatLib(*input);
          break;
        case INTOK_ELELIB:
          debug(1,"Opening element library");
          Component::getEleLib(*input);
          break;
        case INTOK_DATALIB:
          debug(1,"Opening data library");
          NuclearData::getDataLib(*input);
          break;
        case INTOK_LIBCONV:
          // Library-conversion mode terminates the program: no break needed.
          debug(1,"Converting data library");
          DataLib::convertLib(*input);
          verbose(1,"Exiting after library conversion.");
          exit(0);
        case INTOK_TRUNC:
          debug(1,"Reading Truncation criteria.");
          Chain::getTruncInfo(*input);
          break;
        case INTOK_IGNORE:
          debug(1,"Reading relative ignore criteria.");
          Chain::getIgnoreInfo(*input);
          break;
        case INTOK_IMPURITY:
          debug(1,"Reading Impurity definition and truncation criteria.");
          Chain::getImpTruncInfo(*input);
          break;
        case INTOK_NORM:
          debug(1,"Reading interval normalizations.");
          normList->getNorms(*input);
          break;
        case INTOK_OUTPUT:
          debug(1,"Reading output formatting.");
          outList = outList->getOutFmts(*input);
          break;
        case INTOK_DUMPFILE:
          debug(1,"Openning dump filename.");
          *input >> token;
          Result::initBinDump(token);
          break;
        case INTOK_SOLVELIST:
          solveList->getSolveList(*input);
          break;
        case INTOK_SKIPLIST:
          skipList->getSolveList(*input);
          break;
        case INTOK_REFFLUX:
          // Single-character flux type selector, lower-cased.
          *input >> token;
          VolFlux::setRefFluxType(tolower(token[0]));
          break;
          //       case INTOK_CPLIBS:
          // 	int num;
          // 	*input >> num;
          // 	VolFlux::setNumCP(num);
          // 	*input >> num;
          // 	VolFlux::setNumCPEG(num);
          // 	Volume::loadRangeLib(input);
          // 	Volume::loadSpecLib(input);
          // 	break;
        default:
          error(100,"Invalid token in input file: %s",token);
        }
      }
    }
    /* delete current stream */
    // cin is never heap-allocated, so it must not be deleted.
    if (input != &cin) {
      debug(1,"Deleting current stream.");
      delete input;
    }
    debug(1,"Popping next stream.");
    /* end of this input stream */
    streamStack >> input;
  }
}