bool CameraDevice::getDefaultInputFormat() { QMutexLocker locker(&iformatLock); if (iformat) return true; avdevice_register_all(); // Desktop capture input formats #ifdef Q_OS_LINUX idesktopFormat = av_find_input_format("x11grab"); #endif #ifdef Q_OS_WIN idesktopFormat = av_find_input_format("gdigrab"); #endif // Webcam input formats #ifdef Q_OS_LINUX if ((iformat = av_find_input_format("v4l2"))) return true; #endif #ifdef Q_OS_WIN if ((iformat = av_find_input_format("dshow"))) return true; if ((iformat = av_find_input_format("vfwcap"))) #endif #ifdef Q_OS_OSX if ((iformat = av_find_input_format("avfoundation"))) return true; if ((iformat = av_find_input_format("qtkit"))) return true; #endif qWarning() << "No valid input format found"; return false; }
int avformat_OpenDemux( vlc_object_t *p_this ) { demux_t *p_demux = (demux_t*)p_this; demux_sys_t *p_sys; AVInputFormat *fmt = NULL; vlc_tick_t i_start_time = VLC_TICK_INVALID; bool b_can_seek; const char *psz_url; int error; if( p_demux->psz_filepath ) psz_url = p_demux->psz_filepath; else psz_url = p_demux->psz_url; if( avformat_ProbeDemux( p_this, &fmt, psz_url ) != VLC_SUCCESS ) return VLC_EGENERIC; vlc_stream_Control( p_demux->s, STREAM_CAN_SEEK, &b_can_seek ); /* Fill p_demux fields */ p_demux->pf_demux = Demux; p_demux->pf_control = Control; p_demux->p_sys = p_sys = malloc( sizeof( demux_sys_t ) ); if( !p_sys ) return VLC_ENOMEM; p_sys->ic = 0; p_sys->fmt = fmt; p_sys->tracks = NULL; p_sys->i_ssa_order = 0; TAB_INIT( p_sys->i_attachments, p_sys->attachments); p_sys->p_title = NULL; p_sys->i_seekpoint = 0; p_sys->i_update = 0; /* Create I/O wrapper */ unsigned char * p_io_buffer = av_malloc( AVFORMAT_IOBUFFER_SIZE ); if( !p_io_buffer ) { avformat_CloseDemux( p_this ); return VLC_ENOMEM; } p_sys->ic = avformat_alloc_context(); if( !p_sys->ic ) { av_free( p_io_buffer ); avformat_CloseDemux( p_this ); return VLC_ENOMEM; } AVIOContext *pb = p_sys->ic->pb = avio_alloc_context( p_io_buffer, AVFORMAT_IOBUFFER_SIZE, 0, p_demux, IORead, NULL, IOSeek ); if( !pb ) { av_free( p_io_buffer ); avformat_CloseDemux( p_this ); return VLC_ENOMEM; } p_sys->ic->pb->seekable = b_can_seek ? AVIO_SEEKABLE_NORMAL : 0; error = avformat_open_input(&p_sys->ic, psz_url, p_sys->fmt, NULL); if( error < 0 ) { msg_Err( p_demux, "Could not open %s: %s", psz_url, vlc_strerror_c(AVUNERROR(error)) ); av_free( pb->buffer ); av_free( pb ); p_sys->ic = NULL; avformat_CloseDemux( p_this ); return VLC_EGENERIC; } char *psz_opts = var_InheritString( p_demux, "avformat-options" ); unsigned nb_streams = p_sys->ic->nb_streams; AVDictionary *options[nb_streams ? 
nb_streams : 1]; options[0] = NULL; for (unsigned i = 1; i < nb_streams; i++) options[i] = NULL; if (psz_opts) { vlc_av_get_options(psz_opts, &options[0]); for (unsigned i = 1; i < nb_streams; i++) { av_dict_copy(&options[i], options[0], 0); } free(psz_opts); } vlc_avcodec_lock(); /* avformat calls avcodec behind our back!!! */ error = avformat_find_stream_info( p_sys->ic, options ); vlc_avcodec_unlock(); AVDictionaryEntry *t = NULL; while ((t = av_dict_get(options[0], "", t, AV_DICT_IGNORE_SUFFIX))) { msg_Err( p_demux, "Unknown option \"%s\"", t->key ); } av_dict_free(&options[0]); for (unsigned i = 1; i < nb_streams; i++) { av_dict_free(&options[i]); } nb_streams = p_sys->ic->nb_streams; /* it may have changed */ if( !nb_streams ) { msg_Err( p_demux, "No streams found"); avformat_CloseDemux( p_this ); return VLC_EGENERIC; } p_sys->tracks = calloc( nb_streams, sizeof(*p_sys->tracks) ); if( !p_sys->tracks ) { avformat_CloseDemux( p_this ); return VLC_ENOMEM; } p_sys->i_tracks = nb_streams; if( error < 0 ) { msg_Warn( p_demux, "Could not find stream info: %s", vlc_strerror_c(AVUNERROR(error)) ); } for( unsigned i = 0; i < nb_streams; i++ ) { struct avformat_track_s *p_track = &p_sys->tracks[i]; AVStream *s = p_sys->ic->streams[i]; const AVCodecParameters *cp = s->codecpar; es_format_t es_fmt; const char *psz_type = "unknown"; /* Do not use the cover art as a stream */ if( s->disposition == AV_DISPOSITION_ATTACHED_PIC ) continue; vlc_fourcc_t fcc = GetVlcFourcc( cp->codec_id ); switch( cp->codec_type ) { case AVMEDIA_TYPE_AUDIO: es_format_Init( &es_fmt, AUDIO_ES, fcc ); es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag ); es_fmt.i_bitrate = cp->bit_rate; es_fmt.audio.i_channels = cp->channels; es_fmt.audio.i_rate = cp->sample_rate; es_fmt.audio.i_bitspersample = cp->bits_per_coded_sample; es_fmt.audio.i_blockalign = cp->block_align; psz_type = "audio"; if(cp->codec_id == AV_CODEC_ID_AAC_LATM) { es_fmt.i_original_fourcc = VLC_FOURCC('L','A','T','M'); 
es_fmt.b_packetized = false; } else if(cp->codec_id == AV_CODEC_ID_AAC && p_sys->fmt->long_name && strstr(p_sys->fmt->long_name, "raw ADTS AAC")) { es_fmt.i_original_fourcc = VLC_FOURCC('A','D','T','S'); es_fmt.b_packetized = false; } break; case AVMEDIA_TYPE_VIDEO: es_format_Init( &es_fmt, VIDEO_ES, fcc ); es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag ); es_fmt.video.i_bits_per_pixel = cp->bits_per_coded_sample; /* Special case for raw video data */ if( cp->codec_id == AV_CODEC_ID_RAWVIDEO ) { msg_Dbg( p_demux, "raw video, pixel format: %i", cp->format ); if( GetVlcChroma( &es_fmt.video, cp->format ) != VLC_SUCCESS) { msg_Err( p_demux, "was unable to find a FourCC match for raw video" ); } else es_fmt.i_codec = es_fmt.video.i_chroma; } /* We need this for the h264 packetizer */ else if( cp->codec_id == AV_CODEC_ID_H264 && ( p_sys->fmt == av_find_input_format("flv") || p_sys->fmt == av_find_input_format("matroska") || p_sys->fmt == av_find_input_format("mp4") ) ) es_fmt.i_original_fourcc = VLC_FOURCC( 'a', 'v', 'c', '1' ); es_fmt.video.i_width = cp->width; es_fmt.video.i_height = cp->height; es_fmt.video.i_visible_width = es_fmt.video.i_width; es_fmt.video.i_visible_height = es_fmt.video.i_height; get_rotation(&es_fmt, s); # warning FIXME: implement palette transmission psz_type = "video"; AVRational rate; #if (LIBAVUTIL_VERSION_MICRO < 100) /* libav */ # if (LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(55, 20, 0)) rate.num = s->time_base.num; rate.den = s->time_base.den; # else rate.num = s->codec->time_base.num; rate.den = s->codec->time_base.den; # endif rate.den *= __MAX( s->codec->ticks_per_frame, 1 ); #else /* ffmpeg */ rate = av_guess_frame_rate( p_sys->ic, s, NULL ); #endif if( rate.den && rate.num ) { es_fmt.video.i_frame_rate = rate.num; es_fmt.video.i_frame_rate_base = rate.den; } AVRational ar; #if (LIBAVUTIL_VERSION_MICRO < 100) /* libav */ ar.num = s->sample_aspect_ratio.num; ar.den = s->sample_aspect_ratio.den; #else ar = 
av_guess_sample_aspect_ratio( p_sys->ic, s, NULL ); #endif if( ar.num && ar.den ) { es_fmt.video.i_sar_den = ar.den; es_fmt.video.i_sar_num = ar.num; } break; case AVMEDIA_TYPE_SUBTITLE: es_format_Init( &es_fmt, SPU_ES, fcc ); es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag ); if( strncmp( p_sys->ic->iformat->name, "matroska", 8 ) == 0 && cp->codec_id == AV_CODEC_ID_DVD_SUBTITLE && cp->extradata != NULL && cp->extradata_size > 0 ) { char *psz_start; char *psz_buf = malloc( cp->extradata_size + 1); if( psz_buf != NULL ) { memcpy( psz_buf, cp->extradata , cp->extradata_size ); psz_buf[cp->extradata_size] = '\0'; psz_start = strstr( psz_buf, "size:" ); if( psz_start && vobsub_size_parse( psz_start, &es_fmt.subs.spu.i_original_frame_width, &es_fmt.subs.spu.i_original_frame_height ) == VLC_SUCCESS ) { msg_Dbg( p_demux, "original frame size: %dx%d", es_fmt.subs.spu.i_original_frame_width, es_fmt.subs.spu.i_original_frame_height ); } else { msg_Warn( p_demux, "reading original frame size failed" ); } psz_start = strstr( psz_buf, "palette:" ); if( psz_start && vobsub_palette_parse( psz_start, &es_fmt.subs.spu.palette[1] ) == VLC_SUCCESS ) { es_fmt.subs.spu.palette[0] = SPU_PALETTE_DEFINED; msg_Dbg( p_demux, "vobsub palette read" ); } else { msg_Warn( p_demux, "reading original palette failed" ); } free( psz_buf ); } } else if( cp->codec_id == AV_CODEC_ID_DVB_SUBTITLE && cp->extradata_size > 3 ) { es_fmt.subs.dvb.i_id = GetWBE( cp->extradata ) | (GetWBE( cp->extradata + 2 ) << 16); } else if( cp->codec_id == AV_CODEC_ID_MOV_TEXT ) { if( cp->extradata_size && (es_fmt.p_extra = malloc(cp->extradata_size)) ) { memcpy( es_fmt.p_extra, cp->extradata, cp->extradata_size ); es_fmt.i_extra = cp->extradata_size; } } psz_type = "subtitle"; break; default: es_format_Init( &es_fmt, UNKNOWN_ES, 0 ); es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag ); #ifdef HAVE_AVUTIL_CODEC_ATTACHMENT if( cp->codec_type == AVMEDIA_TYPE_ATTACHMENT ) { input_attachment_t 
*p_attachment; psz_type = "attachment"; if( cp->codec_id == AV_CODEC_ID_TTF ) { AVDictionaryEntry *filename = av_dict_get( s->metadata, "filename", NULL, 0 ); if( filename && filename->value ) { p_attachment = vlc_input_attachment_New( filename->value, "application/x-truetype-font", NULL, cp->extradata, (int)cp->extradata_size ); if( p_attachment ) TAB_APPEND( p_sys->i_attachments, p_sys->attachments, p_attachment ); } } else msg_Warn( p_demux, "unsupported attachment type (%u) in avformat demux", cp->codec_id ); } else #endif { if( cp->codec_type == AVMEDIA_TYPE_DATA ) psz_type = "data"; msg_Warn( p_demux, "unsupported track type (%u:%u) in avformat demux", cp->codec_type, cp->codec_id ); } break; } AVDictionaryEntry *language = av_dict_get( s->metadata, "language", NULL, 0 ); if ( language && language->value ) es_fmt.psz_language = strdup( language->value ); if( s->disposition & AV_DISPOSITION_DEFAULT ) es_fmt.i_priority = ES_PRIORITY_SELECTABLE_MIN + 1000; #ifdef HAVE_AVUTIL_CODEC_ATTACHMENT if( cp->codec_type != AVMEDIA_TYPE_ATTACHMENT ) #endif if( cp->codec_type != AVMEDIA_TYPE_DATA ) { const bool b_ogg = !strcmp( p_sys->fmt->name, "ogg" ); const uint8_t *p_extra = cp->extradata; unsigned i_extra = cp->extradata_size; if( cp->codec_id == AV_CODEC_ID_THEORA && b_ogg ) { unsigned pi_size[3]; const void *pp_data[3]; unsigned i_count; for( i_count = 0; i_count < 3; i_count++ ) { if( i_extra < 2 ) break; pi_size[i_count] = GetWBE( p_extra ); pp_data[i_count] = &p_extra[2]; if( i_extra < pi_size[i_count] + 2 ) break; p_extra += 2 + pi_size[i_count]; i_extra -= 2 + pi_size[i_count]; } if( i_count > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra, pi_size, pp_data, i_count ) ) { es_fmt.i_extra = 0; es_fmt.p_extra = NULL; } } else if( cp->codec_id == AV_CODEC_ID_SPEEX && b_ogg ) { const uint8_t p_dummy_comment[] = { 0, 0, 0, 0, 0, 0, 0, 0, }; unsigned pi_size[2]; const void *pp_data[2]; pi_size[0] = i_extra; pp_data[0] = p_extra; pi_size[1] = 
sizeof(p_dummy_comment); pp_data[1] = p_dummy_comment; if( pi_size[0] > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra, pi_size, pp_data, 2 ) ) { es_fmt.i_extra = 0; es_fmt.p_extra = NULL; } } else if( cp->codec_id == AV_CODEC_ID_OPUS ) { const uint8_t p_dummy_comment[] = { 'O', 'p', 'u', 's', 'T', 'a', 'g', 's', 0, 0, 0, 0, /* Vendor String length */ /* Vendor String */ 0, 0, 0, 0, /* User Comment List Length */ }; unsigned pi_size[2]; const void *pp_data[2]; pi_size[0] = i_extra; pp_data[0] = p_extra; pi_size[1] = sizeof(p_dummy_comment); pp_data[1] = p_dummy_comment; if( pi_size[0] > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra, pi_size, pp_data, 2 ) ) { es_fmt.i_extra = 0; es_fmt.p_extra = NULL; } } else if( cp->extradata_size > 0 && !es_fmt.i_extra ) { es_fmt.p_extra = malloc( i_extra ); if( es_fmt.p_extra ) { es_fmt.i_extra = i_extra; memcpy( es_fmt.p_extra, p_extra, i_extra ); } } p_track->p_es = es_out_Add( p_demux->out, &es_fmt ); if( p_track->p_es && (s->disposition & AV_DISPOSITION_DEFAULT) ) es_out_Control( p_demux->out, ES_OUT_SET_ES_DEFAULT, p_track->p_es ); msg_Dbg( p_demux, "adding es: %s codec = %4.4s (%d)", psz_type, (char*)&fcc, cp->codec_id ); } es_format_Clean( &es_fmt ); } if( p_sys->ic->start_time != (int64_t)AV_NOPTS_VALUE ) i_start_time = FROM_AV_TS(p_sys->ic->start_time); msg_Dbg( p_demux, "AVFormat(%s %s) supported stream", AVPROVIDER(LIBAVFORMAT), LIBAVFORMAT_IDENT ); msg_Dbg( p_demux, " - format = %s (%s)", p_sys->fmt->name, p_sys->fmt->long_name ); msg_Dbg( p_demux, " - start time = %"PRId64, i_start_time ); msg_Dbg( p_demux, " - duration = %"PRId64, ( p_sys->ic->duration != (int64_t)AV_NOPTS_VALUE ) ? 
FROM_AV_TS(p_sys->ic->duration) : -1 ); if( p_sys->ic->nb_chapters > 0 ) { p_sys->p_title = vlc_input_title_New(); p_sys->p_title->i_length = FROM_AV_TS(p_sys->ic->duration); } for( unsigned i = 0; i < p_sys->ic->nb_chapters; i++ ) { seekpoint_t *s = vlc_seekpoint_New(); AVDictionaryEntry *title = av_dict_get( p_sys->ic->metadata, "title", NULL, 0); if( title && title->value ) { s->psz_name = strdup( title->value ); EnsureUTF8( s->psz_name ); msg_Dbg( p_demux, " - chapter %d: %s", i, s->psz_name ); } s->i_time_offset = vlc_tick_from_samples( p_sys->ic->chapters[i]->start * p_sys->ic->chapters[i]->time_base.num, p_sys->ic->chapters[i]->time_base.den ) - (i_start_time != VLC_TICK_INVALID ? i_start_time : 0 ); TAB_APPEND( p_sys->p_title->i_seekpoint, p_sys->p_title->seekpoint, s ); } ResetTime( p_demux, 0 ); return VLC_SUCCESS; }
/**
 * Walk an AVInputFormat's codec-tag tables and register a media_format
 * (in the given format family) with BMediaFormats for each usable tag,
 * appending each generated format to gAVCodecFormats.
 *
 * @param family  media_format_family used for the format description.
 * @param avname  short name of the libavformat input format to look up.
 * @param index   in/out slot counter into gAVCodecFormats; advanced per entry.
 * @return B_OK on success, B_MEDIA_NO_HANDLER if the input format is
 *         unknown, B_ERROR on BMediaFormats failures.
 */
status_t register_avcodec_tags(media_format_family family, const char *avname, int &index)
{
    AVInputFormat *inputFormat = av_find_input_format(avname);
    if (inputFormat == NULL)
        return B_MEDIA_NO_HANDLER;

    BMediaFormats mediaFormats;
    if (mediaFormats.InitCheck() != B_OK)
        return B_ERROR;

    // codec_tag is a NULL-terminated array of tag tables.
    for (int tagSet = 0; inputFormat->codec_tag[tagSet]; tagSet++) {
        const AVCodecTag *tags = inputFormat->codec_tag[tagSet];
        if (tags == NULL)
            continue;

        for (; tags->id != CODEC_ID_NONE; tags++) {
            // XXX: we might want to keep some strange PCM codecs too...
            // skip unwanted codec tags
            // NOTE(review): these comparisons test tags->tag (the container
            // fourcc) against CODEC_ID_* constants; the loop condition uses
            // tags->id. Presumably the id was meant here — verify against
            // upstream before changing.
            if (tags->tag == CODEC_ID_RAWVIDEO
                || (tags->tag >= CODEC_ID_PCM_S16LE
                    && tags->tag < CODEC_ID_ADPCM_IMA_QT)
                || tags->tag >= CODEC_ID_DVD_SUBTITLE)
                continue;

            if (index >= sMaxFormatCount) {
                fprintf(stderr, "Maximum format count reached for auto-generated "
                    "AVCodec to media_format mapping, but there are still more "
                    "AVCodecs compiled into libavcodec!\n");
                break;
            }
            media_format format;
            // Determine media type
            // NOTE(review): same tag-vs-id concern as above applies to this
            // video/audio split.
            if (tags->tag < CODEC_ID_PCM_S16LE)
                format.type = B_MEDIA_ENCODED_VIDEO;
            else
                format.type = B_MEDIA_ENCODED_AUDIO;

            media_format_description description;
            memset(&description, 0, sizeof(description));
            // Use the caller-supplied format family so lookups later match
            // the family the tag tables came from.
            description.family = family;
            switch (family) {
                case B_AIFF_FORMAT_FAMILY:
                    description.u.aiff.codec = tags->tag;
                    break;
                case B_AVI_FORMAT_FAMILY:
                    description.u.avi.codec = tags->tag;
                    break;
                case B_MPEG_FORMAT_FAMILY:
                    description.u.mpeg.id = tags->tag;
                    break;
                case B_QUICKTIME_FORMAT_FAMILY:
                    description.u.quicktime.codec = tags->tag;
                    break;
                case B_WAV_FORMAT_FAMILY:
                    description.u.wav.codec = tags->tag;
                    break;
                default:
                    break;
            }

            format.require_flags = 0;
            format.deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

            if (mediaFormats.MakeFormatFor(&description, 1, &format) != B_OK)
                return B_ERROR;

            gAVCodecFormats[index] = format;
            index++;
        }
    }
    return B_OK;
}
//TODO: handle error SoundSourceFFmpeg::SoundSourceFFmpeg(QString qFilename) : SoundSource(qFilename) { AVFormatParameters param; int i; QByteArray fname; packet.data = NULL; bufferOffset = 0; bufferSize = 0; memset(buffer, 0, AVCODEC_MAX_AUDIO_FRAME_SIZE); fname = qFilename.toLatin1(); FFmpegInit(); qDebug() << "New SoundSourceFFmpeg :" << fname; /* initialize param to something so av_open_input_file works for raw */ memset(¶m, 0, sizeof(AVFormatParameters)); param.channels = 2; param.sample_rate = 44100; iformat = av_find_input_format(fname.constData()); // Open audio file if(av_open_input_file(&pFormatCtx, fname.constData(), iformat, 0, ¶m)!=0) { qDebug() << "av_open_input_file: cannot open" << fname; return; } // Retrieve stream information if(av_find_stream_info(pFormatCtx)<0) { qDebug() << "av_find_stream_info: cannot open" << fname; return; } //debug only dump_format(pFormatCtx, 0, fname.constData(), false); qDebug() << "ffmpeg: using the first audio stream available"; // Find the first video stream audioStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO) { audioStream=i; break; } if(audioStream==-1) { qDebug() << "cannot find an audio stream: cannot open" << fname; return; } // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[audioStream]->codec; // Find the decoder for the audio stream if(!(pCodec=avcodec_find_decoder(pCodecCtx->codec_id))) { qDebug() << "cannot find a decoder for" << fname; return; } qDebug() << "ffmpeg: opening the audio codec"; //avcodec_open is not thread safe lock(); if(avcodec_open(pCodecCtx, pCodec)<0) { qDebug() << "avcodec: cannot open" << fname; return; } unlock(); pFrame=avcodec_alloc_frame(); channels = pCodecCtx->channels; SRATE = pCodecCtx->sample_rate; qDebug() << "Samplerate: " << SRATE << ", Channels: " << channels << "\n"; if(channels > 2){ qDebug() << "ffmpeg: No support for more than 2 channels!"; return; } filelength = 
(long int) ((double)pFormatCtx->duration * 2 / AV_TIME_BASE * SRATE); qDebug() << "ffmpeg: filelength: " << filelength << "d -|- duration: " << pFormatCtx->duration << "ld -- starttime: " << pFormatCtx->streams[audioStream]->start_time << "ld -- " << AV_TIME_BASE << " " << pFormatCtx->streams[audioStream]->codec_info_duration << "ld"; }
/**
 * Opens the "Integrated Camera" DirectShow device for capture, opens its
 * audio/video decoders, and sets up an H.264/FLV RTMP output stream plus
 * the YUYV422 -> YUV420P conversion context used per frame.
 */
DSCapture::DSCapture()
{
    int ret;

    av_register_all();
    avdevice_register_all();
    avformat_network_init();

    AVInputFormat *a = av_find_input_format("dshow");

    // Open the capture device
    m_pInputFormatContext = NULL;
    if ((ret = avformat_open_input(&m_pInputFormatContext,
                                   "video=Integrated Camera", a, NULL)) != 0)
    {
        // FIX: the original left this branch empty and fell through with a
        // NULL context, crashing in av_find_stream_info below.
        return;
    }

    ::av_find_stream_info(m_pInputFormatContext);

    m_nInputAudioStreamIndex = ::av_find_best_stream(m_pInputFormatContext,
        AVMEDIA_TYPE_AUDIO, -1, -1, &m_pInputAudioCodec, 0);
    m_nInputVideoStreamIndex = ::av_find_best_stream(m_pInputFormatContext,
        AVMEDIA_TYPE_VIDEO, -1, -1, &m_pInputVideoCodec, 0);

    if (m_nInputAudioStreamIndex >= 0)
    {
        m_pInputAudioCodecContext =
            m_pInputFormatContext->streams[m_nInputAudioStreamIndex]->codec;
        ::avcodec_open2(m_pInputAudioCodecContext, m_pInputAudioCodec, NULL);
    }
    if (m_nInputVideoStreamIndex >= 0)
    {
        m_pInputVideoCodecContext =
            m_pInputFormatContext->streams[m_nInputVideoStreamIndex]->codec;
        ::avcodec_open2(m_pInputVideoCodecContext, m_pInputVideoCodec, NULL);
    }

    // Output: H.264 in FLV, pushed over RTMP.
    ret = ::avformat_alloc_output_context2(&m_pOutputFormatContext, NULL, "flv",
                                           "rtmp://127.0.0.1:8080/live/live1");
    m_pOutputVideoCodec = ::avcodec_find_encoder(AV_CODEC_ID_H264);
    m_pOutputVideoStream = ::av_new_stream(m_pOutputFormatContext, 0);
    m_pOutputVideoCodecContext = m_pOutputVideoStream->codec;
    m_pOutputVideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
    m_pOutputVideoCodecContext->width = 320;
    m_pOutputVideoCodecContext->height = 240;
    m_pOutputVideoCodecContext->time_base.num = 1;
    m_pOutputVideoCodecContext->time_base.den = 25;
    m_pOutputVideoCodecContext->gop_size = 12;
    m_pOutputVideoCodecContext->bit_rate = 125000;
    m_pOutputVideoCodecContext->me_range = 16;
    m_pOutputVideoCodecContext->max_qdiff = 4;
    m_pOutputVideoCodecContext->qmax = 15;
    m_pOutputVideoCodecContext->qmin = 10;
    m_pOutputVideoCodecContext->qcompress = 0.6;
    m_pOutputVideoCodecContext->profile = FF_PROFILE_H264_BASELINE;

    // FLV needs SPS/PPS in global extradata rather than in-band.
    // FIX: the original repeated this check a second time but tested the
    // codec-context flags against AVFMT_GLOBALHEADER (a muxer flag in a
    // different flag namespace) — a meaningless, redundant comparison that
    // has been removed.
    if (m_pOutputFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
        m_pOutputVideoCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

    ret = ::avcodec_open2(m_pOutputVideoCodecContext, m_pOutputVideoCodec, NULL);
    ret = ::avio_open(&m_pOutputFormatContext->pb,
                      "rtmp://127.0.0.1:8080/live/live1", AVIO_FLAG_WRITE);
    ::avformat_write_header(m_pOutputFormatContext, NULL);

    // Frame to hold the decoded data.
    m_pFrame = ::avcodec_alloc_frame();
    ret = avpicture_alloc((AVPicture*)m_pFrame, AV_PIX_FMT_YUV420P, 640, 480);

    // Init the sws context: camera YUYV422 -> encoder YUV420P.
    m_pSwsContext = sws_getContext(640, 480, AV_PIX_FMT_YUYV422,
                                   640, 480, AV_PIX_FMT_YUV420P,
                                   SWS_BICUBIC, NULL, NULL, NULL);
    m_pMidFrame = ::avcodec_alloc_frame();
    ret = avpicture_alloc((AVPicture*)m_pMidFrame, AV_PIX_FMT_YUV420P, 640, 480);

    m_nBasePTS = 0;
}
int main(int argc, char **argv) { // General int bRet, ix; // Input const char *sFile = "/dev/video0"; AVFormatContext *pIFormatCtx; AVCodecContext *pICodecCtx; AVCodec *pICodec; AVFrame *pFrame; int ixInputStream = -1; AVInputFormat *pIFormat; AVPacket oPacket; int fFrame = 0; // Output AVCodecContext *pOCodecCtx; AVCodec *pOCodec; uint8_t *pBuffer; int szBuffer; int szBufferActual; int bImgFormat = PIX_FMT_YUVJ420P; int bQuality = 3; FILE *fdJPEG; // Prepare ffmpeg library av_register_all(); avdevice_register_all(); // Open video stream pIFormat = av_find_input_format("video4linux2"); bRet = av_open_input_file(&pIFormatCtx, sFile, pIFormat, 0, NULL); printf("Abertura retornou %d\n",bRet); if (bRet != 0) { fprintf(stderr, "Could not open file\n"); return 1; } /* Retrieve stream information */ if (av_find_stream_info(pIFormatCtx) < 0) { fprintf(stderr, "No stream info\n"); return 1; } /* Find the first video stream */ ixInputStream = -1; for (ix = 0; ix < pIFormatCtx->nb_streams; ix++) { if (pIFormatCtx->streams[ix]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { ixInputStream = ix; break; } } if (ixInputStream == -1) { fprintf(stderr, "No video stream in file\n"); return 1; } /* Get a pointer to the codec context for the video stream */ pICodecCtx = pIFormatCtx->streams[ixInputStream]->codec; /* Find the decoder for the video stream */ pICodec = avcodec_find_decoder(pICodecCtx->codec_id); if (!pICodec) { fprintf(stderr, "Codec not found\n"); return 1; } /* Open input codec */ if (avcodec_open(pICodecCtx, pICodec) < 0) { fprintf(stderr, "Could not open codec\n"); return 1; } /* Allocate video frame */ pFrame = avcodec_alloc_frame(); /* Determine required buffer size and allocate buffer */ szBuffer = avpicture_get_size(bImgFormat, pICodecCtx->width, pICodecCtx->height); pBuffer = av_mallocz(szBuffer); /* Allocate Output Codec */ pOCodecCtx = avcodec_alloc_context(); if (!pOCodecCtx) { fprintf(stderr, "Could not allocate codec\n"); return 1; } /* Initialize picture size 
and other format parameters */ pOCodecCtx->bit_rate = pICodecCtx->bit_rate; pOCodecCtx->width = pICodecCtx->width; pOCodecCtx->height = pICodecCtx->height; pOCodecCtx->pix_fmt = bImgFormat; pOCodecCtx->codec_id = CODEC_ID_MJPEG; pOCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; pOCodecCtx->time_base.num = pICodecCtx->time_base.num; pOCodecCtx->time_base.den = pICodecCtx->time_base.den; /* Allocate codec for JPEG */ pOCodec = avcodec_find_encoder(pOCodecCtx->codec_id); if (!pOCodec) { fprintf(stderr, "Codec not found\n"); return 1; } if (avcodec_open(pOCodecCtx, pOCodec) < 0) { fprintf(stderr, "Could not open codec\n"); return 1; } /* Initialize all VBR settings */ pOCodecCtx->qmin = pOCodecCtx->qmax = bQuality; pOCodecCtx->mb_lmin = pOCodecCtx->lmin = pOCodecCtx->qmin * FF_QP2LAMBDA; pOCodecCtx->mb_lmax = pOCodecCtx->lmax = pOCodecCtx->qmax * FF_QP2LAMBDA; pOCodecCtx->flags |= CODEC_FLAG_QSCALE; pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA; /* Get 1 frame */ bRet = av_read_frame(pIFormatCtx, &oPacket); if (bRet < 0) { fprintf(stderr, "Could not read frame\n"); return 1; } if (oPacket.stream_index != ixInputStream) { fprintf(stderr, "Packet is not for our stream\n"); return 1; } /* Decode video frame */ avcodec_decode_video2(pICodecCtx, pFrame, &fFrame,&oPacket); av_free_packet(&oPacket); if (!fFrame) { fprintf(stderr, "Error reading frame\n"); return 1; } /* Encode the frame as a JPEG using certain quality settings */ pFrame->pts = 1; pFrame->quality = pOCodecCtx->global_quality; szBufferActual = avcodec_encode_video(pOCodecCtx, pBuffer, szBuffer, pFrame); /* Write JPEG to file */ fdJPEG = fopen("test.jpg", "wb"); bRet = fwrite(pBuffer, sizeof(uint8_t), szBufferActual, fdJPEG); fclose(fdJPEG); if (bRet != szBufferActual) { fprintf(stderr, "Error writing jpeg file\n"); return 1; } /* Cleanup */ if (pBuffer) { av_freep(&pBuffer); pBuffer = NULL; szBuffer = 0; } if (pFrame) { av_freep(&pFrame); pFrame = NULL; } if (pICodecCtx) { avcodec_close(pICodecCtx); 
pICodecCtx = NULL; } if (pOCodecCtx) { avcodec_close(pOCodecCtx); pOCodecCtx = NULL; } if (pIFormatCtx) { av_close_input_file(pIFormatCtx); pIFormatCtx = NULL; } return 0; }
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize, unsigned int width, unsigned int height) { uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE); if (!fbuffer) { CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE"); return false; } MemBuffer buf; buf.data = buffer; buf.size = bufSize; buf.pos = 0; AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf, mem_file_read, NULL, mem_file_seek); if (!ioctx) { av_free(fbuffer); CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext"); return false; } AVFormatContext* fctx = avformat_alloc_context(); if (!fctx) { av_free(ioctx->buffer); av_free(ioctx); CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext"); return false; } fctx->pb = ioctx; ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE; // Some clients have pngs saved as jpeg or ask us for png but are jpeg // mythv throws all mimetypes away and asks us with application/octet-stream // this is poor man's fallback to at least identify png / jpeg bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF); bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G'); bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*'); AVInputFormat* inp = nullptr; if (is_jpeg) inp = av_find_input_format("jpeg_pipe"); else if (is_png) inp = av_find_input_format("png_pipe"); else if (is_tiff) inp = av_find_input_format("tiff_pipe"); else if (m_strMimeType == "image/jp2") inp = av_find_input_format("j2k_pipe"); else if (m_strMimeType == "image/webp") inp = av_find_input_format("webp_pipe"); // brute force parse if above check already failed else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg") inp = av_find_input_format("jpeg_pipe"); else if (m_strMimeType == "image/png") inp = av_find_input_format("png_pipe"); else if 
(m_strMimeType == "image/tiff") inp = av_find_input_format("tiff_pipe"); if (avformat_open_input(&fctx, "", inp, NULL) < 0) { CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str()); avformat_close_input(&fctx); FreeIOCtx(ioctx); return false; } AVCodecContext* codec_ctx = fctx->streams[0]->codec; AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id); if (avcodec_open2(codec_ctx, codec, NULL) < 0) { avformat_close_input(&fctx); FreeIOCtx(ioctx); return false; } AVPacket pkt; AVFrame* frame = av_frame_alloc(); av_read_frame(fctx, &pkt); int frame_decoded; int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt); if (ret < 0) CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret))); if (frame_decoded != 0) { av_frame_free(&m_pFrame); m_pFrame = av_frame_clone(frame); if (m_pFrame) { m_height = m_pFrame->height; m_width = m_pFrame->width; m_originalWidth = m_width; m_originalHeight = m_height; const AVPixFmtDescriptor* pixDescriptor = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(m_pFrame->format)); if (pixDescriptor && ((pixDescriptor->flags & AV_PIX_FMT_FLAG_ALPHA) != 0)) m_hasAlpha = true; } else { CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer"); frame_decoded = 0; } } else CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame"); av_frame_free(&frame); av_free_packet(&pkt); avcodec_close(codec_ctx); avformat_close_input(&fctx); FreeIOCtx(ioctx); return (frame_decoded != 0); }
/**
 * Decide whether the FFmpeg demuxer plugin should handle the given URL.
 *
 * Rejects streaming protocols and file extensions handled natively by GPAC,
 * then probes the file with libavformat; if at least one audio or video
 * stream is present, registers the corresponding MIME/extension mapping and
 * returns 1. Returns 0 when the URL should be left to other plugins.
 */
static Bool FFD_CanHandleURL(GF_InputService *plug, const char *url)
{
	Bool has_audio, has_video;
	s32 i;
	AVFormatContext *ctx;
	AVOutputFormat *fmt_out;
	Bool ret = 0;
	char *ext, szName[1000], szExt[20];
	const char *szExtList;
	if (!plug || !url) return 0;
	/*disable RTP/RTSP from ffmpeg*/
	if (!strnicmp(url, "rtsp://", 7)) return 0;
	if (!strnicmp(url, "rtspu://", 8)) return 0;
	if (!strnicmp(url, "rtp://", 6)) return 0;
	if (!strnicmp(url, "plato://", 8)) return 0;
	if (!strnicmp(url, "udp://", 6)) return 0;
	if (!strnicmp(url, "tcp://", 6)) return 0;
	if (!strnicmp(url, "data:", 5)) return 0;

	/* Strip fragment/query, then isolate the extension (max 19 chars). */
	strcpy(szName, url);
	ext = strrchr(szName, '#');
	if (ext) ext[0] = 0;
	ext = strrchr(szName, '?');
	if (ext) ext[0] = 0;
	ext = strrchr(szName, '.');
	if (ext && strlen(ext) > 19) ext = NULL;

	if (ext && strlen(ext) > 1) {
		strcpy(szExt, &ext[1]);
		strlwr(szExt);
#ifndef FFMPEG_DEMUX_ENABLE_MPEG2TS
		if (!strcmp(szExt, "ts")) return 0;
#endif
		/*note we forbid ffmpeg to handle files we support*/
		/* NOTE(review): "m4a" appears twice in this list — presumably one of
		   the duplicates was meant to be another extension; verify upstream. */
		if (!strcmp(szExt, "mp4") || !strcmp(szExt, "mpg4") || !strcmp(szExt, "m4a") || !strcmp(szExt, "m21")
		        || !strcmp(szExt, "m4v") || !strcmp(szExt, "m4a") || !strcmp(szExt, "m4s") || !strcmp(szExt, "3gs")
		        || !strcmp(szExt, "3gp") || !strcmp(szExt, "3gpp") || !strcmp(szExt, "3gp2") || !strcmp(szExt, "3g2")
		        || !strcmp(szExt, "mp3") || !strcmp(szExt, "ac3") || !strcmp(szExt, "amr")
		        || !strcmp(szExt, "bt") || !strcmp(szExt, "wrl") || !strcmp(szExt, "x3dv")
		        || !strcmp(szExt, "xmt") || !strcmp(szExt, "xmta") || !strcmp(szExt, "x3d")
		        || !strcmp(szExt, "jpg") || !strcmp(szExt, "jpeg") || !strcmp(szExt, "png")
		   ) return 0;

		/*check any default stuff that should work with ffmpeg*/
		{
			u32 i;
			for (i = 0 ; FFD_MIME_TYPES[i]; i+=3) {
				if (gf_service_check_mime_register(plug, FFD_MIME_TYPES[i], FFD_MIME_TYPES[i+1], FFD_MIME_TYPES[i+2], ext))
					return 1;
			}
		}
	}

	ctx = NULL;
	if (open_file(&ctx, szName, NULL)<0) {
		AVInputFormat *av_in = NULL;;
		/*some extensions not supported by ffmpeg*/
		if (ext && !strcmp(szExt, "cmp")) av_in = av_find_input_format("m4v");

		if (open_file(&ctx, szName, av_in)<0) {
			return 0;
		}
	}

	if (!ctx || av_find_stream_info(ctx) <0) goto exit;

	/*figure out if we can use codecs or not*/
	has_audio = has_video = 0;
	for(i = 0; i < (s32)ctx->nb_streams; i++) {
		AVCodecContext *enc = ctx->streams[i]->codec;
		switch(enc->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if (!has_audio) has_audio = 1;
			break;
		case AVMEDIA_TYPE_VIDEO:
			if (!has_video) has_video= 1;
			break;
		default:
			break;
		}
	}
	if (!has_audio && !has_video) goto exit;
	ret = 1;

	/* Register a MIME mapping so the service layer remembers we handle it. */
#if ((LIBAVFORMAT_VERSION_MAJOR == 52) && (LIBAVFORMAT_VERSION_MINOR <= 47)) || (LIBAVFORMAT_VERSION_MAJOR < 52)
	fmt_out = guess_stream_format(NULL, url, NULL);
#else
	fmt_out = av_guess_format(NULL, url, NULL);
#endif
	if (fmt_out) gf_service_register_mime(plug, fmt_out->mime_type, fmt_out->extensions, fmt_out->name);
	else {
		ext = strrchr(szName, '.');
		if (ext) {
			strcpy(szExt, &ext[1]);
			strlwr(szExt);

			szExtList = gf_modules_get_option((GF_BaseInterface *)plug, "MimeTypes", "application/x-ffmpeg");
			if (!szExtList) {
				gf_service_register_mime(plug, "application/x-ffmpeg", szExt, "Other Movies (FFMPEG)");
			} else if (!strstr(szExtList, szExt)) {
				/* Append this extension to the stored quoted list. */
				u32 len;
				char *buf;
				len = (u32) (strlen(szExtList) + strlen(szExt) + 10);
				buf = gf_malloc(sizeof(char)*len);
				sprintf(buf, "\"%s ", szExt);
				strcat(buf, &szExtList[1]);
				gf_modules_set_option((GF_BaseInterface *)plug, "MimeTypes", "application/x-ffmpeg", buf);
				gf_free(buf);
			}
		}
	}

exit:
	/* NOTE(review): FF_API_CLOSE_INPUT_FILE normally means the OLD
	   av_close_input_file API is still available, so this #ifndef polarity
	   looks inverted (old call taken when the macro is absent) — confirm
	   against the targeted FFmpeg versions before changing. */
#ifndef FF_API_CLOSE_INPUT_FILE
	if (ctx) av_close_input_file(ctx);
#else
	if (ctx) avformat_close_input(&ctx);
#endif
	return ret;
}
/*
 * Audio capture thread.
 * Grabs audio from a DirectShow capture device, decodes it, resamples it to
 * the SDL output format (S16), feeds it to SDL for playback and dumps raw PCM
 * to "test.pcm" for inspection.
 *
 * lpParam : the owning CLS_DlgStreamPusher dialog (carries m_pStreamInfo).
 * Returns 1 on success, -1 on failure.
 */
int audio_thr(LPVOID lpParam)
{
	int iRet = -1;
	// Audio test: play the captured stream and display the waveform.
	AVFormatContext *pFmtCtx = NULL;
	AVFormatContext *pFOutmtCtx = NULL;      // unused in this function
	AVInputFormat *pAudioInputFmt = NULL;
	AVOutputFormat *pAudioOutputFmt = NULL;  // unused in this function
	AVCodecContext *pOutputCodecCtx = NULL;
	AVPacket *pAudioPacket = NULL;           // unused in this function
	int iAudioIndex = -1;
	int data_size = 0;
	int resampled_data_size = 0;
	uint8_t *out_buffer = 0;
	int64_t dec_channel_layout = 0;
	double pts;
	CLS_DlgStreamPusher* pThis = (CLS_DlgStreamPusher*)lpParam;
	if (pThis == NULL || pThis->m_pStreamInfo == NULL){
		TRACE("audio_thr--pThis == NULL || pThis->m_pStreamInfo == NULL\n");
		return iRet;
	}
	struct_stream_info* strct_stream_info = pThis->m_pStreamInfo;

	// DirectShow is the only capture backend used here (Windows build).
	pAudioInputFmt = av_find_input_format("dshow");
	if (pAudioInputFmt == NULL){
		TRACE("pAudioInputFmt == NULL\n");
		return iRet;
	}

	char* psDevName = pThis->GetDeviceName(n_Audio);
	if (psDevName == NULL){
		TRACE("audio_thr--psDevName == NULL");
		return iRet;
	}

	if (avformat_open_input(&pFmtCtx, psDevName, pAudioInputFmt, NULL) != 0){
		TRACE("avformat_open_input err!\n");
		goto END;
	}
	if (avformat_find_stream_info(pFmtCtx, NULL) < 0){
		TRACE("avformat_find_stream_info(pFmtCtx, NULL) < 0\n");
		goto END;
	}
	// Find the first audio stream and open its decoder.
	for (int i = 0; i < pFmtCtx->nb_streams; i++){
		if (pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
			iAudioIndex = i;
			AVCodec *tmpCodec = avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
			if (0 > avcodec_open2(pFmtCtx->streams[i]->codec, tmpCodec, NULL)){
				TRACE("can not find or open decoder!\n");
			}
			break;
		}
	}

	// Remember the audio stream.
	// NOTE(review): if no audio stream was found, iAudioIndex is still -1 and
	// this indexes out of bounds — verify upstream guarantees an audio stream.
	strct_stream_info->m_pAudioStream = pFmtCtx->streams[iAudioIndex];
	if (strct_stream_info->m_pAudioStream == NULL){
		TRACE("strct_stream_info->m_pAudioStream == NULL\n");
		goto END;
	}

	AVCodecContext *pAudioDec = strct_stream_info->m_pAudioStream->codec;
	if (NULL == pAudioDec){
		TRACE("NULL == pAudioDec\n");
		goto END;
	}

	// AAC encoder is opened only to derive the target sample format/layout.
	AVCodec* audio_encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
	if (audio_encoder == NULL){
		TRACE("audio_encoder == NULL\r\n");
		goto END;
	}
	pOutputCodecCtx = avcodec_alloc_context3(audio_encoder);
	if (pOutputCodecCtx == NULL){
		TRACE("pOutputCodecCtx == NULL");
		goto END;
	}

	pOutputCodecCtx->sample_rate = pFmtCtx->streams[0]->codec->sample_rate;
	pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
	pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);
	pOutputCodecCtx->sample_fmt = audio_encoder->sample_fmts[0];
	pOutputCodecCtx->codec = audio_encoder;
	pOutputCodecCtx->codec_tag = 0;
	if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0){
		// Encoder failed to open: abort the thread.
		TRACE("音频编码器打开失败!\n");
		goto END;
	}

	//SDL_AudioSpec
	int out_nb_samples = AUDIO_BUF_SIZE;
	AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
	int out_buffer_size = av_samples_get_buffer_size(NULL, pOutputCodecCtx->channels, out_nb_samples, out_sample_fmt, 1);

	SDL_AudioSpec wanted_spec, spec;
	wanted_spec.freq = pOutputCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = pOutputCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = out_nb_samples;
	wanted_spec.callback = fill_audio;//&CLS_DlgStreamPusher::
	wanted_spec.userdata = strct_stream_info;
	strct_stream_info->m_content_out_channels = pOutputCodecCtx->channels;

	if (SDL_OpenAudio(&wanted_spec, &spec)<0){
		TRACE("can't open audio.\n");
		goto END;
	}

	int audio_hw_buf_size = spec.size;
	if (audio_hw_buf_size < 0){
		TRACE("audio_hw_buf_size < 0\n");
		return -1;   // NOTE(review): bypasses the END cleanup (SDL/codec/buffers)
	}
	// Record the source format; target == source initially.
	strct_stream_info->m_audio_src.fmt = AV_SAMPLE_FMT_S16;
	strct_stream_info->m_audio_src.freq = spec.freq;
	strct_stream_info->m_audio_src.channel_layout = pOutputCodecCtx->channel_layout;
	strct_stream_info->m_audio_src.channels = spec.channels;
	strct_stream_info->m_audio_hw_buf_size = audio_hw_buf_size;
	strct_stream_info->m_audio_tgt = strct_stream_info->m_audio_src;

	AVPacket pkt;
	out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
	strct_stream_info->m_audio_refresh_tid = SDL_CreateThread(audio_refresh_thread, NULL, strct_stream_info);

	// Main capture loop: read -> decode -> resample -> hand to SDL.
	while (av_read_frame(pFmtCtx, &pkt) == 0 && _kbhit() == 0){
		if (!pThis->m_blAudioShow){
			break;
		}
		if (pkt.stream_index != iAudioIndex){
			// NOTE(review): pkt is leaked here — av_free_packet() is skipped.
			continue;
		}

		if (!strct_stream_info->m_pAudioFrame) {
			if (!(strct_stream_info->m_pAudioFrame = avcodec_alloc_frame())){
				TRACE("!(strct_stream_info->m_pAudioFrame = avcodec_alloc_frame())\n");
				goto END;
			}
		}
		else{
			avcodec_get_frame_defaults(strct_stream_info->m_pAudioFrame);
		}

		int gotframe = -1;
		// NOTE(review): this unconditionally replaces the frame allocated just
		// above, leaking it every iteration — verify intent.
		strct_stream_info->m_pAudioFrame = av_frame_alloc();
		if (avcodec_decode_audio4(pAudioDec, strct_stream_info->m_pAudioFrame, &gotframe, &pkt) < 0){
			av_frame_free(&strct_stream_info->m_pAudioFrame);
			TRACE("can not decoder a frame\n");
			break;
		}
		av_free_packet(&pkt);

		if (!gotframe){
			// Decoder produced no frame yet; feed the next packet.
			continue;
		}

		strct_stream_info->m_pAudioFrame->nb_samples = 1024;// hard-coded for now
		data_size = av_samples_get_buffer_size(NULL, pOutputCodecCtx->channels, strct_stream_info->m_pAudioFrame->nb_samples, pOutputCodecCtx->sample_fmt, 1);

		dec_channel_layout =
			(pOutputCodecCtx->channel_layout && pOutputCodecCtx->channels == av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout)) ?
			pOutputCodecCtx->channel_layout : av_get_default_channel_layout(pOutputCodecCtx->channels);
		//wanted_nb_samples = SynAudio(strct_stream_info, strct_stream_info->m_pAudioFrame->nb_samples);

		// Rebuild the resampler every iteration (the format-change test is
		// commented out, so this is unconditional).
		/*if (pOutputCodecCtx->sample_fmt != strct_stream_info->m_audio_src.fmt ||
			dec_channel_layout != strct_stream_info->m_audio_src.channel_layout ||
			pOutputCodecCtx->sample_rate != strct_stream_info->m_audio_src.freq){*/
		swr_free(&strct_stream_info->m_audio_swr_ctx);
		strct_stream_info->m_audio_swr_ctx = swr_alloc_set_opts(NULL, strct_stream_info->m_audio_tgt.channel_layout, strct_stream_info->m_audio_tgt.fmt, strct_stream_info->m_audio_tgt.freq,
			dec_channel_layout, pOutputCodecCtx->sample_fmt, pOutputCodecCtx->sample_rate, 0, NULL);
		if (!strct_stream_info->m_audio_swr_ctx || swr_init(strct_stream_info->m_audio_swr_ctx) < 0){
			TRACE("!pThis->m_pStreamInfstrct_stream_infoo->m_audio_swr_ctx || swr_init(strct_stream_info->m_audio_swr_ctx) < 0");
			break;
		}
		strct_stream_info->m_audio_src.channel_layout = dec_channel_layout;
		strct_stream_info->m_audio_src.channels = pOutputCodecCtx->channels;
		strct_stream_info->m_audio_src.freq = pOutputCodecCtx->sample_rate;
		strct_stream_info->m_audio_src.fmt = pOutputCodecCtx->sample_fmt;
		//}

		if (NULL != strct_stream_info->m_audio_swr_ctx){
			const uint8_t **in = (const uint8_t **)strct_stream_info->m_pAudioFrame->extended_data;
			uint8_t *out[] = { strct_stream_info->m_audio_buf2 };
			int out_count = sizeof(strct_stream_info->m_audio_buf2) / strct_stream_info->m_audio_tgt.channels / av_get_bytes_per_sample(strct_stream_info->m_audio_tgt.fmt);
			int iRet = swr_convert(strct_stream_info->m_audio_swr_ctx, out, out_count, in, strct_stream_info->m_pAudioFrame->nb_samples);
			if (iRet < 0){
				TRACE("swr_convert < 0\n");
				break;
			}
			if (iRet == out_count) {
				// swr filled the whole output buffer: samples may have been dropped.
				TRACE("warning: audio buffer is probably too small\n");
				swr_init(strct_stream_info->m_audio_swr_ctx);
			}
			strct_stream_info->m_audio_buf = strct_stream_info->m_audio_buf2;
			resampled_data_size = iRet * strct_stream_info->m_audio_tgt.channels * av_get_bytes_per_sample(strct_stream_info->m_audio_tgt.fmt);
		}
		else{
			strct_stream_info->m_audio_buf = strct_stream_info->m_pAudioFrame->data[0];
			resampled_data_size = data_size;
		}

		/* if no pts, then compute it */
		pts = strct_stream_info->m_audio_clock;
		//*pts_ptr = pts;
		strct_stream_info->m_audio_clock += (double)data_size / (pAudioDec->channels * pAudioDec->sample_rate * av_get_bytes_per_sample(pAudioDec->sample_fmt));
#ifdef DEBUG
		{
			static double last_clock;
			/*printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
				is->audio_clock - last_clock,
				is->audio_clock, pts);*/
			last_clock = strct_stream_info->m_audio_clock;
		}
#endif
		//FIX:FLAC,MP3,AAC Different number of samples
		/*if (wanted_spec.samples != strct_stream_info->m_pAudioFrame->nb_samples){
			SDL_CloseAudio();
			out_nb_samples = strct_stream_info->m_pAudioFrame->nb_samples;
			out_buffer_size = av_samples_get_buffer_size(NULL, pOutputCodecCtx->channels, out_nb_samples, out_sample_fmt, 1);
			wanted_spec.samples = out_nb_samples;
			SDL_OpenAudio(&wanted_spec, NULL);
		}*/

		// Hand the PCM data to the SDL audio callback.
		TRACE("----out_buffer_size---is [%ld]\n",out_buffer_size);
		audio_chunk = (Uint8 *)out_buffer;
		audio_len = out_buffer_size;
		audio_pos = audio_chunk;

		strct_stream_info->m_aduio_pkt_size = resampled_data_size;//audio_len;//
		// NOTE(review): pkt was already freed right after decoding above — this
		// second av_free_packet on the same packet is a double-free risk.
		av_free_packet(&pkt);

		// Dump raw PCM to a file for testing.
		if (1){
			FILE *p = NULL;
			fopen_s(&p, "test.pcm", "a+b");
			if (p == NULL){
				continue;
			}
			int tempLenght = 2 * strct_stream_info->m_pAudioFrame->nb_samples;// 16-bit depth, known from experiment
			uint8_t *tmpPtr = strct_stream_info->m_pAudioFrame->data[0];
			if (NULL != p)
			{
				while (tempLenght > 0)
				{
					size_t temp = fwrite(tmpPtr, 1, tempLenght, p);
					tmpPtr += temp;
					tempLenght = tempLenght - temp;
				}
				fclose(p);
			}
		}

		SDL_PauseAudio(0);
		// (Removed here for readability: a large block of commented-out
		// experiment code — wait-until-played loop, input/output format
		// comparison, and an AVAudioFifo + avcodec_encode_audio2 AAC encode
		// pipeline that was never enabled.)
	}

	iRet = 1;

END:
	//swr_free(&au_convert_ctx);
	SDL_CloseAudio();
	SDL_Quit();

	av_free(out_buffer);
	avcodec_close(pOutputCodecCtx);

	return iRet;
}
/*
 * Video capture thread.
 * Grabs video from a DirectShow device, remuxes the packets to an RTMP/FLV
 * output URL with realtime pacing, and (optionally) decodes + displays the
 * frames locally through SDL.
 *
 * lpParam : the owning CLS_DlgStreamPusher dialog.
 * Returns 1 on success, negative on failure.
 */
int video_thr(LPVOID lpParam)
{
	int iRet = -1;
	AVFormatContext *pFmtCtx = NULL;
	AVFormatContext *pRtmpFmtCtx = NULL;
	AVInputFormat *pVideoInputFmt = NULL;
	AVOutputFormat *pVideoOutfmt = NULL;
	struct_stream_info *strct_streaminfo = NULL;
	AVCodecContext *pCodecContext = NULL;
	AVCodec *pCodec = NULL;
	int iVideoIndex = -1;
	int iVideo_Height = 0;
	int iVideo_Width = 0;
	int iVideoPic = 0;
	int64_t start_time = 0;
	int frame_index = 0;
	SDL_Event event;

	CLS_DlgStreamPusher* pThis = (CLS_DlgStreamPusher*)lpParam;
	if (pThis == NULL){
		TRACE("video_thr--pThis == NULL\n");
		return iRet;
	}
	pVideoInputFmt = av_find_input_format("dshow");
	if (pVideoInputFmt == NULL){
		TRACE("pVideoInputFmt == NULL\n");
		return iRet;
	}

	char* psDevName = pThis->GetDeviceName(n_Video);
	if (psDevName == NULL){
		TRACE("video_thr--psDevName == NULL");
		return iRet;
	}

	// Wait until the UI thread has filled in the push address.
	// NOTE(review): this is a busy-wait spin loop with no sleep — consider an
	// event/condition instead.
	while (1){
		if (pThis->m_cstrPushAddr != ""){
			break;
		}
	}

	// Build the output (FLV over RTMP) context from the push address.
	avformat_alloc_output_context2(&pRtmpFmtCtx, NULL, "flv", pThis->m_cstrPushAddr);
	if (NULL == pRtmpFmtCtx){
		TRACE("NULL == pRtmpFmtCtx");
		return iRet;
	}
	pVideoOutfmt = pRtmpFmtCtx->oformat;

	strct_streaminfo = pThis->m_pStreamInfo;
	if (NULL == strct_streaminfo){
		TRACE("NULL == strct_streaminfo");
		return iRet;
	}

	// NOTE(review): fp_yuv is opened but never closed (the fclose at END is
	// commented out) — file handle leak.
	FILE *fp_yuv = fopen("output.yuv", "wb+");

	pFmtCtx = avformat_alloc_context();
	if (avformat_open_input(&pFmtCtx, psDevName, pVideoInputFmt, NULL) != 0){
		TRACE("avformat_open_input err!\n");
		goto END;
	}
	if (avformat_find_stream_info(pFmtCtx, NULL) < 0){
		TRACE("avformat_find_stream_info(pFmtCtx, NULL) < 0\n");
		goto END;
	}
	// Locate the video stream.
	for (int i = 0; i < pFmtCtx->nb_streams; i++){
		if (pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			iVideoIndex = i;
			break;
		}
	}
	if (iVideoIndex < 0){
		TRACE("iVideoIndex < 0\n");
		goto END;
	}
	pCodecContext = pFmtCtx->streams[iVideoIndex]->codec;
	if (NULL == pCodecContext){
		TRACE("NULL == pCodecContext");
		goto END;
	}
	pCodec = avcodec_find_decoder(pCodecContext->codec_id);
	if (pCodec == NULL){
		TRACE("avcodec_find_decoder<0");
		goto END;
	}
	if (avcodec_open2(pCodecContext, pCodec, NULL)<0){
		TRACE("avcodec_open2<0");
		goto END;
	}

	for (int i = 0; i < pFmtCtx->nb_streams; i++)
	{
		// Create one output AVStream per input AVStream.
		AVStream *in_stream = pFmtCtx->streams[i];
		AVStream *out_stream = avformat_new_stream(pRtmpFmtCtx, in_stream->codec->codec);
		if (!out_stream)
		{
			printf("Failed allocating output stream\n");
			iRet = AVERROR_UNKNOWN;
			goto END;
		}
		// Copy the AVCodecContext settings from input to output.
		iRet = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (iRet < 0)
		{
			TRACE("Failed to copy context from input to output stream codec context\n");
			goto END;
		}
		out_stream->codec->codec_tag = 0;
		if (pRtmpFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (!(pVideoOutfmt->flags & AVFMT_NOFILE))
	{
		iRet = avio_open(&pRtmpFmtCtx->pb, pThis->m_cstrPushAddr, AVIO_FLAG_WRITE);
		if (iRet < 0)
		{
			TRACE("Could not open output URL '%s'", pThis->m_cstrPushAddr);
			goto END;
		}
	}

	iRet = avformat_write_header(pRtmpFmtCtx, NULL);
	if (iRet < 0)
	{
		TRACE("Error occurred when opening output URL\n");
		goto END;
	}

	start_time = av_gettime();

	// Capture dimensions come from the decoder context.
	iVideo_Height = pCodecContext->height;//strct_streaminfo->m_height;//
	iVideo_Width = pCodecContext->width;//strct_streaminfo->m_width;//
	TRACE("video_thr--video_height[%d],video_width[%d]", iVideo_Height , iVideo_Width);

	strct_streaminfo->m_pVideoPacket = (AVPacket*)av_malloc(sizeof(AVPacket));

	// YUV420P conversion target for local display.
	strct_streaminfo->m_pVideoFrame = av_frame_alloc();
	strct_streaminfo->m_pVideoFrameYUV = av_frame_alloc();
	strct_streaminfo->m_pVideoOutBuffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, iVideo_Width, iVideo_Height));
	avpicture_fill((AVPicture *)strct_streaminfo->m_pVideoFrameYUV, strct_streaminfo->m_pVideoOutBuffer, AV_PIX_FMT_YUV420P, iVideo_Width, iVideo_Height);
	strct_streaminfo->m_video_sws_ctx = sws_getContext(iVideo_Width, iVideo_Height, pCodecContext->pix_fmt, iVideo_Width, iVideo_Height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	if (NULL == strct_streaminfo->m_video_sws_ctx){
		TRACE("NULL == strct_streaminfo->m_video_sws_ctx\n");
		goto END;
	}

	strct_streaminfo->m_video_refresh_tid = SDL_CreateThread(video_refresh_thread, NULL, strct_streaminfo);

	// Main loop: pull packets from the camera on each refresh event.
	for (;;){
		AVStream *in_stream, *out_stream;
		SDL_WaitEvent(&event);
		if (event.type == FF_VIDEO_REFRESH_EVENT){
			if (av_read_frame(pFmtCtx, strct_streaminfo->m_pVideoPacket) >= 0){
				if (strct_streaminfo->m_pVideoPacket->pts == AV_NOPTS_VALUE){
					//Write PTS
					AVRational time_base1 = pFmtCtx->streams[iVideoIndex]->time_base;
					//Duration between 2 frames (us)
					int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(pFmtCtx->streams[iVideoIndex]->r_frame_rate);
					//Parameters
					strct_streaminfo->m_pVideoPacket->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
					strct_streaminfo->m_pVideoPacket->dts = strct_streaminfo->m_pVideoPacket->pts;
					strct_streaminfo->m_pVideoPacket->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
				}
				if (strct_streaminfo->m_pVideoPacket->stream_index == iVideoIndex){
					// Pace output in realtime: sleep until the packet is due.
					AVRational time_base = pFmtCtx->streams[iVideoIndex]->time_base;
					AVRational time_base_q = { 1, AV_TIME_BASE };
					int64_t pts_time = av_rescale_q(strct_streaminfo->m_pVideoPacket->dts, time_base, time_base_q);
					int64_t now_time = av_gettime() - start_time;
					if (pts_time > now_time)
						av_usleep(pts_time - now_time);

					in_stream = pFmtCtx->streams[strct_streaminfo->m_pVideoPacket->stream_index];
					out_stream = pRtmpFmtCtx->streams[strct_streaminfo->m_pVideoPacket->stream_index];
					/* copy packet */
					// Rescale PTS/DTS/duration to the output time base.
					strct_streaminfo->m_pVideoPacket->pts = av_rescale_q_rnd(strct_streaminfo->m_pVideoPacket->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					strct_streaminfo->m_pVideoPacket->dts = av_rescale_q_rnd(strct_streaminfo->m_pVideoPacket->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					strct_streaminfo->m_pVideoPacket->duration = av_rescale_q(strct_streaminfo->m_pVideoPacket->duration, in_stream->time_base, out_stream->time_base);
					strct_streaminfo->m_pVideoPacket->pos = -1;

					TRACE("Send %8d video frames to output URL\n", frame_index);
					frame_index++;

					iRet = av_interleaved_write_frame(pRtmpFmtCtx, strct_streaminfo->m_pVideoPacket);
					if (iRet < 0)
					{
						TRACE("Error muxing packet\n");
						break;
					}

					if (pThis->m_blVideoShow){
						// Decode and render locally.
						iRet = avcodec_decode_video2(pCodecContext, strct_streaminfo->m_pVideoFrame, &iVideoPic, strct_streaminfo->m_pVideoPacket);
						if (iRet < 0){
							TRACE("Decode Error.\n");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}
						if (iVideoPic <= 0){
							TRACE("iVideoPic <= 0");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}
						if (sws_scale(strct_streaminfo->m_video_sws_ctx, (const uint8_t* const*)strct_streaminfo->m_pVideoFrame->data, strct_streaminfo->m_pVideoFrame->linesize, 0, /*strct_streaminfo->m_height*/iVideo_Height, strct_streaminfo->m_pVideoFrameYUV->data, strct_streaminfo->m_pVideoFrameYUV->linesize) < 0){
							TRACE("sws_scale < 0");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}
						if (pThis->m_blPushStream){
							// Push the stream.
							if (pThis->push_stream() < 0){
								TRACE("pThis->push_stream() < 0");
								goto END;
							}
						}
						if (SDL_UpdateTexture(strct_streaminfo->m_sdlTexture, NULL, strct_streaminfo->m_pVideoFrameYUV->data[0], strct_streaminfo->m_pVideoFrameYUV->linesize[0]) < 0){
							TRACE("SDL_UpdateTexture < 0\n");
							goto END;
						}
						if (SDL_RenderClear(strct_streaminfo->m_sdlRenderer) < 0){
							TRACE("SDL_RenderClear<0\n");
							goto END;
						}
						if (SDL_RenderCopy(strct_streaminfo->m_sdlRenderer, strct_streaminfo->m_sdlTexture, NULL, NULL) < 0){
							TRACE("SDL_RenderCopy<0\n");
							goto END;
						}
						SDL_RenderPresent(strct_streaminfo->m_sdlRenderer);
					}
				}
				av_free_packet(strct_streaminfo->m_pVideoPacket);
			}
		}
		else if (event.type == FF_BREAK_EVENT){
			break;
		}
	}

	av_write_trailer(pRtmpFmtCtx);

	iRet = 1;
END:
	//fclose(fp_yuv);
	if (strct_streaminfo->m_video_sws_ctx){
		sws_freeContext(strct_streaminfo->m_video_sws_ctx);
	}
	if (strct_streaminfo->m_pVideoFrameYUV){
		av_frame_free(&strct_streaminfo->m_pVideoFrameYUV);
	}
	avformat_close_input(&pFmtCtx);
	avformat_free_context(pRtmpFmtCtx);
	return iRet;
}
/*
 * Initialize the capture pipeline for the configured video device.
 * Picks the platform capture backend (dshow / avfoundation / video4linux2),
 * opens the device with the options prepared by setupOptions(), probes the
 * streams, sets up the video decoder, and primes the demux packet.
 * On any failure, cleanup() is called and false is returned.
 * Returns true and sets mInited on success.
 */
bool Capture::init()
{
	if (mVideoDevice.empty())
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "No video device specified!\n");
		cleanup();
		return false;
	}

	initFFmpeg();
	setupOptions();

	// --------------------------------------------------
	//check out https://ffmpeg.org/ffmpeg-devices.html
	// --------------------------------------------------
	AVInputFormat * iformat;
	std::string inputName;

#ifdef __WIN32__
	iformat = av_find_input_format("dshow");
	inputName = "video=" + mVideoDevice;   // dshow expects the "video=" prefix
#elif defined __APPLE__
	iformat = av_find_input_format("avfoundation");
	inputName = mVideoDevice;
#else //linux NOT-Tested
	iformat = av_find_input_format("video4linux2");
	inputName = mVideoDevice;
#endif

	if (avformat_open_input(&mFMTContext, inputName.c_str(), iformat, &mOptions) < 0)
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not open capture input!\n");
		cleanup();
		return false;
	}

	/* retrieve stream information */
	if (avformat_find_stream_info(mFMTContext, nullptr) < 0)
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not find stream information!\n");
		cleanup();
		return false;
	}

	if (!initVideoStream())
	{
		cleanup();
		return false;
	}

	//dump format info to console
	av_dump_format(mFMTContext, 0, inputName.c_str(), 0);

	if (mVideoCodecContext)
	{
		if (!allocateVideoDecoderData(mVideoCodecContext->pix_fmt))
		{
			cleanup();
			return false;
		}
	}

	/* initialize packet, set data to nullptr, let the demuxer fill it */
	av_init_packet(&mPkt);
	mPkt.data = nullptr;
	mPkt.size = 0;

	//success
	mInited = true;

	return true;
}
int main(int argc, char **argv) { if (argc != 5) { fprintf(stderr, "Usage: %s <segment length> <output location> <filename prefix> <encoding profile>\n", argv[0]); return 1; } struct config_info config; memset(&config, 0, sizeof(struct config_info)); config.segment_length = atoi(argv[1]); config.temp_directory = argv[2]; config.filename_prefix = argv[3]; config.encoding_profile = argv[4]; config.input_filename = "pipe://1"; char *output_filename = malloc(sizeof(char) * (strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10)); if (!output_filename) { fprintf(stderr, "Segmenter error: Could not allocate space for output filenames\n"); exit(1); } // ------------------ Done parsing input -------------- av_register_all(); AVInputFormat *input_format = av_find_input_format("mpegts"); if (!input_format) { fprintf(stderr, "Segmenter error: Could not find MPEG-TS demuxer\n"); exit(1); } AVFormatContext *input_context = NULL; int ret = avformat_open_input(&input_context, config.input_filename, input_format, NULL); if (ret != 0) { fprintf(stderr, "Segmenter error: Could not open input file, make sure it is an mpegts file: %d\n", ret); exit(1); } if (avformat_find_stream_info(input_context, NULL) < 0) { fprintf(stderr, "Segmenter error: Could not read stream information\n"); exit(1); } AVOutputFormat *output_format = av_guess_format("mpegts", NULL, NULL); if (!output_format) { fprintf(stderr, "Segmenter error: Could not find MPEG-TS muxer\n"); exit(1); } AVFormatContext *output_context = avformat_alloc_context(); if (!output_context) { fprintf(stderr, "Segmenter error: Could not allocated output context"); exit(1); } output_context->oformat = output_format; // Don't print warnings when PTS and DTS are identical. 
input_context->flags |= AVFMT_FLAG_IGNDTS; int video_index = -1; int audio_index = -1; AVStream *video_stream; AVStream *audio_stream; int i; for (i = 0; i < input_context->nb_streams && (video_index < 0 || audio_index < 0); i++) { switch (input_context->streams[i]->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: video_index = i; input_context->streams[i]->discard = AVDISCARD_NONE; video_stream = add_output_stream(output_context, input_context->streams[i]); break; case AVMEDIA_TYPE_AUDIO: audio_index = i; input_context->streams[i]->discard = AVDISCARD_NONE; audio_stream = add_output_stream(output_context, input_context->streams[i]); break; default: input_context->streams[i]->discard = AVDISCARD_ALL; break; } } #if LIBAVFORMAT_VERSION_MAJOR < 54 if (av_set_parameters(output_context, NULL) < 0) { fprintf(stderr, "Segmenter error: Invalid output format parameters\n"); exit(1); } #endif av_dump_format(output_context, 0, config.filename_prefix, 1); if (video_index >= 0) { AVCodec *codec = avcodec_find_decoder(video_stream->codec->codec_id); if (!codec) { fprintf(stderr, "Segmenter error: Could not find video decoder, key frames will not be honored\n"); } if (avcodec_open2(video_stream->codec, codec, NULL) < 0) { fprintf(stderr, "Segmenter error: Could not open video decoder, key frames will not be honored\n"); } } if (video_stream->codec->ticks_per_frame > 1) { // h264 sets the ticks_per_frame and time_base.den but not time_base.num // since we don't use ticks_per_frame, adjust time_base.num accordingly. 
video_stream->codec->time_base.num *= video_stream->codec->ticks_per_frame; } unsigned int output_index = 1; snprintf(output_filename, strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10, "%s/%s-%05u.ts", config.temp_directory, config.filename_prefix, output_index++); if (avio_open(&output_context->pb, output_filename, AVIO_FLAG_WRITE) < 0) { fprintf(stderr, "Segmenter error: Could not open '%s'\n", output_filename); exit(1); } if (avformat_write_header(output_context, NULL)) { fprintf(stderr, "Segmenter error: Could not write mpegts header to first output file\n"); exit(1); } // Track initial PTS values so we can subtract them out (removing aduio/video delay, since they seem incorrect). int64_t initial_audio_pts = -1; int64_t initial_video_pts = -1; unsigned int first_segment = 1; unsigned int last_segment = 0; double prev_segment_time = 0; int decode_done; do { double segment_time; AVPacket packet; decode_done = av_read_frame(input_context, &packet); if (decode_done < 0) { break; } if (av_dup_packet(&packet) < 0) { fprintf(stderr, "Segmenter error: Could not duplicate packet"); av_free_packet(&packet); break; } if (packet.stream_index == video_index) { if (initial_video_pts < 0) initial_video_pts = packet.pts; packet.pts -= initial_video_pts; packet.dts = packet.pts; if (packet.flags & AV_PKT_FLAG_KEY) { segment_time = (double)packet.pts * video_stream->time_base.num / video_stream->time_base.den; } else { segment_time = prev_segment_time; } } else if (packet.stream_index == audio_index) { if (initial_audio_pts < 0) initial_audio_pts = packet.pts; packet.pts -= initial_audio_pts; packet.dts = packet.pts; segment_time = prev_segment_time; } else { segment_time = prev_segment_time; } // done writing the current file? 
if (segment_time - prev_segment_time >= config.segment_length) { avio_flush(output_context->pb); avio_close(output_context->pb); output_transfer_command(first_segment, ++last_segment, 0, config.encoding_profile); snprintf(output_filename, strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10, "%s/%s-%05u.ts", config.temp_directory, config.filename_prefix, output_index++); if (avio_open (&output_context->pb, output_filename, AVIO_FLAG_WRITE) < 0) { fprintf(stderr, "Segmenter error: Could not open '%s'\n", output_filename); break; } prev_segment_time = segment_time; } ret = av_write_frame(output_context, &packet); if (ret < 0) { fprintf(stderr, "Segmenter error: Could not write frame of stream: %d\n", ret); } else if (ret > 0) { fprintf(stderr, "Segmenter info: End of stream requested\n"); av_free_packet(&packet); break; } av_free_packet(&packet); } while (!decode_done); av_write_trailer(output_context); if (video_index >= 0) { avcodec_close(video_stream->codec); } for (i = 0; i < output_context->nb_streams; i++) { av_freep(&output_context->streams[i]->codec); av_freep(&output_context->streams[i]); } avio_close(output_context->pb); av_free(output_context); output_transfer_command(first_segment, ++last_segment, 1, config.encoding_profile); return 0; }
/*
 * Open a V4L2 camera device and prepare decoding to RGB24.
 * dev : device path, e.g. "/dev/video0".
 * Requests 320x240 @ 15 fps from the driver and builds an sws context that
 * converts decoded frames to 320x240 RGB24.
 * Returns true on success; false on any failure (contexts allocated so far
 * are left for the caller/destructor to release).
 */
bool FFmpegVideo::openCamera(const QString &dev)
{
	AVCodec *pCodec;
	int i;

	pFrame = av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	pFormatCtx = avformat_alloc_context();
	AVDictionary* options = NULL;
	// Set some options
	av_dict_set(&options,"framerate","15",0);
	// Video frame size. The default is to capture the full screen
	av_dict_set(&options,"video_size","320x240",0);
	AVInputFormat *ifmt=av_find_input_format("video4linux2");
	if(avformat_open_input(&pFormatCtx,dev.toAscii().data()/*"/dev/video0"*/,ifmt,&options)!=0){
		printf("Couldn't open input stream.\n");
		return false;
	}
	// Probe the stream information.
	if(avformat_find_stream_info(pFormatCtx,NULL)<0)
	{
		printf("Couldn't find stream information.\n");
		return false;
	}
	// Find the index of the first video stream.
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++){
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex=i;
			break;
		}
	}
	if(videoindex==-1)
	{
		printf("Couldn't find a video stream.\n");
		return false;
	}
	// Pick up the stream's codec parameters (format, resolution).
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	printf("pCodecCtx->pix_fmt:%d, pCodecCtx->codec_id:%d, pCodecCtx->width:%d, pCodecCtx->height:%d\n",
		pCodecCtx->pix_fmt, pCodecCtx->codec_id, pCodecCtx->width, pCodecCtx->height);
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL)
	{
		printf("Codec not found.\n");
		return false;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
	{
		printf("Could not open codec.\n");
		return false;
	}
	// Convert decoded frames to RGB24 at a fixed 320x240 output size
	// (matches the "video_size" requested above).
	rgb_sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
							pCodecCtx->pix_fmt, 320, 240,
							AV_PIX_FMT_RGB24, SWS_BICUBIC, 0, 0, 0);
	return true;
}
/*
 * Create a decode context for a media file or a capture device.
 * If filename starts with "video=", the CAP_DEVICE_NAME (dshow) capture
 * input format is used; otherwise the container is auto-detected.
 * Opens the input, probes streams, picks the best video and audio streams,
 * and opens their decoders.
 *
 * Returns a heap-allocated AVDecodeCtx on success; the caller must release
 * it with ffCloseDecodeContext(). Returns NULL on any failure.
 */
AVDecodeCtx *ffCreateDecodeContext(
	const char * filename, AVDictionary *opt_arg
	)
{
	int i, ret;
	AVInputFormat *file_iformat = NULL;
	AVDecodeCtx * pdc;
	AVDictionary * opt = NULL;

	ffInit();

	pdc = (AVDecodeCtx *)malloc(sizeof(AVDecodeCtx));

	/* One-shot "while" used as a structured goto: any error breaks out and
	 * falls through to the shared failure cleanup at the bottom. */
	while (pdc)
	{
		memset(pdc, 0, sizeof(AVDecodeCtx));
		pdc->_fileName = strdup(filename);
		pdc->_ctx = avformat_alloc_context();
		if (!pdc->_ctx)
		{
			av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext : could not allocate context.\n");
			break;
		}
		//filename = "video=.." ,open dshow device
		if (filename && strstr(filename, "video=") == filename){
			file_iformat = av_find_input_format(CAP_DEVICE_NAME);
			if (!file_iformat){
				av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n",CAP_DEVICE_NAME);
				break;
			}
		}
		av_dict_copy(&opt, opt_arg, 0);
		ret = avformat_open_input(&pdc->_ctx, filename, file_iformat, &opt);
		av_dict_free(&opt);
		opt = NULL;
		if (ret < 0)
		{
			char errmsg[ERROR_BUFFER_SIZE];
			av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
			av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext %s.\n", errmsg);
			break;
		}
		av_format_inject_global_side_data(pdc->_ctx);
		av_dict_copy(&opt, opt_arg, 0);
		ret = avformat_find_stream_info(pdc->_ctx, NULL);
		av_dict_free(&opt);
		opt = NULL;
		if (ret < 0)
		{
			char errmsg[ERROR_BUFFER_SIZE];
			av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
			av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext %s.\n", errmsg);
			break;
		}
		/* Select the best video and audio streams. */
		ret = av_find_best_stream(pdc->_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
		if (ret >= 0)
		{
			pdc->has_video = 1;
			pdc->_video_st = pdc->_ctx->streams[ret];
			pdc->_video_st_index = ret;
		}
		ret = av_find_best_stream(pdc->_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
		if (ret >= 0)
		{
			pdc->has_audio = 1;
			pdc->_audio_st = pdc->_ctx->streams[ret];
			pdc->_audio_st_index = ret;
		}
		if (pdc->has_video)
		{
			if (open_video(pdc, pdc->_video_st->codec->codec_id, NULL) < 0)
			{
				ffCloseDecodeContext(pdc);
				return NULL;
			}
			/* NOTE(review): the field is named encode_video but is set on the
			 * decode path — verify the flag's meaning in AVDecodeCtx. */
			pdc->encode_video = 1;
		}
		if (pdc->has_audio)
		{
			if (open_audio(pdc, pdc->_audio_st->codec->codec_id, NULL) < 0)
			{
				ffCloseDecodeContext(pdc);
				return NULL;
			}
			pdc->encode_audio = 1;
		}
		return pdc;
	}
	/*
	 * Failure cleanup.
	 * NOTE(review): reached with pdc == NULL when malloc fails — assumes
	 * ffCloseDecodeContext tolerates NULL; verify.
	 */
	ffCloseDecodeContext(pdc);
	return NULL;
}
static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt ) { double aspect_ratio = 1.0; if ( codec_context->codec_id == CODEC_ID_DVVIDEO ) { if ( pkt ) { if ( dv_is_pal( pkt ) ) { aspect_ratio = dv_is_wide( pkt ) ? 64.0/45.0 // 16:9 PAL : 16.0/15.0; // 4:3 PAL } else { aspect_ratio = dv_is_wide( pkt ) ? 32.0/27.0 // 16:9 NTSC : 8.0/9.0; // 4:3 NTSC } } else { AVRational ar = #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0) stream->sample_aspect_ratio; #else codec_context->sample_aspect_ratio; #endif // Override FFmpeg's notion of DV aspect ratios, which are // based upon a width of 704. Since we do not have a normaliser // that crops (nor is cropping 720 wide ITU-R 601 video always desirable) // we just coerce the values to facilitate a passive behaviour through // the rescale normaliser when using equivalent producers and consumers. // = display_aspect / (width * height) if ( ar.num == 10 && ar.den == 11 ) aspect_ratio = 8.0/9.0; // 4:3 NTSC else if ( ar.num == 59 && ar.den == 54 ) aspect_ratio = 16.0/15.0; // 4:3 PAL else if ( ar.num == 40 && ar.den == 33 ) aspect_ratio = 32.0/27.0; // 16:9 NTSC else if ( ar.num == 118 && ar.den == 81 ) aspect_ratio = 64.0/45.0; // 16:9 PAL } } else { AVRational codec_sar = codec_context->sample_aspect_ratio; AVRational stream_sar = #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0) stream->sample_aspect_ratio; #else { 0, 1 }; #endif if ( codec_sar.num > 0 ) aspect_ratio = av_q2d( codec_sar ); else if ( stream_sar.num > 0 ) aspect_ratio = av_q2d( stream_sar ); } return aspect_ratio; } /** Open the file. 
*/

// Opens 'file' with libavformat, optionally forcing an input format via a
// "fmt:resource?name:value&..." MRL, probes the streams, and stashes the
// resulting context(s), stream indexes and metadata on the producer's
// properties.  Returns 0 on success.  libavformat access is serialized
// with avformat_lock()/avformat_unlock().
static int producer_open( mlt_producer this, mlt_profile profile, char *file )
{
	// Return an error code (0 == no error)
	int error = 0;

	// Context for avformat
	AVFormatContext *context = NULL;

	// Get the properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// We will treat everything with the producer fps
	double fps = mlt_profile_fps( profile );

	// Lock the mutex now
	avformat_lock( );

	// If "MRL", then create AVInputFormat
	AVInputFormat *format = NULL;
	AVFormatParameters *params = NULL;
	char *standard = NULL;
	char *mrl = strchr( file, ':' );

	// AV option (0 = both, 1 = video, 2 = audio)
	int av = 0;

	// Only if there is not a protocol specification that avformat can handle
	if ( mrl && !url_exist( file ) )
	{
		// 'file' becomes format abbreviation
		mrl[0] = 0;

		// Lookup the format
		format = av_find_input_format( file );

		// Eat the format designator
		file = ++mrl;

		if ( format )
		{
			// Allocate params
			params = calloc( sizeof( AVFormatParameters ), 1 );

			// These are required by video4linux (defaults)
			params->width = 640;
			params->height = 480;
			params->time_base= (AVRational){1,25};
			// params->device = file;
			params->channels = 2;
			params->sample_rate = 48000;
		}

		// XXX: this does not work anymore since avdevice
		// TODO: make producer_avddevice?
		// Parse out params of the form ?name:value&name:value...
		mrl = strchr( file, '?' );
		while ( mrl )
		{
			mrl[0] = 0;
			char *name = strdup( ++mrl );
			char *value = strchr( name, ':' );
			if ( value )
			{
				value[0] = 0;
				value++;
				char *t = strchr( value, '&' );
				if ( t )
					t[0] = 0;
				if ( !strcmp( name, "frame_rate" ) )
					params->time_base.den = atoi( value );
				else if ( !strcmp( name, "frame_rate_base" ) )
					params->time_base.num = atoi( value );
				else if ( !strcmp( name, "sample_rate" ) )
					params->sample_rate = atoi( value );
				else if ( !strcmp( name, "channels" ) )
					params->channels = atoi( value );
				else if ( !strcmp( name, "width" ) )
					params->width = atoi( value );
				else if ( !strcmp( name, "height" ) )
					params->height = atoi( value );
				else if ( !strcmp( name, "standard" ) )
				{
					standard = strdup( value );
					params->standard = standard;
				}
				else if ( !strcmp( name, "av" ) )
					av = atoi( value );
			}
			free( name );
			mrl = strchr( mrl, '&' );
		}
	}

	// Now attempt to open the file
	error = av_open_input_file( &context, file, format, 0, params ) < 0;

	// Cleanup AVFormatParameters
	free( standard );
	free( params );

	// If successful, then try to get additional info
	if ( error == 0 )
	{
		// Get the stream info
		error = av_find_stream_info( context ) < 0;

		// Continue if no error
		if ( error == 0 )
		{
			// We will default to the first audio and video streams found
			int audio_index = -1;
			int video_index = -1;
			int av_bypass = 0;

			// Now set properties where we can (use default unknowns if required)
			if ( context->duration != AV_NOPTS_VALUE )
			{
				// This isn't going to be accurate for all formats
				mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
				mlt_properties_set_position( properties, "out", frames - 1 );
				mlt_properties_set_position( properties, "length", frames );
			}

			// Find default audio and video streams
			find_default_streams( properties, context, &audio_index, &video_index );

			if ( context->start_time != AV_NOPTS_VALUE )
				mlt_properties_set_double( properties, "_start_time", context->start_time );

			// Check if we're seekable (something funny about mpeg here :-/)
			if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) && strncmp( file, "udp:", 4 ) && strncmp( file, "tcp:", 4 ) && strncmp( file, "rtsp:", 5 ) && strncmp( file, "rtp:", 4 ) )
			{
				// Probe seekability with a seek to the start; the probed
				// context is kept as "dummy_context" and a fresh one is
				// opened so later reads start from the beginning.
				mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
				mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );
			}
			else
				av_bypass = 1;

			// Store selected audio and video indexes on properties
			mlt_properties_set_int( properties, "_audio_index", audio_index );
			mlt_properties_set_int( properties, "_video_index", video_index );
			mlt_properties_set_int( properties, "_last_position", -1 );

			// Fetch the width, height and aspect ratio
			if ( video_index != -1 )
			{
				AVCodecContext *codec_context = context->streams[ video_index ]->codec;
				mlt_properties_set_int( properties, "width", codec_context->width );
				mlt_properties_set_int( properties, "height", codec_context->height );

				if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
				{
					// Fetch the first frame of DV so we can read it directly
					AVPacket pkt;
					int ret = 0;
					while ( ret >= 0 )
					{
						ret = av_read_frame( context, &pkt );
						if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
						{
							mlt_properties_set_double( properties, "aspect_ratio", get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
							break;
						}
					}
				}
				else
				{
					mlt_properties_set_double( properties, "aspect_ratio", get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
				}
			}

			// Read Metadata
			if (context->title != NULL)
				mlt_properties_set(properties, "meta.attr.title.markup", context->title );
			if (context->author != NULL)
				mlt_properties_set(properties, "meta.attr.author.markup", context->author );
			if (context->copyright != NULL)
				mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
			if (context->comment != NULL)
				mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
			if (context->album != NULL)
				mlt_properties_set(properties, "meta.attr.album.markup", context->album );
			if (context->year != 0)
				mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
			if (context->track != 0)
				mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );

			// We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
			if ( av == 0 && audio_index != -1 && video_index != -1 )
			{
				// We'll use the open one as our video_context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );

				// And open again for our audio context
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );

				// Audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else if ( av != 2 && video_index != -1 )
			{
				// We only have a video context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
			}
			else if ( audio_index != -1 )
			{
				// We only have an audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else
			{
				// Something has gone wrong
				error = -1;
			}

			mlt_properties_set_int( properties, "av_bypass", av_bypass );
		}
	}

	// Unlock the mutex now
	avformat_unlock( );

	return error;
}

/** Convert a frame position to a time code.
*/

static double producer_time_of_frame( mlt_producer this, mlt_position position )
{
	// Frame index divided by the producer's fps gives seconds.
	return ( double )position / mlt_producer_get_fps( this );
}

// Convert the decoded AVFrame into the caller-supplied buffer in the
// requested mlt image format (swscale when available, img_convert
// otherwise).
// NOTE(review): this definition is truncated at the end of this chunk of
// the file; the remainder of the non-SWSCALE branch is not visible here.
static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
{
#ifdef SWSCALE
	if ( format == mlt_image_yuv420p )
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt, width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		// Planar 4:2:0 layout: Y plane, then quarter-size U and V planes.
		output.data[0] = buffer;
		output.data[1] = buffer + width * height;
		output.data[2] = buffer + ( 3 * width * height ) / 2;
		output.linesize[0] = width;
		output.linesize[1] = width >> 1;
		output.linesize[2] = width >> 1;
		sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize);
		sws_freeContext( context );
	}
	else if ( format == mlt_image_rgb24 )
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt, width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize);
		sws_freeContext( context );
	}
	else
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt, width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize);
		sws_freeContext( context );
	}
#else
	if ( format == mlt_image_yuv420p )
	{
		AVPicture pict;
		pict.data[0] = buffer;
		pict.data[1] = buffer + width * height;
		pict.data[2] = buffer + ( 3 * width * height ) / 2;
		pict.linesize[0] = width;
		pict.linesize[1] = width >> 1;
		pict.linesize[2] = width >> 1;
		img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
	}
/**
 * Construct an audio decoder for the given cellAdec codec type.
 * Selects the matching FFmpeg decoder and input demuxer (oma for ATRAC3+,
 * mp3 for MP3) and wires an AVIOContext that pulls data through adecRead.
 * Throws on any unknown type or FFmpeg allocation/lookup failure.
 */
AudioDecoder::AudioDecoder(s32 type, u32 addr, u32 size, vm::ptr<CellAdecCbMsg> func, u32 arg)
	: type(type)
	, memAddr(addr)
	, memSize(size)
	, memBias(0)
	, cbFunc(func)
	, cbArg(arg)
	, is_closed(false)
	, is_finished(false)
	, just_started(false)
	, just_finished(false)
	, codec(nullptr)
	, input_format(nullptr)
	, ctx(nullptr)
	, fmt(nullptr)
{
	av_register_all();
	avcodec_register_all();

	switch (type)
	{
	case CELL_ADEC_TYPE_ATRACX:
	case CELL_ADEC_TYPE_ATRACX_2CH:
	case CELL_ADEC_TYPE_ATRACX_6CH:
	case CELL_ADEC_TYPE_ATRACX_8CH:
	{
		codec = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P);
		input_format = av_find_input_format("oma");
		break;
	}
	case CELL_ADEC_TYPE_MP3:
	{
		codec = avcodec_find_decoder(AV_CODEC_ID_MP3);
		input_format = av_find_input_format("mp3");
		break;
	}
	default:
	{
		throw EXCEPTION("Unknown type (0x%x)", type);
	}
	}

	if (!codec)
	{
		throw EXCEPTION("avcodec_find_decoder() failed");
	}
	if (!input_format)
	{
		throw EXCEPTION("av_find_input_format() failed");
	}
	fmt = avformat_alloc_context();
	if (!fmt)
	{
		throw EXCEPTION("avformat_alloc_context() failed");
	}

	// BUG FIX: the buffer handed to avio_alloc_context must be declared
	// with its real size — the original allocated 4096 bytes but told
	// avio only 256.  Also check the allocation before handing it over.
	const int io_buf_size = 4096;
	io_buf = (u8*)av_malloc(io_buf_size);
	if (!io_buf)
	{
		throw EXCEPTION("av_malloc() failed");
	}
	fmt->pb = avio_alloc_context(io_buf, io_buf_size, 0, this, adecRead, NULL, NULL);
	if (!fmt->pb)
	{
		throw EXCEPTION("avio_alloc_context() failed");
	}
}
/*
 * Listen for a SAP (Session Announcement Protocol) announcement on the
 * given (or default) multicast address, extract the announced SDP, and
 * open it with the "sdp" demuxer.  The resulting streams are mirrored
 * onto the outer demuxer context.  Returns 0 on success, AVERROR on
 * failure.
 */
static int sap_read_header(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port, path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875; /* default SAP port */

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d", port);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_READ, &s->interrupt_callback, NULL);
    if (ret)
        goto fail;

    /* Read announcement packets until one carrying usable SDP arrives. */
    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet " "received\n");
            continue;
        }
        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion " "announcement\n");
            continue;
        }
        /* Skip the fixed header, origin address and authentication data
         * to reach the payload. */
        addr_type = recvbuf[0] & 0x10;
        auth_len = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n", &recvbuf[pos]);
            continue;
        }
        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    /* Wrap the SDP string in an AVIOContext so the sdp demuxer can read it. */
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL, NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        /* NOTE(review): ret still holds the non-negative byte count from
         * the last ffurl_read here, so a missing sdp demuxer does not
         * propagate a proper error code. */
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb = &sap->sdp_pb;
    sap->sdp_ctx->interrupt_callback = s->interrupt_callback;
    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    /* Mirror the sdp demuxer's streams onto the outer context. */
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->id = i;
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
/*
 * MPlayer demuxer probe: decide whether libavformat can handle this
 * stream, either by a user-forced format (opt_format) or by feeding
 * progressively larger buffers to av_probe_input_format2.  Returns
 * DEMUXER_TYPE_LAVF on success, 0 otherwise.
 */
static int lavf_check_file(demuxer_t *demuxer){
    AVProbeData avpd;
    lavf_priv_t *priv;
    int probe_data_size = 0;
    int read_size = INITIAL_PROBE_SIZE;
    int score;

    if(!demuxer->priv)
        demuxer->priv=calloc(sizeof(lavf_priv_t),1);
    priv= demuxer->priv;

    init_avformat();

    /* A format forced on the command line bypasses probing entirely. */
    if (opt_format) {
        if (strcmp(opt_format, "help") == 0) {
            list_formats();
            return 0;
        }
        priv->avif= av_find_input_format(opt_format);
        if (!priv->avif) {
            mp_msg(MSGT_DEMUX,MSGL_FATAL,"Unknown lavf format %s\n", opt_format);
            return 0;
        }
        mp_msg(MSGT_DEMUX,MSGL_INFO,"Forced lavf %s demuxer\n", priv->avif->long_name);
        return DEMUXER_TYPE_LAVF;
    }

    avpd.buf = av_mallocz(FFMAX(BIO_BUFFER_SIZE, PROBE_BUF_SIZE) + FF_INPUT_BUFFER_PADDING_SIZE);

    /* Grow the probe window (doubling the read each round) until the
     * score is convincing, data runs out, or the buffer cap is hit. */
    do {
        read_size = stream_read(demuxer->stream, avpd.buf + probe_data_size, read_size);
        if(read_size < 0) {
            av_free(avpd.buf);
            return 0;
        }
        probe_data_size += read_size;
        avpd.filename= demuxer->stream->url;
        if (!avpd.filename) {
            mp_msg(MSGT_DEMUX, MSGL_WARN, "Stream url is not set!\n");
            avpd.filename = "";
        }
        /* Strip the ffmpeg:// prefix so extension matching works. */
        if (!strncmp(avpd.filename, "ffmpeg://", 9))
            avpd.filename += 9;
        avpd.buf_size= probe_data_size;

        score = 0;
        priv->avif= av_probe_input_format2(&avpd, probe_data_size > 0, &score);
        read_size = FFMIN(2*read_size, PROBE_BUF_SIZE - probe_data_size);
    } while ((demuxer->desc->type != DEMUXER_TYPE_LAVF_PREFERRED ||
              probe_data_size < SMALL_MAX_PROBE_SIZE) &&
             score <= AVPROBE_SCORE_MAX / 4 &&
             read_size > 0 && probe_data_size < PROBE_BUF_SIZE);
    av_free(avpd.buf);

    if(!priv->avif){
        mp_msg(MSGT_HEADER,MSGL_V,"LAVF_check: no clue about this gibberish!\n");
        return 0;
    }else
        mp_msg(MSGT_HEADER,MSGL_V,"LAVF_check: %s\n", priv->avif->long_name);

    return DEMUXER_TYPE_LAVF;
}
/*
 * GPAC input-service entry point: open 'url' with libavformat (local file
 * or via the GPAC downloader for remote URLs), pick the audio/video
 * streams to expose, probe seekability and pts reliability, and ack the
 * connection.  A "#video"/"#audio" fragment restricts the service to one
 * media type.  Always returns GF_OK; errors are reported through
 * gf_service_connect_ack.
 */
static GF_Err FFD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
	GF_Err e;
	s64 last_aud_pts;
	u32 i;
	s32 res;
	Bool is_local;
	const char *sOpt;
	char *ext, szName[1024];
	FFDemux *ffd = plug->priv;
	AVInputFormat *av_in = NULL;
	char szExt[20];

	if (ffd->ctx) return GF_SERVICE_ERROR;

	assert( url && strlen(url) < 1024);
	strcpy(szName, url);
	ext = strrchr(szName, '#');
	ffd->service_type = 0;
	e = GF_NOT_SUPPORTED;
	ffd->service = serv;

	/* "#video" / "#audio" fragment selects a single media type. */
	if (ext) {
		if (!stricmp(&ext[1], "video")) ffd->service_type = 1;
		else if (!stricmp(&ext[1], "audio")) ffd->service_type = 2;
		ext[0] = 0;
	}

	/*some extensions not supported by ffmpeg, overload input format*/
	ext = strrchr(szName, '.');
	strcpy(szExt, ext ? ext+1 : "");
	strlwr(szExt);
	if (!strcmp(szExt, "cmp")) av_in = av_find_input_format("m4v");

	is_local = (strnicmp(url, "file://", 7) && strstr(url, "://")) ? 0 : 1;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] opening file %s - local %d - av_in %08x\n", url, is_local, av_in));

	if (!is_local) {
		AVProbeData pd;

		/*setup wraper for FFMPEG I/O*/
		ffd->buffer_size = 8192;
		sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "IOBufferSize");
		if (sOpt) ffd->buffer_size = atoi(sOpt);
		ffd->buffer = gf_malloc(sizeof(char)*ffd->buffer_size);
#ifdef FFMPEG_DUMP_REMOTE
		ffd->outdbg = gf_f64_open("ffdeb.raw", "wb");
#endif
#ifdef USE_PRE_0_7
		init_put_byte(&ffd->io, ffd->buffer, ffd->buffer_size, 0, ffd, ff_url_read, NULL, NULL);
		ffd->io.is_streamed = 1;
#else
		ffd->io.seekable = 1;
#endif

		ffd->dnload = gf_service_download_new(ffd->service, url, GF_NETIO_SESSION_NOT_THREADED | GF_NETIO_SESSION_NOT_CACHED, NULL, ffd);
		if (!ffd->dnload) return GF_URL_ERROR;
		/* Prefetch data until EOS (fully cached) or the probe buffer fills. */
		while (1) {
			u32 read;
			e = gf_dm_sess_fetch_data(ffd->dnload, ffd->buffer + ffd->buffer_used, ffd->buffer_size - ffd->buffer_used, &read);
			if (e==GF_EOS) break;
			/*we're sync!!*/
			if (e==GF_IP_NETWORK_EMPTY) continue;
			if (e) goto err_exit;
			ffd->buffer_used += read;
			if (ffd->buffer_used == ffd->buffer_size) break;
		}
		if (e==GF_EOS) {
			/* Whole resource is cached locally: open the cache file. */
			const char *cache_file = gf_dm_sess_get_cache_name(ffd->dnload);
			res = open_file(&ffd->ctx, cache_file, av_in);
		} else {
			/* Still streaming: probe the buffered head and open as a stream. */
			pd.filename = szName;
			pd.buf_size = ffd->buffer_used;
			pd.buf = (u8 *) ffd->buffer;
			av_in = av_probe_input_format(&pd, 1);
			if (!av_in) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] error probing file %s - probe start with %c %c %c %c\n", url, ffd->buffer[0], ffd->buffer[1], ffd->buffer[2], ffd->buffer[3]));
				return GF_NOT_SUPPORTED;
			}
			/*setup downloader*/
			av_in->flags |= AVFMT_NOFILE;
#ifdef USE_AVFORMAT_OPEN_INPUT
			/*commit ffmpeg 603b8bc2a109978c8499b06d2556f1433306eca7*/
			res = avformat_open_input(&ffd->ctx, szName, av_in, NULL);
#else
			res = av_open_input_stream(&ffd->ctx, &ffd->io, szName, av_in, NULL);
#endif
		}
	} else {
		res = open_file(&ffd->ctx, szName, av_in);
	}

	/* Map FFmpeg open results onto GPAC error codes. */
	switch (res) {
#ifndef _WIN32_WCE
	case 0: e = GF_OK; break;
	case AVERROR_IO: e = GF_URL_ERROR; goto err_exit;
	case AVERROR_INVALIDDATA: e = GF_NON_COMPLIANT_BITSTREAM; goto err_exit;
	case AVERROR_NOMEM: e = GF_OUT_OF_MEM; goto err_exit;
	case AVERROR_NOFMT: e = GF_NOT_SUPPORTED; goto err_exit;
#endif
	default: e = GF_SERVICE_ERROR; goto err_exit;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] looking for streams in %s - %d streams - type %s\n", ffd->ctx->filename, ffd->ctx->nb_streams, ffd->ctx->iformat->name));

	res = av_find_stream_info(ffd->ctx);
	if (res <0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] cannot locate streams - error %d\n", res));
		e = GF_NOT_SUPPORTED;
		goto err_exit;
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] file %s opened - %d streams\n", url, ffd->ctx->nb_streams));

	/*figure out if we can use codecs or not*/
	ffd->audio_st = ffd->video_st = -1;
	for (i = 0; i < ffd->ctx->nb_streams; i++) {
		AVCodecContext *enc = ffd->ctx->streams[i]->codec;
		switch(enc->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((ffd->audio_st<0) && (ffd->service_type!=1)) {
				ffd->audio_st = i;
				ffd->audio_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			if ((ffd->video_st<0) && (ffd->service_type!=2)) {
				ffd->video_st = i;
				ffd->video_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		default:
			break;
		}
	}
	if ((ffd->service_type==1) && (ffd->video_st<0)) goto err_exit;
	if ((ffd->service_type==2) && (ffd->audio_st<0)) goto err_exit;
	if ((ffd->video_st<0) && (ffd->audio_st<0)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] No supported streams in file\n"));
		goto err_exit;
	}

	sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "DataBufferMS");
	ffd->data_buffer_ms = 0;
	if (sOpt) ffd->data_buffer_ms = atoi(sOpt);
	if (!ffd->data_buffer_ms) ffd->data_buffer_ms = FFD_DATA_BUFFER;

	/*build seek*/
	if (is_local) {
		/*check we do have increasing pts. If not we can't rely on pts, we must skip SL
		we assume video pts is always present*/
		if (ffd->audio_st>=0) {
			last_aud_pts = 0;
			for (i=0; i<20; i++) {
				AVPacket pkt;
				pkt.stream_index = -1;
				if (av_read_frame(ffd->ctx, &pkt) <0) break;
				if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
				if (pkt.stream_index==ffd->audio_st) last_aud_pts = pkt.pts;
			}
			if (last_aud_pts*ffd->audio_tscale.den<10*ffd->audio_tscale.num) ffd->unreliable_audio_timing = 1;
		}

		/* Probe seekability; a non-seekable file is reopened so reading
		 * restarts from the beginning. */
		ffd->seekable = (av_seek_frame(ffd->ctx, -1, 0, AVSEEK_FLAG_BACKWARD)<0) ? 0 : 1;
		if (!ffd->seekable) {
#ifndef FF_API_CLOSE_INPUT_FILE
			av_close_input_file(ffd->ctx);
#else
			avformat_close_input(&ffd->ctx);
#endif
			ffd->ctx = NULL;
			open_file(&ffd->ctx, szName, av_in);
			av_find_stream_info(ffd->ctx);
		}
	}

	/*let's go*/
	gf_service_connect_ack(serv, NULL, GF_OK);

	/*if (!ffd->service_type)*/
	FFD_SetupObjects(ffd);
	ffd->service_type = 0;
	return GF_OK;

err_exit:
	GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] Error opening file %s: %s\n", url, gf_error_to_string(e)));
#ifndef FF_API_CLOSE_INPUT_FILE
	if (ffd->ctx) av_close_input_file(ffd->ctx);
#else
	if (ffd->ctx) avformat_close_input(&ffd->ctx);
#endif
	ffd->ctx = NULL;
	gf_service_connect_ack(serv, NULL, e);
	return GF_OK;
}
/*
 * Demo: open the AVFoundation capture device named by argv[1] (forcing
 * uyvy422, 1280x720 @ ~30fps), decode a handful of frames via the
 * file-scope decode_packet helper, and write raw video to argv[2].
 * Uses file-scope globals (fmt_ctx, video_stream, frame, pkt, ...) that
 * are declared elsewhere in this file.
 */
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 3 && argc != 4) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    // if (argc == 4 && !strcmp(argv[1], "-refcount")) {
    //     refcount = 1;
    //     argv++;
    // }
    src_filename = argv[1];
    video_dst_filename = argv[2];

    /* register all formats and codecs */
    avdevice_register_all();
    av_register_all();

    // const char* format_name = "avfoundation";
    AVInputFormat* input_format = av_find_input_format("avfoundation");
    printf("input_format: %p\n", input_format);
    // printf("input_format: %s", input_format->long_name);

    /* Capture parameters for the device demuxer.
     * NOTE(review): this dictionary is never freed (av_dict_free). */
    AVDictionary* open_options = NULL;
    av_dict_set(&open_options, "pixel_format", "uyvy422", 0);
    av_dict_set(&open_options, "framerate", "30.000030", 0);
    av_dict_set(&open_options, "video_size", "1280x720", 0);

    /* open input file, and allocate format context */
    // if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
    if (avformat_open_input(&fmt_ctx, src_filename, input_format, &open_options) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }
    printf("fmt_ctx: %p\n", fmt_ctx);

    /* NOTE(review): streams[0] is accessed here BEFORE
     * avformat_find_stream_info() runs — this relies on the device
     * demuxer exposing its stream immediately after open. */
    video_stream = fmt_ctx->streams[0];
    printf("video_stream: %p\n", video_stream);
    video_dec_ctx = video_stream->codec;
    printf("video_dec_ctx: %p\n", video_dec_ctx);

    /* allocate image where the decoded image will be put */
    width = video_dec_ctx->width;
    height = video_dec_ctx->height;
    pix_fmt = video_dec_ctx->pix_fmt;
    printf("width: %d\n", width);
    printf("height: %d\n", height);
    printf("pix_fmt: %d\n", pix_fmt);

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        printf("width: %d\n", width);
        printf("height: %d\n", height);
        printf("pix_fmt: %d\n", pix_fmt);
        ret = av_image_alloc(video_dst_data, video_dst_linesize, width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!video_stream) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);

    /* read frames from the file; stop after a few frames since this is a
     * live capture demo */
    int frame_index = 0;
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);

        frame_index++;
        if (frame_index > 5) {
            break;
        }
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height, video_dst_filename);
    }

end:
    avcodec_close(video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);

    return ret < 0;
}
/*
 * Probe the format of the data in pb, growing the probe window
 * geometrically from PROBE_BUF_MIN up to max_probe_size until a format
 * scores above the retry threshold (or data runs out).  On return pb is
 * rewound via the probe buffer so the bytes can be re-read by the
 * demuxer.  Returns the probe score on success, an AVERROR code on
 * failure.
 */
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
                           const char *filename, void *logctx,
                           unsigned int offset, unsigned int max_probe_size)
{
    AVProbeData pd = { filename ? filename : "" };
    uint8_t *buf = NULL;
    int ret = 0, probe_size, buf_offset = 0;
    int score = 0;
    int ret2;

    if (!max_probe_size)
        max_probe_size = PROBE_BUF_MAX;
    else if (max_probe_size < PROBE_BUF_MIN) {
        av_log(logctx, AV_LOG_ERROR,
               "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
        return AVERROR(EINVAL);
    }

    if (offset >= max_probe_size)
        return AVERROR(EINVAL);

    /* Pass along the I/O context's mime type (without parameters) as a
     * probe hint. */
    if (pb->av_class) {
        uint8_t *mime_type_opt = NULL;
        char *semi;
        av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt);
        pd.mime_type = (const char *)mime_type_opt;
        semi = pd.mime_type ? strchr(pd.mime_type, ';') : NULL;
        if (semi) {
            *semi = '\0';
        }
    }
#if 0
    if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
        if (!av_strcasecmp(mime_type, "audio/aacp")) {
            *fmt = av_find_input_format("aac");
        }
        av_freep(&mime_type);
    }
#endif

    /* Double the probe size each round; the last round always reaches
     * exactly max_probe_size and drops the score floor to 0. */
    for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
         probe_size = FFMIN(probe_size << 1,
                            FFMAX(max_probe_size, probe_size + 1))) {
        score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;

        /* Read probe data. */
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
            goto fail;
        if ((ret = avio_read(pb, buf + buf_offset,
                             probe_size - buf_offset)) < 0) {
            /* Fail if error was not end of file, otherwise, lower score. */
            if (ret != AVERROR_EOF)
                goto fail;

            score = 0;
            ret = 0; /* error was end of file, nothing read */
        }
        buf_offset += ret;
        if (buf_offset < offset)
            continue;
        pd.buf_size = buf_offset - offset;
        pd.buf = &buf[offset];

        /* Demuxer probes may read a little past the data; keep it zeroed. */
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* Guess file format. */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if (*fmt) {
            /* This can only be true in the last iteration. */
            if (score <= AVPROBE_SCORE_RETRY) {
                av_log(logctx, AV_LOG_WARNING,
                       "Format %s detected only with low score of %d, "
                       "misdetection possible!\n", (*fmt)->name, score);
            } else
                av_log(logctx, AV_LOG_DEBUG,
                       "Format %s probed with size=%d and score=%d\n",
                       (*fmt)->name, probe_size, score);
#if 0
            FILE *f = fopen("probestat.tmp", "ab");
            fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n",
                    probe_size, (*fmt)->name, score, filename);
            fclose(f);
#endif
        }
    }

    if (!*fmt)
        ret = AVERROR_INVALIDDATA;

fail:
    /* Rewind. Reuse probe buffer to avoid seeking. */
    ret2 = ffio_rewind_with_probe_data(pb, &buf, buf_offset);
    if (ret >= 0)
        ret = ret2;

    av_freep(&pd.mime_type);
    return ret < 0 ? ret : score;
}
int main(int argc, char* argv[]) { avcodec_register_all(); av_register_all(); AVFormatContext *pFormatCtx = NULL; AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame, *pFrameRGB; AVPicture pict; struct SwsContext *img_convert_ctx; uint8_t *buffer; int videoStream = -1; int numBytes; fprintf(stdout, "%s\n", argv[1]); AVInputFormat *fileFormat; fileFormat = av_find_input_format("ass"); if (avformat_open_input(&pFormatCtx, argv[1], fileFormat, NULL) != 0) { fprintf(stderr, "av_open_input_file \n"); return -1; } if (avformat_find_stream_info(pFormatCtx, NULL) < 0) { fprintf(stderr, "av_find_stream_info \n"); return -1; } av_dump_format(pFormatCtx, 0, argv[1], 0); int i; for (i = 0; i < pFormatCtx->nb_streams; ++i) { if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { videoStream = i; break ; } } if (videoStream == -1) { fprintf(stderr, "Unsupported videoStream.\n"); return -1; } fprintf(stdout, "videoStream: %d\n", videoStream); pCodecCtx = pFormatCtx->streams[videoStream]->codec; pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if (pCodec == NULL) { fprintf(stderr, "Unsupported codec.\n"); return -1; } char *charenc = av_malloc(10);//"GBK"; memset(charenc, 0, 10); memcpy(charenc, "GBK", strlen("GBK")); printf("charenc :%p\n", charenc); pCodecCtx->sub_charenc = charenc;//"UTF-16LE"; printf("pCodecCtx->sub_charenc :%p\n", pCodecCtx->sub_charenc); if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { fprintf(stderr, "Could not open codec.\n"); return -1; } int index; int ret = 0; AVPacket packet; av_init_packet(&packet); AVSubtitle sub; int len1, gotSubtitle; for (index = 0; ; index++) { //memset(&packet, 0, sizeof (packet)); ret = av_read_frame(pFormatCtx, &packet); if (ret < 0) { fprintf(stderr, "read frame ret:%s\n", ret_str(ret)); break ; } fprintf(stdout, "stream_index:%d\n", packet.stream_index); if (packet.stream_index == videoStream) { ret = avcodec_decode_subtitle2(pCodecCtx, &sub, &gotSubtitle, &packet); if (index > 30) { break ; } 
fprintf(stdout, "gotSubtitle:%d\n", gotSubtitle); dumpSubtitle(&sub); } av_free_packet(&packet); } end: fprintf(stderr, "end return.\n"); avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); return 0; }
/**
 * Open a media source for decoding.
 *
 * Two paths:
 *  - filenames starting with "/dev/" are treated as live V4L2 capture devices
 *    and opened through the (deprecated) AVFormatParameters capture API with a
 *    hard-coded 320x240 @ 30fps request;
 *  - anything else is opened as a regular file/stream, optionally using the
 *    format and parameters supplied via FFmpegParameters.
 *
 * On success the format context is owned by m_format_context, duration/start
 * time are cached, and the first video and audio streams are opened (audio is
 * optional: a failure there only disables audio). Returns false on any error.
 *
 * NOTE(review): uses the legacy av_open_input_file()/av_find_stream_info()
 * API, so this code targets pre-0.11 FFmpeg; do not mix with modern avformat
 * calls without porting.
 */
bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* parameters)
{
    try
    {
        // Open video file
        AVFormatContext * p_format_context = 0;

        if (filename.compare(0, 5, "/dev/")==0)
        {
            // Live capture device path: register device demuxers first.
            avdevice_register_all();

            OSG_NOTICE<<"Attempting to stream "<<filename<<std::endl;

            AVFormatParameters formatParams;
            memset(&formatParams, 0, sizeof(AVFormatParameters));
            AVInputFormat *iformat;

            formatParams.channel = 0;
            formatParams.standard = 0;
#if 1
            // Low-resolution default capture request.
            formatParams.width = 320;
            formatParams.height = 240;
#else
            formatParams.width = 640;
            formatParams.height = 480;
#endif
            // Request 30 fps from the device.
            formatParams.time_base.num = 1;
            formatParams.time_base.den = 30;

            std::string format = "video4linux2";
            iformat = av_find_input_format(format.c_str());

            if (iformat)
            {
                OSG_NOTICE<<"Found input format: "<<format<<std::endl;
            }
            else
            {
                OSG_NOTICE<<"Failed to find input format: "<<format<<std::endl;
            }

            int error = av_open_input_file(&p_format_context, filename.c_str(), iformat, 0, &formatParams);
            if (error != 0)
            {
                // Translate the legacy error code into a readable name for the
                // exception message.
                std::string error_str;
                switch (error)
                {
                    //case AVERROR_UNKNOWN: error_str = "AVERROR_UNKNOWN"; break;   // same value as AVERROR_INVALIDDATA
                    case AVERROR_IO: error_str = "AVERROR_IO"; break;
                    case AVERROR_NUMEXPECTED: error_str = "AVERROR_NUMEXPECTED"; break;
                    case AVERROR_INVALIDDATA: error_str = "AVERROR_INVALIDDATA"; break;
                    case AVERROR_NOMEM: error_str = "AVERROR_NOMEM"; break;
                    case AVERROR_NOFMT: error_str = "AVERROR_NOFMT"; break;
                    case AVERROR_NOTSUPP: error_str = "AVERROR_NOTSUPP"; break;
                    case AVERROR_NOENT: error_str = "AVERROR_NOENT"; break;
                    case AVERROR_PATCHWELCOME: error_str = "AVERROR_PATCHWELCOME"; break;
                    default: error_str = "Unknown error"; break;
                }

                throw std::runtime_error("av_open_input_file() failed : " + error_str);
            }
        }
        else
        {
            // Regular file/URL path; caller may force a demuxer and params.
            AVInputFormat* av_format = (parameters ? parameters->getFormat() : 0);
            AVFormatParameters* av_params = (parameters ? parameters->getFormatParameter() : 0);
            if (av_open_input_file(&p_format_context, filename.c_str(), av_format, 0, av_params) !=0 )
                throw std::runtime_error("av_open_input_file() failed");
        }

        // Hand ownership of the context to the member smart pointer.
        m_format_context.reset(p_format_context);

        // Retrieve stream info
        if (av_find_stream_info(p_format_context) < 0)
            throw std::runtime_error("av_find_stream_info() failed");

        // Cache duration/start in seconds (AV_TIME_BASE units in the context).
        m_duration = double(m_format_context->duration) / AV_TIME_BASE;
        m_start = double(m_format_context->start_time) / AV_TIME_BASE;

        // TODO move this elsewhere
        m_clocks.reset(m_start);

        // Dump info to stderr
        dump_format(p_format_context, 0, filename.c_str(), false);

        // Find and open the first video and audio streams (note that audio
        // stream is optional and only opened if possible)
        findVideoStream();
        findAudioStream();

        m_video_decoder.open(m_video_stream);

        try
        {
            m_audio_decoder.open(m_audio_stream);
        }
        catch (const std::runtime_error & error)
        {
            // Audio failure is non-fatal: keep the video stream running.
            OSG_WARN << "FFmpegImageStream::open audio failed, audio stream will be disabled: " << error.what() << std::endl;
        }
    }
    catch (const std::runtime_error & error)
    {
        OSG_WARN << "FFmpegImageStream::open : " << error.what() << std::endl;
        return false;
    }

    return true;
}
int grabber::init() { pFormatCtx = avformat_alloc_context(); #if USE_DSHOW //Use dshow // //Need to Install screen-capture-recorder //screen-capture-recorder //Website: http://sourceforge.net/projects/screencapturer/ //AVDictionary* options = NULL; //av_dict_set(&options,"rtbufsize","1500M",0); // AVInputFormat *ifmt=av_find_input_format("dshow"); if(avformat_open_input(&pFormatCtx,"video=screen-capture-recorder",ifmt,NULL)!=0){ printf("Couldn't open input stream.\n"); return -1; } #else //Use gdigrab AVDictionary* options = NULL; //Set some options //grabbing frame rate //av_dict_set(&options,"framerate","5",0); //The distance from the left edge of the screen or desktop //av_dict_set(&options,"offset_x","20",0); //The distance from the top edge of the screen or desktop //av_dict_set(&options,"offset_y","40",0); //Video frame size. The default is to capture the full screen //av_dict_set(&options,"video_size","640x480",0); AVInputFormat *ifmt=av_find_input_format("gdigrab"); if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){ printf("Couldn't open input stream.\n"); return -1; } #endif if(avformat_find_stream_info(pFormatCtx,NULL)<0) { printf("Couldn't find stream information.\n"); return -1; } int i = 0; for(; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { videoindex=i; break; } } if(videoindex==-1) { printf("Didn't find a video stream.\n"); return -1; } pCodecCtx=pFormatCtx->streams[videoindex]->codec; pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { printf("Codec not found.\n"); return -1; } if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) { printf("Could not open codec.\n"); return -1; } { for (int i=0;i<10;i++) { AVFrame* frame = av_frame_alloc(); frame->format = pCodecCtx->pix_fmt; frame->width = pCodecCtx->width; frame->height = pCodecCtx->height; av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, (AVPixelFormat)frame->format, 32); free_fifo.push(frame); } 
} img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, /*pCodecCtx->width, pCodecCtx->height*/1280, 720, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); pEnc = new encoder(/*pCodecCtx->width, pCodecCtx->height*/1280, 720, m_user_data, m_push_data); if (-1 == pEnc->init()) { printf("Could not open encoder.\n"); return -1; } pEnc->startLoop(); return 0 ; }
av_session_t *av_init_session() { av_session_t *_retu = malloc(sizeof(av_session_t)); /* Initialize our mutex */ pthread_mutex_init ( &_retu->_mutex, NULL ); _retu->_messenger = tox_new(1); if ( !_retu->_messenger ) { fprintf ( stderr, "tox_new() failed!\n" ); return NULL; } _retu->_friends = NULL; const ALchar *_device_list = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER); int i = 0; const ALchar *device_names[20]; if ( _device_list ) { INFO("\nAvailable Capture Devices are:"); while (*_device_list ) { device_names[i] = _device_list; INFO("%d) %s", i, device_names[i]); _device_list += strlen( _device_list ) + 1; ++i; } } INFO("Enter capture device number"); char dev[2]; char *left; char *warned_ = fgets(dev, 2, stdin); (void)warned_; long selection = strtol(dev, &left, 10); if ( *left ) { printf("'%s' is not a number!", dev); fflush(stdout); exit(EXIT_FAILURE); } else { INFO("Selected: %d ( %s )", selection, device_names[selection]); } _retu->audio_capture_device = (struct ALCdevice *)alcCaptureOpenDevice( device_names[selection], AUDIO_SAMPLE_RATE, AL_FORMAT_MONO16, AUDIO_FRAME_SIZE * 4); if (alcGetError((ALCdevice *)_retu->audio_capture_device) != AL_NO_ERROR) { printf("Could not start capture device! 
%d\n", alcGetError((ALCdevice *)_retu->audio_capture_device)); return 0; } uint16_t height = 0, width = 0; #ifdef TOX_FFMPEG avdevice_register_all(); avcodec_register_all(); av_register_all(); _retu->video_input_format = av_find_input_format(VIDEO_DRIVER); if (avformat_open_input(&_retu->video_format_ctx, DEFAULT_WEBCAM, _retu->video_input_format, NULL) != 0) { fprintf(stderr, "Opening video_input_format failed!\n"); //return -1; goto failed_init_ffmpeg; } avformat_find_stream_info(_retu->video_format_ctx, NULL); av_dump_format(_retu->video_format_ctx, 0, DEFAULT_WEBCAM, 0); for (i = 0; i < _retu->video_format_ctx->nb_streams; ++i) { if (_retu->video_format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { _retu->video_stream = i; break; } } _retu->webcam_decoder_ctx = _retu->video_format_ctx->streams[_retu->video_stream]->codec; _retu->webcam_decoder = avcodec_find_decoder(_retu->webcam_decoder_ctx->codec_id); if (_retu->webcam_decoder == NULL) { fprintf(stderr, "Unsupported codec!\n"); //return -1; goto failed_init_ffmpeg; } if (_retu->webcam_decoder_ctx == NULL) { fprintf(stderr, "Init webcam_decoder_ctx failed!\n"); //return -1; goto failed_init_ffmpeg; } if (avcodec_open2(_retu->webcam_decoder_ctx, _retu->webcam_decoder, NULL) < 0) { fprintf(stderr, "Opening webcam decoder failed!\n"); //return -1; goto failed_init_ffmpeg; } width = _retu->webcam_decoder_ctx->width; height = _retu->webcam_decoder_ctx->height; failed_init_ffmpeg: ; #endif uint8_t _byte_address[TOX_FRIEND_ADDRESS_SIZE]; tox_get_address(_retu->_messenger, _byte_address ); fraddr_to_str( _byte_address, _retu->_my_public_id ); _retu->av = toxav_new(_retu->_messenger, width, height); /* ------------------ */ toxav_register_callstate_callback(callback_call_started, av_OnStart, _retu->av); toxav_register_callstate_callback(callback_call_canceled, av_OnCancel, _retu->av); toxav_register_callstate_callback(callback_call_rejected, av_OnReject, _retu->av); 
toxav_register_callstate_callback(callback_call_ended, av_OnEnd, _retu->av); toxav_register_callstate_callback(callback_recv_invite, av_OnInvite, _retu->av); toxav_register_callstate_callback(callback_recv_ringing, av_OnRinging, _retu->av); toxav_register_callstate_callback(callback_recv_starting, av_OnStarting, _retu->av); toxav_register_callstate_callback(callback_recv_ending, av_OnEnding, _retu->av); toxav_register_callstate_callback(callback_recv_error, av_OnError, _retu->av); toxav_register_callstate_callback(callback_requ_timeout, av_OnRequestTimeout, _retu->av); /* ------------------ */ return _retu; }
int ff_load_image(uint8_t *data[4], int linesize[4], int *w, int *h, enum AVPixelFormat *pix_fmt, const char *filename, void *log_ctx) { AVInputFormat *iformat = NULL; AVFormatContext *format_ctx = NULL; AVCodec *codec; AVCodecContext *codec_ctx; AVFrame *frame; int frame_decoded, ret = 0; AVPacket pkt; av_init_packet(&pkt); av_register_all(); iformat = av_find_input_format("image2"); if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) { av_log(log_ctx, AV_LOG_ERROR, "Failed to open input file '%s'\n", filename); return ret; } codec_ctx = format_ctx->streams[0]->codec; codec = avcodec_find_decoder(codec_ctx->codec_id); if (!codec) { av_log(log_ctx, AV_LOG_ERROR, "Failed to find codec\n"); ret = AVERROR(EINVAL); goto end; } if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) { av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n"); goto end; } if (!(frame = avcodec_alloc_frame()) ) { av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n"); ret = AVERROR(ENOMEM); goto end; } ret = av_read_frame(format_ctx, &pkt); if (ret < 0) { av_log(log_ctx, AV_LOG_ERROR, "Failed to read frame from file\n"); goto end; } ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt); if (ret < 0 || !frame_decoded) { av_log(log_ctx, AV_LOG_ERROR, "Failed to decode image from file\n"); goto end; } ret = 0; *w = frame->width; *h = frame->height; *pix_fmt = frame->format; if ((ret = av_image_alloc(data, linesize, *w, *h, *pix_fmt, 16)) < 0) goto end; ret = 0; av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h); end: av_free_packet(&pkt); avcodec_close(codec_ctx); avformat_close_input(&format_ctx); av_freep(&frame); if (ret < 0) av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename); return ret; }
/**
 * Create and start an emulated PS3 audio decoder (cellAdec).
 *
 * Registers a PPU callback thread for the decoder, then spawns a detached
 * worker thread that pops AdecTask jobs (start/end sequence, decode AU,
 * close) and drives libavformat/libavcodec: the "oma" demuxer is opened
 * lazily on the first decode request, decoded ATRAC3+ frames are pushed to
 * adec.frames, and completion is reported back to the guest via
 * ExecAsCallback. Returns the new decoder id.
 *
 * NOTE(review): the worker reads guest memory through adec.reader (consumed
 * by the custom AVIO read callback registered elsewhere); exact statement
 * order here is load-bearing — do not reorder.
 */
u32 adecOpen(AudioDecoder* data)
{
	AudioDecoder& adec = *data;

	adec.adecCb = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);

	u32 adec_id = cellAdec.GetNewId(data);

	adec.id = adec_id;

	adec.adecCb->SetName("Audio Decoder[" + std::to_string(adec_id) + "] Callback");

	thread t("Audio Decoder[" + std::to_string(adec_id) + "] Thread", [&]()
	{
		ConLog.Write("Audio Decoder enter()");

		AdecTask& task = adec.task;

		while (true)
		{
			if (Emu.IsStopped())
			{
				break;
			}

			// Idle-wait while running with no queued job.
			if (adec.job.IsEmpty() && adec.is_running)
			{
				Sleep(1);
				continue;
			}

			/*if (adec.frames.GetCount() >= 50)
			{
				Sleep(1);
				continue;
			}*/

			if (!adec.job.Pop(task))
			{
				break;
			}

			switch (task.type)
			{
			case adecStartSeq:
			{
				// TODO: reset data
				ConLog.Warning("adecStartSeq:");

				// Reset the guest-memory reader state for a fresh sequence.
				adec.reader.addr = 0;
				adec.reader.size = 0;
				adec.reader.init = false;
				if (adec.reader.rem) free(adec.reader.rem);
				adec.reader.rem = nullptr;
				adec.reader.rem_size = 0;
				adec.is_running = true;
				adec.just_started = true;
			}
			break;

			case adecEndSeq:
			{
				// TODO: finalize
				ConLog.Warning("adecEndSeq:");

				/*Callback cb;
				cb.SetAddr(adec.cbFunc);
				cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);
				cb.Branch(true); // ???*/
				adec.adecCb->ExecAsCallback(adec.cbFunc, true, adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);

				avcodec_close(adec.ctx);
				avformat_close_input(&adec.fmt);

				adec.is_running = false;
			}
			break;

			case adecDecodeAu:
			{
				int err = 0;

				// Point the reader at the access unit supplied by the guest.
				adec.reader.addr = task.au.addr;
				adec.reader.size = task.au.size;
				//ConLog.Write("Audio AU: size = 0x%x, pts = 0x%llx", task.au.size, task.au.pts);

				//if (adec.last_pts > task.au.pts || adec.just_started) adec.last_pts = task.au.pts;
				if (adec.just_started) adec.last_pts = task.au.pts;

				// RAII packet: frees its av_malloc'd buffer on scope exit.
				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
							data = (u8*)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
							memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
						//av_free_packet(this);
					}

				} au(0);

				/*{
					wxFile dump;
					dump.Open(wxString::Format("audio pts-0x%llx.dump", task.au.pts), wxFile::write);
					u8* buf = (u8*)malloc(task.au.size);
					if (Memory.CopyToReal(buf, task.au.addr, task.au.size)) dump.Write(buf, task.au.size);
					free(buf);
					dump.Close();
				}*/

				if (adec.just_started) // deferred initialization
				{
					err = avformat_open_input(&adec.fmt, NULL, av_find_input_format("oma"), NULL);
					if (err)
					{
						ConLog.Error("adecDecodeAu: avformat_open_input() failed");
						Emu.Pause();
						break;
					}
					AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P); // ???
					if (!codec)
					{
						ConLog.Error("adecDecodeAu: avcodec_find_decoder() failed");
						Emu.Pause();
						break;
					}
					/*err = avformat_find_stream_info(adec.fmt, NULL);
					if (err)
					{
						ConLog.Error("adecDecodeAu: avformat_find_stream_info() failed");
						Emu.Pause();
						break;
					}
					if (!adec.fmt->nb_streams)
					{
						ConLog.Error("adecDecodeAu: no stream found");
						Emu.Pause();
						break;
					}*/
					if (!avformat_new_stream(adec.fmt, codec))
					{
						ConLog.Error("adecDecodeAu: avformat_new_stream() failed");
						Emu.Pause();
						break;
					}
					adec.ctx = adec.fmt->streams[0]->codec; // TODO: check data

					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						// avcodec_open2 is not multithread-safe; serialize globally.
						SMutexGeneralLocker lock(g_mutex_avcodec_open2);
						err = avcodec_open2(adec.ctx, codec, &opts);
					}
					if (err)
					{
						ConLog.Error("adecDecodeAu: avcodec_open2() failed");
						Emu.Pause();
						break;
					}
					adec.just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped())
					{
						ConLog.Warning("adecDecodeAu aborted");
						return;
					}

					/*if (!adec.ctx) // fake
					{
						AdecFrame frame;
						frame.pts = task.au.pts;
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = 4096;
						frame.data = nullptr;
						adec.frames.Push(frame);

						adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);

						break;
					}*/

					// On EOF keep feeding an empty packet to flush the decoder.
					last_frame = av_read_frame(adec.fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					// RAII frame: unrefs/frees the AVFrame unless ownership is
					// transferred by nulling `data` below.
					struct AdecFrameHolder : AdecFrame
					{
						AdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~AdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						ConLog.Error("adecDecodeAu: av_frame_alloc() failed");
						Emu.Pause();
						break;
					}

					int got_frame = 0;

					int decode = avcodec_decode_audio4(adec.ctx, frame.data, &got_frame, &au);

					if (decode <= 0)
					{
						if (!last_frame && decode < 0)
						{
							ConLog.Error("adecDecodeAu: AU decoding error(0x%x)", decode);
						}
						// Stop once the decoder is drained and no input remains.
						if (!got_frame && adec.reader.size == 0) break;
					}

					if (got_frame)
					{
						frame.pts = adec.last_pts;
						adec.last_pts += ((u64)frame.data->nb_samples) * 90000 / 48000; // ???
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = frame.data->nb_samples * frame.data->channels * sizeof(float);

						if (frame.data->format != AV_SAMPLE_FMT_FLTP)
						{
							ConLog.Error("adecDecodeaAu: unsupported frame format(%d)", frame.data->format);
							Emu.Pause();
							break;
						}
						if (frame.data->channels != 2)
						{
							ConLog.Error("adecDecodeAu: unsupported channel count (%d)", frame.data->channels);
							Emu.Pause();
							break;
						}

						//ConLog.Write("got audio frame (pts=0x%llx, nb_samples=%d, ch=%d, sample_rate=%d, nbps=%d)",
							//frame.pts, frame.data->nb_samples, frame.data->channels, frame.data->sample_rate,
							//av_get_bytes_per_sample((AVSampleFormat)frame.data->format));

						adec.frames.Push(frame);
						frame.data = nullptr; // to prevent destruction

						/*Callback cb;
						cb.SetAddr(adec.cbFunc);
						cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
						cb.Branch(false);*/
						adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
					}
				}

				/*Callback cb;
				cb.SetAddr(adec.cbFunc);
				cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
				cb.Branch(false);*/
				adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
			}
			break;

			case adecClose:
			{
				adec.is_finished = true;
				ConLog.Write("Audio Decoder exit");
				return;
			}

			default:
				ConLog.Error("Audio Decoder error: unknown task(%d)", task.type);
			}
		}

		adec.is_finished = true;
		ConLog.Warning("Audio Decoder aborted");
	});

	t.detach();

	return adec_id;
}
// open video capture device void VideoFFmpeg::openCam (char *file, short camIdx) { // open camera source AVInputFormat *inputFormat; AVDictionary *formatParams = NULL; char filename[28], rateStr[20]; #ifdef WIN32 // video capture on windows only through Video For Windows driver inputFormat = av_find_input_format("vfwcap"); if (!inputFormat) // Video For Windows not supported?? return; sprintf(filename, "%d", camIdx); #else // In Linux we support two types of devices: VideoForLinux and DV1394. // the user specify it with the filename: // [<device_type>][:<standard>] // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l' // <standard> : 'pal', 'secam' or 'ntsc'. By default 'ntsc' // The driver name is constructed automatically from the device type: // v4l : /dev/video<camIdx> // dv1394: /dev/dv1394/<camIdx> // If you have different driver name, you can specify the driver name explicitly // instead of device type. Examples of valid filename: // /dev/v4l/video0:pal // /dev/ieee1394/1:ntsc // dv1394:secam // v4l:pal char *p; if (file && strstr(file, "1394") != NULL) { // the user specifies a driver, check if it is v4l or d41394 inputFormat = av_find_input_format("dv1394"); sprintf(filename, "/dev/dv1394/%d", camIdx); } else { const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"}; int i, formatsCount = sizeof(formats) / sizeof(char*); for (i = 0; i < formatsCount; i++) { inputFormat = av_find_input_format(formats[i]); if (inputFormat) break; } sprintf(filename, "/dev/video%d", camIdx); } if (!inputFormat) // these format should be supported, check ffmpeg compilation return; if (file && strncmp(file, "/dev", 4) == 0) { // user does not specify a driver strncpy(filename, file, sizeof(filename)); filename[sizeof(filename)-1] = 0; if ((p = strchr(filename, ':')) != 0) *p = 0; } if (file && (p = strchr(file, ':')) != NULL) { av_dict_set(&formatParams, "standard", p+1, 0); } #endif //frame rate if (m_captRate <= 0.f) 
m_captRate = defFrameRate; sprintf(rateStr, "%f", m_captRate); av_dict_set(&formatParams, "framerate", rateStr, 0); if (m_captWidth > 0 && m_captHeight > 0) { char video_size[64]; BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight); av_dict_set(&formatParams, "video_size", video_size, 0); } if (openStream(filename, inputFormat, &formatParams) != 0) return; // for video capture it is important to do non blocking read m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK; // open base class VideoBase::openCam(file, camIdx); // check if we should do multi-threading? if (BLI_system_thread_count() > 1) { // no need to thread if the system has a single core m_isThreaded = true; } av_dict_free(&formatParams); }
// Construct an emulated video decoder: pick the libavcodec decoder and input
// demuxer for the requested codec type, allocate the format context, and wire
// a custom AVIO read callback (vdecRead) that pulls data from guest memory.
// Throws on any unsupported type or allocation failure.
//
// Fixes vs. previous revision:
//  - av_malloc(4096) result was passed to avio_alloc_context unchecked
//  - fmt leaked when the constructor threw after allocating it
VideoDecoder::VideoDecoder(s32 type, u32 profile, u32 addr, u32 size, vm::ptr<CellVdecCbMsg> func, u32 arg)
	: type(type)
	, profile(profile)
	, memAddr(addr)
	, memSize(size)
	, memBias(0)
	, cbFunc(func)
	, cbArg(arg)
	, is_finished(false)
	, is_closed(false)
	, just_started(false)
	, just_finished(false)
	, frc_set(0)
	, codec(nullptr)
	, input_format(nullptr)
	, ctx(nullptr)
{
	av_register_all();
	avcodec_register_all();

	// All three PS3 codec types arrive wrapped in an MPEG program stream.
	switch (type)
	{
	case CELL_VDEC_CODEC_TYPE_MPEG2:
	{
		codec = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
		input_format = av_find_input_format("mpeg");
		break;
	}
	case CELL_VDEC_CODEC_TYPE_AVC:
	{
		codec = avcodec_find_decoder(AV_CODEC_ID_H264);
		input_format = av_find_input_format("mpeg");
		break;
	}
	case CELL_VDEC_CODEC_TYPE_DIVX:
	{
		codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
		input_format = av_find_input_format("mpeg");
		break;
	}
	default:
	{
		throw EXCEPTION("Unknown type (0x%x)", type);
	}
	}

	if (!codec)
	{
		throw EXCEPTION("avcodec_find_decoder() failed");
	}
	if (!input_format)
	{
		throw EXCEPTION("av_find_input_format() failed");
	}

	fmt = avformat_alloc_context();
	if (!fmt)
	{
		throw EXCEPTION("avformat_alloc_context() failed");
	}

	io_buf = (u8*)av_malloc(4096);
	if (!io_buf)
	{
		// free fmt: the destructor does not run when the ctor throws
		avformat_free_context(fmt);
		throw EXCEPTION("av_malloc() failed");
	}

	fmt->pb = avio_alloc_context(io_buf, 4096, 0, this, vdecRead, NULL, NULL);
	if (!fmt->pb)
	{
		av_free(io_buf);
		avformat_free_context(fmt);
		throw EXCEPTION("avio_alloc_context() failed");
	}
}
/**
 * Probe the input stream and pick a libavformat demuxer.
 *
 * Peeks the head of the stream into a padded probe buffer, honors a
 * user-forced "avformat-format" first, otherwise lets av_probe_input_format
 * guess. Unless the module is explicitly forced, a blacklist of formats
 * better handled by other VLC demuxers (MPEG variants, redirectors,
 * subtitles, raw h264/hevc) is rejected, and the raw "psxstr" detection is
 * only trusted for .str/.xai/.xa file extensions.
 *
 * On success *pp_fmt is set and VLC_SUCCESS returned; otherwise VLC_EGENERIC
 * or VLC_ENOMEM.
 */
static int avformat_ProbeDemux( vlc_object_t *p_this,
                                AVInputFormat **pp_fmt, const char *psz_url )
{
    demux_t       *p_demux = (demux_t*)p_this;
    AVProbeData   pd = { 0 };
    const uint8_t *peek;

    /* Init Probe data */
    pd.buf_size = vlc_stream_Peek( p_demux->s, &peek, 2048 + 213 );
    if( pd.buf_size <= 0 )
    {
        msg_Warn( p_demux, "cannot peek" );
        return VLC_EGENERIC;
    }

    /* lavf requires AVPROBE_PADDING_SIZE zero bytes after the probe data */
    pd.buf = malloc( pd.buf_size + AVPROBE_PADDING_SIZE );
    if( unlikely(pd.buf == NULL) )
        return VLC_ENOMEM;

    memcpy( pd.buf, peek, pd.buf_size );
    memset( pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE );

    if( psz_url != NULL )
        msg_Dbg( p_demux, "trying url: %s", psz_url );

    pd.filename = psz_url;

    vlc_init_avformat(p_this);

    /* Guess format: a user-forced format name takes precedence */
    char *psz_format = var_InheritString( p_this, "avformat-format" );
    if( psz_format )
    {
        if( (*pp_fmt = av_find_input_format(psz_format)) )
            msg_Dbg( p_demux, "forcing format: %s", (*pp_fmt)->name );
        free( psz_format );
    }

    if( *pp_fmt == NULL )
        *pp_fmt = av_probe_input_format( &pd, 1 );

    /* probe buffer no longer needed — safe to free before the checks below */
    free( pd.buf );

    if( *pp_fmt == NULL )
    {
        msg_Dbg( p_demux, "couldn't guess format" );
        return VLC_EGENERIC;
    }

    if( !p_demux->obj.force )
    {
        static const char ppsz_blacklist[][16] = {
            /* Don't handle MPEG unless forced */
            "mpeg", "vcd", "vob", "mpegts",
            /* libavformat's redirector won't work */
            "redir", "sdp",
            /* Don't handle subtitles format */
            "ass", "srt", "microdvd",
            /* No timestamps at all */
            "hevc", "h264",
            ""  /* sentinel: empty string terminates the scan */
        };

        for( int i = 0; *ppsz_blacklist[i]; i++ )
        {
            if( !strcmp( (*pp_fmt)->name, ppsz_blacklist[i] ) )
                return VLC_EGENERIC;
        }
    }

    /* Don't trigger false alarms on bin files */
    if( !p_demux->obj.force && !strcmp( (*pp_fmt)->name, "psxstr" ) )
    {
        int i_len;

        if( !p_demux->psz_filepath )
            return VLC_EGENERIC;

        i_len = strlen( p_demux->psz_filepath );
        if( i_len < 4 )
            return VLC_EGENERIC;

        /* only trust psxstr detection for known PSX stream extensions */
        if( strcasecmp( &p_demux->psz_filepath[i_len - 4], ".str" ) &&
            strcasecmp( &p_demux->psz_filepath[i_len - 4], ".xai" ) &&
            strcasecmp( &p_demux->psz_filepath[i_len - 3], ".xa" ) )
        {
            return VLC_EGENERIC;
        }
    }

    msg_Dbg( p_demux, "detected format: %s", (*pp_fmt)->name );

    return VLC_SUCCESS;
}