Example #1
static int index_rebuild_ffmpeg(struct anim * anim, 
				IMB_Timecode_Type tcs_in_use,
				IMB_Proxy_Size proxy_sizes_in_use,
				int quality,
				short *stop, short *do_update, 
				float *progress)
{
	int i, videoStream;
	unsigned long long seek_pos = 0;
	unsigned long long last_seek_pos = 0;
	unsigned long long seek_pos_dts = 0;
	unsigned long long seek_pos_pts = 0;
	unsigned long long last_seek_pos_dts = 0;
	unsigned long long start_pts = 0;
	double frame_rate;
	double pts_time_base;
	int frameno = 0;
	int start_pts_set = FALSE;

	AVFormatContext *iFormatCtx;
	AVCodecContext *iCodecCtx;
	AVCodec *iCodec;
	AVStream *iStream;
	AVFrame* in_frame = 0;
	AVPacket next_packet;
	int streamcount;

	struct proxy_output_ctx * proxy_ctx[IMB_PROXY_MAX_SLOT];
	anim_index_builder * indexer [IMB_TC_MAX_SLOT];

	int num_proxy_sizes = IMB_PROXY_MAX_SLOT;
	int num_indexers = IMB_TC_MAX_SLOT;
	uint64_t stream_size;

	memset(proxy_ctx, 0, sizeof(proxy_ctx));
	memset(indexer, 0, sizeof(indexer));

	if(av_open_input_file(&iFormatCtx, anim->name, NULL, 0, NULL) != 0) {
		return 0;
	}

	if (av_find_stream_info(iFormatCtx) < 0) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	streamcount = anim->streamindex;

	/* Find the video stream */
	videoStream = -1;
	for (i = 0; i < iFormatCtx->nb_streams; i++)
		if(iFormatCtx->streams[i]->codec->codec_type
		   == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	iStream = iFormatCtx->streams[videoStream];
	iCodecCtx = iStream->codec;

	iCodec = avcodec_find_decoder(iCodecCtx->codec_id);
	
	if (iCodec == NULL) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	iCodecCtx->workaround_bugs = 1;

	if (avcodec_open(iCodecCtx, iCodec) < 0) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	in_frame = avcodec_alloc_frame();

	stream_size = avio_size(iFormatCtx->pb);

	for (i = 0; i < num_proxy_sizes; i++) {
		if (proxy_sizes_in_use & proxy_sizes[i]) {
			proxy_ctx[i] = alloc_proxy_output_ffmpeg(
				anim, iStream, proxy_sizes[i],
				iCodecCtx->width * proxy_fac[i],
				iCodecCtx->height * proxy_fac[i],
				quality);
			if (!proxy_ctx[i]) {
				proxy_sizes_in_use &= ~proxy_sizes[i];
			}
		}
	}

	for (i = 0; i < num_indexers; i++) {
		if (tcs_in_use & tc_types[i]) {
			char fname[FILE_MAXDIR+FILE_MAXFILE];

			get_tc_filename(anim, tc_types[i], fname);

			indexer[i] = IMB_index_builder_create(fname);
			if (!indexer[i]) {
				tcs_in_use &= ~tc_types[i];
			}
		}
	}

	frame_rate = av_q2d(iStream->r_frame_rate);
	pts_time_base = av_q2d(iStream->time_base);

	while(av_read_frame(iFormatCtx, &next_packet) >= 0) {
		int frame_finished = 0;
		float next_progress =  ((int)floor(((double) next_packet.pos) * 100 /
		                                   ((double) stream_size)+0.5)) / 100;

		if (*progress != next_progress) {
			*progress = next_progress;
			*do_update = 1;
		}

		if (*stop) {
			av_free_packet(&next_packet);
			break;
		}

		if (next_packet.stream_index == videoStream) {
			if (next_packet.flags & AV_PKT_FLAG_KEY) {
				last_seek_pos = seek_pos;
				last_seek_pos_dts = seek_pos_dts;
				seek_pos = next_packet.pos;
				seek_pos_dts = next_packet.dts;
				seek_pos_pts = next_packet.pts;
			}

			avcodec_decode_video2(
				iCodecCtx, in_frame, &frame_finished, 
				&next_packet);
		}

		if (frame_finished) {
			unsigned long long s_pos = seek_pos;
			unsigned long long s_dts = seek_pos_dts;
			unsigned long long pts 
				= av_get_pts_from_frame(iFormatCtx, in_frame);

			for (i = 0; i < num_proxy_sizes; i++) {
				add_to_proxy_output_ffmpeg(
					proxy_ctx[i], in_frame);
			}

			if (!start_pts_set) {
				start_pts = pts;
				start_pts_set = TRUE;
			}

			frameno = (pts - start_pts) 
				* pts_time_base * frame_rate; 

			/* Decoding always starts on an I-frame, so a
			   P-frame cannot be reached through an I-frame
			   that is *presented* after it but stored
			   *before* it in the stream; in that case fall
			   back to the previous seek position. */

			if (pts < seek_pos_pts) {
				s_pos = last_seek_pos;
				s_dts = last_seek_pos_dts;
			}

			for (i = 0; i < num_indexers; i++) {
				if (tcs_in_use & tc_types[i]) {
					IMB_index_builder_proc_frame(
						indexer[i], 
						next_packet.data, 
						next_packet.size,
						frameno, s_pos,	s_dts, pts);
				}
			}
		}
		av_free_packet(&next_packet);
	}

	for (i = 0; i < num_indexers; i++) {
		if (tcs_in_use & tc_types[i]) {
			IMB_index_builder_finish(indexer[i], *stop);
		}
	}

	for (i = 0; i < num_proxy_sizes; i++) {
		if (proxy_sizes_in_use & proxy_sizes[i]) {
			free_proxy_output_ffmpeg(proxy_ctx[i], *stop);
		}
	}

	av_free(in_frame);

	return 1;
}
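
Note: this example is written against the pre-0.8 FFmpeg API (av_open_input_file, avcodec_open, avcodec_alloc_frame), all of which have since been removed. For comparison, here is a minimal sketch of the same open-and-decode skeleton against the current send/receive API; the function name and error handling are illustrative, not part of the original example.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Minimal modern-API skeleton: open an input, pick the best video stream,
 * decode every packet with avcodec_send_packet()/avcodec_receive_frame(). */
static int decode_all(const char *path)
{
    AVFormatContext *fmt = NULL;
    AVCodecContext *dec_ctx = NULL;
    const AVCodec *dec = NULL;
    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    int ret = -1, vidx = -1;

    if (!pkt || !frame)
        goto end;
    if (avformat_open_input(&fmt, path, NULL, NULL) < 0)
        goto end;
    if (avformat_find_stream_info(fmt, NULL) < 0)
        goto end;

    vidx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (vidx < 0)
        goto end;

    dec = avcodec_find_decoder(fmt->streams[vidx]->codecpar->codec_id);
    if (!dec)
        goto end;
    dec_ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(dec_ctx, fmt->streams[vidx]->codecpar);
    if (avcodec_open2(dec_ctx, dec, NULL) < 0)
        goto end;

    while (av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index == vidx) {
            avcodec_send_packet(dec_ctx, pkt);
            while (avcodec_receive_frame(dec_ctx, frame) == 0) {
                /* ... use frame->data / frame->pts here ... */
            }
        }
        av_packet_unref(pkt);
    }
    ret = 0;

end:
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt);
    return ret;
}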
Example #2
int main(int argc, char *argv[]) {

    char *filename = NULL;
    //char *filename_suffix = NULL;
    char *inputsource = NULL;
    char *outputDB = NULL;

    //Find the last / in passed filename.
    if (strrchr(argv[1],'/') == NULL) {
        if (strcmp(argv[1],"-") == 0) {
            inputsource = "/dev/stdin";
            if (argv[2] == NULL) {
                printf("Please input a name for the movie!\n");
                return -1;
            } else {
                if (strrchr(argv[2],'/') == NULL) {
                    filename = argv[2];
                } else {
                    filename = strrchr(argv[2],'/') + 1;
                }

                if (argv[3] != NULL && argc == 4) {
                    outputDB = argv[3];
                } else {
                    outputDB = "/home/gsc/videoaudiofingerprint.db";
                }

            }
        } else {
            filename = argv[1];
            inputsource = argv[1];

            if (argv[2] != NULL && argc == 3) {
                outputDB = argv[2];
            } else {
                outputDB = "/home/gsc/videoaudiofingerprint.db";
            }
        }
    } else {
        filename = strrchr(argv[1],'/') + 1;
        inputsource = argv[1];

        if (argv[3] != NULL && argc == 4) {
            outputDB = argv[3];
        } else {
            outputDB = "/home/gsc/videoaudiofingerprint.db";
        }
    }

    printf("Filename = %s Input source = %s DB output = %s argc = %d\n",filename,inputsource,outputDB, argc);

    /*** DB initialization ***/
    int retval = 0;

    // Create a handle for database connection, create a pointer to sqlite3
    sqlite3 *handle;

    //Full array init of size 5h@60fps (a.k.a. large enough)
    //TO FIX: use dynamic array?
    int *fullArray = (int*) calloc ( (1080000-1), sizeof (int));

    // Create the database. If it doesn't exist, it will be created.
    // Pass a pointer to the pointer to sqlite3, in short sqlite3**

    retval = sqlite3_open(outputDB,&handle);
    // If connection failed, handle returns NULL
    if(retval) {
        printf("Database connection failed\n");
        return -1;
    }

    char query1[] = "create table allmovies (allmovieskey INTEGER PRIMARY KEY,name TEXT,fps INTEGER, date INTEGER);";
    // Execute the query for creating the table
    retval = sqlite3_exec(handle,query1,0,0,0);
    char query2[] = "PRAGMA count_changes = OFF";
    retval = sqlite3_exec(handle,query2,0,0,0);
    char query3[] = "PRAGMA synchronous = OFF";
    retval = sqlite3_exec(handle,query3,0,0,0);

    //Hashluma table
    char query_hash[] = "create table hashluma (avg_range int, movies TEXT)";
    retval = sqlite3_exec(handle,query_hash,0,0,0);

    if (!retval) {
        //Populating the hash tables
        printf("Populating hashluma table\n");
        char hashquery[50];
        memset(hashquery, 0, 50);
        int i = 0;
        for(i=0; i <= 254; i++) {
            sprintf(hashquery, "insert into hashluma (avg_range) values (%d)", i);
            retval = sqlite3_exec(handle,hashquery,0,0,0);
        }
    }

    char table_query[150];
    memset(table_query, 0, 150);
    sprintf(table_query,"create table '%s' (s_end FLOAT, luma INTEGER);",filename);

    int repeated = 0;

    retval = sqlite3_exec(handle,table_query,0,0,0);
    if (retval) {
        char error [100];
        memset(error, 0, 100);
        sprintf(error,"Table for movie %s already exists! Skipping fingerprinting ... \n",filename);
        printf("%s",error);
        //Decide which is the best policy, not FP? overwrite? new file?
        repeated = 1;
        sqlite3_close(handle);
        return 0;
    }
    /*** DB init finished ***/

    printf("Analyzing video %s\n",filename);

    av_register_all();

    AVFormatContext *pFormatCtx;

    // Open video file
    if(av_open_input_file(&pFormatCtx, inputsource, NULL, 0, NULL)!=0) {
        printf("Could't open file %s\n", argv[1]);
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0) {
        printf("Could't find stream information\n");
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, filename, 0);

    int i;
    AVCodecContext *pVideoCodecCtx;
    AVCodecContext *pAudioCodecCtx;

    // Find the first video stream
    int videoStream=-1;
    int audioStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream==-1)
            videoStream=i;

        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream==-1)
            audioStream=i;

    }

    if(videoStream==-1 || audioStream==-1)
        return -1; // Didn't find both streams

    // Get a pointer to the codec context for the video stream
    pVideoCodecCtx=pFormatCtx->streams[videoStream]->codec;
    // Similar, for audio stream
    pAudioCodecCtx=pFormatCtx->streams[audioStream]->codec;

    AVCodec *pVideoCodec;
    AVCodec *pAudioCodec;

    // Find the decoder for the streams
    pVideoCodec=avcodec_find_decoder(pVideoCodecCtx->codec_id);
    pAudioCodec=avcodec_find_decoder(pAudioCodecCtx->codec_id);
    if(pVideoCodec==NULL) {
        fprintf(stderr, "Unsupported video codec!\n");
        sqlite3_close(handle);
        return -1; // Codec not found
    }
    if(pAudioCodec==NULL) {
        fprintf(stderr, "Unsupported audio codec!\n");
        sqlite3_close(handle);
        return -1; // Codec not found
    }

    // Open codecs
    if(avcodec_open(pVideoCodecCtx, pVideoCodec)<0) {
        sqlite3_close(handle);
        return -1; // Could not open codec
    }
    if(avcodec_open(pAudioCodecCtx, pAudioCodec)<0) {
        sqlite3_close(handle);
        return -1; // Could not open codec
    }

    AVFrame *pVideoFrame;
    AVFrame *pVideoFrameYUV;
    AVFrame *pAudioFrame;

    int samples = 0;

    // Allocate audio/video frame
    pVideoFrame=avcodec_alloc_frame();
    pVideoFrameYUV=avcodec_alloc_frame();
    pAudioFrame=avcodec_alloc_frame();

    if(pVideoFrameYUV==NULL || pVideoFrame==NULL || pAudioFrame==NULL) {
        sqlite3_close(handle);
        return -1;
    }

    uint8_t *videoBuffer;
    int16_t *audioBuffer;

    int numVideoBytes;
    int numAudioBytes;
    // Determine required buffer size and allocate buffer
    numVideoBytes=avpicture_get_size(PIX_FMT_YUV420P, pVideoCodecCtx->width, pVideoCodecCtx->height);
    videoBuffer=(uint8_t *)av_mallocz(numVideoBytes*sizeof(uint8_t));
    numAudioBytes = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
    audioBuffer=(int16_t *)av_mallocz(numAudioBytes);

    // Assign appropriate parts of videoBuffer to image planes in pVideoFrameYUV
    // Note that pVideoFrameYUV is an AVFrame, but AVFrame is a superset of AVPicture
    avpicture_fill((AVPicture *)pVideoFrameYUV, videoBuffer, PIX_FMT_YUV420P, pVideoCodecCtx->width, pVideoCodecCtx->height);

    int frameFinished = 0;
    AVPacket packet;
    av_init_packet(&packet);
    struct SwsContext * sws_context;
    double fps = 0.0;

    struct timeval tv;
    gettimeofday(&tv, NULL);

    char allmovies_query[150];
    memset(allmovies_query, 0, 150);
    fps = (double)pFormatCtx->streams[videoStream]->r_frame_rate.num/(double)pFormatCtx->streams[videoStream]->r_frame_rate.den;
    //if (repeated) {
    //  filename_suffix = (int)tv.tv_sec;
    //  sprintf(filename, "%s_%d", filename, filename_suffix);
    //  sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), filename_suffix);
    //} else {
    sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), (int)tv.tv_sec);
    //}
    retval = sqlite3_exec(handle,allmovies_query,0,0,0);

    printf("%d %d\n",pAudioCodecCtx->sample_rate,pAudioCodecCtx->channels);

    i = 0;
    unsigned int offset = 0; // bytes
    //fftw_complex *in;
    int totalSamples = 0;
    //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
    int counter = 0;
    float audioTime = 0.0;

    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Decode video
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pVideoCodecCtx, pVideoFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                if (pVideoCodecCtx->pix_fmt != PIX_FMT_YUV420P) {
                    // Convert the image from its native format to YUV (PIX_FMT_YUV420P)
                    //img_convert((AVPicture *)pVideoFrameYUV, PIX_FMT_YUV420P, (AVPicture*)pVideoFrame, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height);
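                    // NOTE: creating and freeing the SwsContext on every frame
                    // is wasteful; hoisting it out of the loop (or switching to
                    // sws_getCachedContext) avoids the per-frame reallocation.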
                    sws_context = sws_getContext(pVideoCodecCtx->width, pVideoCodecCtx->height, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

                    sws_scale(sws_context, pVideoFrame->data, pVideoFrame->linesize, 0, pVideoCodecCtx->height, pVideoFrameYUV->data, pVideoFrameYUV->linesize);
                    sws_freeContext(sws_context);

                    retval = AvgFrameImport(pVideoFrameYUV, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                } else {
                    retval = AvgFrameImport(pVideoFrame, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                }
            }
        }
        // Decode audio
        // http://qtdvd.com/guides/ffmpeg.html#decode
        if (packet.stream_index == audioStream) {

            offset = 0;
            int frameSize;
            int length = 0;
            memset(audioBuffer, 0, numAudioBytes); // sizeof(audioBuffer) would only clear a pointer's worth
            while (packet.size > 0) {
                //Leave room for what has already been accumulated
                frameSize = numAudioBytes - offset;

                //Decode directly into the next free position of audioBuffer;
                //frameSize gets set to the decoded frame size, in bytes
                length = avcodec_decode_audio3(pAudioCodecCtx,
                                               (int16_t *)((uint8_t *)audioBuffer + offset),
                                               &frameSize, &packet);
                if (length <= 0) { // Error, see if we can recover.
                    packet.size--;
                    packet.data++;
                }
                else {
                    //Slide pointer to next frame and update size
                    printf("read %d bytes\n", length);
                    packet.size -= length;
                    packet.data += length;

                    //Advance the write offset past the decoded frame
                    //(the old code memcpy'd the buffer onto itself here,
                    //which clobbered previously decoded frames)
                    offset += frameSize;
                }

                //Do something with audioBuffer
                //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
                //if (counter%2)
                //	printf("%f R: %d\n", audioTime, (int16_t)*audioBuffer);
                //else
                //	printf("%f L: %d\n", audioTime, (int16_t)*audioBuffer);
                printf("%f %d\n", audioTime, (int16_t)*audioBuffer);
                fflush(stdout);

            }


            if (offset == 0)
                samples = 0;
            else
                samples = (unsigned int)offset/sizeof(short);

            totalSamples+=samples;

            if (counter%2)
                audioTime+=samples*1.0/pAudioCodecCtx->sample_rate;

            counter++;

        }
    }

    printf("Total time (s) (per audio sample calculation): %f\n",(float)(totalSamples*1.0/pAudioCodecCtx->sample_rate/pAudioCodecCtx->channels));

    //Cut the large fullArray to the movie actual size
    int *shortArray = (int*) calloc ( i, sizeof (int));
    memcpy(shortArray, fullArray, i*sizeof(int));
    free(fullArray);

    //Do magic
    makeIndexes(shortArray, handle, filename, threshold, i, fps);

    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    // Free the YUV image
    av_free(videoBuffer);
    av_free(audioBuffer);
    av_free(pVideoFrameYUV);

    // Free the YUV frame
    av_free(pVideoFrame);
    av_free(pAudioFrame);

    // Close the codec
    avcodec_close(pVideoCodecCtx);
    avcodec_close(pAudioCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    // Close DB handler
    sqlite3_close(handle);

    // Free full array
    free(shortArray);

    return 0;
}
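
Note: the example above splices filename directly into SQL text with sprintf, which breaks (and is injectable) as soon as a name contains a quote. Below is a sketch of the same allmovies insert using SQLite bound parameters; the helper name is illustrative. Table names cannot be bound, so the per-movie table creation would still need manual quoting.

#include <sqlite3.h>

/* Sketch: insert a row into allmovies using bound parameters instead of
 * sprintf'ing the filename into the SQL text. */
static int insert_movie(sqlite3 *handle, const char *filename,
                        int fps_x100, int date)
{
    sqlite3_stmt *stmt = NULL;
    const char *sql =
        "insert into allmovies (name,fps,date) values (?1,?2,?3);";
    int rc = sqlite3_prepare_v2(handle, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK)
        return rc;

    sqlite3_bind_text(stmt, 1, filename, -1, SQLITE_TRANSIENT);
    sqlite3_bind_int(stmt, 2, fps_x100);
    sqlite3_bind_int(stmt, 3, date);

    rc = sqlite3_step(stmt);          /* SQLITE_DONE on success */
    sqlite3_finalize(stmt);
    return rc == SQLITE_DONE ? SQLITE_OK : rc;
}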
Example #3
void D2V::index() {
    AVPacket packet;
    av_init_packet(&packet);

    while (av_read_frame(f->fctx, &packet) == 0) {
        if (stop_processing) {
            stop_processing = false;
            result = ProcessingCancelled;
            fclose(d2v_file);
            closeAudioFiles(audio_files, f->fctx);
            return;
        }

        // Apparently we might receive packets from streams with AVDISCARD_ALL set,
        // and also from streams discovered late, probably.
        if (packet.stream_index != video_stream->index &&
            !audio_files.count(packet.stream_index)) {
            av_free_packet(&packet);
            continue;
        }

        bool okay = true;

        if (packet.stream_index == video_stream->index)
            okay = handleVideoPacket(&packet);
        else
            okay = handleAudioPacket(&packet);

        if (!okay) {
            av_free_packet(&packet);
            result = ProcessingError;
            fclose(d2v_file);
            closeAudioFiles(audio_files, f->fctx);
            return;
        }

        av_free_packet(&packet);
    }

    if (!isDataLineNull()) {
        reorderDataLineFlags();
        lines.push_back(line);
        clearDataLine();
    }


    if (!lines.size()) {
        result = ProcessingFinished;
        fclose(d2v_file);
        closeAudioFiles(audio_files, f->fctx);
        return;
    }


    if (!printHeader()) {
        result = ProcessingError;
        fclose(d2v_file);
        closeAudioFiles(audio_files, f->fctx);
        return;
    }

    if (!printSettings()) {
        result = ProcessingError;
        fclose(d2v_file);
        closeAudioFiles(audio_files, f->fctx);
        return;
    }

    for (size_t i = 0; i < lines.size(); i++) {
        if (stop_processing) {
            stop_processing = false;
            result = ProcessingCancelled;
            fclose(d2v_file);
            closeAudioFiles(audio_files, f->fctx);
            return;
        }

        if (!printDataLine(lines[i])) {
            result = ProcessingError;
            fclose(d2v_file);
            closeAudioFiles(audio_files, f->fctx);
            return;
        }
    }

    if (!printStreamEnd()) {
        result = ProcessingError;
        fclose(d2v_file);
        closeAudioFiles(audio_files, f->fctx);
        return;
    }

    result = ProcessingFinished;
    fclose(d2v_file);
    closeAudioFiles(audio_files, f->fctx);
}
Example #4
mfxStatus FFmpeg_Reader_ReadNextFrame(mfxBitstream *pBS)
{
    MSDK_CHECK_POINTER(pBS, MFX_ERR_NULL_PTR);

    AVPacket packet;
    bool videoFrameFound = false;

    // Read until video frame is found or no more packets (audio or video) in container.
    while(!videoFrameFound)
    {
        if(!av_read_frame(g_pFormatCtx, &packet))
        {
            if(packet.stream_index == g_videoStreamIdx)
            {
                if(g_videoType == MFX_CODEC_AVC)
                {
                    //
                    // Apply MP4 to H264 Annex B filter on buffer
                    //
                    uint8_t *pOutBuf;
                    int outBufSize;
                    int isKeyFrame = packet.flags & AV_PKT_FLAG_KEY;
                    av_bitstream_filter_filter(g_pBsfc, g_pFormatCtx->streams[g_videoStreamIdx]->codec, NULL, &pOutBuf, &outBufSize, packet.data, packet.size, isKeyFrame);

                    // Current approach leads to a duplicate SPS and PPS....., does not seem to be an issue!

                    //
                    // Copy filtered buffer to bitstream
                    //
                    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
                    pBS->DataOffset = 0;
                    memcpy(pBS->Data + pBS->DataLength, pOutBuf, outBufSize);
                    pBS->DataLength += outBufSize; 

                    av_free(pOutBuf);
                }
                else  // MPEG2
                {
                    memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
                    pBS->DataOffset = 0;
                    memcpy(pBS->Data + pBS->DataLength, packet.data, packet.size);
                    pBS->DataLength += packet.size; 
                }

                // We are required to tell MSDK that complete frame is in the bitstream!
                pBS->DataFlag = MFX_BITSTREAM_COMPLETE_FRAME;

                // Save PTS timestamp in stream and in PTS window vector
                //   - DTS is discarded since it's not important
                pBS->TimeStamp = packet.pts;
                g_ptsStack.push_back(pBS->TimeStamp);

                videoFrameFound = true;
            }
#ifdef PROCESS_AUDIO
            else if(packet.stream_index == g_audioStreamIdx)
            {
                // Write the unmodified compressed frame in the media file
                //   - Since this function is also called while retrieving the video stream header, during which the
                //     muxer context (g_pFormatCtxMux) is not yet available, any audio frame preceding the first video frame will be dropped.
                //     (this limitation should be addressed in a future code revision...)
                if(g_pFormatCtxMux) {
                    // Rescale audio time base (likely not needed...)
                    packet.pts			= av_rescale_q(packet.pts, g_audio_dec_time_base, g_pAudioStreamMux->time_base);
                    packet.dts			= packet.pts;
                    packet.stream_index	= g_audioStreamMuxIdx;

                    //double realPTS = av_q2d(g_pAudioStreamMux->time_base) * packet.pts;
                    //printf("PTS A: %6lld (%.3f), DTS: %6lld\n", packet.pts, realPTS, packet.dts);

                    // Write unmodified compressed sample to destination container
                    if (av_interleaved_write_frame(g_pFormatCtxMux, &packet)) {
                        printf("FFMPEG: Error while writing audio frame\n");
                        return MFX_ERR_UNKNOWN;
                    }
                }
            }
#endif

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }
        else
        {
            return MFX_ERR_MORE_DATA;  // Indicates that we reached end of container and to stop video decode
        }
    }

    return MFX_ERR_NONE;
}
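
Note: av_bitstream_filter_filter() and the rest of the legacy bitstream-filter API were removed in FFmpeg 4.0. Below is a sketch of the same MP4-to-Annex-B conversion with the replacement AVBSFContext API (FFmpeg 3.1+; bsf.h is a standalone public header from 4.3 on); the helper names and error handling are illustrative.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/bsf.h>

/* Sketch: set up an h264_mp4toannexb filter for one stream. */
static AVBSFContext *make_annexb_filter(const AVStream *st)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *bsf = NULL;
    if (!f || av_bsf_alloc(f, &bsf) < 0)
        return NULL;
    avcodec_parameters_copy(bsf->par_in, st->codecpar);
    bsf->time_base_in = st->time_base;
    if (av_bsf_init(bsf) < 0) {
        av_bsf_free(&bsf);
        return NULL;
    }
    return bsf;
}

/* Per packet: send it in, then drain the filtered packets. */
static int filter_packet(AVBSFContext *bsf, AVPacket *pkt)
{
    int ret = av_bsf_send_packet(bsf, pkt);  /* takes ownership of pkt's data */
    if (ret < 0)
        return ret;
    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
        /* ... append pkt->data / pkt->size to the output bitstream ... */
        av_packet_unref(pkt);
    }
    return ret == AVERROR(EAGAIN) ? 0 : ret;  /* EAGAIN: needs more input */
}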
Example #5
int main(int argc, char **argv){	
	struct 				tm start_time_tm;
	int outputPorts;

	pthread_t *audioThreads;
	pthread_attr_t custom_sched_attr;	
	int fifo_max_prio = 0;
	int fifo_min_prio = 0;
	int fifo_mid_prio = 0;	
	struct sched_param fifo_param;

	syncbuffer = 0;
	normalbuffer = 0;

	if(argc < 3){
		printf("./<audio_decoder> udp://[IP]:[PORT] [ptsDelay] [Amount of channels] [Channel 0] [Channel n]\n");
		return 0;
	}

	
	ff_ctx = malloc(sizeof(ff_ctx_t));

	av_register_all();
	avformat_network_init();

	InitFF(ff_ctx, argv[1], argv[2]);
	

	if (avformat_open_input (&ff_ctx->avInputCtx, ff_ctx->udp_address, NULL , &ff_ctx->avDic) != 0) {
		printf ("Cloud not open UDP input stream at %s\n", ff_ctx->udp_address);
		return -1;
	}

	if (avformat_find_stream_info(ff_ctx->avInputCtx, NULL) < 0) {
		printf ("Cloud not get stream info\n");
		return -1;
	}

	if ((ff_ctx->audioIndexStream = av_find_best_stream(ff_ctx->avInputCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_ctx->avCodec, 0)) < 0) {
		printf ("No audio streams found\n");
		return -1;
	}

	printf ("Audio stream found at %d\n", ff_ctx->audioIndexStream);

	ff_ctx->avDicentry = av_dict_get(ff_ctx->avInputCtx->metadata, "service_name", NULL, 0);

	if(ff_ctx->avDicentry != NULL){
		strptime( ff_ctx->avDicentry->value, "%Y-%m-%d %H:%M:%S", &start_time_tm);
		start_time = mktime(&start_time_tm);
	}
	else {
		start_time = getSystemTime(NULL);
	}
	
	ff_ctx->avCodecCtx = ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->codec;
	ff_ctx->avCodec = avcodec_find_decoder(ff_ctx->avCodecCtx->codec_id);

	av_dump_format(ff_ctx->avInputCtx, 0, ff_ctx->udp_address, 0);

	if (avcodec_open2 (ff_ctx->avCodecCtx, ff_ctx->avCodec, NULL) < 0) {
		return -1;
	}

	outputPorts = ff_ctx->avCodecCtx->channels;
	InitBF(ff_ctx->avCodecCtx->channels, &to_audio_buffer, TO_AUDIO_BUFFER_SIZE);
	InitBF(ff_ctx->avCodecCtx->channels, &to_jack_buffer, TO_JACK_BUFFER_SIZE);

	//One thread for each channel
	audioThreads = malloc (sizeof(pthread_t)*outputPorts);

	pthread_attr_init(&custom_sched_attr);	
 	pthread_attr_setinheritsched(&custom_sched_attr, PTHREAD_INHERIT_SCHED /* PTHREAD_EXPLICIT_SCHED */);

 	//Options below only are applied when PTHREAD_EXPLICIT_SCHED is used!
 	pthread_attr_setscope(&custom_sched_attr, PTHREAD_SCOPE_SYSTEM );	
 	pthread_attr_setschedpolicy(&custom_sched_attr, SCHED_FIFO);	

 	fifo_max_prio = sched_get_priority_max(SCHED_FIFO);	
 	fifo_min_prio = sched_get_priority_min(SCHED_FIFO);	
 	fifo_mid_prio = (fifo_min_prio + fifo_max_prio) / 2;	
 	fifo_param.sched_priority = fifo_mid_prio;	
 	pthread_attr_setschedparam(&custom_sched_attr, &fifo_param);

 	int i;
 	threadArgs_t args[outputPorts];
 	for (i = 0; i < outputPorts; i++) {
 		args[i].channel = i;
 		args[i].process_block_size = AUDIO_PROCESS_BLOCK_SIZE;
 		if (pthread_create(&audioThreads[i], &custom_sched_attr, audioThreadFunction, &args[i])) {
 			printf ("Unable to create audio_thread %d\n", i);
 			return 0;
 		}
 	}
    
    av_init_packet(&ff_ctx->avPacket);

	static AVFrame frame;
	int frameFinished;
	int nb, ch;

	char samplebuf[30];
	av_get_sample_fmt_string (samplebuf, 30, ff_ctx->avCodecCtx->sample_fmt);
	printf ("Audio sample format is %s\n", samplebuf);

	audio_sync_sample_t **sync_samples;
	sync_samples = malloc (outputPorts*sizeof(audio_sync_sample_t*));

	long double initPTS, PTS, frame_pts_offset;
	unsigned long int frame_count, framePTS, sample_count;

	int sample_rate = ff_ctx->avCodecCtx->sample_rate;

	if (init_jack(&jackCtx, outputPorts)) {
		return 1;
	}

	while(av_read_frame (ff_ctx->avInputCtx, &ff_ctx->avPacket)>=0) {

		if(ff_ctx->avPacket.stream_index == ff_ctx->audioIndexStream ) {
			int contador = 0;
			long double time_1 = getSystemTime(NULL);

			int len = avcodec_decode_audio4 (ff_ctx->avCodecCtx, &frame, &frameFinished, &ff_ctx->avPacket);

			if (frameFinished) {
				int data_size = frame.nb_samples * av_get_bytes_per_sample(frame.format);
				int sync_size = frame.nb_samples * sizeof (audio_sync_sample_t);
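
				/* Note: for planar sample formats each extended_data[ch] plane
				 * holds nb_samples * bytes_per_sample bytes, which is exactly
				 * what data_size computes; an interleaved format would need
				 * data_size multiplied by the channel count instead. */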

				framePTS = av_frame_get_best_effort_timestamp (&frame);

				frame_count = framePTS - ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->start_time;
				frame_pts_offset = frame_count * av_q2d(ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base) ;

				initPTS = start_time + frame_pts_offset + ff_ctx->ptsDelay;

#ifdef _DBG_PTS
				printf ("frame decoded PTS %lu, frame count %lu, TB %d/%d, PTS %Lf\n", framePTS, frame_count, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.num, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.den, initPTS);
#endif

				//Build sync info data, sample timing
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					sync_samples[ch] =  malloc(sync_size);

					PTS = initPTS;

					for (sample_count = 0; sample_count < frame.nb_samples; sample_count++) {
						PTS += (1/(float) sample_rate);
						sync_samples[ch][sample_count].samplePTS = PTS;
					}
				}

#ifdef _DBG_PTS
				printf ("ended samples PTS %Lf\n", PTS);
#endif
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					ProduceSyncToBuffer (&to_audio_buffer, ch, (uint8_t*) sync_samples[ch], sync_size);
					ProduceAudioToBuffer(&to_audio_buffer, ch, (uint8_t*) frame.extended_data[ch], data_size);

					free(sync_samples[ch]);
				}
			}

	       	long double time_2 = getSystemTime(NULL);
	       	adaptativeSleep( (1/READ_INPUT_FRAME_RATE) - (time_2 - time_1));
		}
	}
	return 0;
}
Example #6
static void FFMpegUtils_print(JNIEnv *env, jobject obj, jint pAVFormatContext) {
	int i;
	AVCodecContext *pCodecCtx;
	AVFrame *pFrame;
	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = (AVFormatContext *) pAVFormatContext;
	struct SwsContext *img_convert_ctx;

	LOGD("playing");

	// Find the first video stream
	int videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Didn't find a video stream");
		return;
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Unsupported codec!");
		return; // Codec not found
	}
	// Open codec
	if (avcodec_open(pCodecCtx, pCodec) < 0) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Could not open codec");
		return; // Could not open codec
	}

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Allocate an AVFrame structure
	AVFrame *pFrameRGB = avcodec_alloc_frame();
	if (pFrameRGB == NULL) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Could allocate an AVFrame structure");
		return;
	}

	uint8_t *buffer;
	int numBytes;
	// Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB565, pCodecCtx->width,
			pCodecCtx->height);
	buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB565,
			pCodecCtx->width, pCodecCtx->height);

	int w = pCodecCtx->width;
	int h = pCodecCtx->height;
	img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt, w, h,
			PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);

	int frameFinished;
	AVPacket packet;

	i = 0;
	int result = -1;
	while ((result = av_read_frame(pFormatCtx, &packet)) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
					packet.data, packet.size);

			// Did we get a video frame?
			if (frameFinished) {
				// Convert the image from its native format to RGB
				sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
						pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

				//FFMpegUtils_saveFrame(pFrameRGB, pCodecCtx->width,
				//		pCodecCtx->height, i);
				FFMpegUtils_handleOnVideoFrame(env, obj, pFrame, pCodecCtx->width,
						pCodecCtx->height);
				i++;
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	// Free the RGB image
	av_free(buffer);
	av_free(pFrameRGB);

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	av_close_input_file(pFormatCtx);

	LOGD("end of playing");
}
Example #7
u32 vdecOpen(VideoDecoder* data)
{
	VideoDecoder& vdec = *data;

	vdec.vdecCb = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);

	u32 vdec_id = cellVdec->GetNewId(data);

	vdec.id = vdec_id;

	vdec.vdecCb->SetName("Video Decoder[" + std::to_string(vdec_id) + "] Callback");

	thread t("Video Decoder[" + std::to_string(vdec_id) + "] Thread", [&]()
	{
		cellVdec->Notice("Video Decoder thread started");

		VdecTask& task = vdec.task;

		while (true)
		{
			if (Emu.IsStopped())
			{
				break;
			}

			if (!vdec.job.GetCountUnsafe() && vdec.is_running)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			if (vdec.frames.GetCount() >= 50)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			if (!vdec.job.Pop(task))
			{
				break;
			}

			switch (task.type)
			{
			case vdecStartSeq:
			{
				// TODO: reset data
				cellVdec->Warning("vdecStartSeq:");

				vdec.reader.addr = 0;
				vdec.reader.size = 0;
				vdec.is_running = true;
				vdec.just_started = true;
			}
			break;

			case vdecEndSeq:
			{
				// TODO: finalize
				cellVdec->Warning("vdecEndSeq:");

				vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, vdec.cbArg);
				/*Callback cb;
				cb.SetAddr(vdec.cbFunc);
				cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, vdec.cbArg);
				cb.Branch(true); // ???*/

				vdec.is_running = false;
				vdec.just_finished = true;
			}
			break;

			case vdecDecodeAu:
			{
				int err;

				if (task.mode != CELL_VDEC_DEC_MODE_NORMAL)
				{
					cellVdec->Error("vdecDecodeAu: unsupported decoding mode(%d)", task.mode);
					break;
				}

				vdec.reader.addr = task.addr;
				vdec.reader.size = task.size;
				//LOG_NOTICE(HLE, "Video AU: size = 0x%x, pts = 0x%llx, dts = 0x%llx", task.size, task.pts, task.dts);

				if (vdec.just_started)
				{
					vdec.first_pts = task.pts;
					vdec.last_pts = task.pts;
					vdec.first_dts = task.dts;
				}

				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
							data = (u8*)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
							memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
						//av_free_packet(this);
					}

				} au(0);

				if (vdec.just_started && vdec.just_finished)
				{
					avcodec_flush_buffers(vdec.ctx);
					vdec.just_started = false;
					vdec.just_finished = false;
				}
				else if (vdec.just_started) // deferred initialization
				{
					err = avformat_open_input(&vdec.fmt, NULL, av_find_input_format("mpeg"), NULL);
					if (err)
					{
						cellVdec->Error("vdecDecodeAu: avformat_open_input() failed");
						Emu.Pause();
						break;
					}
					AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264); // ???
					if (!codec)
					{
						cellVdec->Error("vdecDecodeAu: avcodec_find_decoder() failed");
						Emu.Pause();
						break;
					}
					/*err = avformat_find_stream_info(vdec.fmt, NULL);
					if (err)
					{
						LOG_ERROR(HLE, "vdecDecodeAu: avformat_find_stream_info() failed");
						Emu.Pause();
						break;
					}
					if (!vdec.fmt->nb_streams)
					{
						LOG_ERROR(HLE, "vdecDecodeAu: no stream found");
						Emu.Pause();
						break;
					}*/
					if (!avformat_new_stream(vdec.fmt, codec))
					{
						cellVdec->Error("vdecDecodeAu: avformat_new_stream() failed");
						Emu.Pause();
						break;
					}
					vdec.ctx = vdec.fmt->streams[0]->codec; // TODO: check data
						
					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
						// not multithread-safe (???)
						err = avcodec_open2(vdec.ctx, codec, &opts);
					}
					if (err)
					{
						cellVdec->Error("vdecDecodeAu: avcodec_open2() failed");
						Emu.Pause();
						break;
					}
					vdec.just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped() || vdec.job.PeekIfExist().type == vdecClose)
					{
						vdec.is_finished = true;
						cellVdec->Warning("vdecDecodeAu: aborted");
						return;
					}

					last_frame = av_read_frame(vdec.fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					struct VdecFrameHolder : VdecFrame
					{
						VdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~VdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						cellVdec->Error("vdecDecodeAu: av_frame_alloc() failed");
						Emu.Pause();
						break;
					}

					int got_picture = 0;

					int decode = avcodec_decode_video2(vdec.ctx, frame.data, &got_picture, &au);

					if (decode <= 0)
					{
						if (!last_frame && decode < 0)
						{
							cellVdec->Error("vdecDecodeAu: AU decoding error(0x%x)", decode);
						}
						if (!got_picture && vdec.reader.size == 0) break; // video end?
					}

					if (got_picture)
					{
						u64 ts = av_frame_get_best_effort_timestamp(frame.data);
						if (ts != AV_NOPTS_VALUE)
						{
							frame.pts = ts/* - vdec.first_pts*/; // ???
							vdec.last_pts = frame.pts;
						}
						else
						{
							vdec.last_pts += vdec.ctx->time_base.num * 90000 / (vdec.ctx->time_base.den / vdec.ctx->ticks_per_frame);
							frame.pts = vdec.last_pts;
						}
						//frame.pts = vdec.last_pts;
						//vdec.last_pts += 3754;
						frame.dts = (frame.pts - vdec.first_pts) + vdec.first_dts;
						frame.userdata = task.userData;

						//LOG_NOTICE(HLE, "got picture (pts=0x%llx, dts=0x%llx)", frame.pts, frame.dts);

						vdec.frames.Push(frame); // !!!!!!!!
						frame.data = nullptr; // to prevent destruction

						vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, vdec.cbArg);
						/*Callback cb;
						cb.SetAddr(vdec.cbFunc);
						cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, vdec.cbArg);
						cb.Branch(false);*/
					}
				}

				vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, vdec.cbArg);
				/*Callback cb;
				cb.SetAddr(vdec.cbFunc);
				cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, vdec.cbArg);
				cb.Branch(false);*/
			}
			break;

			case vdecClose:
			{
				vdec.is_finished = true;
				cellVdec->Notice("Video Decoder thread ended");
				return;
			}

			case vdecSetFrameRate:
			{
				cellVdec->Error("TODO: vdecSetFrameRate(%d)", task.frc);
			}
			break;

			default:
				cellVdec->Error("Video Decoder thread error: unknown task(%d)", task.type);
			}
		}

		vdec.is_finished = true;
		cellVdec->Warning("Video Decoder thread aborted");
	});

	t.detach();

	return vdec_id;
}
Example #8
/**
* @brief 
*
* @return 
*/
int LocalVideoInput::run()
{
    AVInputFormat *inputFormat = av_find_input_format( "video4linux2" );
    if ( inputFormat == NULL)
        Fatal( "Can't load input format" );

#if 0
    AVProbeData probeData;
    probeData.filename = mSource.c_str();
    probeData.buf = new unsigned char[1024];
    probeData.buf_size = 1024;
    inputFormat = av_probe_input_format( &probeData, 0 );
    if ( inputFormat == NULL)
        Fatal( "Can't probe input format" );

    AVFormatParameters formatParameters ;
    memset( &formatParameters, 0, sizeof(formatParameters) );
    formatParameters.channels = 1;
    formatParameters.channel = 0;
    formatParameters.standard = "PAL";
    formatParameters.pix_fmt = PIX_FMT_RGB24;
    //formatParameters.time_base.num = 1;
    //formatParameters.time_base.den = 10;
    formatParameters.width = 352;
    formatParameters.height = 288;
    //formatParameters.prealloced_context = 1;
#endif

    /* New API */
    AVDictionary *opts = NULL;
    av_dict_set( &opts, "standard", "PAL", 0 );
    av_dict_set( &opts, "video_size", "320x240", 0 );
    av_dict_set( &opts, "channel", "0", 0 );
    av_dict_set( &opts, "pixel_format", "rgb24", 0 );
    //av_dict_set( &opts, "framerate", "10", 0 );
    avDumpDict( opts );

    int avError = 0;
    AVFormatContext *formatContext = NULL;
    //if ( av_open_input_file( &formatContext, mSource.c_str(), inputFormat, 0, &formatParameters ) !=0 )
    if ( (avError = avformat_open_input( &formatContext, mSource.c_str(), inputFormat, &opts )) < 0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), avStrError(avError) );

    avDumpDict( opts );
#if 0
    if ( av_open_input_stream( &formatContext, 0, mSource.c_str(), inputFormat, &formatParameters ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), strerror(errno) );
#endif

    // Locate stream info from input
    if ( (avError = avformat_find_stream_info( formatContext, &opts )) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mSource.c_str(), avStrError(avError) );
    
    if ( dbgLevel > DBG_INF )
        av_dump_format( formatContext, 0, mSource.c_str(), 0 );

    // Find first video stream present
    int videoStreamId = -1;
    for ( int i=0; i < formatContext->nb_streams; i++ )
    {
        if ( formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            videoStreamId = i;
            //set_context_opts( formatContext->streams[i]->codec, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
            break;
        }
    }
    if ( videoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mSource.c_str() );
    mStream = formatContext->streams[videoStreamId];
    mCodecContext = mStream->codec;

    // Try and get the codec from the codec context
    AVCodec *codec = NULL;
    if ( (codec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mSource.c_str() );

    // Open the codec
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mSource.c_str() );

    //AVFrame *savedFrame = avcodec_alloc_frame();

    // Allocate space for the native video frame
    AVFrame *frame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
    
    ByteBuffer frameBuffer( pictureSize );
    
    //avpicture_fill( (AVPicture *)savedFrame, mLastFrame.mBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height);

    AVPacket packet;
    while( !mStop )
    {
        int frameComplete = false;
        while ( !frameComplete && (av_read_frame( formatContext, &packet ) >= 0) )
        {
            Debug( 5, "Got packet from stream %d", packet.stream_index );
            if ( packet.stream_index == videoStreamId )
            {
                frameComplete = false;
                if ( avcodec_decode_video2( mCodecContext, frame, &frameComplete, &packet ) < 0 )
                    Fatal( "Unable to decode frame at frame %ju", mFrameCount );

                Debug( 3, "Decoded video packet at frame %ju, pts %jd", mFrameCount, packet.pts );

                if ( frameComplete )
                {
                    Debug( 3, "Got frame %d, pts %jd (%.3f)", mCodecContext->frame_number, frame->pkt_pts, (((double)(packet.pts-mStream->start_time)*mStream->time_base.num)/mStream->time_base.den) );

                    avpicture_layout( (AVPicture *)frame, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, frameBuffer.data(), frameBuffer.capacity() );

                    uint64_t timestamp = packet.pts;
                    //Debug( 3, "%d: TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );
                    //Info( "%ld:TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );

                    VideoFrame *videoFrame = new VideoFrame( this, mCodecContext->frame_number, timestamp, frameBuffer );
                    distributeFrame( FramePtr( videoFrame ) );
                }
            }
            av_free_packet( &packet );
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();

    av_freep( &frame );
    if ( mCodecContext )
    {
       avcodec_close( mCodecContext );
       mCodecContext = NULL; // Freed by avformat_close_input
    }
    if ( formatContext )
    {
        avformat_close_input( &formatContext );
        formatContext = NULL;
        //av_free( formatContext );
    }
    return( !ended() );
}
Example #9
/*
 * Class:     com_example_testffmpeg_CFFmpegJni
 * Method:    IPlay
 * Signature: ()I
 */
jint Java_com_example_testffmpeg_CFFmpegJni_IPlay(JNIEnv *env, jobject thiz)
{
	/// Return value
	int nRet = -1;
	/// Close any previously opened input
	if(NULL != m_pFormatCtx)
	{
		/// avformat_close_input() frees the context and NULLs the pointer
		avformat_close_input(&m_pFormatCtx);
		m_pFormatCtx = NULL;
	}

	if(NULL == m_pFormatCtx)
	{
		/// Open the input
		if(0 != (nRet = avformat_open_input(&m_pFormatCtx, m_szURLPath, 0, NULL/*&m_pDictOptions*/)))
		{
			char szTemp[256];
			memset(szTemp, 0x00, sizeof(szTemp));
			av_strerror(nRet, szTemp, 255);
			/// Log the error message
			LOGD("%s, Error Code = %d, %s, Error = %s", m_szURLPath, nRet,
					" The Error URL Or Path--------------->", szTemp);
			return nRet;
		}
	}

	// m_pFormatCtx->max_analyze_duration = 1000;
	// m_pFormatCtx->probesize = 2048;
	if(0 > avformat_find_stream_info(m_pFormatCtx, NULL))
	{
		LOGD("Couldn't find stream information.");
		return -1;
	}

	int nVideoIndex = -1;
	for(int i = 0; i < m_pFormatCtx->nb_streams; i++)
	{
		if(AVMEDIA_TYPE_VIDEO == m_pFormatCtx->streams[i]->codec->codec_type)
		{
			nVideoIndex = i;
			break;
		}
	}
	if(-1 == nVideoIndex)
	{
		LOGD("Didn't find a video stream.");
		return -1;
	}

	AVCodecContext* pCodecCtx = m_pFormatCtx->streams[nVideoIndex]->codec;
	AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(NULL == pCodec)
	{
		LOGD("Codec not found.");
		return -1;
	}

	if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
	{
		pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
	}

	if(0 > avcodec_open2(pCodecCtx, pCodec, NULL))
	{
		LOGD("Could not open codec.");
		return -1;
	}

	/// Frame variables
	AVFrame	*pFrame = NULL, *pFrameYUV = NULL;
	pFrame = avcodec_alloc_frame();
	pFrameYUV = avcodec_alloc_frame();
	/// Create the conversion buffer
	int nConvertSize = avpicture_get_size(PIX_FMT_RGB565, iWidth, iHeight);
	uint8_t* pConvertbuffer = new uint8_t[nConvertSize];
	avpicture_fill((AVPicture *)pFrameYUV, pConvertbuffer, PIX_FMT_RGB565, iWidth, iHeight);

	/// Decoder state
	int nCodecRet, nHasGetPicture;
	/// Packet for the compressed data
	int nPackgeSize  = pCodecCtx->width * pCodecCtx->height;
	AVPacket* pAVPacket = (AVPacket *)malloc(sizeof(AVPacket));
	av_new_packet(pAVPacket, nPackgeSize);

	/// Dump the stream information of the input
	av_dump_format(m_pFormatCtx, 0, m_szURLPath, 0);
	/// Set the playback state
	m_bIsPlaying = true;

	/// Pixel format conversion context
	struct SwsContext* img_convert_ctx = NULL;
	/// Set up conversion to RGB565 (the "YUV" names are leftovers; the target format is RGB565)
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		iWidth, iHeight, PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);
	/// Read frames
	while(0 <= av_read_frame(m_pFormatCtx, pAVPacket) && true == m_bIsPlaying)
	{
		/// Is this packet from the video stream?
		if(nVideoIndex == pAVPacket->stream_index)
		{
			/// Decode the packet
			nCodecRet = avcodec_decode_video2(pCodecCtx, pFrame, &nHasGetPicture, pAVPacket);
			if(0 < nHasGetPicture)
			{
				/// Convert the frame to RGB565
				sws_scale(img_convert_ctx, (const uint8_t* const* )pFrame->data, pFrame->linesize,
						0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				/// Hand the converted data to the display callback
				e_DisplayCallBack(env, pConvertbuffer, nConvertSize);
			}
		}
		/// Free the packet data allocated by av_read_frame
		av_free_packet(pAVPacket);
	}
	/// Free the conversion context
	sws_freeContext(img_convert_ctx);

	/// Free the conversion buffer
	delete[] pConvertbuffer;
	pConvertbuffer = NULL;
	/// Free the frame objects
	av_free(pFrame); pFrame = NULL;
	av_free(pFrameYUV); pFrameYUV = NULL;

	/// Close the decoder
	avcodec_close(pCodecCtx); pCodecCtx = NULL;
	avformat_close_input(&m_pFormatCtx);
	/// avformat_close_input() frees the context and NULLs the pointer
	m_pFormatCtx = NULL;
	/// Free the packet object allocated above
	free(pAVPacket); pAVPacket = NULL;
	return nRet;
}
Example #10
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	u64 seek_audio, seek_video;
	Bool video_init, do_seek, map_audio_time, map_video_time;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_CHAN_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;
	seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
	map_video_time = !ffd->seekable;

	video_init = (seek_to && ffd->video_ch) ? 0 : 1;
	seek_audio = seek_video = 0;
	if (ffd->seekable && (ffd->audio_st>=0)) seek_audio = (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den);
	if (ffd->seekable && (ffd->video_st>=0)) seek_video = (u64) (s64) (ffd->seek_time*ffd->video_tscale.den);

	/*it appears that ffmpeg has trouble resyncing on some mpeg files - we trick it by restarting to 0 to get the
	first video frame, and only then seek*/
	if (ffd->seekable) av_seek_frame(ffd->ctx, -1, video_init ? seek_to : 0, AVSEEK_FLAG_BACKWARD);
	do_seek = !video_init;
	map_audio_time = video_init ? ffd->unreliable_audio_timing : 0;

	while (ffd->is_running) {

		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is init*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) && !do_seek) {
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

			if (map_audio_time) {
				map.base.on_channel = ffd->audio_ch;
				map.map_time.media_time = ffd->seek_time;
				/*map with TS=0 since we don't use SL*/
				map.map_time.timestamp = 0;
				map.map_time.reset_buffers = 1;
				map_audio_time = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_audio) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
			}
			gf_term_on_sl_packet(ffd->service, ffd->audio_ch, pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;

			/*if we get pts = 0 after a seek the demuxer is resetting PTSs, so force map time*/
			if ((!do_seek && seek_to && !slh.compositionTimeStamp) || (map_video_time) ) {
				seek_to = 0;
				map_video_time = 0;

				map.base.on_channel = ffd->video_ch;
				map.map_time.timestamp = (u64) pkt.pts;
//				map.map_time.media_time = ffd->seek_time;
				map.map_time.media_time = 0;
				map.map_time.reset_buffers = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_video) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
			}
			gf_term_on_sl_packet(ffd->service, ffd->video_ch, pkt.data, pkt.size, &slh, GF_OK);
			video_init = 1;
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*here's the trick - only seek after sending the first packets of each stream - this allows ffmpeg video decoders
		to resync properly*/
		if (do_seek && video_init && ffd->seekable) {
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			do_seek = 0;
			map_audio_time = ffd->unreliable_audio_timing;
		}
		/*sleep until the buffer occupancy drops below the threshold - note that this works because all streams in this
		demuxer are synchronized*/
		while (1) {
			if (ffd->audio_ch) {
				com.base.on_channel = ffd->audio_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			if (ffd->video_ch) {
				com.base.on_channel = ffd->video_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			gf_sleep(10);

			/*escape if disconnect*/
			if (!ffd->audio_run && !ffd->video_run) break;
		}
		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
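
Note: the demuxer above maps timestamps by multiplying raw pts/dts by the time-base numerator, which presumably relies on the GPAC channels being configured with the stream's time-base denominator as their timescale. The general-purpose tool for such conversions is av_rescale_q(), which rescales between arbitrary time bases without intermediate overflow; a small sketch:

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

/* Sketch: express a packet timestamp in milliseconds, whatever the
 * stream's time_base happens to be. */
static int64_t pkt_pts_ms(const AVPacket *pkt, const AVStream *st)
{
    if (pkt->pts == AV_NOPTS_VALUE)
        return -1;                       /* no timestamp available */
    return av_rescale_q(pkt->pts, st->time_base, (AVRational){1, 1000});
}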
Example #11
int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	char filepath[]="bigbuckbunny_480x272.h265";
	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	FILE *fp_yuv;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

#if OUTPUT_YUV420P 
    fp_yuv=fopen("output.yuv","wb+");  
#endif  
	
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	//SDL End----------------------
	while(av_read_frame(pFormatCtx, packet)>=0){
		if(packet->stream_index==videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);
				
#if OUTPUT_YUV420P
				y_size=pCodecCtx->width*pCodecCtx->height;  
				fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
#else
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
				pFrameYUV->data[0], pFrameYUV->linesize[0],
				pFrameYUV->data[1], pFrameYUV->linesize[1],
				pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				
				SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
				//Delay 40ms
				SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: drain frames still buffered inside the codec; a NULL/empty packet puts
	//avcodec_decode_video2() into draining mode
	packet->data = NULL;
	packet->size = 0;
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size=pCodecCtx->width*pCodecCtx->height;  
		fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
		fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
		//SDL---------------------------
		SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
			pFrameYUV->data[0], pFrameYUV->linesize[0],
			pFrameYUV->data[1], pFrameYUV->linesize[1],
			pFrameYUV->data[2], pFrameYUV->linesize[2]);
		SDL_RenderClear( sdlRenderer );  
		SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
		SDL_RenderPresent( sdlRenderer );  
		//SDL End-----------------------
		//Delay 40ms
		SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
    fclose(fp_yuv);
#endif 

	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
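
A note on the flush loop above: avcodec_decode_video2() only enters draining mode when it is fed a packet with data == NULL and size == 0 (as fixed above). On FFmpeg 3.1 and later the same decode-and-drain flow is normally written with the send/receive API; the following is a minimal sketch under that assumption, not part of the original player:

static int decode_and_drain(AVCodecContext *ctx, AVFrame *frm, AVPacket *pkt)
{
	/* pkt == NULL puts the decoder into draining mode, replacing the manual flush loop */
	int ret = avcodec_send_packet(ctx, pkt);
	if (ret < 0)
		return ret;
	while (ret >= 0) {
		ret = avcodec_receive_frame(ctx, frm);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return 0;   /* needs more input, or fully drained */
		if (ret < 0)
			return ret; /* real decoding error */
		/* ... sws_scale() and SDL rendering would go here, as in the loop above ... */
	}
	return 0;
}
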
Exemplo n.º 12
0
static GF_Err FFD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
	GF_Err e;
	s64 last_aud_pts;
	u32 i;
	s32 res;
	Bool is_local;
	const char *sOpt;
	char *ext, szName[1024];
	FFDemux *ffd = plug->priv;
	AVInputFormat *av_in = NULL;
	char szExt[20];

	if (ffd->ctx) return GF_SERVICE_ERROR;

	assert( url && strlen(url) < 1024);
	strcpy(szName, url);
	ext = strrchr(szName, '#');
	ffd->service_type = 0;
	e = GF_NOT_SUPPORTED;
	ffd->service = serv;

	if (ext) {
		if (!stricmp(&ext[1], "video")) ffd->service_type = 1;
		else if (!stricmp(&ext[1], "audio")) ffd->service_type = 2;
		ext[0] = 0;
	}

	/*some extensions not supported by ffmpeg, overload input format*/
	ext = strrchr(szName, '.');
	strcpy(szExt, ext ? ext+1 : "");
	strlwr(szExt);
	if (!strcmp(szExt, "cmp")) av_in = av_find_input_format("m4v");

	is_local = (strnicmp(url, "file://", 7) && strstr(url, "://")) ? 0 : 1;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] opening file %s - local %d - av_in %08x\n", url, is_local, av_in));

	if (!is_local) {
		AVProbeData   pd;

		/*set up wrapper for FFMPEG I/O*/
		ffd->buffer_size = 8192;
		sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "IOBufferSize");
		if (sOpt) ffd->buffer_size = atoi(sOpt);
		ffd->buffer = gf_malloc(sizeof(char)*ffd->buffer_size);
#ifdef FFMPEG_DUMP_REMOTE
		ffd->outdbg = gf_f64_open("ffdeb.raw", "wb");
#endif
#ifdef USE_PRE_0_7
		init_put_byte(&ffd->io, ffd->buffer, ffd->buffer_size, 0, ffd, ff_url_read, NULL, NULL);
		ffd->io.is_streamed = 1;
#else
		ffd->io.seekable = 1;
#endif

		ffd->dnload = gf_term_download_new(ffd->service, url, GF_NETIO_SESSION_NOT_THREADED  | GF_NETIO_SESSION_NOT_CACHED, NULL, ffd);
		if (!ffd->dnload) return GF_URL_ERROR;
		while (1) {
			u32 read;
			e = gf_dm_sess_fetch_data(ffd->dnload, ffd->buffer + ffd->buffer_used, ffd->buffer_size - ffd->buffer_used, &read);
			if (e==GF_EOS) break;
			/*no data available yet (non-blocking session), keep fetching*/
			if (e==GF_IP_NETWORK_EMPTY) continue;
			if (e) goto err_exit;
			ffd->buffer_used += read;
			if (ffd->buffer_used == ffd->buffer_size) break;
		}
		if (e==GF_EOS) {
			const char *cache_file = gf_dm_sess_get_cache_name(ffd->dnload);
			res = open_file(&ffd->ctx, cache_file, av_in);
		} else {
			pd.filename = szName;
			pd.buf_size = ffd->buffer_used;
			pd.buf = ffd->buffer;
			av_in = av_probe_input_format(&pd, 1);
			if (!av_in) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] error probing file %s - probe start with %c %c %c %c\n", url, ffd->buffer[0], ffd->buffer[1], ffd->buffer[2], ffd->buffer[3]));
				return GF_NOT_SUPPORTED;
			}
			/*setup downloader*/
			av_in->flags |= AVFMT_NOFILE;
#if FF_API_FORMAT_PARAMETERS /*commit ffmpeg 603b8bc2a109978c8499b06d2556f1433306eca7*/
			res = avformat_open_input(&ffd->ctx, szName, av_in, NULL);
#else
			res = av_open_input_stream(&ffd->ctx, &ffd->io, szName, av_in, NULL);
#endif
		}
	} else {
		res = open_file(&ffd->ctx, szName, av_in);
	}

	switch (res) {
#ifndef _WIN32_WCE
	case 0: e = GF_OK; break;
	case AVERROR_IO: e = GF_URL_ERROR; goto err_exit;
	case AVERROR_INVALIDDATA: e = GF_NON_COMPLIANT_BITSTREAM; goto err_exit;
	case AVERROR_NOMEM: e = GF_OUT_OF_MEM; goto err_exit;
	case AVERROR_NOFMT: e = GF_NOT_SUPPORTED; goto err_exit;
#endif
	default: e = GF_SERVICE_ERROR; goto err_exit;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] looking for streams in %s - %d streams - type %s\n", ffd->ctx->filename, ffd->ctx->nb_streams, ffd->ctx->iformat->name));

	res = av_find_stream_info(ffd->ctx);
	if (res <0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] cannot locate streams - error %d\n", res));
		e = GF_NOT_SUPPORTED;
		goto err_exit;
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] file %s opened - %d streams\n", url, ffd->ctx->nb_streams));

	/*figure out if we can use codecs or not*/
	ffd->audio_st = ffd->video_st = -1;
	for (i = 0; i < ffd->ctx->nb_streams; i++) {
		AVCodecContext *enc = ffd->ctx->streams[i]->codec;
		switch(enc->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((ffd->audio_st<0) && (ffd->service_type!=1)) {
				ffd->audio_st = i;
				ffd->audio_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			if ((ffd->video_st<0) && (ffd->service_type!=2)) {
				ffd->video_st = i;
				ffd->video_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		default:
			break;
		}
	}
	if ((ffd->service_type==1) && (ffd->video_st<0)) goto err_exit;
	if ((ffd->service_type==2) && (ffd->audio_st<0)) goto err_exit;
	if ((ffd->video_st<0) && (ffd->audio_st<0)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] No supported streams in file\n"));
		goto err_exit;
	}


	sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "DataBufferMS");
	ffd->data_buffer_ms = 0;
	if (sOpt) ffd->data_buffer_ms = atoi(sOpt);
	if (!ffd->data_buffer_ms) ffd->data_buffer_ms = FFD_DATA_BUFFER;

	/*build seek*/
	if (is_local) {
		/*check that we have increasing pts. If not, we can't rely on pts and must skip SL;
		we assume video pts is always present*/
		if (ffd->audio_st>=0) {
			last_aud_pts = 0;
			for (i=0; i<20; i++) {
				AVPacket pkt;
				pkt.stream_index = -1;
				if (av_read_frame(ffd->ctx, &pkt) <0) break;
				if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
				if (pkt.stream_index==ffd->audio_st) last_aud_pts = pkt.pts;
			}
			if (last_aud_pts*ffd->audio_tscale.den<10*ffd->audio_tscale.num) ffd->unreliable_audio_timing = 1;
		}

		ffd->seekable = (av_seek_frame(ffd->ctx, -1, 0, AVSEEK_FLAG_BACKWARD)<0) ? 0 : 1;
		if (!ffd->seekable) {
			av_close_input_file(ffd->ctx);
			open_file(&ffd->ctx, szName, av_in);
			av_find_stream_info(ffd->ctx);
		}
	}

	/*let's go*/
	gf_term_on_connect(serv, NULL, GF_OK);
	/*if (!ffd->service_type)*/ FFD_SetupObjects(ffd);
	ffd->service_type = 0;
	return GF_OK;

err_exit:
	GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] Error opening file %s: %s\n", url, gf_error_to_string(e)));
	if (ffd->ctx) av_close_input_file(ffd->ctx);
	ffd->ctx = NULL;
	gf_term_on_connect(serv, NULL, e);
	return GF_OK;
}
Exemplo n.º 13
0
MediaScanImage *video_create_image_from_frame(MediaScanVideo *v, MediaScanResult *r) {
  MediaScanImage *i = image_create();
  AVFormatContext *avf = (AVFormatContext *)r->_avf;
  av_codecs_t *codecs = (av_codecs_t *)v->_codecs;
  AVCodec *codec = (AVCodec *)v->_avc;
  AVFrame *frame = NULL;
  AVPacket packet;
  struct SwsContext *swsc = NULL;
  int got_picture;
  int64_t duration_tb = ((double)avf->duration / AV_TIME_BASE) / av_q2d(codecs->vs->time_base);
  uint8_t *src;
  int x, y;
  int ofs = 0;
  int no_keyframe_found = 0;
  int skipped_frames = 0;

  if ((avcodec_open(codecs->vc, codec)) < 0) {
    LOG_ERROR("Couldn't open video codec %s for thumbnail creation\n", codec->name);
    goto err;
  }

  frame = avcodec_alloc_frame();
  if (!frame) {
    LOG_ERROR("Couldn't allocate a video frame\n");
    goto err;
  }

  av_init_packet(&packet);

  i->path = v->path;
  i->width = v->width;
  i->height = v->height;

  // XXX select best video frame, for example:
  // * Skip frames of all the same color (e.g. blank intro frames)
  // * Use edge detection to skip blurry frames
  //   * http://code.google.com/p/fast-edge/
  //   * http://en.wikipedia.org/wiki/Canny_edge_detector 
  // * Use a frame some percentage into the video, what percentage?
  // * If really ambitious, use OpenCV for finding a frame with a face?

  // XXX other ways to seek if this fails
  // XXX for now, seek 10% into the video
  av_seek_frame(avf, codecs->vsid, (int)((double)duration_tb * 0.1), 0);

  for (;;) {
    int ret;
    int rgb_bufsize;
    AVFrame *frame_rgb = NULL;
    uint8_t *rgb_buffer = NULL;

    // Give up if we already tried the first frame
    if (no_keyframe_found) {
      LOG_ERROR("Error decoding video frame for thumbnail: %s\n", v->path);
      goto err;
    }

    if ((ret = av_read_frame(avf, &packet)) < 0) {
      if (ret == AVERROR_EOF || skipped_frames > 200) {
        LOG_DEBUG("Couldn't find a keyframe, using first frame\n");
        no_keyframe_found = 1;
        av_seek_frame(avf, codecs->vsid, 0, 0);
        av_read_frame(avf, &packet);
      }
      else {
        LOG_ERROR("Couldn't read video frame (%s): ", v->path);
        print_averror(ret);
        goto err;
      }
    }

    // Skip frame if it's not from the video stream
    if (!no_keyframe_found && packet.stream_index != codecs->vsid) {
      av_free_packet(&packet);
      skipped_frames++;
      continue;
    }

    // Skip non-key-frames
    if (!no_keyframe_found && !(packet.flags & AV_PKT_FLAG_KEY)) {
      av_free_packet(&packet);
      skipped_frames++;
      continue;
    }

    // Skip invalid packets, not sure why this isn't an error from av_read_frame
    if (packet.pos < 0) {
      av_free_packet(&packet);
      skipped_frames++;
      continue;
    }

    LOG_DEBUG("Using video packet: pos %lld size %d, stream_index %d, duration %d\n",
              packet.pos, packet.size, packet.stream_index, packet.duration);

    if ((ret = avcodec_decode_video2(codecs->vc, frame, &got_picture, &packet)) < 0) {
      LOG_ERROR("Error decoding video frame for thumbnail: %s\n", v->path);
      print_averror(ret);
      goto err;
    }

    if (!got_picture) {
      if (skipped_frames > 200) {
        LOG_ERROR("Error decoding video frame for thumbnail: %s\n", v->path);
        goto err;
      }
      if (!no_keyframe_found) {
        // Try next frame
        av_free_packet(&packet);
        skipped_frames++;
        continue;
      }
    }

    // use swscale to convert from source format to RGBA in our buffer with no resizing
    // XXX what scaler is fastest here when not actually resizing?
    swsc = sws_getContext(i->width, i->height, codecs->vc->pix_fmt,
                          i->width, i->height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (!swsc) {
      LOG_ERROR("Unable to get swscale context\n");
      goto err;
    }

    frame_rgb = avcodec_alloc_frame();
    if (!frame_rgb) {
      LOG_ERROR("Couldn't allocate a video frame\n");
      goto err;
    }

    // XXX There is probably a way to get sws_scale to write directly to i->_pixbuf in our RGBA format

    rgb_bufsize = avpicture_get_size(PIX_FMT_RGB24, i->width, i->height);
    rgb_buffer = av_malloc(rgb_bufsize);
    if (!rgb_buffer) {
      LOG_ERROR("Couldn't allocate an RGB video buffer\n");
      av_free(frame_rgb);
      goto err;
    }
    LOG_MEM("new rgb_buffer of size %d @ %p\n", rgb_bufsize, rgb_buffer);

    avpicture_fill((AVPicture *)frame_rgb, rgb_buffer, PIX_FMT_RGB24, i->width, i->height);

    // Convert image to RGB24
    sws_scale(swsc, frame->data, frame->linesize, 0, i->height, frame_rgb->data, frame_rgb->linesize);

    // Allocate space for our version of the image
    image_alloc_pixbuf(i, i->width, i->height);

    src = frame_rgb->data[0];
    ofs = 0;
    for (y = 0; y < i->height; y++) {
      for (x = 0; x < i->width * 3; x += 3) {
        i->_pixbuf[ofs++] = COL(src[x], src[x + 1], src[x + 2]);
      }
      src += i->width * 3;
    }

    // Free the frame
    LOG_MEM("destroy rgb_buffer @ %p\n", rgb_buffer);
    av_free(rgb_buffer);

    av_free(frame_rgb);

    // Done!
    goto out;
  }

err:
  image_destroy(i);
  i = NULL;

out:
  sws_freeContext(swsc);
  av_free_packet(&packet);
  if (frame)
    av_free(frame);

  avcodec_close(codecs->vc);

  return i;
}
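
Two of the XXX notes above have straightforward answers: when source and destination sizes match, sws_scale() performs no resampling, so the scaler flag hardly matters (SWS_POINT is a cheap, safe choice); and sws_scale() can emit RGBA directly, which would eliminate the per-pixel COL() repacking loop. A sketch of that shortcut, assuming i->_pixbuf is a tightly packed 4-bytes-per-pixel buffer in the byte order COL() produces (neither is shown in this excerpt):

  /* Sketch only: PIX_FMT_RGBA output and the _pixbuf layout are assumptions. */
  struct SwsContext *direct = sws_getContext(i->width, i->height, codecs->vc->pix_fmt,
                                             i->width, i->height, PIX_FMT_RGBA,
                                             SWS_POINT, NULL, NULL, NULL);
  uint8_t *dst_data[4] = { (uint8_t *)i->_pixbuf, NULL, NULL, NULL };
  int dst_linesize[4] = { i->width * 4, 0, 0, 0 };
  sws_scale(direct, (const uint8_t *const *)frame->data, frame->linesize,
            0, i->height, dst_data, dst_linesize);
  sws_freeContext(direct);
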
Exemplo n.º 14
0
Error FFMpegVideoProvider::Impl::entry() {
	mState = STATE_CAPTURING;
	Error rc;

	AVFormatContext *fmtCtx = NULL;
	AVCodecContext  *decodeCtx = NULL, *encodeCtx = NULL;
	AVCodec         *decoder= NULL, *encoder = NULL;
	SwsContext      *swsCtx;
	AVFrame         *convertedFrame = avcodec_alloc_frame();
	AVPacket pkt;
	VBufferPtr buf;

	FFMpegCodecInfo encodeInfo = FFMpegInfo::findCodecInfo(mCurrentParam.currentCodec);

	// Open file
	{
		AVInputFormat *inputFmt = av_find_input_format(mCtx.inputFmt);
		AVFormatParameters ap;
		::memset(&ap,0,sizeof(ap));

		ap.width = 640;
		ap.height = 480;
		ap.time_base = (AVRational) {1,25};
		if (av_open_input_file(&fmtCtx,mFileName.c_str(),inputFmt,0,&ap) != 0
				|| fmtCtx == NULL) {
			rc.setErrorString("Open input error");
			goto open_input_file_failed;
		}
	}

	// Find video track
	{
		for (int i=0; i<fmtCtx->nb_streams; i++) {
			if (fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
				decodeCtx = fmtCtx->streams[i]->codec;
				break;
			}
		}
		if (decodeCtx == NULL) {
			rc.setErrorString("No video track found");
			goto find_video_track_failed;
		}
	}

	// Find decoder
	{
		decoder = avcodec_find_decoder(decodeCtx->codec_id);
		if (decoder == NULL) {
			rc.setErrorString("Can't find decoder");
			goto find_decoder_failed;
		}

		if (decodeCtx->pix_fmt == PIX_FMT_NONE ) {
			decodeCtx->pix_fmt = PIX_FMT_YUVJ420P;
		}
	}

	// Open decoder
	{
		if (avcodec_open(decodeCtx,decoder) != 0) {
			rc.setErrorString("Can't open decoder");
			goto open_decoder_failed;
		}
	}

	// Find encoder
	{
		encoder = avcodec_find_encoder(encodeInfo.codecId);
		if (encoder == NULL) {
			rc.setErrorString("Can't find encoder");
			goto find_encoder_failed;
		}

		encodeCtx = avcodec_alloc_context();
		encodeCtx->width = mCurrentParam.currentGeometry.width;
		encodeCtx->height = mCurrentParam.currentGeometry.height;
		encodeCtx->time_base = (AVRational){mCurrentParam.currentFrameRate.num, mCurrentParam.currentFrameRate.den};
		encodeCtx->pix_fmt = encodeInfo.supportedPixelFormat.front();
		encodeCtx->bit_rate = 480000;
		encodeCtx->max_b_frames = 1;
	}

	// Open encoder
	{
		if (avcodec_open(encodeCtx, encoder) != 0) {
			rc.setErrorString("Open encoder failed");
			goto open_encoder_failed;
		}
	}

	// Init swscale
	{
		swsCtx = sws_getCachedContext(NULL, decodeCtx->width, decodeCtx->height,decodeCtx->pix_fmt,
				encodeCtx->width,encodeCtx->height,encodeCtx->pix_fmt, SWS_FAST_BILINEAR,
				NULL,NULL,NULL);
		if (swsCtx == NULL) {
			rc.setErrorString("Init sws context failed");
			goto init_sws_context_failed;
		}
	}

	// Allocate space for sws conversion
	{
		if (avpicture_alloc((AVPicture *)convertedFrame,encodeCtx->pix_fmt,encodeCtx->width,encodeCtx->height) != 0) {
			rc.setErrorString("Allocate space for conversion failed");
			goto allocate_space_failed;
		}
	}

	// All init finish successfully. Notify it
	{
		mThreadError = rc;
		mThreadMutex.lock();
		mThreadCond.signal();
		mThreadMutex.unlock();
	}

	// The mainloop
	while (!shouldStop()) {

		int got_frame = 0;
		AVFrame * decodedFrame = avcodec_alloc_frame();
		// Get a frame of decoded picture
		while (!got_frame && !shouldStop()){
			av_init_packet(&pkt);
			if (av_read_frame(fmtCtx, &pkt) != 0){
				rc.setErrorString("Read frame error");
				av_free_packet (&pkt);
				goto read_frame_error;
			}

			if ( fmtCtx->streams[pkt.stream_index]->codec != decodeCtx) {
				av_free_packet(&pkt);
				continue;
			}

			if (avcodec_decode_video2(decodeCtx,decodedFrame,&got_frame,&pkt) < 0){
				rc.setErrorString("Decode video frame error");
				av_free_packet (&pkt);
				goto decode_frame_error;
			}
			av_free_packet (&pkt);
		}

		if (!got_frame) {
			rc.setErrorString("Decode thread is requested to stop");
			goto decode_stop;
		}

		// Do sws scale
		{
			if (sws_scale(swsCtx,decodedFrame->data,decodedFrame->linesize,
					0,decodeCtx->height, convertedFrame->data, convertedFrame->linesize) < 1) {
				rc.setErrorString("sws scale failed");
				goto sws_scale_failed;
			}
		}

		// Wait a buffer to encode to
		{
			mBufferMutex.lock();
			while (!shouldStop() && (mBuffers.size() < 1)) {
				rc = mBufferCond.wait(mBufferMutex,500);
				if (rc.isError()) {
					if (rc.getErrorType() == Error::ERR_TIMEOUT) continue;
					else {
						mBufferMutex.unlock();
						goto wait_buffer_error;
					}
				}else{
					break;
				}
			}
			if (mBuffers.size() < 1) {
				mBufferMutex.unlock();
				goto wait_buffer_error;
			}
			buf = mBuffers.front();
			mBuffers.pop_front();
			mBufferMutex.unlock();
		}

		// Encode the video
		{
			int encoded = avcodec_encode_video(encodeCtx, buf->buf.getData(),buf->buf.getSize(),convertedFrame);
			if (encoded < 0) {
				rc.setErrorString("Encode video error");
				goto encode_video_error;
			}
			buf->size = encoded;
		}


		encode_video_error:
		buf->returned = rc;
		buf->cond.signal();
		wait_buffer_error:
		sws_scale_failed:
		decode_stop:
		decode_frame_error:
		read_frame_error:

		av_free(decodedFrame);
		if (rc.isError()) break;
	}


	// Notify all waiter(if exists)
	{
		mBufferMutex.lock();
		for (std::list<VBufferPtr>::const_iterator iter = mBuffers.begin();
				iter != mBuffers.end(); ++ iter) {
			(*iter)->mutex.lock();
			(*iter)->returned = rc;
			(*iter)->cond.signal();
			(*iter)->size = 0;
			(*iter)->mutex.unlock();
		}
		mBuffers.clear();
		mBufferMutex.unlock();
	}

	allocate_space_failed:
	sws_freeContext(swsCtx);
	init_sws_context_failed:
	avcodec_close(encodeCtx);
	open_encoder_failed:
	av_free(encodeCtx);
	find_encoder_failed:
	avcodec_close(decodeCtx);
	open_decoder_failed:
	find_decoder_failed:
	find_video_track_failed:
	av_close_input_file(fmtCtx);
	open_input_file_failed:
	av_free(convertedFrame);

	mState = STATE_READY;
	mThreadMutex.lock();
	mThreadError = rc;
	mThreadCond.signal();
	mThreadMutex.unlock();
	return rc;
}
Exemplo n.º 15
0
/*	decode and play stream. returns 0 or av error code.
 */
static int play (player_t * const player) {
	assert (player != NULL);

	AVPacket pkt;
	av_init_packet (&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	AVFrame *frame = NULL, *filteredFrame = NULL;
	frame = av_frame_alloc ();
	assert (frame != NULL);
	filteredFrame = av_frame_alloc ();
	assert (filteredFrame != NULL);

	while (!player->doQuit) {
		int ret = av_read_frame (player->fctx, &pkt);
		if (ret < 0) {
			av_free_packet (&pkt);
			return ret;
		} else if (pkt.stream_index != player->streamIdx) {
			av_free_packet (&pkt);
			continue;
		}

		AVPacket pkt_orig = pkt;

		/* pausing */
		pthread_mutex_lock (&player->pauseMutex);
		if (player->doPause) {
			av_read_pause (player->fctx);
			do {
				pthread_cond_wait (&player->pauseCond, &player->pauseMutex);
			} while (player->doPause);
			av_read_play (player->fctx);
		}
		pthread_mutex_unlock (&player->pauseMutex);

		while (pkt.size > 0 && !player->doQuit) {
			int got_frame = 0;

			const int decoded = avcodec_decode_audio4 (player->st->codec,
					frame, &got_frame, &pkt);
			if (decoded < 0) {
				/* skip this one */
				break;
			}

			if (got_frame != 0) {
				/* XXX: suppresses warning from resample filter */
				if (frame->pts == (int64_t) AV_NOPTS_VALUE) {
					frame->pts = 0;
				}
				ret = av_buffersrc_write_frame (player->fabuf, frame);
				assert (ret >= 0);

				while (true) {
					if (av_buffersink_get_frame (player->fbufsink, filteredFrame) < 0) {
						/* try again next frame */
						break;
					}

					const int numChannels = av_get_channel_layout_nb_channels (
							filteredFrame->channel_layout);
					const int bps = av_get_bytes_per_sample(filteredFrame->format);
					ao_play (player->aoDev, (char *) filteredFrame->data[0],
							filteredFrame->nb_samples * numChannels * bps);

					av_frame_unref (filteredFrame);
				}
			}

			pkt.data += decoded;
			pkt.size -= decoded;
		};

		av_free_packet (&pkt_orig);

		player->songPlayed = av_q2d (player->st->time_base) * (double) pkt.pts;
		player->lastTimestamp = pkt.pts;
	}

	av_frame_free (&filteredFrame);
	av_frame_free (&frame);

	return 0;
}
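
One caveat in the filter-drain loop above: any negative return from av_buffersink_get_frame() is treated as "try again next frame", which also swallows genuine filter-graph errors. A sketch that separates the two cases, using the same API:

	for (;;) {
		ret = av_buffersink_get_frame (player->fbufsink, filteredFrame);
		if (ret == AVERROR (EAGAIN) || ret == AVERROR_EOF)
			break;          /* no filtered data available right now */
		if (ret < 0)
			return ret;     /* genuine filter-graph error */
		/* ... ao_play () and av_frame_unref () as above ... */
	}
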
Exemplo n.º 16
0
int JPG_to_Pixel(const unsigned char *jpgBuff, int jpgSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *pixelBuff, int *pixelSize) {	
	AVFormatContext *formatContext;
	AVInputFormat *inputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame, *frame2;
	AVPacket packet;
	struct SwsContext *swsContext;
	int streamIndex;
	int gotFrame;
	int codecRet;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	ioContext = avio_alloc_context((unsigned char *)jpgBuff, jpgSize, 0, NULL, NULL, NULL, NULL);
	inputFormat = av_find_input_format("mjpeg");
	av_probe_input_buffer2(ioContext, &inputFormat, NULL, NULL, 0, 0);
	formatContext->pb = ioContext;
	formatContext->iformat = inputFormat;
	avformat_open_input(&formatContext, NULL, NULL, NULL);
	av_find_stream_info(formatContext);

	av_init_packet(&packet);
	for (streamIndex = 0; streamIndex < formatContext->nb_streams; streamIndex++) {
		av_read_frame(formatContext, &packet);
		if (formatContext->streams[streamIndex]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 0 < packet.size) {
			stream = formatContext->streams[streamIndex];
			codecContext = stream->codec;
			codec = avcodec_find_decoder(codecContext->codec_id);
			avcodec_open2(codecContext, codec, NULL);
			frame = avcodec_alloc_frame();
			codecRet = avcodec_decode_video2(codecContext, frame, &gotFrame, &packet);
			if (0 <= codecRet && 1 == gotFrame) {
				frame2 = av_frame_clone(frame);
				frame2->format = PF(pixelFmt);
				swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt, pixelWidth, pixelHeight, (AVPixelFormat)frame2->format, SWS_BICUBIC, NULL, NULL, NULL);   
				sws_scale(swsContext, (const uint8_t *const *)frame->data, frame->linesize, 0, codecContext->height, frame2->data, frame2->linesize);  
				sws_freeContext(swsContext);

				*pixelSize = avpicture_layout((const AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, pixelBuff, *pixelSize);
				result = *pixelSize;

				av_frame_free(&frame2);
			}	
			if (1 == codecContext->refcounted_frames) av_frame_unref(frame); 
			avcodec_free_frame(&frame);
			avcodec_close(codecContext);
		}
		av_free_packet(&packet);
		if (-1 != result)
			break;
	}

	avformat_close_input(&formatContext);
	av_free(ioContext->buffer);
	av_free(ioContext);
	avformat_free_context(formatContext);
	return result;
}
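
avpicture_layout(), avpicture_get_size() and avcodec_alloc_frame() are all deprecated in current FFmpeg; the copy-out step above would nowadays use av_image_copy_to_buffer() from libavutil/imgutils.h. A minimal sketch against the same variables, assuming an alignment of 1 to match the packed layout:

				/* Sketch: serialize the planar frame2 into the caller's contiguous buffer. */
				int needed = av_image_get_buffer_size((enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, 1);
				if (needed > 0 && needed <= *pixelSize) {
					*pixelSize = av_image_copy_to_buffer(pixelBuff, *pixelSize,
						(const uint8_t *const *)frame2->data, frame2->linesize,
						(enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, 1);
					result = *pixelSize;
				}
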
Exemplo n.º 17
0
int _tmain(int argc, _TCHAR* argv[])
{
	AVFormatContext *pFormatCtx;
	char *filepath = "Titanic.ts";
	char *file_out = "Titanic.yuv";
	char *file_out1 = "Titanic.h264";
	FILE *fp_out;
	FILE *fp_out1;
	errno_t err,err1;

	err = fopen_s(&fp_out, file_out, "wb+");
	if (err != 0)
	{
		printf("Failed to open output file %s\n", file_out);
		return -1;
	}

	err1 = fopen_s(&fp_out1, file_out1, "wb+");
	if (err1 != 0)
	{
		printf("Failed to open output file %s\n", file_out1);
		return -1;
	}

	av_register_all();    //register all components
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();   //allocate the format context
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) < 0) //open the input video file
	{
		printf("Can't open the input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx,NULL)<0)     //retrieve stream information
	{
		printf("Can't find the stream information!\n");
		return -1;
	}

	int i, index_video = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)      //record the index of the first video stream
		{
			index_video = i;
			break;
		}		
	}
	if (index_video == -1)
	{
		printf("Can't find a video stream;\n");
		return -1;
	}

	AVCodecContext *pCodecCtx = pFormatCtx->streams[index_video]->codec;

	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);     //find the decoder
	if (pCodec == NULL)
	{
		printf("Can't find a decoder!\n");
		return -1;
	}

	//if (pCodecCtx->codec_id == AV_CODEC_ID_H264)
	//{
	//	av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);
	//	av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
	//}


	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)   //open the decoder
	{
		printf("Can't open the decoder!\n");
		return -1;
	}


	AVFrame *pFrame = av_frame_alloc();  //this only allocates the AVFrame itself, not the data buffers
	AVFrame *pFrameYUV = av_frame_alloc();

	uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height));  //allocate the output buffer
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);//bind the buffer to the frame's data planes

	AVPacket *pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(pkt);
	
	SwsContext * img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	
	int frame_cnt = 0;
	int ret;
	int get_frame;
	int y_size = pCodecCtx->width*pCodecCtx->height;
	while (av_read_frame(pFormatCtx,pkt) >=0  )
	{
		if (pkt->stream_index == index_video)
		{
			fwrite(pkt->data,1,pkt->size,fp_out1);
			if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
			{
				printf("Decode Error!\n");
				return -1;
			}
			if (get_frame)
			{
				printf("Decoded frame index: %d\n", frame_cnt);
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				fwrite(pFrameYUV->data[0], 1, y_size, fp_out);
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);
				frame_cnt++;
			}

		}
		av_free_packet(pkt);
	}
	//flush decoder: drain frames still buffered inside the codec
	pkt->data = NULL;
	pkt->size = 0;
	while (1)
	{
		if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
		{
			printf("Decode Error!\n");
			break;
		}
		if (get_frame)
		{
			printf("Flush Decoded frame index: %d\n", frame_cnt);
			sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
				pFrameYUV->data, pFrameYUV->linesize);
			fwrite(pFrameYUV->data[0], 1, y_size, fp_out);
			fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);
			fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);
			frame_cnt++;
		}
		else
			break;
	}

	//close
	fclose(fp_out);
	fclose(fp_out1);

	//free
	sws_freeContext(img_convert_ctx);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);
	avformat_free_context(pFormatCtx);

	return 0;
}
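
The y_size/4 chroma writes used throughout these examples follow directly from YUV420P subsampling: U and V are sampled at half resolution both horizontally and vertically, so each chroma plane carries a quarter as many samples as the luma plane. For a 1920x1080 frame that is 2,073,600 Y bytes plus 518,400 bytes each for U and V, i.e. 1.5 bytes per pixel in total, which is exactly what avpicture_get_size(PIX_FMT_YUV420P, w, h) returns.
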
Exemplo n.º 18
0
/*
 * This thread is used to load video frames asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache(), which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets, to keep
 * memory and CPU usage low; 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and waits for confirmation), then
 * changes the position in the stream and restarts the cache thread.
 */
void *VideoFFmpeg::cacheThread(void *data)
{
	VideoFFmpeg* video = (VideoFFmpeg*)data;
	// holds the frame that is being decoded
	CacheFrame *currentFrame = NULL;
	CachePacket *cachePacket;
	bool endOfFile = false;
	int frameFinished = 0;
	double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
	int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	while (!video->m_stopThread)
	{
		// packet cache is used solely by this thread, no need to lock
		// In case the stream/file contains streams other than the one we are looking for,
		// allow a bit of cycling to get rid of those packets quickly
		frameFinished = 0;
		while (	   !endOfFile 
				&& (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL 
				&& frameFinished < 25)
		{
			// free packet => packet cache is not full yet, just read more
			if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0) 
			{
				if (cachePacket->packet.stream_index == video->m_videoStream)
				{
					// make sure fresh memory is allocated for the packet and move it to queue
					av_dup_packet(&cachePacket->packet);
					BLI_remlink(&video->m_packetCacheFree, cachePacket);
					BLI_addtail(&video->m_packetCacheBase, cachePacket);
					break;
				} else {
					// this is not a good packet for us, just leave it on free queue
					// Note: here we could handle sound packet
					av_free_packet(&cachePacket->packet);
					frameFinished++;
				}
				
			} else {
				if (video->m_isFile)
					// this marks the end of the file
					endOfFile = true;
				// if we cannot read a packet, no need to continue
				break;
			}
		}
		// frame cache is also used by main thread, lock
		if (currentFrame == NULL) 
		{
			// no current frame being decoded, take free one
			pthread_mutex_lock(&video->m_cacheMutex);
			if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
				BLI_remlink(&video->m_frameCacheFree, currentFrame);
			pthread_mutex_unlock(&video->m_cacheMutex);
		}
		if (currentFrame != NULL)
		{
			// this frame is out of free and busy queue, we can manipulate it without locking
			frameFinished = 0;
			while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
			{
				BLI_remlink(&video->m_packetCacheBase, cachePacket);
				// use m_frame because when caching, it is not used in main thread
				// we can't use currentFrame directly because we need to convert to RGB first
				avcodec_decode_video2(video->m_codecCtx, 
					video->m_frame, &frameFinished, 
					&cachePacket->packet);
				if (frameFinished) 
				{
					AVFrame * input = video->m_frame;

					/* Only use the frame if its data was actually filled in;
					 * this check stops crashing */
					if (   input->data[0]!=0 || input->data[1]!=0 
						|| input->data[2]!=0 || input->data[3]!=0)
					{
						if (video->m_deinterlace) 
						{
							if (avpicture_deinterlace(
								(AVPicture*) video->m_frameDeinterlaced,
								(const AVPicture*) video->m_frame,
								video->m_codecCtx->pix_fmt,
								video->m_codecCtx->width,
								video->m_codecCtx->height) >= 0)
							{
								input = video->m_frameDeinterlaced;
							}
						}
						// convert to RGB24
						sws_scale(video->m_imgConvertCtx,
							input->data,
							input->linesize,
							0,
							video->m_codecCtx->height,
							currentFrame->frame->data,
							currentFrame->frame->linesize);
						// move frame to queue, this frame is necessarily the next one
						video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
						currentFrame->framePosition = video->m_curPosition;
						pthread_mutex_lock(&video->m_cacheMutex);
						BLI_addtail(&video->m_frameCacheBase, currentFrame);
						pthread_mutex_unlock(&video->m_cacheMutex);
						currentFrame = NULL;
					}
				}
				av_free_packet(&cachePacket->packet);
				BLI_addtail(&video->m_packetCacheFree, cachePacket);
			} 
			if (currentFrame && endOfFile) 
			{
				// no more packet and end of file => put a special frame that indicates that
				currentFrame->framePosition = -1;
				pthread_mutex_lock(&video->m_cacheMutex);
				BLI_addtail(&video->m_frameCacheBase, currentFrame);
				pthread_mutex_unlock(&video->m_cacheMutex);
				currentFrame = NULL;
				// no need to stay any longer in this thread
				break;
			}
		}
		// small sleep to avoid unnecessary looping
		PIL_sleep_ms(10);
	}
	// before quitting, put back the current frame to queue to allow freeing
	if (currentFrame)
	{
		pthread_mutex_lock(&video->m_cacheMutex);
		BLI_addtail(&video->m_frameCacheFree, currentFrame);
		pthread_mutex_unlock(&video->m_cacheMutex);
	}
	return 0;
}
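
The frame-position formula used when queuing a frame, (dts - startTs) * (m_baseFrameRate * timeBase) + 0.5, converts a stream timestamp into a frame index: (dts - startTs) * timeBase is the presentation time in seconds, multiplying by the base frame rate turns seconds into frames, and adding 0.5 rounds to the nearest integer. For example, with a 1/90000 time base and a 25 fps base frame rate, a packet at dts = 180000 sits 2.0 s into the stream and therefore maps to frame 50.
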
Exemplo n.º 19
0
int main()
{
	int ret=0, check_yuv = 0;
	AVPacket pkt;
	AVFrame *frame = NULL;
	enum AVMediaType mediaType;
	unsigned int streamIdx;
	unsigned int i;
	int gotFrame;

	check_yuv = check_file();

	//initialize libavformat and register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	//initialize packet, set data to NULL
	av_init_packet(&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	//read all packets
	while (av_read_frame(inFmtCtx, &pkt) >= 0)
	{
		streamIdx = pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;
		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode \n");

		frame = av_frame_alloc();

		if (!frame)
		{
			ret = AVERROR(ENOMEM);
			break;
		}

		av_packet_rescale_ts(&pkt, inFmtCtx->streams[streamIdx]->time_base, inFmtCtx->streams[streamIdx]->codec->time_base);

		gotFrame = 0;
		if (mediaType == AVMEDIA_TYPE_VIDEO) {
			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, frame, &gotFrame, &pkt);

			if (ret < 0)
			{
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
		}

		if (gotFrame)
		{
			frame->pts = av_frame_get_best_effort_timestamp(frame);
			ret = encode_write_frame(frame, streamIdx, &gotFrame);
			//av_frame_free(&frame);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Error Encoding Frame");
				exit(1);
			}
		}
		else av_frame_free(&frame);

		av_free_packet(&pkt);
	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}


	//free all
	av_write_trailer(outFmtCtx);
	av_free_packet(&pkt);
	//av_frame_free(&frame);
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		avcodec_close(inFmtCtx->streams[i]->codec);
		if (outFmtCtx && outFmtCtx->nb_streams > i && outFmtCtx->streams[i] && outFmtCtx->streams[i]->codec)
			avcodec_close(outFmtCtx->streams[i]->codec);
	}
	avformat_close_input(&inFmtCtx);
	if (outFmtCtx && !(outFmtCtx->oformat->flags & AVFMT_NOFILE))
		avio_closep(&outFmtCtx->pb);
	avformat_free_context(outFmtCtx);

	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Error occurred \n");

	return 0;


}
Exemplo n.º 20
0
// position the read pointer in the file; 'position' is a frame index
AVFrame *VideoFFmpeg::grabFrame(long position)
{
	AVPacket packet;
	int frameFinished;
	int posFound = 1;
	bool frameLoaded = false;
	int64_t targetTs = 0;
	CacheFrame *frame;
	int64_t dts = 0;

	if (m_cacheStarted)
	{
		// when cache is active, we must not read the file directly
		do {
			pthread_mutex_lock(&m_cacheMutex);
			frame = (CacheFrame *)m_frameCacheBase.first;
			pthread_mutex_unlock(&m_cacheMutex);
			// no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
			if (frame == NULL)
			{
				// no frame in cache, in case of file it is an abnormal situation
				if (m_isFile)
				{
					// go back to no threaded reading
					stopCache();
					break;
				}
				return NULL;
			}
			if (frame->framePosition == -1) 
			{
				// this frame marks the end of the file (only used for files)
				// leave in cache to make sure we don't miss it
				m_eof = true;
				return NULL;
			}
			// for streaming, always return the next frame, 
			// that's what grabFrame does in non cache mode anyway.
			if (m_isStreaming || frame->framePosition == position)
			{
				return frame->frame;
			}
			// for cam, skip old frames to keep image realtime.
			// There should be no risk of clock drift since it all happens on the same CPU
			if (frame->framePosition > position) 
			{
				// this can happen after rewind if the seek didn't find the first frame
				// the frame in the buffer is ahead of time, just leave it there
				return NULL;
			}
			// this frame is not useful, release it
			pthread_mutex_lock(&m_cacheMutex);
			BLI_remlink(&m_frameCacheBase, frame);
			BLI_addtail(&m_frameCacheFree, frame);
			pthread_mutex_unlock(&m_cacheMutex);
		} while (true);
	}
	double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
	int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	// come here when there is no cache or cache has been stopped
	// locate the frame, by seeking if necessary (seeking is only possible for files)
	if (m_isFile)
	{
		// first check if the position that we are looking for is in the preseek range
		// if so, just read the frame until we get there
		if (position > m_curPosition + 1 
			&& m_preseek 
			&& position - (m_curPosition + 1) < m_preseek) 
		{
			while (av_read_frame(m_formatCtx, &packet)>=0)
			{
				if (packet.stream_index == m_videoStream) 
				{
					avcodec_decode_video2(
						m_codecCtx, 
						m_frame, &frameFinished, 
						&packet);
					if (frameFinished)
					{
						m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
					}
				}
				av_free_packet(&packet);
				if (position == m_curPosition+1)
					break;
			}
		}
		// if the position is not in preseek, do a direct jump
		if (position != m_curPosition + 1) 
		{ 
			int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

			if (pos < 0)
				pos = 0;

			pos += startTs;

			if (position <= m_curPosition || !m_eof)
			{
#if 0
				// Tried to make this work but couldn't: seeking on byte is ignored by the
				// format plugin and it will generally continue to read from last timestamp.
				// Too bad because frame seek is not always able to get the first frame
				// of the file.
				if (position <= m_preseek)
				{
					// we can safely go the beginning of the file
					if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
					{
						// binary seek does not reset the timestamp, must do it now
						av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
						m_curPosition = 0;
					}
				}
				else
#endif
				{
					if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
					{
						// current position is now lost, guess a value.
						// It's not important because it will be set at the end of this function
						m_curPosition = position - m_preseek - 1;
					}
				}
			}
			// this is the timestamp of the frame we're looking for
			targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

			posFound = 0;
			avcodec_flush_buffers(m_codecCtx);
		}
	} else if (m_isThreaded)
	{
		// cache is not started but threading is possible
		// better not read the stream directly => it may take some time, better start caching
		if (startCache())
			return NULL;
		// Abnormal!!! could not start cache, fall back on direct read
		m_isThreaded = false;
	}

	// find the correct frame, in case of streaming and no cache, it means just
	// return the next frame. This is not quite correct, may need more work
	while (av_read_frame(m_formatCtx, &packet) >= 0)
	{
		if (packet.stream_index == m_videoStream) 
		{
			AVFrame *input = m_frame;
			short counter = 0;

			/* While the data is not read properly (png, tiff, etc. formats may need several passes) */
			while ((input->data[0] == 0 && input->data[1] == 0 && input->data[2] == 0 && input->data[3] == 0) && counter < 10) {
				avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &packet);
				counter++;
			}

			// remember dts to compute exact frame number
			dts = packet.dts;
			if (frameFinished && !posFound) 
			{
				if (dts >= targetTs)
				{
					posFound = 1;
				}
			} 

			if (frameFinished && posFound == 1) 
			{
				AVFrame * input = m_frame;

				/* This means the data wasn't read properly;
				 * this check stops crashing */
				if (   input->data[0]==0 && input->data[1]==0 
					&& input->data[2]==0 && input->data[3]==0)
				{
					av_free_packet(&packet);
					break;
				}

				if (m_deinterlace) 
				{
					if (avpicture_deinterlace(
						(AVPicture*) m_frameDeinterlaced,
						(const AVPicture*) m_frame,
						m_codecCtx->pix_fmt,
						m_codecCtx->width,
						m_codecCtx->height) >= 0)
					{
						input = m_frameDeinterlaced;
					}
				}
				// convert to RGB24
				sws_scale(m_imgConvertCtx,
					input->data,
					input->linesize,
					0,
					m_codecCtx->height,
					m_frameRGB->data,
					m_frameRGB->linesize);
				av_free_packet(&packet);
				frameLoaded = true;
				break;
			}
		}
		av_free_packet(&packet);
	}
	m_eof = m_isFile && !frameLoaded;
	if (frameLoaded)
	{
		m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
		if (m_isThreaded)
		{
			// normal case for file: first locate, then start cache
			if (!startCache())
			{
				// Abnormal!! could not start cache, return to non-cache mode
				m_isThreaded = false;
			}
		}
		return m_frameRGB;
	}
	return NULL;
}
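
The seek path above pairs av_seek_frame() with avcodec_flush_buffers(), and the pairing matters: after a seek the decoder still holds reference frames from before the jump, so decoding would otherwise produce corrupt output until the next keyframe. The pattern in isolation, as a sketch with placeholder names (fmtCtx, codecCtx, videoStream and target_ts are not taken from the code above):

	/* Sketch: jump to target_ts (in stream time_base units), then decode forward. */
	if (av_seek_frame(fmtCtx, videoStream, target_ts, AVSEEK_FLAG_BACKWARD) >= 0)
	{
		avcodec_flush_buffers(codecCtx);  /* drop pre-seek decoder state */
		/* read packets and decode, discarding frames until dts >= target_ts */
	}
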
Exemplo n.º 21
0
bool FFmpegDecoder::readNextPacketNormal()
{
    AVPacket packet;

    if (! m_pending_packet)
    {
        bool end_of_stream = false;

        // Read the next frame packet
        int error = av_read_frame(m_format_context.get(), &packet);
        if (error < 0)
        {
            if (error == static_cast<int>(AVERROR_EOF) ||
                m_format_context.get()->pb->eof_reached)
                end_of_stream = true;
            else {
                OSG_FATAL << "av_read_frame() returned " << AvStrError(error) << std::endl;
                throw std::runtime_error("av_read_frame() failed");
            }
        }

        if (end_of_stream)
        {
            // If we reach the end of the stream, change the decoder state
            if (loop())
            {
                m_clocks.reset(m_start);
                rewindButDontFlushQueues();
            }
            else
                m_state = END_OF_STREAM;

            return false;
        }
        else
        {
            // Make the packet data available beyond av_read_frame() logical scope.
            if ((error = av_dup_packet(&packet)) < 0) {
                OSG_FATAL << "av_dup_packet() returned " << AvStrError(error) << std::endl;
                throw std::runtime_error("av_dup_packet() failed");
            }

            m_pending_packet = FFmpegPacket(packet);
        }
    }

    // Send data packet
    if (m_pending_packet.type == FFmpegPacket::PACKET_DATA)
    {
        if (m_pending_packet.packet.stream_index == m_audio_index)
        {
            if (m_audio_queue.timedPush(m_pending_packet, 10)) {
                m_pending_packet.release();
                return true;
            }
        }
        else if (m_pending_packet.packet.stream_index == m_video_index)
        {
            if (m_video_queue.timedPush(m_pending_packet, 10)) {
                m_pending_packet.release();
                return true;
            }
        }
        else
        {
            m_pending_packet.clear();
            return true;
        }
    }

    return false;
}
Exemplo n.º 22
0
int main(int argc, char **argv)
{
    int ret = 0;
    AVPacket dec_pkt;
    AVCodec *enc_codec;

    if (argc != 4) {
        fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
                "The output format is guessed according to the file extension.\n"
                "\n", argv[0]);
        return -1;
    }

    ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Failed to create a VAAPI device. Error code: %s\n", av_err2str(ret));
        return -1;
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;

    if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
        fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
        ret = -1;
        goto end;
    }

    if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) {
        fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
                "%s\n", av_err2str(ret));
        goto end;
    }

    if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Cannot open output file. "
                "Error code: %s\n", av_err2str(ret));
        goto end;
    }

    /* read all packets and only transcoding video */
    while (ret >= 0) {
        if ((ret = av_read_frame(ifmt_ctx, &dec_pkt)) < 0)
            break;

        if (video_stream == dec_pkt.stream_index)
            ret = dec_enc(&dec_pkt, enc_codec);

        av_packet_unref(&dec_pkt);
    }

    /* flush decoder */
    dec_pkt.data = NULL;
    dec_pkt.size = 0;
    ret = dec_enc(&dec_pkt, enc_codec);
    av_packet_unref(&dec_pkt);

    /* flush encoder */
    ret = encode_write(NULL);

    /* write the trailer for output stream */
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx)
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    avcodec_free_context(&decoder_ctx);
    avcodec_free_context(&encoder_ctx);
    av_buffer_unref(&hw_device_ctx);
    return ret;
}
Exemplo n.º 23
0
/*	decode and play stream. returns 0 or av error code.
 */
static int play (player_t * const player) {
	assert (player != NULL);

	AVPacket pkt;
	av_init_packet (&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	AVFrame *frame = NULL, *filteredFrame = NULL;
	frame = avcodec_alloc_frame ();
	assert (frame != NULL);
	filteredFrame = avcodec_alloc_frame ();
	assert (filteredFrame != NULL);

	while (!player->doQuit) {
		ping ();
		int ret = av_read_frame (player->fctx, &pkt);
		if (ret < 0) {
			av_free_packet (&pkt);
			return ret;
		} else if (pkt.stream_index != player->streamIdx) {
			av_free_packet (&pkt);
			continue;
		}

		AVPacket pkt_orig = pkt;

		/* pausing */
		pthread_mutex_lock (&player->pauseMutex);
		if (player->doPause) {
			av_read_pause (player->fctx);
			do {
				pthread_cond_wait (&player->pauseCond, &player->pauseMutex);
			} while (player->doPause);
			av_read_play (player->fctx);
		}
		pthread_mutex_unlock (&player->pauseMutex);

		while (pkt.size > 0 && !player->doQuit) {
			int got_frame = 0;

			const int decoded = avcodec_decode_audio4 (player->st->codec,
					frame, &got_frame, &pkt);
			if (decoded < 0) {
				/* skip this one */
				break;
			}

			if (got_frame != 0) {
				/* XXX: suppresses warning from resample filter */
				if (frame->pts == (int64_t) AV_NOPTS_VALUE) {
					frame->pts = 0;
				}
				ret = av_buffersrc_write_frame (player->fabuf, frame);
				assert (ret >= 0);

				while (true) {
					AVFilterBufferRef *audioref = NULL;
#ifdef HAVE_AV_BUFFERSINK_GET_BUFFER_REF
					/* ffmpeg’s compatibility layer is broken in some releases */
					if (av_buffersink_get_buffer_ref (player->fbufsink,
							&audioref, 0) < 0) {
#else
					if (av_buffersink_read (player->fbufsink, &audioref) < 0) {
#endif
						/* try again next frame */
						break;
					}

					ret = avfilter_copy_buf_props (filteredFrame, audioref);
					assert (ret >= 0);

					const int numChannels = av_get_channel_layout_nb_channels (
							filteredFrame->channel_layout);
					const int bps = av_get_bytes_per_sample(filteredFrame->format);
					ao_play (player->aoDev, (char *) filteredFrame->data[0],
							filteredFrame->nb_samples * numChannels * bps);

					avfilter_unref_bufferp (&audioref);
				}
			}

			pkt.data += decoded;
			pkt.size -= decoded;
		};

		av_free_packet (&pkt_orig);

		player->songPlayed = av_q2d (player->st->time_base) * (double) pkt.pts;
		player->lastTimestamp = pkt.pts;
	}

	avcodec_free_frame (&filteredFrame);
	avcodec_free_frame (&frame);

	return 0;
}

static void finish (player_t * const player) {
	ao_close (player->aoDev);
	player->aoDev = NULL;
	if (player->fgraph != NULL) {
		avfilter_graph_free (&player->fgraph);
		player->fgraph = NULL;
	}
	if (player->st != NULL && player->st->codec != NULL) {
		avcodec_close (player->st->codec);
		player->st = NULL;
	}
	if (player->fctx != NULL) {
		avformat_close_input (&player->fctx);
	}
}
Exemplo n.º 24
0
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVDictionary *optionsDict = NULL;
    AVFrame *pFrame = NULL;
    int frameFinished;
    AVPacket packet;
    struct SwsContext *sws_ctx = NULL;

    SDL_Surface *screen = NULL;
    SDL_Overlay *bmp = NULL;
    SDL_Rect rect;
    SDL_Event event;

    if (argc < 2) {
        printf("Please provide a movie file\n");
        exit(1);
    }

    // Register all file formats and codecs
    av_register_all();

    // Initialize SDL
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1) {
        return -1; // Didn't find a video stream
    }

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0) {
        return -1; // Could not open codec
    }

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                               SDL_YV12_OVERLAY, screen);

    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    // Read frames and display them until the stream ends or the user quits
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                 &packet);

            // Did we get a video frame?
            if (frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }
    }

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
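
The crossed indices when filling pict above are deliberate, not a bug: FFmpeg's YUV420P keeps its planes in Y, U, V order while SDL's YV12 overlay expects Y, V, U, so the U plane (data[1]) goes to pixels[2] and the V plane (data[2]) to pixels[1], for the line pitches as well as the data pointers.
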
Exemplo n.º 25
0
int main(int argc, char **argv)
{
    char fntemplate[PATH_MAX];
    char pktfilename[PATH_MAX];
    AVFormatContext *fctx;
    AVPacket pkt;
    int64_t pktnum = 0;
    int64_t maxpkts = 0;
    int donotquit = 0;
    int nowrite = 0;
    int err;

    if ((argc > 1) && !strncmp(argv[1], "-", 1)) {
        if (strchr(argv[1], 'w'))
            donotquit = 1;
        if (strchr(argv[1], 'n'))
            nowrite = 1;
        argv++;
        argc--;
    }
    if (argc < 2)
        return usage(1);
    if (argc > 2)
        maxpkts = atoi(argv[2]);
    strncpy(fntemplate, argv[1], PATH_MAX-1);
    if (strrchr(argv[1], '/'))
        strncpy(fntemplate, strrchr(argv[1], '/')+1, PATH_MAX-1);
    if (strrchr(fntemplate, '.'))
        *strrchr(fntemplate, '.') = '\0';
    if (strchr(fntemplate, '%')) {
        fprintf(stderr, "can't use filenames containing '%%'\n");
        return usage(1);
    }
    if (strlen(fntemplate) + sizeof(PKTFILESUFF) >= PATH_MAX-1) {
        fprintf(stderr, "filename too long\n");
        return usage(1);
    }
    strcat(fntemplate, PKTFILESUFF);
    printf("FNTEMPLATE: '%s'\n", fntemplate);

    // register all file formats
    av_register_all();

    err = av_open_input_file(&fctx, argv[1], NULL, 0, NULL);
    if (err < 0) {
        fprintf(stderr, "av_open_input_file: error %d\n", err);
        return 1;
    }

    err = av_find_stream_info(fctx);
    if (err < 0) {
        fprintf(stderr, "av_find_stream_info: error %d\n", err);
        return 1;
    }

    av_init_packet(&pkt);

    while ((err = av_read_frame(fctx, &pkt)) >= 0) {
        int fd;
        snprintf(pktfilename, PATH_MAX-1, fntemplate, pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
        printf(PKTFILESUFF"\n", pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
        //printf("open(\"%s\")\n", pktfilename);
        if (!nowrite) {
            fd = open(pktfilename, O_WRONLY|O_CREAT, 0644);
            if (fd >= 0) {
                write(fd, pkt.data, pkt.size);
                close(fd);
            }
        }
        pktnum++;
        if (maxpkts && (pktnum >= maxpkts))
            break;
    }

    while (donotquit)
        sleep(60);

    return 0;
}
Exemplo n.º 26
0
STDMETHODIMP CBDDemuxer::FillMVCExtensionQueue(REFERENCE_TIME rtBase)
{
  if (!m_MVCFormatContext)
    return E_FAIL;

  int ret, count = 0;
  bool found = (rtBase == Packet::INVALID_TIME);

  AVPacket mvcPacket = { 0 };
  av_init_packet(&mvcPacket);

  while (count < MVC_DEMUX_COUNT) {
    ret = av_read_frame(m_MVCFormatContext, &mvcPacket);

    if (ret == AVERROR(EINTR) || ret == AVERROR(EAGAIN)) {
      continue;
    }
    else if (ret == AVERROR_EOF) {
      DbgLog((LOG_TRACE, 10, L"EOF reading MVC extension data"));
      break;
    }
    else if (mvcPacket.size <= 0 || mvcPacket.stream_index != m_MVCStreamIndex) {
      av_packet_unref(&mvcPacket);
      continue;
    }
    else {
      AVStream *stream = m_MVCFormatContext->streams[mvcPacket.stream_index];

      REFERENCE_TIME rtDTS = m_lavfDemuxer->ConvertTimestampToRT(mvcPacket.dts, stream->time_base.num, stream->time_base.den);
      REFERENCE_TIME rtPTS = m_lavfDemuxer->ConvertTimestampToRT(mvcPacket.pts, stream->time_base.num, stream->time_base.den);

      if (rtBase == Packet::INVALID_TIME || rtDTS == Packet::INVALID_TIME) {
        // do nothing, can't compare timestamps when they are not set
      } else if (rtDTS < rtBase) {
        DbgLog((LOG_TRACE, 10, L"CBDDemuxer::FillMVCExtensionQueue(): Dropping MVC extension at %I64d, base is %I64d", rtDTS, rtBase));
        av_packet_unref(&mvcPacket);
        continue;
      }
      else if (rtDTS == rtBase) {
        found = true;
      }

      Packet *pPacket = new Packet();
      if (!pPacket) {
        av_packet_unref(&mvcPacket);
        return E_OUTOFMEMORY;
      }

      pPacket->SetPacket(&mvcPacket);
      pPacket->rtDTS = rtDTS;
      pPacket->rtPTS = rtPTS;

      m_lavfDemuxer->QueueMVCExtension(pPacket);
      av_packet_unref(&mvcPacket);

      count++;
    }
  }

  if (found)
    return S_OK;
  else if (count > 0)
    return S_FALSE;
  else
    return E_FAIL;
}
Exemplo n.º 27
0
int decode_thread(void *arg) {

	VideoState *is = (VideoState *)arg;
	AVFormatContext *pFormatCtx=NULL;
	AVPacket pkt1, *packet = &pkt1;

	int video_index = -1;
	int audio_index = -1;
	int i;

	is->videoStream = -1;
	is->audioStream = -1;

	global_video_state = is;

	// Open video file
	if (avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0)
		return -1; // Couldn't open file

	is->pFormatCtx = pFormatCtx;

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL)<0)
		return -1; // Couldn't find stream information

	// Dump information about the file onto standard error
	av_dump_format(pFormatCtx, 0, is->filename, 0);

	// Find the first video stream

	for (i = 0; i<pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
			video_index < 0) {
			video_index = i;
		}
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
			audio_index < 0) {
			audio_index = i;
		}
	}
	if (audio_index >= 0) {
		stream_component_open(is, audio_index);
	}
	if (video_index >= 0) {
		stream_component_open(is, video_index);
	}

	if (is->videoStream < 0 || is->audioStream < 0) {
		fprintf(stderr, "%s: could not open codecs\n", is->filename);
		goto fail;
	}

	// main decode loop

	for (;;) {
		if (is->quit) {
			break;
		}
		// seek stuff goes here
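		// Throttle demuxing while the audio/video packet queues are full;
		// the decoder threads will drain them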
		if (is->audioq.size > MAX_AUDIOQ_SIZE ||
			is->videoq.size > MAX_VIDEOQ_SIZE) {
			SDL_Delay(10);
			continue;
		}
		if (av_read_frame(is->pFormatCtx, packet) < 0) {
			if (is->pFormatCtx->pb->error == 0) {
				SDL_Delay(100); /* no error; wait for user input */
				continue;
			}
			else {
				break;
			}
		}
		// Is this a packet from the video stream?
		if (packet->stream_index == is->videoStream) {
			packet_queue_put(&is->videoq, packet);
		}
		else if (packet->stream_index == is->audioStream) {
			packet_queue_put(&is->audioq, packet);
		}
		else {
			av_free_packet(packet);
		}
	}
	/* all done - wait for it */
	while (!is->quit) {
		SDL_Delay(100);
	}

fail:
	{
		SDL_Event event;
		event.type = FF_QUIT_EVENT;
		event.user.data1 = is;
		SDL_PushEvent(&event);
	}
	return 0;
}
Exemplo n.º 28
0
int main(int argc, char *argv[])
{
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	int *stream_size=NULL;
	int *stream_max=NULL;
	int *stream_min=NULL;
	double *stream_ave=NULL;
	int total_max=0, total_min=INT32_MAX;
	double total_ave=0;
	int numberStreams;
	int frame_counter=0;
	int total_size=0;
	int tave=0;

	struct settings programSettings;
	struct stat fileStat;
	off_t total_file_size;
	double framerate;

	// default settings
	programSettings.ave_len=1;
	programSettings.output_stderr=0;
	programSettings.output_interval=1;
	programSettings.output_interval_seconds=0;
	programSettings.output_progress=0;


	// parse command-line options
	static const char *legal_flags = "s:i:I:ePh";

	int c;
	char *error=NULL;
	while ((c = getopt (argc, argv, legal_flags)) != -1) {
		switch (c) {
			case 's':
				// strtol() with an end pointer lets us detect a malformed
				// number and report the error to the user
				programSettings.ave_len=(int)strtol(optarg, &error, 10);
				if (*error || programSettings.ave_len < 1) {
					fprintf(stderr,"Smoothing value is invalid\n");
					print_usage();
					return -1;
				}

				break;
			case 'e':
				programSettings.output_stderr=1;
				break;
			case 'P':
				programSettings.output_progress=1;
				break;

			case 'i':
				programSettings.output_interval=(int)strtol(optarg, &error, 10);
				if (*error || programSettings.output_interval<1) {
					fprintf(stderr,"Interval is invalid\n");
					print_usage();

					return -1;
				}
				break;
			case 'I':
				programSettings.output_interval_seconds=strtod(optarg, &error);

				if (*error || programSettings.output_interval_seconds <= 0) {
					fprintf(stderr,"Interval Seconds is invalid\n");
					print_usage();

					return -1;
				}

				break;
			case 'h':
			case '?':
				print_usage();
				return 0;
		}
	}


	optind--;
	// argc -= optind;
	argv += optind;

	//fprintf (stderr, "optind = %d. Trying file: %s\n",optind,argv[1]);

	// Register all formats and codecs
	av_register_all();

	if (argv[1] == NULL)
	{
		fprintf(stderr,"Error: No filename.\n");
		print_usage();
		return -1; // Couldn't open file
	}


	if (programSettings.output_progress) {
		stat(argv[1], &fileStat);
		// check for return error
		progress_init(0,fileStat.st_size);
	}


	// Open video file
#if LIBAVFORMAT_VERSION_MAJOR < 53
	if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL) != 0)
#else
	if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
#endif
	{
		fprintf(stderr,"Error: could not open file.\n");
		return -1; // Couldn't open file
	}

	// Retrieve stream information
#if LIBAVFORMAT_VERSION_MAJOR < 53
	if (av_find_stream_info(pFormatCtx) < 0)
#else
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
#endif
	{
		fprintf(stderr,"Error: could not interpret file.\n");
		return -1; // Couldn't find stream information
	}

	// Dump information about file onto standard error
#if LIBAVFORMAT_VERSION_MAJOR < 53
	dump_format(pFormatCtx, 0, argv[1], 0);
#else
	av_dump_format(pFormatCtx, 0, argv[1], 0);
#endif

	// As this program outputs based on video frames.
	// Find the first video stream
	// To determine the bitrate.
	videoStream=-1;

	numberStreams = pFormatCtx->nb_streams;

	stream_size = (int *)malloc(numberStreams * sizeof(int));
	stream_min = (int *)malloc(numberStreams * sizeof(int));
	stream_max = (int *)malloc(numberStreams * sizeof(int));
	stream_ave = (double *)malloc(numberStreams * sizeof(double));
	if (!stream_size || !stream_min || !stream_max || !stream_ave) {
		fprintf(stderr,"Error: out of memory.\n");
		return -1;
	}


	for(i=0; i<numberStreams; i++) {
		//	fprintf (stderr,"stream: %d = %d (%s)\n",i,pFormatCtx->streams[i]->codec->codec_type ,pFormatCtx->streams[i]->codec->codec_name);
		// Initialise statistic counters
		stream_size[i] = 0;
		stream_min[i]=INT32_MAX;
		stream_max[i]=0;
		stream_ave[i] = 0;

#if LIBAVFORMAT_VERSION_MAJOR < 53
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
#else
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
#endif
		{
			videoStream=i;
#if LIBAVFORMAT_VERSION_MAJOR < 55
			framerate = pFormatCtx->streams[i]->r_frame_rate.num;
			if (pFormatCtx->streams[i]->r_frame_rate.den != 0)
				framerate /= pFormatCtx->streams[i]->r_frame_rate.den;
#else
			framerate = pFormatCtx->streams[i]->avg_frame_rate.num;
			if (pFormatCtx->streams[i]->avg_frame_rate.den != 0)
				framerate /= pFormatCtx->streams[i]->avg_frame_rate.den;
#endif
		}
	}

	if(videoStream==-1) {
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Codec not found
	}

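	// Fall back to the codec time base when the container reports no frame rate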
	if (framerate == 0)
	{
		//fprintf(stderr,"frame rate %d:%d\n",pCodecCtx->time_base.num,pCodecCtx->time_base.den);
		framerate = pCodecCtx->time_base.den;
		if (pCodecCtx->time_base.den != 0)
			framerate /= pCodecCtx->time_base.num;
	}



	if (programSettings.output_interval_seconds >0)
	{
		if (INT32_MAX / framerate > programSettings.output_interval_seconds)
		{
			programSettings.output_interval = programSettings.output_interval_seconds * framerate;
		} else {
			fprintf(stderr,"Interval seconds too large\n");
			free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
			return -1;
		}
	}
	//	fprintf (stderr,"Video Stream: %d Frame Rate: %g\n",videoStream,framerate);


	// Open codec (avcodec_open2() replaced avcodec_open() in libavcodec 53.6)
#if LIBAVCODEC_VERSION_MAJOR < 54
	if (avcodec_open(pCodecCtx, pCodec) < 0)
#else
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
#endif
	{
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Could not open codec
	}

	// Allocate video frame
#if LIBAVCODEC_VERSION_MAJOR < 55
	pFrame=avcodec_alloc_frame();
#else
	pFrame=av_frame_alloc();
#endif

	int counter_interval=0;


	total_file_size=0;
	// Loop until nothing read
	while(av_read_frame(pFormatCtx, &packet)>=0)
	{
		stream_size[packet.stream_index] += packet.size;

		if (programSettings.output_progress) {
			total_file_size += packet.size;
			progress_loadBar(total_file_size);
		}
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream)
		{
			// Decode video frame
			// (avcodec_decode_video2() was introduced in libavcodec 52.23;
			// the old avcodec_decode_video() was removed in 53)
	#if LIBAVCODEC_VERSION_MAJOR < 52
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
	#else
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
	#endif

			if (counter_interval++ >= programSettings.output_interval) {

				//if (!(frame_counter % ave_len)) {
				// print the statistics in gnuplot friendly format...
				total_size=0;
				for(i=0; i<numberStreams; i++) { total_size += stream_size[i]; }
				// if (tave == -1) { tave = total_size; }

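				// Smooth the interval byte total with a weighted moving
				// average over ave_len intervals, normalized per frame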
				tave = ((tave * (programSettings.ave_len-1)) + total_size) / programSettings.ave_len / programSettings.output_interval;

				if(total_min > total_size)
					total_min = total_size;

				if(total_max < total_size)
					total_max = total_size;

				total_ave += total_size;


				printf ("%f ",frame_counter/framerate);
				printf ("%f ",tave*8*framerate);

				for(i=0; i<numberStreams; i++) {

					// double rate = stream_size[i]*8*framerate/ programSettings.output_interval;

					if(stream_min[i] > stream_size[i])
						stream_min[i] = stream_size[i];

					if(stream_max[i] < stream_size[i])
						stream_max[i] = stream_size[i];

					stream_ave[i] += stream_size[i];


					printf ("%f ",stream_size[i]*8*framerate/ programSettings.output_interval);

					stream_size[i]=0;
				}
				printf("\n");

				//}
				counter_interval = 1;
			}
			frame_counter++;
		}

		// Free the packet that was allocated by av_read_frame
		// (av_freep(&packet) on a stack AVPacket would free the wrong
		// pointer; av_free_packet() is the correct call here)
		av_free_packet(&packet);
	}

	free(stream_size);


	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
#if LIBAVCODEC_VERSION_MAJOR < 53
	av_close_input_file(pFormatCtx);
#else
	avformat_close_input(&pFormatCtx);
#endif

	// Print statistics
	if (programSettings.output_stderr)
	{
		fprintf(stderr,"%20s %20s %20s %20s\n","Stream","Min Bitrate","Average bitrate","Max bitrate");

		fprintf(stderr,"%20s %20f %20f %20f\n","Total",total_min*8*framerate/ programSettings.output_interval,
				total_ave * 8*framerate/ programSettings.output_interval/(frame_counter/programSettings.output_interval),
				total_max*8*framerate/ programSettings.output_interval);
		for(i=0; i<numberStreams; i++) {
			fprintf(stderr,"%20d %20f %20f %20f\n",i,stream_min[i]*8*framerate/ programSettings.output_interval,
					stream_ave[i] *8*framerate/ programSettings.output_interval/(frame_counter/programSettings.output_interval),
					stream_max[i]*8*framerate/ programSettings.output_interval);
		}
	}
	free (stream_min); free (stream_max); free (stream_ave);

	return 0;
}
Exemplo n.º 29
0
int main(int argc, char* argv[])
{
	AVFormatContext* in_fctx;	
	AVCodecContext* in_cctx;
	AVCodec* in_codec;
	const char* in_filename;
	const char* out_filename;
	char* decoded_buf;
	char* output_buf;
	char* resample_buf;
	char* before_encoding_buf;
	int ret = 0;

	if (argc != 3)
	{
		printf("./audio_convertor input ouput\n");
		return -1;
	}
	//in_filename = "../input/input.aac";
	//out_filename = "output/aac2mp3.mp3";
	in_filename = argv[1];
	out_filename = argv[2];
	decoded_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	output_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	resample_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); 
	before_encoding_buf = (char*)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); 

	avcodec_register_all();
	av_register_all();

	// av_open_input_file() allocates the format context itself, so a
	// prior avformat_alloc_context() would only leak
	ret = av_open_input_file(&in_fctx, in_filename, NULL, 0, NULL);
	if ( ret != 0 )
	{
		printf("open input audio file[%s] fail\n", in_filename);
		return -1;
	}

	ret = av_find_stream_info(in_fctx);
	if ( ret < 0 )
	{
		printf("find stream in audio file[%s] fail\n", in_filename);
		return -1;
	}

	dump_format(in_fctx, 0, in_filename, 0);

	// Here we assume that if the file contains several audio streams,
	// only the first one is transcoded; video streams are ignored
	int i;
	int ast_index = -1;
	for (i = 0; i<(int)in_fctx->nb_streams; ++i)
	{
		if (in_fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			ast_index = i;
			break;
		}
	}
	if (ast_index == -1)
	{
		printf("there is not any audio stream in file[%s]\n", in_filename);
		return 0;
	}
	else
	{
		printf("find audio stream in file[%s]\n", in_filename);
	}
	
	in_cctx = in_fctx->streams[ast_index]->codec;
	//in_cctx->codec_id = CODEC_ID_GSM;
	in_codec = avcodec_find_decoder(in_cctx->codec_id);
	if (!in_codec)
	{
		printf("find decoder for codec_id[%d] fail, file[%s]\n", in_cctx->codec_id, in_filename);
		return -1;
	}
	ret = avcodec_open(in_cctx, in_codec);
	if (ret < 0)
	{
		printf("open codec for stream[idx:%d] of file[%s] fail\n", ast_index, in_filename);
		return -1;
	}
	printf("open codec[name:%s] for stream[idx:%d] of file[%s]\n", in_codec->name, ast_index, in_filename);

	// Initialize the output side
	AVOutputFormat* out_fmt;
	AVFormatContext* out_fctx;
	AVCodecContext* out_cctx = NULL;

	out_fmt = av_guess_format(NULL, out_filename, NULL);
	if (!out_fmt)
	{
		printf("Could not deduce output format from file extension: using MPEG-3.\n");
		out_fmt = av_guess_format("mp3", NULL, NULL);
	}
	if (!out_fmt)
	{
		fprintf(stderr, "Could not find suitable output format\n");
		exit(1);
	}

	out_fctx = avformat_alloc_context();
	if (!out_fctx)
	{
		fprintf(stderr, "avformat_alloc_context fail\n");
		exit(1);
	}
	out_fctx->oformat = out_fmt;

	out_cctx = output_decode_init(in_cctx, out_fctx, out_fmt->audio_codec);
	if (!out_cctx)
	{
		fprintf(stderr, "output_codec_init fail\n");
		exit(1);
	}
	/* set the output parameters (must be done even if no parameters). */
	if (av_set_parameters(out_fctx, NULL) < 0) 
	{
		fprintf(stderr, "Invalid output format parameters\n");
		exit(1);
	}
	dump_format(out_fctx, 0, out_filename, 1);

	output_decode_open(out_cctx);

	/* open the output file */
	if (!(out_fmt->flags & AVFMT_NOFILE)) 
	{
		if (url_fopen(&out_fctx->pb, out_filename, URL_WRONLY) < 0)
		{
			fprintf(stderr, "Could not open '%s'\n", out_filename);
			exit(1);
		}
	}
	/* write the stream header, if any */
	if(av_write_header(out_fctx) < 0)
	{
		fprintf(stderr, "Could not write header for output file\n");
		return -1;
	}

	int decoded_size;
	AVPacket in_packet;
	AVPacket out_packet;
	ReSampleContext *rs_ctx = NULL;
	/*
	Reference: http://hi.baidu.com/wg_wang/item/34396781d20b4b1ec316270b
	Two points to note:
	(1) Read the input file frame by frame, decode, re-encode to the output
	format's requirements, and write frame by frame to the output file.
	Because sample_rate and channels may differ, the audio data must be
	resampled along the way.
	(2) Different codecs require different amounts of data per audio frame,
	so input data may have to be buffered until the output encoder has
	enough, or one input frame may have to feed several output frames.
	This is why we initialize the resampler and the FIFO provided by
	libavutil (declared in libavutil/fifo.h) to buffer data temporarily.
	For example: AAC has frame_size=1024 while MP3 has frame_size=1152;
	without this FIFO the generated MP3 file would be broken.
	*/
	// Set up resampling
	rs_ctx = av_audio_resample_init(
				out_cctx->channels, in_cctx->channels,
				out_cctx->sample_rate, in_cctx->sample_rate,
				out_cctx->sample_fmt, in_cctx->sample_fmt,
				16, 10, 0, 0.8);

	AVFifoBuffer *iofifo;
	iofifo = av_fifo_alloc(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);
	av_init_packet(&in_packet);
	av_init_packet(&out_packet);

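	// Main loop: demux -> decode -> resample -> buffer in FIFO ->
	// encode in frame_size chunks -> mux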
	while (av_read_frame(in_fctx, &in_packet) >= 0)
	{
		// Decode from a shadow copy so in_packet's original data pointer
		// stays intact for av_free_packet() below
		AVPacket dec_packet = in_packet;
		while (dec_packet.size > 0)
		{
			int used_size;
			decoded_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			used_size = avcodec_decode_audio3(in_cctx, (int16_t *)decoded_buf, &decoded_size, &dec_packet);
			if (used_size < 0)
			{
				printf("avcodec_decode_audio3 fail\n");
				exit(1);
			}
			int bs, frame_bytes;

			bs = audio_resample(rs_ctx, (short *)resample_buf, (short *)decoded_buf, decoded_size/(in_cctx->channels*g_in_bytes));
			ret = av_fifo_generic_write(iofifo, (uint8_t *)resample_buf, bs*out_cctx->channels*g_out_bytes, NULL);
			//ret = av_fifo_generic_write(iofifo, (uint8_t *)decoded_buf, in_cctx->frame_size*in_cctx->channels*g_out_bytes, NULL);

			frame_bytes = out_cctx->frame_size * g_out_bytes * out_cctx->channels;
			while (av_fifo_size(iofifo) >= frame_bytes)
			{
				ret = av_fifo_generic_read(iofifo, before_encoding_buf, frame_bytes, NULL);
				out_packet.size = avcodec_encode_audio(out_cctx, (uint8_t*)output_buf, frame_bytes, (short *)before_encoding_buf);
				out_packet.data = (uint8_t *)output_buf;
				av_write_frame(out_fctx, &out_packet);
			}

			dec_packet.size -= used_size;
			dec_packet.data += used_size;
		}
		// Release the demuxed packet before reading the next one
		av_free_packet(&in_packet);
	}
	/* write the trailer, if any */
	av_write_trailer(out_fctx);
	if (!(out_fmt->flags & AVFMT_NOFILE)) {
		/* close the output file */
		url_fclose(out_fctx->pb);
	}
}
Exemplo n.º 30
-1
static int
artwork_rescale(AVFormatContext *src_ctx, int s, int out_w, int out_h, int format, struct evbuffer *evbuf)
{
  uint8_t *buf;
  uint8_t *outbuf;

  AVCodecContext *src;

  AVFormatContext *dst_ctx;
  AVCodecContext *dst;
  AVOutputFormat *dst_fmt;
  AVStream *dst_st;

  AVCodec *img_decoder;
  AVCodec *img_encoder;

  int64_t pix_fmt_mask;
  const enum PixelFormat *pix_fmts;

  AVFrame *i_frame;
  AVFrame *o_frame;

  struct SwsContext *swsctx;

  AVPacket pkt;
  int have_frame;

  int outbuf_len;

  int ret;

  src = src_ctx->streams[s]->codec;

  img_decoder = avcodec_find_decoder(src->codec_id);
  if (!img_decoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable decoder found for artwork %s\n", src_ctx->filename);

      return -1;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 6)
  ret = avcodec_open2(src, img_decoder, NULL);
#else
  ret = avcodec_open(src, img_decoder);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for decoding: %s\n", strerror(AVUNERROR(ret)));

      return -1;
    }

  /* Set up output */
#if LIBAVFORMAT_VERSION_MAJOR >= 53 || (LIBAVFORMAT_VERSION_MAJOR == 52 && LIBAVFORMAT_VERSION_MINOR >= 45)
  /* FFmpeg 0.6 */
  dst_fmt = av_guess_format("image2", NULL, NULL);
#else
  dst_fmt = guess_format("image2", NULL, NULL);
#endif
  if (!dst_fmt)
    {
      DPRINTF(E_LOG, L_ART, "ffmpeg image2 muxer not available\n");

      ret = -1;
      goto out_close_src;
    }

  dst_fmt->video_codec = CODEC_ID_NONE;

  /* Try to keep same codec if possible */
  if ((src->codec_id == CODEC_ID_PNG) && (format & ART_CAN_PNG))
    dst_fmt->video_codec = CODEC_ID_PNG;
  else if ((src->codec_id == CODEC_ID_MJPEG) && (format & ART_CAN_JPEG))
    dst_fmt->video_codec = CODEC_ID_MJPEG;

  /* If not possible, select new codec */
  if (dst_fmt->video_codec == CODEC_ID_NONE)
    {
      if (format & ART_CAN_PNG)
	dst_fmt->video_codec = CODEC_ID_PNG;
      else if (format & ART_CAN_JPEG)
	dst_fmt->video_codec = CODEC_ID_MJPEG;
    }

  img_encoder = avcodec_find_encoder(dst_fmt->video_codec);
  if (!img_encoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable encoder found for codec ID %d\n", dst_fmt->video_codec);

      ret = -1;
      goto out_close_src;
    }

  dst_ctx = avformat_alloc_context();
  if (!dst_ctx)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for format context\n");

      ret = -1;
      goto out_close_src;
    }

  dst_ctx->oformat = dst_fmt;

#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_fmt->flags &= ~AVFMT_NOFILE;
#else
  ret = snprintf(dst_ctx->filename, sizeof(dst_ctx->filename), "evbuffer:%p", evbuf);
  if ((ret < 0) || (ret >= sizeof(dst_ctx->filename)))
    {
      DPRINTF(E_LOG, L_ART, "Output artwork URL too long\n");

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
  dst_st = avformat_new_stream(dst_ctx, NULL);
#else
  dst_st = av_new_stream(dst_ctx, 0);
#endif
  if (!dst_st)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for new output stream\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  dst = dst_st->codec;

#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 35)
  avcodec_get_context_defaults3(dst, NULL);
#else
  avcodec_get_context_defaults2(dst, AVMEDIA_TYPE_VIDEO);
#endif

  if (dst_fmt->flags & AVFMT_GLOBALHEADER)
    dst->flags |= CODEC_FLAG_GLOBAL_HEADER;

  dst->codec_id = dst_fmt->video_codec;
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  dst->codec_type = AVMEDIA_TYPE_VIDEO;
#else
  dst->codec_type = CODEC_TYPE_VIDEO;
#endif

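  /* Build a bitmask of the pixel formats the encoder supports so
   * avcodec_find_best_pix_fmt() can pick the closest match to the source */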
  pix_fmt_mask = 0;
  pix_fmts = img_encoder->pix_fmts;
  while (pix_fmts && (*pix_fmts != -1))
    {
      pix_fmt_mask |= ((int64_t)1 << *pix_fmts);
      pix_fmts++;
    }

  dst->pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src->pix_fmt, 1, NULL);

  if (dst->pix_fmt < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not determine best pixel format\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  DPRINTF(E_DBG, L_ART, "Selected pixel format: %d\n", dst->pix_fmt);

  dst->time_base.num = 1;
  dst->time_base.den = 25;

  dst->width = out_w;
  dst->height = out_h;

#if LIBAVFORMAT_VERSION_MAJOR <= 52 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR <= 1)
  ret = av_set_parameters(dst_ctx, NULL);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Invalid parameters for artwork output: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

  /* Open encoder */
#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 6)
  ret = avcodec_open2(dst, img_encoder, NULL);
#else
  ret = avcodec_open(dst, img_encoder);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst_ctx;
    }

  i_frame = avcodec_alloc_frame();
  o_frame = avcodec_alloc_frame();

  if (!i_frame || !o_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n");

      ret = -1;
      goto out_free_frames;
    }

  ret = avpicture_get_size(dst->pix_fmt, src->width, src->height);

  DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret);

  buf = (uint8_t *)av_malloc(ret);
  if (!buf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n");

      ret = -1;
      goto out_free_frames;
    }

  avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height);

  swsctx = sws_getContext(src->width, src->height, src->pix_fmt,
			  dst->width, dst->height, dst->pix_fmt,
			  SWS_BICUBIC, NULL, NULL, NULL);
  if (!swsctx)
    {
      DPRINTF(E_LOG, L_ART, "Could not get SWS context\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Get frame */
  have_frame = 0;
  while (av_read_frame(src_ctx, &pkt) == 0)
    {
      if (pkt.stream_index != s)
	{
	  av_free_packet(&pkt);
	  continue;
	}

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 32)
      /* FFmpeg 0.6 */
      avcodec_decode_video2(src, i_frame, &have_frame, &pkt);
#else
      avcodec_decode_video(src, i_frame, &have_frame, pkt.data, pkt.size);
#endif

      break;
    }

  if (!have_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not decode artwork\n");

      av_free_packet(&pkt);
      sws_freeContext(swsctx);

      ret = -1;
      goto out_free_buf;
    }

  /* Scale */
#if LIBSWSCALE_VERSION_MAJOR >= 1 || (LIBSWSCALE_VERSION_MAJOR == 0 && LIBSWSCALE_VERSION_MINOR >= 9)
  /* FFmpeg 0.6, libav 0.6+ */
  sws_scale(swsctx, (const uint8_t * const *)i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#else
  sws_scale(swsctx, i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#endif

  sws_freeContext(swsctx);
  av_free_packet(&pkt);

  /* Open output file */
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_ctx->pb = avio_evbuffer_open(evbuf);
#else
  ret = url_fopen(&dst_ctx->pb, dst_ctx->filename, URL_WRONLY);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open artwork destination buffer\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Encode frame */
  outbuf_len = dst->width * dst->height * 3;
  if (outbuf_len < FF_MIN_BUFFER_SIZE)
    outbuf_len = FF_MIN_BUFFER_SIZE;

  outbuf = (uint8_t *)av_malloc(outbuf_len);
  if (!outbuf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for encoded artwork buffer\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 53
      avio_evbuffer_close(dst_ctx->pb);
#else
      url_fclose(dst_ctx->pb);
#endif

      ret = -1;
      goto out_free_buf;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54
  av_init_packet(&pkt);
  pkt.data = outbuf;
  pkt.size = outbuf_len;
  ret = avcodec_encode_video2(dst, &pkt, o_frame, &have_frame);
  if (!ret && have_frame && dst->coded_frame) 
    {
      dst->coded_frame->pts       = pkt.pts;
      dst->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
    }
  else if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }
#else
  ret = avcodec_encode_video(dst, outbuf, outbuf_len, o_frame);
  if (ret <= 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  av_init_packet(&pkt);
  pkt.stream_index = 0;
  pkt.data = outbuf;
  pkt.size = ret;
#endif

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_write_header(dst_ctx, NULL);
#else
  ret = av_write_header(dst_ctx);
#endif
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork header: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_interleaved_write_frame(dst_ctx, &pkt);

  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Error writing artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_write_trailer(dst_ctx);
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork trailer: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  switch (dst_fmt->video_codec)
    {
      case CODEC_ID_PNG:
	ret = ART_FMT_PNG;
	break;

      case CODEC_ID_MJPEG:
	ret = ART_FMT_JPEG;
	break;

      default:
	DPRINTF(E_LOG, L_ART, "Unhandled rescale output format\n");
	ret = -1;
	break;
    }

 out_fclose_dst:
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  avio_evbuffer_close(dst_ctx->pb);
#else
  url_fclose(dst_ctx->pb);
#endif
  av_free(outbuf);

 out_free_buf:
  av_free(buf);

 out_free_frames:
  if (i_frame)
    av_free(i_frame);
  if (o_frame)
    av_free(o_frame);
  avcodec_close(dst);

 out_free_dst_ctx:
  avformat_free_context(dst_ctx);

 out_close_src:
  avcodec_close(src);

  return ret;
}