static void process(ID3ASFilterContext *context, AVFrame *frame, AVRational timebase)
{
  codec_t *this = context->priv_data;
  int ret;

  do_init(this, frame, timebase);

  if (av_buffersrc_write_frame(this->buffersrc_ctx, frame) < 0) {
    ERROR("Error while feeding the filtergraph\n");
    exit(-1);
  }
  while (1) {
    ret = av_buffersink_get_frame(this->buffersink_ctx, this->output_frame);

    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 
      break;
    if (ret < 0) {
      ERROR("Error from get_frame");
      exit(-1);
    }
    send_to_graph(context, this->output_frame, this->output_timebase);

    av_frame_unref(this->output_frame);
  }
}
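All the examples on this page follow the same feed/drain contract: write one frame into the graph's buffer source, then pull frames from the buffer sink until it returns AVERROR(EAGAIN) (the graph needs more input) or AVERROR_EOF (the graph is fully drained). A minimal sketch of that pattern as a standalone helper, assuming an already-configured graph; filter_frame and consume are hypothetical names, not part of the example above:

/* Sketch only: feed one frame, drain every frame currently available.
 * src and sink are the graph's buffer source and buffer sink contexts;
 * consume() is a hypothetical callback. */
static int filter_frame(AVFilterContext *src, AVFilterContext *sink,
                        AVFrame *in, AVFrame *out, void (*consume)(AVFrame *))
{
    int ret = av_buffersrc_write_frame(src, in);
    if (ret < 0)
        return ret;

    while ((ret = av_buffersink_get_frame(sink, out)) >= 0) {
        consume(out);
        av_frame_unref(out);   /* the sink handed us a new reference */
    }
    /* EAGAIN and EOF mean "no more output for now", not failure */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}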
Example #2
void MovieDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, VideoFrame& videoFrame)
{
    initializeFilterGraph(m_pFormatContext->streams[m_VideoStream]->time_base, scaledSize, maintainAspectRatio);

    auto del = [] (AVFrame* f) { av_frame_free(&f); };
    std::unique_ptr<AVFrame, decltype(del)> res(av_frame_alloc(), del);

    checkRc(av_buffersrc_write_frame(m_pFilterSource, m_pFrame), "Failed to write frame to filter graph");

    int attempts = 0;
    int rc = av_buffersink_get_frame(m_pFilterSink, res.get());
    while (rc == AVERROR(EAGAIN) && attempts++ < 10)
    {
        decodeVideoFrame();
        checkRc(av_buffersrc_write_frame(m_pFilterSource, m_pFrame), "Failed to write frame to filter graph");
        rc = av_buffersink_get_frame(m_pFilterSink, res.get());
    }

    checkRc(rc, "Failed to get buffer from filter");

    videoFrame.width = res->width;
    videoFrame.height = res->height;
    videoFrame.lineSize = videoFrame.width * 4; // assumes a packed 4-bytes-per-pixel output format (e.g. RGBA)

    if (videoFrame.frameData != nullptr)
        delete[] videoFrame.frameData; // allocated with new[], so delete[] is required

    uint8_t* framedata = res->data[0];

    videoFrame.frameData = new uint8_t[videoFrame.width * 4 * videoFrame.height];
    // Copy rows in reverse order to flip the image vertically.
    for (int y = 0; y < videoFrame.height; y++)
    {
        memcpy(videoFrame.frameData + ((videoFrame.height - y - 1) * videoFrame.lineSize),
               framedata + (y * res->linesize[0]), videoFrame.lineSize);
    }

    if (m_pFilterGraph)
    {
        avfilter_graph_free(&m_pFilterGraph);
    }
}
Example #3
JNIEXPORT jlong JNICALL
Java_org_jitsi_impl_neomedia_codec_FFmpeg_get_1filtered_1video_1frame
    (JNIEnv *env, jclass clazz,
    jlong input, jint width, jint height, jint pixFmt,
    jlong buffer, jlong ffsink, jlong output)
{
    AVFrame *input_ = (AVFrame *) (intptr_t) input;
    AVFilterContext *buffer_ = (AVFilterContext *) (intptr_t) buffer;
    AVFilterBufferRef *ref = NULL;

    input_->width = width;
    input_->height = height;
    input_->format = pixFmt;
    if (av_buffersrc_write_frame(buffer_, input_) == 0)
    {
        AVFilterContext *ffsink_ = (AVFilterContext *) (intptr_t) ffsink;

        if (ff_request_frame(ffsink_->inputs[0]) == 0)
        {
            ref = (AVFilterBufferRef *) (ffsink_->priv);
            if (ref)
            {
                AVFrame *output_ = (AVFrame *) (intptr_t) output;

                /*
                 * The data of ref is returned in output, so it must stay
                 * alive for as long as output needs it. Take ownership of
                 * the buffer here; the user of output unrefs it when they
                 * are done with output.
                 */
                ffsink_->priv = NULL;

                memcpy(output_->data, ref->data, sizeof(output_->data));
                memcpy(
                    output_->linesize,
                    ref->linesize,
                    sizeof(output_->linesize));
                output_->interlaced_frame = ref->video->interlaced;
                output_->top_field_first = ref->video->top_field_first;
            }
        }
    }
    return (jlong) (intptr_t) ref;
}
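This example is written against a very old libavfilter: AVFilterBufferRef, ff_request_frame, and reading the result out of the sink's priv pointer were all removed years ago. On current FFmpeg the same pull goes through av_buffersink_get_frame; a rough sketch of the equivalent, with get_filtered_frame as a hypothetical name:

/* Sketch: pull a filtered frame into an AVFrame instead of poking at
 * the sink's private data; the caller unrefs 'out' when done with it. */
static int get_filtered_frame(AVFilterContext *buffersrc,
                              AVFilterContext *buffersink,
                              AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_write_frame(buffersrc, in);
    if (ret < 0)
        return ret;
    return av_buffersink_get_frame(buffersink, out);
}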
Example #4
static void mix_silence_fill_idx_upto(mix_t *mix, unsigned int idx, uint64_t upto) {
	unsigned int silence_samples = mix->format.clockrate / 100; /* 10 ms of audio */

	while (mix->in_pts[idx] < upto) {
		if (G_UNLIKELY(upto - mix->in_pts[idx] > mix->format.clockrate * 30)) {
			ilog(LOG_WARN, "More than 30 seconds of silence needed to fill mix buffer, resetting");
			mix->in_pts[idx] = upto;
			break;
		}

		if (G_UNLIKELY(!mix->silence_frame)) {
			mix->silence_frame = av_frame_alloc();
			mix->silence_frame->format = mix->format.format;
			mix->silence_frame->channel_layout =
				av_get_default_channel_layout(mix->format.channels);
			mix->silence_frame->nb_samples = silence_samples;
			mix->silence_frame->sample_rate = mix->format.clockrate;
			if (av_frame_get_buffer(mix->silence_frame, 0) < 0) {
				ilog(LOG_ERR, "Failed to get silence frame buffers");
				return;
			}
			int planes = av_sample_fmt_is_planar(mix->silence_frame->format) ? mix->format.channels : 1;
			for (int i = 0; i < planes; i++)
				memset(mix->silence_frame->extended_data[i], 0, mix->silence_frame->linesize[0]);
		}

		dbg("pushing silence frame into stream %i (%lli < %llu)", idx,
				(long long unsigned) mix->in_pts[idx],
				(long long unsigned) upto);

		mix->silence_frame->pts = mix->in_pts[idx];
		mix->silence_frame->nb_samples = MIN(silence_samples, upto - mix->in_pts[idx]);
		mix->in_pts[idx] += mix->silence_frame->nb_samples;

		if (av_buffersrc_write_frame(mix->src_ctxs[idx], mix->silence_frame))
			ilog(LOG_WARN, "Failed to write silence frame to buffer");
	}
}
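AVFrame.channel_layout and av_get_default_channel_layout are deprecated since FFmpeg 5.1 in favour of AVChannelLayout. A sketch of the same silence-frame setup against the newer API; format, channels, and clockrate stand in for the mix->format fields used above:

/* Sketch: build a 10 ms silence frame with the AVChannelLayout API.
 * av_samples_set_silence also handles sample formats whose silence is
 * not all-zero bytes (e.g. AV_SAMPLE_FMT_U8), unlike a plain memset. */
AVFrame *silence = av_frame_alloc();
silence->format = format;                      /* an enum AVSampleFormat */
av_channel_layout_default(&silence->ch_layout, channels);
silence->nb_samples = clockrate / 100;         /* 10 ms of audio */
silence->sample_rate = clockrate;
if (av_frame_get_buffer(silence, 0) >= 0)
    av_samples_set_silence(silence->extended_data, 0, silence->nb_samples,
                           channels, (enum AVSampleFormat) silence->format);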
Example #5
/*	Decode and play the stream. Returns 0 or an AVERROR code.
 */
static int play (player_t * const player) {
	assert (player != NULL);

	AVPacket pkt;
	av_init_packet (&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	AVFrame *frame = NULL, *filteredFrame = NULL;
	frame = av_frame_alloc ();
	assert (frame != NULL);
	filteredFrame = av_frame_alloc ();
	assert (filteredFrame != NULL);

	while (!player->doQuit) {
		int ret = av_read_frame (player->fctx, &pkt);
		if (ret < 0) {
			av_free_packet (&pkt);
			return ret;
		} else if (pkt.stream_index != player->streamIdx) {
			av_free_packet (&pkt);
			continue;
		}

		AVPacket pkt_orig = pkt;

		/* pausing */
		pthread_mutex_lock (&player->pauseMutex);
		if (player->doPause) {
			av_read_pause (player->fctx);
			do {
				pthread_cond_wait (&player->pauseCond, &player->pauseMutex);
			} while (player->doPause);
			av_read_play (player->fctx);
		}
		pthread_mutex_unlock (&player->pauseMutex);

		while (pkt.size > 0 && !player->doQuit) {
			int got_frame = 0;

			const int decoded = avcodec_decode_audio4 (player->st->codec,
					frame, &got_frame, &pkt);
			if (decoded < 0) {
				/* skip this one */
				break;
			}

			if (got_frame != 0) {
				/* XXX: suppresses warning from resample filter */
				if (frame->pts == (int64_t) AV_NOPTS_VALUE) {
					frame->pts = 0;
				}
				ret = av_buffersrc_write_frame (player->fabuf, frame);
				assert (ret >= 0);

				while (true) {
					if (av_buffersink_get_frame (player->fbufsink, filteredFrame) < 0) {
						/* try again next frame */
						break;
					}

					const int numChannels = av_get_channel_layout_nb_channels (
							filteredFrame->channel_layout);
					const int bps = av_get_bytes_per_sample(filteredFrame->format);
					ao_play (player->aoDev, (char *) filteredFrame->data[0],
							filteredFrame->nb_samples * numChannels * bps);

					av_frame_unref (filteredFrame);
				}
			}

			pkt.data += decoded;
			pkt.size -= decoded;
		}

		av_free_packet (&pkt_orig);

		player->songPlayed = av_q2d (player->st->time_base) * (double) pkt.pts;
		player->lastTimestamp = pkt.pts;
	}

	av_frame_free (&filteredFrame);
	av_frame_free (&frame);

	return 0;
}
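avcodec_decode_audio4 and av_free_packet are gone from modern FFmpeg; the send/receive API also removes the manual pkt.data/pkt.size bookkeeping, since the codec context consumes whole packets. A sketch of the inner decode loop on that API (FFmpeg 3.1+), assuming the stream's decoder has been opened into a separate AVCodecContext named dec, since AVStream.codec was removed as well:

/* Sketch: one packet in, zero or more frames out. */
if (avcodec_send_packet(dec, &pkt) >= 0) {
    while (avcodec_receive_frame(dec, frame) >= 0) {
        if (av_buffersrc_write_frame(player->fabuf, frame) < 0)
            break;
        /* drain the buffersink here, as in the while (true) loop above */
        av_frame_unref(frame);
    }
}
av_packet_unref(&pkt);   /* replaces av_free_packet */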
Example #6
/*	Decode and play the stream. Returns 0 or an AVERROR code.
 */
static int play (player_t * const player) {
	assert (player != NULL);

	AVPacket pkt;
	av_init_packet (&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	AVFrame *frame = NULL, *filteredFrame = NULL;
	frame = avcodec_alloc_frame ();
	assert (frame != NULL);
	filteredFrame = avcodec_alloc_frame ();
	assert (filteredFrame != NULL);

	while (!player->doQuit) {
		ping ();
		int ret = av_read_frame (player->fctx, &pkt);
		if (ret < 0) {
			av_free_packet (&pkt);
			return ret;
		} else if (pkt.stream_index != player->streamIdx) {
			av_free_packet (&pkt);
			continue;
		}

		AVPacket pkt_orig = pkt;

		/* pausing */
		pthread_mutex_lock (&player->pauseMutex);
		while (true) {
			if (!player->doPause) {
				av_read_play (player->fctx);
				break;
			} else {
				av_read_pause (player->fctx);
			}
			pthread_cond_wait (&player->pauseCond, &player->pauseMutex);
		}
		pthread_mutex_unlock (&player->pauseMutex);

		do {
			int got_frame = 0;

			const int decoded = avcodec_decode_audio4 (player->st->codec,
					frame, &got_frame, &pkt);
			if (decoded < 0) {
				/* skip this one */
				break;
			}

			if (got_frame != 0) {
				/* XXX: suppresses warning from resample filter */
				if (frame->pts == (int64_t) AV_NOPTS_VALUE) {
					frame->pts = 0;
				}
				ret = av_buffersrc_write_frame (player->fabuf, frame);
				assert (ret >= 0);

				while (true) {
					AVFilterBufferRef *audioref = NULL;
#ifdef HAVE_AV_BUFFERSINK_GET_BUFFER_REF
					/* ffmpeg’s compatibility layer is broken in some releases */
					if (av_buffersink_get_buffer_ref (player->fbufsink,
							&audioref, 0) < 0) {
#else
					if (av_buffersink_read (player->fbufsink, &audioref) < 0) {
#endif
						/* try again next frame */
						break;
					}

					ret = avfilter_copy_buf_props (filteredFrame, audioref);
					assert (ret >= 0);

					const int numChannels = av_get_channel_layout_nb_channels (
							filteredFrame->channel_layout);
					const int bps = av_get_bytes_per_sample(filteredFrame->format);
					ao_play (player->aoDev, (char *) filteredFrame->data[0],
							filteredFrame->nb_samples * numChannels * bps);

					avfilter_unref_bufferp (&audioref);
				}
			}

			pkt.data += decoded;
			pkt.size -= decoded;
		} while (pkt.size > 0);

		av_free_packet (&pkt_orig);

		player->songPlayed = av_q2d (player->st->time_base) * (double) pkt.pts;
		player->lastTimestamp = pkt.pts;
	}

	avcodec_free_frame (&filteredFrame);
	avcodec_free_frame (&frame);

	return 0;
}

static void finish (player_t * const player) {
	ao_close (player->aoDev);
	player->aoDev = NULL;
	if (player->fgraph != NULL) {
		avfilter_graph_free (&player->fgraph);
		player->fgraph = NULL;
	}
	if (player->st != NULL && player->st->codec != NULL) {
		avcodec_close (player->st->codec);
		player->st = NULL;
	}
	if (player->fctx != NULL) {
		avformat_close_input (&player->fctx);
	}
}
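avcodec_close on player->st->codec likewise only works on old FFmpeg. With a decoder opened into its own context, the teardown would look roughly like this; player->cctx is an assumed field, not part of the example above:

/* Sketch of finish() for modern FFmpeg; the three av*_free functions
 * are NULL-safe and reset the pointer they are handed. */
static void finish (player_t * const player) {
	ao_close (player->aoDev);
	player->aoDev = NULL;
	avfilter_graph_free (&player->fgraph);
	avcodec_free_context (&player->cctx);    /* assumed field */
	avformat_close_input (&player->fctx);
}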
Example #7
// decode one audio packet and return its uncompressed size
static int audio_decode_frame(struct GroovePlaylist *playlist, struct GrooveFile *file) {
    struct GroovePlaylistPrivate *p = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;

    AVPacket *pkt = &f->audio_pkt;
    AVCodecContext *dec = f->audio_st->codec;

    AVPacket *pkt_temp = &p->audio_pkt_temp;
    *pkt_temp = *pkt;

    // update the audio clock with the pts if we can
    if (pkt->pts != AV_NOPTS_VALUE)
        f->audio_clock = av_q2d(f->audio_st->time_base) * pkt->pts;

    int max_data_size = 0;
    int len1, got_frame;
    int new_packet = 1;
    AVFrame *in_frame = p->in_frame;

    // NOTE: the audio packet can contain several frames
    while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
        new_packet = 0;

        len1 = avcodec_decode_audio4(dec, in_frame, &got_frame, pkt_temp);
        if (len1 < 0) {
            // if error, we skip the frame
            pkt_temp->size = 0;
            return -1;
        }

        pkt_temp->data += len1;
        pkt_temp->size -= len1;

        if (!got_frame) {
            // stop sending empty packets if the decoder is finished
            if (!pkt_temp->data && (dec->codec->capabilities & CODEC_CAP_DELAY))
                return 0;
            continue;
        }

        // push the audio data from decoded frame into the filtergraph
        int err = av_buffersrc_write_frame(p->abuffer_ctx, in_frame);
        if (err < 0) {
            av_strerror(err, p->strbuf, sizeof(p->strbuf));
            av_log(NULL, AV_LOG_ERROR, "error writing frame to buffersrc: %s\n",
                    p->strbuf);
            return -1;
        }

        // for each data format in the sink map, pull filtered audio from its
        // buffersink, turn it into a GrooveBuffer and then increment the ref
        // count for each sink in that stack.
        struct SinkMap *map_item = p->sink_map;
        double clock_adjustment = 0;
        while (map_item) {
            struct GrooveSink *example_sink = map_item->stack_head->sink;
            int data_size = 0;
            for (;;) {
                AVFrame *oframe = av_frame_alloc();
                int err = example_sink->buffer_sample_count == 0 ?
                    av_buffersink_get_frame(map_item->abuffersink_ctx, oframe) :
                    av_buffersink_get_samples(map_item->abuffersink_ctx, oframe, example_sink->buffer_sample_count);
                if (err == AVERROR_EOF || err == AVERROR(EAGAIN)) {
                    av_frame_free(&oframe);
                    break;
                }
                if (err < 0) {
                    av_frame_free(&oframe);
                    av_log(NULL, AV_LOG_ERROR, "error reading buffer from buffersink\n");
                    return -1;
                }
                struct GrooveBuffer *buffer = frame_to_groove_buffer(playlist, example_sink, oframe);
                if (!buffer) {
                    av_frame_free(&oframe);
                    return -1;
                }
                data_size += buffer->size;
                struct SinkStack *stack_item = map_item->stack_head;
                // hold a reference so the buffer cannot be freed before this
                // loop finishes; it is unref'd again right after the loop.
                groove_buffer_ref(buffer);
                while (stack_item) {
                    struct GrooveSink *sink = stack_item->sink;
                    struct GrooveSinkPrivate *s = (struct GrooveSinkPrivate *) sink;
                    // as soon as we call groove_queue_put, this buffer could be unref'd.
                    // so we ref before putting it in the queue, and unref if it failed.
                    groove_buffer_ref(buffer);
                    if (groove_queue_put(s->audioq, buffer) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "unable to put buffer in queue\n");
                        groove_buffer_unref(buffer);
                    }
                    stack_item = stack_item->next;
                }
                groove_buffer_unref(buffer);
            }
            if (data_size > max_data_size) {
                max_data_size = data_size;
                clock_adjustment = data_size / (double)example_sink->bytes_per_sec;
            }
            map_item = map_item->next;
        }

        // if no pts, then estimate it
        if (pkt->pts == AV_NOPTS_VALUE)
            f->audio_clock += clock_adjustment;
        return max_data_size;
    }
    return max_data_size;
}
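The av_buffersink_get_frame / av_buffersink_get_samples split above is the interesting part: get_frame returns frames of whatever size the graph produced, while get_samples returns exactly the requested number of samples per call (except possibly the last frame before EOF), which is how each sink honours a fixed buffer_sample_count. A minimal sketch of a fixed-size drain; abuffersink_ctx and consume are stand-ins:

/* Sketch: pull filtered audio in fixed 1024-sample chunks. */
AVFrame *oframe = av_frame_alloc();
int err;
while ((err = av_buffersink_get_samples(abuffersink_ctx, oframe, 1024)) >= 0) {
    consume(oframe);           /* oframe->nb_samples == 1024 until EOF */
    av_frame_unref(oframe);
}
/* err is AVERROR(EAGAIN) when more input is needed, AVERROR_EOF when drained */
av_frame_free(&oframe);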