/**
 * Create an AVFilterBufferRef that wraps (shares, does not copy) the audio
 * data of an AVFrame.
 *
 * @param frame source audio frame; its data planes are referenced directly
 * @param perms AV_PERM_* permission flags for the new reference
 * @return new buffer reference, or NULL on failure (more than 8 channels,
 *         layout/channel-count mismatch, allocation failure, or failure to
 *         copy the frame properties)
 */
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
                                                            int perms)
{
    AVFilterBufferRef *samplesref;
    int channels = av_frame_get_channels(frame);
    int64_t layout = av_frame_get_channel_layout(frame);

    if (av_frame_get_channels(frame) > 8)  // libavfilter does not support more than 8 channels FIXME, remove once libavfilter is fixed
        return NULL;

    /* A set layout must agree with the reported channel count. */
    if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) {
        av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
        return NULL;
    }

    samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels(
        (uint8_t **)frame->data, frame->linesize[0], perms,
        frame->nb_samples, frame->format, channels, layout);
    if (!samplesref)
        return NULL;

    if (avfilter_copy_frame_props(samplesref, frame) < 0) {
        /* Detach the shared data pointer before unref so the frame's own
         * buffers are not freed along with the failed reference; unref
         * also resets samplesref to NULL, which is what we return. */
        samplesref->buf->data[0] = NULL;
        avfilter_unref_bufferp(&samplesref);
    }

    return samplesref;
}
static bool audio_frame(struct ff_frame *frame, void *opaque) { struct ffmpeg_source *s = opaque; struct obs_source_audio audio_data = {0}; uint64_t pts; // Media ended if (frame == NULL) return true; pts = (uint64_t)(frame->pts * 1000000000.0L); int channels = av_frame_get_channels(frame->frame); for(int i = 0; i < channels; i++) audio_data.data[i] = frame->frame->data[i]; audio_data.samples_per_sec = frame->frame->sample_rate; audio_data.frames = frame->frame->nb_samples; audio_data.timestamp = pts; audio_data.format = convert_ffmpeg_sample_format(frame->frame->format); audio_data.speakers = channels; obs_source_output_audio(s->source, &audio_data); return true; }
/**
 * Send a frame to the next filter on this link, asserting that the frame's
 * properties match the negotiated link properties, then dispatching either
 * directly or through the min/max-samples reframing path.
 *
 * @param link  the output link of the source filter
 * @param frame frame to send; ownership passes to the callee
 * @return >= 0 on success, a negative AVERROR on error
 */
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    /* Consistency checks */
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        /* "scale" and "idet" legitimately accept frames that differ from
         * the link's negotiated geometry, so skip the checks for them. */
        if (strcmp(link->dst->filter->name, "scale") &&
            strcmp(link->dst->filter->name, "idet")) {
            av_assert1(frame->format == link->format);
            av_assert1(frame->width == link->w);
            av_assert1(frame->height == link->h);
        }
    } else {
        av_assert1(frame->format == link->format);
        av_assert1(av_frame_get_channels(frame) == link->channels);
        av_assert1(frame->channel_layout == link->channel_layout);
        av_assert1(frame->sample_rate == link->sample_rate);
    }

    /* Go directly to actual filtering if possible */
    if (link->type == AVMEDIA_TYPE_AUDIO &&
        link->min_samples &&
        (link->partial_buf ||
         frame->nb_samples < link->min_samples ||
         frame->nb_samples > link->max_samples)) {
        /* Frame size violates the link's framing constraints (or a partial
         * buffer is pending): go through the accumulate/split path. */
        return ff_filter_frame_needs_framing(link, frame);
    } else {
        return ff_filter_frame_framed(link, frame);
    }
}
// Human-readable channel layout of the wrapped frame, or "" when no frame is held.
string AudioSamples::channelsLayoutString() const
{
    if (!m_raw)
        return "";

    int     nbCh   = av_frame_get_channels(m_raw);
    int64_t layout = av_frame_get_channel_layout(m_raw);

    char layoutName[128] = {0};
    av_get_channel_layout_string(layoutName, sizeof(layoutName), nbCh, layout);
    return string(layoutName);
}
// Snapshot the decoder's current audio frame into the cached descriptor and
// return it. The returned pointer refers to member storage and stays valid
// only until the next call (the data pointer aliases the decoder's frame).
const CGEAudioFrameBufferData* CGEVideoDecodeHandler::getCurrentAudioFrame()
{
    AVFrame* af = m_context->pAudioFrame;
    CGEAudioFrameBufferData& cache = m_cachedAudioFrame;

    cache.timestamp      = av_frame_get_best_effort_timestamp(af);
    cache.data           = af->data[0];
    cache.nbSamples      = af->nb_samples;
    cache.bytesPerSample = av_get_bytes_per_sample((AVSampleFormat)af->format);
    cache.channels       = av_frame_get_channels(af);
    cache.linesize       = af->linesize[0];
    cache.format         = (CGESampleFormat)af->format;

    return &cache;
}
/**
 * Send a frame to the next filter: validate it against the link's negotiated
 * properties (audio property changes are rejected, not asserted), then queue
 * it on the link's frame FIFO and mark the destination filter ready.
 *
 * @param link  output link of the source filter
 * @param frame frame to send; ownership passes to this function (freed on error)
 * @return 0 on success, a negative AVERROR on error
 *         (AVERROR_PATCHWELCOME for unsupported mid-stream changes)
 */
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int ret;
    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    /* Consistency checks */
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        /* These filters legitimately accept frames that differ from the
         * link's negotiated geometry/format, so skip the assertions. */
        if (strcmp(link->dst->filter->name, "buffersink") &&
            strcmp(link->dst->filter->name, "format") &&
            strcmp(link->dst->filter->name, "idet") &&
            strcmp(link->dst->filter->name, "null") &&
            strcmp(link->dst->filter->name, "scale")) {
            av_assert1(frame->format == link->format);
            av_assert1(frame->width == link->w);
            av_assert1(frame->height == link->h);
        }
    } else {
        if (frame->format != link->format) {
            av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
            goto error;
        }
        if (av_frame_get_channels(frame) != link->channels) {
            av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
            goto error;
        }
        if (frame->channel_layout != link->channel_layout) {
            av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
            goto error;
        }
        if (frame->sample_rate != link->sample_rate) {
            av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
            goto error;
        }
    }

    /* A frame has arrived: the link is no longer blocked or starved. */
    link->frame_blocked_in = link->frame_wanted_out = 0;
    link->frame_count_in++;
    filter_unblock(link->dst);
    ret = ff_framequeue_add(&link->fifo, frame);
    if (ret < 0) {
        av_frame_free(&frame);
        return ret;
    }
    ff_filter_set_ready(link->dst, 300);
    return 0;

error:
    av_frame_free(&frame);
    return AVERROR_PATCHWELCOME;
}
/* Initialize a sample converter that maps samples from the reader frame "fr"
 * into the writer frame "fw". The conversion count is clamped to whichever
 * side has fewer samples remaining. */
void ffsox_convert_setup(convert_t *convert, frame_t *fr, frame_t *fw,
    double q, intercept_t *intercept)
{
  int remain_read, remain_write;

  convert->fr        = fr;
  convert->fw        = fw;
  convert->q         = q;
  convert->intercept = intercept;
  convert->channels  = av_frame_get_channels(fr->frame);

  /* Samples still unprocessed on each side. */
  remain_read  = fr->frame->nb_samples - fr->nb_samples.frame;
  remain_write = fw->frame->nb_samples - fw->nb_samples.frame;
  convert->nb_samples = PBU_MIN(remain_read, remain_write);
}
/**
 * Send a frame to the next filter: validate it against the link's negotiated
 * properties (audio property changes are rejected with an error, video
 * mismatches are asserted except for a few permissive filters), then either
 * filter it directly or route it through the min/max-samples reframing path.
 *
 * @param link  output link of the source filter
 * @param frame frame to send; ownership passes to the callee (freed on error)
 * @return >= 0 on success, a negative AVERROR on error
 *         (AVERROR_PATCHWELCOME for unsupported mid-stream changes)
 */
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    /* Consistency checks */
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        /* These filters legitimately accept frames that differ from the
         * link's negotiated geometry/format, so skip the assertions. */
        if (strcmp(link->dst->filter->name, "buffersink") &&
            strcmp(link->dst->filter->name, "format") &&
            strcmp(link->dst->filter->name, "idet") &&
            strcmp(link->dst->filter->name, "null") &&
            strcmp(link->dst->filter->name, "scale")) {
            av_assert1(frame->format == link->format);
            av_assert1(frame->width == link->w);
            av_assert1(frame->height == link->h);
        }
    } else {
        if (frame->format != link->format) {
            av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
            goto error;
        }
        if (av_frame_get_channels(frame) != link->channels) {
            av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
            goto error;
        }
        if (frame->channel_layout != link->channel_layout) {
            av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
            goto error;
        }
        if (frame->sample_rate != link->sample_rate) {
            av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
            goto error;
        }
    }

    /* Go directly to actual filtering if possible */
    if (link->type == AVMEDIA_TYPE_AUDIO &&
        link->min_samples &&
        (link->partial_buf ||
         frame->nb_samples < link->min_samples ||
         frame->nb_samples > link->max_samples)) {
        /* Frame size violates the link's framing constraints (or a partial
         * buffer is pending): go through the accumulate/split path. */
        return ff_filter_frame_needs_framing(link, frame);
    } else {
        return ff_filter_frame_framed(link, frame);
    }

error:
    av_frame_free(&frame);
    return AVERROR_PATCHWELCOME;
}
/**
 * ashowinfo: log per-frame information (timestamps, format, channel layout,
 * Adler-32 checksums of each plane) and pass the frame through unchanged.
 *
 * @param inlink input link carrying the audio frame
 * @param buf    audio frame; forwarded to the output untouched
 * @return result of forwarding the frame, or AVERROR(ENOMEM) on allocation
 *         failure
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s  = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = av_get_channel_layout_nb_channels(buf->channel_layout);
    int planar      = av_sample_fmt_is_planar(buf->format);
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;
    /* av_realloc_array checks the channels * element-size multiplication for
     * overflow; the plain av_realloc(channels * sizeof(...)) form did not. */
    void *tmp_ptr = av_realloc_array(s->plane_checksums, channels,
                                     sizeof(*s->plane_checksums));

    if (!tmp_ptr)
        return AVERROR(ENOMEM);
    s->plane_checksums = tmp_ptr;

    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        /* Whole-frame checksum chains the per-plane checksums together. */
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 buf->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08X ",
           inlink->frame_count,
           av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
           av_frame_get_pkt_pos(buf),
           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
           buf->sample_rate, buf->nb_samples,
           checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08X ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
/* Append the sample count, sample-format name, and one checksum per plane of
 * an audio frame to the bprint buffer. Interleaved layouts are treated as a
 * single plane covering all channels' samples. */
static void audio_frame_cksum(AVBPrint *bp, AVFrame *frame)
{
    const char *fmt_name;
    int plane_count, samples_per_plane, plane;

    plane_count       = av_frame_get_channels(frame);
    samples_per_plane = frame->nb_samples;
    if (!av_sample_fmt_is_planar(frame->format)) {
        /* Interleaved: all channels live in one plane. */
        samples_per_plane *= plane_count;
        plane_count = 1;
    }

    fmt_name = av_get_sample_fmt_name(frame->format);
    av_bprintf(bp, ", %d samples", frame->nb_samples);
    av_bprintf(bp, ", %s", fmt_name ? fmt_name : "unknown");

    for (plane = 0; plane < plane_count; plane++) {
        uint32_t cksum = 0;
        void *data = frame->extended_data[plane];

        switch (frame->format) {
        case AV_SAMPLE_FMT_U8:
        case AV_SAMPLE_FMT_U8P:
            cksum_line_u8(&cksum, data, samples_per_plane);
            break;
        case AV_SAMPLE_FMT_S16:
        case AV_SAMPLE_FMT_S16P:
            cksum_line_s16(&cksum, data, samples_per_plane);
            break;
        case AV_SAMPLE_FMT_S32:
        case AV_SAMPLE_FMT_S32P:
            cksum_line_s32(&cksum, data, samples_per_plane);
            break;
        case AV_SAMPLE_FMT_FLT:
        case AV_SAMPLE_FMT_FLTP:
            cksum_line_flt(&cksum, data, samples_per_plane);
            break;
        case AV_SAMPLE_FMT_DBL:
        case AV_SAMPLE_FMT_DBLP:
            cksum_line_dbl(&cksum, data, samples_per_plane);
            break;
        default:
            av_assert0(!"reached");
        }
        av_bprintf(bp, ", 0x%08x", cksum);
    }
}
/**
 * Feed a frame to the destination while enforcing the link's framing
 * constraints: samples are accumulated into link->partial_buf and flushed to
 * ff_filter_frame_framed() whenever at least min_samples are buffered. Any
 * remainder stays in link->partial_buf for the next call.
 *
 * @param link  destination link with min_samples/max_samples set
 * @param frame input frame; consumed (freed) by this function
 * @return 0, or the last error from ff_filter_frame_framed()
 */
static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
{
    int insamples = frame->nb_samples, inpos = 0, nb_samples;
    AVFrame *pbuf = link->partial_buf;
    int nb_channels = av_frame_get_channels(frame);
    int ret = 0;

    link->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    /* Handle framing (min_samples, max_samples) */
    while (insamples) {
        if (!pbuf) {
            AVRational samples_tb = { 1, link->sample_rate };
            pbuf = ff_get_audio_buffer(link, link->partial_buf_size);
            if (!pbuf) {
                /* Best-effort: dropping is preferred over failing the graph. */
                av_log(link->dst, AV_LOG_WARNING,
                       "Samples dropped due to memory allocation failure.\n");
                return 0;
            }
            av_frame_copy_props(pbuf, frame);
            pbuf->pts = frame->pts;
            /* Advance the pts by the samples already consumed from the input. */
            if (pbuf->pts != AV_NOPTS_VALUE)
                pbuf->pts += av_rescale_q(inpos, samples_tb, link->time_base);
            pbuf->nb_samples = 0;
        }
        nb_samples = FFMIN(insamples, link->partial_buf_size - pbuf->nb_samples);
        av_samples_copy(pbuf->extended_data, frame->extended_data,
                        pbuf->nb_samples, inpos,
                        nb_samples, nb_channels, link->format);
        inpos            += nb_samples;
        insamples        -= nb_samples;
        pbuf->nb_samples += nb_samples;
        if (pbuf->nb_samples >= link->min_samples) {
            /* Enough buffered: hand the chunk downstream (ownership passes)
             * and start a fresh partial buffer on the next iteration. */
            ret = ff_filter_frame_framed(link, pbuf);
            pbuf = NULL;
        }
    }
    av_frame_free(&frame);
    link->partial_buf = pbuf;
    return ret;
}
/* Apply the phaser effect to one audio frame, processing in place when the
 * input is writable and into a freshly allocated frame otherwise. */
static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
{
    AudioPhaserContext *p = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *outbuf;

    if (!av_frame_is_writable(inbuf)) {
        /* Shared reference: allocate a separate output buffer. */
        outbuf = ff_get_audio_buffer(inlink, inbuf->nb_samples);
        if (!outbuf)
            return AVERROR(ENOMEM);
        av_frame_copy_props(outbuf, inbuf);
    } else {
        /* Exclusive reference: filter in place. */
        outbuf = inbuf;
    }

    p->phaser(p, inbuf->extended_data, outbuf->extended_data,
              outbuf->nb_samples, av_frame_get_channels(outbuf));

    if (inbuf != outbuf)
        av_frame_free(&inbuf);

    return ff_filter_frame(outlink, outbuf);
}
/**
 * Add a frame to the buffer source. With AV_BUFFERSRC_FLAG_KEEP_REF the
 * caller keeps ownership and a new reference is queued; otherwise the frame
 * is consumed directly. A NULL frame signals EOF.
 *
 * Fix: "av_frame_free(&copy)" had been corrupted to "av_frame_free(©)"
 * by HTML-entity mangling of "&copy;", which does not compile; the original
 * call is restored.
 *
 * @param ctx   buffer source filter context
 * @param frame frame to add, or NULL for EOF
 * @param flags combination of AV_BUFFERSRC_FLAG_*
 * @return 0 on success, a negative AVERROR on error
 */
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    AVFrame *copy = NULL;
    int ret = 0;

    /* A set layout must agree with the reported channel count. */
    if (frame && frame->channel_layout &&
        av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) {
        av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
        return AVERROR(EINVAL);
    }

    /* Without KEEP_REF (or on EOF) the frame itself can be handed over. */
    if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
        return av_buffersrc_add_frame_internal(ctx, frame, flags);

    /* KEEP_REF: queue a new reference so the caller's frame stays intact. */
    if (!(copy = av_frame_alloc()))
        return AVERROR(ENOMEM);
    ret = av_frame_ref(copy, frame);
    if (ret >= 0)
        ret = av_buffersrc_add_frame_internal(ctx, copy, flags);

    av_frame_free(&copy);
    return ret;
}
/**
 * Queue a frame (or EOF) on the buffer source's FIFO, after checking it
 * against the negotiated parameters. Refcounted frames are moved in;
 * non-refcounted frames are copied via av_frame_ref.
 *
 * Fix: three occurrences of "&copy" had been corrupted to "©" by
 * HTML-entity mangling of "&copy;" (two av_frame_free calls and the
 * av_fifo_generic_write argument), which does not compile; the original
 * tokens are restored.
 *
 * @param ctx   buffer source filter context
 * @param frame frame to queue, or NULL to mark EOF
 * @param flags AV_BUFFERSRC_FLAG_* (NO_CHECK_FORMAT, PUSH honored here)
 * @return 0 on success, a negative AVERROR on error
 */
static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    BufferSourceContext *s = ctx->priv;
    AVFrame *copy;
    int refcounted, ret;

    s->nb_failed_requests = 0;

    if (!frame) {
        s->eof = 1;                 /* NULL frame marks end of stream */
        return 0;
    } else if (s->eof)
        return AVERROR(EINVAL);     /* no frames accepted after EOF */

    refcounted = !!frame->buf[0];

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (ctx->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height, frame->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            /* For layouts unknown on input but known on link after negotiation. */
            if (!frame->channel_layout)
                frame->channel_layout = s->channel_layout;
            CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
                                     av_frame_get_channels(frame), frame->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }

    /* Grow the FIFO if it cannot hold one more frame pointer. */
    if (!av_fifo_space(s->fifo) &&
        (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
                                         sizeof(copy))) < 0)
        return ret;

    if (!(copy = av_frame_alloc()))
        return AVERROR(ENOMEM);

    if (refcounted) {
        av_frame_move_ref(copy, frame);
    } else {
        ret = av_frame_ref(copy, frame);
        if (ret < 0) {
            av_frame_free(&copy);
            return ret;
        }
    }

    if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
        /* Give a moved frame back to the caller before failing. */
        if (refcounted)
            av_frame_move_ref(frame, copy);
        av_frame_free(&copy);
        return ret;
    }

    if ((flags & AV_BUFFERSRC_FLAG_PUSH))
        if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
            return ret;

    return 0;
}
/**
 * Audio decoding thread body: pull decoded frames from the audio decoder,
 * optionally route them through the avfilter graph (reconfiguring the graph
 * whenever the input format/rate/layout/serial changes), and push the result
 * into the sample frame queue for the audio output.
 *
 * @param arg the VideoState for this player instance
 * @return 0 or the last decoder/filter error code
 */
int AudioDecoder::audio_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVStreamsParser* ps = is->getAVStreamsParser();
    AVFrame *frame = av_frame_alloc();
    Frame *af;
#if CONFIG_AVFILTER
    int last_serial = -1;          // serial of the packets the filter graph was built for
    int64_t dec_channel_layout;
    int reconfigure;
#endif
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if ((got_frame = is->auddec().decode_frame(frame)) < 0)
            goto the_end;

        if (got_frame) {
            tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
            dec_channel_layout =
                get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

            /* Rebuild the filter graph if the decoded format no longer
             * matches what the graph was configured for, or after a seek
             * (packet serial change). */
            reconfigure =
                cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                               (AVSampleFormat)frame->format, av_frame_get_channels(frame)) ||
                is->audio_filter_src.channel_layout != dec_channel_layout ||
                is->audio_filter_src.freq != frame->sample_rate ||
                is->auddec().pkt_serial != last_serial;

            if (reconfigure) {
                char buf1[1024], buf2[1024];
                av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                av_log(NULL, AV_LOG_DEBUG,
                       "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                       is->audio_filter_src.freq, is->audio_filter_src.channels,
                       av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                       frame->sample_rate, av_frame_get_channels(frame),
                       av_get_sample_fmt_name((AVSampleFormat)frame->format), buf2, is->auddec().pkt_serial);

                is->audio_filter_src.fmt            = (AVSampleFormat)frame->format;
                is->audio_filter_src.channels       = av_frame_get_channels(frame);
                is->audio_filter_src.channel_layout = dec_channel_layout;
                is->audio_filter_src.freq           = frame->sample_rate;
                last_serial                         = is->auddec().pkt_serial;

                if ((ret = configure_audio_filters(is, gOptions.afilters, 1)) < 0)
                    goto the_end;
            }

            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            /* Drain every frame the graph can produce for this input. */
            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                tb = is->out_audio_filter->inputs[0]->time_base;
#endif
                if (!(af = is->sampq().peek_writable()))
                    goto the_end;

                af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                af->pos = av_frame_get_pkt_pos(frame);
                af->serial = is->auddec().pkt_serial;
                af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

                av_frame_move_ref(af->frame, frame);
                is->sampq().push();

#if CONFIG_AVFILTER
                /* After a seek, drop the remaining output of the old graph. */
                if (ps->audioq.serial != is->auddec().pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec().finished = is->auddec().pkt_serial;
#endif
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);

the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&is->agraph);
#endif
    av_frame_free(&frame);
    return ret;
}
/**
 * Decode the current packet ("pkt", file-scope) with the matching decoder
 * and dump the raw result: video frames to video_dst_file, audio samples to
 * audio_dst_file. Packets from other streams are ignored.
 *
 * @param got_frame set to non-zero when a full frame was decoded
 * @param cached    non-zero while flushing buffered (cached) frames
 * @return bytes consumed from the packet, or a negative error code
 */
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            /* Allocate a fresh destination buffer sized for this frame;
             * freed again at the end of this branch. */
            ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize,
                                   av_frame_get_channels(frame),
                                   frame->nb_samples, frame->format, 1);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate audio buffer\n");
                return AVERROR(ENOMEM);
            }

            /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
            audio_dst_bufsize = av_samples_get_buffer_size(NULL, av_frame_get_channels(frame),
                                                           frame->nb_samples, frame->format, 1);

            /* copy audio data to destination buffer:
             * this is required since rawaudio expects non aligned data */
            av_samples_copy(audio_dst_data, frame->data, 0, 0,
                            frame->nb_samples, av_frame_get_channels(frame), frame->format);

            /* write to rawaudio file */
            fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
            av_freep(&audio_dst_data[0]);
        }
    }

    return ret;
}
/**
 * Decode a frame to a packet, run the result through SwrContext, if desired, encode it via an appropriate
 * encoder, and write the results to the Java-side native buffer.
 *
 * @param aio FFAudio context
 * @param cached true or false
 * @return number of bytes placed into java buffer or a negative value, if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;     // a previous frame means we may need to flush the resampler
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    /* Read the resampler's configured output parameters. */
    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame,
                                              &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            /* NOTE(review): returning here skips the "bail" cleanup, leaking
             * resample_buf — confirm and consider goto bail instead. */
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed,
                                                      "Failed to decode audio frame.");
            return bytesConsumed;
        }

        if (aio->got_frame) {
            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                    cached ? "(cached)" : "",
                    aio->decoded_samples, aio->decode_frame->nb_samples,
                    av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif
            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)
                    + aio->decode_frame->nb_samples,
                out_sample_rate,
                aio->stream->codecpar->sample_rate,
                AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL,
                                   av_frame_get_channels(aio->decode_frame),
                                   aio->decode_frame->nb_samples,
                                   aio->decode_frame->format, 1);
            if (res < 0) {
                /* NOTE(review): this early return also bypasses "bail" and
                 * leaks resample_buf — confirm. */
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                return AVERROR(ENOMEM);
            }

            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                            aio->decode_frame->nb_samples,
                            av_frame_get_channels(aio->decode_frame),
                            aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples,
                           (const uint8_t **)aio->audio_data,
                           aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {
            /* No new frame but the resampler still buffers samples: flush them. */
            res = resample(aio, resample_buf,
                           swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate),
                           NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {
            /* Bytes needed in the configured output format. */
            res = av_samples_get_buffer_size(NULL, (int)out_channel_count,
                                             out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity =
                    (*aio->env)->CallIntMethod(aio->env, aio->java_instance,
                                               setNativeBufferCapacity_MID,
                                               (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance,
                                                      nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                /* Re-encode the resampled samples before handing them to Java. */
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:
    /* Common cleanup: the resample plane array and the per-call audio_data. */
    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
/**
 * Negotiate the decoder's downstream caps from the decoded frame's sample
 * format, rate and channel layout. Does nothing when nothing changed unless
 * @force is set. Also records the libav channel order and whether output
 * samples must be reordered into GStreamer's canonical channel order.
 *
 * Returns TRUE on success, FALSE when no caps mapping exists or the peer
 * rejects the format.
 */
static gboolean
gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec,
    AVCodecContext * context, AVFrame * frame, gboolean force)
{
  GstFFMpegAudDecClass *oclass;
  GstAudioFormat format;
  gint channels;
  GstAudioChannelPosition pos[64] = { 0, };

  oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  format = gst_ffmpeg_smpfmt_to_audioformat (frame->format);
  if (format == GST_AUDIO_FORMAT_UNKNOWN)
    goto no_caps;
  /* Prefer the channel count implied by the layout; fall back to the
   * frame's channel field when no layout is set. */
  channels =
      av_get_channel_layout_nb_channels (av_frame_get_channel_layout (frame));
  if (channels == 0)
    channels = av_frame_get_channels (frame);
  if (channels == 0)
    goto no_caps;

  if (!force && !settings_changed (ffmpegdec, frame))
    return TRUE;

  GST_DEBUG_OBJECT (ffmpegdec,
      "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
      ffmpegdec->info.rate, ffmpegdec->info.channels,
      ffmpegdec->info.finfo->format, av_frame_get_sample_rate (frame),
      channels, format);

  /* Remember libav's channel order so decoded buffers can be reordered. */
  gst_ffmpeg_channel_layout_to_gst (av_frame_get_channel_layout (frame),
      channels, pos);
  memcpy (ffmpegdec->ffmpeg_layout, pos,
      sizeof (GstAudioChannelPosition) * channels);

  /* Get GStreamer channel layout */
  gst_audio_channel_positions_to_valid_order (pos, channels);
  ffmpegdec->needs_reorder =
      memcmp (pos, ffmpegdec->ffmpeg_layout, sizeof (pos[0]) * channels) != 0;

  gst_audio_info_set_format (&ffmpegdec->info, format,
      av_frame_get_sample_rate (frame), channels, pos);

  if (!gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (ffmpegdec),
          &ffmpegdec->info))
    goto caps_failed;

  return TRUE;

  /* ERRORS */
no_caps:
  {
#ifdef HAVE_LIBAV_UNINSTALLED
    /* using internal ffmpeg snapshot */
    GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
        ("Could not find GStreamer caps mapping for libav codec '%s'.",
            oclass->in_plugin->name), (NULL));
#else
    /* using external ffmpeg */
    GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
        ("Could not find GStreamer caps mapping for libav codec '%s', and "
            "you are using an external libavcodec. This is most likely due to "
            "a packaging problem and/or libavcodec having been upgraded to a "
            "version that is not compatible with this version of "
            "gstreamer-libav. Make sure your gstreamer-libav and libavcodec "
            "packages come from the same source/repository.",
            oclass->in_plugin->name), (NULL));
#endif
    return FALSE;
  }
caps_failed:
  {
    GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
        ("Could not set caps for libav decoder (%s), not fixed?",
            oclass->in_plugin->name));
    memset (&ffmpegdec->info, 0, sizeof (ffmpegdec->info));
    return FALSE;
  }
}
/**
 * Deliver one frame to the destination pad's filter_frame callback: copy the
 * frame first if the pad needs a writable one, run any queued filter commands
 * whose time has come, evaluate the timeline "enable" expression, then call
 * the callback and update the link's current pts.
 *
 * @param link  link whose dst pad receives the frame
 * @param frame frame to deliver; ownership passes to this function
 * @return the callback's return value, or a negative AVERROR on error
 */
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out = NULL;
    int ret;
    AVFilterCommand *cmd= link->dst->command_queue;
    int64_t pts;

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_frame_copy_props(out, frame);
        if (ret < 0)
            goto fail;

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize,
                          (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_frame_get_channels(frame),
                            frame->format);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }

        av_frame_free(&frame);
    } else
        out = frame;

    /* Execute queued filter commands whose scheduled time has been reached. */
    while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd= link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        /* Timeline support: evaluate the enable expression for this frame
         * and disable the filter when it evaluates to (near) zero. */
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count_out;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_W] = link->w;
        dstctx->var_values[VAR_H] = link->h;
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
        if (dstctx->is_disabled &&
            (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count_out++;
    ff_update_link_current_pts(link, pts);
    return ret;

fail:
    av_frame_free(&out);
    av_frame_free(&frame);
    return ret;
}
/**
 * Round-trip test: encode NUMBER_OF_FRAMES of generated raw audio with @enc,
 * decode each packet back with @dec, and verify the decoded bytes equal the
 * input bytes and that frame parameters survive the round trip.
 *
 * NOTE(review): the early "return" error paths leak in_frame/out_frame and
 * raw_in/raw_out; acceptable for a one-shot test tool, but worth confirming.
 * NOTE(review): out_frame_bytes is only set once a frame decodes; the final
 * memcmp relies on at least one decoded frame — confirm.
 *
 * @return 0 on success, non-zero/negative on mismatch or error
 */
static int run_test(AVCodec *enc, AVCodec *dec, AVCodecContext *enc_ctx,
                    AVCodecContext *dec_ctx)
{
    AVPacket enc_pkt;
    AVFrame *in_frame, *out_frame;
    uint8_t *raw_in = NULL, *raw_out = NULL;
    int in_offset = 0, out_offset = 0;
    int result = 0;
    int got_output = 0;
    int i = 0;
    int in_frame_bytes, out_frame_bytes;

    in_frame = av_frame_alloc();
    if (!in_frame) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate input frame\n");
        return AVERROR(ENOMEM);
    }

    in_frame->nb_samples = enc_ctx->frame_size;
    in_frame->format = enc_ctx->sample_fmt;
    in_frame->channel_layout = enc_ctx->channel_layout;
    if (av_frame_get_buffer(in_frame, 32) != 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate a buffer for input frame\n");
        return AVERROR(ENOMEM);
    }

    out_frame = av_frame_alloc();
    if (!out_frame) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate output frame\n");
        return AVERROR(ENOMEM);
    }

    raw_in = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_in) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_in\n");
        return AVERROR(ENOMEM);
    }

    raw_out = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_out) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_out\n");
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < NUMBER_OF_FRAMES; i++) {
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;
        enc_pkt.size = 0;

        /* Fill the input frame with a deterministic generated signal and
         * keep a copy of the raw bytes for the final comparison. */
        generate_raw_frame((uint16_t*)(in_frame->data[0]), i, enc_ctx->sample_rate,
                           enc_ctx->channels, enc_ctx->frame_size);
        in_frame_bytes = in_frame->nb_samples * av_frame_get_channels(in_frame) * sizeof(uint16_t);
        if (in_frame_bytes > in_frame->linesize[0]) {
            av_log(NULL, AV_LOG_ERROR, "Incorrect value of input frame linesize\n");
            return 1;
        }
        memcpy(raw_in + in_offset, in_frame->data[0], in_frame_bytes);
        in_offset += in_frame_bytes;
        result = avcodec_encode_audio2(enc_ctx, &enc_pkt, in_frame, &got_output);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error encoding audio frame\n");
            return result;
        }

        /* if we get an encoded packet, feed it straight to the decoder */
        if (got_output) {
            result = avcodec_decode_audio4(dec_ctx, out_frame, &got_output, &enc_pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio packet\n");
                return result;
            }

            if (got_output) {
                if (result != enc_pkt.size) {
                    av_log(NULL, AV_LOG_INFO, "Decoder consumed only part of a packet, it is allowed to do so -- need to update this test\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->nb_samples != out_frame->nb_samples) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different number of samples\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->channel_layout != out_frame->channel_layout) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different channel layout\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->format != out_frame->format) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different sample format\n");
                    return AVERROR_UNKNOWN;
                }
                out_frame_bytes = out_frame->nb_samples * av_frame_get_channels(out_frame) * sizeof(uint16_t);
                if (out_frame_bytes > out_frame->linesize[0]) {
                    av_log(NULL, AV_LOG_ERROR, "Incorrect value of output frame linesize\n");
                    return 1;
                }
                memcpy(raw_out + out_offset, out_frame->data[0], out_frame_bytes);
                out_offset += out_frame_bytes;
            }
        }
        av_free_packet(&enc_pkt);
    }

    /* Lossless round trip expected: the raw streams must match byte for byte. */
    if (memcmp(raw_in, raw_out, out_frame_bytes * NUMBER_OF_FRAMES) != 0) {
        av_log(NULL, AV_LOG_ERROR, "Output differs\n");
        return 1;
    }

    av_log(NULL, AV_LOG_INFO, "OK\n");

    av_freep(&raw_in);
    av_freep(&raw_out);
    av_frame_free(&in_frame);
    av_frame_free(&out_frame);
    return 0;
}
/**
 * ashowinfo: log per-frame information (timestamps, format, channel layout,
 * Adler-32 checksums of each plane, attached side data) and pass the frame
 * through unchanged.
 *
 * @param inlink input link carrying the audio frame
 * @param buf    audio frame; forwarded to the output untouched
 * @return result of forwarding the frame, or AVERROR(ENOMEM) on allocation
 *         failure
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s  = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = inlink->channels;
    int planar      = av_sample_fmt_is_planar(buf->format);
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;
    void *tmp_ptr = av_realloc_array(s->plane_checksums, channels,
                                     sizeof(*s->plane_checksums));

    if (!tmp_ptr)
        return AVERROR(ENOMEM);
    s->plane_checksums = tmp_ptr;

    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        /* Whole-frame checksum chains the per-plane checksums together. */
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 buf->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08"PRIX32" ",
           inlink->frame_count_out,
           av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
           av_frame_get_pkt_pos(buf),
           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
           buf->sample_rate, buf->nb_samples,
           checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08"PRIX32" ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    /* Dump any side data attached to the frame. */
    for (i = 0; i < buf->nb_side_data; i++) {
        AVFrameSideData *sd = buf->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_MATRIXENCODING: dump_matrixenc (ctx, sd); break;
        case AV_FRAME_DATA_DOWNMIX_INFO:   dump_downmix   (ctx, sd); break;
        case AV_FRAME_DATA_REPLAYGAIN:     dump_replaygain(ctx, sd); break;
        case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: dump_audio_service_type(ctx, sd); break;
        default:                           dump_unknown   (ctx, sd); break;
        }
        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
// Number of audio channels in the wrapped frame, or 0 when no frame is held.
int AudioSamples::channelsCount() const
{
    if (!m_raw)
        return 0;
    return av_frame_get_channels(m_raw);
}