// Filter entry point: resample the audio in *data through libavresample and
// return the converted audio (the filter's own buffer, aliased into *data).
// Returns NULL if avresample_convert() reports an error.
static struct mp_audio *play(struct af_instance *af, struct mp_audio *data)
{
    struct af_resample *s = af->priv;
    struct mp_audio *in = data;
    struct mp_audio *out = af->data;

    // Upper bound on output size: samples already buffered inside the
    // resampler plus the rescaled (input + internal delay), rounded up so the
    // destination buffer can never be too small.
    out->samples = avresample_available(s->avrctx) +
        av_rescale_rnd(get_delay(s) + in->samples,
                       s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP);

    mp_audio_realloc_min(out, out->samples);

    // Report latency in seconds; get_delay() is presumably in input-rate
    // samples, hence the division by in_rate — TODO confirm get_delay() units.
    af->delay = get_delay(s) / (double)s->ctx.in_rate;

#if !USE_SET_CHANNEL_MAPPING
    // No native channel-map support in this libavresample build: reorder the
    // input channels in place before converting.
    do_reorder(in, s->reorder_in);
#endif

    if (out->samples) {
        out->samples = avresample_convert(s->avrctx,
            (uint8_t **) out->planes, out->samples * out->sstride, out->samples,
            (uint8_t **) in->planes, in->samples * in->sstride, in->samples);
        if (out->samples < 0)
            return NULL; // error
    }

    // Hand the result back through *data (shallow struct copy; planes still
    // point into out's talloc'd storage).
    *data = *out;

#if USE_SET_CHANNEL_MAPPING
    if (needs_reorder(s->reorder_out, out->nch)) {
        if (af_fmt_is_planar(out->format)) {
            // Planar audio: reordering is just a plane-pointer permutation.
            reorder_planes(data, s->reorder_out);
        } else {
            // Packed audio: run a second (pass-through) resample context whose
            // channel map performs the output reordering into a scratch buffer.
            int out_size = out->samples * out->sstride;
            if (talloc_get_size(s->reorder_buffer) < out_size)
                s->reorder_buffer = talloc_realloc_size(s, s->reorder_buffer, out_size);
            data->planes[0] = s->reorder_buffer;
            int out_samples = avresample_convert(s->avrctx_out,
                    (uint8_t **) data->planes, out_size, out->samples,
                    (uint8_t **) out->planes, out_size, out->samples);
            // A 1:1 context must emit exactly as many samples as it was fed.
            assert(out_samples == data->samples);
        }
    }
#else
    do_reorder(data, s->reorder_out);
#endif

    return data;
}
static struct mp_audio *play(struct af_instance *af, struct mp_audio *data) { struct af_resample *s = af->priv; struct mp_audio *in = data; struct mp_audio *out = af->data; int in_size = data->len; int in_samples = in_size / (data->bps * data->nch); int out_samples = avresample_available(s->avrctx) + av_rescale_rnd(get_delay(s) + in_samples, s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP); int out_size = out->bps * out_samples * out->nch; if (talloc_get_size(out->audio) < out_size) out->audio = talloc_realloc_size(out, out->audio, out_size); af->delay = out->bps * av_rescale_rnd(get_delay(s), s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP); #if !USE_SET_CHANNEL_MAPPING reorder_channels(data->audio, s->reorder_in, data->bps, data->nch, in_samples); #endif out_samples = avresample_convert(s->avrctx, (uint8_t **) &out->audio, out_size, out_samples, (uint8_t **) &in->audio, in_size, in_samples); *data = *out; #if USE_SET_CHANNEL_MAPPING if (needs_reorder(s->reorder_out, out->nch)) { if (talloc_get_size(s->reorder_buffer) < out_size) s->reorder_buffer = talloc_realloc_size(s, s->reorder_buffer, out_size); data->audio = s->reorder_buffer; out_samples = avresample_convert(s->avrctx_out, (uint8_t **) &data->audio, out_size, out_samples, (uint8_t **) &out->audio, out_size, out_samples); } #else reorder_channels(data->audio, s->reorder_out, out->bps, out->nch, out_samples); #endif data->len = out->bps * out_samples * out->nch; return data; }
/// @brief Decode a packed xyuv::frame into a planar yuv_image.
///
/// Fix vs. original: the temporary buffer was held in a
/// std::unique_ptr<uint8_t> while being allocated with new[], so its
/// destructor invoked scalar delete on an array allocation — undefined
/// behavior. It now uses the array specialization std::unique_ptr<uint8_t[]>,
/// whose deleter calls delete[].
///
/// @param frame_in  Source frame; its format describes plane layout, channel
///                  blocks, chroma siting, image origin and packed ranges.
/// @return Planar image containing whichever of the Y/U/V/A channels the
///         format actually carries samples for.
yuv_image decode_frame(const xyuv::frame &frame_in) {
    // A channel is present iff its block has at least one sample descriptor.
    bool has_y = !frame_in.format.channel_blocks[channel::Y].samples.empty();
    bool has_u = !frame_in.format.channel_blocks[channel::U].samples.empty();
    bool has_v = !frame_in.format.channel_blocks[channel::V].samples.empty();
    bool has_a = !frame_in.format.channel_blocks[channel::A].samples.empty();

    yuv_image yuva_out = create_yuv_image(
            frame_in.format.image_w,
            frame_in.format.image_h,
            frame_in.format.chroma_siting,
            has_y, has_u, has_v, has_a
    );

    // LOWER_LEFT-origin frames are stored bottom-up; decode_channel walks the
    // lines in reverse in that case.
    bool has_negative_line_stride = (frame_in.format.origin == image_origin::LOWER_LEFT);

    const uint8_t * raw_data = frame_in.data.get();

    // Array form so the deleter is delete[] (original used unique_ptr<uint8_t>,
    // which is UB for new[] allocations).
    std::unique_ptr<uint8_t[]> tmp_buffer;
    if (needs_reorder(frame_in.format)) {
        // Todo: If needed optimize this for memory.
        // At some point we will have allocated 2x frame + 1 plane.
        tmp_buffer.reset(new uint8_t[frame_in.format.size]);
        memcpy(tmp_buffer.get(), raw_data, frame_in.format.size);

        // Use the (reordered) copy instead of the caller's immutable data.
        raw_data = tmp_buffer.get();

        for (const auto & plane : frame_in.format.planes) {
            reorder_inverse(tmp_buffer.get(), plane);
        }
    }

    if (has_y) decode_channel(
            raw_data,
            frame_in.format.channel_blocks[channel::Y],
            &(yuva_out.y_plane),
            frame_in.format.planes,
            frame_in.format.conversion_matrix.y_packed_range,
            has_negative_line_stride
    );
    if (has_u) decode_channel(
            raw_data,
            frame_in.format.channel_blocks[channel::U],
            &(yuva_out.u_plane),
            frame_in.format.planes,
            frame_in.format.conversion_matrix.u_packed_range,
            has_negative_line_stride
    );
    if (has_v) decode_channel(
            raw_data,
            frame_in.format.channel_blocks[channel::V],
            &(yuva_out.v_plane),
            frame_in.format.planes,
            frame_in.format.conversion_matrix.v_packed_range,
            has_negative_line_stride
    );
    // Alpha has no packed-range entry in the conversion matrix; it is always
    // normalized over the full [0, 1] range.
    if (has_a) decode_channel(
            raw_data,
            frame_in.format.channel_blocks[channel::A],
            &(yuva_out.a_plane),
            frame_in.format.planes,
            std::make_pair<float, float>(0.0f, 1.0f),
            has_negative_line_stride
    );

    return yuva_out;
}