/**
 * Remove frames from the queue to account for one output packet of
 * nb_samples samples, and report that packet's pts and duration.
 *
 * Delay samples are consumed first (they precede any queued frame data);
 * then whole frames are popped until the packet is covered. If the queue
 * runs dry, leftover queued-sample credit is used up.
 *
 * @param afq        the frame queue (state is updated in place)
 * @param nb_samples number of samples covered by the output packet
 * @param pts        if non-NULL, receives the packet pts in avctx time base,
 *                   or AV_NOPTS_VALUE when no pts is available
 * @param duration   if non-NULL, receives the packet duration in avctx
 *                   time base
 */
void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts,
                        int *duration)
{
    int64_t out_pts = AV_NOPTS_VALUE;
    int removed_samples = 0;

#ifdef DEBUG
    af_queue_log_state(afq);
#endif

    /* get output pts from the next frame or generated pts */
    if (afq->frame_queue) {
        if (afq->frame_queue->pts != AV_NOPTS_VALUE)
            out_pts = afq->frame_queue->pts - afq->remaining_delay;
    } else {
        if (afq->next_pts != AV_NOPTS_VALUE)
            out_pts = afq->next_pts - afq->remaining_delay;
    }
    if (pts) {
        if (out_pts != AV_NOPTS_VALUE)
            *pts = ff_samples_to_time_base(afq->avctx, out_pts);
        else
            *pts = AV_NOPTS_VALUE;
    }

    /* if the delay is larger than the packet duration, we use up delay samples
       for the output packet and leave all frames in the queue */
    if (afq->remaining_delay >= nb_samples) {
        removed_samples      += nb_samples;
        afq->remaining_delay -= nb_samples;
    }
    /* otherwise consume whatever delay is left, then remove frames from the
       queue until we have enough to cover the requested number of samples or
       until the queue is empty */
    else {
        removed_samples      += afq->remaining_delay;
        afq->remaining_delay  = 0;
        while (removed_samples < nb_samples && afq->frame_queue) {
            removed_samples += afq->frame_queue->duration;
            delete_next_frame(afq);
        }
    }
    afq->remaining_samples -= removed_samples;

    /* if there are no frames left and we have room for more samples, use
       any remaining delay samples */
    if (removed_samples < nb_samples && afq->remaining_samples > 0) {
        int add_samples = FFMIN(afq->remaining_samples,
                                nb_samples - removed_samples);
        removed_samples        += add_samples;
        afq->remaining_samples -= add_samples;
    }

    if (removed_samples > nb_samples)
        av_log(afq->avctx, AV_LOG_WARNING, "frame_size is too large\n");

    if (duration)
        *duration = ff_samples_to_time_base(afq->avctx, removed_samples);
}
/* Encode one WMA superframe into avpkt.
 * A binary search over total_gain finds the smallest gain for which the
 * encoded frame still fits in block_align bytes; the packet is then padded
 * with 'N' bytes up to exactly avctx->block_align. */
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    WMACodecContext *s = avctx->priv_data;
    int step, pad, total_gain, ret, error;

    /* non-variable block length: block size equals frame size */
    s->block_len_bits = s->frame_len_bits;
    s->block_len      = 1 << s->block_len_bits;

    ret = apply_window_and_mdct(avctx, frame);
    if (ret < 0)
        return ret;

    if (s->ms_stereo) {
        /* mid/side stereo: replace L/R with (L+R)/2 and (L-R)/2 */
        int n;
        for (n = 0; n < s->block_len; n++) {
            float mid  = s->coefs[0][n] * 0.5;
            float side = s->coefs[1][n] * 0.5;
            s->coefs[0][n] = mid + side;
            s->coefs[1][n] = mid - side;
        }
    }

    ret = ff_alloc_packet2(avctx, avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE, 0);
    if (ret < 0)
        return ret;

    /* binary search: lower total_gain as far as the frame still fits
       (encode_frame() returns > 0 when it does not fit) */
    total_gain = 128;
    for (step = 64; step; step >>= 1) {
        error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                             total_gain - step);
        if (error <= 0)
            total_gain -= step;
    }

    /* fall back to raising the gain linearly if nothing fit so far */
    while (total_gain <= 128 && error > 0)
        error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                             total_gain++);
    if (error > 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid input data or requested bitrate too low, cannot encode\n");
        avpkt->size = 0;
        return AVERROR(EINVAL);
    }
    av_assert0((put_bits_count(&s->pb) & 7) == 0);

    /* pad with 'N' bytes up to the fixed packet size */
    pad = avctx->block_align - (put_bits_count(&s->pb) + 7) / 8;
    av_assert0(pad >= 0);
    while (pad--)
        put_bits(&s->pb, 8, 'N');
    flush_put_bits(&s->pb);
    av_assert0(put_bits_ptr(&s->pb) - s->pb.buf == avctx->block_align);

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts -
                     ff_samples_to_time_base(avctx, avctx->initial_padding);

    avpkt->size     = avctx->block_align;
    *got_packet_ptr = 1;
    return 0;
}
/* Encode one DCA frame: run the analysis/quantization pipeline over the
 * input samples, then serialize the header and all subframes, zero-padding
 * the bitstream up to the fixed frame size. */
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                        const AVFrame *frame, int *got_packet_ptr)
{
    DCAEncContext *c = avctx->priv_data;
    const int32_t *samples;
    int ret, sub, bit;

    ret = ff_alloc_packet2(avctx, avpkt, c->frame_size, 0);
    if (ret < 0)
        return ret;

    samples = (const int32_t *)frame->data[0];

    /* analysis pipeline; each stage updates encoder state, order matters */
    subband_transform(c, samples);
    if (c->lfe_channel)
        lfe_downsample(c, samples);
    calc_masking(c, samples);
    find_peaks(c);
    assign_bits(c);
    calc_scales(c);
    quantize_all(c);
    shift_history(c, samples);

    /* serialize the frame */
    init_put_bits(&c->pb, avpkt->data, avpkt->size);
    put_frame_header(c);
    put_primary_audio_header(c);
    for (sub = 0; sub < SUBFRAMES; sub++)
        put_subframe(c, sub);

    /* zero-pad up to the fixed frame size */
    for (bit = put_bits_count(&c->pb); bit < 8 * c->frame_size; bit++)
        put_bits(&c->pb, 1, 0);
    flush_put_bits(&c->pb);

    avpkt->pts      = frame->pts;
    avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
    avpkt->size     = put_bits_count(&c->pb) >> 3;
    *got_packet_ptr = 1;
    return 0;
}
/* Encode one WMA superframe into avpkt (legacy variant).
 * A binary search over total_gain finds the smallest gain for which the
 * encoded frame fits; the packet is then padded with 'N' bytes and is
 * always exactly avctx->block_align bytes long. */
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    WMACodecContext *s = avctx->priv_data;
    int i, total_gain, ret;

    s->block_len_bits = s->frame_len_bits; // required by non variable block len
    s->block_len      = 1 << s->block_len_bits;

    apply_window_and_mdct(avctx, frame);

    if (s->ms_stereo) {
        float a, b;
        int i;
        /* mid/side stereo: replace L/R with (L+R)/2 and (L-R)/2 */
        for (i = 0; i < s->block_len; i++) {
            a = s->coefs[0][i] * 0.5;
            b = s->coefs[1][i] * 0.5;
            s->coefs[0][i] = a + b;
            s->coefs[1][i] = a - b;
        }
    }

    if ((ret = ff_alloc_packet(avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    /* binary search: lower total_gain as far as the frame still fits
       (encode_frame() returns >= 0 when it does not fit) */
    total_gain = 128;
    for (i = 64; i; i >>= 1) {
        int error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                                 total_gain - i);
        if (error < 0)
            total_gain -= i;
    }

    if ((i = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain)) >= 0) {
        av_log(avctx, AV_LOG_ERROR, "required frame size too large. please "
               "use a higher bit rate.\n");
        return AVERROR(EINVAL);
    }
    assert((put_bits_count(&s->pb) & 7) == 0);
    /* on success i is the negated number of free bytes:
       pad with 'N' bytes up to the fixed packet size */
    while (i++)
        put_bits(&s->pb, 8, 'N');
    flush_put_bits(&s->pb);

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);

    /* packet size is the codec-context block_align (fixed packet size),
       not a field of WMACodecContext */
    avpkt->size     = avctx->block_align;
    *got_packet_ptr = 1;
    return 0;
}