bool AudioEncoderFFmpeg::encode(const AudioFrame &frame)
{
    DPTR_D(AudioEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        const AudioFormat fmt(frame.format());
        f->format = fmt.sampleFormatFFmpeg();
        f->channel_layout = fmt.channelLayoutFFmpeg();
        // f->channels = fmt.channels(); // remove? not available in libav9
        // must be exactly frame_size (except for the last frame) unless CODEC_CAP_VARIABLE_FRAME_SIZE is set (frame_size == 0)
        // TODO: mpv uses pcmhack for avctx.frame_size == 0. Can we use the input frame.samplesPerChannel?
        f->nb_samples = d.frame_size;
        /// f->quality = d.avctx->global_quality; // TODO
        // TODO: record the last pts. mpv computes pts internally and also uses playback time
        f->pts = int64_t(frame.timestamp() * fmt.sampleRate()); // TODO
        // pts is set in the muxer
        const int nb_planes = frame.planeCount();
        // bytes between 2 samples on a plane. TODO: add to AudioFormat? what about bytesPerFrame?
        const int sample_stride = fmt.isPlanar() ? fmt.bytesPerSample() : fmt.bytesPerSample() * fmt.channels();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = f->nb_samples * sample_stride; // frame.bytesPerLine(i);
            f->extended_data[i] = (uint8_t*)frame.constBits(i);
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    // encode into the preallocated buffer instead of letting FFmpeg allocate one
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_audio2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("error avcodec_encode_audio2: %s", av_err2str(ret));
        return false;
    }
    if (!got_packet) {
        qWarning("no packet got from the encoder");
        return false;
    }
    //qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    //qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
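// Sketch (not part of the original file): avcodec_encode_audio2() is deprecated in
// FFmpeg 3.1+ in favor of the send/receive API. Assuming the same codec context and
// a filled AVFrame f as above, the equivalent flow would look roughly like this;
// encodeWithSendReceive is a hypothetical helper, Packet::fromAVPacket is the QtAV
// helper already used above.
static bool encodeWithSendReceive(AVCodecContext *avctx, AVFrame *f, Packet *out)
{
    AVPacket *pkt = av_packet_alloc();
    bool got = false;
    int ret = avcodec_send_frame(avctx, f); // f == NULL flushes the encoder
    while (ret >= 0) {
        ret = avcodec_receive_packet(avctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break; // needs more input, or fully flushed
        if (ret < 0) {
            got = false; // real encoding error
            break;
        }
        got = true;
        *out = Packet::fromAVPacket(pkt, av_q2d(avctx->time_base));
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    return got;
}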
static SLDataFormat_PCM audioFormatToSL(const AudioFormat &format)
{
    SLDataFormat_PCM format_pcm;
    format_pcm.formatType = SL_DATAFORMAT_PCM;
    format_pcm.numChannels = format.channels();
    format_pcm.samplesPerSec = format.sampleRate() * 1000; // OpenSL ES expects milliHz
    format_pcm.bitsPerSample = format.bytesPerSample() * 8;
    format_pcm.containerSize = format_pcm.bitsPerSample;
    // TODO: more layouts
    format_pcm.channelMask = format.channels() == 1 ? SL_SPEAKER_FRONT_CENTER : SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
#ifdef SL_BYTEORDER_NATIVE
    format_pcm.endianness = SL_BYTEORDER_NATIVE;
#else
    // detect host endianness at runtime
    union { unsigned short num; char buf[sizeof(unsigned short)]; } endianness;
    endianness.num = 1;
    format_pcm.endianness = endianness.buf[0] ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN;
#endif
    return format_pcm;
}
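// Sketch (not part of the original file): how the converted SLDataFormat_PCM is
// typically wired into an OpenSL ES data source. A core buffer-queue locator from
// <SLES/OpenSLES.h> (assumed already included by this file) is used here; on Android
// the SLDataLocator_AndroidSimpleBufferQueue variant is the common choice.
// makeDataSource is a hypothetical helper; the caller owns locator and pcm, since
// SLDataSource only stores pointers to them.
static SLDataSource makeDataSource(const AudioFormat &fmt, SLDataLocator_BufferQueue *locator, SLDataFormat_PCM *pcm)
{
    *pcm = audioFormatToSL(fmt);
    locator->locatorType = SL_DATALOCATOR_BUFFERQUEUE;
    locator->numBuffers = 2;
    SLDataSource source = { locator, pcm };
    // 'source' is then passed to (*engine)->CreateAudioPlayer(engine, &player, &source, &sink, ...)
    return source;
}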
static FormatCode getFormatCode(const AudioFormat &format)
{
    if (format.sampleFormat == AF_SAMPFMT_FLOAT)
        return kFloat;
    if (format.sampleFormat == AF_SAMPFMT_DOUBLE)
        return kDouble;
    if (format.isInteger()) {
        switch (format.bytesPerSample(false)) {
        case 1: return kInt8;
        case 2: return kInt16;
        case 3: return kInt24;
        case 4: return kInt32;
        }
    }

    /* NOTREACHED */
    assert(false);
    return kUndefined;
}
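// Sketch (not part of the original file): a hypothetical inverse helper that maps a
// FormatCode back to its sample width in bytes, using the same kInt8..kDouble
// enumerators returned above.
static int formatCodeBytes(FormatCode code)
{
    switch (code) {
    case kInt8:   return 1;
    case kInt16:  return 2;
    case kInt24:  return 3;
    case kInt32:  return 4;
    case kFloat:  return 4;
    case kDouble: return 8;
    default:      return 0; // kUndefined
    }
}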