Example #1
static SLDataFormat_PCM audioFormatToSL(const AudioFormat &format)
{
    SLDataFormat_PCM format_pcm;
    format_pcm.formatType = SL_DATAFORMAT_PCM;
    format_pcm.numChannels = format.channels();
    format_pcm.samplesPerSec = format.sampleRate() * 1000; // OpenSL ES samplesPerSec is in milliHertz
    format_pcm.bitsPerSample = format.bytesPerSample() * 8;
    format_pcm.containerSize = format_pcm.bitsPerSample;
    // TODO: more layouts
    format_pcm.channelMask = format.channels() == 1 ? SL_SPEAKER_FRONT_CENTER : (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
#ifdef SL_BYTEORDER_NATIVE
    format_pcm.endianness = SL_BYTEORDER_NATIVE;
#else
    // no compile-time macro available: probe the host byte order at runtime
    union { unsigned short num; char buf[sizeof(unsigned short)]; } endianness;
    endianness.num = 1;
    format_pcm.endianness = endianness.buf[0] ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN;
#endif
    return format_pcm;
}
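
A minimal usage sketch, not part of the example above: the SLDataFormat_PCM returned by audioFormatToSL() is typically paired with a buffer-queue locator to build the SLDataSource passed to CreateAudioPlayer(). It assumes the Android flavour of OpenSL ES; the helper name and the already-created, already-realized `engine` and `outputMix` objects are illustrative assumptions.

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

// Hypothetical helper: build a buffer-queue player around the PCM format from audioFormatToSL().
// `engine` and `outputMix` are assumed to have been created and realized elsewhere.
static SLObjectItf createBufferQueuePlayer(SLEngineItf engine, SLObjectItf outputMix,
                                           SLDataFormat_PCM pcm)
{
    SLDataLocator_AndroidSimpleBufferQueue bufq = {
        SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2 // two buffers in flight
    };
    SLDataSource source = { &bufq, &pcm };
    SLDataLocator_OutputMix outLoc = { SL_DATALOCATOR_OUTPUTMIX, outputMix };
    SLDataSink sink = { &outLoc, NULL };
    SLObjectItf player = NULL;
    const SLInterfaceID ids[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE };
    const SLboolean req[] = { SL_BOOLEAN_TRUE };
    if ((*engine)->CreateAudioPlayer(engine, &player, &source, &sink,
                                     1, ids, req) != SL_RESULT_SUCCESS)
        return NULL;
    (*player)->Realize(player, SL_BOOLEAN_FALSE); // synchronous realize
    return player;
}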
Example #2
void LibavReader::setupDecoder_() {
	// find the first audio stream
	int ret = -1;
	for( std::size_t i = 0 ; i < this->pFormatContext_->nb_streams; ++i ) {
		if( this->pFormatContext_->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO ) {
			ret = i;
			break;
		}
	}
	if( ret == -1 ) {
		throw CodecError( "LibavReader: Find no audio stream!", __FILE__, __LINE__ );
	}
	this->pStream_ = this->pFormatContext_->streams[ret];
	AVCodecContext * pCC = this->pStream_->codec;
	// getting codec information
	this->setBitRate( pCC->bit_rate );

	// setup sample format
	AudioFormat format;
	format.setFrequency( 44100 );	// output rate is fixed at 44100 Hz; resample below if the stream differs
	format.setChannels( pCC->channels );
	switch( pCC->channels ) {
	case 1:
		this->setChannelLayout( LayoutMono );
		break;
	case 2:
		this->setChannelLayout( LayoutStereo );
		break;
	default:
		this->setChannelLayout( LayoutNative );
	}
	format.setSampleType( AudioFormat::SignedInt );
	format.setSampleSize( 16 );
	this->setAudioFormat( format );

	AVCodec * pC = avcodec_find_decoder( pCC->codec_id );
	if( pC == NULL ) {
		throw CodecError( tr( "find no decoder" ), __FILE__, __LINE__ );
	}

	if( avcodec_open2( pCC, pC, NULL ) < 0 ) {
		throw CodecError( tr( "can not open decoder" ), __FILE__, __LINE__ );
	}
	this->pCodecContext_.reset( pCC, avcodec_close );

	// set up resampling to interleaved S16 at the requested rate if needed
	if( pCC->channel_layout > 0 || format.frequency() != pCC->sample_rate || pCC->sample_fmt != AV_SAMPLE_FMT_S16 ) {
		auto rsc = av_audio_resample_init( format.channels(), pCC->channels,
			format.frequency(), pCC->sample_rate,
			AV_SAMPLE_FMT_S16, pCC->sample_fmt,
			16, 10, 0, 0.8 );
		if( !rsc ) {
			throw CodecError( QObject::tr( "ReSampleContext open failed" ), __FILE__, __LINE__ );
		}
		this->pArContext_.reset( rsc, []( ReSampleContext * p )->void {
			audio_resample_close( p );
		} );
	}
}
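
A decode-loop sketch under the same assumptions as setupDecoder_() above (old libav 0.8-era API with av_audio_resample_init / avcodec_encode-era calls, interleaved S16 output): it shows how the configured decoder and ReSampleContext could be driven. The function name and the raw pointers standing in for the reader's members are illustrative, not part of the original class, and it assumes the resampler was actually created.

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}
#include <vector>

// Hypothetical decode loop: read packets, decode the selected audio stream,
// and resample to interleaved S16 as configured above.
static void decodeLoop( AVFormatContext * fmt, AVStream * stream,
                        AVCodecContext * codec, ReSampleContext * resampler )
{
	AVPacket packet;
	AVFrame * frame = avcodec_alloc_frame();
	while( av_read_frame( fmt, &packet ) >= 0 ) {
		if( packet.stream_index == stream->index ) {
			int gotFrame = 0;
			if( avcodec_decode_audio4( codec, frame, &gotFrame, &packet ) >= 0 && gotFrame ) {
				// room for the rate change to 44100 Hz plus filter padding
				std::vector< short > out( ( frame->nb_samples * 44100 / codec->sample_rate + 256 ) * codec->channels );
				int n = audio_resample( resampler, out.data(),
				                        reinterpret_cast< short * >( frame->data[0] ),
				                        frame->nb_samples );
				// ... hand `n` samples per channel to the caller ...
				(void)n;
			}
		}
		av_free_packet( &packet );
	}
	av_free( frame );
}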
Example #3
bool AudioEncoderFFmpeg::encode(const AudioFrame &frame)
{
    DPTR_D(AudioEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        const AudioFormat fmt(frame.format());
        f->format = fmt.sampleFormatFFmpeg();
        f->channel_layout = fmt.channelLayoutFFmpeg();
        // f->channels = fmt.channels(); //remove? not availale in libav9
        // must be (not the last frame) exactly frame_size unless CODEC_CAP_VARIABLE_FRAME_SIZE is set (frame_size==0)
        // TODO: mpv use pcmhack for avctx.frame_size==0. can we use input frame.samplesPerChannel?
        f->nb_samples = d.frame_size;
        /// f->quality = d.avctx->global_quality; //TODO
        // TODO: record last pts. mpv compute pts internally and also use playback time
        f->pts = int64_t(frame.timestamp()*fmt.sampleRate()); // TODO
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        // bytes between 2 samples on a plane. TODO: add to AudioFormat? what about bytesPerFrame?
        const int sample_stride = fmt.isPlanar() ? fmt.bytesPerSample() : fmt.bytesPerSample()*fmt.channels();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = f->nb_samples * sample_stride;// frame.bytesPerLine(i); //
            f->extended_data[i] = (uint8_t*)frame.constBits(i);
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_audio2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("error avcodec_encode_audio2: %s" ,av_err2str(ret));
        return false; //false
    }
    if (!got_packet) {
        qWarning("no packet produced by encoder");
        return false;
    }
    // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
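
A drain sketch for the same avcodec_encode_audio2() API: when frame.isValid() is false the function above passes a NULL AVFrame, which is how FFmpeg encoders flush buffered samples, and calling the encoder repeatedly until got_packet becomes 0 empties it. The helper name and the writePacket callback are assumptions standing in for the muxer hookup.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Hypothetical drain helper: flush delayed packets after the last valid frame.
static void drainEncoder(AVCodecContext *avctx, void (*writePacket)(const AVPacket*))
{
    int got_packet = 0;
    do {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL; // let the encoder allocate the output buffer
        pkt.size = 0;
        if (avcodec_encode_audio2(avctx, &pkt, NULL, &got_packet) < 0)
            break;
        if (got_packet) {
            writePacket(&pkt);    // hand the delayed packet to the muxer
            av_free_packet(&pkt); // release the encoder-allocated buffer
        }
    } while (got_packet);
}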