// we expect the AVPacket.data to contain either interleaved S16, or interleaved F32 audio.
// Encodes the packet's PCM samples to MP3 with LAME, updates the running bitrate
// statistic, and fills `tag` as an FLV audio tag stamped with p->timestamp.
// Returns true only when LAME produced at least one encoded byte.
bool AudioEncoderMP3::encodePacket(AVPacket* p, FLVTag& tag) {

  assert(lame_flags);
  assert(settings.in_interleaved); /* we only support interleaved audio for now */

  int nsamples = 0;
  int written = 0;

#if defined(USE_GRAPH)
  uint64_t enc_start = uv_hrtime() / 1000000;
#endif

  /* guard: calling data.front() on an empty vector is undefined behavior,
     so only touch the buffer when there is actual sample data. */
  if(!p->data.empty()) {
    if(settings.in_bitsize == AV_AUDIO_BITSIZE_S16) {
      /* one frame = nchannels interleaved 16-bit samples */
      nsamples = p->data.size() / (sizeof(int16_t) * nchannels);
      written = lame_encode_buffer_interleaved(lame_flags, (short int*)&p->data.front(), nsamples, mp3_buffer, AUDIO_ENCODER_BUFFER_SIZE);
    }
    else if(settings.in_bitsize == AV_AUDIO_BITSIZE_F32) {
      /* one frame = nchannels interleaved 32-bit float samples */
      nsamples = p->data.size() / (sizeof(float) * nchannels);
      written = lame_encode_buffer_interleaved_ieee_float(lame_flags, (const float*)&p->data.front(), nsamples, mp3_buffer, AUDIO_ENCODER_BUFFER_SIZE);
    }
  }

  /* LAME returns a negative value on error (e.g. -1 when the output buffer
     is too small). Bail out here: previously a negative `written` slipped
     through `if(written)` below and was passed to tag.setData() as a size. */
  if(written < 0) {
    return false;
  }

  if(written > 0) {
    bitrate_nbytes += written;
  }

  /* periodically recompute and report the average encoded bitrate */
  uint64_t time_now = uv_hrtime();
  if(time_now >= bitrate_timeout) {
    bitrate_timeout = time_now + bitrate_delay;
    double duration = (time_now - bitrate_time_started) / 1000000000.0; // in s.
    bitrate_in_kbps = ((bitrate_nbytes * 8) / 1000) / duration;
    STREAMER_STATUS("audio bitrate: %0.2f kbps\n", bitrate_in_kbps);
  }

#if defined(USE_GRAPH)
  frames_graph["enc_audio"] += ((uv_hrtime()/1000000) - enc_start);
  frames_graph["enc_audio_video"] += ((uv_hrtime()/1000000) - enc_start);
  network_graph["mp3"] += written;
#endif

#if AUDIO_USE_DATA_PTR
  /* tag references mp3_buffer directly (zero-copy) */
  if(written > 0) {
    tag.setData(mp3_buffer, written);
  }
#elif AUDIO_USE_COPY_DATA
  /* tag keeps its own copy of the encoded bytes */
  tag.bs.clear();
  if(written > 0) {
    tag.bs.putBytes((uint8_t*)mp3_buffer, written);
    tag.setData(tag.bs.getPtr(), tag.bs.size());
  }
#endif

  tag.makeAudioTag();
  tag.setTimeStamp(p->timestamp);

  return written > 0;
}
// ---------------------------------------------------------------------------
// Example 2
// ---------------------------------------------------------------------------
    /* Encode `numInputFrames` interleaved float samples with LAME. When a full
       MP3 chunk becomes available, fill `packet` with it and set `timestamp`
       (in-out) to the buffered timestamp of the frame being emitted.
       Returns true only when a packet was produced for the caller.
       NOTE(review): byte 0 of MP3OutputBuffer appears reserved — encoded data
       is written at Array()+1 while packet.lpPacket points at Array() and
       packet.size is ret+1. Presumably the first byte is a tag/header byte
       filled in elsewhere; confirm against the caller. */
    bool Encode(float *input, UINT numInputFrames, DataPacket &packet, QWORD &timestamp)
    {
        if(bFirstFrame)
        {
            // Timestamp of the first output frame we will eventually emit.
            curEncodeTimestamp = timestamp;
            bFirstFrame = false;
        }

        //------------------------------------------------

        // Samples that were already buffered (inside LAME) before this call;
        // used below to locate where the next output frame starts within
        // this input chunk.
        UINT lastSampleSize = frameCounter;

        frameCounter += numInputFrames;
        if(frameCounter > outputFrameSize)
        {
            frameCounter -= outputFrameSize;

            // A full output frame's worth of samples has now been queued:
            // remember its timestamp for when LAME actually emits it, and
            // start timing the next frame at the millisecond offset where it
            // begins inside this input chunk.
            bufferedTimestamps << curEncodeTimestamp;
            curEncodeTimestamp = timestamp + ((outputFrameSize-lastSampleSize)*1000/App->GetSampleRateHz());
        }

        // Encoded bytes land at Array()+1 (see NOTE above). ret is the number
        // of encoded bytes, 0 if LAME is still buffering, negative on error.
        int ret = lame_encode_buffer_interleaved_ieee_float(lgf, (float*)input, numInputFrames, MP3OutputBuffer.Array()+1, dwMP3MaxSize);

        if(ret < 0)
        {
            AppWarning(TEXT("MP3 encode failed"));
            return false;
        }

        if(ret > 0)
        {
            if(bFirstPacket)
            {
                // The first chunk LAME emits is stashed as the stream header
                // (copied from Array(), including the reserved leading byte)
                // and not handed to the caller: ret is reset to 0 so the
                // function returns false for this call.
                header.CopyArray(MP3OutputBuffer.Array(), ret);
                bFirstPacket = false;
                ret = 0;
            }
            else
            {
                packet.lpPacket = MP3OutputBuffer.Array();
                packet.size     = ret+1;  // +1 accounts for the reserved leading byte

                // Emit the oldest buffered timestamp along with this packet.
                // NOTE(review): assumes bufferedTimestamps is non-empty
                // whenever LAME emits a non-header chunk — verify.
                timestamp = bufferedTimestamps[0];
                bufferedTimestamps.Remove(0);
            }
        }

        return ret > 0;
    }