Exemple #1
0
static int luaCV_tokenize (lua_State *L) {
	// Tokenize the string argument (stack index 1) into a Lua array of
	// tokens, left on the stack (stack index 2) and returned.
	// Token classes: double-quoted string literals (a backslash escapes an
	// embedded '"'), alphanumeric runs (a '.' directly after a digit stays
	// in the token, so decimals survive), and single-character symbols.
	// Whitespace only separates tokens.
	// todo: negative numbers and exponentials
	size_t l;
	int pos = 1;               // next index to fill in the result array
	luaL_Buffer bToken;        // accumulates the token currently being built
	luaL_buffinit(L, &bToken);
	const char *s = luaL_checklstring(L, 1, &l);
	lua_createtable(L, 1, 0);  // result is a sequence: hint the array part
	int modeAlpha = 0;         // 0 = symbol/none, 1 = alnum token, 2 = string literal
	char lastChar = ' ';

	while (l--) {

		if(*s == '"') { // begin or end string literal mode
			if(lastChar == '\\') { // escaped quote: keep it inside the literal
				luaL_addchar(&bToken, *s);
			}
			else if(modeAlpha != 2) { // start string literal mode
				modeAlpha = 2;
				pushBuffer(L, &bToken, 2, &pos); // flush any pending token first
				luaL_addchar(&bToken, *s);
			} else { // end string literal mode
				luaL_addchar(&bToken, *s);
				modeAlpha = 0;
				pushBuffer(L, &bToken, 2, &pos);
			}
		}
		else if(modeAlpha == 2) { // inside a string literal: copy verbatim
			luaL_addchar(&bToken, *s);
		}
		// Cast to unsigned char before the <ctype.h> calls: passing a plain
		// (possibly negative) char is undefined behavior for bytes >= 0x80.
		else if(isspace((unsigned char)*s)) {
			// ignore whitespace
		}
		else if(isalnum((unsigned char)*s) ||
		        (*s == '.' && isdigit((unsigned char)lastChar))) {
			if(modeAlpha != 1) { // switching into an alnum token: flush previous
				modeAlpha = 1;
				pushBuffer(L, &bToken, 2, &pos);
			}
			luaL_addchar(&bToken, *s);
		}
		else { // any other character becomes a one-character token
			if(modeAlpha != 0) {
				modeAlpha = 0;
				pushBuffer(L, &bToken, 2, &pos); // flush pending token
			}
			lua_pushlstring(L, s, 1);
			lua_rawseti(L, 2, pos++);
		}
		lastChar = *s;
		s++;
	}
	pushBuffer(L, &bToken, 2, &pos); // flush the final token, if any

	return 1; // the token table
}
Exemple #2
0
    void
    PositionTransform::pushBuffer(const uint8_t *const data,
                                  size_t size,
                                  videocore::IMetadata &metadata)
    {
        // Applies this node's position/size as a transform matrix onto the
        // frame's metadata, then forwards the buffer unchanged downstream.
        auto output = m_output.lock();
        
        if(output) {

            // Rebuild the cached matrix only when position/size changed.
            if(m_positionIsDirty) {
                glm::mat4 mat(1.f);
                const float x (m_posX), y(m_posY), cw(m_contextWidth), ch(m_contextHeight), w(m_width), h(m_height);
                
                mat = glm::translate(mat,
                                     glm::vec3((x / cw) * 2.f - 1.f,   // The compositor uses homogeneous coordinates.
                                               (y / ch) * 2.f - 1.f,   // i.e. [ -1 .. 1 ]
                                               0.f));
                
                mat = glm::scale(mat,
                                 glm::vec3(w / cw, //
                                           h / ch, // size is a percentage for scaling.
                                           1.f));
                
                m_matrix = mat;

                m_positionIsDirty = false;
            }
            // NOTE(review): dynamic_cast assumes the metadata really is a
            // VideoBufferMetadata; a mismatch would throw std::bad_cast.
            videocore::VideoBufferMetadata& md = dynamic_cast<videocore::VideoBufferMetadata&>(metadata);
            glm::mat4 & mat = md.getData<videocore::kVideoMetadataMatrix>();
            
            // Compose our transform onto whatever upstream already applied.
            mat = mat * m_matrix;
            
            output->pushBuffer(data, size, metadata);
        }
    }
    void
    GenericAudioMixer::mixThread()
    {
        // Mixer loop: once per m_bufferDuration window, pull PCM from each
        // input ring buffer, apply the per-source gain, sum into `samples`,
        // and push the mixed buffer downstream.
        const auto us = std::chrono::microseconds(static_cast<long long>(m_bufferDuration * 1000000.)) ;
        const float g = 0.70710678118f; // 1 / sqrt(2) : headroom so two full-scale inputs don't clip
        const size_t outSampleCount = static_cast<size_t>(m_outFrequencyInHz * m_bufferDuration);
        const size_t outBufferSize = outSampleCount * m_bytesPerSample ;
        // Value-initialize (trailing "()") both arrays: the original left
        // them indeterminate, so the very first mix pass accumulated into
        // uninitialized memory (memset only runs after a push).
        const std::unique_ptr<short[]> buffer(new short[outBufferSize / sizeof(short)]());
        const std::unique_ptr<short[]> samples(new short[outBufferSize / sizeof(short)]());

        while(!m_exiting.load()) {
            std::unique_lock<std::mutex> l(m_mixMutex);

            if(std::chrono::high_resolution_clock::now() >= m_nextMixTime) {

                size_t sampleBufferSize = 0; // largest byte count produced by any input

                // Mix and push
                for ( auto it = m_inBuffer.begin() ; it != m_inBuffer.end() ; ++it )
                {
                    auto size = it->second->get((uint8_t*)&buffer[0], outBufferSize);
                    if(size > sampleBufferSize) {
                        sampleBufferSize = size;
                    }
                    const size_t count = (size/sizeof(short));
                    const float gain = m_inGain[it->first];
                    const float mult = g*gain;
                    // Scale each sample by g*gain directly (matches the other
                    // mixThread implementation).  The original divided by
                    // short(1.f/mult), which truncated badly and divided by
                    // zero whenever g*gain >= 1; its 8x-unrolled loop could
                    // also index up to 7 samples past `count`.
                    for ( size_t i = 0 ; i < count ; ++i ) {
                        samples[i] += static_cast<short>(buffer[i] * mult);
                    }
                }

                if(sampleBufferSize) {

                    m_nextMixTime += us;

                    // Timestamp: milliseconds relative to the mixer epoch.
                    MetaData<'soun'> md ( std::chrono::duration_cast<std::chrono::milliseconds>(m_nextMixTime - m_epoch).count() );

                    auto out = m_output.lock();
                    if(out) {
                        out->pushBuffer((uint8_t*)&samples[0], sampleBufferSize, md);
                    }
                }

                // Reset the accumulator for the next mix window.
                memset(samples.get(), 0, outBufferSize);
            }
            m_mixThreadCond.wait_until(l, m_nextMixTime);
        }

    }
Exemple #4
0
 void
 AACEncode::pushBuffer(const uint8_t* const data, size_t size, IMetadata& metadata)
 {
     // Encode interleaved PCM into AAC packets via AudioConverter and push
     // the encoded bytes (preceded once by the ASC config) downstream.
     const size_t sampleCount = size / m_bytesPerSample;
     const size_t aac_packet_count = sampleCount / kSamplesPerFrame;
     const size_t required_bytes = aac_packet_count * m_outputPacketMaxSize;
     
     // Grow the output scratch buffer if this push needs more room.
     if(m_outputBuffer.total() < (required_bytes)) {
         m_outputBuffer.resize(required_bytes);
     }
     uint8_t* p = m_outputBuffer();      // write cursor into the output buffer
     uint8_t* p_out = (uint8_t*)data;    // read cursor into the input PCM
     
     // Convert one AAC frame (kSamplesPerFrame samples) per iteration.
     for ( size_t i = 0 ; i < aac_packet_count ; ++i ) {
         UInt32 num_packets = 1;
         
         AudioBufferList l;
         l.mNumberBuffers=1;
         l.mBuffers[0].mDataByteSize = m_outputPacketMaxSize * num_packets;
         l.mBuffers[0].mData = p;
         
         // UserData tells ioProc where to read this frame's source PCM from.
         std::unique_ptr<UserData> ud(new UserData());
         ud->size = static_cast<int>(kSamplesPerFrame * m_bytesPerSample);
         ud->data = const_cast<uint8_t*>(p_out);
         ud->packetSize = static_cast<int>(m_bytesPerSample);
         
         AudioStreamPacketDescription output_packet_desc[num_packets];
         // The converter is shared state; serialize access to it.
         m_converterMutex.lock();
         AudioConverterFillComplexBuffer(m_audioConverter, AACEncode::ioProc, ud.get(), &num_packets, &l, output_packet_desc);
         m_converterMutex.unlock();
         
         // Advance by the actual encoded size / the consumed PCM size.
         p += output_packet_desc[0].mDataByteSize;
         p_out += kSamplesPerFrame * m_bytesPerSample;
     }
     const size_t totalBytes = p - m_outputBuffer();
     
     
     auto output = m_output.lock();
     if(output && totalBytes) {
         // Send the AudioSpecificConfig once before any AAC payload.
         if(!m_sentConfig) {
             output->pushBuffer((const uint8_t*)m_asc, sizeof(m_asc), metadata);
             m_sentConfig = true;
         }
         
         output->pushBuffer(m_outputBuffer(), totalBytes, metadata);
     }
 }
 void
 RTMPSession::sendPacket(uint8_t* data, size_t size, RTMPChunk_0 metadata)
 {
     RTMPMetadata_t md(0.);
     
     md.setData(metadata.timestamp.data, metadata.msg_length.data, metadata.msg_type_id, metadata.msg_stream_id, false);
     
     pushBuffer(data, size, md);
 }
Exemple #6
0
void RadioInterface::driveTransmitRadio(signalVector &radioBurst, bool zeroBurst) {
  // Drop the burst if the radio is not powered on.
  if (!mOn)
    return;

  // Convert the burst into the radio's native format at the write cursor
  // (two shorts per complex sample, hence the factor of 2).
  radioifyVector(radioBurst, sendBuffer + 2 * sendCursor, powerScaling, zeroBurst);
  sendCursor += radioBurst.size();

  // Hand any complete buffer to the device.
  pushBuffer();
}
void RadioInterface::driveTransmitRadio(signalVector &radioBurst) {
  // Drop the burst if the radio is not powered on.
  if (!mOn)
    return;

  // Convert the burst to the USRP's native format at the write cursor.
  USRPifyVector(radioBurst, sendBuffer + sendCursor, powerScaling);

  // Two shorts per complex sample.
  sendCursor += radioBurst.size() * 2;

  pushBuffer();
}
void BinaryDataHandler::put(void const *src, UInt32 size)
{
    // Write `size` bytes to the stream.  Data at or above the zero-copy
    // threshold is queued by reference; smaller data is copied into the
    // chain of write buffers, flushing full buffers as we go.
    UInt8 const *data = static_cast<UInt8 const *>(src);

    if(_zeroCopyThreshold && size >= _zeroCopyThreshold)
    {
        if(_zeroCopyThreshold == 1)
        {
            // Threshold of exactly 1 means "write everything directly".
            write(const_cast<MemoryHandle>(data), size);
        }
        else
        {
            UInt8 tag = 1;

            // we have to write a tag, to indicate the membership
            // of this zero copy block to the current data block
            put(&tag, sizeof(tag));

            // Queue the caller's memory itself (no copy); it must remain
            // valid until the buffers are flushed.
            _zeroCopyBuffers.push_back(
                MemoryBlock(const_cast<MemoryHandle>(data), size, size));
        }
    }
    else
    {
        UInt32 copySize;

        // Copy loop: fill the current write buffer, advancing through the
        // buffer chain (pushing when it is exhausted) until all data is out.
        while(size != 0)
        {
            if(_currentWriteBuffer == writeBufEnd())
            {
                pushBuffer();
            }

            // Copy as much as fits in the current buffer.
            copySize = osgMin((_currentWriteBuffer->getSize() -
                               _currentWriteBufferPos),
                              size);

            memcpy(_currentWriteBuffer->getMem() + _currentWriteBufferPos,
                    data,
                    copySize);

             size                  -= copySize;
            _currentWriteBufferPos += copySize;
             data                  += copySize;

            // skip to next buffer if current buffer is full
            if(_currentWriteBufferPos == _currentWriteBuffer->getSize())
            {
                _currentWriteBuffer->setDataSize(_currentWriteBufferPos);
                _currentWriteBuffer++;
                _currentWriteBufferPos = 0;
            }
        }
    }
}
Exemple #9
0
void RadioInterface::driveTransmitRadio(std::vector<signalVector *> &bursts,
                                        std::vector<bool> &zeros)
{
  // Nothing to do while the radio is off.
  if (!mOn)
    return;

  // Convert one burst per channel into the send buffers.
  for (size_t chan = 0; chan < mChans; ++chan)
    radioifyVector(*bursts[chan], chan, zeros[chan]);

  // Drain everything that is ready to go to the device.
  while (pushBuffer())
    ;
}
Exemple #10
0
 void
 Split::pushBuffer(const uint8_t* const data, size_t size, IMetadata& metadata)
 {
     // Fan the buffer out to every registered output.
     // Hold the mutex via RAII: the original manual lock()/unlock() pair
     // left the mutex locked forever if an output's pushBuffer threw.
     std::lock_guard<std::mutex> guard(m_mtx);
     for ( auto & it : m_outputs ) {
         auto outp = it.lock();   // weak_ptr -> shared_ptr; skip dead outputs
         if(outp) {
             outp->pushBuffer(data, size, metadata);
         }
     }
 }
Exemple #11
0
    void
    AACPacketizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& metadata)
    {
        // Wrap one AAC frame in an FLV audio tag and push it downstream.
        // The outgoing timestamp is wall-clock time since the epoch,
        // quantized down to a multiple of the buffer duration.
        const auto now = std::chrono::steady_clock::now();
        const uint64_t bufferDuration(metadata.timestampDelta * 1000000.);
        const double nowmicros (std::chrono::duration_cast<std::chrono::microseconds>(now - m_epoch).count());
        
        
        // NOTE(review): if timestampDelta is 0 this divides by zero —
        // confirm callers always supply a non-zero delta.
        const auto micros = std::floor(nowmicros / double(bufferDuration)) * bufferDuration;
        
        std::vector<uint8_t> & outBuffer = m_outbuffer;
        
        outBuffer.clear();
        
        int flags = 0;
        const int flags_size = 2; // FLV audio tag header: flags byte + AAC packet-type byte
        
        //static int prev_ts = 0;
        
        int ts = micros / 1000; // m_audioTs * 1000.;//

        
        auto output = m_output.lock();
        RTMPMetadata_t outMeta(metadata.timestampDelta);
        
        if(output) {
        
            flags = FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO;
        
        
            outBuffer.reserve(inSize + flags_size);
        
            put_byte(outBuffer, flags);
            // Second byte: 0 = AAC sequence header, 1 = AAC raw data.
            put_byte(outBuffer, m_sentAudioConfig);
            if(!m_sentAudioConfig) {
                
                // First tag carries a hard-coded AudioSpecificConfig
                // (0x12,0x10 — presumably AAC-LC/44.1kHz/stereo, matching
                // the flags above) instead of the frame payload.
                m_sentAudioConfig = true;
                const char hdr[2] = { 0x12,0x10 };
                put_buff(outBuffer, (uint8_t*)hdr, 2);
                
            } else {
                
                put_buff(outBuffer, inBuffer, inSize);
                m_audioTs += metadata.timestampDelta;
                
            }

            outMeta.setData(ts, static_cast<int>(outBuffer.size()), FLV_TAG_TYPE_AUDIO, kAudioChannelStreamId);
            
            output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
        }

    }
Exemple #12
0
void AudioAppSrc::pushAudioBuffer()
{
    // Only meaningful in pre-allocated mode with a live buffer.
    if (!preAlloc || buffer.isNull())
        return;

    // Release our mapping before handing the buffer to the pipeline.
    buffer->unmap(mapInfo);

    //qDebug() << "AudioAppSrc PUSHBUFFER. PREALLOC" << preAlloc << "Length:" << buffer->size();
    pushBuffer(buffer);
}
    void
    AACPacketizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& metadata)
    {
        // Wrap one AAC frame in an RTMP audio message.  A 2-byte input is
        // treated as the AudioSpecificConfig and cached; the config is
        // emitted once before any audio payload.
        std::vector<uint8_t> & outBuffer = m_outbuffer;

        outBuffer.clear();

        // Map the configured channel count / sample rate onto FLV flags.
        int flvStereoOrMono = (m_channelCount == 2 ? FLV_STEREO : FLV_MONO);
        int flvSampleRate = FLV_SAMPLERATE_44100HZ; // default
        if (m_sampleRate == 22050.0) {
            flvSampleRate = FLV_SAMPLERATE_22050HZ;
        }

        int flags = 0;
        const int flags_size = 2; // flags byte + AAC packet-type byte


        int ts = metadata.timestampDelta + m_ctsOffset ;
//        DLog("AAC: %06d", ts);
        
        auto output = m_output.lock();

        RTMPMetadata_t outMeta(ts);

        // Capture the 2-byte AudioSpecificConfig the first time it appears.
        if(inSize == 2 && !m_asc[0] && !m_asc[1]) {
            m_asc[0] = inBuffer[0];
            m_asc[1] = inBuffer[1];
        }

        if(output) {

            flags = FLV_CODECID_AAC | flvSampleRate | FLV_SAMPLESSIZE_16BIT | flvStereoOrMono;

            outBuffer.reserve(inSize + flags_size);

            put_byte(outBuffer, flags);
            // Second byte: 0 = AAC sequence header, 1 = AAC raw data.
            put_byte(outBuffer, m_sentAudioConfig);

            if(!m_sentAudioConfig) {
                // First message carries the cached config, not the payload.
                m_sentAudioConfig = true;
                put_buff(outBuffer, (uint8_t*)m_asc, sizeof(m_asc));

            } else {
                put_buff(outBuffer, inBuffer, inSize);
            }

            outMeta.setData(ts, static_cast<int>(outBuffer.size()), RTMP_PT_AUDIO, kAudioChannelStreamId, false);

            output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
        }

    }
Exemple #14
0
    void
    H264Encode::compressionSessionOutput(const uint8_t *data, size_t size, uint64_t ts)
    {
#if VERSION_OK
        // Forward the encoded frame downstream, stamped with its timestamp.
        auto output = m_output.lock();
        if(output) {
            videocore::VideoBufferMetadata metadata(ts);
            output->pushBuffer(data, size, metadata);
        }
#endif

    }
void RadioInterface::driveTransmitRadio(std::vector<signalVector *> &bursts,
                                        std::vector<bool> &zeros)
{
  // Ignore bursts while the radio is off.
  if (!mOn)
    return;

  // Convert each channel's burst into its send buffer at the write cursor.
  for (size_t chan = 0; chan < mChans; ++chan)
    radioifyVector(*bursts[chan],
                   (float *) (sendBuffer[chan]->begin() + sendCursor),
                   zeros[chan]);

  // All channels carry the same number of samples; advance by channel 0's.
  sendCursor += bursts[0]->size();

  pushBuffer();
}
    void
    AACPacketizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& metadata)
    {
        // Wrap one AAC frame in an FLV audio tag.  A 2-byte input is cached
        // as the AudioSpecificConfig; the config is emitted once before any
        // audio payload.
        std::vector<uint8_t> & outBuffer = m_outbuffer;
        
        outBuffer.clear();
        
        int flags = 0;
        const int flags_size = 2; // flags byte + AAC packet-type byte
    
        
        int ts = metadata.timestampDelta;
        
        auto output = m_output.lock();
        
        RTMPMetadata_t outMeta(metadata.timestampDelta);
        
        // Capture the 2-byte AudioSpecificConfig the first time it appears.
        if(inSize == 2 && !m_asc[0] && !m_asc[1]) {
            m_asc[0] = inBuffer[0];
            m_asc[1] = inBuffer[1];
        }
        
        if(output) {
        
            flags = FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO;
           
            outBuffer.reserve(inSize + flags_size);
            
            put_byte(outBuffer, flags);
            // Second byte: 0 = AAC sequence header, 1 = AAC raw data.
            put_byte(outBuffer, m_sentAudioConfig);

            if(!m_sentAudioConfig) {
                // First tag carries the cached config instead of payload.
                m_sentAudioConfig = true;
                put_buff(outBuffer, (uint8_t*)m_asc, sizeof(m_asc));
                
            } else {
                
                put_buff(outBuffer, inBuffer, inSize);
                m_audioTs += metadata.timestampDelta;
                
            }

            outMeta.setData(ts, static_cast<int>(outBuffer.size()), FLV_TAG_TYPE_AUDIO, kAudioChannelStreamId);
            
            output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
        }

    }
Exemple #17
0
void AudioAppSrc::pushAudioBuffer(QByteArray data)
{
    // This overload is only for the non-preallocated path.
    if (preAlloc)
        return;

    // Copy the sample bytes into a freshly allocated GStreamer buffer.
    QGst::BufferPtr buf = QGst::Buffer::create(data.size());
    QGst::MapInfo map;
    buf->map(map, QGst::MapWrite);
    memcpy(map.data(), data.data(), map.size());
    buf->unmap(map);

    //qDebug() << "AudioAppSrc PUSHBUFFER. PREALLOC" << preAlloc << "Length:" << buffer->size();
    pushBuffer(buf);
}
    void
    AspectTransform::pushBuffer(const uint8_t *const data,
                                size_t size,
                                videocore::IMetadata &metadata)
    {
        // Scale the frame's transform matrix so the pixel buffer fits (or
        // fills) the bounding box while preserving aspect ratio, then
        // forward the buffer downstream unchanged.
        auto output = m_output.lock();
        
        if(output) {

            // `data` points at a shared_ptr<IPixelBuffer>, not raw pixels.
            std::shared_ptr<IPixelBuffer> pb = *(std::shared_ptr<IPixelBuffer>*)data;
            
            pb->lock(true);
            
            float width = float(pb->width());
            float height = float(pb->height());
            
            // Recompute the scale only when the source dimensions change.
            if(width != m_prevWidth || height != m_prevHeight) {
                setBoundingBoxDirty();
                m_prevHeight = height;
                m_prevWidth = width;
            }
            
            if(m_boundingBoxDirty) {
                
                float wfac = float(m_boundingWidth) / width;
                float hfac = float(m_boundingHeight) / height;
                
                // kAspectFit picks the smaller factor (letterbox); the other
                // mode picks the larger one (crop/fill).
                const float mult = (m_aspectMode == kAspectFit ? (wfac < hfac) : (wfac > hfac)) ? wfac : hfac;
                
                wfac = width*mult / float(m_boundingWidth);
                hfac = height*mult / float(m_boundingHeight);
                
                m_scale = glm::vec3(wfac,hfac,1.f);
                
                m_boundingBoxDirty = false;
            }
            
            pb->unlock(true);
            
            // NOTE(review): dynamic_cast assumes the metadata really is a
            // VideoBufferMetadata; a mismatch would throw std::bad_cast.
            videocore::VideoBufferMetadata& md = dynamic_cast<videocore::VideoBufferMetadata&>(metadata);
            glm::mat4 & mat = md.getData<videocore::kVideoMetadataMatrix>();
            
            mat = glm::scale(mat, m_scale);
            
            output->pushBuffer(data, size, metadata);
        }
    }
Exemple #19
0
 void
 AspectTransform::pushBuffer(const uint8_t *const data,
                             size_t size,
                             videocore::IMetadata &metadata)
 {
     // Scale the frame's transform matrix so the pixel buffer fits (or
     // fills) the bounding box while preserving aspect ratio, then forward
     // the buffer downstream unchanged.
     auto output = m_output.lock();
     
     if(output) {
         // `data` is actually a CVPixelBufferRef, not raw pixel bytes.
         CVPixelBufferRef pb = (CVPixelBufferRef)data;
         CVPixelBufferLockBaseAddress(pb, kCVPixelBufferLock_ReadOnly);
         
         float width = CVPixelBufferGetWidth(pb);
         float height = CVPixelBufferGetHeight(pb);
         
         // Recompute the scale only when the source dimensions change.
         if(width != m_prevWidth || height != m_prevHeight) {
             setBoundingBoxDirty();
             m_prevHeight = height;
             m_prevWidth = width;
         }
         
         if(m_boundingBoxDirty) {
             // TODO: Replace CVPixelBufferRef with an internal format.
             
             float wfac = float(m_boundingWidth) / width;
             float hfac = float(m_boundingHeight) / height;
             
             // kAspectFit picks the smaller factor (letterbox); the other
             // mode picks the larger one (crop/fill).
             const float mult = (m_aspectMode == kAspectFit ? (wfac < hfac) : (wfac > hfac)) ? wfac : hfac;
             
             wfac = width*mult / float(m_boundingWidth);
             hfac = height*mult / float(m_boundingHeight);
             
             m_scale = glm::vec3(wfac,hfac,1.f);
             
             m_boundingBoxDirty = false;
         }
         
         CVPixelBufferUnlockBaseAddress(pb, kCVPixelBufferLock_ReadOnly);
         
         // NOTE(review): dynamic_cast assumes the metadata really is a
         // VideoBufferMetadata; a mismatch would throw std::bad_cast.
         videocore::VideoBufferMetadata& md = dynamic_cast<videocore::VideoBufferMetadata&>(metadata);
         glm::mat4 & mat = md.getData<videocore::kVideoMetadataMatrix>();
         
         mat = glm::scale(mat, m_scale);
         
         output->pushBuffer(data, size, metadata);
     }
 }
void BinaryDataHandler::flush(void)
{
    // If writing stopped part-way through the buffer chain, record how much
    // of the current buffer is valid and mark every remaining buffer empty.
    if (_currentWriteBuffer != writeBufEnd())
    {
        _currentWriteBuffer->setDataSize(_currentWriteBufferPos);
        ++_currentWriteBuffer;

        for (; _currentWriteBuffer != writeBufEnd(); ++_currentWriteBuffer)
            _currentWriteBuffer->setDataSize(0);
    }

    // Hand the whole chain to the transport.
    pushBuffer();
}
 void
 PixelBufferSource::pushPixelBuffer(void *data, size_t size)
 {
     // Copy `size` bytes of pixel data into our CVPixelBuffer and push a
     // reference to the buffer downstream.
     auto outp = m_output.lock();
     
     if(outp) {
         // Lock BEFORE asking for the base address: per Core Video, the
         // pointer from CVPixelBufferGetBaseAddress is only valid while the
         // base address is locked.  The original fetched the pointer first.
         CVPixelBufferLockBaseAddress((CVPixelBufferRef)m_pixelBuffer, 0);
         void* loc = CVPixelBufferGetBaseAddress((CVPixelBufferRef)m_pixelBuffer);
         memcpy(loc, data, size);
         CVPixelBufferUnlockBaseAddress((CVPixelBufferRef)m_pixelBuffer, 0);
         
         VideoBufferMetadata md(0.);
         md.setData(kLayerGame, shared_from_this());
         
         // Downstream receives the CVPixelBufferRef itself, not pixel bytes.
         outp->pushBuffer((const uint8_t*)m_pixelBuffer, sizeof(CVPixelBufferRef), md);
     }
 }
Exemple #22
0
static int duk_disasm(RAsm *a, RAsmOp *op, const ut8 *buf, int len) {
	// Disassemble `buf` by invoking the JS "disfun" callback stored in the
	// duktape global stash.  The callback's result array is probed at
	// indices 0..2: the first number becomes the opcode size (and default
	// return value), a later number overrides the return value, and the
	// first string becomes the mnemonic.
	int res = 0, res2 = 0;
	const char *opstr = NULL;
	ut8 *b = a->cur->user;
	duk_push_global_stash (ctx);
	duk_dup (ctx, 0);  /* timer callback */
	duk_get_prop_string (ctx, -2, "disfun");
	b = a->cur->user = duk_require_tval (ctx, -1);
	if (duk_is_callable(ctx, -1)) {
		int i;
		pushBuffer (buf, len);
		duk_call (ctx, 1);

		// [ size, str ]
		for (i = 0; i < 3; i++) {
			duk_dup_top (ctx);
			duk_get_prop_index (ctx, -1, i);
			if (duk_is_number (ctx, -1)) {
				// Braces added: the original dangling-else indentation
				// obscured which branch the else belonged to.
				if (res) {
					res2 = duk_to_number (ctx, -1);
				} else {
					res2 = res = duk_to_number (ctx, -1);
				}
			} else if (duk_is_string (ctx, -1)) {
				if (!opstr) {
					opstr = duk_to_string (ctx, -1);
				}
			}
			duk_pop (ctx);
		}
	} else {
		eprintf ("[:(] Is not a function %02x %02x\n", b[0],b[1]);
	}

	// fill op struct
	op->size = res;
	if (!opstr) opstr = "invalid";
	// strncpy does not NUL-terminate when the source fills the buffer;
	// copy one byte less and terminate explicitly.
	strncpy (op->buf_asm, opstr, sizeof (op->buf_asm) - 1);
	op->buf_asm[sizeof (op->buf_asm) - 1] = '\0';
	r_hex_bin2str (buf, op->size, op->buf_hex);
	return res2;
}
void RadioInterface::driveTransmitRadio() {

  radioVector *radioBurst = NULL;

  radioBurst = mTransmitFIFO.read();

  LOG(DEEPDEBUG) << "transmitFIFO: read radio vector at time: " << radioBurst->time();

  signalVector *newBurst = radioBurst;
  if (sendBuffer) {
    signalVector *tmp = sendBuffer;
    sendBuffer = new signalVector(*sendBuffer,*newBurst);
    delete tmp; 
  }
  else 
    sendBuffer = new signalVector(*newBurst);
  
  delete radioBurst;
  
  pushBuffer();
}
void AudioVisualiserComponent::pushBuffer (const AudioSampleBuffer& buffer)
{
    // Unpack the sample buffer and forward to the raw-pointer overload.
    auto channelData = buffer.getArrayOfReadPointers();
    const int numChannels = buffer.getNumChannels();
    const int numSamples  = buffer.getNumSamples();

    pushBuffer (channelData, numChannels, numSamples);
}
    void
    GenericAudioMixer::mixThread()
    {
        // Mixer loop: once per frame period, pull PCM from every input that
        // has buffered at least m_bufferDuration worth of audio, apply the
        // per-source gain, sum into `samples`, and push the mix downstream.
        const auto us = std::chrono::microseconds(static_cast<long long>(m_frameDuration * 1000000.)) ;
        const float g = 0.70710678118f; // 1 / sqrt(2) : headroom so two full-scale inputs don't clip
        const size_t outSampleCount = static_cast<size_t>(m_outFrequencyInHz * m_frameDuration);
        const size_t outBufferSize = outSampleCount * m_bytesPerSample ;

        const size_t requiredSampleCount = static_cast<size_t>(m_outFrequencyInHz * m_bufferDuration);
        const size_t requiredBufferSize = requiredSampleCount * m_bytesPerSample;

        // Value-initialize (trailing "()") both arrays: the original left
        // them indeterminate, so the very first mix accumulated into garbage
        // and the unrolled loop could read stale bytes past the fetched size.
        const std::unique_ptr<short[]> buffer(new short[outBufferSize / sizeof(short)]());
        const std::unique_ptr<short[]> samples(new short[outBufferSize / sizeof(short)]());

        m_nextMixTime = std::chrono::steady_clock::now();

        while(!m_exiting.load()) {
            std::unique_lock<std::mutex> l(m_mixMutex);

            const auto now = std::chrono::steady_clock::now();

            if( now >= m_nextMixTime) {

                size_t sampleBufferSize = 0; // largest byte count produced by any input
                m_nextMixTime += us;

                // Mix and push
                for ( auto it = m_inBuffer.begin() ; it != m_inBuffer.end() ; ++it )
                {
                    //
                    // TODO: A better approach is to put the buffer size requirement on the OUTPUT buffer, and not on the input buffers.
                    //
                    if(it->second->size() >= requiredBufferSize){
                        auto size = it->second->get((uint8_t*)&buffer[0], outBufferSize);
                        if(size > sampleBufferSize) {
                            sampleBufferSize = size;
                        }

                        const size_t count = (size/sizeof(short));
                        const float gain = m_inGain[it->first];
                        const float mult = g*gain;

                        // Plain loop (was unrolled by 8): the unrolled tail
                        // could index up to 7 samples past `count`.
                        for ( size_t i = 0 ; i < count ; ++i ) {
                            samples[i] += buffer[i] * mult;
                        }
                    }
                }

                if(sampleBufferSize) {

                    // Timestamp the mix in ms relative to the mixer epoch.
                    AudioBufferMetadata md ( std::chrono::duration_cast<std::chrono::milliseconds>(m_nextMixTime - m_epoch).count() );
                    std::shared_ptr<videocore::ISource> blank;

                    md.setData(m_outFrequencyInHz, m_outBitsPerChannel, m_outChannelCount, false, blank);

                    auto out = m_output.lock();
                    if(out) {
                        out->pushBuffer((uint8_t*)&samples[0], sampleBufferSize, md);
                    }
                }

                // Reset the accumulator for the next mix window.
                memset(samples.get(), 0, outBufferSize);
            }
            if(!m_exiting.load()) {
                m_mixThreadCond.wait_until(l, m_nextMixTime);
            }
        }
        DLog("Exiting audio mixer...\n");
    }
    void H264Packetizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& inMetadata)
    {
        // Convert one Annex-B NAL unit (4-byte start code assumed) into an
        // RTMP video message.  SPS/PPS NALs are cached and sent once as an
        // AVC decoder configuration record; other NALs go out as frames.
        std::vector<uint8_t>& outBuffer = m_outbuffer;
        
        outBuffer.clear();
        
        uint8_t nal_type = inBuffer[4] & 0x1F; // low 5 bits of the NAL header byte
        int flags = 0;
        const int flags_size = 5; // flags + AVC packet type + 24-bit composition time
        int dts = inMetadata.dts ;
        int pts = inMetadata.pts + m_ctsOffset; // correct for pts < dts which some players (ffmpeg) don't like
        
        // Fall back to (offset-corrected) pts when no dts was provided.
        dts = dts > 0 ? dts : pts - m_ctsOffset ;
        
        bool is_config = (nal_type == 7 || nal_type == 8); // SPS or PPS
        
        flags = FLV_CODECID_H264;
        auto output = m_output.lock();

        switch(nal_type) {
            case 7: // SPS: cache the first one seen
                if(m_sps.size() == 0) {
                    m_sps.resize(inSize-4);
                    memcpy(&m_sps[0], inBuffer+4, inSize-4);
                }
                break;
            case 8: // PPS: cache the first one seen
                if(m_pps.size() == 0) {
                    m_pps.resize(inSize-4);
                    memcpy(&m_pps[0], inBuffer+4, inSize-4);
                }
                flags |= FLV_FRAME_KEY;
                break;
            case 5: // IDR slice
                flags |= FLV_FRAME_KEY;
                
                break;
            default:
                flags |= FLV_FRAME_INTER;
                
                
                break;
                
        }
        
        
        if(output) {
            
            RTMPMetadata_t outMeta(dts);
            std::vector<uint8_t> conf;
            
            // For config NALs the payload becomes the SPS/PPS record.
            if(is_config && m_sps.size() > 0 && m_pps.size() > 0 ) {
                conf = configurationFromSpsAndPps();
                inSize = conf.size();
            }
            outBuffer.reserve(inSize + flags_size);
            
            put_byte(outBuffer, flags);
            put_byte(outBuffer, !is_config); // 0 = sequence header, 1 = NALU
            put_be24(outBuffer, pts - dts);             // Decoder delay
            
            if(is_config ) {
                // create modified SPS/PPS buffer
                if(m_sps.size() > 0 && m_pps.size() > 0 && !m_sentConfig && (flags & FLV_FRAME_KEY)) {
                    put_buff(outBuffer, &conf[0], conf.size());
                    m_sentConfig = true;
                } else {
                    return;
                }
            } else {
                put_buff(outBuffer, inBuffer, inSize);
            }
            
            outMeta.setData(dts, static_cast<int>(outBuffer.size()), RTMP_PT_VIDEO, kVideoChannelStreamId, nal_type == 5);
            
            output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
        }
        
    }
    void H264Packetizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& inMetadata)
    {
        // Convert one Annex-B NAL unit (4-byte start code assumed) into an
        // FLV video tag.  SPS/PPS NALs are cached and sent once as an AVC
        // decoder configuration record; other NALs go out as frames.
        std::vector<uint8_t>& outBuffer = m_outbuffer;
        
        outBuffer.clear();
        
        uint8_t nal_type = inBuffer[4] & 0x1F; // low 5 bits of the NAL header byte
        int flags = 0;
        const int flags_size = 5; // flags + AVC packet type + 24-bit composition time
        const int ts = inMetadata.timestampDelta;

        bool is_config = (nal_type == 7 || nal_type == 8); // SPS or PPS
        
        flags = FLV_CODECID_H264;
        auto output = m_output.lock();
        RTMPMetadata_t outMeta(inMetadata.timestampDelta);

        switch(nal_type) {
            case 7: // SPS: cache it; nothing is emitted for the SPS itself
                if(m_sps.size() == 0) {
                    m_sps.resize(inSize-4);
                    memcpy(&m_sps[0], inBuffer+4, inSize-4);
                }
                return;
            case 8: // PPS: cache it; the config record is emitted below
                if(m_pps.size() == 0) {
                    m_pps.resize(inSize-4);
                    memcpy(&m_pps[0], inBuffer+4, inSize-4);
                }
                flags |= FLV_FRAME_KEY;
                break;
            case 5: // IDR slice
                flags |= FLV_FRAME_KEY;
                break;
            default:
                flags |= FLV_FRAME_INTER;
                break;
        }
        
        if(output) {
            std::vector<uint8_t> conf;
            
            // For config NALs the payload becomes the SPS/PPS record.
            if(is_config && m_sps.size() > 0 && m_pps.size() > 0 ) {
                conf = configurationFromSpsAndPps();
                inSize = conf.size();
            }
            outBuffer.reserve(inSize + flags_size);
            
            put_byte(outBuffer, flags);
            put_byte(outBuffer, !is_config); // 0 = sequence header, 1 = NALU
            put_be24(outBuffer, 0);          // composition time offset
            
            if(is_config) {
                // Send the SPS/PPS configuration record exactly once.
                if(m_sps.size() > 0 && m_pps.size() > 0 && !m_sentConfig) {
                    put_buff(outBuffer, &conf[0], conf.size());
                    m_sentConfig = true;
                } else {
                    return;
                }
            } else {
                put_buff(outBuffer, inBuffer, inSize);
            }
            
            // (Removed dead inter-frame timing instrumentation: it kept
            // static totals/counters that were never read anywhere.)
            outMeta.setData(ts, static_cast<int>(outBuffer.size()), FLV_TAG_TYPE_VIDEO, kVideoChannelStreamId);
            
            output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
        }
        
    }