Esempio n. 1
0
/**
 *  Parse the buffer into a recognized frame
 *
 *  Every time that data comes in on the connection, you should call this method to parse
 *  the incoming data, and have it handled by the AMQP library. This method returns the
 *  number of bytes that were processed.
 *
 *  If not all bytes could be processed because the buffer only contained a partial frame,
 *  you should call this same method again when more data is available. The AMQP library
 *  does not do any buffering, so it is up to the caller to ensure that the old data is
 *  also passed in that later call.
 *
 *  @param  buffer      buffer to decode
 *  @return             number of bytes that were processed
 */
uint64_t ConnectionImpl::parse(const Buffer &buffer)
{
    // a closed connection no longer accepts incoming data
    if (_state == state_closed) return 0;

    // bytes handled so far
    uint64_t handled = 0;

    // guard that tells us whether this connection object is still alive,
    // because processing a frame may end up destructing the connection
    Monitor monitor(this);

    // consume frames as long as data remains and the connection survives
    while (handled < buffer.size() && monitor.valid())
    {
        // guard against protocol exceptions thrown while decoding
        try
        {
            // attempt to extract the next frame from the unconsumed part of the buffer
            ReceivedFrame receivedFrame(ReducedBuffer(buffer, handled), _maxFrame);

            // a partial frame means we have to wait for more data from the caller
            if (!receivedFrame.complete()) return handled;

            // hand the frame over to the connection
            receivedFrame.process(this);

            // account for the bytes that made up this frame
            handled += receivedFrame.totalSize();
        }
        catch (const ProtocolException &exception)
        {
            // a protocol violation (like data out of range) is fatal, report it
            reportError(exception.what());

            // no further parsing
            return handled;
        }
    }

    // stop if the connection was destructed, was never asked to close, or is not connected
    if (!monitor.valid() || !_closed || _state != state_connected) return handled;

    // close() was requested earlier but the close frame was not yet sent;
    // if no channels are waiting any more we can send it right now
    if (!waitingChannels()) sendClose();

    // expose the number of processed bytes to the caller
    return handled;
}
Esempio n. 2
0
/**
 *  Parse the buffer into a recognized frame
 *
 *  Every time that data comes in on the connection, you should call this method to parse
 *  the incoming data, and have it handled by the AMQP library. This method returns the
 *  number of bytes that were processed.
 *
 *  If not all bytes could be processed because the buffer only contained a partial frame,
 *  you should call this same method again when more data is available. The AMQP library
 *  does not do any buffering, so it is up to the caller to ensure that the old data is
 *  also passed in that later call.
 *
 *  @param  buffer      buffer to decode
 *  @param  size        size of the buffer to decode
 *  @return             number of bytes that were processed
 */
size_t ConnectionImpl::parse(char *buffer, size_t size)
{
    // refuse data on a connection that is already closed
    if (_state == state_closed) return 0;

    // total number of bytes consumed so far
    size_t consumed = 0;

    // guard object to detect destruction of the connection while frames are processed
    Monitor monitor(this);

    // keep extracting frames while data is left and the connection still exists
    while (size > 0 && monitor.valid())
    {
        // guard against protocol exceptions thrown while decoding
        try
        {
            // try to interpret the start of the remaining data as a frame
            ReceivedFrame receivedFrame(buffer, size, _maxFrame);

            // incomplete frame: the caller must supply more data later on
            if (!receivedFrame.complete()) return consumed;

            // let the connection act on the frame
            receivedFrame.process(this);

            // shift the parsing window past this frame
            size_t frameSize = receivedFrame.totalSize();
            consumed += frameSize;
            size -= frameSize;
            buffer += frameSize;
        }
        catch (const ProtocolException &exception)
        {
            // a protocol violation (like data out of range) is fatal, report it
            reportError(exception.what());

            // no further parsing
            return consumed;
        }
    }

    // expose the number of processed bytes to the caller
    return consumed;
}
Esempio n. 3
0
/**
 *  Convert the pending frame (m_frame) into a compact transmission buffer.
 *
 *  Scales the frame to m_transmitSize (if set) and converts it to a 16-bit
 *  RGB image to roughly halve the bandwidth needed, then copies the pixel
 *  data into a heap buffer owned by m_dataPtr and records the metadata
 *  (m_byteCount, m_imageFormat, m_imageSize, m_pixelFormat, m_captureTime,
 *  m_holdTime). Always ends by emitting receivedFrame(), even when the
 *  frame was invalid or could not be converted.
 *
 *  Thread-safety: the whole body runs under m_sendMutex.
 */
void VideoSender::processFrame()
{
	//qDebug() << "VideoSender::processFrame(): "<<this<<" mark";
	//sendLock();
	QMutexLocker lock(&m_sendMutex);
	
	if(m_frame && m_frame->isValid())
	{
		m_origSize = m_frame->size();
		#ifdef DEBUG_VIDEOFRAME_POINTERS
		qDebug() << "VideoSender::processFrame(): Mark1: m_frame:"<<m_frame;
		#endif
		
// 		m_frame->incRef();
		// fall back to the frame's own size when no transmit size was configured
		if(m_transmitSize.isEmpty())
			m_transmitSize = m_origSize;
			
		// Use 16bit format for transmission because:
		//  320x240x16bits / 8bits/byte = 153,600 bytes
		//  320x240x32bits / 8bits/byte = 307,200 bytes
		//  Half as much bandwidth required to transmit the same image - at the expense of 1ms on the sending side.

		//qDebug() << "VideoSender::processFrame: Downscaling video for transmission to "<<m_transmitSize;
		// To scale the video frame, first we must convert it to a QImage if its not already an image.
		// If we're lucky, it already is. Otherwise, we have to jump thru hoops to convert the byte 
		// array to a QImage then scale it.
		QImage scaledImage;
		if(!m_frame->image().isNull())
		{
			// frame already carries a QImage: scale only if needed, then squeeze to 16 bit
			scaledImage = m_transmitSize == m_origSize ? 
				m_frame->image() : 
				m_frame->image().scaled(m_transmitSize);
			
			scaledImage = scaledImage.convertToFormat(QImage::Format_RGB16);
		}
		else
		{
			#ifdef DEBUG_VIDEOFRAME_POINTERS
			qDebug() << "VideoSender::processFrame(): Scaling data from frame:"<<m_frame<<", pointer:"<<m_frame->pointer();
			#endif
			const QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat(m_frame->pixelFormat());
			if(imageFormat != QImage::Format_Invalid)
			{
				// Wrap the raw frame bytes in a QImage without copying them.
				// The bytes-per-line is width * bytes-per-pixel, derived from the format:
				// 2 bytes for the 16-bit formats, 3 for the 24-bit formats, 4 otherwise.
				QImage image(m_frame->pointer(),
					m_frame->size().width(),
					m_frame->size().height(),
					m_frame->size().width() *
						(imageFormat == QImage::Format_RGB16  ||
						imageFormat == QImage::Format_RGB555 ||
						imageFormat == QImage::Format_RGB444 ||
						imageFormat == QImage::Format_ARGB4444_Premultiplied ? 2 :
						imageFormat == QImage::Format_RGB888 ||
						imageFormat == QImage::Format_RGB666 ||
						imageFormat == QImage::Format_ARGB6666_Premultiplied ? 3 :
						4),
					imageFormat);
					
				//QTime t; t.start();
				scaledImage = m_transmitSize == m_origSize ? 
					image.convertToFormat(QImage::Format_RGB16) : // call convertToFormat instead of copy() because conversion does an implicit copy 
					image.scaled(m_transmitSize).convertToFormat(QImage::Format_RGB16); // do convertToFormat() after scaled() because less bytes to convert
					
				//qDebug() << "Downscaled image from "<<image.byteCount()<<"bytes to "<<scaledImage.byteCount()<<"bytes, orig ptr len:"<<m_frame->pointerLength()<<", orig ptr:"<<m_frame->pointer();
				//convertToFormat(QImage::Format_RGB16).
				//qDebug() << "VideoSender::processFrame: [QImage] downscale and 16bit conversion took"<<t.elapsed()<<"ms";
			}
			else
			{
				//qDebug() << "VideoSender::processFrame: Unable to convert pixel format to image format, cannot scale frame. Pixel Format:"<<m_frame->pixelFormat();
				return;
			}
		}
		
		#ifdef DEBUG_VIDEOFRAME_POINTERS
		qDebug() << "VideoSender::processFrame(): Mark2: frame:"<<m_frame;
		#endif
		
		// Now that we've got the image out of the original frame and scaled it, we have to construct a new
		// video frame to transmit on the wire from the scaledImage (assuming the scaledImage is valid.)
		// We attempt to transmit in its native format without converting it if we can to save local CPU power.
		if(!scaledImage.isNull())
		{
			m_captureTime = m_frame->captureTime();

			// map the QImage format onto the matching wire pixel format
			QImage::Format format = scaledImage.format();
			m_pixelFormat = 
				format == QImage::Format_ARGB32 ? QVideoFrame::Format_ARGB32 :
				format == QImage::Format_RGB32  ? QVideoFrame::Format_RGB32  :
				format == QImage::Format_RGB888 ? QVideoFrame::Format_RGB24  :
				format == QImage::Format_RGB16  ? QVideoFrame::Format_RGB565 :
				format == QImage::Format_RGB555 ? QVideoFrame::Format_RGB555 :
				//format == QImage::Format_ARGB32_Premultiplied ? QVideoFrame::Format_ARGB32_Premultiplied :
				// GLVideoDrawable doesn't support premultiplied - so the format conversion below will convert it to ARGB32 automatically
				QVideoFrame::Format_Invalid;
				
			if(m_pixelFormat == QVideoFrame::Format_Invalid)
			{
				qDebug() << "VideoFrame: image was not in an acceptable format, converting to ARGB32 automatically.";
				scaledImage = scaledImage.convertToFormat(QImage::Format_ARGB32);
				m_pixelFormat = QVideoFrame::Format_ARGB32;
			}
			
			// take a private copy of the pixel data so it outlives scaledImage
			uchar *ptr = (uchar*)malloc(sizeof(uchar) * scaledImage.byteCount());
			const uchar *src = (const uchar*)scaledImage.bits();
			memcpy(ptr, src, scaledImage.byteCount());
			
			if(m_dataPtr)
				m_dataPtr.clear();
				
			// BUGFIX: the buffer is allocated with malloc(), but QSharedPointer's
			// default deleter calls 'delete' - a mismatched allocator/deallocator
			// pair is undefined behavior. Hand free() to QSharedPointer as the
			// custom deleter so the buffer is released the same way it was acquired.
			m_dataPtr = QSharedPointer<uchar>(ptr, free);
			m_byteCount = scaledImage.byteCount();
			m_imageFormat = scaledImage.format();
			m_imageSize = scaledImage.size();
			
			// derive the per-frame hold time from the transmit FPS when one is set
			m_holdTime = m_transmitFps <= 0 ? m_frame->holdTime() : 1000/m_transmitFps;
			
			#ifdef DEBUG_VIDEOFRAME_POINTERS
			qDebug() << "VideoSender::processFrame(): Mark5: frame:"<<m_frame;
			#endif
		}
	}
	
	//sendUnlock();
	
	#ifdef DEBUG_VIDEOFRAME_POINTERS
	qDebug() << "VideoSender::processFrame(): Mark6: m_frame:"<<m_frame;
	#endif
	
	//qDebug() << "VideoSender::processFrame(): "<<this<<" mark end";
	emit receivedFrame();
}