Code example #1
File: LoRaMac-api-v3.c Project: 2thetop/XRange
uint8_t LoRaMacPrepareFrame( ChannelParams_t channel,LoRaMacHeader_t *macHdr, LoRaMacFrameCtrl_t *fCtrl, uint8_t *fOpts, uint8_t fPort, void *fBuffer, uint16_t fBufferSize )
{
    uint8_t retStatus;

    switch( PrepareFrame( macHdr, fCtrl, fPort, fBuffer, fBufferSize ) )
    {
        case LORAMAC_STATUS_OK:
            retStatus = 0U;
            break;
        case LORAMAC_STATUS_BUSY:
            retStatus = 1U;
            break;
        case LORAMAC_STATUS_NO_NETWORK_JOINED:
            retStatus = 2U;
            break;
        case LORAMAC_STATUS_LENGTH_ERROR:
        case LORAMAC_STATUS_MAC_CMD_LENGTH_ERROR:
            retStatus = 3U;
            break;
        case LORAMAC_STATUS_SERVICE_UNKNOWN:
            retStatus = 4U;
            break;
        default:
            retStatus = 1U;
            break;
    }

    return retStatus;
}
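
The wrapper above collapses the richer LoRaMacStatus_t values into the legacy numeric codes 0-4. As a hedged companion sketch, useful mainly for logging, here is a hypothetical helper that maps those numeric codes back to readable strings; it simply mirrors the switch in the example and is not part of the LoRaMAC API.

#include <stdint.h>

static const char* PrepareFrameStatusToString( uint8_t status )
{
    switch( status )
    {
        case 0U: return "OK";
        case 1U: return "busy";
        case 2U: return "no network joined";
        case 3U: return "length error";
        case 4U: return "service unknown";
        default: return "unrecognized status";
    }
}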
Code example #2
File: BMDOutput.cpp Project: TritonSailor/livepro
void BMDOutputDelegate::SetCurrentFrame(VideoFramePtr ptr)
{
	m_frame = ptr;
	
	PrepareFrame();
	
	if((++ m_frameReceivedCount % m_framesPerSecond) == 0)
		m_frameSet = false;
		
	ScheduleNextFrame(false);
	
	
	
	//if(m_totalFramesScheduled == 0)
	
	
	
	//ScheduleNextFrame(false);
	//qDebug() << "BMDOutputDelegate::SetCurrentFrame(): Got frame, size:"<<ptr->size();
	
	/*
	
// 	if(image.width()  != FRAME_WIDTH ||
// 	   image.height() != FRAME_HEIGHT)
// 	{
// 		image = image.scaled(FRAME_WIDTH,FRAME_HEIGHT,Qt::IgnoreAspectRatio);
// 	}
	
	if(image.format() != V4L_QIMAGE_FORMAT)
		image = image.convertToFormat(V4L_QIMAGE_FORMAT);
		
	// QImage and V4L's def of RGB24 differ in that they have the R & B bytes swapped compared to each other 
	// - so we swap them here to appear correct in output. 
	if(V4L_NATIVE_FORMAT == VIDEO_PALETTE_RGB24 &&
	   V4L_QIMAGE_FORMAT == QImage::Format_RGB888)
	{
		uchar  tmp;
		uchar *bits = image.scanLine(0);
		for(int i=0; i<image.byteCount(); i+=3)
		{
			tmp       = bits[i];
			bits[i]   = bits[i+2];
			bits[i+2] = tmp;
		}
	}
	
	if(write(m_BMDOutputDev, (const uchar*)image.bits(), image.byteCount()) != image.byteCount()) 
	{
		qDebug() << "BMDOutput::readFrame(): Error writing to "<<m_bmdOutputName<<" (bytes written does not match bytes requested), V4L error: " << strerror(errno); 
	}
	else
	{
		//qDebug() << "DVizSharedMemoryThread::readFrame(): Wrote "<<outImg.byteCount()<<" bytes to "<<V4L_OUTPUT;
	} */
	
}
Code example #3
void
FFmpegAACDecoder<LIBAV_VER>::DecodePacket(MP4Sample* aSample)
{
  AVPacket packet;
  av_init_packet(&packet);

  aSample->Pad(FF_INPUT_BUFFER_PADDING_SIZE);
  packet.data = aSample->data;
  packet.size = aSample->size;
  packet.pos = aSample->byte_offset;

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int decoded;
  int bytesConsumed =
    avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

  if (bytesConsumed < 0 || !decoded) {
    NS_WARNING("FFmpeg audio decoder error.");
    mCallback->Error();
    return;
  }

  NS_ASSERTION(bytesConsumed == (int)aSample->size,
               "Only one audio packet should be received at a time.");

  uint32_t numChannels = mCodecContext->channels;

  nsAutoArrayPtr<AudioDataValue> audio(
    CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));

  nsAutoPtr<AudioData> data(
    new AudioData(packet.pos, aSample->composition_timestamp, aSample->duration,
                  mFrame->nb_samples, audio.forget(), numChannels));

  mCallback->Output(data.forget());

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
Code example #4
MediaResult
FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    return MediaResult(
      NS_ERROR_OUT_OF_MEMORY,
      RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  while (packet.size > 0) {
    int decoded;
    int bytesConsumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
    }

    if (mFrame->format != AV_SAMPLE_FMT_FLT &&
        mFrame->format != AV_SAMPLE_FMT_FLTP &&
        mFrame->format != AV_SAMPLE_FMT_S16 &&
        mFrame->format != AV_SAMPLE_FMT_S16P &&
        mFrame->format != AV_SAMPLE_FMT_S32 &&
        mFrame->format != AV_SAMPLE_FMT_S32P) {
      return MediaResult(
        NS_ERROR_DOM_MEDIA_DECODE_ERR,
        RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
    }

    if (decoded) {
      uint32_t numChannels = mCodecContext->channels;
      AudioConfig::ChannelLayout layout(numChannels);
      if (!layout.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_FATAL_ERR,
          RESULT_DETAIL("Unsupported channel layout:%u", numChannels));
      }

      uint32_t samplingRate = mCodecContext->sample_rate;

      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
      if (!audio) {
        return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
      }

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!duration.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid sample duration"));
      }

      RefPtr<AudioData> data = new AudioData(samplePosition,
                                             pts.ToMicroseconds(),
                                             duration.ToMicroseconds(),
                                             mFrame->nb_samples,
                                             Move(audio),
                                             numChannels,
                                             samplingRate);
      mCallback->Output(data);
      pts += duration;
      if (!pts.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid count of accumulated audio samples"));
      }
    }
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }
  return NS_OK;
}
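
The decoders above hand mFrame to CopyAndPackAudio before constructing AudioData. As a hedged illustration of what that repacking step involves (this is not Mozilla's actual helper), the sketch below interleaves a float AVFrame into one contiguous buffer using only the FFmpeg frame fields already visible in the example:

#include <vector>
extern "C" {
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}

// Hypothetical helper: interleave a float frame (FLT or FLTP) into one packed
// buffer, one sample per channel in turn, the layout a packed-audio consumer expects.
static std::vector<float> InterleaveFloatAudio(const AVFrame* frame, int channels)
{
  std::vector<float> out(static_cast<size_t>(frame->nb_samples) * channels);
  if (av_sample_fmt_is_planar(static_cast<AVSampleFormat>(frame->format))) {
    // Planar layout: one plane per channel in extended_data[c].
    for (int s = 0; s < frame->nb_samples; s++) {
      for (int c = 0; c < channels; c++) {
        out[static_cast<size_t>(s) * channels + c] =
          reinterpret_cast<const float*>(frame->extended_data[c])[s];
      }
    }
  } else {
    // Packed layout: samples are already interleaved in the first plane.
    const float* src = reinterpret_cast<const float*>(frame->extended_data[0]);
    out.assign(src, src + out.size());
  }
  return out;
}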
Code example #5
void
FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
{
  AVPacket packet;
  av_init_packet(&packet);

  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  while (packet.size > 0) {
    int decoded;
    int bytesConsumed =
      avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      mCallback->Error();
      return;
    }

    if (decoded) {
      uint32_t numChannels = mCodecContext->channels;
      uint32_t samplingRate = mCodecContext->sample_rate;

      nsAutoArrayPtr<AudioDataValue> audio(
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!duration.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }

      nsRefPtr<AudioData> data = new AudioData(samplePosition,
                                               pts.ToMicroseconds(),
                                               duration.ToMicroseconds(),
                                               mFrame->nb_samples,
                                               audio.forget(),
                                               numChannels,
                                               samplingRate);
      mCallback->Output(data);
      pts += duration;
      if (!pts.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }
    }
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
Code example #6
void
FFmpegAudioDecoder<LIBAV_VER>::DecodePacket(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  while (packet.size > 0) {
    int decoded;
    int bytesConsumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      mCallback->Error();
      return;
    }

    if (decoded) {
      uint32_t numChannels = mCodecContext->channels;
      AudioConfig::ChannelLayout layout(numChannels);
      if (!layout.IsValid()) {
        mCallback->Error();
        return;
      }

      uint32_t samplingRate = mCodecContext->sample_rate;

      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!audio || !duration.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }

      RefPtr<AudioData> data = new AudioData(samplePosition,
                                             pts.ToMicroseconds(),
                                             duration.ToMicroseconds(),
                                             mFrame->nb_samples,
                                             Move(audio),
                                             numChannels,
                                             samplingRate);
      mCallback->Output(data);
      pts += duration;
      if (!pts.IsValid()) {
        NS_WARNING("Invalid count of accumulated audio samples");
        mCallback->Error();
        return;
      }
    }
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
Code example #7
File: Mesh.cpp Project: helcl42/Game-DX
//-----------------------------------------------------------------------------
// Prepares the given frame.
//-----------------------------------------------------------------------------
void Mesh::PrepareFrame( Frame *frame )
{
	m_frames->Add( frame );

	// Check if this frame is actually a reference point.
	if( strncmp( "rp_", frame->Name, 3 ) == 0 )
		m_refPoints->Add( frame );

	// Set the initial final transformation.
	frame->finalTransformationMatrix = frame->TransformationMatrix;

	// Prepare the frame's mesh container, if it has one.
	if( frame->pMeshContainer != NULL )
	{
		MeshContainer *meshContainer = (MeshContainer*)frame->pMeshContainer;

		// Check if this mesh is a skinned mesh.
		if( meshContainer->pSkinInfo != NULL )
		{
			// Create the array of bone matrix pointers.
			meshContainer->boneMatrixPointers = new D3DXMATRIX*[meshContainer->pSkinInfo->GetNumBones()];

			// Set up the pointers to the mesh's bone transformation matrices.
			for( unsigned long b = 0; b < meshContainer->pSkinInfo->GetNumBones(); b++ )
			{
				Frame *bone = (Frame*)D3DXFrameFind( m_firstFrame, meshContainer->pSkinInfo->GetBoneName( b ) );
				if( bone == NULL )
					continue;

				meshContainer->boneMatrixPointers[b] = &bone->finalTransformationMatrix;
			}

			// Keep track of the maximum bones out of all the mesh containers.
			if( m_totalBoneMatrices < meshContainer->pSkinInfo->GetNumBones() )
				m_totalBoneMatrices = meshContainer->pSkinInfo->GetNumBones();
		}

		// Check if the mesh has any materials.
		if( meshContainer->NumMaterials > 0 )
		{
			// Load all the materials in via the material manager.
			for( unsigned long m = 0; m < meshContainer->NumMaterials; m++ )
			{
				// Ensure the material has a texture.
				if( meshContainer->materialNames[m] != NULL )
				{
					// Get the name of the material's script and load it.
					char *name = new char[strlen( meshContainer->materialNames[m] ) + 5];
					sprintf( name, "%s.txt", meshContainer->materialNames[m] );
					meshContainer->materials[m] = Engine::GetInstance()->GetMaterialManager()->Add( name, GetPath() );
					SAFE_DELETE_ARRAY( name );
				}
			}
		}
	}

	// Prepare the frame's siblings.
	if( frame->pFrameSibling != NULL )
		PrepareFrame( (Frame*)frame->pFrameSibling );

	// Prepare the frame's children.
	if( frame->pFrameFirstChild != NULL )
		PrepareFrame( (Frame*)frame->pFrameFirstChild );
}
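
A small aside on the material loading above: the name buffer is sized strlen(...) + 5 to hold the four characters of ".txt" plus the terminating NUL. A hedged sketch of the same name construction with std::string, which avoids the manual size arithmetic (MakeScriptName is an illustrative helper, not part of the engine):

#include <string>

// Builds "<materialName>.txt" for the material manager lookup.
static std::string MakeScriptName( const char *materialName )
{
	return std::string( materialName ) + ".txt";
}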
Code example #8
File: Mesh.cpp Project: helcl42/Game-DX
//-----------------------------------------------------------------------------
// The mesh class constructor.
//-----------------------------------------------------------------------------
Mesh::Mesh( char *name, char *path ) : Resource< Mesh >( name, path )
{
	// Create the lists of frames and reference points.
	m_frames = new LinkedList< Frame >;
	m_refPoints = new LinkedList< Frame >;

	// Load the mesh's frame hierarchy.
	AllocateHierarchy ah;
	D3DXLoadMeshHierarchyFromX( GetFilename(), D3DXMESH_MANAGED, Engine::GetInstance()->GetDevice(), &ah, NULL, (D3DXFRAME**)&m_firstFrame, &m_animationController );

	// Disable all the animation tracks initially.
	if( m_animationController != NULL )
		for( unsigned long t = 0; t < m_animationController->GetMaxNumTracks(); ++t )
			m_animationController->SetTrackEnable( t, false );

	// Invalidate the bone transformation matrices array.
	m_boneMatrices = NULL;
	m_totalBoneMatrices = 0;

	// Prepare the frame hierarchy.
	PrepareFrame( m_firstFrame );

	// Allocate memory for the bone matrices.
	m_boneMatrices = new D3DXMATRIX[m_totalBoneMatrices];

	// Create a static (non-animated) version of the mesh.
	m_staticMesh = new MeshContainer;
	ZeroMemory( m_staticMesh, sizeof( MeshContainer ) );

	// Load the mesh.
	ID3DXBuffer *materialBuffer, *adjacencyBuffer;
	D3DXLoadMeshFromX( GetFilename(), D3DXMESH_MANAGED, Engine::GetInstance()->GetDevice(), &adjacencyBuffer, &materialBuffer, NULL, &m_staticMesh->NumMaterials, &m_staticMesh->originalMesh );

	// Optimise the mesh for better rendering performance.
	m_staticMesh->originalMesh->OptimizeInplace( D3DXMESHOPT_COMPACT | D3DXMESHOPT_ATTRSORT | D3DXMESHOPT_VERTEXCACHE, (DWORD*)adjacencyBuffer->GetBufferPointer(), NULL, NULL, NULL );

	// Finished with the adjacency buffer, so destroy it.
	SAFE_RELEASE( adjacencyBuffer );

	// Check if the mesh has any materials.
	if( m_staticMesh->NumMaterials > 0 )
	{
		// Create the array of materials.
		m_staticMesh->materials = new Material*[m_staticMesh->NumMaterials];

		// Get the list of materials from the material buffer.
		D3DXMATERIAL *materials = (D3DXMATERIAL*)materialBuffer->GetBufferPointer();

		// Load each material into the array via the material manager.
		for( unsigned long m = 0; m < m_staticMesh->NumMaterials; m++ )
		{
			// Ensure the material has a texture.
			if( materials[m].pTextureFilename )
			{
				// Get the name of the material's script and load it.
				char *name = new char[strlen( materials[m].pTextureFilename ) + 5];
				sprintf( name, "%s.txt", materials[m].pTextureFilename );
				m_staticMesh->materials[m] = Engine::GetInstance()->GetMaterialManager()->Add( name, GetPath() );
				SAFE_DELETE_ARRAY( name );
			}
			else
				m_staticMesh->materials[m] = NULL;
		}
	}

	// Create the bounding volume around the mesh.
	BoundingVolumeFromMesh( m_staticMesh->originalMesh );

	// Destroy the material buffer.
	SAFE_RELEASE( materialBuffer );

	// Create a vertex array and an array of indices into the vertex array.
	m_vertices = new Vertex[m_staticMesh->originalMesh->GetNumVertices()];
	m_indices = new unsigned short[m_staticMesh->originalMesh->GetNumFaces() * 3];

	// Use the arrays to store a local copy of the static mesh's vertices and
	// indices so that they can be used by the scene manager on the fly.
	Vertex* verticesPtr;
	m_staticMesh->originalMesh->LockVertexBuffer( 0, (void**)&verticesPtr );
	unsigned short *indicesPtr;
	m_staticMesh->originalMesh->LockIndexBuffer( 0, (void**)&indicesPtr );

	memcpy( m_vertices, verticesPtr, VERTEX_FVF_SIZE * m_staticMesh->originalMesh->GetNumVertices() );
	memcpy( m_indices, indicesPtr, sizeof( unsigned short ) * m_staticMesh->originalMesh->GetNumFaces() * 3 );

	m_staticMesh->originalMesh->UnlockVertexBuffer();
	m_staticMesh->originalMesh->UnlockIndexBuffer();
}
Code example #9
File: LoadLua.cpp Project: ggcrunchy/ui-edit-v1
static int PrepareFrame (lua_State * L)
{
	PrepareFrame();

	return 0;
}
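
The binding above follows the standard lua_CFunction shape: take the interpreter state, call into C++, and report zero return values. A hedged, self-contained sketch of how such a function is typically exposed to scripts; the state setup, stub, and global name are illustrative, not the project's actual registration code:

extern "C" {
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
}

// Stub standing in for the engine-side call wrapped by the binding.
static void PrepareFrame() {}

// Same shape as the example: forward to C++, push nothing back to Lua.
static int l_PrepareFrame( lua_State * )
{
	PrepareFrame();
	return 0;
}

int main()
{
	lua_State *L = luaL_newstate();
	luaL_openlibs( L );
	lua_register( L, "PrepareFrame", l_PrepareFrame );  // visible as a Lua global
	luaL_dostring( L, "PrepareFrame()" );               // now callable from script code
	lua_close( L );
	return 0;
}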
Code example #10
File: FFmpegH264Decoder.cpp Project: 70599/Waterfox
FFmpegH264Decoder<LIBAV_VER>::DecodeResult
FFmpegH264Decoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
                                            uint8_t* aData, int aSize)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  AVPacket packet;
  av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  // LibAV provides no API to retrieve the decoded sample's duration.
  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
  // As such we instead use a map using the dts as key that we will retrieve
  // later.
  // The map will have a typical size of 16 entries.
  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // Required with old version of FFmpeg/LibAV
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
             "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    NS_WARNING("FFmpeg video decoder error.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // If we've decoded a frame then we need to output it
  if (decoded) {
    int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
    FFMPEG_LOG("Got one frame output with pts=%lld opaque=%lld",
               pts, mCodecContext->reordered_opaque);
    // Retrieve duration from dts.
    // We use the first entry found matching this dts (this is done to
    // handle damaged files with multiple frames sharing the same dts)

    int64_t duration;
    if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
      NS_WARNING("Unable to retrieve duration from map");
      duration = aSample->mDuration;
      // dts values are probably incorrectly reported; so clear the map as we're
      // unlikely to find them in the future anyway. This also guards
      // against the map becoming extremely big.
      mDurationMap.Clear();
    }

    VideoInfo info;
    info.mDisplay = mDisplay;

    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = mFrame->data[0];
    b.mPlanes[0].mStride = mFrame->linesize[0];
    b.mPlanes[0].mHeight = mFrame->height;
    b.mPlanes[0].mWidth = mFrame->width;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = mFrame->data[1];
    b.mPlanes[1].mStride = mFrame->linesize[1];
    b.mPlanes[1].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[1].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = mFrame->data[2];
    b.mPlanes[2].mStride = mFrame->linesize[2];
    b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    RefPtr<VideoData> v = VideoData::Create(info,
                                              mImageContainer,
                                              aSample->mOffset,
                                              pts,
                                              duration,
                                              b,
                                              !!mFrame->key_frame,
                                              -1,
                                              mImage);
    if (!v) {
      NS_WARNING("image allocation error.");
      mCallback->Error();
      return DecodeResult::DECODE_ERROR;
    }
    mCallback->Output(v);
    return DecodeResult::DECODE_FRAME;
  }
  return DecodeResult::DECODE_NO_FRAME;
}
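
The comment in DoDecodeFrame above explains why a dts-keyed duration map is needed: the old LibAV API does not report the decoded frame's duration, so the duration is remembered at input time and looked up again at output time. A hedged sketch of such a map (Mozilla's actual mDurationMap class is not reproduced here): Insert records a duration under its dts, Find pops the first matching entry, and Clear drops everything once the dts values stop being trustworthy.

#include <cstdint>
#include <utility>
#include <vector>

class SimpleDurationMap
{
public:
  // Remember the duration of the sample carrying this dts.
  void Insert(int64_t aDts, int64_t aDuration)
  {
    mEntries.push_back(std::make_pair(aDts, aDuration));
  }

  // Retrieve and remove the first entry matching this dts.
  bool Find(int64_t aDts, int64_t& aDuration)
  {
    for (size_t i = 0; i < mEntries.size(); i++) {
      if (mEntries[i].first == aDts) {
        aDuration = mEntries[i].second;
        mEntries.erase(mEntries.begin() + i);
        return true;
      }
    }
    return false;
  }

  // Discard all entries, e.g. when dts values appear unreliable.
  void Clear() { mEntries.clear(); }

private:
  std::vector<std::pair<int64_t, int64_t>> mEntries;
};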
Code example #11
FFmpegH264Decoder<LIBAV_VER>::DecodeResult
FFmpegH264Decoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
                                            uint8_t* aData, int aSize)
{
  AVPacket packet;
  av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // Required with old version of FFmpeg/LibAV
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
             "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    NS_WARNING("FFmpeg video decoder error.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // If we've decoded a frame then we need to output it
  if (decoded) {
    int64_t pts = GetPts(packet);
    FFMPEG_LOG("Got one frame output with pts=%lld opaque=%lld",
               pts, mCodecContext->reordered_opaque);

    VideoInfo info;
    info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);

    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = mFrame->data[0];
    b.mPlanes[0].mStride = mFrame->linesize[0];
    b.mPlanes[0].mHeight = mFrame->height;
    b.mPlanes[0].mWidth = mFrame->width;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = mFrame->data[1];
    b.mPlanes[1].mStride = mFrame->linesize[1];
    b.mPlanes[1].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[1].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = mFrame->data[2];
    b.mPlanes[2].mStride = mFrame->linesize[2];
    b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    nsRefPtr<VideoData> v = VideoData::Create(info,
                                              mImageContainer,
                                              aSample->mOffset,
                                              pts,
                                              aSample->mDuration,
                                              b,
                                              aSample->mKeyframe,
                                              -1,
                                              gfx::IntRect(0, 0, mCodecContext->width, mCodecContext->height));
    if (!v) {
      NS_WARNING("image allocation error.");
      mCallback->Error();
      return DecodeResult::DECODE_ERROR;
    }
    mCallback->Output(v);
    return DecodeResult::DECODE_FRAME;
  }
  return DecodeResult::DECODE_NO_FRAME;
}
Code example #12
MediaResult
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
                                        uint8_t* aData, int aSize,
                                        bool* aGotFrame,
                                        MediaDataDecoder::DecodedData& aResults)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = mLastInputDts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  // LibAV provides no API to retrieve the decoded sample's duration.
  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
  // As such we instead use a map using the dts as key that we will retrieve
  // later.
  // The map will have a typical size of 16 entries.
  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
  }

  // Required with old version of FFmpeg/LibAV
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%" PRId64 ") dts(%" PRId64 ") Output: pts(%" PRId64 ") "
             "opaque(%" PRId64 ") pkt_pts(%" PRId64 ") pkt_dts(%" PRId64 "))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("FFmpeg video error:%d", bytesConsumed));
  }

  if (!decoded) {
    if (aGotFrame) {
      *aGotFrame = false;
    }
    return NS_OK;
  }

  // If we've decoded a frame then we need to output it
  int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
  // Retrieve duration from dts.
  // We use the first entry found matching this dts (this is done to
  // handle damaged files with multiple frames sharing the same dts)

  int64_t duration;
  if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
    NS_WARNING("Unable to retrieve duration from map");
    duration = aSample->mDuration;
  // dts values are probably incorrectly reported; so clear the map as we're
    // unlikely to find them in the future anyway. This also guards
    // against the map becoming extremely big.
    mDurationMap.Clear();
  }
  FFMPEG_LOG(
    "Got one frame output with pts=%" PRId64 " dts=%" PRId64
    " duration=%" PRId64 " opaque=%" PRId64,
    pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);

  VideoData::YCbCrBuffer b;
  b.mPlanes[0].mData = mFrame->data[0];
  b.mPlanes[1].mData = mFrame->data[1];
  b.mPlanes[2].mData = mFrame->data[2];

  b.mPlanes[0].mStride = mFrame->linesize[0];
  b.mPlanes[1].mStride = mFrame->linesize[1];
  b.mPlanes[2].mStride = mFrame->linesize[2];

  b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
  b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
  b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

  b.mPlanes[0].mWidth = mFrame->width;
  b.mPlanes[0].mHeight = mFrame->height;
  if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
  } else {
    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
  }
  if (mLib->av_frame_get_colorspace) {
    switch (mLib->av_frame_get_colorspace(mFrame)) {
      case AVCOL_SPC_BT709:
        b.mYUVColorSpace = YUVColorSpace::BT709;
        break;
      case AVCOL_SPC_SMPTE170M:
      case AVCOL_SPC_BT470BG:
        b.mYUVColorSpace = YUVColorSpace::BT601;
        break;
      case AVCOL_SPC_UNSPECIFIED:
#if LIBAVCODEC_VERSION_MAJOR >= 55
        if (mCodecContext->codec_id == AV_CODEC_ID_VP9) {
          b.mYUVColorSpace = YUVColorSpace::BT709;
        }
#endif
        break;
      default:
        break;
    }
  }
  RefPtr<VideoData> v =
    VideoData::CreateAndCopyData(mInfo,
                                  mImageContainer,
                                  aSample->mOffset,
                                  pts,
                                  duration,
                                  b,
                                  !!mFrame->key_frame,
                                  -1,
                                  mInfo.ScaledImageRect(mFrame->width,
                                                        mFrame->height));

  if (!v) {
    return MediaResult(NS_ERROR_OUT_OF_MEMORY,
                       RESULT_DETAIL("image allocation error"));
  }
  aResults.AppendElement(Move(v));
  if (aGotFrame) {
    *aGotFrame = true;
  }
  return NS_OK;
}
Code example #13
File: BMDOutput.cpp Project: TritonSailor/livepro
void BMDOutputDelegate::StartRunning ()
{
	IDeckLinkDisplayMode*	videoDisplayMode = NULL;
	
	// Get the display mode for 1080i 59.95 - mode 6
	// Changed to NTSC 23.98 - JB 20110215
	videoDisplayMode = GetDisplayModeByIndex(1);

	if (!videoDisplayMode)
		return;

	m_frameWidth = videoDisplayMode->GetWidth();
	m_frameHeight = videoDisplayMode->GetHeight();
	videoDisplayMode->GetFrameRate(&m_frameDuration, &m_frameTimescale);
	
	// Calculate the number of frames per second, rounded up to the nearest integer.  For example, for NTSC (29.97 FPS), framesPerSecond == 30.
	m_framesPerSecond = (unsigned long)((m_frameTimescale + (m_frameDuration-1))  /  m_frameDuration);
	
	
	QImage image(m_frameWidth,m_frameHeight, QImage::Format_ARGB32);
	image.fill(Qt::green);
	//m_frame = VideoFramePtr(new VideoFrame(image, 1000/30));
	
	HRESULT res;
	
	// Set the video output mode
	if (m_deckLinkOutput->EnableVideoOutput(videoDisplayMode->GetDisplayMode(), bmdVideoOutputFlagDefault) != S_OK)
	{
		//fprintf(stderr, "Failed to enable video output\n");
		qDebug() << "BMDOutputDelegate::StartRunning(): Failed to EnableVideoOutput()";
		goto bail;
	}
	
	res = m_deckLinkOutput->CreateVideoFrame(
		m_frameWidth,
		m_frameHeight,
		m_frameWidth * 4,
		bmdFormat8BitBGRA, 
		bmdFrameFlagDefault,
		&m_rgbFrame);
	
	if(res != S_OK)
	{
		qDebug() << "BMDOutputDelegate::StartRunning: Error creating RGB frame, res:"<<res;
		goto bail;
	}
	
	res = m_deckLinkOutput->CreateVideoFrame(
		m_frameWidth,
		m_frameHeight,
		m_frameWidth * 2,
		bmdFormat8BitYUV, 
		bmdFrameFlagDefault,
		&m_yuvFrame);
	
	if(res != S_OK)
	{
		qDebug() << "BMDOutputDelegate::StartRunning: Error creating YUV frame, res:"<<res;
		goto bail;
	}


// 	// Generate a frame of black
// 	if (m_deckLinkOutput->CreateVideoFrame(m_frameWidth, m_frameHeight, m_frameWidth*2, bmdFormat8BitYUV, bmdFrameFlagDefault, &m_videoFrameBlack) != S_OK)
// 	{
// 		fprintf(stderr, "Failed to create video frame\n");	
// 		goto bail;
// 	}
// 	FillBlack(m_videoFrameBlack);
// 	
// 	// Generate a frame of colour bars
// 	if (m_deckLinkOutput->CreateVideoFrame(m_frameWidth, m_frameHeight, m_frameWidth*2, bmdFormat8BitYUV, bmdFrameFlagDefault, &m_videoFrameBars) != S_OK)
// 	{
// 		fprintf(stderr, "Failed to create video frame\n");
// 		goto bail;
// 	}
// 	FillColourBars(m_videoFrameBars);
	
	
	// Begin video preroll by scheduling a second of frames in hardware
	m_totalFramesScheduled = 0;
  	for (unsigned i = 0; i < m_framesPerSecond; i++)
  	{
  		PrepareFrame();
 		ScheduleNextFrame(true);
 	}
	
	// Args: startTime, timeScale, playback speed (1.0 = normal)
	m_deckLinkOutput->StartScheduledPlayback(0, 100, 1.0);
	
	m_running = true;
	
	return;
	
bail:
	// *** Error-handling code.  Cleanup any resources that were allocated. *** //
	StopRunning();
}
Code example #14
File: M2VParser.cpp Project: Trottel/mkvtoolnix
//Maintains the time of the last start of GOP and uses the temporal_reference
//field as an offset.
int32_t M2VParser::FillQueues(){
  if(chunks.empty()){
    return -1;
  }
  bool done = false;
  while(!done){
    MediaTime myTime;
    MPEGChunk* chunk = chunks.front();
    while (chunk->GetType() != MPEG_VIDEO_PICTURE_START_CODE) {
      if (chunk->GetType() == MPEG_VIDEO_GOP_START_CODE) {
        ParseGOPHeader(chunk, m_gopHdr);
        if (frameNum != 0) {
          gopPts = highestPts + 1;
        }
        if (gopChunk)
          delete gopChunk;
        gopChunk = chunk;
        gopNum++;
        /* Perform some sanity checks */
        if(waitSecondField){
          mxerror(Y("Single field frame before GOP header detected. Fix the MPEG2 video stream before attempting to multiplex it.\n"));
        }
        if(!waitQueue.empty()){
          mxwarn(Y("Shortened GOP detected. Some frames have been dropped. You may want to fix the MPEG2 video stream before attempting to multiplex it.\n"));
          FlushWaitQueue();
        }
        if(m_gopHdr.brokenLink){
          mxinfo(Y("Found group of picture with broken link. You may want use smart reencode before attempting to multiplex it.\n"));
        }
        // There are too many broken videos to do the following so ReferenceBlock will be wrong for broken videos.
        /*
        if(m_gopHdr.closedGOP){
          ClearRef();
        }
        */
      } else if (chunk->GetType() == MPEG_VIDEO_SEQUENCE_START_CODE) {
        if (seqHdrChunk)
          delete seqHdrChunk;
        ParseSequenceHeader(chunk, m_seqHdr);
        seqHdrChunk = chunk;

      }

      chunks.erase(chunks.begin());
      if (chunks.empty())
        return -1;
      chunk = chunks.front();
    }
    MPEG2PictureHeader picHdr;
    ParsePictureHeader(chunk, picHdr);

    if (picHdr.pictureStructure == 0x03) {
      usePictureFrames = true;
    }
    myTime = gopPts + picHdr.temporalReference;
    invisible = false;

    if (myTime > highestPts)
      highestPts = myTime;

    switch(picHdr.frameType){
      case MPEG2_I_FRAME:
        PrepareFrame(chunk, myTime, picHdr);
        notReachedFirstGOP = false;
        break;
      case MPEG2_P_FRAME:
        if(firstRef == -1) break;
        PrepareFrame(chunk, myTime, picHdr);
        break;
      default: //B-frames
        if(firstRef == -1 || secondRef == -1){
          if(!m_gopHdr.closedGOP && !m_gopHdr.brokenLink){
            if(gopNum > 0){
              mxerror(Y("Found B frame without second reference in a non closed GOP. Fix the MPEG2 video stream before attempting to multiplex it.\n"));
            } else if (!probing && !bFrameMissingReferenceWarning){
              mxwarn(Y("Found one or more B frames without second reference in the first GOP. You may want to fix the MPEG2 video stream or use smart reencode before attempting to multiplex it.\n"));
              bFrameMissingReferenceWarning = true;
            }
          }
          invisible = true;
        }
        PrepareFrame(chunk, myTime, picHdr);
    }
    frameNum++;
    chunks.erase(chunks.begin());
    delete chunk;
    if (chunks.empty())
      return -1;
  }
  return 0;
}
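
FillQueues above stamps each picture with gopPts + picHdr.temporalReference, as the header comment describes: the GOP supplies a base time and temporal_reference is the picture's display-order offset within it. A hedged worked example with a typical IBBP coded order; the base time and reference values are illustrative only:

#include <cstdint>
#include <cstdio>

int main()
{
  const int64_t gopPts = 100;  // time of the last GOP start
  // Coded order I B B P B B with their temporal_reference (display-order) values.
  const int temporalReference[6] = { 2, 0, 1, 5, 3, 4 };

  for (int i = 0; i < 6; i++) {
    int64_t myTime = gopPts + temporalReference[i];  // same rule as in FillQueues
    std::printf("coded picture %d -> display time %lld\n", i, (long long)myTime);
  }
  return 0;
}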
Code example #15
MediaResult
FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
                                        uint8_t* aData,
                                        int aSize,
                                        bool* aGotFrame,
                                        DecodedData& aResults)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = const_cast<uint8_t*>(aData);
  packet.size = aSize;

  if (aGotFrame) {
    *aGotFrame = false;
  }

  if (!PrepareFrame()) {
    return MediaResult(
      NS_ERROR_OUT_OF_MEMORY,
      RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = aSample->mTime;

  while (packet.size > 0) {
    int decoded;
    int bytesConsumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
    }

    if (decoded) {
      if (mFrame->format != AV_SAMPLE_FMT_FLT &&
          mFrame->format != AV_SAMPLE_FMT_FLTP &&
          mFrame->format != AV_SAMPLE_FMT_S16 &&
          mFrame->format != AV_SAMPLE_FMT_S16P &&
          mFrame->format != AV_SAMPLE_FMT_S32 &&
          mFrame->format != AV_SAMPLE_FMT_S32P) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_DECODE_ERR,
          RESULT_DETAIL(
            "FFmpeg audio decoder outputs unsupported audio format"));
      }
      uint32_t numChannels = mCodecContext->channels;
      uint32_t samplingRate = mCodecContext->sample_rate;

      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
      if (!audio) {
        return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
      }

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!duration.IsValid()) {
        return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                           RESULT_DETAIL("Invalid sample duration"));
      }

      media::TimeUnit newpts = pts + duration;
      if (!newpts.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid count of accumulated audio samples"));
      }

      aResults.AppendElement(new AudioData(samplePosition,
                                           pts,
                                           duration,
                                           mFrame->nb_samples,
                                           Move(audio),
                                           numChannels,
                                           samplingRate,
                                           mCodecContext->channel_layout));

      pts = newpts;

      if (aGotFrame) {
        *aGotFrame = true;
      }
    }
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }
  return NS_OK;
}