Example #1
//Frees the currently loaded package
void CALL_CONVT ac_free_package(lp_ac_package pPackage) {
  // Free the packet data: old FFmpeg versions expose a per-packet
  // destruct callback instead of reference-counted buffers
  if (pPackage != NULL) {
    AVPacket* pkt = &((lp_ac_package_data)pPackage)->ffpackage;
    if (pkt) {
      if (pkt->destruct) pkt->destruct(pkt);
      pkt->data = NULL; pkt->size = 0;
    }
    av_free((lp_ac_package_data)pPackage);
  }
}
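Note: the destruct callback checked above is the old FFmpeg API; newer releases dropped AVPacket.destruct in favour of reference-counted buffers. A minimal sketch of the same cleanup against the current API (assuming the packet was obtained with av_packet_alloc(); not part of the example above):

#include <libavcodec/avcodec.h>

/* Sketch only: release a heap-allocated, reference-counted packet.
 * av_packet_free() unreferences the data, frees the AVPacket itself
 * and sets the caller's pointer to NULL. */
void free_packet_modern(AVPacket **ppkt) {
  if (ppkt && *ppkt)
    av_packet_free(ppkt);
}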
void ofxMultiVideoStreamer::endGrab() {
  grabber.endGrab();
  grabber.downloadTextures();

  // note: this AVPacket comes from the streamer's own memory pool
  // (a byte-buffer packet type), not libavcodec's AVPacket struct
  AVPacket* vid = memory_pool.getFreeVideoPacket();
  if(!vid) {
    printf("error: cannot get a free memory packet. this often means that you're encoding too many streams or that your cpu isn't fast enough to keep up with the encoding. try reducing the bitrate, fps or video size.\n");
  }
  else {
    grabber.assignPixels(vid->data);

    vid->clearMulti();
    vid->makeMulti();

    // set the correct strides and plane pointers for each of the streams
    for(size_t i = 0; i < mvs.size(); ++i) {
      MultiStreamerInfo* msi = mvs[i];
      MultiAVPacketInfo info;
      grabber.assignPlanes(msi->id, vid->data, info.planes);
      grabber.assignStrides(msi->id, info.strides);
      vid->addMulti(msi->id, info);
    }

    vid->makeVideoPacket();
    vid->setTimeStamp(grabber.getTimeStamp());

    mvs.addVideo(vid);
  }
}
void ofxMultiVideoStreamer::addAudio(float* input, int nsize, int nchannels) {
  size_t nbytes = nsize * sizeof(float) * nchannels;

  // @todo we're delaying allocation here; we should do this in setup(), but then VideoStreamer must be updated.
  if(!has_allocated_audio_pool) {
    if(!memory_pool.allocateAudioFrames(128, nbytes)) {
      printf("error: cannot allocate audio!\n");
      ::exit(EXIT_FAILURE);
    }

    has_allocated_audio_pool = true;
  }

  AVPacket* pkt = memory_pool.getFreeAudioPacket();
  if(!pkt) {
    printf("error: cannot find a free audio packet, make sure that we've allocated enough.\n");
    return ;
  }

  uint8_t* ptr = (uint8_t*)input;
  pkt->data.assign(ptr, ptr + nbytes);
  pkt->setTimeStamp(grabber.getTimeStamp());
  mvs.addAudio(pkt);
}
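For context, a hypothetical caller (not part of the addon): in an openFrameworks app the interleaved float samples delivered to the audio-input callback can be forwarded straight to addAudio(), which copies nsize * sizeof(float) * nchannels bytes into a pooled packet, so the input buffer does not need to outlive the call. Assumes an ofApp with an ofxMultiVideoStreamer member named streamer.

// Hypothetical usage sketch, assuming the classic openFrameworks
// audio-input callback signature.
void ofApp::audioIn(float* input, int bufferSize, int nChannels) {
  // addAudio() copies the samples immediately, so `input` may be reused
  // by the sound stream as soon as this call returns.
  streamer.addAudio(input, bufferSize, nChannels);
}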
void Decoder::decode(const Packet::ConstPtr &packet,
		sensor_msgs::ImagePtr& image, int &got_image)
{
	/* Declarations */
	int size;
	AVPacket pkt;
	AVFrame* frame_in;
	AVFrame* frame_out;

	const int out_width = width_out_ == -1 ? packet->width : width_out_;
	const int out_height = height_out_ == -1 ? packet->height : height_out_;
	const int out_pix_fmt = pix_fmt_out_ == -1 ? packet->pix_fmt : pix_fmt_out_;

	/* Check if the codec context has to be reinitialized */
	if (!codec_context_ || packet->codec_ID != codec_context_->codec_id
			|| packet->compressed_pix_fmt != codec_context_->pix_fmt
			|| packet->compressed_width != codec_context_->width
			|| packet->compressed_height != codec_context_->height)
	{
		free_context();
		init_decoder(packet->compressed_width, packet->compressed_height,
				packet->compressed_pix_fmt, packet->codec_ID);
	}

	/* Get local references to the AVFrame structs */
	frame_in = frame_in_->get_frame();

	if (out_width == packet->compressed_width
			&& out_height == packet->compressed_height
			&& out_pix_fmt == packet->compressed_pix_fmt)
		frame_out = frame_in_->get_frame();
	else
	{
		/* Check if the output frame has to be reinitialized */
		frame_out = frame_out_ ? frame_out_->get_frame() : NULL;

		if (!frame_out_ || frame_out->width != out_width
				|| frame_out->height != out_height
				|| frame_out->format != out_pix_fmt)
		{
			frame_out_ = boost::make_shared<Frame>(out_width, out_height,
					out_pix_fmt);
			frame_out = frame_out_->get_frame();
		}
	}

	/* Check if the received packet is valid */
	if (previous_packet_ + 1 != packet->seq)
		has_keyframe_ = false;

	previous_packet_ = packet->seq;

	/* Check if there is a valid keyframe stored */
	if (!has_keyframe_)
	{
		if (packet->keyframe)
			has_keyframe_ = true;
		else
		{
			got_image = 0;
			return;
		}
	}

	/* Fill the AVPacket */
	if (av_new_packet(&pkt, packet->data.size())
			|| static_cast<unsigned int>(pkt.size) != packet->data.size())
		throw std::runtime_error("Could not allocate AV packet data.");

	memcpy(pkt.data, &packet->data[0], pkt.size);

	pkt.pts = packet->pts;
	pkt.flags = packet->keyframe ? AV_PKT_FLAG_KEY : 0;

	/* Decode packet */
	if (avcodec_decode_video2(codec_context_, frame_in, &got_image, &pkt) < 0)
		std::cout << "[decode] Could not decode packet." << std::endl;

	/* Free the packet data */
	if (pkt.destruct)
		pkt.destruct(&pkt);
	else
		av_free_packet(&pkt);

	if (!got_image)
		return;

	if (frame_in != frame_out)
	{
		/* Get SWS Context */
		sws_context_ = sws_getCachedContext(sws_context_, frame_in->width,
				frame_in->height, (enum AVPixelFormat) frame_in->format,
				frame_out->width, frame_out->height,
				(enum AVPixelFormat) frame_out->format, SWS_BICUBIC, NULL, NULL,
				NULL);
		if (!sws_context_)
			throw std::runtime_error("Could not initialize sws context.");

		/* Transform image */
		sws_scale(sws_context_, frame_in->data, frame_in->linesize, 0,
				frame_in->height, frame_out->data, frame_out->linesize);
	}

	/* Retrieve the PTS for the AVFrame */
	image->header.stamp = ros::Time(
			static_cast<uint32_t>(frame_in->pkt_pts >> 32),
			static_cast<uint32_t>(frame_in->pkt_pts));

	/* Store image */
	image->header.seq = packet->seq;
	image->width = frame_out->width;
	image->height = frame_out->height;
	image->step = frame_out->linesize[0];

	if (!pix_fmt_libav2ros(frame_out->format, image->encoding,
			image->is_bigendian))
		throw std::runtime_error(
				"Can not handle requested output pixel format.");

	size = frame_out->linesize[0] * frame_out->height;
	image->data.resize(size);
	image->data.assign(frame_out->data[0], frame_out->data[0] + size);
}
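avcodec_decode_video2() and the destruct/av_free_packet() cleanup used above belong to the old libavcodec API; current FFmpeg replaces them with the send/receive pair and reference-counted packets. A minimal sketch under that assumption (FFmpeg 4.x or later, error handling trimmed):

#include <libavcodec/avcodec.h>

/* Sketch only: decode one compressed packet with the send/receive API.
 * Returns 0 when a frame was produced, AVERROR(EAGAIN) when the decoder
 * needs more input, or another negative AVERROR on failure. */
static int decode_one(AVCodecContext *ctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt);   /* pass NULL to start flushing */
    if (ret < 0)
        return ret;
    ret = avcodec_receive_frame(ctx, frame);
    if (pkt)
        av_packet_unref(pkt);                  /* replaces destruct()/av_free_packet() */
    return ret;
}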
Example #5
File: lavf.c Project: tufei/x264c64
static int read_frame_internal( x264_picture_t *p_pic, lavf_hnd_t *h, int i_frame, video_info_t *info )
{
    if( h->first_pic && !info )
    {
        /* see if the frame we are requesting is the frame we have already read and stored.
         * if so, retrieve the pts and image data before freeing it. */
        if( !i_frame )
        {
            XCHG( x264_image_t, p_pic->img, h->first_pic->img );
            p_pic->i_pts = h->first_pic->i_pts;
        }
        lavf_input.picture_clean( h->first_pic );
        free( h->first_pic );
        h->first_pic = NULL;
        if( !i_frame )
            return 0;
    }

    AVCodecContext *c = h->lavf->streams[h->stream_id]->codec;
    lavf_pic_t *pic_h = p_pic->opaque;
    AVPacket *pkt = &pic_h->packet;
    AVFrame *frame = &pic_h->frame;

    while( i_frame >= h->next_frame )
    {
        int finished = 0;
        while( !finished && av_read_frame( h->lavf, pkt ) >= 0 )
            if( pkt->stream_index == h->stream_id )
            {
                c->reordered_opaque = pkt->pts;
                if( avcodec_decode_video2( c, frame, &finished, pkt ) < 0 )
                    fprintf( stderr, "lavf [warning]: video decoding failed on frame %d\n", h->next_frame );
            }
        if( !finished )
        {
            if( avcodec_decode_video2( c, frame, &finished, pkt ) < 0 )
                fprintf( stderr, "lavf [warning]: video decoding failed on frame %d\n", h->next_frame );
            if( !finished )
                return -1;
        }
        h->next_frame++;
    }

    if( check_swscale( h, c, i_frame ) )
        return -1;
    /* FIXME: avoid sws_scale where possible (no colorspace conversion). */
    sws_scale( h->scaler, frame->data, frame->linesize, 0, c->height, p_pic->img.plane, p_pic->img.i_stride );

    if( info )
        info->interlaced = frame->interlaced_frame;

    if( h->vfr_input )
    {
        p_pic->i_pts = 0;
        if( frame->reordered_opaque != AV_NOPTS_VALUE )
            p_pic->i_pts = frame->reordered_opaque;
        else if( pkt->dts != AV_NOPTS_VALUE )
            p_pic->i_pts = pkt->dts; // for AVI files
        else if( info )
        {
            h->vfr_input = info->vfr = 0;
            goto exit;
        }
        if( !h->pts_offset_flag )
        {
            h->pts_offset = p_pic->i_pts;
            h->pts_offset_flag = 1;
        }
        p_pic->i_pts -= h->pts_offset;
    }

exit:
    if( pkt->destruct )
        pkt->destruct( pkt );
    avcodec_get_frame_defaults( frame );
    return 0;
}
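The cleanup at the exit label also relies on the old API: AVPacket.destruct and avcodec_get_frame_defaults() were removed from FFmpeg long ago, and sizeof(AVFrame) is no longer part of the public ABI, so the frame embedded in lavf_pic_t would have to come from av_frame_alloc() instead. A sketch of the equivalent per-iteration cleanup on current FFmpeg:

    /* Sketch only: modern equivalents of the cleanup above. */
    av_packet_unref( pkt );   /* drops the packet's reference-counted buffer */
    av_frame_unref( frame );  /* resets the frame for reuse on the next call */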