Example No. 1
/*! \brief read a frame from webcam or X11 through grabber_read(),
 * display it, then encode and split it.
 * Returns a list of ast_frame structures representing the video fragments.
 * The head pointer is returned by the function, the tail pointer
 * is returned as an argument.
 */
static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_frame **tail)
{
	struct video_out_desc *v = &env->out;
	struct ast_frame *dummy;
	struct fbuf_t *loc_src = grabber_read(v);

	if (!loc_src)
		return NULL;	/* can happen, e.g. we are reading too early */

	if (tail == NULL)
		tail = &dummy;
	*tail = NULL;
	/* Scale the video for the encoder, then use it for local rendering
	 * so we will see the same as the remote party.
	 */
	my_scale(loc_src, NULL, &env->enc_in, NULL);
	show_frame(env, WIN_LOCAL);
	if (!v->sendvideo)
		return NULL;
	if (v->enc_out.data == NULL) {
		static volatile int a = 0;
		if (a++ < 2)
			ast_log(LOG_WARNING, "fail, no encoder output buffer\n");
		return NULL;
	}
	v->enc->enc_run(v);
	return v->enc->enc_encap(&v->enc_out, v->mtu, tail);
}
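For context, a hypothetical caller sketch follows, assuming the usual Asterisk frame-list conventions: the head/tail pair above is a singly linked list of fragments chained through the frame_list entry, so a caller can walk it with AST_LIST_NEXT() and hand each fragment to ast_write(). The helper name, the loop, and the env->owner usage (the field appears in Example No. 3) are illustrative, not the original call site.

/* Hypothetical caller sketch (not the original call site): walk the
 * fragment list returned by get_video_frames() and write each fragment
 * to the owner channel. AST_LIST_NEXT() and ast_write() are standard
 * Asterisk APIs; frame ownership/freeing follows the channel driver's
 * conventions and is omitted here. */
static void send_video_fragments(struct video_desc *env)
{
	struct ast_frame *f, *tail = NULL;

	f = get_video_frames(env, &tail);	/* head; tail comes back via the argument */
	while (f) {
		struct ast_frame *next = AST_LIST_NEXT(f, frame_list);
		if (env->owner)
			ast_write(env->owner, f);	/* deliver one fragment */
		f = next;
	}
}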
Example No. 2
static int decode_example(const char *filename)
{
    AVFormatContext *fctx = NULL;
    AVCodec *codec;
    AVCodecContext *avctx;
    int video_st = -1;
    int i, got_pic;
    AVFrame *picture, *tmp_picture;
    int size;
    uint8_t *tmp_buf;
    int ret = 0;

    if (avformat_open_input(&fctx, filename, NULL, NULL) < 0)
        return AVERROR(EINVAL);

    if (av_find_stream_info(fctx) < 0) {
        av_close_input_file(fctx);
        return AVERROR(EINVAL);
    }

    av_dump_format(fctx, 0, filename, 0);

    for (i = 0; i < fctx->nb_streams; i++) {
        if (fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_st = i;
            break;
        }
    }
    if (video_st == -1) {
        av_log(NULL, AV_LOG_ERROR, "no video stream found\n");
        av_close_input_file(fctx);
        return AVERROR(EINVAL);
    }

    avctx = fctx->streams[video_st]->codec;

    codec = avcodec_find_decoder_by_name("libdm365_h264");
    if (codec == NULL) {
        av_log(avctx, AV_LOG_ERROR, "unsupported codec\n");
        av_close_input_file(fctx);
        return AVERROR(EINVAL);
    }

    if (avcodec_open(avctx, codec) < 0) {
        av_log(avctx, AV_LOG_ERROR, "cannot open codec\n");
        av_close_input_file(fctx);
        return AVERROR(EINVAL);
    }

    picture = avcodec_alloc_frame();
    tmp_picture = avcodec_alloc_frame();
    if (picture == NULL || tmp_picture == NULL) {
        ret = AVERROR(ENOMEM);
        goto decode_cleanup;
    }

    /* match the NV12 layout used by avpicture_fill() below */
    size = avpicture_get_size(PIX_FMT_NV12, avctx->width, avctx->height);
    tmp_buf = av_malloc(size);
    if (tmp_buf == NULL) {
        ret = AVERROR(ENOMEM);
        goto decode_cleanup;
    }
    avpicture_fill((AVPicture *)tmp_picture, tmp_buf,
            PIX_FMT_NV12, avctx->width, avctx->height);

    for (i = 0; i < 10; i++) {
        AVPacket pkt;
        int nb;
        char fname[32];
        int factor = 2;

        if (av_read_frame(fctx, &pkt) < 0)
            break;

        nb = avcodec_decode_video2(avctx, picture, &got_pic, &pkt);
        av_free_packet(&pkt);   /* the packet is no longer needed */
        if (nb < 0) {
            av_log(avctx, AV_LOG_ERROR, "error in decoding\n");
            goto decode_cleanup;
        }
        if (!got_pic)   /* the decoder may buffer input before emitting a frame */
            continue;
        printf("Decoded frame: %d\n", i);

        my_scale((AVPicture *) picture, avctx->width, avctx->height,
                (AVPicture *) tmp_picture, factor);

        snprintf(fname, sizeof(fname), "frame%02d.pgm", i + 1);
        pgm_save(picture->data[0], picture->linesize[0],
                avctx->width, avctx->height, fname);

        snprintf(fname, sizeof(fname), "frame%02d.bmp", i + 1);
        save_image((AVPicture *)tmp_picture, avctx->pix_fmt,
                avctx->width/factor, avctx->height/factor, fname);
    }

decode_cleanup:
    av_free(picture);
    if (tmp_picture)
        av_free(tmp_picture->data[0]);  /* tmp_buf, installed by avpicture_fill() */
    av_free(tmp_picture);
    avcodec_close(avctx);       /* close the codec before the demuxer frees its context */
    av_close_input_file(fctx);
    return ret;
}
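A minimal driver sketch for this example, assuming the same FFmpeg generation as the code above (where av_register_all() was still required and also registered the codecs); the command-line handling is illustrative.

#include <stdio.h>
#include <libavformat/avformat.h>

/* Minimal driver sketch (assumption, not part of the original source):
 * register formats and codecs once, then decode the file named on the
 * command line with decode_example() above. */
int main(int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <input file>\n", argv[0]);
        return 1;
    }
    av_register_all();  /* registers demuxers and, in this API era, codecs too */
    return decode_example(argv[1]) < 0 ? 1 : 0;
}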
Example No. 3
/*! \brief refreshes the buffers of all the devices by calling
 * grabber_read() on each device in the device table.
 * It encodes the primary source buffer; if picture-in-picture mode is
 * enabled, it also encodes the secondary source buffer into the buffer
 * to be split. The encoded buffer is split to build the local and the
 * remote view.
 * Returns a list of ast_frame structures representing the video fragments.
 * The head pointer is returned by the function, the tail pointer
 * is returned as an argument.
 *
 * \param env = video environment descriptor
 * \param tail = tail pointer (practically a return value)
 */
static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_frame **tail)
{
	struct video_out_desc *v = &env->out;
	struct ast_frame *dummy;
	struct fbuf_t *loc_src_primary = NULL, *p_read;
	int i;
	/* if no device was found in the config file */
	if (!env->out.device_num)
		return NULL;
	/* every time this function is called we refresh the buffers of every device,
	updating the private device buffer in the device table */
	for (i = 0; i < env->out.device_num; i++) {
		p_read = grabber_read(&env->out.devices[i], env->out.fps);
		/* use it only if not NULL; otherwise we keep the last good buffer */
		if (p_read)
			env->out.devices[i].dev_buf = p_read;
	}
	/* select the primary device buffer as the one to encode */
	loc_src_primary = env->out.devices[env->out.device_primary].dev_buf;
	/* loc_src_primary can be NULL if the device has been turned off during
	execution or it is read too early */
	if (loc_src_primary) {
		/* Scale the video for the encoder, then use it for local rendering
		so we will see the same as the remote party */
		my_scale(loc_src_primary, NULL, &env->enc_in, NULL);
	}
	if (env->out.picture_in_picture) { /* the picture in picture mode is enabled */
		struct fbuf_t *loc_src_secondary;
		/* reads from the secondary source */
		loc_src_secondary = env->out.devices[env->out.device_secondary].dev_buf;
		if (loc_src_secondary) {
			env->enc_in.win_x = env->out.pip_x;
			env->enc_in.win_y = env->out.pip_y;
			env->enc_in.win_w = env->enc_in.w/3;
			env->enc_in.win_h = env->enc_in.h/3;
			/* scales to the correct geometry and inserts the
			picture-in-picture into the enc_in buffer */
			my_scale(loc_src_secondary, NULL, &env->enc_in, NULL);
			/* restore the normal (non picture-in-picture) geometry */
			env->enc_in.win_x = 0;
			env->enc_in.win_y = 0;
			env->enc_in.win_w = 0;
			env->enc_in.win_h = 0;
		}
		else {
			/* loc_src_secondary can be NULL if the device has been turned off
			during execution or it is read too early */
			env->out.picture_in_picture = 0; /* disable picture in picture */
		}
	}
	show_frame(env, WIN_LOCAL); /* local rendering */
	for (i = 0; i < env->out.device_num; i++) 
		show_frame(env, i+WIN_SRC1); /* rendering of every source device in thumbnails */
	if (tail == NULL)
		tail = &dummy;
	*tail = NULL;
	/* if no reason for encoding, do not encode */
	if (!env->owner || !loc_src_primary || !v->sendvideo)
		return NULL;
	if (v->enc_out.data == NULL) {
		static volatile int a = 0;
		if (a++ < 2)
			ast_log(LOG_WARNING, "fail, no encoder output buffer\n");
		return NULL;
	}
	v->enc->enc_run(v);
	return v->enc->enc_encap(&v->enc_out, v->mtu, tail);
}
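The win_* fields appear to select the destination sub-rectangle that my_scale() writes into: with win_w = w/3 and win_h = h/3 the secondary source covers one ninth of the encoded frame, anchored at (pip_x, pip_y). A hypothetical guard, assuming only the fbuf_t fields used above, would keep that window inside the frame:

/* Hypothetical helper (not in the original source): clamp the
 * picture-in-picture window so it cannot extend past the frame edges.
 * Assumes only the fbuf_t fields used above (w, h, win_x/y/w/h). */
static void clamp_pip_window(struct fbuf_t *b)
{
	if (b->win_w > b->w)
		b->win_w = b->w;
	if (b->win_h > b->h)
		b->win_h = b->h;
	if (b->win_x + b->win_w > b->w)
		b->win_x = b->w - b->win_w;
	if (b->win_y + b->win_h > b->h)
		b->win_y = b->h - b->win_h;
}

Called right after the win_* assignments, a guard like this would protect my_scale() from an out-of-range pip_x/pip_y.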
Example No. 4
static int mad_play_frame(input_object *obj, char *buf)
{
	struct mad_local_data *data;
	struct mad_pcm *pcm;
	mad_fixed_t const *left_ch;
	mad_fixed_t const *right_ch;
	int16_t	*output;
	int nsamples;
	int nchannels;

	if (!obj)
		return 0;
	data = (struct mad_local_data *)obj->local_data;
	if (!data)
		return 0;
	if (data->bytes_avail < 3072) {
		/*
		   alsaplayer_error("Filling buffer = %d,%d",
		   data->bytes_avail,
		   data->map_offset + MAD_BUFSIZE - data->bytes_avail);
		   */
		fill_buffer(data, -1); /* data->map_offset + MAD_BUFSIZE - data->bytes_avail); */
		mad_stream_buffer(&data->stream, data->mad_map, data->bytes_avail);
	} else {
		/* alsaplayer_error("bytes_avail = %d", data->bytes_avail); */
	}
	if (mad_frame_decode(&data->frame, &data->stream) == -1) {
		if (!MAD_RECOVERABLE(data->stream.error)) {
			/*
			   alsaplayer_error("MAD error: %s (%d). fatal", 
			   error_str(data->stream.error, data->str),
			   data->bytes_avail);
			   */	
			mad_frame_mute(&data->frame);
			return 0;
		} else {
			if (reader_eof(data->mad_fd)) {
				return 0;
			}	
			//alsaplayer_error("MAD error: %s (not fatal)", error_str(data->stream.error, data->str)); 
			memset(buf, 0, obj->frame_size);
			return 1;
		}
	}
	data->current_frame++;
	if (data->seekable && data->current_frame < (obj->nr_frames + FRAME_RESERVE)) {
		data->frames[data->current_frame] = 
			data->map_offset + data->stream.this_frame - data->mad_map;
		if (data->current_frame > 3 && 
				(data->frames[data->current_frame] -
				 data->frames[data->current_frame-3]) < 6) {
			return 0;
		}		
		if (data->highest_frame < data->current_frame)
			data->highest_frame = data->current_frame;
	}				

	mad_synth_frame (&data->synth, &data->frame);

	{
		pcm = &data->synth.pcm;
		output = (int16_t *)buf;
		nsamples = pcm->length;
		nchannels = pcm->channels;
		if (nchannels != obj->nr_channels) {
			alsaplayer_error("ERROR: bad data stream! (channels: %d != %d, frame %d)",
					nchannels, 
					obj->nr_channels,
					data->current_frame);
			mad_frame_mute(&data->frame);
			memset(buf, 0, obj->frame_size);
			return 1;
		}	
		obj->nr_channels = nchannels;
		if (data->samplerate != data->frame.header.samplerate) {
			alsaplayer_error("ERROR: bad data stream! (samplerate: %d != %d, frame %d)",
					data->samplerate, 
					data->frame.header.samplerate,
					data->current_frame);
			mad_frame_mute(&data->frame);
			memset(buf, 0, obj->frame_size);
			return 1;
		}	
		data->samplerate = data->frame.header.samplerate;
		left_ch = pcm->samples[0];
		right_ch = pcm->samples[1];
		while (nsamples--) {
			*output++ = my_scale(*(left_ch++));
			if (nchannels == 1) {
				/* mono: duplicate the left sample into both output channels */
				*output++ = my_scale(*(left_ch-1));
			} else { /* nchannels == 2 */
				*output++ = my_scale(*(right_ch++));
			}
		}
	}
	data->bytes_avail = data->stream.bufend - data->stream.next_frame;
	return 1;
}
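my_scale() is not shown in this example; it converts libmad's fixed-point samples (mad_fixed_t, with MAD_F_FRACBITS fractional bits) to signed 16-bit PCM. A minimal sketch modeled on the canonical scale() helper from libmad's minimad.c (round, clip, quantize); the plugin's real version may differ, e.g. by adding dithering.

#include <stdint.h>
#include <mad.h>

/* Sketch of my_scale(), modeled on minimad.c's scale(): round the
 * fixed-point sample, clip it to the legal range, then keep the top
 * 16 bits. The real implementation in this plugin may differ. */
static inline int16_t my_scale(mad_fixed_t sample)
{
	sample += (1L << (MAD_F_FRACBITS - 16));	/* round */
	if (sample >= MAD_F_ONE)			/* clip */
		sample = MAD_F_ONE - 1;
	else if (sample < -MAD_F_ONE)
		sample = -MAD_F_ONE;
	return sample >> (MAD_F_FRACBITS + 1 - 16);	/* quantize to 16 bits */
}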