Code example #1
static bool set_obs_frame_colorprops(struct ff_frame *frame,
		struct ffmpeg_source *s, struct obs_source_frame *obs_frame)
{
	enum AVColorSpace frame_cs = av_frame_get_colorspace(frame->frame);
	enum video_colorspace obs_cs;

	switch(frame_cs) {
	case AVCOL_SPC_BT709:       obs_cs = VIDEO_CS_709; break;
	case AVCOL_SPC_SMPTE170M:
	case AVCOL_SPC_BT470BG:     obs_cs = VIDEO_CS_601; break;
	case AVCOL_SPC_UNSPECIFIED: obs_cs = VIDEO_CS_DEFAULT; break;
	default:
		FF_BLOG(LOG_WARNING, "frame using an unsupported colorspace %d",
				frame_cs);
		obs_cs = VIDEO_CS_DEFAULT;
	}

	enum video_range_type range;
	obs_frame->format = ffmpeg_to_obs_video_format(frame->frame->format);
	obs_frame->full_range =
		frame->frame->color_range == AVCOL_RANGE_JPEG;

	if (s->range != VIDEO_RANGE_DEFAULT)
		obs_frame->full_range = s->range == VIDEO_RANGE_FULL;

	range = obs_frame->full_range ? VIDEO_RANGE_FULL : VIDEO_RANGE_PARTIAL;

	if (!video_format_get_parameters(obs_cs,
			range, obs_frame->color_matrix,
			obs_frame->color_range_min,
			obs_frame->color_range_max)) {
		FF_BLOG(LOG_ERROR, "Failed to get video format "
				"parameters for video format %u",
				obs_cs);
		return false;
	}

	return true;
}
Code example #2
File: v4l2-input.c, Project: Bl00drav3n/obs-studio
/**
 * Prepare the output frame structure for obs and compute plane offsets
 *
 * Basically all data apart from memory pointers and the timestamp is known
 * before the capture starts. This function prepares the obs_source_frame
 * struct with all the data that is already known.
 *
 * v4l2 stores all planes in one contiguous memory segment, so we simply
 * compute offsets that are added to the buffer start address to give obs
 * the correct data pointers for the individual planes (see the usage
 * sketch after this function).
 */
static void v4l2_prep_obs_frame(struct v4l2_data *data,
	struct obs_source_frame *frame, size_t *plane_offsets)
{
	memset(frame, 0, sizeof(struct obs_source_frame));
	memset(plane_offsets, 0, sizeof(size_t) * MAX_AV_PLANES);

	frame->width = data->width;
	frame->height = data->height;
	frame->format = v4l2_to_obs_video_format(data->pixfmt);
	video_format_get_parameters(VIDEO_CS_DEFAULT, VIDEO_RANGE_PARTIAL,
		frame->color_matrix, frame->color_range_min,
		frame->color_range_max);

	switch(data->pixfmt) {
	case V4L2_PIX_FMT_NV12:
		frame->linesize[0] = data->linesize;
		frame->linesize[1] = data->linesize / 2;
		plane_offsets[1] = data->linesize * data->height;
		break;
	case V4L2_PIX_FMT_YVU420:
		frame->linesize[0] = data->linesize;
		frame->linesize[1] = data->linesize / 2;
		frame->linesize[2] = data->linesize / 2;
		plane_offsets[1] = data->linesize * data->height * 5 / 4;
		plane_offsets[2] = data->linesize * data->height;
		break;
	case V4L2_PIX_FMT_YUV420:
		frame->linesize[0] = data->linesize;
		frame->linesize[1] = data->linesize / 2;
		frame->linesize[2] = data->linesize / 2;
		plane_offsets[1] = data->linesize * data->height;
		plane_offsets[2] = data->linesize * data->height * 5 / 4;
		break;
	default:
		frame->linesize[0] = data->linesize;
		break;
	}
}
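
The doc comment above describes the offset scheme, but the function itself only fills in the static frame data. Below is a minimal sketch of how the prepared frame and plane offsets could be applied once a capture buffer has been dequeued; the helper name and the `source`, `mapped_buffer` and `buffer_timestamp` parameters are assumptions for illustration (they do not appear in the snippet above), and the sketch relies on the same obs/v4l2 headers as v4l2-input.c.

static void v4l2_output_mapped_buffer(struct v4l2_data *data,
		obs_source_t *source, uint8_t *mapped_buffer,
		uint64_t buffer_timestamp)
{
	struct obs_source_frame frame;
	size_t plane_offsets[MAX_AV_PLANES];

	/* fill in everything that is known before capture starts */
	v4l2_prep_obs_frame(data, &frame, plane_offsets);

	/* all planes live in one contiguous buffer, so each plane pointer
	 * is just the buffer start plus its precomputed offset */
	for (size_t i = 0; i < MAX_AV_PLANES; i++)
		frame.data[i] = mapped_buffer + plane_offsets[i];

	frame.timestamp = buffer_timestamp;
	obs_source_output_video(source, &frame);
}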
Code example #3
void DeckLinkDeviceInstance::HandleVideoFrame(
		IDeckLinkVideoInputFrame *videoFrame, const uint64_t timestamp)
{
	if (videoFrame == nullptr)
		return;

	void *bytes;
	if (videoFrame->GetBytes(&bytes) != S_OK) {
		LOG(LOG_WARNING, "Failed to get video frame data");
		return;
	}

	currentFrame.data[0]     = (uint8_t *)bytes;
	currentFrame.linesize[0] = (uint32_t)videoFrame->GetRowBytes();
	currentFrame.width       = (uint32_t)videoFrame->GetWidth();
	currentFrame.height      = (uint32_t)videoFrame->GetHeight();
	currentFrame.timestamp   = timestamp;

	video_format_get_parameters(VIDEO_CS_601, VIDEO_RANGE_PARTIAL,
			currentFrame.color_matrix, currentFrame.color_range_min,
			currentFrame.color_range_max);

	obs_source_output_video(decklink->GetSource(), &currentFrame);
}
Code example #4
File: ffmpeg-decode.c, Project: AmesianX/obs-studio
bool ffmpeg_decode_video(struct ffmpeg_decode *decode,
		uint8_t *data, size_t size, long long *ts,
		struct obs_source_frame *frame,
		bool *got_output)
{
	AVPacket packet = {0};
	int got_frame = false;
	enum video_format new_format;
	int ret;

	*got_output = false;

	copy_data(decode, data, size);

	av_init_packet(&packet);
	packet.data     = decode->packet_buffer;
	packet.size     = (int)size;
	packet.pts      = *ts;

	if (decode->codec->id == AV_CODEC_ID_H264 &&
			obs_avc_keyframe(data, size))
		packet.flags |= AV_PKT_FLAG_KEY;

	if (!decode->frame) {
		decode->frame = av_frame_alloc();
		if (!decode->frame)
			return false;
	}

	ret = avcodec_send_packet(decode->decoder, &packet);
	if (ret == 0)
		ret = avcodec_receive_frame(decode->decoder, decode->frame);

	got_frame = (ret == 0);

	if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
		ret = 0;

	if (ret < 0)
		return false;
	else if (!got_frame)
		return true;

	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		frame->data[i]     = decode->frame->data[i];
		frame->linesize[i] = decode->frame->linesize[i];
	}

	new_format = convert_pixel_format(decode->frame->format);
	if (new_format != frame->format) {
		bool success;
		enum video_range_type range;

		frame->format = new_format;
		frame->full_range =
			decode->frame->color_range == AVCOL_RANGE_JPEG;

		range = frame->full_range ?
			VIDEO_RANGE_FULL : VIDEO_RANGE_PARTIAL;

		success = video_format_get_parameters(VIDEO_CS_601,
				range, frame->color_matrix,
				frame->color_range_min, frame->color_range_max);
		if (!success) {
			blog(LOG_ERROR, "Failed to get video format "
			                "parameters for video format %u",
			                VIDEO_CS_601);
			return false;
		}
	}

	*ts = decode->frame->pkt_pts;

	frame->width  = decode->frame->width;
	frame->height = decode->frame->height;
	frame->flip   = false;

	if (frame->format == VIDEO_FORMAT_NONE)
		return false;

	*got_output = true;
	return true;
}
Code example #5
File: media.c, Project: AmesianX/obs-studio
static void mp_media_next_video(mp_media_t *m, bool preload)
{
	struct mp_decode *d = &m->v;
	struct obs_source_frame *frame = &m->obsframe;
	enum video_format new_format;
	enum video_colorspace new_space;
	enum video_range_type new_range;
	AVFrame *f = d->frame;

	if (!preload) {
		if (!mp_media_can_play_frame(m, d))
			return;

		d->frame_ready = false;

		if (!m->v_cb)
			return;
	} else if (!d->frame_ready) {
		return;
	}

	bool flip = false;
	if (m->swscale) {
		int ret = sws_scale(m->swscale,
				(const uint8_t *const *)f->data, f->linesize,
				0, f->height,
				m->scale_pic, m->scale_linesizes);
		if (ret < 0)
			return;

		flip = m->scale_linesizes[0] < 0 && m->scale_linesizes[1] == 0;
		for (size_t i = 0; i < 4; i++) {
			frame->data[i] = m->scale_pic[i];
			frame->linesize[i] = abs(m->scale_linesizes[i]);
		}

	} else {
		flip = f->linesize[0] < 0 && f->linesize[1] == 0;

		for (size_t i = 0; i < MAX_AV_PLANES; i++) {
			frame->data[i] = f->data[i];
			frame->linesize[i] = abs(f->linesize[i]);
		}
	}

	if (flip)
		frame->data[0] -= frame->linesize[0] * (f->height - 1);

	new_format = convert_pixel_format(m->scale_format);
	new_space  = convert_color_space(f->colorspace);
	new_range  = m->force_range == VIDEO_RANGE_DEFAULT
		? convert_color_range(f->color_range)
		: m->force_range;

	if (new_format != frame->format ||
	    new_space  != m->cur_space  ||
	    new_range  != m->cur_range) {
		bool success;

		frame->format = new_format;
		frame->full_range = new_range == VIDEO_RANGE_FULL;

		success = video_format_get_parameters(
				new_space,
				new_range,
				frame->color_matrix,
				frame->color_range_min,
				frame->color_range_max);

		frame->format = new_format; /* kept as in upstream; already set above */
		m->cur_space = new_space;
		m->cur_range = new_range;

		if (!success) {
			frame->format = VIDEO_FORMAT_NONE;
			return;
		}
	}

	if (frame->format == VIDEO_FORMAT_NONE)
		return;

	frame->timestamp = m->base_ts + d->frame_pts - m->start_ts +
		m->play_sys_ts - base_sys_ts;
	frame->width = f->width;
	frame->height = f->height;
	frame->flip = flip;

	if (!m->is_local_file && !d->got_first_keyframe) {
		if (!f->key_frame)
			return;

		d->got_first_keyframe = true;
	}

	if (preload)
		m->v_preload_cb(m->opaque, frame);
	else
		m->v_cb(m->opaque, frame);
}