Example 1
static inline void do_encode(struct obs_encoder *encoder,
		struct encoder_frame *frame)
{
	profile_start(do_encode_name);
	if (!encoder->profile_encoder_encode_name)
		encoder->profile_encoder_encode_name =
			profile_store_name(obs_get_profiler_name_store(),
					"encode(%s)", encoder->context.name);

	struct encoder_packet pkt = {0};
	bool received = false;
	bool success;

	pkt.timebase_num = encoder->timebase_num;
	pkt.timebase_den = encoder->timebase_den;
	pkt.encoder = encoder;

	profile_start(encoder->profile_encoder_encode_name);
	success = encoder->info.encode(encoder->context.data, frame, &pkt,
			&received);
	profile_end(encoder->profile_encoder_encode_name);
	if (!success) {
		full_stop(encoder);
		blog(LOG_ERROR, "Error encoding with encoder '%s'",
				encoder->context.name);
		goto error;
	}

	if (received) {
		if (!encoder->first_received) {
			encoder->offset_usec = packet_dts_usec(&pkt);
			encoder->first_received = true;
		}

		/* we use system time here to ensure sync with other encoders,
		 * you do not want to use relative timestamps here */
		pkt.dts_usec = encoder->start_ts / 1000 +
			packet_dts_usec(&pkt) - encoder->offset_usec;
		pkt.sys_dts_usec = pkt.dts_usec;

		pthread_mutex_lock(&encoder->callbacks_mutex);

		for (size_t i = encoder->callbacks.num; i > 0; i--) {
			struct encoder_callback *cb;
			cb = encoder->callbacks.array+(i-1);
			send_packet(encoder, cb, &pkt);
		}

		pthread_mutex_unlock(&encoder->callbacks_mutex);
	}

error:
	profile_end(do_encode_name);
}
Example 2
void obs_load_all_modules(void)
{
	profile_start(obs_load_all_modules_name);
	obs_find_modules(load_all_callback, NULL);
#ifdef _WIN32
	profile_start(reset_win32_symbol_paths_name);
	reset_win32_symbol_paths();
	profile_end(reset_win32_symbol_paths_name);
#endif
	profile_end(obs_load_all_modules_name);
}
Example 3
static void *video_thread(void *param)
{
	struct video_output *video = param;

	os_set_thread_name("video-io: video thread");

	const char *video_thread_name =
		profile_store_name(obs_get_profiler_name_store(),
				"video_thread(%s)", video->info.name);

	while (os_sem_wait(video->update_semaphore) == 0) {
		if (video->stop)
			break;

		profile_start(video_thread_name);
		while (!video->stop && !video_output_cur_frame(video)) {
			video->total_frames++;
		}

		video->total_frames++;
		profile_end(video_thread_name);

		profile_reenable_thread();
	}

	return NULL;
}
Example 4
void Master::sendLearnts(int thread_no) {
        if (PAR_DEBUG) fprintf(stderr, "Sending learnts to %d\n", thread_no);

        vec<int> message(sizeof(Report)/sizeof(int),0);
        int num_learnts = 0;

        SClause *sc = (SClause*) &global_learnts[lhead[thread_no]];
        for ( ; (int*) sc != &global_learnts[global_learnts.size()]; sc = sc->getNext()) {
                if (sc->source == thread_no) continue;
                sc->pushInVec(message);
                num_learnts++;
        }
        lhead[thread_no] = global_learnts.size();

        Report& r = *((Report*) (int*) message);
        r.num_learnts = num_learnts;

        profile_start();

        MPI_Bsend((int*) message, message.size(), MPI_INT, thread_no+1, LEARNTS_TAG, MPI_COMM_WORLD);

        last_send_learnts[thread_no] = wallClockTime();

        profile_end("send learnts", message.size())
}
Example 5
static void lpc_profile_start(int state, ktime_t now)
{
	if (!is_state_lpc(state))
		return;

	profile_start(&lpc_info, 0, now);
}
Example 6
void Slave::sendReport() {
        if (FULL_DEBUG) fprintf(stderr, "%d: Forming report\n", thread_no);
        static bool firstCall = true;
        if(firstCall){
            if(engine.decisionLevel() == 0 && so.shareBounds)
              exportBounds();
        }
        
        Report& r = *((Report*) (int*) report_message);
        r.status = status;

        if (FULL_DEBUG) fprintf(stderr, "%d: Sending report to master\n", thread_no);

        profile_start();

        MPI_Bsend((int*) report_message, report_message.size(), MPI_INT, 0, REPORT_TAG, MPI_COMM_WORLD);

        profile_end("send result", report_message.size());

        if (FULL_DEBUG) fprintf(stderr, "%d: Sent report to master\n", thread_no);

        report_message.clear();
        report_message.growTo(sizeof(Report)/sizeof(int),0);
        
        unitFound = false;
}
Example 7
static void lpm_profile_start(int state, ktime_t now)
{
	if ((state & MAJOR_STATE) != LPM_STATE)
		return;

	profile_start(&lpm_info, get_lpm_substate(state), now);
}
Example 8
static void receive_video(void *param, struct video_data *frame)
{
    profile_start(receive_video_name);

    struct obs_encoder    *encoder  = param;
    struct encoder_frame  enc_frame;

    memset(&enc_frame, 0, sizeof(struct encoder_frame));

    for (size_t i = 0; i < MAX_AV_PLANES; i++) {
        enc_frame.data[i]     = frame->data[i];
        enc_frame.linesize[i] = frame->linesize[i];
    }

    if (!encoder->start_ts)
        encoder->start_ts = frame->timestamp;

    enc_frame.frames = 1;
    enc_frame.pts    = encoder->cur_pts;

    do_encode(encoder, &enc_frame);

    encoder->cur_pts += encoder->timebase_num;

    profile_end(receive_video_name);
}
Example 9
/**
 * Calculate digest algorithm cost
 *
 * @v digest		Digest algorithm
 * @ret cost		Cost (in cycles per byte)
 */
unsigned long digest_cost ( struct digest_algorithm *digest ) {
	static uint8_t random[8192]; /* Too large for stack */
	uint8_t ctx[digest->ctxsize];
	uint8_t out[digest->digestsize];
	struct profiler profiler;
	unsigned long cost;
	unsigned int i;

	/* Fill buffer with pseudo-random data */
	srand ( 0x1234568 );
	for ( i = 0 ; i < sizeof ( random ) ; i++ )
		random[i] = rand();

	/* Profile digest calculation */
	memset ( &profiler, 0, sizeof ( profiler ) );
	for ( i = 0 ; i < PROFILE_COUNT ; i++ ) {
		profile_start ( &profiler );
		digest_init ( digest, ctx );
		digest_update ( digest, ctx, random, sizeof ( random ) );
		digest_final ( digest, ctx, out );
		profile_stop ( &profiler );
	}

	/* Round to nearest whole number of cycles per byte */
	cost = ( ( profile_mean ( &profiler ) + ( sizeof ( random ) / 2 ) ) /
		 sizeof ( random ) );

	return cost;
}
Example 10
static inline void render_main_texture(struct obs_core_video *video,
		int cur_texture)
{
	profile_start(render_main_texture_name);

	struct vec4 clear_color;
	vec4_set(&clear_color, 0.0f, 0.0f, 0.0f, 1.0f);

	gs_set_render_target(video->render_textures[cur_texture], NULL);
	gs_clear(GS_CLEAR_COLOR, &clear_color, 1.0f, 0);

	set_render_size(video->base_width, video->base_height);

	pthread_mutex_lock(&obs->data.draw_callbacks_mutex);

	for (size_t i = 0; i < obs->data.draw_callbacks.num; i++) {
		struct draw_callback *callback;
		callback = obs->data.draw_callbacks.array+i;

		callback->draw(callback->param,
				video->base_width, video->base_height);
	}

	pthread_mutex_unlock(&obs->data.draw_callbacks_mutex);

	obs_view_render(&obs->data.main_view);

	video->textures_rendered[cur_texture] = true;

	profile_end(render_main_texture_name);
}
Example 11
void Slave::splitJob() {
        vec<int> message;
        int num_splits;
        //fprintf(stderr, "%d: Split job called, assumptions.size()=%d, DL=%d!\n", thread_no, engine.assumptions.size(), engine.decisionLevel());
        profile_start();

        MPI_Recv(&num_splits, 1, MPI_INT, 0, STEAL_TAG, MPI_COMM_WORLD, &s);

        int max_splits = engine.decisionLevel() - engine.assumptions.size() - 1;
        if (num_splits > max_splits) num_splits = max_splits;
        if (num_splits < 0) num_splits = 0;
        
        if (FULL_DEBUG) fprintf(stderr, "%d: Splitting %d jobs\n", thread_no, num_splits);

        for (int i = 0; i < num_splits; i++) {
                engine.assumptions.push(toInt(sat.decLit(engine.assumptions.size()+1)));
                sat.incVarUse(engine.assumptions.last()/2);
        }
        assert(num_splits == 0 || engine.decisionLevel() > engine.assumptions.size());

        vec<Lit> ps;
        for (int i = 0; i < engine.assumptions.size(); i++) ps.push(toLit(engine.assumptions[i]));
        Clause *c = Clause_new(ps);
        
        sat.convertToSClause(*c);
        free(c);
        message.push(num_splits);
        sat.temp_sc->pushInVec(message);

        MPI_Bsend((int*) message, message.size(), MPI_INT, 0, SPLIT_TAG, MPI_COMM_WORLD);

        profile_end("send split job", message.size());

        if (FULL_DEBUG) fprintf(stderr, "%d: Sent %d split job to master\n", thread_no, message[0]);
}
Example 12
static void *audio_thread(void *param)
{
	struct audio_output *audio = param;
	uint64_t buffer_time = audio->info.buffer_ms * 1000000;
	uint64_t prev_time = os_gettime_ns() - buffer_time;
	uint64_t audio_time;

	os_set_thread_name("audio-io: audio thread");

	const char *audio_thread_name =
		profile_store_name(obs_get_profiler_name_store(),
				"audio_thread(%s)", audio->info.name);
	
	while (os_event_try(audio->stop_event) == EAGAIN) {
		os_sleep_ms(AUDIO_WAIT_TIME);

		profile_start(audio_thread_name);
		pthread_mutex_lock(&audio->line_mutex);

		audio_time = os_gettime_ns() - buffer_time;
		audio_time = mix_and_output(audio, audio_time, prev_time);
		prev_time  = audio_time;

		pthread_mutex_unlock(&audio->line_mutex);
		profile_end(audio_thread_name);

		profile_reenable_thread();
	}

	return NULL;
}
Example 13
static void cpd_profile_start(int state, ktime_t now, int cpu)
{
	if (!is_state_cpd(state))
		return;

	profile_start(&cpd_info[to_cluster(cpu)], 0, now);
}
Example 14
u8 init_simulation(command* cmd, simulation* sim, timer* t, ocrGuid_t** list)
{
  sim->step = 0;
  sim->steps = cmd->steps;
  sim->period = cmd->period;
  sim->dt = cmd->dt;
  sim->e_potential = 0.0;
  sim->e_kinetic = 0.0;

  u8 insane = 0;
  if(cmd->doeam)
    insane = init_eam(cmd->pot_dir, cmd->pot_name, cmd->pot_type, &sim->pot, sim->dt);
  else
    init_lj(&sim->pot, sim->dt);
  if(insane) return insane;

  real_t lattice_const = cmd->lat;
  if(cmd->lat < 0.0)
    lattice_const = sim->pot.lat;
  insane = sanity_checks(cmd, sim->pot.cutoff, lattice_const, sim->pot.lattice_type);
  if(insane) return insane;

  ocrGuid_t box_tmp;
  box** box_ptr = init_lattice(sim, cmd, lattice_const, list, &box_tmp);

  profile_start(redistribute_timer,t);
  redistribute_atoms(sim, box_ptr, sim->bxs.boxes_num);
  profile_stop(redistribute_timer,t);

  ocrDbDestroy(box_tmp);

  return 0;
}
Example 15
/**
 * Add received data to data transfer buffer
 *
 * @v xferbuf		Data transfer buffer
 * @v iobuf		I/O buffer
 * @v meta		Data transfer metadata
 * @ret rc		Return status code
 */
int xferbuf_deliver ( struct xfer_buffer *xferbuf, struct io_buffer *iobuf,
		      struct xfer_metadata *meta ) {
	size_t len = iob_len ( iobuf );
	size_t pos;
	int rc;

	/* Start profiling */
	profile_start ( &xferbuf_deliver_profiler );

	/* Calculate new buffer position */
	pos = xferbuf->pos;
	if ( meta->flags & XFER_FL_ABS_OFFSET )
		pos = 0;
	pos += meta->offset;

	/* Write data to buffer */
	if ( ( rc = xferbuf_write ( xferbuf, pos, iobuf->data, len ) ) != 0 )
		goto done;

	/* Update current buffer position */
	xferbuf->pos = ( pos + len );

 done:
	free_iob ( iobuf );
	profile_stop ( &xferbuf_deliver_profiler );
	return rc;
}
Example 16
/**
 * Complete bulk IN transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void acm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf,
			      int rc ) {
	struct acm_device *acm = container_of ( ep, struct acm_device,
						usbnet.in );
	struct rndis_device *rndis = acm->rndis;

	/* Profile receive completions */
	profile_start ( &acm_in_profiler );

	/* Ignore packets cancelled when the endpoint closes */
	if ( ! ep->open )
		goto ignore;

	/* Record USB errors against the RNDIS device */
	if ( rc != 0 ) {
		DBGC ( acm, "ACM %p bulk IN failed: %s\n",
		       acm, strerror ( rc ) );
		goto error;
	}

	/* Hand off to RNDIS */
	rndis_rx ( rndis, iob_disown ( iobuf ) );

	profile_stop ( &acm_in_profiler );
	return;

 error:
	rndis_rx_err ( rndis, iob_disown ( iobuf ), rc );
 ignore:
	free_iob ( iobuf );
}
Example 17
static void receive_video(void *param, struct video_data *frame)
{
	profile_start(receive_video_name);

	struct obs_encoder    *encoder  = param;
	struct obs_encoder    *pair     = encoder->paired_encoder;
	struct encoder_frame  enc_frame;

	if (!encoder->first_received && pair) {
		if (!pair->first_received ||
		    pair->first_raw_ts > frame->timestamp) {
			goto wait_for_audio;
		}
	}

	memset(&enc_frame, 0, sizeof(struct encoder_frame));

	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		enc_frame.data[i]     = frame->data[i];
		enc_frame.linesize[i] = frame->linesize[i];
	}

	if (!encoder->start_ts)
		encoder->start_ts = frame->timestamp;

	enc_frame.frames = 1;
	enc_frame.pts    = encoder->cur_pts;

	do_encode(encoder, &enc_frame);

	encoder->cur_pts += encoder->timebase_num;

wait_for_audio:
	profile_end(receive_video_name);
}
Example 18
static inline void stage_output_texture(struct obs_core_video *video,
		int cur_texture, int prev_texture)
{
	profile_start(stage_output_texture_name);

	gs_texture_t   *texture;
	bool        texture_ready;
	gs_stagesurf_t *copy = video->copy_surfaces[cur_texture];

	if (video->gpu_conversion) {
		texture = video->convert_textures[prev_texture];
		texture_ready = video->textures_converted[prev_texture];
	} else {
		texture = video->output_textures[prev_texture];
		texture_ready = video->textures_output[prev_texture];
	}

	unmap_last_surface(video);

	if (!texture_ready)
		goto end;

	gs_stage_texture(copy, texture);

	video->textures_copied[cur_texture] = true;

end:
	profile_end(stage_output_texture_name);
}
Example 19
/**
 * Complete bulk IN transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void smsc95xx_in_complete ( struct usb_endpoint *ep,
				   struct io_buffer *iobuf, int rc ) {
	struct smsc95xx_device *smsc95xx =
		container_of ( ep, struct smsc95xx_device, usbnet.in );
	struct net_device *netdev = smsc95xx->netdev;
	struct smsc95xx_rx_header *header;

	/* Profile completions */
	profile_start ( &smsc95xx_in_profiler );

	/* Ignore packets cancelled when the endpoint closes */
	if ( ! ep->open ) {
		free_iob ( iobuf );
		return;
	}

	/* Record USB errors against the network device */
	if ( rc != 0 ) {
		DBGC ( smsc95xx, "SMSC95XX %p bulk IN failed: %s\n",
		       smsc95xx, strerror ( rc ) );
		goto err;
	}

	/* Sanity check */
	if ( iob_len ( iobuf ) < ( sizeof ( *header ) + 4 /* CRC */ ) ) {
		DBGC ( smsc95xx, "SMSC95XX %p underlength bulk IN\n",
		       smsc95xx );
		DBGC_HDA ( smsc95xx, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto err;
	}

	/* Strip header and CRC */
	header = iobuf->data;
	iob_pull ( iobuf, sizeof ( *header ) );
	iob_unput ( iobuf, 4 /* CRC */ );

	/* Check for errors */
	if ( header->command & cpu_to_le32 ( SMSC95XX_RX_RUNT |
					     SMSC95XX_RX_LATE |
					     SMSC95XX_RX_CRC ) ) {
		DBGC ( smsc95xx, "SMSC95XX %p receive error (%08x):\n",
		       smsc95xx, le32_to_cpu ( header->command ) );
		DBGC_HDA ( smsc95xx, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EIO;
		goto err;
	}

	/* Hand off to network stack */
	netdev_rx ( netdev, iob_disown ( iobuf ) );

	profile_stop ( &smsc95xx_in_profiler );
	return;

 err:
	/* Hand off to network stack */
	netdev_rx_err ( netdev, iob_disown ( iobuf ), rc );
}
Example 20
/**
 * Complete interrupt transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void acm_intr_complete ( struct usb_endpoint *ep,
				struct io_buffer *iobuf, int rc ) {
	struct acm_device *acm = container_of ( ep, struct acm_device,
						usbnet.intr );
	struct rndis_device *rndis = acm->rndis;
	struct usb_setup_packet *message;

	/* Profile completions */
	profile_start ( &acm_intr_profiler );

	/* Ignore packets cancelled when the endpoint closes */
	if ( ! ep->open )
		goto ignore;

	/* Drop packets with errors */
	if ( rc != 0 ) {
		DBGC ( acm, "ACM %p interrupt failed: %s\n",
		       acm, strerror ( rc ) );
		DBGC_HDA ( acm, 0, iobuf->data, iob_len ( iobuf ) );
		goto error;
	}

	/* Extract message header */
	if ( iob_len ( iobuf ) < sizeof ( *message ) ) {
		DBGC ( acm, "ACM %p underlength interrupt:\n", acm );
		DBGC_HDA ( acm, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto error;
	}
	message = iobuf->data;

	/* Parse message header */
	switch ( message->request ) {

	case cpu_to_le16 ( CDC_RESPONSE_AVAILABLE ) :
	case cpu_to_le16 ( 0x0001 ) : /* qemu seems to use this value */
		acm->responded = 1;
		break;

	default:
		DBGC ( acm, "ACM %p unrecognised interrupt:\n", acm );
		DBGC_HDA ( acm, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -ENOTSUP;
		goto error;
	}

	/* Free I/O buffer */
	free_iob ( iobuf );
	profile_stop ( &acm_intr_profiler );

	return;

 error:
	rndis_rx_err ( rndis, iob_disown ( iobuf ), rc );
 ignore:
	free_iob ( iobuf );
	return;
}
Example 21
static inline void render_output_texture(struct obs_core_video *video,
		int cur_texture, int prev_texture)
{
	profile_start(render_output_texture_name);

	gs_texture_t *texture = video->render_textures[prev_texture];
	gs_texture_t *target  = video->output_textures[cur_texture];
	uint32_t     width   = gs_texture_get_width(target);
	uint32_t     height  = gs_texture_get_height(target);
	struct vec2  base_i;

	vec2_set(&base_i,
		1.0f / (float)video->base_width,
		1.0f / (float)video->base_height);

	gs_effect_t    *effect  = get_scale_effect(video, width, height);
	gs_technique_t *tech;

	if (video->ovi.output_format == VIDEO_FORMAT_RGBA) {
		tech = gs_effect_get_technique(effect, "Draw");
	} else {
		tech = gs_effect_get_technique(effect, "DrawMatrix");
	}

	gs_eparam_t    *image   = gs_effect_get_param_by_name(effect, "image");
	gs_eparam_t    *matrix  = gs_effect_get_param_by_name(effect,
			"color_matrix");
	gs_eparam_t    *bres_i  = gs_effect_get_param_by_name(effect,
			"base_dimension_i");
	size_t      passes, i;

	if (!video->textures_rendered[prev_texture])
		goto end;

	gs_set_render_target(target, NULL);
	set_render_size(width, height);

	if (bres_i)
		gs_effect_set_vec2(bres_i, &base_i);

	gs_effect_set_val(matrix, video->color_matrix, sizeof(float) * 16);
	gs_effect_set_texture(image, texture);

	gs_enable_blending(false);
	passes = gs_technique_begin(tech);
	for (i = 0; i < passes; i++) {
		gs_technique_begin_pass(tech, i);
		gs_draw_sprite(texture, 0, width, height);
		gs_technique_end_pass(tech);
	}
	gs_technique_end(tech);
	gs_enable_blending(true);

	video->textures_output[cur_texture] = true;

end:
	profile_end(render_output_texture_name);
}
Example 22
static inline void output_frame(bool raw_active, const bool gpu_active)
{
	struct obs_core_video *video = &obs->video;
	int cur_texture  = video->cur_texture;
	int prev_texture = cur_texture == 0 ? NUM_TEXTURES-1 : cur_texture-1;
	struct video_data frame;
	bool active = raw_active || gpu_active;
	bool frame_ready;

	memset(&frame, 0, sizeof(struct video_data));

	profile_start(output_frame_gs_context_name);
	gs_enter_context(video->graphics);

	profile_start(output_frame_render_video_name);
	render_video(video, raw_active, gpu_active, cur_texture, prev_texture);
	profile_end(output_frame_render_video_name);

	if (raw_active) {
		profile_start(output_frame_download_frame_name);
		frame_ready = download_frame(video, prev_texture, &frame);
		profile_end(output_frame_download_frame_name);
	}

	profile_start(output_frame_gs_flush_name);
	gs_flush();
	profile_end(output_frame_gs_flush_name);

	gs_leave_context();
	profile_end(output_frame_gs_context_name);

	if (raw_active && frame_ready) {
		struct obs_vframe_info vframe_info;
		circlebuf_pop_front(&video->vframe_info_buffer, &vframe_info,
				sizeof(vframe_info));

		frame.timestamp = vframe_info.timestamp;
		profile_start(output_frame_output_video_data_name);
		output_video_data(video, &frame, vframe_info.count);
		profile_end(output_frame_output_video_data_name);
	}

	if (++video->cur_texture == NUM_TEXTURES)
		video->cur_texture = 0;
}
Example 23
/// time_push - save the previous time before doing something that could nest
///
/// After calling this function, the static global `g_prev_time` will
/// contain the current time.
///
/// @param[out] rel to the time elapsed so far
/// @param[out] start the current time
void time_push(proftime_T *rel, proftime_T *start)
{
  proftime_T now = profile_start();

  // subtract the previous time from now, store it in `rel`
  *rel = profile_sub(now, g_prev_time);
  *start = now;

  // reset global `g_prev_time` for the next call
  g_prev_time = now;
}
Example 24
void prof_draws(TProfiler *prof, const char *str)
{
	tte_set_pos(0, 0);
	tte_write(prof->str);
	tte_write(": \n");
	VBlankIntrWait();

	profile_start();
	tte_write(str);

	prof->time=  profile_stop();
}
Example 25
static bool buffer_audio(struct obs_encoder *encoder, struct audio_data *data)
{
	profile_start(buffer_audio_name);

	size_t size = data->frames * encoder->blocksize;
	size_t offset_size = 0;
	bool success = true;

	if (!encoder->start_ts && encoder->paired_encoder) {
		uint64_t end_ts     = data->timestamp;
		uint64_t v_start_ts = encoder->paired_encoder->start_ts;

		/* no video yet, so don't start audio */
		if (!v_start_ts) {
			success = false;
			goto fail;
		}

		/* audio starting point still not synced with video starting
		 * point, so don't start audio */
		end_ts += (uint64_t)data->frames * 1000000000ULL /
			(uint64_t)encoder->samplerate;
		if (end_ts <= v_start_ts) {
			success = false;
			goto fail;
		}

		/* ready to start audio, truncate if necessary */
		if (data->timestamp < v_start_ts)
			offset_size = calc_offset_size(encoder, v_start_ts,
					data->timestamp);
		if (data->timestamp <= v_start_ts)
			clear_audio(encoder);

		encoder->start_ts = v_start_ts;

		/* use currently buffered audio instead */
		if (v_start_ts < data->timestamp) {
			start_from_buffer(encoder, v_start_ts);
			goto skip_push;
		}

	} else if (!encoder->start_ts && !encoder->paired_encoder) {
		encoder->start_ts = data->timestamp;
	}

fail:
	push_back_audio(encoder, data, size, offset_size);

skip_push:
	profile_end(buffer_audio_name);
	return success;
}
Example 26
static void render_convert_texture(struct obs_core_video *video,
		int cur_texture, int prev_texture)
{
	profile_start(render_convert_texture_name);

	gs_texture_t *texture = video->output_textures[prev_texture];
	gs_texture_t *target  = video->convert_textures[cur_texture];
	float        fwidth  = (float)video->output_width;
	float        fheight = (float)video->output_height;
	size_t       passes, i;

	gs_effect_t    *effect  = video->conversion_effect;
	gs_eparam_t    *image   = gs_effect_get_param_by_name(effect, "image");
	gs_technique_t *tech    = gs_effect_get_technique(effect,
			video->conversion_tech);

	if (!video->textures_output[prev_texture])
		goto end;

	set_eparam(effect, "u_plane_offset", (float)video->plane_offsets[1]);
	set_eparam(effect, "v_plane_offset", (float)video->plane_offsets[2]);
	set_eparam(effect, "width",  fwidth);
	set_eparam(effect, "height", fheight);
	set_eparam(effect, "width_i",  1.0f / fwidth);
	set_eparam(effect, "height_i", 1.0f / fheight);
	set_eparam(effect, "width_d2",  fwidth  * 0.5f);
	set_eparam(effect, "height_d2", fheight * 0.5f);
	set_eparam(effect, "width_d2_i",  1.0f / (fwidth  * 0.5f));
	set_eparam(effect, "height_d2_i", 1.0f / (fheight * 0.5f));
	set_eparam(effect, "input_height", (float)video->conversion_height);

	gs_effect_set_texture(image, texture);

	gs_set_render_target(target, NULL);
	set_render_size(video->output_width, video->conversion_height);

	gs_enable_blending(false);
	passes = gs_technique_begin(tech);
	for (i = 0; i < passes; i++) {
		gs_technique_begin_pass(tech, i);
		gs_draw_sprite(texture, 0, video->output_width,
				video->conversion_height);
		gs_technique_end_pass(tech);
	}
	gs_technique_end(tech);
	gs_enable_blending(true);

	video->textures_converted[cur_texture] = true;

end:
	profile_end(render_convert_texture_name);
}
Example 27
/*********************************************************************
 *                Information gathering function                     *
 *********************************************************************/
void cpuidle_profile_start(int cpu, int state)
{
	struct cpuidle_profile_info *info;
	ktime_t now;

	if (!profile_ongoing)
		return;

	now = ktime_get();
	info = &per_cpu(profile_info, cpu);

	profile_start(info, state, now);
}
Example 28
void *obs_video_thread(void *param)
{
	uint64_t last_time = 0;
	uint64_t interval = video_output_get_frame_time(obs->video.video);

	obs->video.video_time = os_gettime_ns();

	os_set_thread_name("libobs: graphics thread");

	const char *video_thread_name =
		profile_store_name(obs_get_profiler_name_store(),
			"obs_video_thread(%g ms)", interval / 1000000.);
	profile_register_root(video_thread_name, interval);

	while (!video_output_stopped(obs->video.video)) {
		profile_start(video_thread_name);

		profile_start(tick_sources_name);
		last_time = tick_sources(obs->video.video_time, last_time);
		profile_end(tick_sources_name);

		profile_start(render_displays_name);
		render_displays();
		profile_end(render_displays_name);

		profile_start(output_frame_name);
		output_frame();
		profile_end(output_frame_name);

		profile_end(video_thread_name);

		profile_reenable_thread();

		video_sleep(&obs->video, &obs->video.video_time, interval);
	}

	UNUSED_PARAMETER(param);
	return NULL;
}
Example 29
/**
 * Transmit packet
 *
 * @v acm		USB RNDIS device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int acm_out_transmit ( struct acm_device *acm,
			      struct io_buffer *iobuf ) {
	int rc;

	/* Profile transmissions */
	profile_start ( &acm_out_profiler );

	/* Enqueue I/O buffer */
	if ( ( rc = usb_stream ( &acm->usbnet.out, iobuf, 0 ) ) != 0 )
		return rc;

	profile_stop ( &acm_out_profiler );
	return 0;
}
Example 30
static bool buffer_audio(struct obs_encoder *encoder, struct audio_data *data)
{
    profile_start(buffer_audio_name);

    size_t samplerate = encoder->samplerate;
    size_t size = data->frames * encoder->blocksize;
    size_t offset_size = 0;

    if (!encoder->start_ts && encoder->paired_encoder) {
        uint64_t end_ts     = data->timestamp;
        uint64_t v_start_ts = encoder->paired_encoder->start_ts;

        /* no video yet, so don't start audio */
        if (!v_start_ts)
            goto fail;

        /* audio starting point still not synced with video starting
         * point, so don't start audio */
        end_ts += (uint64_t)data->frames * 1000000000ULL / samplerate;
        if (end_ts <= v_start_ts)
            goto fail;

        /* ready to start audio, truncate if necessary */
        if (data->timestamp < v_start_ts) {
            uint64_t offset = v_start_ts - data->timestamp;
            offset = (int)(offset * samplerate / 1000000000);
            offset_size = (size_t)offset * encoder->blocksize;
        }

        encoder->start_ts = v_start_ts;

    } else if (!encoder->start_ts && !encoder->paired_encoder) {
        encoder->start_ts = data->timestamp;
    }

    size -= offset_size;

    /* push in to the circular buffer */
    if (size)
        for (size_t i = 0; i < encoder->planes; i++)
            circlebuf_push_back(&encoder->audio_input_buffer[i],
                                data->data[i] + offset_size, size);

    profile_end(buffer_audio_name);
    return true;

fail:
    profile_end(buffer_audio_name);
    return false;
}