static inline void do_encode(struct obs_encoder *encoder, struct encoder_frame *frame) { profile_start(do_encode_name); if (!encoder->profile_encoder_encode_name) encoder->profile_encoder_encode_name = profile_store_name(obs_get_profiler_name_store(), "encode(%s)", encoder->context.name); struct encoder_packet pkt = {0}; bool received = false; bool success; pkt.timebase_num = encoder->timebase_num; pkt.timebase_den = encoder->timebase_den; pkt.encoder = encoder; profile_start(encoder->profile_encoder_encode_name); success = encoder->info.encode(encoder->context.data, frame, &pkt, &received); profile_end(encoder->profile_encoder_encode_name); if (!success) { full_stop(encoder); blog(LOG_ERROR, "Error encoding with encoder '%s'", encoder->context.name); goto error; } if (received) { if (!encoder->first_received) { encoder->offset_usec = packet_dts_usec(&pkt); encoder->first_received = true; } /* we use system time here to ensure sync with other encoders, * you do not want to use relative timestamps here */ pkt.dts_usec = encoder->start_ts / 1000 + packet_dts_usec(&pkt) - encoder->offset_usec; pkt.sys_dts_usec = pkt.dts_usec; pthread_mutex_lock(&encoder->callbacks_mutex); for (size_t i = encoder->callbacks.num; i > 0; i--) { struct encoder_callback *cb; cb = encoder->callbacks.array+(i-1); send_packet(encoder, cb, &pkt); } pthread_mutex_unlock(&encoder->callbacks_mutex); } error: profile_end(do_encode_name); }
static inline void do_encode(struct obs_encoder *encoder, struct encoder_frame *frame) { struct encoder_packet pkt = {0}; bool received = false; bool success; pkt.timebase_num = encoder->timebase_num; pkt.timebase_den = encoder->timebase_den; success = encoder->info.encode(encoder->context.data, frame, &pkt, &received); if (!success) { full_stop(encoder); blog(LOG_ERROR, "Error encoding with encoder '%s'", encoder->context.name); return; } if (received) { /* we use system time here to ensure sync with other encoders, * you do not want to use relative timestamps here */ pkt.dts_usec = encoder->start_ts / 1000 + packet_dts_usec(&pkt); pthread_mutex_lock(&encoder->callbacks_mutex); for (size_t i = 0; i < encoder->callbacks.num; i++) { struct encoder_callback *cb; cb = encoder->callbacks.array+i; send_packet(encoder, cb, &pkt); } pthread_mutex_unlock(&encoder->callbacks_mutex); } }
/* Shift an encoded packet's timestamps so the output stream begins at 0.
 *
 * audio and video need to start at timestamp 0, and the encoders may not
 * currently be at 0 when we get data; the stored per-track offset is
 * subtracted from the packet's dts/pts, and the "received" flag for that
 * track is raised. */
static void apply_interleaved_packet_offset(struct obs_output *output,
					    struct encoder_packet *out)
{
	const bool is_video = (out->type == OBS_ENCODER_VIDEO);
	const int64_t offset = is_video ? output->video_offset
					: output->audio_offset;

	if (is_video)
		output->received_video = true;
	else
		output->received_audio = true;

	out->dts -= offset;
	out->pts -= offset;

	/* convert the newly adjusted dts to relative dts time to ensure proper
	 * interleaving. if we're using an audio encoder that's already been
	 * started on another output, then the first audio packet may not be
	 * quite perfectly synced up in terms of system time (and there's
	 * nothing we can really do about that), but it will always at least be
	 * within a 23ish millisecond threshold (at least for AAC) */
	out->dts_usec = packet_dts_usec(out);
}