/* Run slice-type decision on the pending lookahead frames and hand the
 * decided frames over to the output frame buffer (ofbuf), blocking if the
 * output buffer is full.  Called on the lookahead thread; takes
 * ofbuf.mutex for the whole handoff and next.mutex only for the shift. */
static void x264_lookahead_slicetype_decide( x264_t *h )
{
    /* slicetype_decide is run through x264_stack_align (asm code requires an
     * aligned stack, which the pthread entry point does not guarantee). */
    x264_stack_align( x264_slicetype_decide, h );

    /* Remember the most recent non-B frame; list[0] is the next frame to be
     * output after the decision above. */
    x264_lookahead_update_last_nonb( h, h->lookahead->next.list[0] );

    x264_pthread_mutex_lock( &h->lookahead->ofbuf.mutex );
    /* Wait for room in the output buffer; cv_empty is signalled by the
     * consumer when it drains frames. */
    while( h->lookahead->ofbuf.i_size == h->lookahead->ofbuf.i_max_size )
        x264_pthread_cond_wait( &h->lookahead->ofbuf.cv_empty, &h->lookahead->ofbuf.mutex );

    /* Move the decided mini-GOP (the leading frame's B-frames plus the frame
     * itself) from the 'next' queue into ofbuf. */
    x264_pthread_mutex_lock( &h->lookahead->next.mutex );
    x264_lookahead_shift( &h->lookahead->ofbuf, &h->lookahead->next, h->lookahead->next.list[0]->i_bframes + 1 );
    x264_pthread_mutex_unlock( &h->lookahead->next.mutex );

    /* For MB-tree and VBV lookahead, we have to perform propagation analysis on I-frames too. */
    if( h->lookahead->b_analyse_keyframe && IS_X264_TYPE_I( h->lookahead->last_nonb->i_type ) )
        x264_stack_align( x264_slicetype_analyse, h, 1 );

    x264_pthread_mutex_unlock( &h->lookahead->ofbuf.mutex );
}
/* Mux one encoded video frame (plus any buffered SEI) into the MP4 via
 * L-SMASH.  Returns i_size on success; the MP4_FAIL_IF_ERR macro presumably
 * returns an error code on failure — TODO confirm its exact semantics.
 *
 * handle     : mp4_hnd_t muxer state.
 * p_nalu     : annex-B/length-prefixed NAL payload of i_size bytes.
 * p_picture  : x264 output picture carrying dts/pts/type/keyframe info. */
static int write_frame( hnd_t handle, uint8_t *p_nalu, int i_size, x264_picture_t *p_picture )
{
    mp4_hnd_t *p_mp4 = handle;
    uint64_t dts, cts;

    /* First frame: derive the dts offset (x264 emits negative initial dts
     * when B-frames delay output) and set up the edit list. */
    if( !p_mp4->i_numframe )
    {
        p_mp4->i_start_offset = p_picture->i_dts * -1;
        /* With dts compression the timeline starts at 0; otherwise shift the
         * presentation start by the initial delay (in timescale units). */
        p_mp4->i_first_cts = p_mp4->b_dts_compress ? 0 : p_mp4->i_start_offset * p_mp4->i_time_inc;
        if( p_mp4->b_fragments )
        {
            lsmash_edit_t edit;
            edit.duration = ISOM_EDIT_DURATION_UNKNOWN32; /* QuickTime doesn't support 64bit duration. */
            edit.start_time = p_mp4->i_first_cts;
            edit.rate = ISOM_EDIT_MODE_NORMAL;
            MP4_LOG_IF_ERR( lsmash_create_explicit_timeline_map( p_mp4->p_root, p_mp4->i_track, edit ),
                            "failed to set timeline map for video.\n" );
        }
    }

    /* Sample holds pending SEI (if any) followed by the frame's NAL data.
     * NOTE(review): p_sample appears to leak if a later MP4_FAIL_IF_ERR
     * fires before lsmash_append_sample takes ownership — confirm whether
     * the macro frees it or whether a lsmash_delete_sample is needed. */
    lsmash_sample_t *p_sample = lsmash_create_sample( i_size + p_mp4->i_sei_size );
    MP4_FAIL_IF_ERR( !p_sample, "failed to create a video sample data.\n" );

    if( p_mp4->p_sei_buffer )
    {
        memcpy( p_sample->data, p_mp4->p_sei_buffer, p_mp4->i_sei_size );
        free( p_mp4->p_sei_buffer );
        p_mp4->p_sei_buffer = NULL;
    }
    memcpy( p_sample->data + p_mp4->i_sei_size, p_nalu, i_size );
    p_mp4->i_sei_size = 0;

    if( p_mp4->b_dts_compress )
    {
        /* DTS compression: squeeze the first i_delay_frames dts values into
         * fractions of the initial delta so cts can start at 0 without an
         * edit list. */
        if( p_mp4->i_numframe == 1 )
            p_mp4->i_init_delta = (p_picture->i_dts + p_mp4->i_start_offset) * p_mp4->i_time_inc;
        dts = p_mp4->i_numframe > p_mp4->i_delay_frames
            ? p_picture->i_dts * p_mp4->i_time_inc
            : p_mp4->i_numframe * (p_mp4->i_init_delta / p_mp4->i_dts_compress_multiplier);
        cts = p_picture->i_pts * p_mp4->i_time_inc;
    }
    else
    {
        /* No compression: shift both timestamps so dts starts at 0. */
        dts = (p_picture->i_dts + p_mp4->i_start_offset) * p_mp4->i_time_inc;
        cts = (p_picture->i_pts + p_mp4->i_start_offset) * p_mp4->i_time_inc;
    }

    p_sample->dts = dts;
    p_sample->cts = cts;
    p_sample->index = p_mp4->i_sample_entry;
    p_sample->prop.ra_flags = p_picture->b_keyframe
        ? ISOM_SAMPLE_RANDOM_ACCESS_FLAG_SYNC
        : ISOM_SAMPLE_RANDOM_ACCESS_FLAG_NONE;

    /* QuickTime-branded files get the extra sample dependency flags. */
    if( p_mp4->b_brand_qt )
    {
        p_sample->prop.independent = IS_X264_TYPE_I( p_picture->i_type )
            ? ISOM_SAMPLE_IS_INDEPENDENT : ISOM_SAMPLE_IS_NOT_INDEPENDENT;
        p_sample->prop.disposable = p_picture->i_type == X264_TYPE_B
            ? ISOM_SAMPLE_IS_DISPOSABLE : ISOM_SAMPLE_IS_NOT_DISPOSABLE;
        p_sample->prop.redundant = ISOM_SAMPLE_HAS_NO_REDUNDANCY;
        /* Frames that can be referenced (I/P/BREF) may legally be presented
         * earlier than a later-decoded frame. */
        if( p_picture->i_type == X264_TYPE_I || p_picture->i_type == X264_TYPE_P || p_picture->i_type == X264_TYPE_BREF )
            p_sample->prop.allow_earlier = QT_SAMPLE_EARLIER_PTS_ALLOWED;
        /* if( p_picture->i_type == X264_TYPE_I && p_picture->b_keyframe ) p_sample->prop.ra_flags = ISOM_SAMPLE_RANDOM_ACCESS_FLAG_OPEN_RAP; */
    }

    /* Fragmented MP4: start a new movie fragment at every random-access
     * point after the first frame. */
    if( p_mp4->b_fragments && p_mp4->i_numframe && p_sample->prop.ra_flags != ISOM_SAMPLE_RANDOM_ACCESS_FLAG_NONE )
    {
        MP4_FAIL_IF_ERR( lsmash_flush_pooled_samples( p_mp4->p_root, p_mp4->i_track, p_sample->dts - p_mp4->i_prev_dts ),
                         "failed to flush the rest of samples.\n" );
        MP4_FAIL_IF_ERR( lsmash_create_fragment_movie( p_mp4->p_root ),
                         "failed to create a movie fragment.\n" );
    }

    /* Append data per sample. */
    MP4_FAIL_IF_ERR( lsmash_append_sample( p_mp4->p_root, p_mp4->i_track, p_sample ),
                     "failed to append a video frame.\n" );

    p_mp4->i_prev_dts = dts;
    p_mp4->i_numframe++;

    return i_size;
}
/* Video-encoder thread entry point.  Opens an x264 encoder from
 * enc_params->avc_param, then loops: pull a raw frame from the encoder
 * queue, adjust SAR/speedcontrol as needed, encode, and push the coded
 * frame onto the smoothing queue.  Exits when cancel_thread is set or on
 * fatal error.  Takes ownership of enc_params (freed at 'end').
 *
 * Lock order used here: encoder_mutex, then drop_mutex, then
 * smoothing_mutex — never hold more than one of drop/smoothing at once. */
static void *start_encoder( void *ptr )
{
    obe_vid_enc_params_t *enc_params = ptr;
    obe_t *h = enc_params->h;
    obe_encoder_t *encoder = enc_params->encoder;
    x264_t *s = NULL;
    x264_picture_t pic, pic_out;
    x264_nal_t *nal;
    int i_nal, frame_size = 0, user_sar_width, user_sar_height;
    int64_t pts = 0, arrival_time = 0, frame_duration, buffer_duration;
    int64_t *pts2;
    float buffer_fill;
    obe_raw_frame_t *raw_frame;
    obe_coded_frame_t *coded_frame;

    /* TODO: check for width, height changes */

    /* Lock the mutex until we verify and fetch new parameters */
    pthread_mutex_lock( &encoder->encoder_mutex );

    enc_params->avc_param.pf_log = x264_logger;
    s = x264_encoder_open( &enc_params->avc_param );
    if( !s )
    {
        pthread_mutex_unlock( &encoder->encoder_mutex );
        fprintf( stderr, "[x264]: encoder configuration failed\n" );
        goto end;
    }

    /* Read back the parameters x264 actually applied and publish a copy so
     * the muxer/input threads can inspect them. */
    x264_encoder_parameters( s, &enc_params->avc_param );

    encoder->encoder_params = malloc( sizeof(enc_params->avc_param) );
    if( !encoder->encoder_params )
    {
        pthread_mutex_unlock( &encoder->encoder_mutex );
        syslog( LOG_ERR, "Malloc failed\n" );
        goto end;
    }
    memcpy( encoder->encoder_params, &enc_params->avc_param, sizeof(enc_params->avc_param) );

    encoder->is_ready = 1;
    /* XXX: This will need fixing for soft pulldown streams */
    frame_duration = av_rescale_q( 1, (AVRational){enc_params->avc_param.i_fps_den, enc_params->avc_param.i_fps_num}, (AVRational){1, OBE_CLOCK} );
    buffer_duration = frame_duration * enc_params->avc_param.sc.i_buffer_size;

    /* Broadcast because input and muxer can be stuck waiting for encoder */
    pthread_cond_broadcast( &encoder->encoder_cv );
    pthread_mutex_unlock( &encoder->encoder_mutex );

    /* Remember the user-configured SAR so it can win over per-frame guesses. */
    user_sar_width = enc_params->avc_param.vui.i_sar_width;
    user_sar_height = enc_params->avc_param.vui.i_sar_height;

    while( 1 )
    {
        pthread_mutex_lock( &encoder->encoder_mutex );
        if( encoder->cancel_thread )
        {
            pthread_mutex_unlock( &encoder->encoder_mutex );
            break;
        }

        /* Sleep until a raw frame arrives.  NOTE(review): plain 'if' around
         * cond_wait relies on the re-check of cancel_thread below; spurious
         * wakeups with an empty queue would fall through to frames[0] —
         * confirm the producer guarantees num_raw_frames > 0 on signal. */
        if( !encoder->num_raw_frames )
            pthread_cond_wait( &encoder->encoder_cv, &encoder->encoder_mutex );

        if( encoder->cancel_thread )
        {
            pthread_mutex_unlock( &encoder->encoder_mutex );
            break;
        }

        /* Reset the speedcontrol buffer if the source has dropped frames. Otherwise speedcontrol
         * stays in an underflow state and is locked to the fastest preset */
        pthread_mutex_lock( &h->drop_mutex );
        if( h->encoder_drop )
        {
            pthread_mutex_lock( &h->smoothing_mutex );
            h->smoothing_buffer_complete = 0;
            pthread_mutex_unlock( &h->smoothing_mutex );
            syslog( LOG_INFO, "Speedcontrol reset\n" );
            /* NOTE(review): argument order here differs from the sync call
             * further down (buffer_fill vs i_buffer_size first) — verify
             * both against the x264_speedcontrol_sync prototype. */
            x264_speedcontrol_sync( s, enc_params->avc_param.sc.i_buffer_size, enc_params->avc_param.sc.f_buffer_init, 0 );
            h->encoder_drop = 0;
        }
        pthread_mutex_unlock( &h->drop_mutex );

        raw_frame = encoder->frames[0];
        pthread_mutex_unlock( &encoder->encoder_mutex );

        /* NOTE(review): on this failure path raw_frame is neither released
         * nor removed from the queue — likely a leak; confirm intent. */
        if( convert_obe_to_x264_pic( &pic, raw_frame ) < 0 )
        {
            syslog( LOG_ERR, "Malloc failed\n" );
            break;
        }

        /* FIXME: if frames are dropped this might not be true */
        pic.i_pts = pts++;

        /* Carry the original OBE pts through x264 via the opaque pointer;
         * freed after pic_out comes back. */
        pts2 = malloc( sizeof(int64_t) );
        if( !pts2 )
        {
            syslog( LOG_ERR, "Malloc failed\n" );
            break;
        }
        pts2[0] = raw_frame->pts;
        pic.opaque = pts2;

        /* If the AFD has changed, then change the SAR. x264 will write the SAR at the next keyframe
         * TODO: allow user to force keyframes in order to be frame accurate */
        if( raw_frame->sar_width != enc_params->avc_param.vui.i_sar_width ||
            raw_frame->sar_height != enc_params->avc_param.vui.i_sar_height )
        {
            /* If the frame's SAR has been guessed but the user entered a reasonable SAR, then use it.
             * Otherwise, use the guessed SAR. */
            if( raw_frame->sar_guess && user_sar_width > 0 && user_sar_height > 0 )
            {
                enc_params->avc_param.vui.i_sar_width = user_sar_width;
                enc_params->avc_param.vui.i_sar_height = user_sar_height;
            }
            else
            {
                enc_params->avc_param.vui.i_sar_width = raw_frame->sar_width;
                enc_params->avc_param.vui.i_sar_height = raw_frame->sar_height;
            }

            x264_encoder_reconfig( s, &enc_params->avc_param );
        }

        /* Update speedcontrol based on the system state */
        if( h->obe_system == OBE_SYSTEM_TYPE_GENERIC )
        {
            pthread_mutex_lock( &h->smoothing_mutex );
            if( h->smoothing_buffer_complete )
            {
                /* Wait until a frame is sent out. */
                while( !h->smoothing_last_exit_time )
                    pthread_cond_wait( &h->smoothing_out_cv, &h->smoothing_mutex );

                /* time elapsed since last frame was removed */
                int64_t last_frame_delta = get_input_clock_in_mpeg_ticks( h ) - h->smoothing_last_exit_time;

                if( h->num_smoothing_frames )
                {
                    /* Estimate fullness from the dts span of the queued
                     * frames minus the time already elapsed. */
                    int64_t frame_durations = h->smoothing_frames[h->num_smoothing_frames-1]->real_dts - h->smoothing_frames[0]->real_dts + frame_duration;
                    buffer_fill = (float)(frame_durations - last_frame_delta)/buffer_duration;
                }
                else
                    buffer_fill = (float)(-1 * last_frame_delta)/buffer_duration;

                x264_speedcontrol_sync( s, buffer_fill, enc_params->avc_param.sc.i_buffer_size, 1 );
            }
            pthread_mutex_unlock( &h->smoothing_mutex );
        }

        frame_size = x264_encoder_encode( s, &nal, &i_nal, &pic, &pic_out );

        /* The raw frame is consumed regardless of encode outcome. */
        arrival_time = raw_frame->arrival_time;
        raw_frame->release_data( raw_frame );
        raw_frame->release_frame( raw_frame );
        remove_frame_from_encode_queue( encoder );

        if( frame_size < 0 )
        {
            syslog( LOG_ERR, "x264_encoder_encode failed\n" );
            break;
        }

        if( frame_size )
        {
            coded_frame = new_coded_frame( encoder->stream_id, frame_size );
            if( !coded_frame )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                break;
            }
            /* NALs from one encode call are contiguous, so a single copy
             * from the first payload covers the whole frame. */
            memcpy( coded_frame->data, nal[0].p_payload, frame_size );
            coded_frame->is_video = 1;
            coded_frame->len = frame_size;
            coded_frame->cpb_initial_arrival_time = pic_out.hrd_timing.cpb_initial_arrival_time;
            coded_frame->cpb_final_arrival_time = pic_out.hrd_timing.cpb_final_arrival_time;
            coded_frame->real_dts = pic_out.hrd_timing.cpb_removal_time;
            coded_frame->real_pts = pic_out.hrd_timing.dpb_output_time;
            /* Recover the original OBE pts stashed in pic.opaque above. */
            pts2 = pic_out.opaque;
            coded_frame->pts = pts2[0];
            coded_frame->random_access = pic_out.b_keyframe;
            coded_frame->priority = IS_X264_TYPE_I( pic_out.i_type );
            free( pic_out.opaque );

            if( h->obe_system == OBE_SYSTEM_TYPE_LOW_LATENCY )
            {
                coded_frame->arrival_time = arrival_time;
                //printf("\n Encode Latency %"PRIi64" \n", obe_mdate() - coded_frame->arrival_time );
            }

            add_to_smoothing_queue( h, coded_frame );
        }
    }

end:
    if( s )
        x264_encoder_close( s );
    free( enc_params );

    return NULL;
}
int BleX264Encoder::encode(unsigned char *rgbframe, mint64 pts, void *opaque) { Q_UNUSED(pts); unsigned char *src_buf = rgbframe; x264_picture_init(m_pictureIn); m_pictureIn->img.i_csp = X264_CSP_I420; m_pictureIn->img.i_plane = 3; m_pictureIn->i_type = X264_TYPE_AUTO; m_pictureIn->i_qpplus1 = 0; // @note why i_pts plus 1 everytime // because the timebase set as above. m_pictureIn->i_pts = ++m_encoded_frames; m_pictureIn->opaque = opaque; m_pictureIn->img.plane[0] = src_buf; m_pictureIn->img.plane[1] = src_buf + m_x264Param->i_height * m_x264Param->i_width; m_pictureIn->img.plane[2] = src_buf + m_x264Param->i_height * m_x264Param->i_width * 5 / 4; m_pictureIn->img.i_stride[0] = m_x264Param->i_width; m_pictureIn->img.i_stride[1] = m_x264Param->i_width >> 1; m_pictureIn->img.i_stride[2] = m_x264Param->i_width >> 1; x264_picture_t picOut; int nalNum; x264_nal_t* nalOut; int len = x264_encoder_encode(m_x264Encoder, &nalOut, &nalNum, m_pictureIn, &picOut); if (len < 0) { log_error("x264 encode failed"); return -1; } if (nalNum <= 0) { log_warn("frame delayed in encoder."); return -2; } if(!bFirstFrameProcessed && nalNum) { if(picOut.i_dts < 0) delayOffset = int(-picOut.i_dts); bFirstFrameProcessed = true; } float timeOffset = float(picOut.i_pts - picOut.i_dts) * BleAVQueue::instance()->timestampBuilder()->videoInternal(); BleVideoPacket *pkt = dynamic_cast<BleVideoPacket *> (BleAVQueue::instance()->find_unencoded_video()); BleAssert(pkt != NULL); MStream &body = pkt->data; unsigned char frameType; if (IS_X264_TYPE_I(picOut.i_type)) { frameType = 0x17; } else { frameType = 0x27; } body.write1Bytes(frameType); body.write1Bytes(0x01); body.write3Bytes((int)timeOffset); // NALU payload : 4bytes size + payload // NALU payload size : 4bytes size + payload size // for b_repeat_headers = 0 in x264_param_t // so NALU type is only IDR, SLICE(P or B frame) // so you must get SPS PPS before encoding any frame. 
for (int i = 0; i < nalNum; ++i) { x264_nal_t &nal = nalOut[i]; body.writeString((char*)nal.p_payload, nal.i_payload); } if (IS_X264_TYPE_I(picOut.i_type)) { log_trace("I"); } else if (IS_X264_TYPE_B(picOut.i_type)) { log_trace("B"); } else { log_trace("P"); } BleAVQueue::instance()->update_packet(pkt); return 0; }