/* Handles NAL units that trail an access unit (EOS/EOB/suffix SEI).
 * The NAL is stored in the "post" queue; EOS/EOB force the queues out.
 * Returns a completed output chain, or NULL if more data is expected. */
static block_t * ParseAUTail(decoder_t *p_dec, uint8_t i_nal_type, block_t *p_nalb)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_output = NULL;

    /* Trailing NALs always belong to the post queue of the current AU */
    block_ChainLastAppend(&p_sys->post.pp_chain_last, p_nalb);

    if(i_nal_type == HEVC_NAL_EOS || i_nal_type == HEVC_NAL_EOB)
    {
        /* End of sequence/bitstream: drain all queues as valid output */
        p_output = OutputQueues(p_sys, true);
    }
    else if(i_nal_type == HEVC_NAL_SUFF_SEI)
    {
        HxxxParse_AnnexB_SEI( p_nalb->p_buffer, p_nalb->i_buffer,
                              2 /* nal header */, ParseSEICallback, p_dec );
    }

    /* No frame data pending: nothing to hold the queues for, drain them
     * flagged as corrupted (not a properly terminated AU) */
    if(p_output == NULL && p_sys->frame.p_chain == NULL)
        p_output = OutputQueues(p_sys, false);

    return p_output;
}
/* Drains the pre/frame/post queues, in that order, into one output chain.
 * Merges the head flags of each queue into the resulting chain head and
 * marks it corrupted when b_valid is false.  Returns NULL if all queues
 * were empty. */
static block_t * OutputQueues(decoder_sys_t *p_sys, bool b_valid)
{
    block_t *p_chain = NULL;
    block_t **pp_chain_tail = &p_chain;
    /* Because block_ChainGather does not merge flags or times */
    uint32_t i_merged_flags = 0;

    if(p_sys->pre.p_chain)
    {
        i_merged_flags |= p_sys->pre.p_chain->i_flags;
        block_ChainLastAppend(&pp_chain_tail, p_sys->pre.p_chain);
        INITQ(pre);
    }

    if(p_sys->frame.p_chain)
    {
        i_merged_flags |= p_sys->frame.p_chain->i_flags;
        /* Head of output can be a dateless pre NAL: borrow the times of the
         * first frame block */
        if(p_chain && p_chain->i_dts == 0)
        {
            p_chain->i_dts = p_sys->frame.p_chain->i_dts;
            p_chain->i_pts = p_sys->frame.p_chain->i_pts;
        }
        block_ChainLastAppend(&pp_chain_tail, p_sys->frame.p_chain);
        INITQ(frame);
    }

    if(p_sys->post.p_chain)
    {
        i_merged_flags |= p_sys->post.p_chain->i_flags;
        block_ChainLastAppend(&pp_chain_tail, p_sys->post.p_chain);
        INITQ(post);
    }

    if(p_chain)
    {
        p_chain->i_flags |= i_merged_flags;
        if(!b_valid)
            p_chain->i_flags |= BLOCK_FLAG_CORRUPTED;
    }

    return p_chain;
}
/* Forwards (or buffers/drops) a block chain for a stream id.
 * With a live downstream id, data is withheld until a key frame is seen
 * (when required) and until the configured start DTS is reached. */
static void OutputSend( sout_stream_t *p_stream, sout_stream_id_t *id, block_t *p_block )
{
    sout_stream_sys_t *p_sys = p_stream->p_s ys;

    if( !id->id )
    {
        if( p_sys->b_drop )
        {
            block_ChainRelease( p_block );
        }
        else
        {
            /* No downstream id yet: buffer the chain and account its size */
            size_t i_chain_size;
            block_ChainProperties( p_block, NULL, &i_chain_size, NULL );
            p_sys->i_size += i_chain_size;
            block_ChainLastAppend( &id->pp_last, p_block );
        }
        return;
    }

    /* We wait until the first key frame (if needed) and
     * to be beyond i_dts_start (for streams without key frames) */
    if( id->b_wait_key )
    {
        if( p_block->i_flags & BLOCK_FLAG_TYPE_I )
        {
            id->b_wait_key = false;
            id->b_wait_start = false;
        }
        /* Untyped blocks: key-frame gating is meaningless, stop waiting */
        if( ( p_block->i_flags & BLOCK_FLAG_TYPE_MASK ) == 0 )
            id->b_wait_key = false;
    }

    if( id->b_wait_start && p_block->i_dts >= p_sys->i_dts_start )
        id->b_wait_start = false;

    if( id->b_wait_key || id->b_wait_start )
        block_ChainRelease( p_block );
    else
        sout_StreamIdSend( p_sys->p_out, id->id, p_block );
}
/* Handles NAL units that trail an access unit (EOS/EOB).
 * The NAL is stored in the "post" queue; EOS/EOB force the queues out.
 * Returns a completed output chain, or NULL if more data is expected. */
static block_t * ParseAUTail(decoder_t *p_dec, uint8_t i_nal_type, block_t *p_nalb)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_output = NULL;

    /* Trailing NALs always belong to the post queue of the current AU */
    block_ChainLastAppend(&p_sys->post.pp_chain_last, p_nalb);

    /* End of sequence/bitstream: drain all queues as valid output */
    if(i_nal_type == HEVC_NAL_EOS || i_nal_type == HEVC_NAL_EOB)
        p_output = OutputQueues(p_sys, true);

    /* No frame data pending: nothing to hold the queues for, drain them
     * flagged as corrupted (not a properly terminated AU) */
    if(p_output == NULL && p_sys->frame.p_chain == NULL)
        p_output = OutputQueues(p_sys, false);

    return p_output;
}
/* Handles an access-unit-head NAL (AUD/VPS/SPS/PPS/prefix SEI): may flush
 * the previous AU, records parameter sets (propagating their values into
 * fmt_out) and then stores the NAL in the "pre" queue.
 * Returns the completed previous AU chain, or NULL. */
static block_t * ParseAUHead(decoder_t *p_dec, uint8_t i_nal_type, block_t *p_nalb)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_ret = NULL;

    /* A head NAL arriving after frame/post data marks a new AU:
     * output the previous one */
    if(p_sys->post.p_chain || p_sys->frame.p_chain)
        p_ret = OutputQueues(p_sys, true);

    switch(i_nal_type)
    {
        case HEVC_NAL_AUD:
            /* An AUD also terminates a pending pre-only queue */
            if(!p_ret && p_sys->pre.p_chain)
                p_ret = OutputQueues(p_sys, true);
            break;

        case HEVC_NAL_VPS:
        case HEVC_NAL_SPS:
        case HEVC_NAL_PPS:
        {
            uint8_t i_id;
            if( hevc_get_xps_id(p_nalb->p_buffer, p_nalb->i_buffer, &i_id) &&
                InsertXPS(p_dec, i_nal_type, i_id, p_nalb) )
            {
                const hevc_sequence_parameter_set_t *p_sps;
                if( i_nal_type == HEVC_NAL_SPS &&
                   (p_sps = p_dec->p_sys->rgi_p_decsps[i_id]) )
                {
                    /* Propagate SPS values to the output format, but never
                     * overwrite values that are already set */
                    if(!p_dec->fmt_out.video.i_frame_rate)
                    {
                        (void) hevc_get_frame_rate( p_sps, p_dec->p_sys->rgi_p_decvps,
                                                    &p_dec->fmt_out.video.i_frame_rate,
                                                    &p_dec->fmt_out.video.i_frame_rate_base );
                    }

                    if(p_dec->fmt_out.video.primaries == COLOR_PRIMARIES_UNDEF)
                    {
                        (void) hevc_get_colorimetry( p_sps,
                                                     &p_dec->fmt_out.video.primaries,
                                                     &p_dec->fmt_out.video.transfer,
                                                     &p_dec->fmt_out.video.space,
                                                     &p_dec->fmt_out.video.b_color_range_full);
                    }

                    unsigned sizes[4];
                    if( hevc_get_picture_size( p_sps, &sizes[0], &sizes[1],
                                               &sizes[2], &sizes[3] ) )
                    {
                        if( p_dec->fmt_out.video.i_width != sizes[0] ||
                            p_dec->fmt_out.video.i_height != sizes[1] )
                        {
                            p_dec->fmt_out.video.i_width = sizes[0];
                            p_dec->fmt_out.video.i_height = sizes[1];
                        }
                    }

                    if(p_dec->fmt_out.i_profile == -1)
                    {
                        uint8_t i_profile, i_level;
                        if( hevc_get_sps_profile_tier_level( p_sps, &i_profile, &i_level ) )
                        {
                            p_dec->fmt_out.i_profile = i_profile;
                            p_dec->fmt_out.i_level = i_level;
                        }
                    }
                }
            }
            break;
        }

        case HEVC_NAL_PREF_SEI:
            HxxxParse_AnnexB_SEI( p_nalb->p_buffer, p_nalb->i_buffer,
                                  2 /* nal header */, ParseSEICallback, p_dec );
            break;

        default:
            break;
    }

    /* Head NALs are stored in the pre queue until the AU completes */
    block_ChainLastAppend(&p_sys->pre.pp_chain_last, p_nalb);

    return p_ret;
}
/* Handles a VCL (slice) NAL: detects the start of a new picture (flushing
 * the previous one), tags the block with the frame type and queues the
 * fragment in the "frame" chain.
 * Returns the previous, now complete, picture chain or NULL. */
static block_t *ParseVCL(decoder_t *p_dec, uint8_t i_nal_type, block_t *p_frag)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_outputchain = NULL;

    const uint8_t *p_buffer = p_frag->p_buffer;
    size_t i_buffer = p_frag->i_buffer;

    /* Work on the raw NAL payload: strip the AnnexB start code */
    if(unlikely(!hxxx_strip_AnnexB_startcode(&p_buffer, &i_buffer) || i_buffer < 3))
    {
        block_ChainLastAppend(&p_sys->frame.pp_chain_last, p_frag); /* might be corrupted */
        return NULL;
    }

    const uint8_t i_layer = hevc_getNALLayer( p_buffer );
    /* first slice flag: top bit of the byte following the 2-byte NAL header */
    bool b_first_slice_in_pic = p_buffer[2] & 0x80;
    if (b_first_slice_in_pic)
    {
        if(p_sys->frame.p_chain)
        {
            /* Starting new frame: return previous frame data for output */
            p_outputchain = OutputQueues(p_sys, p_sys->b_init_sequence_complete);
        }

        switch(i_nal_type)
        {
            case HEVC_NAL_BLA_W_LP:
            case HEVC_NAL_BLA_W_RADL:
            case HEVC_NAL_BLA_N_LP:
            case HEVC_NAL_IDR_W_RADL:
            case HEVC_NAL_IDR_N_LP:
            case HEVC_NAL_CRA:
                /* Random access point pictures are all flagged as I */
                p_frag->i_flags |= BLOCK_FLAG_TYPE_I;
                break;

            default:
            {
                /* Other slices: decode the slice header to get the type */
                hevc_slice_segment_header_t *p_sli =
                    hevc_decode_slice_header( p_buffer, i_buffer, true,
                                              p_sys->rgi_p_decsps, p_sys->rgi_p_decpps );
                if( p_sli )
                {
                    enum hevc_slice_type_e type;
                    if( hevc_get_slice_type( p_sli, &type ) )
                    {
                        if( type == HEVC_SLICE_TYPE_P )
                            p_frag->i_flags |= BLOCK_FLAG_TYPE_P;
                        else
                            p_frag->i_flags |= BLOCK_FLAG_TYPE_B;
                    }
                    hevc_rbsp_release_slice_header( p_sli );
                }
                else p_frag->i_flags |= BLOCK_FLAG_TYPE_B; /* undecodable header: worst case */
            }
            break;
        }
    }

    /* Init sequence is complete once a base-layer (i_layer == 0) I frame
     * arrives with all the needed parameter sets present */
    if(!p_sys->b_init_sequence_complete && i_layer == 0 &&
       (p_frag->i_flags & BLOCK_FLAG_TYPE_I) && XPSReady(p_sys))
    {
        p_sys->b_init_sequence_complete = true;
    }

    /* Closed captions collected before the sequence is decodable are stale */
    if( !p_sys->b_init_sequence_complete )
        cc_storage_reset( p_sys->p_ccs );

    block_ChainLastAppend(&p_sys->frame.pp_chain_last, p_frag);

    return p_outputchain;
}
/***************************************************************************** * ParseMPEGBlock: Re-assemble fragments into a block containing a picture *****************************************************************************/ static block_t *ParseMPEGBlock( decoder_t *p_dec, block_t *p_frag ) { decoder_sys_t *p_sys = p_dec->p_sys; block_t *p_pic = NULL; /* * Check if previous picture is finished */ if( ( p_sys->b_frame_slice && (p_frag->p_buffer[3] == 0x00 || p_frag->p_buffer[3] > 0xaf) ) && p_sys->p_seq == NULL ) { /* We have a picture but without a sequence header we can't * do anything */ msg_Dbg( p_dec, "waiting for sequence start" ); if( p_sys->p_frame ) block_ChainRelease( p_sys->p_frame ); p_sys->p_frame = NULL; p_sys->pp_last = &p_sys->p_frame; p_sys->b_frame_slice = false; } else if( p_sys->b_frame_slice && (p_frag->p_buffer[3] == 0x00 || p_frag->p_buffer[3] > 0xaf) ) { const bool b_eos = p_frag->p_buffer[3] == 0xb7; mtime_t i_duration; if( b_eos ) { block_ChainLastAppend( &p_sys->pp_last, p_frag ); p_frag = NULL; } p_pic = block_ChainGather( p_sys->p_frame ); if( b_eos ) p_pic->i_flags |= BLOCK_FLAG_END_OF_SEQUENCE; i_duration = (mtime_t)( 1000000 * p_sys->i_frame_rate_base / p_sys->i_frame_rate ); if( !p_sys->b_seq_progressive && p_sys->i_picture_structure != 0x03 ) { i_duration /= 2; } if( p_sys->b_seq_progressive ) { if( p_sys->i_top_field_first == 0 && p_sys->i_repeat_first_field == 1 ) { i_duration *= 2; } else if( p_sys->i_top_field_first == 1 && p_sys->i_repeat_first_field == 1 ) { i_duration *= 3; } } else { if( p_sys->i_picture_structure == 0x03 ) { if( p_sys->i_progressive_frame && p_sys->i_repeat_first_field ) { i_duration += i_duration / 2; } } } if( p_sys->b_low_delay || p_sys->i_picture_type == 0x03 ) { /* Trivial case (DTS == PTS) */ /* Correct interpolated dts when we receive a new pts/dts */ if( p_sys->i_pts > VLC_TS_INVALID ) p_sys->i_interpolated_dts = p_sys->i_pts; if( p_sys->i_dts > VLC_TS_INVALID ) p_sys->i_interpolated_dts = 
p_sys->i_dts; } else { /* Correct interpolated dts when we receive a new pts/dts */ if(p_sys->i_last_ref_pts > VLC_TS_INVALID && !p_sys->b_second_field) p_sys->i_interpolated_dts = p_sys->i_last_ref_pts; if( p_sys->i_dts > VLC_TS_INVALID ) p_sys->i_interpolated_dts = p_sys->i_dts; if( !p_sys->b_second_field ) p_sys->i_last_ref_pts = p_sys->i_pts; } p_pic->i_dts = p_sys->i_interpolated_dts; p_sys->i_interpolated_dts += i_duration; /* Set PTS only if we have a B frame or if it comes from the stream */ if( p_sys->i_pts > VLC_TS_INVALID ) { p_pic->i_pts = p_sys->i_pts; } else if( p_sys->i_picture_type == 0x03 ) { p_pic->i_pts = p_pic->i_dts; } else { p_pic->i_pts = VLC_TS_INVALID; } switch ( p_sys->i_picture_type ) { case 0x01: p_pic->i_flags |= BLOCK_FLAG_TYPE_I; break; case 0x02: p_pic->i_flags |= BLOCK_FLAG_TYPE_P; break; case 0x03: p_pic->i_flags |= BLOCK_FLAG_TYPE_B; break; } p_pic->i_length = p_sys->i_interpolated_dts - p_pic->i_dts; #if 0 msg_Dbg( p_dec, "pic: type=%d dts=%"PRId64" pts-dts=%"PRId64, p_sys->i_picture_type, p_pic->i_dts, p_pic->i_pts - p_pic->i_dts); #endif /* Reset context */ p_sys->p_frame = NULL; p_sys->pp_last = &p_sys->p_frame; p_sys->b_frame_slice = false; if( p_sys->i_picture_structure != 0x03 ) { p_sys->b_second_field = !p_sys->b_second_field; } else { p_sys->b_second_field = 0; } /* CC */ p_sys->b_cc_reset = true; p_sys->i_cc_pts = p_pic->i_pts; p_sys->i_cc_dts = p_pic->i_dts; p_sys->i_cc_flags = p_pic->i_flags; } if( !p_pic && p_sys->b_cc_reset ) { p_sys->b_cc_reset = false; cc_Flush( &p_sys->cc ); } if( !p_frag ) return p_pic; /* * Check info of current fragment */ if( p_frag->p_buffer[3] == 0xb8 ) { /* Group start code */ if( p_sys->p_seq && p_sys->i_seq_old > p_sys->i_frame_rate/p_sys->i_frame_rate_base ) { /* Useful for mpeg1: repeat sequence header every second */ block_ChainLastAppend( &p_sys->pp_last, block_Duplicate( p_sys->p_seq ) ); if( p_sys->p_ext ) { block_ChainLastAppend( &p_sys->pp_last, block_Duplicate( p_sys->p_ext ) ); 
} p_sys->i_seq_old = 0; } } else if( p_frag->p_buffer[3] == 0xb3 && p_frag->i_buffer >= 8 ) { /* Sequence header code */ static const int code_to_frame_rate[16][2] = { { 1, 1 }, /* invalid */ { 24000, 1001 }, { 24, 1 }, { 25, 1 }, { 30000, 1001 }, { 30, 1 }, { 50, 1 }, { 60000, 1001 }, { 60, 1 }, /* Unofficial 15fps from Xing*/ { 15, 1001 }, /* Unofficial economy rates from libmpeg3 */ { 5000, 1001 }, { 1000, 1001 }, { 12000, 1001 }, { 15000, 1001 }, { 1, 1 }, { 1, 1 } /* invalid */ }; if( p_sys->p_seq ) block_Release( p_sys->p_seq ); if( p_sys->p_ext ) block_Release( p_sys->p_ext ); p_sys->p_seq = block_Duplicate( p_frag ); p_sys->i_seq_old = 0; p_sys->p_ext = NULL; p_dec->fmt_out.video.i_width = ( p_frag->p_buffer[4] << 4)|(p_frag->p_buffer[5] >> 4 ); p_dec->fmt_out.video.i_height = ( (p_frag->p_buffer[5]&0x0f) << 8 )|p_frag->p_buffer[6]; p_sys->i_aspect_ratio_info = p_frag->p_buffer[7] >> 4; /* TODO: MPEG1 aspect ratio */ p_sys->i_frame_rate = code_to_frame_rate[p_frag->p_buffer[7]&0x0f][0]; p_sys->i_frame_rate_base = code_to_frame_rate[p_frag->p_buffer[7]&0x0f][1]; p_dec->fmt_out.video.i_frame_rate = p_sys->i_frame_rate; p_dec->fmt_out.video.i_frame_rate_base = p_sys->i_frame_rate_base; p_sys->b_seq_progressive = true; p_sys->b_low_delay = true; if ( !p_sys->b_inited ) { msg_Dbg( p_dec, "size %dx%d fps=%.3f", p_dec->fmt_out.video.i_width, p_dec->fmt_out.video.i_height, p_sys->i_frame_rate / (float)p_sys->i_frame_rate_base ); p_sys->b_inited = 1; } } else if( p_frag->p_buffer[3] == 0xb5 )
/*****************************************************************************
 * ParseNALBlock: parses annexB type NALs
 * All p_frag blocks are required to start with 0 0 0 1 4-byte startcode
 *****************************************************************************/
static block_t *ParseNALBlock( decoder_t *p_dec, bool *pb_ts_used, block_t *p_frag )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_pic = NULL;
    bool b_new_picture = false;

    /* NAL header byte sits right after the 4-byte startcode */
    const int i_nal_type = p_frag->p_buffer[4]&0x1f;
    const mtime_t i_frag_dts = p_frag->i_dts;
    const mtime_t i_frag_pts = p_frag->i_pts;

    /* Slices seen without active parameter sets: drop state and restart */
    if( p_sys->b_slice && (!p_sys->p_active_pps || !p_sys->p_active_sps) )
    {
        msg_Warn( p_dec, "waiting for SPS/PPS" );

        /* Reset context */
        p_sys->slice.type = H264_SLICE_TYPE_UNKNOWN;
        p_sys->b_slice = false;
        DropStoredNAL( p_sys );
        /* From SEI */
        p_sys->i_dpb_output_delay = 0;
        p_sys->i_pic_struct = UINT8_MAX;
        cc_storage_reset( p_sys->p_ccs );
    }

    if( i_nal_type >= H264_NAL_SLICE && i_nal_type <= H264_NAL_SLICE_IDR )
    {
        h264_slice_t newslice;

        if( i_nal_type == H264_NAL_SLICE_IDR )
        {
            /* IDR: decoding can (re)start from here */
            p_sys->b_recovered = true;
            p_sys->i_recovery_frame_cnt = UINT_MAX;
            p_sys->i_recoveryfnum = UINT_MAX;
        }

        if( ParseSliceHeader( p_dec, p_frag, &newslice ) )
        {
            /* Only IDR carries the id, to be propagated */
            if( newslice.i_idr_pic_id == -1 )
                newslice.i_idr_pic_id = p_sys->slice.i_idr_pic_id;

            b_new_picture = IsFirstVCLNALUnit( &p_sys->slice, &newslice );
            if( b_new_picture )
            {
                /* Parse SEI for that frame now we should have matched SPS/PPS */
                for( block_t *p_sei = p_sys->p_sei; p_sei; p_sei = p_sei->p_next )
                {
                    HxxxParse_AnnexB_SEI( p_sei->p_buffer, p_sei->i_buffer,
                                          1 /* nal header */, ParseSeiCallback, p_dec );
                }

                /* New picture starts: flush the previous one */
                if( p_sys->b_slice )
                    p_pic = OutputPicture( p_dec );
            }

            /* */
            p_sys->slice = newslice;
        }
        else
        {
            p_sys->p_active_pps = NULL;
            /* Fragment will be discarded later on */
        }
        p_sys->b_slice = true;
    }
    else if( i_nal_type == H264_NAL_SPS )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );

        PutSPS( p_dec, p_frag );
        p_sys->b_new_sps = true;

        /* Do not append the SPS because we will insert it on keyframes */
        p_frag = NULL;
    }
    else if( i_nal_type == H264_NAL_PPS )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );

        PutPPS( p_dec, p_frag );
        p_sys->b_new_pps = true;

        /* Do not append the PPS because we will insert it on keyframes */
        p_frag = NULL;
    }
    else if( i_nal_type == H264_NAL_SEI )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );

        /* SEIs are stored and parsed once the next picture starts */
        block_ChainLastAppend( &p_sys->pp_sei_last, p_frag );
        p_frag = NULL;
    }
    else if( i_nal_type == H264_NAL_END_OF_SEQ || i_nal_type == H264_NAL_END_OF_STREAM )
    {
        /* Early end of packetization */
        block_ChainLastAppend( &p_sys->pp_sei_last, p_frag );
        p_frag = NULL;

        /* important for still pictures/menus */
        p_sys->i_next_block_flags |= BLOCK_FLAG_END_OF_SEQUENCE;
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );
    }
    else if( i_nal_type == H264_NAL_AU_DELIMITER ||
             ( i_nal_type >= H264_NAL_PREFIX && i_nal_type <= H264_NAL_RESERVED_18 ) )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );

        if( i_nal_type == H264_NAL_AU_DELIMITER )
        {
            /* Only keep one AUD per access unit */
            if( p_sys->p_frame && (p_sys->p_frame->i_flags & BLOCK_FLAG_PRIVATE_AUD) )
            {
                block_Release( p_frag );
                p_frag = NULL;
            }
            else
            {
                p_frag->i_flags |= BLOCK_FLAG_PRIVATE_AUD;
            }
        }
    }

    /* Append the block */
    if( p_frag )
        block_ChainLastAppend( &p_sys->pp_frame_last, p_frag );

    /* First fragment of a new picture supplies the AU timestamps */
    *pb_ts_used = false;
    if( p_sys->i_frame_dts <= VLC_TS_INVALID &&
        p_sys->i_frame_pts <= VLC_TS_INVALID && b_new_picture )
    {
        p_sys->i_frame_dts = i_frag_dts;
        p_sys->i_frame_pts = i_frag_pts;
        *pb_ts_used = true;
        if( i_frag_dts > VLC_TS_INVALID )
            date_Set( &p_sys->dts, i_frag_dts );
    }

    if( p_pic && (p_pic->i_flags & BLOCK_FLAG_DROP) )
    {
        block_Release( p_pic );
        p_pic = NULL;
    }

    return p_pic;
}
/***
 * Encapsulation (packetization) suitable for all muxing standards
 * maps [DataUnit] -> EncapsulationUnit
 *
 * Accumulates data units into the current encapsulation unit; when a data
 * unit ends the EU, gathers the chain into a single block and returns it,
 * otherwise returns NULL.
 */
static block_t *dirac_BuildEncapsulationUnit( decoder_t *p_dec, block_t *p_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    assert(p_block->i_buffer >= 13 && 0x42424344 == GetDWBE( p_block->p_buffer ));

    if( p_sys->i_eu_pts <= VLC_TS_INVALID && p_sys->i_eu_dts <= VLC_TS_INVALID )
    {
        /* earliest block with pts/dts gets to set the pts/dts for the dated
         * encapsulation unit as a whole */
        /* NB, the 'earliest block' criteria is arbitrary */
        if( p_block->i_pts > VLC_TS_INVALID || p_block->i_dts > VLC_TS_INVALID )
        {
            p_sys->i_eu_pts = p_block->i_pts;
            p_sys->i_eu_dts = p_block->i_dts;
        }
    }

    /* inspectdataunit also updates flags for the EU.
     *  - if this is the first block in the EU, then it hasn't been added
     *    to the chain yet (so, p_block will become the front of the chain)
     *  - otherwise, use the flags of the chain (first block) */
    block_t *p_eu = p_sys->p_eu ? p_sys->p_eu : p_block;
    int i_block = dirac_InspectDataUnit( p_dec, &p_block, p_eu);

    if( !p_block )
    {
        /* block has been discarded during inspection */
        /* be careful: don't discard anything that is dated, as it needs to
         * go into the timegen loop -- set the DIRAC_DISCARD block flag
         * instead and it'll be dropped at output time */
        return NULL;
    }

    block_ChainLastAppend( &p_sys->pp_eu_last, p_block );

    dirac_block_encap_t *p_dbe = dirac_GetBlockEncap( p_block );
#ifdef SANITIZE_PREV_PARSE_OFFSET
    /* fixup prev_parse_offset to point to the last data unit to arrive */
    if( p_dbe )
    {
        SetDWBE( p_block->p_buffer + 9, p_sys->u_eu_last_npo );
        p_sys->u_eu_last_npo = p_dbe->u_last_next_offset;
    }
#endif

    if( i_block != DIRAC_DU_ENDS_EU )
    {
        /* encapsulation unit not ended */
        return NULL;
    }

    /* gather up encapsulation unit, reassociating the final
     * private state with the gathered block */
    /* pp_eu_last points at the p_next member of the last block in the
     * chain; recover the owning block_t with byte-based container-of
     * arithmetic.  (Subtracting offsetof() from a block_t* would scale the
     * offset by sizeof(block_t) and only worked by accident when p_next is
     * the struct's first member.) */
    block_t *p_eu_last =
        (block_t *)((char *)p_sys->pp_eu_last - offsetof( block_t, p_next ));
    p_dbe = dirac_RemoveBlockEncap( p_eu_last );

    uint8_t u_parse_code = p_block->p_buffer[4];

    /* gather up the encapsulation unit */
    p_block = block_ChainGather( p_sys->p_eu );
    assert( p_block ); /* block_ChainGather doesn't define when it frees chain */

    p_block->i_flags |= DIRAC_NON_DATED;
    if( p_dbe )
    {
        dirac_AddBlockEncap( &p_block, p_dbe );
        if( dirac_isPicture( u_parse_code ) )
            p_block->i_flags &= ~DIRAC_NON_DATED;
    }
    p_sys->p_eu = NULL;
    p_sys->pp_eu_last = &p_sys->p_eu;
    return p_block;
}
/*****************************************************************************
 * Packetize: form dated encapsulation units from anything
 *****************************************************************************/
static block_t *Packetize( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block = NULL;
    int i_flushing = 0;

    if( pp_block && *pp_block )
    {
        p_block = *pp_block;
        *pp_block = NULL;

        if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
        {
            /* pre-emptively insert an EOS at a discontinuity, protects
             * any decoders from any sudden changes */
            block_Release( p_block );
            p_block = dirac_EmitEOS( p_dec, 0 );
            if( p_block )
            {
                p_block->p_next = dirac_EmitEOS( p_dec, 13 );
                /* need two EOS to ensure it gets detected by synchro
                 * duplicates get discarded in forming encapsulation unit */
            }
        }
        else if( p_block->i_flags & BLOCK_FLAG_CORRUPTED )
        {
            /* silently discard corruption sentinels,
             * synchronizer will then discard affected data units.
             * do not produce an EOS data unit as this is very
             * disruptive to the stream (and may make a larger error). */
            block_Release( p_block );
            p_block = NULL;
        }
        if( p_block )
            block_BytestreamPush( &p_sys->bytestream, p_block );
    }

    /* form as many encapsulation units as possible, give up
     * when the synchronizer runs out of input data */
    while( ( p_block = dirac_DoSync( p_dec ) ) )
    {
        p_block = dirac_BuildEncapsulationUnit( p_dec, p_block );
        if( !p_block )
            continue;
        /* add to tail of output queue (ie, not reordered) */
        block_ChainLastAppend( &p_sys->pp_outqueue_last, p_block );
        /* insert encapsulation unit into timestamp generator
         * which then calculates some timestamps if required */
        i_flushing = dirac_TimeGenPush( p_dec, p_block );
        if( i_flushing )
            break;
    }

    block_t *p_output = NULL;
    block_t **pp_output = &p_output;

    /* extract all the dated packets from the head of the output queue */
    /* explicitly nondated packets repeat the previous timestamps to
     * stop vlc discarding them */
    while( (p_block = p_sys->p_outqueue) )
    {
        if( p_block->i_flags & DIRAC_DISCARD )
        {
            /* flagged for deletion during EU inspection: drop it now */
            p_sys->p_outqueue = p_block->p_next;
            p_block->p_next = NULL;
            block_Release( p_block );
            continue;
        }
        if( i_flushing || p_block->i_flags & DIRAC_NON_DATED )
        {
            /* reuse the last emitted timestamps */
            p_block->i_dts = p_sys->i_dts_last_out;
            p_block->i_pts = p_sys->i_pts_last_out;
        }
        else if( p_block->i_pts <= VLC_TS_INVALID ) break;
        else if( p_block->i_dts <= VLC_TS_INVALID ) break;

        p_sys->i_dts_last_out = p_block->i_dts;
        p_sys->i_pts_last_out = p_block->i_pts;

        p_sys->p_outqueue = p_block->p_next;
        p_block->p_next = NULL;
        /* clear any flags we set */
        p_block->i_flags &= ~BLOCK_FLAG_PRIVATE_MASK;
        block_ChainLastAppend( &pp_output, p_block );

        mtime_t i_delay = p_block->i_pts - p_block->i_dts;
        if( i_delay < 0 )
            msg_Err( p_dec, "pts - dts is negative(%"PRId64"): incorrect RoB size", i_delay );
    }

    if( i_flushing )
    {
        p_sys->i_eu_dts = p_sys->i_eu_pts = VLC_TS_INVALID;

        /* reset timegen state (except synchronizer) */
        p_sys->b_seen_seq_hdr = false;
        if( i_flushing < 2 )
        {
            /* this state isn't safe to lose if there was
             * an unsignalled discontinuity */
            p_sys->b_pts = p_sys->b_dts = false;
        }
        p_sys->b_tg_last_picnum = false;
        dirac_ReorderInit( &p_sys->reorder_buf );

        assert( p_sys->p_outqueue == NULL );
        p_sys->p_out_dts = NULL;
    }

    /* perform sanity check:
     *  if there were a block at the front of outqueue that never
     *  satisfied the extraction criteria, but all blocks after did,
     *  the output queue would grow bounded by the stream length.
     *  If there are 10 data units in the output queue, assume this
     *  has happened and purge all blocks that fail extraction criteria */
    int i_count;
    block_ChainProperties( p_sys->p_outqueue, &i_count, NULL, NULL );
    if( i_count > 9 )
    {
        p_block = p_sys->p_outqueue;
        while( p_block )
        {
            block_t *p_block_next = p_block->p_next;
            if( p_block->i_pts > VLC_TS_INVALID &&
                p_block->i_dts > VLC_TS_INVALID )
                break;
            block_Release( p_block );
            p_sys->p_outqueue = p_block = p_block_next;
        }
    }

    if( !p_sys->p_outqueue )
    {
        p_sys->pp_outqueue_last = &p_sys->p_outqueue;
    }
    return p_output;
}
/* Assembles the queued NAL fragments of the current access unit into a
 * single output picture block: optionally prepends stored SPS/PPS on
 * keyframes, derives the duration from VUI timing, and fixes up missing
 * timestamps across split AUs.
 * Returns the picture block, or NULL while the stream is not decodable. */
static block_t *OutputPicture( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_pic;

    /* Count down frames announced by an SEI recovery point; decoding is
     * considered recovered once the countdown completes */
    if ( !p_sys->b_header && p_sys->i_recovery_frames != -1 )
    {
        if( p_sys->i_recovery_frames == 0 )
        {
            msg_Dbg( p_dec, "Recovery from SEI recovery point complete" );
            p_sys->b_header = true;
        }
        --p_sys->i_recovery_frames;
    }

    /* Without headers or recovery info, only an I frame can start output */
    if( !p_sys->b_header && p_sys->i_recovery_frames == -1 &&
        p_sys->slice.i_frame_type != BLOCK_FLAG_TYPE_I)
        return NULL;

    const bool b_sps_pps_i = p_sys->slice.i_frame_type == BLOCK_FLAG_TYPE_I &&
                             p_sys->b_sps &&
                             p_sys->b_pps;
    if( b_sps_pps_i || p_sys->b_frame_sps || p_sys->b_frame_pps )
    {
        block_t *p_head = NULL;
        /* Keep a leading AU delimiter ahead of the inserted SPS/PPS */
        if( p_sys->p_frame->i_flags & BLOCK_FLAG_PRIVATE_AUD )
        {
            p_head = p_sys->p_frame;
            p_sys->p_frame = p_sys->p_frame->p_next;
            if( p_sys->p_frame == NULL )
                p_sys->pp_frame_last = &p_sys->p_frame;
            p_head->p_next = NULL;
        }

        block_t *p_list = NULL;
        block_t **pp_list_tail = &p_list;
        for( int i = 0; i <= H264_SPS_ID_MAX &&
             (b_sps_pps_i || p_sys->b_frame_sps); i++ )
        {
            if( p_sys->pp_sps[i] )
                block_ChainLastAppend( &pp_list_tail, block_Duplicate( p_sys->pp_sps[i] ) );
        }
        /* Inclusive bound, matching the SPS loop above: with '<' the PPS
         * holding the maximum id would never be re-inserted */
        for( int i = 0; i <= H264_PPS_ID_MAX &&
             (b_sps_pps_i || p_sys->b_frame_pps); i++ )
        {
            if( p_sys->pp_pps[i] )
                block_ChainLastAppend( &pp_list_tail, block_Duplicate( p_sys->pp_pps[i] ) );
        }
        if( b_sps_pps_i && p_list )
            p_sys->b_header = true;

        if( p_list )
            block_ChainAppend( &p_head, p_list );

        if( p_sys->p_frame )
            block_ChainAppend( &p_head, p_sys->p_frame );

        p_pic = block_ChainGather( p_head );
    }
    else
    {
        p_pic = block_ChainGather( p_sys->p_frame );
    }

    /* Number of clock timestamps covered by this picture (used for the
     * duration computation below) */
    unsigned i_num_clock_ts = 2;
    if( p_sys->b_frame_mbs_only == 0 )
    {
        if( p_sys->b_pic_struct_present_flag && p_sys->i_pic_struct < 9 )
        {
            const uint8_t rgi_numclock[9] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 };
            i_num_clock_ts = rgi_numclock[ p_sys->i_pic_struct ];
        }
        else if( p_sys->slice.i_field_pic_flag ) /* See D-1 and E-6 */
        {
            i_num_clock_ts = 1;
        }
    }

    if( p_sys->i_time_scale && p_pic->i_length == 0 )
    {
        p_pic->i_length = CLOCK_FREQ * i_num_clock_ts *
                          p_sys->i_num_units_in_tick / p_sys->i_time_scale;
    }

    mtime_t i_field_pts_diff = -1;
    if( p_sys->b_frame_mbs_only == 0 && p_sys->b_pic_struct_present_flag )
    {
        switch( p_sys->i_pic_struct )
        {
        /* Top and Bottom field slices */
        case 1:
        case 2:
            if( !p_sys->b_even_frame )
            {
                p_pic->i_flags |= (p_sys->i_pic_struct == 1) ? BLOCK_FLAG_TOP_FIELD_FIRST
                                                             : BLOCK_FLAG_BOTTOM_FIELD_FIRST;
            }
            else if( p_pic->i_pts <= VLC_TS_INVALID && p_sys->i_prev_pts > VLC_TS_INVALID &&
                     p_pic->i_length )
            {
                /* interpolate from even frame */
                i_field_pts_diff = p_pic->i_length;
            }
            p_sys->b_even_frame = !p_sys->b_even_frame;
            break;
        /* Each of the following slices contains multiple fields */
        case 3:
            p_pic->i_flags |= BLOCK_FLAG_TOP_FIELD_FIRST;
            p_sys->b_even_frame = false;
            break;
        case 4:
            p_pic->i_flags |= BLOCK_FLAG_BOTTOM_FIELD_FIRST;
            p_sys->b_even_frame = false;
            break;
        case 5:
            p_pic->i_flags |= BLOCK_FLAG_TOP_FIELD_FIRST;
            break;
        case 6:
            p_pic->i_flags |= BLOCK_FLAG_BOTTOM_FIELD_FIRST;
            break;
        default:
            p_sys->b_even_frame = false;
            break;
        }
    }

    /* set dts/pts to current block timestamps */
    p_pic->i_dts = p_sys->i_frame_dts;
    p_pic->i_pts = p_sys->i_frame_pts;

    /* Fixup missing timestamps after split (multiple AU/block) */
    if( p_pic->i_dts <= VLC_TS_INVALID )
        p_pic->i_dts = p_sys->i_prev_dts;

    /* PTS Fixup, interlaced fields (multiple AU/block) */
    if( p_pic->i_pts <= VLC_TS_INVALID && p_sys->i_time_scale )
    {
        mtime_t i_pts_delay = CLOCK_FREQ * p_sys->i_dpb_output_delay *
                              p_sys->i_num_units_in_tick / p_sys->i_time_scale;
        p_pic->i_pts = p_pic->i_dts + i_pts_delay;
        if( i_field_pts_diff >= 0 )
            p_pic->i_pts += i_field_pts_diff;
    }

    /* save for next pic fixups */
    p_sys->i_prev_dts = p_pic->i_dts;
    p_sys->i_prev_pts = p_pic->i_pts;

    p_pic->i_flags |= p_sys->slice.i_frame_type;
    p_pic->i_flags &= ~BLOCK_FLAG_PRIVATE_AUD;
    if( !p_sys->b_header )
        p_pic->i_flags |= BLOCK_FLAG_PREROLL;

    /* reset after output */
    p_sys->i_frame_dts = VLC_TS_INVALID;
    p_sys->i_frame_pts = VLC_TS_INVALID;
    p_sys->i_dpb_output_delay = 0;
    p_sys->slice.i_frame_type = 0;
    p_sys->p_frame = NULL;
    p_sys->pp_frame_last = &p_sys->p_frame;
    p_sys->b_frame_sps = false;
    p_sys->b_frame_pps = false;
    p_sys->b_slice = false;

    /* CC */
    cc_storage_commit( p_sys->p_ccs, p_pic );

    return p_pic;
}
/*****************************************************************************
 * ParseNALBlock: parses annexB type NALs
 * All p_frag blocks are required to start with 0 0 0 1 4-byte startcode
 *****************************************************************************/
static block_t *ParseNALBlock( decoder_t *p_dec, bool *pb_ts_used, block_t *p_frag )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_pic = NULL;
    bool b_new_picture = false;

    /* NAL header byte sits right after the 4-byte startcode */
    const int i_nal_ref_idc = (p_frag->p_buffer[4] >> 5)&0x03;
    const int i_nal_type = p_frag->p_buffer[4]&0x1f;
    const mtime_t i_frag_dts = p_frag->i_dts;
    const mtime_t i_frag_pts = p_frag->i_pts;

    /* Slices stored without SPS/PPS yet: drop them and reset the context */
    if( p_sys->b_slice && ( !p_sys->b_sps || !p_sys->b_pps ) )
    {
        block_ChainRelease( p_sys->p_frame );
        msg_Warn( p_dec, "waiting for SPS/PPS" );

        /* Reset context */
        p_sys->slice.i_frame_type = 0;
        p_sys->p_frame = NULL;
        p_sys->pp_frame_last = &p_sys->p_frame;
        p_sys->b_frame_sps = false;
        p_sys->b_frame_pps = false;
        p_sys->b_slice = false;
        cc_storage_reset( p_sys->p_ccs );
    }

    if( ( !p_sys->b_sps || !p_sys->b_pps ) &&
        i_nal_type >= H264_NAL_SLICE && i_nal_type <= H264_NAL_SLICE_IDR )
    {
        p_sys->b_slice = true;
        /* Fragment will be discarded later on */
    }
    else if( i_nal_type >= H264_NAL_SLICE && i_nal_type <= H264_NAL_SLICE_IDR )
    {
        slice_t slice;
        if(ParseSlice( p_dec, &b_new_picture, &slice, i_nal_ref_idc, i_nal_type, p_frag ))
        {
            /* A new picture begins: flush the previous one */
            if( b_new_picture && p_sys->b_slice )
                p_pic = OutputPicture( p_dec );

            /* */
            p_sys->slice = slice;
            p_sys->b_slice = true;
        }
    }
    else if( i_nal_type == H264_NAL_SPS )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );
        p_sys->b_frame_sps = true;

        PutSPS( p_dec, p_frag );

        /* Do not append the SPS because we will insert it on keyframes */
        p_frag = NULL;
    }
    else if( i_nal_type == H264_NAL_PPS )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );
        p_sys->b_frame_pps = true;

        PutPPS( p_dec, p_frag );

        /* Do not append the PPS because we will insert it on keyframes */
        p_frag = NULL;
    }
    else if( i_nal_type == H264_NAL_AU_DELIMITER ||
             i_nal_type == H264_NAL_SEI ||
             ( i_nal_type >= H264_NAL_PREFIX && i_nal_type <= H264_NAL_RESERVED_18 ) )
    {
        if( p_sys->b_slice )
            p_pic = OutputPicture( p_dec );

        /* Parse SEI for CC support */
        if( i_nal_type == H264_NAL_SEI )
        {
            HxxxParse_AnnexB_SEI( p_frag->p_buffer, p_frag->i_buffer,
                                  1 /* nal header */, ParseSeiCallback, p_dec );
        }
        else if( i_nal_type == H264_NAL_AU_DELIMITER )
        {
            /* Only keep one AUD per access unit */
            if( p_sys->p_frame && (p_sys->p_frame->i_flags & BLOCK_FLAG_PRIVATE_AUD) )
            {
                block_Release( p_frag );
                p_frag = NULL;
            }
            else
            {
                p_frag->i_flags |= BLOCK_FLAG_PRIVATE_AUD;
            }
        }
    }

    /* Append the block */
    if( p_frag )
        block_ChainLastAppend( &p_sys->pp_frame_last, p_frag );

    /* First fragment of a new picture supplies the AU timestamps */
    *pb_ts_used = false;
    if( p_sys->i_frame_dts <= VLC_TS_INVALID &&
        p_sys->i_frame_pts <= VLC_TS_INVALID && b_new_picture )
    {
        p_sys->i_frame_dts = i_frag_dts;
        p_sys->i_frame_pts = i_frag_pts;
        *pb_ts_used = true;
    }
    return p_pic;
}
/* ParseIDU: parse a VC-1 Independent Decoding Unit (BDU).
 * Queues the fragment into the frame being built and, when an IDU arrives that
 * cannot belong to the pending frame, closes that frame and returns it as one
 * gathered block (NULL otherwise).
 * *pb_used_ts is set to true when p_frag's dts/pts were consumed as the
 * timestamps of the frame under construction.
 * Ownership: p_frag is always consumed (queued or released). */
static block_t *ParseIDU( decoder_t *p_dec, bool *pb_used_ts, block_t *p_frag )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_pic;
    /* The BDU type is the byte following the 00 00 01 startcode */
    const idu_type_t idu = (const idu_type_t)(p_frag->p_buffer[3]); // sunqueen modify

    *pb_used_ts = false;
    /* Nothing can be parsed before a sequence header has been seen */
    if( !p_sys->b_sequence_header && idu != IDU_TYPE_SEQUENCE_HEADER )
    {
        msg_Warn( p_dec, "waiting for sequence header" );
        block_Release( p_frag );
        return NULL;
    }
    /* ... and an entry point must follow the sequence header */
    if( p_sys->b_sequence_header && !p_sys->b_entry_point && idu != IDU_TYPE_ENTRY_POINT )
    {
        msg_Warn( p_dec, "waiting for entry point" );
        block_Release( p_frag );
        return NULL;
    }
    /* TODO we do not gather ENTRY_POINT and SEQUENCE_DATA user data
     * But It should not be a problem for decoder */

    /* Do we have completed a frame: any IDU that is not frame/field/slice
     * (or their user data) nor end-of-sequence terminates the pending frame */
    p_pic = NULL;
    if( p_sys->b_frame &&
        idu != IDU_TYPE_FRAME_USER_DATA &&
        idu != IDU_TYPE_FIELD && idu != IDU_TYPE_FIELD_USER_DATA &&
        idu != IDU_TYPE_SLICE && idu != IDU_TYPE_SLICE_USER_DATA &&
        idu != IDU_TYPE_END_OF_SEQUENCE )
    {
        /* Prepend SH and EP on I, so every keyframe is self-contained */
        if( p_sys->p_frame->i_flags & BLOCK_FLAG_TYPE_I )
        {
            block_t *p_list = block_Duplicate( p_sys->sh.p_sh );
            block_ChainAppend( &p_list, block_Duplicate( p_sys->ep.p_ep ) );
            block_ChainAppend( &p_list, p_sys->p_frame );

            p_list->i_flags = p_sys->p_frame->i_flags;

            p_sys->p_frame = p_list;
        }

        /* Merge the accumulated chain into a single output block */
        p_pic = block_ChainGather( p_sys->p_frame );
        p_pic->i_dts = p_sys->i_frame_dts;
        p_pic->i_pts = p_sys->i_frame_pts;

        /* Remember the last valid dts as the interpolation base */
        if( p_pic->i_dts > VLC_TS_INVALID )
            p_sys->i_interpolated_dts = p_pic->i_dts;

        /* We can interpolate dts/pts only if we have a frame rate */
        if( p_dec->fmt_out.video.i_frame_rate != 0 && p_dec->fmt_out.video.i_frame_rate_base != 0 )
        {
            if( p_sys->i_interpolated_dts > VLC_TS_INVALID )
                p_sys->i_interpolated_dts += INT64_C(1000000) *
                                             p_dec->fmt_out.video.i_frame_rate_base /
                                             p_dec->fmt_out.video.i_frame_rate;

            //msg_Dbg( p_dec, "-------------- XXX0 dts=%"PRId64" pts=%"PRId64" interpolated=%"PRId64,
            //         p_pic->i_dts, p_pic->i_pts, p_sys->i_interpolated_dts );
            if( p_pic->i_dts <= VLC_TS_INVALID )
                p_pic->i_dts = p_sys->i_interpolated_dts;

            if( p_pic->i_pts <= VLC_TS_INVALID )
            {
                /* Without B frames (or for a B frame itself), pts == dts */
                if( !p_sys->sh.b_has_bframe || (p_pic->i_flags & BLOCK_FLAG_TYPE_B ) )
                    p_pic->i_pts = p_pic->i_dts;
                /* TODO compute pts for other case */
            }
        }

        //msg_Dbg( p_dec, "-------------- dts=%"PRId64" pts=%"PRId64, p_pic->i_dts, p_pic->i_pts );

        /* Reset context */
        p_sys->b_frame = false;
        p_sys->i_frame_dts = VLC_TS_INVALID;
        p_sys->i_frame_pts = VLC_TS_INVALID;
        p_sys->p_frame = NULL;
        p_sys->pp_last = &p_sys->p_frame;
    }

    /* First fragment of the (new) frame donates its timestamps */
    if( p_sys->i_frame_dts <= VLC_TS_INVALID && p_sys->i_frame_pts <= VLC_TS_INVALID )
    {
        p_sys->i_frame_dts = p_frag->i_dts;
        p_sys->i_frame_pts = p_frag->i_pts;
        *pb_used_ts = true;
    }

    /* We will add back SH and EP on I frames, so they are not queued here;
     * instead they are released at the end of this function */
    block_t *p_release = NULL;
    if( idu != IDU_TYPE_SEQUENCE_HEADER && idu != IDU_TYPE_ENTRY_POINT )
        block_ChainLastAppend( &p_sys->pp_last, p_frag );
    else
        p_release = p_frag;

    /* Parse IDU */
    if( idu == IDU_TYPE_SEQUENCE_HEADER )
    {
        es_format_t *p_es = &p_dec->fmt_out;
        bs_t s;
        int i_profile;
        uint8_t ridu[32];
        int i_ridu = sizeof(ridu);

        /* Keep a copy of the sequence header to prepend on keyframes */
        if( p_sys->sh.p_sh )
            block_Release( p_sys->sh.p_sh );
        p_sys->sh.p_sh = block_Duplicate( p_frag );

        /* Extract the raw IDU payload (skipping the 4-byte startcode+type) */
        DecodeRIDU( ridu, &i_ridu, &p_frag->p_buffer[4], p_frag->i_buffer - 4 );

        /* Auto detect VC-1_SPMP_PESpacket_PayloadFormatHeader (SMPTE RP 227) for simple/main profile
         * TODO find a test case and valid it */
        if( i_ridu > 4 && (ridu[0]&0x80) == 0 ) /* for advanced profile, the first bit is 1 */
        {
            video_format_t *p_v = &p_dec->fmt_in.video;
            const size_t i_potential_width  = GetWBE( &ridu[0] );
            const size_t i_potential_height = GetWBE( &ridu[2] );

            if( i_potential_width >= 2  && i_potential_width <= 8192 &&
                i_potential_height >= 2 && i_potential_height <= 8192 )
            {
                /* Only trust the embedded size when fmt_in has none, or agrees */
                if( ( p_v->i_width <= 0 && p_v->i_height <= 0 ) ||
                    ( p_v->i_width  == i_potential_width &&  p_v->i_height == i_potential_height ) )
                {
                    static const uint8_t startcode[4] = { 0x00, 0x00, 0x01, IDU_TYPE_SEQUENCE_HEADER };
                    p_es->video.i_width  = i_potential_width;
                    p_es->video.i_height = i_potential_height;

                    /* Remove it */
                    p_frag->p_buffer += 4;
                    p_frag->i_buffer -= 4;
                    memcpy( p_frag->p_buffer, startcode, sizeof(startcode) );
                }
            }
        }

        /* Parse it */
        bs_init( &s, ridu, i_ridu );
        i_profile = bs_read( &s, 2 );
        if( i_profile == 3 )
        {
            const int i_level = bs_read( &s, 3 );

            /* Advanced profile */
            p_sys->sh.b_advanced_profile = true;
            p_sys->sh.b_range_reduction = false;
            p_sys->sh.b_has_bframe = true;

            bs_skip( &s, 2+3+5+1 );     // chroma format + frame rate Q + bit rate Q + postprocflag

            /* Coded sizes are stored as (size/2)-1 in the bitstream */
            p_es->video.i_width  = 2*bs_read( &s, 12 )+2;
            p_es->video.i_height = 2*bs_read( &s, 12 )+2;

            if( !p_sys->b_sequence_header )
                msg_Dbg( p_dec, "found sequence header for advanced profile level L%d resolution %dx%d",
                         i_level, p_es->video.i_width, p_es->video.i_height);

            bs_skip( &s, 1 );// pulldown
            p_sys->sh.b_interlaced = bs_read( &s, 1 );
            bs_skip( &s, 1 );// frame counter
            p_sys->sh.b_frame_interpolation = bs_read( &s, 1 );
            bs_skip( &s, 1 );           // Reserved
            bs_skip( &s, 1 );           // Psf

            if( bs_read( &s, 1 ) )      /* Display extension */
            {
                const int i_display_width  = bs_read( &s, 14 )+1;
                const int i_display_height = bs_read( &s, 14 )+1;

                /* Derive sample aspect ratio from display vs coded size */
                p_es->video.i_sar_num = i_display_width  * p_es->video.i_height;
                p_es->video.i_sar_den = i_display_height * p_es->video.i_width;

                if( !p_sys->b_sequence_header )
                    msg_Dbg( p_dec, "display size %dx%d", i_display_width, i_display_height );

                if( bs_read( &s, 1 ) )  /* Pixel aspect ratio (PAR/SAR) */
                {
                    /* Table of predefined aspect ratios, indexed by the 4-bit code */
                    static const int p_ar[16][2] = {
                        { 0, 0}, { 1, 1}, {12,11}, {10,11}, {16,11}, {40,33},
                        {24,11}, {20,11}, {32,11}, {80,33}, {18,11}, {15,11},
                        {64,33}, {160,99},{ 0, 0}, { 0, 0}
                    };
                    int i_ar = bs_read( &s, 4 );
                    unsigned i_ar_w, i_ar_h;

                    if( i_ar == 15 )    /* 15 = explicit width/height follow */
                    {
                        i_ar_w = bs_read( &s, 8 );
                        i_ar_h = bs_read( &s, 8 );
                    }
                    else
                    {
                        i_ar_w = p_ar[i_ar][0];
                        i_ar_h = p_ar[i_ar][1];
                    }
                    vlc_ureduce( &i_ar_w, &i_ar_h, i_ar_w, i_ar_h, 0 );
                    if( !p_sys->b_sequence_header )
                        msg_Dbg( p_dec, "aspect ratio %d:%d", i_ar_w, i_ar_h );
                }
            }
            if( bs_read( &s, 1 ) )      /* Frame rate */
            {
                int i_fps_num = 0;
                int i_fps_den = 0;
                if( bs_read( &s, 1 ) )
                {
                    /* Explicit rate: frameratenr counted in 1/32 units */
                    i_fps_num = bs_read( &s, 16 )+1;
                    i_fps_den = 32;
                }
                else
                {
                    const int i_nr = bs_read( &s, 8 );
                    const int i_dn = bs_read( &s, 4 );

                    switch( i_nr )
                    {
                    case 1: i_fps_num = 24000; break;
                    case 2: i_fps_num = 25000; break;
                    case 3: i_fps_num = 30000; break;
                    case 4: i_fps_num = 50000; break;
                    case 5: i_fps_num = 60000; break;
                    case 6: i_fps_num = 48000; break;
                    case 7: i_fps_num = 72000; break;
                    }
                    switch( i_dn )
                    {
                    case 1: i_fps_den = 1000; break;
                    case 2: i_fps_den = 1001; break;
                    }
                }
                if( i_fps_num != 0 && i_fps_den != 0 )
                    vlc_ureduce( &p_es->video.i_frame_rate, &p_es->video.i_frame_rate_base,
                                 i_fps_num, i_fps_den, 0 );

                if( !p_sys->b_sequence_header )
                    msg_Dbg( p_dec, "frame rate %d/%d",
                             p_es->video.i_frame_rate, p_es->video.i_frame_rate_base );
            }
        }
        else
        {
            /* Simple and main profile */
            p_sys->sh.b_advanced_profile = false;
            p_sys->sh.b_interlaced = false;

            if( !p_sys->b_sequence_header )
                msg_Dbg( p_dec, "found sequence header for %s profile",
                         i_profile == 0 ? "simple" : "main" );

            bs_skip( &s, 2+3+5+1+1+     // reserved + frame rate Q + bit rate Q + loop filter + reserved
                         1+1+1+1+2+     // multiresolution + reserved + fast uv mc + extended mv + dquant
                         1+1+1+1 );     // variable size transform + reserved + overlap + sync marker
            p_sys->sh.b_range_reduction = bs_read( &s, 1 );
            if( bs_read( &s, 3 ) > 0 )  /* maxbframes > 0 => stream may contain B frames */
                p_sys->sh.b_has_bframe = true;
            else
                p_sys->sh.b_has_bframe = false;
            bs_skip( &s, 2 );           // quantizer
            p_sys->sh.b_frame_interpolation = bs_read( &s, 1 );
        }
        p_sys->b_sequence_header = true;
        BuildExtraData( p_dec );
    }
    else if( idu == IDU_TYPE_ENTRY_POINT )
    {
        /* Keep a copy of the entry point to prepend on keyframes */
        if( p_sys->ep.p_ep )
            block_Release( p_sys->ep.p_ep );
        p_sys->ep.p_ep = block_Duplicate( p_frag );

        if( !p_sys->b_entry_point )
            msg_Dbg( p_dec, "found entry point" );

        p_sys->b_entry_point = true;
        BuildExtraData( p_dec );
    }
    else if( idu == IDU_TYPE_FRAME )
    {
        bs_t s;
        uint8_t ridu[8];
        int i_ridu = sizeof(ridu);

        /* Extract the raw IDU */
        DecodeRIDU( ridu, &i_ridu, &p_frag->p_buffer[4], p_frag->i_buffer - 4 );

        /* Parse it + interpolate pts/dts if possible */
        bs_init( &s, ridu, i_ridu );

        if( p_sys->sh.b_advanced_profile )
        {
            int i_fcm = 0;  /* frame coding mode: 0=progressive, 1=field, 2=frame interlaced */

            if( p_sys->sh.b_interlaced )
            {
                if( bs_read( &s, 1 ) )
                {
                    if( bs_read( &s, 1 ) )
                        i_fcm = 1;  /* interlaced field */
                    else
                        i_fcm = 2;  /* interlaced frame */
                }
            }

            if( i_fcm == 1 ) /*interlaced field */
            {
                /* XXX for mixed I/P we should check reference usage before marking them I (too much work) */
                switch( bs_read( &s, 3 ) )
                {
                case 0: /* II */
                case 1: /* IP */
                case 2: /* PI */
                    /* NOTE(review): the flag is OR'ed three times; redundant but harmless */
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_I;
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_I;
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_I;
                    break;
                case 3: /* PP */
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_P;
                    break;
                case 4: /* BB */
                case 5: /* BBi */
                case 6: /* BiB */
                case 7: /* BiBi */
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_B;
                    break;
                }
            }
            else
            {
                /* Variable-length picture type code: 0=P, 10=B, 110=I, 1110=Bi, 1111=P skip */
                if( !bs_read( &s, 1 ) )
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_P;
                else if( !bs_read( &s, 1 ) )
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_B;
                else if( !bs_read( &s, 1 ) )
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_I;
                else if( !bs_read( &s, 1 ) )
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_B;   /* Bi */
                else
                    p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_P;   /* P Skip */
            }
        }
        else
        {
            /* Simple/main profile frame header */
            if( p_sys->sh.b_frame_interpolation )
                bs_skip( &s, 1 );   // interpolate
            bs_skip( &s, 2 );       // frame count
            if( p_sys->sh.b_range_reduction )
                bs_skip( &s, 1 );   // range reduction

            if( bs_read( &s, 1 ) )
                p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_P;
            else if( !p_sys->sh.b_has_bframe || bs_read( &s, 1 ) )
                p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_I;
            else
                p_sys->p_frame->i_flags |= BLOCK_FLAG_TYPE_B;
        }
        p_sys->b_frame = true;
    }

    if( p_release )
        block_Release( p_release );
    return p_pic;
}
/* ts_sections_assembler_Append: feed one PSI section into the assembler.
 * Returns a complete (gathered) payload block when the final section of a
 * table has been received, NULL while still gathering or on any rejection.
 * Ownership: p_content is always consumed (queued, returned, or released).
 * When p_as->b_raw is set, section headers are left in place; otherwise the
 * buffer is advanced past the header and trimmed to the payload. */
static block_t * ts_sections_assembler_Append( ts_sections_assembler_t *p_as, block_t *p_content )
{
    /* section_syntax_indicator (bit 7 of byte 1): 0 = short form */
    const bool b_short = !( p_content->p_buffer[1] & 0x80 );
    /* 12-bit section_length field */
    const uint16_t i_private_length = ((p_content->p_buffer[1] & 0x0f) << 8) |
                                        p_content->p_buffer[2];
    if( b_short )
    {
        /* Short, unsegmented section */
        /* 0xFFD is the max private section length; also bound by the buffer */
        if(unlikely(( i_private_length > 0xFFD || i_private_length > p_content->i_buffer - 3 )))
        {
            block_Release( p_content );
            return NULL;
        }
        if( !p_as->b_raw )
        {
            /* Strip the 3-byte section header */
            p_content->p_buffer += 3;
            p_content->i_buffer = i_private_length;
        }
        return p_content;
    }
    else /* Payload can span on multiple sections */
    {
        if (unlikely( p_content->i_buffer < (size_t)12 + i_private_length))
        {
            block_Release( p_content );
            return NULL;
        }
        /* TODO: CRC32 */
        const uint8_t i_version = ( p_content->p_buffer[5] & 0x3F ) >> 1;
        const uint8_t i_current = p_content->p_buffer[5] & 0x01;    /* current_next_indicator */
        const uint8_t i_section = p_content->p_buffer[6];           /* section_number */
        const uint8_t i_section_last = p_content->p_buffer[7];      /* last_section_number */

        if( !p_as->b_raw )
        {
            /* Strip the 8-byte long-form header; -4 drops the trailing CRC32 */
            p_content->p_buffer += 3 + 5;
            p_content->i_buffer = i_private_length - 4;
        }

        /* Reject: not-yet-applicable tables, version mismatches within a
         * gathering run, duplicates of the previous version, bad numbering */
        if( !i_current ||
           ( p_as->i_version != -1 && i_version != p_as->i_version ) || /* Only merge same version */
            p_as->i_prev_version == i_version || /* No duplicates */
            i_section > i_section_last )
        {
            block_Release( p_content );
            return NULL;
        }

        if( i_section != p_as->i_prev_section + 1 ) /* first or unfinished sections gathering */
        {
            ts_sections_assembler_Reset( p_as, false );
            /* A gathering run may only restart at section 0 of a new version */
            if( i_section > 0 || i_version == p_as->i_prev_version )
            {
                block_Release( p_content );
                return NULL;
            }
        }

        p_as->i_version = i_version;
        p_as->i_prev_section = i_section;

        /* Add one more section */
        block_ChainLastAppend( &p_as->pp_sections_tail, p_content );

        /* We finished gathering our sections */
        if( i_section == i_section_last )
        {
            block_t *p_all_sections = block_ChainGather( p_as->p_sections );
            /* Detach before Reset so the gathered chain isn't freed */
            p_as->p_sections = NULL;
            p_as->pp_sections_tail = &p_as->p_sections;
            p_as->i_prev_version = i_version;
            ts_sections_assembler_Reset( p_as, false );
            return p_all_sections;
        }
    }

    return NULL;
}
/* OutputPicture: close the current H.264 access unit and emit it as one block.
 * Rebuilds the NAL sequence (AUD, then SPS/PPS when needed, then SEI, then
 * slices), computes POC-based timestamps, interpolates missing pts/dts, sets
 * frame-type and recovery flags, and resets the per-AU parser state.
 * Returns NULL when no frame is pending or no active SPS/PPS is bound. */
static block_t *OutputPicture( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_pic = NULL;
    block_t **pp_pic_last = &p_pic;

    if( unlikely(!p_sys->p_frame) )
    {
        assert( p_sys->p_frame );
        return NULL;
    }

    /* Bind matched/referred PPS and SPS */
    const h264_picture_parameter_set_t *p_pps = p_sys->p_active_pps;
    const h264_sequence_parameter_set_t *p_sps = p_sys->p_active_sps;
    if( !p_pps || !p_sps )
    {
        DropStoredNAL( p_sys );
        return NULL;
    }

    if( !p_sys->b_recovered && p_sys->i_recoveryfnum == UINT_MAX &&
         p_sys->i_recovery_frame_cnt == UINT_MAX && p_sys->slice.type == H264_SLICE_TYPE_I )
    {
        /* No way to recover using SEI, just sync on I Slice */
        p_sys->b_recovered = true;
    }

    /* I slices get SPS/PPS prepended so the keyframe is self-contained */
    bool b_need_sps_pps = p_sys->slice.type == H264_SLICE_TYPE_I &&
                          p_sys->p_active_pps && p_sys->p_active_sps;

    /* Handle SEI recovery */
    if ( !p_sys->b_recovered && p_sys->i_recovery_frame_cnt != UINT_MAX &&
         p_sys->i_recoveryfnum == UINT_MAX )
    {
        /* Recovery completes once frame_num reaches this target */
        p_sys->i_recoveryfnum = p_sys->slice.i_frame_num + p_sys->i_recovery_frame_cnt;
        b_need_sps_pps = true; /* SPS/PPS must be inserted for SEI recovery */
        msg_Dbg( p_dec, "Recovering using SEI, prerolling %u reference pics",
                 p_sys->i_recovery_frame_cnt );
    }

    if( p_sys->i_recoveryfnum != UINT_MAX )
    {
        assert(p_sys->b_recovered == false);
        /* i_log2_max_frame_num presumably stores log2_max_frame_num_minus4,
         * hence the +4 — TODO confirm against the SPS parser */
        const unsigned maxFrameNum = 1 << (p_sps->i_log2_max_frame_num + 4);

        /* First clause handles frame_num wraparound past the recovery target */
        if( (p_sys->i_recoveryfnum > maxFrameNum &&
            (unsigned)p_sys->slice.i_frame_num <= maxFrameNum / 2 &&
            (unsigned)p_sys->slice.i_frame_num >= p_sys->i_recoveryfnum % maxFrameNum) ||
            (unsigned)p_sys->slice.i_frame_num >= p_sys->i_recoveryfnum )
        {
            p_sys->i_recoveryfnum = UINT_MAX;
            p_sys->b_recovered = true;
            msg_Dbg( p_dec, "Recovery from SEI recovery point complete" );
        }
    }

    /* Gather PPS/SPS if required */
    block_t *p_xpsnal = NULL;
    block_t **pp_xpsnal_tail = &p_xpsnal;
    if( b_need_sps_pps || p_sys->b_new_sps || p_sys->b_new_pps )
    {
        for( int i = 0; i <= H264_SPS_ID_MAX && (b_need_sps_pps || p_sys->b_new_sps); i++ )
        {
            if( p_sys->sps[i].p_block )
                block_ChainLastAppend( &pp_xpsnal_tail, block_Duplicate( p_sys->sps[i].p_block ) );
        }
        /* NOTE(review): SPS loop uses <= while this PPS loop uses < — confirm
         * whether the PPS slot at H264_PPS_ID_MAX is intentionally skipped */
        for( int i = 0; i < H264_PPS_ID_MAX && (b_need_sps_pps || p_sys->b_new_pps); i++ )
        {
            if( p_sys->pps[i].p_block )
                block_ChainLastAppend( &pp_xpsnal_tail, block_Duplicate( p_sys->pps[i].p_block ) );
        }
    }

    /* Now rebuild NAL Sequence, inserting PPS/SPS if any */
    if( p_sys->p_frame->i_flags & BLOCK_FLAG_PRIVATE_AUD )
    {
        /* Detach the leading AUD so it stays first in the rebuilt AU */
        block_t *p_au = p_sys->p_frame;
        p_sys->p_frame = p_au->p_next;
        p_au->p_next = NULL;
        p_au->i_flags &= ~BLOCK_FLAG_PRIVATE_AUD;
        block_ChainLastAppend( &pp_pic_last, p_au );
    }

    if( p_xpsnal )
        block_ChainLastAppend( &pp_pic_last, p_xpsnal );

    if( p_sys->p_sei )
        block_ChainLastAppend( &pp_pic_last, p_sys->p_sei );

    assert( p_sys->p_frame );
    if( p_sys->p_frame )
        block_ChainLastAppend( &pp_pic_last, p_sys->p_frame );

    /* Reset chains, now empty */
    p_sys->p_frame = NULL;
    p_sys->pp_frame_last = &p_sys->p_frame;
    p_sys->p_sei = NULL;
    p_sys->pp_sei_last = &p_sys->p_sei;

    p_pic = block_ChainGather( p_pic );
    if( !p_pic )
        return NULL;

    /* for PTS Fixup, interlaced fields (multiple AU/block) */
    int tFOC = 0, bFOC = 0, PictureOrderCount = 0;
    h264_compute_poc( p_sps, &p_sys->slice, &p_sys->pocctx,
                      &PictureOrderCount, &tFOC, &bFOC );

    unsigned i_num_clock_ts = h264_get_num_ts( p_sps, &p_sys->slice,
                                               p_sys->i_pic_struct, tFOC, bFOC );

    if( p_sps->frame_mbs_only_flag == 0 && p_sps->vui.b_pic_struct_present_flag )
    {
        switch( p_sys->i_pic_struct )
        {
        /* Top and Bottom field slices */
        case 1:
        case 2:
            p_pic->i_flags |= BLOCK_FLAG_SINGLE_FIELD;
            p_pic->i_flags |= (!p_sys->slice.i_bottom_field_flag)
                            ? BLOCK_FLAG_TOP_FIELD_FIRST
                            : BLOCK_FLAG_BOTTOM_FIELD_FIRST;
            break;
        /* Each of the following slices contains multiple fields */
        case 3:
            p_pic->i_flags |= BLOCK_FLAG_TOP_FIELD_FIRST;
            break;
        case 4:
            p_pic->i_flags |= BLOCK_FLAG_BOTTOM_FIELD_FIRST;
            break;
        case 5:
            p_pic->i_flags |= BLOCK_FLAG_TOP_FIELD_FIRST;
            break;
        case 6:
            p_pic->i_flags |= BLOCK_FLAG_BOTTOM_FIELD_FIRST;
            break;
        default:
            break;
        }
    }

    /* set dts/pts to current block timestamps */
    p_pic->i_dts = p_sys->i_frame_dts;
    p_pic->i_pts = p_sys->i_frame_pts;

    /* Fixup missing timestamps after split (multiple AU/block)*/
    if( p_pic->i_dts <= VLC_TS_INVALID )
        p_pic->i_dts = date_Get( &p_sys->dts );

    /* An I slice invalidates the previous dated-POC anchor */
    if( p_sys->slice.type == H264_SLICE_TYPE_I )
        p_sys->prevdatedpoc.pts = VLC_TS_INVALID;

    if( p_pic->i_pts == VLC_TS_INVALID )
    {
        if( p_sys->prevdatedpoc.pts > VLC_TS_INVALID &&
            date_Get( &p_sys->dts ) != VLC_TS_INVALID )
        {
            /* Interpolate pts from the last dated POC by the field-count delta */
            date_t pts = p_sys->dts;
            date_Set( &pts, p_sys->prevdatedpoc.pts );

            int diff = tFOC - p_sys->prevdatedpoc.num;
            if( diff > 0 )
                date_Increment( &pts, diff );
            else
                date_Decrement( &pts, -diff );

            p_pic->i_pts = date_Get( &pts );
        }
        /* In case there's no PTS at all */
        else if( p_sys->slice.i_nal_ref_idc == 0 &&
                 p_sys->slice.type == H264_SLICE_TYPE_B )
        {
            /* Non-reference B frames are output in decode order */
            p_pic->i_pts = p_pic->i_dts;
        }
        else if( p_sys->slice.type == H264_SLICE_TYPE_I &&
                 date_Get( &p_sys->dts ) != VLC_TS_INVALID )
        {
            /* Hell no PTS on IDR. We're totally blind */
            date_t pts = p_sys->dts;
            date_Increment( &pts, 2 );
            p_pic->i_pts = date_Get( &pts );
        }
    }

    /* Record a dated POC anchor for future interpolation */
    if( p_pic->i_pts > VLC_TS_INVALID )
    {
        p_sys->prevdatedpoc.pts = p_pic->i_pts;
        p_sys->prevdatedpoc.num = PictureOrderCount;
    }

    if( p_pic->i_length == 0 )
    {
        if( p_sps->vui.i_time_scale )
        {
            /* Duration from VUI timing info */
            p_pic->i_length = CLOCK_FREQ * i_num_clock_ts *
                              p_sps->vui.i_num_units_in_tick / p_sps->vui.i_time_scale;
        }
        else
        {
            /* No timing info: derive duration from the running dts clock */
            date_t next = p_sys->dts;
            date_Increment( &next, i_num_clock_ts );
            p_pic->i_length = date_Get( &next ) - date_Get( &p_sys->dts );
        }
    }

#if 0
    msg_Err(p_dec, "F/BOC %d/%d POC %d %d rec %d flags %x ref%d fn %d fp %d %d pts %ld len %ld",
            tFOC, bFOC, PictureOrderCount,
            p_sys->slice.type, p_sys->b_recovered, p_pic->i_flags,
            p_sys->slice.i_nal_ref_idc, p_sys->slice.i_frame_num, p_sys->slice.i_field_pic_flag,
            p_pic->i_pts - p_pic->i_dts, p_pic->i_pts % (100*CLOCK_FREQ), p_pic->i_length);
#endif

    /* save for next pic fixups */
    if( date_Get( &p_sys->dts ) != VLC_TS_INVALID )
    {
        if( p_sys->i_next_block_flags & BLOCK_FLAG_DISCONTINUITY )
            date_Set( &p_sys->dts, VLC_TS_INVALID );
        else
            date_Increment( &p_sys->dts, i_num_clock_ts );
    }

    if( p_pic )
    {
        /* Propagate pending flags (e.g. discontinuity) onto this output */
        p_pic->i_flags |= p_sys->i_next_block_flags;
        p_sys->i_next_block_flags = 0;
    }

    switch( p_sys->slice.type )
    {
    case H264_SLICE_TYPE_P:
        p_pic->i_flags |= BLOCK_FLAG_TYPE_P;
        break;
    case H264_SLICE_TYPE_B:
        p_pic->i_flags |= BLOCK_FLAG_TYPE_B;
        break;
    case H264_SLICE_TYPE_I:
        p_pic->i_flags |= BLOCK_FLAG_TYPE_I;
        /* fallthrough */
    default:
        break;
    }

    if( !p_sys->b_recovered )
    {
        if( p_sys->i_recoveryfnum != UINT_MAX ) /* recovering from SEI */
            p_pic->i_flags |= BLOCK_FLAG_PREROLL;
        else
            p_pic->i_flags |= BLOCK_FLAG_DROP;
    }

    p_pic->i_flags &= ~BLOCK_FLAG_PRIVATE_AUD;

    /* reset after output */
    p_sys->i_frame_dts = VLC_TS_INVALID;
    p_sys->i_frame_pts = VLC_TS_INVALID;
    p_sys->i_dpb_output_delay = 0;
    p_sys->i_pic_struct = UINT8_MAX;
    p_sys->i_recovery_frame_cnt = UINT_MAX;
    p_sys->slice.type = H264_SLICE_TYPE_UNKNOWN;
    p_sys->p_sei = NULL;
    p_sys->pp_sei_last = &p_sys->p_sei;
    p_sys->b_new_sps = false;
    p_sys->b_new_pps = false;
    p_sys->b_slice = false;

    /* CC */
    cc_storage_commit( p_sys->p_ccs, p_pic );

    return p_pic;
}
/* ParseVCL: queue one HEVC VCL NAL and, when it opens a new picture, flush
 * and return the previously accumulated frame as a single gathered block.
 * The fragment's frame-type flag (I/P/B) is derived from the NAL type or,
 * for non-IRAP slices, from the decoded slice segment header.
 * Ownership: p_frag is always queued into p_sys->p_frame. */
static block_t *ParseVCL(decoder_t *p_dec, uint8_t i_nal_type, block_t *p_frag)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_completed = NULL;

    const uint8_t *p_nal = p_frag->p_buffer;
    size_t i_nal = p_frag->i_buffer;

    /* Strip the AnnexB startcode; on failure just queue the raw fragment */
    if(unlikely(!hxxx_strip_AnnexB_startcode(&p_nal, &i_nal) || i_nal < 3))
    {
        block_ChainAppend(&p_sys->p_frame, p_frag); /* might corrupt */
        return NULL;
    }

    /* first_slice_segment_in_pic_flag is the first bit after the 2-byte NAL header */
    const bool b_starts_new_pic = (p_nal[2] & 0x80) != 0;
    if( b_starts_new_pic )
    {
        if( p_sys->p_frame )
        {
            /* Starting new frame, gather and return previous frame data */
            p_completed = block_ChainGather(p_sys->p_frame);
            p_sys->p_frame = NULL;
            p_sys->pp_frame_last = &p_sys->p_frame;
        }

        if( i_nal_type == HEVC_NAL_BLA_W_LP   || i_nal_type == HEVC_NAL_BLA_W_RADL ||
            i_nal_type == HEVC_NAL_BLA_N_LP   || i_nal_type == HEVC_NAL_IDR_W_RADL ||
            i_nal_type == HEVC_NAL_IDR_N_LP   || i_nal_type == HEVC_NAL_CRA )
        {
            /* IRAP pictures are keyframes */
            p_frag->i_flags |= BLOCK_FLAG_TYPE_I;
        }
        else
        {
            /* Non-IRAP: decode the slice header to discover the slice type */
            hevc_slice_segment_header_t *p_sli =
                hevc_decode_slice_header( p_nal, i_nal, true,
                                          p_sys->rgi_p_decsps, p_sys->rgi_p_decpps );
            enum hevc_slice_type_e type;
            if( p_sli && hevc_get_slice_type( p_sli, &type ) )
                p_frag->i_flags |= (type == HEVC_SLICE_TYPE_P) ? BLOCK_FLAG_TYPE_P
                                                               : BLOCK_FLAG_TYPE_B;
            else if( !p_sli )
                p_frag->i_flags |= BLOCK_FLAG_TYPE_B; /* header unparsable: assume B */
            if( p_sli )
                hevc_rbsp_release_slice_header( p_sli );
        }
    }

    block_ChainLastAppend(&p_sys->pp_frame_last, p_frag);

    return p_completed;
}
/*****************************************************************************
 * ParseMPEGBlock: Re-assemble fragments into a block containing a picture
 *****************************************************************************/
static block_t *ParseMPEGBlock( decoder_t *p_dec, block_t *p_frag )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_pic = NULL;

    /* Startcodes 0xB0/0xB1/0xB2: VOS start/end and user data */
    if( p_frag->p_buffer[3] == 0xB0 || p_frag->p_buffer[3] == 0xB1 || p_frag->p_buffer[3] == 0xB2 )
    {   /* VOS and USERDATA */
#if 0
        /* Remove VOS start/end code from the original stream */
        block_Release( p_frag );
#else
        /* Append the block for now since ts/ps muxers rely on VOL
         * being present in the stream */
        block_ChainLastAppend( &p_sys->pp_last, p_frag );
#endif
        return NULL;
    }
    /* Startcodes 0x20-0x2f carry the Video Object Layer (VOL) */
    if( p_frag->p_buffer[3] >= 0x20 && p_frag->p_buffer[3] <= 0x2f )
    {
        /* Copy the complete VOL into the output extradata */
        if( (size_t)p_dec->fmt_out.i_extra != p_frag->i_buffer )
        {
            p_dec->fmt_out.p_extra = xrealloc( p_dec->fmt_out.p_extra, p_frag->i_buffer );
            p_dec->fmt_out.i_extra = p_frag->i_buffer;
        }
        memcpy( p_dec->fmt_out.p_extra, p_frag->p_buffer, p_frag->i_buffer );
        ParseVOL( p_dec, &p_dec->fmt_out,
                  p_dec->fmt_out.p_extra, p_dec->fmt_out.i_extra );
#if 0
        /* Remove from the original stream */
        block_Release( p_frag );
#else
        /* Append the block for now since ts/ps muxers rely on VOL
         * being present in the stream */
        block_ChainLastAppend( &p_sys->pp_last, p_frag );
#endif
        return NULL;
    }
    else
    {
        /* No frames can be emitted before the VOL has been seen */
        if( !p_dec->fmt_out.i_extra )
        {
            msg_Warn( p_dec, "waiting for VOL" );
            block_Release( p_frag );
            return NULL;
        }

        /* Append the block */
        block_ChainLastAppend( &p_sys->pp_last, p_frag );
    }

    /* 0xb6 is the VOP startcode: a complete picture is now queued */
    if( p_frag->p_buffer[3] == 0xb6 &&
        ParseVOP( p_dec, p_frag ) == VLC_SUCCESS )
    {
        /* We are dealing with a VOP */
        p_pic = block_ChainGather( p_sys->p_frame );
        p_pic->i_flags = p_sys->i_flags;
        p_pic->i_pts = p_sys->i_interpolated_pts;
        p_pic->i_dts = p_sys->i_interpolated_dts;

        if ( p_dec->fmt_out.video.i_cpb_buffer && p_dec->fmt_out.i_bitrate )
        {
            /* Derive the CPB removal delay from vbv occupancy and bitrate */
            mtime_t i_cpb_delay = p_sys->i_vbv_occupancy * INT64_C(1000000)
                                  / p_dec->fmt_out.i_bitrate;
            p_pic->i_delay = i_cpb_delay;
        }
        else
            p_pic->i_delay = DEFAULT_DELAY * 1000;

        /* Reset context */
        p_sys->p_frame = NULL;
        p_sys->pp_last = &p_sys->p_frame;
    }

    return p_pic;
}
/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *
 * Feeds one input block (or a drain request when pp_block is NULL) to the
 * lavc decoder via the send/receive API and queues every produced frame on
 * p_sys->p_decoded; returns one queued output block per call, or NULL.
 * Ownership: on success the input block is released and *pp_block NULLed.
 *****************************************************************************/
static block_t *DecodeAudio( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *ctx = p_sys->p_context;
    AVFrame *frame = NULL;
    block_t *p_block = NULL;

    /* Late open: extradata may only become available with the input stream */
    if( !ctx->extradata_size && p_dec->fmt_in.i_extra && p_sys->b_delayed_open)
    {
        InitDecoderConfig( p_dec, ctx );
        OpenAudioCodec( p_dec );
    }

    if( p_sys->b_delayed_open )
    {
        /* Codec still not opened: drop the input */
        if( pp_block )
            p_block = *pp_block;
        goto drop;
    }

    /* Flushing or decoding, we return any block ready from multiple frames output */
    if( p_sys->p_decoded )
        return DequeueOneDecodedFrame( p_sys );

    if( pp_block == NULL ) /* Drain request */
    {
        /* we don't need to care about return val */
        (void) avcodec_send_packet( ctx, NULL );
    }
    else
    {
        p_block = *pp_block;
    }

    if( p_block )
    {
        if( p_block->i_flags & BLOCK_FLAG_CORRUPTED )
        {
            Flush( p_dec );
            goto drop;
        }

        if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
        {
            date_Set( &p_sys->end_date, VLC_TS_INVALID );
        }

        /* We've just started the stream, wait for the first PTS. */
        if( !date_Get( &p_sys->end_date ) && p_block->i_pts <= VLC_TS_INVALID )
            goto drop;

        if( p_block->i_buffer <= 0 )
            goto drop;

        /* lavc requires zero-padded input buffers; pad once and mark the block */
        if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
        {
            p_block = block_Realloc( p_block, 0,
                                     p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
            if( !p_block )
                return NULL;
            *pp_block = p_block;

            p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
            memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );

            p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
        }
    }

    frame = av_frame_alloc();
    if (unlikely(frame == NULL))
        goto end;

    /* Loop until the decoder neither consumes input nor produces output */
    for( int ret = 0; ret == 0; )
    {
        /* Feed in the loop as buffer could have been full on first iterations */
        if( p_block )
        {
            AVPacket pkt;
            av_init_packet( &pkt );
            pkt.data = p_block->p_buffer;
            pkt.size = p_block->i_buffer;
            ret = avcodec_send_packet( ctx, &pkt );
            if( ret == 0 ) /* Block has been consumed */
            {
                /* Only set new pts from input block if it has been used,
                 * otherwise let it be through interpolation */
                if( p_block->i_pts > date_Get( &p_sys->end_date ) )
                {
                    date_Set( &p_sys->end_date, p_block->i_pts );
                }

                block_Release( p_block );
                *pp_block = p_block = NULL;
            }
            else if ( ret != AVERROR(EAGAIN) ) /* Errors other than buffer full */
            {
                if( ret == AVERROR(ENOMEM) || ret == AVERROR(EINVAL) )
                    goto end;
                else
                    goto drop;
            }
        }

        /* Try to read one or multiple frames */
        ret = avcodec_receive_frame( ctx, frame );
        if( ret == 0 )
        {
            /* checks and init from first decoded frame */
            if( ctx->channels <= 0 || ctx->channels > 8 || ctx->sample_rate <= 0 )
            {
                msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d",
                          ctx->channels, ctx->sample_rate );
                goto drop;
            }
            else if( p_dec->fmt_out.audio.i_rate != (unsigned int)ctx->sample_rate )
            {
                /* Sample rate changed: restart the output clock */
                date_Init( &p_sys->end_date, ctx->sample_rate, 1 );
            }

            SetupOutputFormat( p_dec, true );
            if( decoder_UpdateAudioFormat( p_dec ) )
                goto drop;

            block_t *p_converted = ConvertAVFrame( p_dec, frame ); /* Consumes frame */
            if( p_converted )
            {
                /* Silent unwanted samples */
                if( p_sys->i_reject_count > 0 )
                {
                    memset( p_converted->p_buffer, 0, p_converted->i_buffer );
                    p_sys->i_reject_count--;
                }
                p_converted->i_buffer = p_converted->i_nb_samples
                                        * p_dec->fmt_out.audio.i_bytes_per_frame;
                /* Stamp and advance the running output clock */
                p_converted->i_pts = date_Get( &p_sys->end_date );
                p_converted->i_length = date_Increment( &p_sys->end_date,
                                                        p_converted->i_nb_samples )
                                        - p_converted->i_pts;

                block_ChainLastAppend( &p_sys->pp_decoded_last, p_converted );
            }

            /* Prepare new frame (previous one was consumed by ConvertAVFrame) */
            frame = av_frame_alloc();
            if (unlikely(frame == NULL))
                break;
        }
        else av_frame_free( &frame );
    };

    return ( p_sys->p_decoded ) ? DequeueOneDecodedFrame( p_sys ) : NULL;

end:
    /* Fatal error path: mark the decoder broken and drop everything */
    p_dec->b_error = true;
    if( pp_block )
    {
        assert( *pp_block == p_block );
        *pp_block = NULL;
    }
drop:
    if( p_block != NULL )
        block_Release(p_block);
    if( frame != NULL )
        av_frame_free( &frame );
    return NULL;
}