/***************************************************************************** * DoWork: process samples buffer ***************************************************************************** * This function queues the audio buffer to be processed by the goom thread *****************************************************************************/ static block_t *DoWork( filter_t *p_filter, block_t *p_in_buf ) { filter_sys_t *p_sys = p_filter->p_sys; block_t *p_block; /* Queue sample */ vlc_mutex_lock( &p_sys->p_thread->lock ); if( p_sys->p_thread->i_blocks == MAX_BLOCKS ) { vlc_mutex_unlock( &p_sys->p_thread->lock ); return p_in_buf; } p_block = block_New( p_sys->p_thread, p_in_buf->i_buffer ); if( !p_block ) { vlc_mutex_unlock( &p_sys->p_thread->lock ); return p_in_buf; } memcpy( p_block->p_buffer, p_in_buf->p_buffer, p_in_buf->i_buffer ); p_block->i_pts = p_in_buf->i_pts; p_sys->p_thread->pp_blocks[p_sys->p_thread->i_blocks++] = p_block; vlc_cond_signal( &p_sys->p_thread->wait ); vlc_mutex_unlock( &p_sys->p_thread->lock ); return p_in_buf; }
/**
 * It retrieves data using the get() callback, copies it,
 * and then releases it using the release() callback.
 */
static block_t *Block(access_t *access)
{
    imem_sys_t *sys = (imem_sys_t*)access->p_sys;

    unsigned flags;
    size_t buffer_size;
    void *buffer;

    if (sys->source.get(sys->source.data, sys->source.cookie,
                        NULL, NULL, &flags, &buffer_size, &buffer)) {
        access->info.b_eof = true;
        return NULL;
    }

    block_t *block = NULL;
    if (buffer_size > 0) {
        block = block_New(access, buffer_size);
        if (block)
            memcpy(block->p_buffer, buffer, buffer_size);
    }

    sys->source.release(sys->source.data, sys->source.cookie,
                        buffer_size, buffer);
    return block;
}
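/* The get()/release() pair used above is supplied by the application that
 * embeds the imem module.  A minimal sketch of what such callbacks could look
 * like, assuming a hypothetical application that hands out a single static
 * buffer; the names app_data_t, AppGet and AppRelease are illustrative, only
 * the call pattern mirrors the code above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct
{
    const uint8_t *p_data;  /* application-owned payload */
    size_t         i_size;
    bool           b_sent;
} app_data_t;

static int AppGet(void *data, const char *cookie, int64_t *dts, int64_t *pts,
                  unsigned *flags, size_t *size, void **buffer)
{
    app_data_t *app = data;
    (void) cookie;

    if (app->b_sent)
        return -1;                  /* non-zero: no more data (EOF) */

    if (dts) *dts = -1;             /* timestamps unknown */
    if (pts) *pts = -1;
    *flags  = 0;
    *size   = app->i_size;
    *buffer = (void *)app->p_data;  /* borrowed until release() is called */
    app->b_sent = true;
    return 0;
}

static void AppRelease(void *data, const char *cookie, size_t size, void *buffer)
{
    /* Nothing to free here: the buffer stays owned by the application. */
    (void) data; (void) cookie; (void) size; (void) buffer;
}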
static block_t *GrabAudio( demux_t *p_demux )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    struct audio_buf_info buf_info;
    block_t *p_block = NULL;
    int i_read = 0;
    int i_correct = 0;
    int result = 0;

    p_block = block_New( p_demux, p_sys->i_audio_max_frame_size );
    if( !p_block )
    {
        msg_Warn( p_demux, "cannot get buffer" );
        return NULL;
    }

    i_read = read( p_sys->fd_audio, p_block->p_buffer,
                   p_sys->i_audio_max_frame_size );
    if( i_read <= 0 )
    {
        /* Do not leak the freshly allocated block on a failed read */
        block_Release( p_block );
        return NULL;
    }

    p_block->i_buffer = i_read;

    /* Correct the date because of kernel buffering */
    i_correct = i_read;
    result = ioctl( p_sys->fd_audio, SNDCTL_DSP_GETISPACE, &buf_info );
    if( result == 0 )
        i_correct += buf_info.bytes;

    p_block->i_pts = p_block->i_dts =
        mdate() - INT64_C(1000000) * (mtime_t)i_correct
                / 2 / p_sys->channels / p_sys->i_sample_rate;

    return p_block;
}
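/* The date correction above backdates the block by the time represented by
 * the bytes already read plus the bytes still sitting in the kernel capture
 * buffer.  A minimal sketch of that arithmetic, assuming 16-bit interleaved
 * samples as in the OSS path above; the helper name is illustrative. */
#include <stdint.h>

/* Convert a count of buffered capture bytes into microseconds of delay,
 * assuming 2 bytes per sample and interleaved channels. */
static inline int64_t CaptureDelayUs( int i_bytes, int i_channels, int i_rate )
{
    /* bytes -> samples -> frames -> seconds -> microseconds */
    return INT64_C(1000000) * i_bytes / 2 / i_channels / i_rate;
}

/* Example: 4096 bytes pending, stereo, 48 kHz
 * -> 1000000 * 4096 / 2 / 2 / 48000 = 21333 us, i.e. about 21 ms. */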
/*****************************************************************************
 * DoWork: process samples buffer
 *****************************************************************************
 * This function queues the audio buffer to be processed by the goom thread
 *****************************************************************************/
static void DoWork( aout_instance_t * p_aout, aout_filter_t * p_filter,
                    aout_buffer_t * p_in_buf, aout_buffer_t * p_out_buf )
{
    aout_filter_sys_t *p_sys = p_filter->p_sys;
    block_t *p_block;

    p_out_buf->i_nb_samples = p_in_buf->i_nb_samples;
    p_out_buf->i_nb_bytes = p_in_buf->i_nb_bytes;

    /* Queue sample */
    vlc_mutex_lock( &p_sys->p_thread->lock );
    if( p_sys->p_thread->i_blocks == MAX_BLOCKS )
    {
        vlc_mutex_unlock( &p_sys->p_thread->lock );
        return;
    }

    p_block = block_New( p_sys->p_thread, p_in_buf->i_nb_bytes );
    if( !p_block )
    {
        /* Do not return with the lock still held */
        vlc_mutex_unlock( &p_sys->p_thread->lock );
        return;
    }
    memcpy( p_block->p_buffer, p_in_buf->p_buffer, p_in_buf->i_nb_bytes );
    p_block->i_pts = p_in_buf->start_date;

    p_sys->p_thread->pp_blocks[p_sys->p_thread->i_blocks++] = p_block;

    vlc_cond_signal( &p_sys->p_thread->wait );
    vlc_mutex_unlock( &p_sys->p_thread->lock );
}
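/* Both DoWork() variants above only enqueue the copied block under the thread
 * lock and signal the condition variable; the goom thread is expected to drain
 * pp_blocks on its side.  A hypothetical sketch of that consumer step,
 * assuming the same lock, wait, i_blocks and pp_blocks fields referenced
 * above; it illustrates the hand-off and is not the actual goom thread code. */
static block_t *DequeueBlock( goom_thread_t *p_thread )
{
    block_t *p_block;

    vlc_mutex_lock( &p_thread->lock );
    while( p_thread->i_blocks == 0 )
        vlc_cond_wait( &p_thread->wait, &p_thread->lock );

    /* Take the oldest queued block and shift the remaining entries down */
    p_block = p_thread->pp_blocks[0];
    p_thread->i_blocks--;
    memmove( p_thread->pp_blocks, p_thread->pp_blocks + 1,
             p_thread->i_blocks * sizeof(block_t *) );
    vlc_mutex_unlock( &p_thread->lock );

    return p_block; /* caller releases it with block_Release() */
}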
static aout_buffer_t *audio_new_buffer( decoder_t *p_dec, int i_samples )
{
    block_t *p_block;
    int i_size;

    if( p_dec->fmt_out.audio.i_bitspersample )
    {
        i_size = i_samples * p_dec->fmt_out.audio.i_bitspersample / 8 *
                 p_dec->fmt_out.audio.i_channels;
    }
    else if( p_dec->fmt_out.audio.i_bytes_per_frame &&
             p_dec->fmt_out.audio.i_frame_length )
    {
        i_size = i_samples * p_dec->fmt_out.audio.i_bytes_per_frame /
                 p_dec->fmt_out.audio.i_frame_length;
    }
    else
    {
        /* Fallback: assume 4 bytes per sample per channel */
        i_size = i_samples * 4 * p_dec->fmt_out.audio.i_channels;
    }

    p_block = block_New( p_dec, i_size );
    if( p_block )
        p_block->i_nb_samples = i_samples;
    return p_block;
}
/*****************************************************************************
 * Demux:
 *****************************************************************************/
static block_t *GrabVideo( demux_t *p_demux )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    block_t *p_block = NULL;

    if( dc1394_capture_dequeue( p_sys->camera, DC1394_CAPTURE_POLICY_WAIT,
                                &p_sys->frame ) != DC1394_SUCCESS )
    {
        msg_Err( p_demux, "unable to capture a frame" );
        return NULL;
    }

    p_block = block_New( p_demux,
                         p_sys->frame->size[0] * p_sys->frame->size[1] * 2 );
    if( !p_block )
    {
        msg_Err( p_demux, "cannot get block" );
        return NULL;
    }

    if( !p_sys->frame->image )
    {
        msg_Err( p_demux, "capture buffer empty" );
        block_Release( p_block );
        return NULL;
    }

    memcpy( p_block->p_buffer, (const char *)p_sys->frame->image,
            p_sys->width * p_sys->height * 2 );

    p_block->i_pts = p_block->i_dts = mdate();
    dc1394_capture_enqueue( p_sys->camera, p_sys->frame );
    return p_block;
}
/***************************************************************************** * DVRRead *****************************************************************************/ static block_t *DVRRead( void ) { int i, i_len; block_t *p_ts = p_freelist, **pp_current = &p_ts; struct iovec p_iov[MAX_READ_ONCE]; for ( i = 0; i < MAX_READ_ONCE; i++ ) { if ( (*pp_current) == NULL ) *pp_current = block_New(); p_iov[i].iov_base = (*pp_current)->p_ts; p_iov[i].iov_len = TS_SIZE; pp_current = &(*pp_current)->p_next; } if ( (i_len = readv(i_dvr, p_iov, MAX_READ_ONCE)) < 0 ) { msg_Err( NULL, "couldn't read from DVR device (%s)", strerror(errno) ); i_len = 0; } i_len /= TS_SIZE; pp_current = &p_ts; while ( i_len && *pp_current ) { pp_current = &(*pp_current)->p_next; i_len--; } p_freelist = *pp_current; *pp_current = NULL; return p_ts; }
/***************************************************************************** * Demux: read chunks and send them to the synthesizer ***************************************************************************** * Returns -1 in case of error, 0 in case of EOF, 1 otherwise *****************************************************************************/ static int Demux (demux_t *p_demux) { stream_t *s = p_demux->s; demux_sys_t *p_sys = p_demux->p_sys; uint64_t pulse = p_sys->pulse, next_pulse = UINT64_MAX; if (pulse == UINT64_MAX) return 0; /* all tracks are done */ es_out_Control (p_demux->out, ES_OUT_SET_PCR, VLC_TS_0 + date_Get (&p_sys->pts)); for (unsigned i = 0; i < p_sys->trackc; i++) { mtrk_t *track = p_sys->trackv + i; while (track->next == pulse) { if (HandleMessage (p_demux, track) || ReadDeltaTime (s, track)) { msg_Err (p_demux, "fatal parsing error"); return VLC_EGENERIC; } } if (track->next < next_pulse) next_pulse = track->next; } mtime_t cur_tick = (date_Get (&p_sys->pts) + 9999) / 10000, last_tick; if (next_pulse != UINT64_MAX) last_tick = date_Increment (&p_sys->pts, next_pulse - pulse) / 10000; else last_tick = cur_tick + 1; /* MIDI Tick emulation (ping the decoder every 10ms) */ while (cur_tick < last_tick) { block_t *tick = block_New (p_demux, 1); if (tick == NULL) break; tick->p_buffer[0] = 0xF9; tick->i_dts = tick->i_pts = VLC_TS_0 + cur_tick++ * 10000; es_out_Send (p_demux->out, p_sys->es, tick); } p_sys->pulse = next_pulse; return 1; }
/***************************************************************************** * GetSoutBuffer: *****************************************************************************/ static block_t *GetSoutBuffer( decoder_t *p_dec ) { decoder_sys_t *p_sys = p_dec->p_sys; block_t *p_block = block_New( p_dec, p_sys->frame.i_size ); if( p_block ) { p_block->i_pts = p_block->i_dts = date_Get( &p_sys->end_date ); p_block->i_length = date_Increment( &p_sys->end_date, p_sys->frame.i_samples ) - p_block->i_pts; } return p_block; }
/*****************************************************************************
 * Demux:
 *****************************************************************************/
static block_t *GrabVideo( demux_t *p_demux )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    block_t *p_block = NULL;
    int result = 0;

    if( p_sys->dma_capture )
    {
        result = dc1394_dma_single_capture( &p_sys->camera );
        if( result != DC1394_SUCCESS )
        {
            msg_Err( p_demux, "unable to capture a frame" );
            return NULL;
        }
    }
    else
    {
        result = dc1394_single_capture( p_sys->camera_info.handle,
                                        &p_sys->camera );
        if( result != DC1394_SUCCESS )
        {
            msg_Err( p_demux, "unable to capture a frame" );
            return NULL;
        }
    }

    p_block = block_New( p_demux, p_sys->camera.frame_width *
                                  p_sys->camera.frame_height * 2 );
    if( !p_block )
    {
        msg_Err( p_demux, "cannot get block" );
        return NULL;
    }

    if( !p_sys->camera.capture_buffer )
    {
        msg_Err( p_demux, "capture buffer empty" );
        block_Release( p_block );
        return NULL;
    }

    memcpy( p_block->p_buffer, (const char *)p_sys->camera.capture_buffer,
            p_sys->camera.frame_width * p_sys->camera.frame_height * 2 );

    p_block->i_pts = p_block->i_dts = mdate();
    if( p_sys->dma_capture )
        dc1394_dma_done_with_buffer( &p_sys->camera );
    return p_block;
}
static block_t *Encode( encoder_t *p_enc, aout_buffer_t *p_aout_buf ) { encoder_sys_t *p_sys = p_enc->p_sys; int16_t *p_buffer = (int16_t *)p_aout_buf->p_buffer; int i_nb_samples = p_aout_buf->i_nb_samples; block_t *p_chain = NULL; mtime_t i_computed_pts = p_aout_buf->start_date - (mtime_t)1000000 * (mtime_t)p_sys->i_nb_samples / (mtime_t)p_enc->fmt_in.audio.i_rate; if ( aout_DateGet( &p_sys->pts ) - i_computed_pts > 10000 || aout_DateGet( &p_sys->pts ) - i_computed_pts < -10000 ) { msg_Dbg( p_enc, "resetting audio date" ); aout_DateSet( &p_sys->pts, i_computed_pts ); } while ( p_sys->i_nb_samples + i_nb_samples >= MPEG_FRAME_SIZE ) { int i_used; block_t *p_block; Uninterleave( p_enc, p_buffer, MPEG_FRAME_SIZE - p_sys->i_nb_samples ); i_nb_samples -= MPEG_FRAME_SIZE - p_sys->i_nb_samples; p_buffer += (MPEG_FRAME_SIZE - p_sys->i_nb_samples) * 2; toolame_encode_buffer( p_sys->p_toolame, p_sys->p_left, p_sys->p_right, MPEG_FRAME_SIZE, p_sys->p_out_buffer, MAX_CODED_FRAME_SIZE, &i_used ); p_sys->i_nb_samples = 0; p_block = block_New( p_enc, i_used ); p_enc->p_vlc->pf_memcpy( p_block->p_buffer, p_sys->p_out_buffer, i_used ); p_block->i_length = (mtime_t)1000000 * (mtime_t)MPEG_FRAME_SIZE / (mtime_t)p_enc->fmt_in.audio.i_rate; p_block->i_dts = p_block->i_pts = aout_DateGet( &p_sys->pts ); aout_DateIncrement( &p_sys->pts, MPEG_FRAME_SIZE ); block_ChainAppend( &p_chain, p_block ); } if ( i_nb_samples ) { Uninterleave( p_enc, p_buffer, i_nb_samples ); p_sys->i_nb_samples += i_nb_samples; } return p_chain; }
static block_t *nal_get_annexeb( decoder_t *p_dec, uint8_t *p, int i_size )
{
    block_t *p_nal;

    p_nal = block_New( p_dec, 3 + i_size );
    if( !p_nal )
        return NULL;

    /* Add start code */
    p_nal->p_buffer[0] = 0x00;
    p_nal->p_buffer[1] = 0x00;
    p_nal->p_buffer[2] = 0x01;

    /* Copy nalu */
    memcpy( &p_nal->p_buffer[3], p, i_size );
    return p_nal;
}
static block_t *dirac_EmitEOS( decoder_t *p_dec, uint32_t i_prev_parse_offset )
{
    const uint8_t p_eos[] = { 'B','B','C','D',0x10,0,0,0,13,0,0,0,0 };
    block_t *p_block = block_New( p_dec, 13 );

    if( !p_block )
        return NULL;
    memcpy( p_block->p_buffer, p_eos, 13 );
    SetDWBE( p_block->p_buffer + 9, i_prev_parse_offset );
    p_block->i_flags = DIRAC_NON_DATED;

    /* keep the compiler quiet if block_New() expands without using p_dec */
    (void) p_dec;
    return p_block;
}
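/* The 13-byte template above is a Dirac parse-info header: the 'BBCD' prefix,
 * a one-byte parse code (0x10 for end of sequence), a 32-bit big-endian
 * next-parse offset (13, the size of the unit itself) and a 32-bit previous
 * parse offset patched in at byte 9.  A small sketch building the same header
 * for an arbitrary parse code, assuming VLC's SetDWBE() helper; the function
 * name is illustrative. */
static void dirac_BuildParseInfo( uint8_t p_buf[13], uint8_t i_parse_code,
                                  uint32_t i_next_offset,
                                  uint32_t i_prev_offset )
{
    p_buf[0] = 'B'; p_buf[1] = 'B'; p_buf[2] = 'C'; p_buf[3] = 'D';
    p_buf[4] = i_parse_code;             /* 0x10 == end of sequence */
    SetDWBE( &p_buf[5], i_next_offset ); /* offset to the next parse info */
    SetDWBE( &p_buf[9], i_prev_offset ); /* offset back to the previous one */
}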
static block_t *EncodeFrame( encoder_t *p_enc, aout_buffer_t *p_block ) { block_t *p_pcm_block; block_t *p_chain = NULL; unsigned int i_samples = p_block->i_buffer >> 2 /* s16l stereo */; mtime_t start_date = p_block->i_pts; start_date -= (mtime_t)i_samples * (mtime_t)1000000 / (mtime_t)p_enc->fmt_out.audio.i_rate; VLC_UNUSED(p_enc); do { p_pcm_block = GetPCM( p_enc, p_block ); if( !p_pcm_block ) break; p_block = NULL; /* we don't need it anymore */ uint32_t enc_buffer[16384]; /* storage for 65536 Bytes XXX: too much */ struct enc_chunk_hdr *chunk = (void*) enc_buffer; chunk->enc_data = ENC_CHUNK_SKIP_HDR(chunk->enc_data, chunk); encode_frame( (char*)p_pcm_block->p_buffer, chunk ); block_Release( p_pcm_block ); block_t *p_mp3_block = block_New( p_enc, chunk->enc_size ); if( !p_mp3_block ) break; vlc_memcpy( p_mp3_block->p_buffer, chunk->enc_data, chunk->enc_size ); /* date management */ p_mp3_block->i_length = SAMP_PER_FRAME1 * 1000000 / p_enc->fmt_out.audio.i_rate; start_date += p_mp3_block->i_length; p_mp3_block->i_dts = p_mp3_block->i_pts = start_date; p_mp3_block->i_nb_samples = SAMP_PER_FRAME1; block_ChainAppend( &p_chain, p_mp3_block ); } while( p_pcm_block ); return p_chain; }
/**
 * It retrieves data using the get() callback, sends it to es_out
 * and then releases it using the release() callback.
 */
static int Demux(demux_t *demux)
{
    imem_sys_t *sys = (imem_sys_t*)demux->p_sys;

    if (sys->deadline == VLC_TS_INVALID)
        sys->deadline = sys->dts + 1;

    for (;;) {
        if (sys->deadline <= sys->dts)
            break;

        /* */
        int64_t dts, pts;
        unsigned flags;
        size_t buffer_size;
        void *buffer;

        if (sys->source.get(sys->source.data, sys->source.cookie,
                            &dts, &pts, &flags, &buffer_size, &buffer))
            return 0;

        if (dts < 0)
            dts = pts;

        if (buffer_size > 0) {
            block_t *block = block_New(demux, buffer_size);
            if (block) {
                block->i_dts = dts >= 0 ? (1 + dts) : VLC_TS_INVALID;
                block->i_pts = pts >= 0 ? (1 + pts) : VLC_TS_INVALID;
                memcpy(block->p_buffer, buffer, buffer_size);

                es_out_Control(demux->out, ES_OUT_SET_PCR, block->i_dts);
                es_out_Send(demux->out, sys->es, block);
            }
        }

        sys->dts = dts;

        sys->source.release(sys->source.data, sys->source.cookie,
                            buffer_size, buffer);
    }
    sys->deadline = VLC_TS_INVALID;
    return 1;
}
static picture_t *ImageReadUrl( image_handler_t *p_image, const char *psz_url,
                                video_format_t *p_fmt_in,
                                video_format_t *p_fmt_out )
{
    block_t *p_block;
    picture_t *p_pic;
    stream_t *p_stream = NULL;
    int i_size;

    p_stream = stream_UrlNew( p_image->p_parent, psz_url );
    if( !p_stream )
    {
        msg_Dbg( p_image->p_parent, "could not open %s for reading", psz_url );
        return NULL;
    }

    i_size = stream_Size( p_stream );
    p_block = block_New( p_image->p_parent, i_size );
    if( !p_block )
    {
        stream_Delete( p_stream );
        return NULL;
    }
    stream_Read( p_stream, p_block->p_buffer, i_size );

    if( !p_fmt_in->i_chroma )
    {
        char *psz_mime = NULL;
        stream_Control( p_stream, STREAM_GET_CONTENT_TYPE, &psz_mime );
        if( psz_mime )
            p_fmt_in->i_chroma = image_Mime2Fourcc( psz_mime );
        free( psz_mime );
    }
    stream_Delete( p_stream );

    if( !p_fmt_in->i_chroma )
    {
        /* Try to guess format from file name */
        p_fmt_in->i_chroma = image_Ext2Fourcc( psz_url );
    }

    p_pic = ImageRead( p_image, p_block, p_fmt_in, p_fmt_out );

    return p_pic;
}
static block_t *CreateAnnexbNAL( decoder_t *p_dec, const uint8_t *p, int i_size )
{
    block_t *p_nal;

    p_nal = block_New( p_dec, 4 + i_size );
    if( !p_nal )
        return NULL;

    /* Add start code */
    p_nal->p_buffer[0] = 0x00;
    p_nal->p_buffer[1] = 0x00;
    p_nal->p_buffer[2] = 0x00;
    p_nal->p_buffer[3] = 0x01;

    /* Copy nalu */
    memcpy( &p_nal->p_buffer[4], p, i_size );

    VLC_UNUSED(p_dec);
    return p_nal;
}
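/* CreateAnnexbNAL() prepends the four-byte 0x00000001 start code used by
 * Annex B streams.  A hypothetical sketch of how such a helper might be
 * driven on a buffer of NAL units that are each prefixed by a 4-byte
 * big-endian length (as in avcC-style payloads); GetDWBE() and
 * block_ChainAppend() are the usual VLC helpers, the loop itself is
 * illustrative. */
static block_t *ConvertToAnnexb( decoder_t *p_dec,
                                 const uint8_t *p_buf, size_t i_buf )
{
    block_t *p_chain = NULL;

    while( i_buf >= 4 )
    {
        uint32_t i_nal_size = GetDWBE( p_buf );
        p_buf += 4; i_buf -= 4;
        if( i_nal_size == 0 || i_nal_size > i_buf )
            break; /* truncated or corrupt input */

        block_t *p_nal = CreateAnnexbNAL( p_dec, p_buf, i_nal_size );
        if( !p_nal )
            break;
        block_ChainAppend( &p_chain, p_nal );

        p_buf += i_nal_size; i_buf -= i_nal_size;
    }
    return p_chain;
}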
static block_t *Encode( encoder_t *p_enc, aout_buffer_t *p_aout_buf ) { encoder_sys_t *p_sys = p_enc->p_sys; int16_t *p_buffer = (int16_t *)p_aout_buf->p_buffer; int i_nb_samples = p_aout_buf->i_nb_samples; block_t *p_chain = NULL; p_sys->i_pts = p_aout_buf->start_date - (mtime_t)1000000 * (mtime_t)p_sys->i_nb_samples / (mtime_t)p_enc->fmt_out.audio.i_rate; while ( p_sys->i_nb_samples + i_nb_samples >= MPEG_FRAME_SIZE ) { int i_used; block_t *p_block; Bufferize( p_enc, p_buffer, MPEG_FRAME_SIZE - p_sys->i_nb_samples ); i_nb_samples -= MPEG_FRAME_SIZE - p_sys->i_nb_samples; p_buffer += (MPEG_FRAME_SIZE - p_sys->i_nb_samples) * 2; i_used = twolame_encode_buffer_interleaved( p_sys->p_twolame, p_sys->p_buffer, MPEG_FRAME_SIZE, p_sys->p_out_buffer, MAX_CODED_FRAME_SIZE ); p_sys->i_nb_samples = 0; p_block = block_New( p_enc, i_used ); vlc_memcpy( p_block->p_buffer, p_sys->p_out_buffer, i_used ); p_block->i_length = (mtime_t)1000000 * (mtime_t)MPEG_FRAME_SIZE / (mtime_t)p_enc->fmt_out.audio.i_rate; p_block->i_dts = p_block->i_pts = p_sys->i_pts; p_sys->i_pts += p_block->i_length; block_ChainAppend( &p_chain, p_block ); } if ( i_nb_samples ) { Bufferize( p_enc, p_buffer, i_nb_samples ); p_sys->i_nb_samples += i_nb_samples; } return p_chain; }
static block_t *Encode( encoder_t *p_enc, subpicture_t *p_spu )
{
    subpicture_region_t *p_region;
    block_t *p_block;
    size_t len;

    if( p_spu == NULL )
        return NULL;

    p_region = p_spu->p_region;
    if( ( p_region == NULL ) ||
        ( p_region->fmt.i_chroma != VLC_CODEC_TEXT ) ||
        ( p_region->psz_text == NULL ) )
        return NULL;

    /* This should already be UTF-8 encoded, so not much effort... */
    len = strlen( p_region->psz_text );
    p_block = block_New( p_enc, len );
    if( !p_block )
        return NULL;
    memcpy( p_block->p_buffer, p_region->psz_text, len );

    return p_block;
}
/***************************************************************************** * GetCc: *****************************************************************************/ static block_t *GetCc( decoder_t *p_dec, bool pb_present[4] ) { decoder_sys_t *p_sys = p_dec->p_sys; block_t *p_cc; for( int i = 0; i < 4; i++ ) pb_present[i] = p_sys->cc.pb_present[i]; if( p_sys->cc.i_data <= 0 ) return NULL; p_cc = block_New( p_dec, p_sys->cc.i_data); if( p_cc ) { memcpy( p_cc->p_buffer, p_sys->cc.p_data, p_sys->cc.i_data ); p_cc->i_dts = p_cc->i_pts = p_sys->cc.b_reorder ? p_sys->i_cc_pts : p_sys->i_cc_dts; p_cc->i_flags = ( p_sys->cc.b_reorder ? p_sys->i_cc_flags : BLOCK_FLAG_TYPE_P ) & BLOCK_FLAG_TYPE_MASK; } cc_Flush( &p_sys->cc ); return p_cc; }
/*****************************************************************************
 * Read: standard read on a file descriptor.
 *****************************************************************************/
static block_t *BlockRead( access_t *p_access )
{
    access_sys_t *p_sys = p_access->p_sys;
    block_t *p_block;
    rmff_pheader_t pheader;
    int i_size;

    if( p_sys->p_header )
    {
        p_block = p_sys->p_header;
        p_sys->p_header = NULL;
        return p_block;
    }

    i_size = real_get_rdt_chunk_header( p_access->p_sys->p_rtsp, &pheader );
    if( i_size <= 0 )
        return NULL;

    p_block = block_New( p_access, i_size );
    if( !p_block )
        return NULL;
    p_block->i_buffer = real_get_rdt_chunk( p_access->p_sys->p_rtsp, &pheader,
                                            &p_block->p_buffer );

    return p_block;
}
block_t *screen_Capture( demux_t *p_demux )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    screen_data_t *p_data = p_sys->p_data;
    block_t *p_block;

    p_block = block_New( p_demux, p_sys->fmt.video.i_width *
                                  p_sys->fmt.video.i_height *
                                  p_sys->fmt.video.i_bits_per_pixel / 8 );
    if( !p_block )
        return NULL;

    p_data->p_screen->ReadBitmap( p_data->p_bitmap );

    for( unsigned i = 0; i < p_sys->fmt.video.i_height; i++ )
    {
        memcpy( p_block->p_buffer + i * p_sys->fmt.video.i_width *
                    p_sys->fmt.video.i_bits_per_pixel / 8,
                (uint8_t *) p_data->p_bitmap->Bits() +
                    i * p_data->p_bitmap->BytesPerRow(),
                p_sys->fmt.video.i_width *
                    p_sys->fmt.video.i_bits_per_pixel / 8 );
    }

    return p_block;
}
/***************************************************************************** * DVR events *****************************************************************************/ static void DVRRead(struct ev_loop *loop, struct ev_io *w, int revents) { int i, i_len; block_t *p_ts = p_freelist, **pp_current = &p_ts; struct iovec p_iov[MAX_READ_ONCE]; for ( i = 0; i < MAX_READ_ONCE; i++ ) { if ( (*pp_current) == NULL ) *pp_current = block_New(); p_iov[i].iov_base = (*pp_current)->p_ts; p_iov[i].iov_len = TS_SIZE; pp_current = &(*pp_current)->p_next; } if ( (i_len = readv(i_dvr, p_iov, MAX_READ_ONCE)) < 0 ) { msg_Err( NULL, "couldn't read from DVR device (%s)", strerror(errno) ); i_len = 0; } i_len /= TS_SIZE; if ( i_len ) ev_timer_again(loop, &mute_watcher); pp_current = &p_ts; while ( i_len && *pp_current ) { pp_current = &(*pp_current)->p_next; i_len--; } p_freelist = *pp_current; *pp_current = NULL; demux_Run( p_ts ); }
/**************************************************************************** * EncodeVideo: the whole thing ****************************************************************************/ static block_t *EncodeVideo( encoder_t *p_enc, picture_t *p_pict ) { encoder_sys_t *p_sys = p_enc->p_sys; AVFrame frame; int i_out, i_plane; #if LIBAVCODEC_BUILD >= 4702 if ( !p_sys->b_inited && p_enc->i_threads >= 1 ) { struct thread_context_t ** pp_contexts; int i; p_sys->b_inited = 1; pp_contexts = malloc( sizeof(struct thread_context_t *) * p_enc->i_threads ); p_sys->p_context->thread_opaque = (void *)pp_contexts; for ( i = 0; i < p_enc->i_threads; i++ ) { pp_contexts[i] = vlc_object_create( p_enc, sizeof(struct thread_context_t) ); pp_contexts[i]->p_context = p_sys->p_context; vlc_mutex_init( p_enc, &pp_contexts[i]->lock ); vlc_cond_init( p_enc, &pp_contexts[i]->cond ); pp_contexts[i]->b_work = 0; pp_contexts[i]->b_done = 0; if ( vlc_thread_create( pp_contexts[i], "encoder", FfmpegThread, VLC_THREAD_PRIORITY_VIDEO, VLC_FALSE ) ) { msg_Err( p_enc, "cannot spawn encoder thread, expect to die soon" ); return NULL; } } p_sys->p_context->execute = FfmpegExecute; } #endif memset( &frame, 0, sizeof( AVFrame ) ); for( i_plane = 0; i_plane < p_pict->i_planes; i_plane++ ) { frame.data[i_plane] = p_pict->p[i_plane].p_pixels; frame.linesize[i_plane] = p_pict->p[i_plane].i_pitch; } /* Let ffmpeg select the frame type */ frame.pict_type = 0; frame.repeat_pict = 2 - p_pict->i_nb_fields; #if LIBAVCODEC_BUILD >= 4685 frame.interlaced_frame = !p_pict->b_progressive; frame.top_field_first = !!p_pict->b_top_field_first; #endif #if LIBAVCODEC_BUILD < 4702 /* Set the pts of the frame being encoded (segfaults with mpeg4!)*/ if( p_enc->fmt_out.i_codec == VLC_FOURCC( 'm', 'p', 'g', 'v' ) || p_enc->fmt_out.i_codec == VLC_FOURCC( 'm', 'p', '1', 'v' ) || p_enc->fmt_out.i_codec == VLC_FOURCC( 'm', 'p', '2', 'v' ) ) #else if( 1 ) #endif { frame.pts = p_pict->date ? 
p_pict->date : AV_NOPTS_VALUE; if ( p_sys->b_hurry_up && frame.pts != AV_NOPTS_VALUE ) { mtime_t current_date = mdate(); if ( current_date + HURRY_UP_GUARD3 > frame.pts ) { p_sys->p_context->mb_decision = FF_MB_DECISION_SIMPLE; p_sys->p_context->flags &= ~CODEC_FLAG_TRELLIS_QUANT; msg_Dbg( p_enc, "hurry up mode 3" ); } else { p_sys->p_context->mb_decision = p_sys->i_hq; if ( current_date + HURRY_UP_GUARD2 > frame.pts ) { p_sys->p_context->flags &= ~CODEC_FLAG_TRELLIS_QUANT; #if LIBAVCODEC_BUILD >= 4690 p_sys->p_context->noise_reduction = p_sys->i_noise_reduction + (HURRY_UP_GUARD2 + current_date - frame.pts) / 500; #endif msg_Dbg( p_enc, "hurry up mode 2" ); } else { if ( p_sys->b_trellis ) p_sys->p_context->flags |= CODEC_FLAG_TRELLIS_QUANT; #if LIBAVCODEC_BUILD >= 4690 p_sys->p_context->noise_reduction = p_sys->i_noise_reduction; #endif } } if ( current_date + HURRY_UP_GUARD1 > frame.pts ) { frame.pict_type = FF_P_TYPE; /* msg_Dbg( p_enc, "hurry up mode 1 %lld", current_date + HURRY_UP_GUARD1 - frame.pts ); */ } } } else { frame.pts = AV_NOPTS_VALUE; } if ( frame.pts != AV_NOPTS_VALUE && frame.pts != 0 ) { if ( p_sys->i_last_pts == frame.pts ) { msg_Warn( p_enc, "almost fed libavcodec with two frames with the " "same PTS (" I64Fd ")", frame.pts ); return NULL; } else if ( p_sys->i_last_pts > frame.pts ) { msg_Warn( p_enc, "almost fed libavcodec with a frame in the " "past (current: " I64Fd ", last: "I64Fd")", frame.pts, p_sys->i_last_pts ); return NULL; } else { p_sys->i_last_pts = frame.pts; } } frame.quality = p_sys->i_quality; /* Ugly work-around for stupid libavcodec behaviour */ #if LIBAVCODEC_BUILD >= 4722 p_sys->i_framenum++; p_sys->pi_delay_pts[p_sys->i_framenum % MAX_FRAME_DELAY] = frame.pts; frame.pts = p_sys->i_framenum * AV_TIME_BASE * p_enc->fmt_in.video.i_frame_rate_base; frame.pts += p_enc->fmt_in.video.i_frame_rate - 1; frame.pts /= p_enc->fmt_in.video.i_frame_rate; #endif /* End work-around */ i_out = avcodec_encode_video( p_sys->p_context, p_sys->p_buffer_out, AVCODEC_MAX_VIDEO_FRAME_SIZE, &frame ); if( i_out > 0 ) { block_t *p_block = block_New( p_enc, i_out ); memcpy( p_block->p_buffer, p_sys->p_buffer_out, i_out ); /* FIXME, 3-2 pulldown is not handled correctly */ p_block->i_length = I64C(1000000) * p_enc->fmt_in.video.i_frame_rate_base / p_enc->fmt_in.video.i_frame_rate; if( !p_sys->p_context->max_b_frames || !p_sys->p_context->delay ) { /* No delay -> output pts == input pts */ p_block->i_pts = p_block->i_dts = p_pict->date; } else if( p_sys->p_context->coded_frame->pts != AV_NOPTS_VALUE && p_sys->p_context->coded_frame->pts != 0 && p_sys->i_buggy_pts_detect != p_sys->p_context->coded_frame->pts ) { p_sys->i_buggy_pts_detect = p_sys->p_context->coded_frame->pts; p_block->i_pts = p_sys->p_context->coded_frame->pts; /* Ugly work-around for stupid libavcodec behaviour */ #if LIBAVCODEC_BUILD >= 4722 { int64_t i_framenum = p_block->i_pts * p_enc->fmt_in.video.i_frame_rate / p_enc->fmt_in.video.i_frame_rate_base / AV_TIME_BASE; p_block->i_pts = p_sys->pi_delay_pts[i_framenum % MAX_FRAME_DELAY]; } #endif /* End work-around */ if( p_sys->p_context->coded_frame->pict_type != FF_I_TYPE && p_sys->p_context->coded_frame->pict_type != FF_P_TYPE ) { p_block->i_dts = p_block->i_pts; } else { if( p_sys->i_last_ref_pts ) { p_block->i_dts = p_sys->i_last_ref_pts; } else { /* Let's put something sensible */ p_block->i_dts = p_block->i_pts; } p_sys->i_last_ref_pts = p_block->i_pts; } } else { /* Buggy libavcodec which doesn't update coded_frame->pts * correctly */ p_block->i_dts = 
p_block->i_pts = p_pict->date; } switch ( p_sys->p_context->coded_frame->pict_type ) { case FF_I_TYPE: p_block->i_flags |= BLOCK_FLAG_TYPE_I; break; case FF_P_TYPE: p_block->i_flags |= BLOCK_FLAG_TYPE_P; break; case FF_B_TYPE: p_block->i_flags |= BLOCK_FLAG_TYPE_B; break; } return p_block; } return NULL; }
/**************************************************************************** * Encode: the whole thing **************************************************************************** * This function spits out encapsulation units. ****************************************************************************/ static block_t *Encode( encoder_t *p_enc, picture_t *p_pic ) { encoder_sys_t *p_sys = p_enc->p_sys; block_t *p_block, *p_output_chain = NULL; int i_plane, i_line, i_width, i_src_stride; uint8_t *p_dst; if( !p_pic ) return NULL; /* we only know if the sequence is interlaced when the first * picture arrives, so final setup is done here */ /* XXX todo, detect change of interlace */ p_sys->ctx.src_params.topfieldfirst = p_pic->b_top_field_first; p_sys->ctx.src_params.source_sampling = !p_pic->b_progressive; if( p_sys->b_auto_field_coding ) p_sys->ctx.enc_params.picture_coding_mode = !p_pic->b_progressive; if( !p_sys->p_dirac ) { date_t date; /* Initialise the encoder with the encoder context */ p_sys->p_dirac = dirac_encoder_init( &p_sys->ctx, 0 ); if( !p_sys->p_dirac ) { msg_Err( p_enc, "Failed to initialize dirac encoder" ); return NULL; } date_Init( &date, p_enc->fmt_in.video.i_frame_rate, p_enc->fmt_in.video.i_frame_rate_base ); #if DIRAC_RESEARCH_VERSION_ATLEAST(1,0,2) int i_delayinpics = dirac_encoder_pts_offset( p_sys->p_dirac ); i_delayinpics /= p_sys->ctx.enc_params.picture_coding_mode + 1; date_Increment( &date, i_delayinpics ); #else date_Increment( &date, 1 ); #endif p_sys->i_pts_offset = date_Get( &date ); /* picture_coding_mode = 1 == FIELD_CODING, two pictures are produced * for each frame input. Calculate time between fields for offsetting * the second field later. */ if( 1 == p_sys->ctx.enc_params.picture_coding_mode ) { date_Set( &date, 0 ); date_Increment( &date, 1 ); p_sys->i_field_time = date_Get( &date ) / 2; } } /* Copy input picture into encoder input buffer (stride by stride) */ /* Would be lovely to just pass the picture in, but there is noway for the * library to free it */ p_dst = p_sys->p_buffer_in; for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ ) { uint8_t *p_src = p_pic->p[i_plane].p_pixels; i_width = p_pic->p[i_plane].i_visible_pitch; i_src_stride = p_pic->p[i_plane].i_pitch; for( i_line = 0; i_line < p_pic->p[i_plane].i_visible_lines; i_line++ ) { vlc_memcpy( p_dst, p_src, i_width ); p_dst += i_width; p_src += i_src_stride; } } /* Load one frame of data into encoder */ if( dirac_encoder_load( p_sys->p_dirac, p_sys->p_buffer_in, p_sys->i_buffer_in ) < 0 ) { msg_Dbg( p_enc, "dirac_encoder_load() error" ); return NULL; } /* store pts in a lookaside buffer, so that the same pts may * be used for the picture in coded order */ StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date ); p_sys->i_input_picnum++; /* store dts in a queue, so that they appear in order in * coded order */ p_block = block_New( p_enc, 1 ); if( !p_block ) return NULL; p_block->i_dts = p_pic->date - p_sys->i_pts_offset; block_FifoPut( p_sys->p_dts_fifo, p_block ); p_block = NULL; /* for field coding mode, insert an extra value into both the * pts lookaside buffer and dts queue, offset to correspond * to a one field delay. 
*/ if( 1 == p_sys->ctx.enc_params.picture_coding_mode ) { StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date + p_sys->i_field_time ); p_sys->i_input_picnum++; p_block = block_New( p_enc, 1 ); if( !p_block ) return NULL; p_block->i_dts = p_pic->date - p_sys->i_pts_offset + p_sys->i_field_time; block_FifoPut( p_sys->p_dts_fifo, p_block ); p_block = NULL; } dirac_encoder_state_t state; /* Retrieve encoded frames from encoder */ do { p_sys->p_dirac->enc_buf.buffer = p_sys->p_buffer_out; p_sys->p_dirac->enc_buf.size = p_sys->i_buffer_out; state = dirac_encoder_output( p_sys->p_dirac ); switch( state ) { case ENC_STATE_AVAIL: { uint32_t pic_num; /* extract data from encoder temporary buffer. */ p_block = block_New( p_enc, p_sys->p_dirac->enc_buf.size ); if( !p_block ) return NULL; memcpy( p_block->p_buffer, p_sys->p_dirac->enc_buf.buffer, p_sys->p_dirac->enc_buf.size ); /* if some flags were set for a previous block, prevent * them from getting lost */ if( p_sys->p_chain ) p_block->i_flags |= p_sys->p_chain->i_flags; /* store all extracted blocks in a chain and gather up when an * entire encapsulation unit is avaliable (ends with a picture) */ block_ChainAppend( &p_sys->p_chain, p_block ); /* Presence of a Sequence header indicates a seek point */ if( 0 == p_block->p_buffer[4] ) { p_block->i_flags |= BLOCK_FLAG_TYPE_I; if( !p_enc->fmt_out.p_extra ) { const uint8_t eos[] = { 'B','B','C','D',0x10,0,0,0,13,0,0,0,0 }; uint32_t len = GetDWBE( p_block->p_buffer + 5 ); /* if it hasn't been done so far, stash a copy of the * sequence header for muxers such as ogg */ /* The OggDirac spec advises that a Dirac EOS DataUnit * is appended to the sequence header to allow guard * against poor streaming servers */ /* XXX, should this be done using the packetizer ? */ p_enc->fmt_out.p_extra = malloc( len + sizeof(eos) ); if( !p_enc->fmt_out.p_extra ) return NULL; memcpy( p_enc->fmt_out.p_extra, p_block->p_buffer, len); memcpy( (uint8_t*)p_enc->fmt_out.p_extra + len, eos, sizeof(eos) ); SetDWBE( (uint8_t*)p_enc->fmt_out.p_extra + len + 10, len ); p_enc->fmt_out.i_extra = len + sizeof(eos); } } if( ReadDiracPictureNumber( &pic_num, p_block ) ) { /* Finding a picture terminates an ecapsulation unit, gather * all data and output; use the next dts value queued up * and find correct pts in the tlb */ p_block = block_FifoGet( p_sys->p_dts_fifo ); p_sys->p_chain->i_dts = p_block->i_dts; p_sys->p_chain->i_pts = GetPicturePTS( p_enc, pic_num ); block_Release( p_block ); block_ChainAppend( &p_output_chain, block_ChainGather( p_sys->p_chain ) ); p_sys->p_chain = NULL; } else { p_block = NULL; } break; } case ENC_STATE_BUFFER: break; case ENC_STATE_INVALID: default: break; } } while( state == ENC_STATE_AVAIL ); return p_output_chain; }
/***************************************************************************** * Block: *****************************************************************************/ static block_t *Block( access_t *p_access ) { access_sys_t *p_sys = p_access->p_sys; int i_blocks = VCD_BLOCKS_ONCE; block_t *p_block; int i_read; /* Check end of file */ if( p_access->info.b_eof ) return NULL; /* Check end of title */ while( p_sys->i_sector >= p_sys->p_sectors[p_access->info.i_title + 2] ) { if( p_access->info.i_title + 2 >= p_sys->i_titles ) { p_access->info.b_eof = true; return NULL; } p_access->info.i_update |= INPUT_UPDATE_TITLE | INPUT_UPDATE_SEEKPOINT | INPUT_UPDATE_SIZE; p_access->info.i_title++; p_access->info.i_seekpoint = 0; p_access->info.i_size = p_sys->title[p_access->info.i_title]->i_size; p_access->info.i_pos = 0; } /* Don't read after the end of a title */ if( p_sys->i_sector + i_blocks >= p_sys->p_sectors[p_access->info.i_title + 2] ) { i_blocks = p_sys->p_sectors[p_access->info.i_title + 2 ] - p_sys->i_sector; } /* Do the actual reading */ if( !( p_block = block_New( p_access, i_blocks * VCD_DATA_SIZE ) ) ) { msg_Err( p_access, "cannot get a new block of size: %i", i_blocks * VCD_DATA_SIZE ); return NULL; } if( ioctl_ReadSectors( VLC_OBJECT(p_access), p_sys->vcddev, p_sys->i_sector, p_block->p_buffer, i_blocks, VCD_TYPE ) < 0 ) { msg_Err( p_access, "cannot read sector %i", p_sys->i_sector ); block_Release( p_block ); /* Try to skip one sector (in case of bad sectors) */ p_sys->i_sector++; p_access->info.i_pos += VCD_DATA_SIZE; return NULL; } /* Update seekpoints */ for( i_read = 0; i_read < i_blocks; i_read++ ) { input_title_t *t = p_sys->title[p_access->info.i_title]; if( t->i_seekpoint > 0 && p_access->info.i_seekpoint + 1 < t->i_seekpoint && p_access->info.i_pos + i_read * VCD_DATA_SIZE >= t->seekpoint[p_access->info.i_seekpoint+1]->i_byte_offset ) { msg_Dbg( p_access, "seekpoint change" ); p_access->info.i_update |= INPUT_UPDATE_SEEKPOINT; p_access->info.i_seekpoint++; } } /* Update a few values */ p_sys->i_sector += i_blocks; p_access->info.i_pos += p_block->i_buffer; return p_block; }
/* We split/pack PCM blocks to a fixed size: pcm_chunk_size bytes */ static block_t *GetPCM( encoder_t *p_enc, aout_buffer_t *p_block ) { encoder_sys_t *p_sys = p_enc->p_sys; block_t *p_pcm_block; if( !p_block ) goto buffered; /* just return a block if we can */ /* Put the PCM samples sent by VLC in the Fifo */ while( p_sys->i_buffer + p_block->i_buffer >= pcm_chunk_size ) { unsigned int i_buffer = 0; p_pcm_block = block_New( p_enc, pcm_chunk_size ); if( !p_pcm_block ) break; if( p_sys->i_buffer ) { vlc_memcpy( p_pcm_block->p_buffer, p_sys->p_buffer, p_sys->i_buffer ); i_buffer = p_sys->i_buffer; p_sys->i_buffer = 0; free( p_sys->p_buffer ); } vlc_memcpy( p_pcm_block->p_buffer + i_buffer, p_block->p_buffer, pcm_chunk_size - i_buffer ); p_block->p_buffer += pcm_chunk_size - i_buffer; p_block->i_buffer -= pcm_chunk_size - i_buffer; block_FifoPut( p_sys->p_fifo, p_pcm_block ); } /* We hadn't enough data to make a block, put it in standby */ if( p_block->i_buffer ) { uint8_t *p_tmp; if( p_sys->i_buffer > 0 ) p_tmp = realloc( p_sys->p_buffer, p_block->i_buffer + p_sys->i_buffer ); else p_tmp = malloc( p_block->i_buffer ); if( !p_tmp ) { p_sys->i_buffer = 0; free( p_sys->p_buffer ); p_sys->p_buffer = NULL; return NULL; } p_sys->p_buffer = p_tmp; vlc_memcpy( p_sys->p_buffer + p_sys->i_buffer, p_block->p_buffer, p_block->i_buffer ); p_sys->i_buffer += p_block->i_buffer; p_block->i_buffer = 0; } buffered: /* and finally get a block back */ return block_FifoCount( p_sys->p_fifo ) > 0 ? block_FifoGet( p_sys->p_fifo ) : NULL; }
/**************************************************************************** * Encode: ****************************************************************************/ static block_t *Encode( encoder_t *p_enc, picture_t *p_pict ) { encoder_sys_t *p_sys = p_enc->p_sys; x264_picture_t pic; x264_nal_t *nal; block_t *p_block; int i_nal, i_out, i; /* init pic */ memset( &pic, 0, sizeof( x264_picture_t ) ); pic.i_pts = p_pict->date; pic.img.i_csp = X264_CSP_I420; pic.img.i_plane = p_pict->i_planes; for( i = 0; i < p_pict->i_planes; i++ ) { pic.img.plane[i] = p_pict->p[i].p_pixels; pic.img.i_stride[i] = p_pict->p[i].i_pitch; } x264_encoder_encode( p_sys->h, &nal, &i_nal, &pic, &pic ); if( !i_nal ) return NULL; for( i = 0, i_out = 0; i < i_nal; i++ ) { memcpy( p_sys->p_buffer + i_out, nal[i].p_payload, nal[i].i_payload ); i_out += nal[i].i_payload; } p_block = block_New( p_enc, i_out ); if( !p_block ) return NULL; memcpy( p_block->p_buffer, p_sys->p_buffer, i_out ); if( pic.i_type == X264_TYPE_IDR || pic.i_type == X264_TYPE_I ) p_block->i_flags |= BLOCK_FLAG_TYPE_I; else if( pic.i_type == X264_TYPE_P ) p_block->i_flags |= BLOCK_FLAG_TYPE_P; else if( pic.i_type == X264_TYPE_B ) p_block->i_flags |= BLOCK_FLAG_TYPE_B; /* This isn't really valid for streams with B-frames */ p_block->i_length = INT64_C(1000000) * p_enc->fmt_in.video.i_frame_rate_base / p_enc->fmt_in.video.i_frame_rate; p_block->i_pts = pic.i_pts; if( p_sys->param.i_bframe > 0 ) { if( p_block->i_flags & BLOCK_FLAG_TYPE_B ) { /* FIXME : this is wrong if bpyramid is set */ p_block->i_dts = p_block->i_pts; p_sys->i_interpolated_dts = p_block->i_dts; } else { if( p_sys->i_interpolated_dts ) { p_block->i_dts = p_sys->i_interpolated_dts; } else { /* Let's put something sensible */ p_block->i_dts = p_block->i_pts; } p_sys->i_interpolated_dts += p_block->i_length; } } else { p_block->i_dts = p_block->i_pts; } return p_block; }
id->ff_enc_c->coded_frame->motion_val[i], 2 * stride * height * sizeof(int16_t) ); } if ( id->ff_enc_c->coded_frame->ref_index[i] ) { id->p_frame->ref_index[i] = malloc( b8_stride * 2 * mb_height * sizeof(int8_t) ); vlc_memcpy( id->p_frame->ref_index[i], id->ff_enc_c->coded_frame->ref_index[i], b8_stride * 2 * mb_height * sizeof(int8_t)); } } } #endif p_out = block_New( p_stream, i_out ); vlc_memcpy( p_out->p_buffer, id->p_buffer_out, i_out ); p_out->i_length = p_buffer->i_length; p_out->i_pts = p_buffer->i_dts; p_out->i_dts = p_buffer->i_dts; p_out->i_rate = p_buffer->i_rate; switch ( id->ff_enc_c->coded_frame->pict_type ) { case FF_I_TYPE: p_out->i_flags |= BLOCK_FLAG_TYPE_I; break; case FF_P_TYPE: p_out->i_flags |= BLOCK_FLAG_TYPE_P; break; case FF_B_TYPE:
/**************************************************************************** * EncodeAudio: the whole thing ****************************************************************************/ static block_t *EncodeAudio( encoder_t *p_enc, aout_buffer_t *p_aout_buf ) { encoder_sys_t *p_sys = p_enc->p_sys; block_t *p_block, *p_chain = NULL; char *p_buffer = p_aout_buf->p_buffer; int i_samples = p_aout_buf->i_nb_samples; int i_samples_delay = p_sys->i_samples_delay; p_sys->i_pts = p_aout_buf->start_date - (mtime_t)1000000 * (mtime_t)p_sys->i_samples_delay / (mtime_t)p_enc->fmt_in.audio.i_rate; p_sys->i_samples_delay += i_samples; while( p_sys->i_samples_delay >= p_sys->p_context->frame_size ) { int16_t *p_samples; int i_out; if( i_samples_delay ) { /* Take care of the left-over from last time */ int i_delay_size = i_samples_delay * 2 * p_sys->p_context->channels; int i_size = p_sys->i_frame_size - i_delay_size; p_samples = (int16_t *)p_sys->p_buffer; memcpy( p_sys->p_buffer + i_delay_size, p_buffer, i_size ); p_buffer -= i_delay_size; i_samples += i_samples_delay; i_samples_delay = 0; } else { p_samples = (int16_t *)p_buffer; } i_out = avcodec_encode_audio( p_sys->p_context, p_sys->p_buffer_out, 2 * AVCODEC_MAX_AUDIO_FRAME_SIZE, p_samples ); #if 0 msg_Warn( p_enc, "avcodec_encode_audio: %d", i_out ); #endif if( i_out < 0 ) break; p_buffer += p_sys->i_frame_size; p_sys->i_samples_delay -= p_sys->p_context->frame_size; i_samples -= p_sys->p_context->frame_size; if( i_out == 0 ) continue; p_block = block_New( p_enc, i_out ); memcpy( p_block->p_buffer, p_sys->p_buffer_out, i_out ); p_block->i_length = (mtime_t)1000000 * (mtime_t)p_sys->p_context->frame_size / (mtime_t)p_sys->p_context->sample_rate; p_block->i_dts = p_block->i_pts = p_sys->i_pts; /* Update pts */ p_sys->i_pts += p_block->i_length; block_ChainAppend( &p_chain, p_block ); } /* Backup the remaining raw samples */ if( i_samples ) { memcpy( p_sys->p_buffer + i_samples_delay * 2 * p_sys->p_context->channels, p_buffer, i_samples * 2 * p_sys->p_context->channels ); } return p_chain; }