/*
 * open_flac: parse the FLAC metadata block chain of a raw FLAC stream.
 *
 * Reads metadata blocks until the "last-metadata-block" flag (bit 7 of the
 * block header's first byte) is set.  STREAMINFO creates the audio stream
 * and a synthetic extradata header for the decoder, SEEKTABLE fills the
 * private seek table, VORBIS_COMMENT is converted to track metadata; all
 * other block types are skipped.
 *
 * Returns 1 on success, 0 on failure (truncated input, allocation failure
 * or malformed STREAMINFO/SEEKTABLE).  On failure, priv/track-table cleanup
 * is left to the demuxer close path, as before.
 */
static int open_flac(bgav_demuxer_context_t * ctx)
  {
  bgav_stream_t * s;
  uint8_t header[4];
  uint32_t size;
  flac_priv_t * priv;
  bgav_input_context_t * input_mem;
  uint8_t * comment_buffer;
  bgav_vorbis_comment_t vc;

  /* Skip the 4 byte "fLaC" stream marker */
  bgav_input_skip(ctx->input, 4);

  priv = calloc(1, sizeof(*priv));
  ctx->priv = priv;

  ctx->tt = bgav_track_table_create(1);

  header[0] = 0;
  /* Loop until the block header's "last metadata block" bit is set */
  while(!(header[0] & 0x80))
    {
    if(bgav_input_read_data(ctx->input, header, 4) < 4)
      return 0;

    /* 24 bit big endian block length from header bytes 1..3 */
    size = header[1];
    size <<= 8;
    size |= header[2];
    size <<= 8;
    size |= header[3];

    switch(header[0] & 0x7F)
      {
      case 0: // STREAMINFO
        /* Add audio stream */
        s = bgav_track_add_audio_stream(ctx->tt->cur, ctx->opt);
        /* Synthesize a complete file header as extradata:
           "fLaC" marker + block header + STREAMINFO payload */
        s->ext_size = BGAV_FLAC_STREAMINFO_SIZE + 8;
        s->ext_data = malloc(s->ext_size);
        if(!s->ext_data)
          goto fail;
        s->ext_data[0] = 'f';
        s->ext_data[1] = 'L';
        s->ext_data[2] = 'a';
        s->ext_data[3] = 'C';
        memcpy(s->ext_data+4, header, 4);
        /* We tell the decoder, that this is the last metadata packet */
        s->ext_data[4] |= 0x80;

        if(bgav_input_read_data(ctx->input, s->ext_data + 8,
                                BGAV_FLAC_STREAMINFO_SIZE) <
           BGAV_FLAC_STREAMINFO_SIZE)
          goto fail;
        if(!bgav_flac_streaminfo_read(s->ext_data + 8, &priv->streaminfo))
          goto fail;
        if(ctx->opt->dump_headers)
          bgav_flac_streaminfo_dump(&priv->streaminfo);

        bgav_flac_streaminfo_init_stream(&priv->streaminfo, s);

        if(priv->streaminfo.total_samples)
          ctx->tt->cur->duration =
            gavl_samples_to_time(priv->streaminfo.samplerate,
                                 priv->streaminfo.total_samples);
        break;
      case 1: // PADDING
        bgav_input_skip(ctx->input, size);
        break;
      case 2: // APPLICATION
        bgav_input_skip(ctx->input, size);
        break;
      case 3: // SEEKTABLE
        if(!bgav_flac_seektable_read(ctx->input, &priv->seektable, size))
          goto fail;
        if(ctx->opt->dump_indices)
          bgav_flac_seektable_dump(&priv->seektable);
        break;
      case 4: // VORBIS_COMMENT
        comment_buffer = malloc(size);
        if(!comment_buffer)
          return 0;
        if(bgav_input_read_data(ctx->input, comment_buffer, size) < size)
          {
          /* Fix: comment_buffer was leaked here on a short read */
          free(comment_buffer);
          return 0;
          }
        input_mem = bgav_input_open_memory(comment_buffer, size, ctx->opt);

        memset(&vc, 0, sizeof(vc));
        if(bgav_vorbis_comment_read(&vc, input_mem))
          bgav_vorbis_comment_2_metadata(&vc, &ctx->tt->cur->metadata);
        if(ctx->tt->cur->audio_streams)
          gavl_metadata_set(&ctx->tt->cur->audio_streams->m,
                            GAVL_META_SOFTWARE, vc.vendor);
        if(ctx->opt->dump_headers)
          bgav_vorbis_comment_dump(&vc);

        bgav_vorbis_comment_free(&vc);
        bgav_input_close(input_mem);
        bgav_input_destroy(input_mem);
        free(comment_buffer);
        break;
      case 5: // CUESHEET
        bgav_input_skip(ctx->input, size);
        break;
      default:
        bgav_input_skip(ctx->input, size);
      }
    }

  ctx->data_start = ctx->input->position;
  ctx->flags |= BGAV_DEMUXER_HAS_DATA_START;

  gavl_metadata_set(&ctx->tt->cur->metadata, GAVL_META_FORMAT, "FLAC");
  gavl_metadata_set(&ctx->tt->cur->metadata, GAVL_META_MIMETYPE,
                    "audio/flac");

  ctx->index_mode = INDEX_MODE_SIMPLE;

  /* Byte-seeking plus a seek table enables demuxer-level seeking */
  if(priv->seektable.num_entries && ctx->input->input->seek_byte)
    ctx->flags |= BGAV_DEMUXER_CAN_SEEK;

  bgav_demuxer_init_cue(ctx);

  return 1;
  fail:
  return 0;
  }
bool ReadMedia::initFormat() { const gavl_audio_format_t * open_audio_format; const gavl_video_format_t * open_video_format; // we use the m_vfifosize to see if the user app wants video or not // then, we set m_video_stream_count to 0 if he doesn't want video if (m_video_stream_count > 0 && m_vfifosize > 0) { open_video_format = bgav_get_video_format(m_file, 0); if (open_video_format->pixelformat == GAVL_PIXELFORMAT_NONE) { printf("!!!sorry, pixelformat is not recognized.\n"); return false; } // let's check to see if the formats are the same, if they are the same // there is no reason to recreate the fifo or frames if ( gavl_video_formats_equal( &m_video_format, open_video_format) == 0 ) { // the formats are different gavl_video_format_copy (&m_video_format, open_video_format); if (m_video_frame != NULL) gavl_video_frame_destroy(m_video_frame); m_video_frame = gavl_video_frame_create(&m_video_format); gavl_video_frame_clear( m_video_frame, &m_video_format); if (m_fifovideo != NULL) delete m_fifovideo; m_fifovideo= new FifoVideoFrames( m_vfifosize , &m_video_format); } } else { m_video_stream_count = 0; m_veof = true; } // we use the m_afifosize to see if the user app wants audio or not // then, we set m_audio_stream_count to 0 if he doesn't want audio if (m_audio_stream_count > 0 && m_afifosize > 0) { open_audio_format = bgav_get_audio_format(m_file, 0); // we can get audio formats that are unkown if ( open_audio_format->sample_format == GAVL_SAMPLE_NONE) { printf("sorry, this file has unsupported audio.\n"); return false; } if ( gavl_audio_formats_equal(&m_audio_format, open_audio_format) == 0 ) { // audio formats are different // save the old spf int spf = m_audio_format.samples_per_frame; gavl_audio_format_copy(&m_audio_format, open_audio_format); if (m_audio_frame != NULL) { gavl_audio_frame_destroy(m_audio_frame); } // set it back to original m_audio_format.samples_per_frame = spf ; m_audio_frame = gavl_audio_frame_create(&m_audio_format); 
gavl_audio_frame_mute( m_audio_frame, &m_audio_format); if( m_fifoaudio != NULL ) delete m_fifoaudio; m_fifoaudio = new FifoAudioFrames( m_afifosize , &m_audio_format); } } else { // user doesn't want audio m_audio_stream_count = 0; m_aeof=true; } m_length_in_gavltime = bgav_get_duration ( m_file, 0);; m_length_in_seconds = gavl_time_to_seconds( m_length_in_gavltime ); m_num_samples = 0; m_num_frames = 0; if (m_audio_stream_count) { if ( bgav_can_seek_sample(m_file) == 1 ) { m_num_samples= bgav_audio_duration ( m_file, 0) ; } else { m_num_samples= gavl_time_to_samples( m_audio_format.samplerate , bgav_get_duration ( m_file, 0) ); } } // set frames WE NEED TO take care here for non-constant frame-rates if(m_video_stream_count) { if ( bgav_can_seek_sample(m_file) == 1 && m_video_format.framerate_mode == GAVL_FRAMERATE_CONSTANT) { m_num_frames = bgav_video_duration ( m_file, 0)/ m_video_format.frame_duration; } else if ( bgav_can_seek_sample(m_file) == 1 && m_video_format.framerate_mode == GAVL_FRAMERATE_VARIABLE ) { // FIXME what to do with non constant frame rates? m_num_frames=0; } else { m_num_frames = gavl_time_to_frames( m_video_format.timescale, m_video_format.frame_duration , bgav_get_duration ( m_file, 0) ); } } // printf("m_num_frames =%lld, duration = %lld , vid_duration=%lld\n", // m_num_frames, bgav_get_duration ( m_file, 0), bgav_video_duration ( m_file, 0) ); // set seconds if ( bgav_can_seek_sample(m_file) == 1) { gavl_time_t atime=0,vtime=0; if ( m_audio_stream_count ) atime = gavl_samples_to_time( m_audio_format.samplerate, m_num_samples ); if (m_video_stream_count && m_video_format.frame_duration > 0) { vtime = gavl_frames_to_time( m_video_format.timescale, m_video_format.frame_duration, m_num_frames ); } else if ( m_video_stream_count ) { // non constant framerate vtime = bgav_video_duration( m_file, 0); } // else rely on audio time m_length_in_gavltime = atime > vtime ? 
atime :vtime; m_length_in_seconds = gavl_time_to_seconds( m_length_in_gavltime ); //printf("atime=%ld, vtime=%ld, l_in_sec=%f\n", atime, vtime, m_length_in_seconds); } m_pcm_seek = SEEK_NOTHING; m_frame_seek = SEEK_NOTHING; return true; }