Example #1
0
int
resolve_unknown_type(const char * path, media_types dir_type)
{
	struct stat entry;
	unsigned char type = TYPE_UNKNOWN;
	char str_buf[PATH_MAX];
	ssize_t len;

	if( lstat(path, &entry) == 0 )
	{
		if( S_ISLNK(entry.st_mode) )
		{
			if( (len = readlink(path, str_buf, PATH_MAX-1)) > 0 )
			{
				str_buf[len] = '\0';
				//DEBUG DPRINTF(E_DEBUG, L_GENERAL, "Checking for recursive symbolic link: %s (%s)\n", path, str_buf);
				if( strncmp(path, str_buf, strlen(str_buf)) == 0 )
				{
					DPRINTF(E_DEBUG, L_GENERAL, "Ignoring recursive symbolic link: %s (%s)\n", path, str_buf);
					return type;
				}
			}
			stat(path, &entry);
		}

		if( S_ISDIR(entry.st_mode) )
		{
			type = TYPE_DIR;
		}
		else if( S_ISREG(entry.st_mode) )
		{
			switch( dir_type )
			{
				case ALL_MEDIA:
					if( is_image(path) ||
					    is_audio(path) ||
					    is_video(path) ||
					    is_playlist(path) )
						type = TYPE_FILE;
					break;
				case TYPE_AUDIO:
					if( is_audio(path) ||
					    is_playlist(path) )
						type = TYPE_FILE;
					break;
				case TYPE_VIDEO:
					if( is_video(path) )
						type = TYPE_FILE;
					break;
				case TYPE_IMAGES:
					if( is_image(path) )
						type = TYPE_FILE;
					break;
				default:
					break;
			}
		}
	}
	return type;
}
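Example #1 delegates the actual media detection to helpers such as is_audio(), is_video(), is_image() and is_playlist(), which are not shown in this excerpt. As a rough, hypothetical sketch (not the project's real helpers, which check a much longer list of extensions), such predicates can be written as case-insensitive extension checks:

#include <string.h>
#include <strings.h>	/* strcasecmp */

static int
has_ext(const char *path, const char * const exts[])
{
	const char *dot = strrchr(path, '.');
	int i;

	if (!dot)
		return 0;
	for (i = 0; exts[i]; i++)
	{
		if (strcasecmp(dot, exts[i]) == 0)
			return 1;
	}
	return 0;
}

/* Hypothetical stand-in for is_audio(); the real helper covers more formats. */
static int
is_audio_sketch(const char *path)
{
	static const char * const exts[] = { ".mp3", ".flac", ".ogg", ".wav", ".wma", NULL };
	return has_ext(path, exts);
}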
Example #2
0
static stream_timing_t *id_to_st( hb_reader_t *r, const hb_buffer_t *buf, int valid )
{
    stream_timing_t *st = r->stream_timing;
    while ( st->id != buf->id && st->id != -1)
    {
        ++st;
    }
    // if we haven't seen this stream add it.
    if ( st->id == -1 )
    {
        // we keep the stream timing info in an array with some power-of-two
        // number of slots. If we don't have two slots left (one for our new
        // entry plus one for the "-1" eol) we need to expand the array.
        int slot = st - r->stream_timing;
        if ( slot + 1 >= r->st_slots )
        {
            r->st_slots *= 2;
            r->stream_timing = realloc( r->stream_timing, r->st_slots *
                                        sizeof(*r->stream_timing) );
            st = r->stream_timing + slot;
        }
        st->id = buf->id;
        st->average = 30.*90.;
        st->last = -st->average;
        if ( ( st->is_audio = is_audio( r, buf->id ) ) != 0 )
        {
            r->saw_audio = 1;
        }
        st[1].id = -1;
        st->valid = valid;
    }
    return st;
}
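The lookup in Example #2 relies on r->stream_timing being a growable array terminated by an entry whose id is -1, with r->st_slots tracking the allocated (power-of-two) capacity. Below is a minimal sketch of the setup id_to_st() assumes (it needs <stdlib.h>); the field names come from the function above, but the initial slot count and the dedicated init helper are assumptions, not HandBrake's actual reader-initialization code. Note also that the realloc() in the example is not checked for failure.

static int init_stream_timing( hb_reader_t *r )
{
    /* Needs at least two slots: one real entry plus the -1 sentinel. */
    r->st_slots = 4;
    r->stream_timing = calloc( r->st_slots, sizeof(*r->stream_timing) );
    if ( r->stream_timing == NULL )
        return -1;
    /* Empty array: the sentinel sits in slot 0 until id_to_st() adds streams. */
    r->stream_timing[0].id = -1;
    return 0;
}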
Example #3
0
void CPlayerMedia::synchronize_rtp_bytestreams (rtcp_sync_t *sync)
{
  if (is_audio()) {
    player_error_message("Attempt to syncronize audio byte stream");
    return;
  }
  if (m_rtp_byte_stream != NULL) 
    m_rtp_byte_stream->synchronize(sync);
}
Example #4
0
int
resolve_unknown_type(const char * path, enum media_types dir_type)
{
	struct stat entry;
	unsigned char type = TYPE_UNKNOWN;

	if( stat(path, &entry) == 0 )
	{
		if( S_ISDIR(entry.st_mode) )
		{
			type = TYPE_DIR;
		}
		else if( S_ISREG(entry.st_mode) )
		{
			switch( dir_type )
			{
				case ALL_MEDIA:
					if( is_image(path) ||
					    is_audio(path) ||
					    is_video(path) ||
					    is_playlist(path) )
						type = TYPE_FILE;
					break;
				case AUDIO_ONLY:
					if( is_audio(path) ||
					    is_playlist(path) )
						type = TYPE_FILE;
					break;
				case VIDEO_ONLY:
					if( is_video(path) )
						type = TYPE_FILE;
					break;
				case IMAGES_ONLY:
					if( is_image(path) )
						type = TYPE_FILE;
					break;
				default:
					break;
			}
		}
	}
	return type;
}
Example #5
0
/***********************************************************************
 * ReaderFunc
 ***********************************************************************
 *
 **********************************************************************/
static void ReaderFunc( void * _r )
{
    hb_reader_t  * r = _r;
    hb_fifo_t   ** fifos;
    hb_buffer_t  * buf;
    hb_list_t    * list;
    int            n;
    int            chapter = -1;
    int            chapter_end = r->job->chapter_end;

    if ( r->title->type == HB_BD_TYPE )
    {
        if ( !( r->bd = hb_bd_init( r->title->path ) ) )
            return;
    }
    else if ( r->title->type == HB_DVD_TYPE )
    {
        if ( !( r->dvd = hb_dvd_init( r->title->path ) ) )
            return;
    }
    else if ( r->title->type == HB_STREAM_TYPE ||
              r->title->type == HB_FF_STREAM_TYPE )
    {
        if ( !( r->stream = hb_stream_open( r->title->path, r->title ) ) )
            return;
    }
    else
    {
        // Unknown type, should never happen
        return;
    }

    if (r->bd)
    {
        if( !hb_bd_start( r->bd, r->title ) )
        {
            hb_bd_close( &r->bd );
            return;
        }
        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_bd_seek( r->bd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
        else if ( r->job->pts_to_start )
        {
            // Note, bd seeks always put us to an i-frame.  no need
            // to start decoding early using r->pts_to_start
            hb_bd_seek_pts( r->bd, r->job->pts_to_start );
            r->job->pts_to_start = 0;
            r->start_found = 1;
        }
        else
        {
            hb_bd_seek_chapter( r->bd, r->job->chapter_start );
        }
        if (r->job->angle > 1)
        {
            hb_bd_set_angle( r->bd, r->job->angle - 1 );
        }
    }
    else if (r->dvd)
    {
        /*
         * XXX this code is a temporary hack that should go away if/when
         *     chapter merging goes away in libhb/dvd.c
         * map the start and end chapter numbers to on-media chapter
         * numbers since chapter merging could cause the handbrake numbers
         * to diverge from the media numbers and, if our chapter_end is after
         * a media chapter that got merged, we'll stop ripping too early.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->title->list_chapter, chapter_end - 1 );

        chapter_end = chap->index;
        if (start > 1)
        {
           chap = hb_list_item( r->title->list_chapter, start - 1 );
           start = chap->index;
        }
        /* end chapter mapping XXX */

        if( !hb_dvd_start( r->dvd, r->title, start ) )
        {
            hb_dvd_close( &r->dvd );
            return;
        }
        if (r->job->angle)
        {
            hb_dvd_set_angle( r->dvd, r->job->angle );
        }

        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_dvd_seek( r->dvd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
    }
    else if ( r->stream && r->job->start_at_preview )
    {
        
        // XXX code from DecodePreviews - should go into its own routine
        hb_stream_seek( r->stream, (float)( r->job->start_at_preview - 1 ) /
                        ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );

    } 
    else if ( r->stream && r->job->pts_to_start )
    {
        int64_t pts_to_start = r->job->pts_to_start;
        
        // Find out what the first timestamp of the stream is
        // and then seek to the appropriate offset from it
        if ( ( buf = hb_stream_read( r->stream ) ) )
        {
            if ( buf->start > 0 )
            {
                pts_to_start += buf->start;
                r->pts_to_start += buf->start;
                r->job->pts_to_start += buf->start;
            }
        }
        
        if ( hb_stream_seek_ts( r->stream, pts_to_start ) >= 0 )
        {
            // Seek takes us to the nearest I-frame before the timestamp
            // that we want.  So we will retrieve the start time of the
            // first packet we get, subtract that from pts_to_start, and
            // inspect the rest of the frames in sync.
            r->start_found = 2;
            r->job->pts_to_start = pts_to_start;
        }
    } 
    else if( r->stream )
    {
        /*
         * Standard stream, seek to the starting chapter, if set, and track the
         * end chapter so that we end at the right time.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->title->list_chapter, chapter_end - 1 );
        
        chapter_end = chap->index;
        if (start > 1)
        {
            chap = hb_list_item( r->title->list_chapter, start - 1 );
            start = chap->index;
        }
        
        /*
         * Seek to the start chapter.
         */
        hb_stream_seek_chapter( r->stream, start );
    }

    list  = hb_list_init();

    while( !*r->die && !r->job->done )
    {
        if (r->bd)
            chapter = hb_bd_chapter( r->bd );
        else if (r->dvd)
            chapter = hb_dvd_chapter( r->dvd );
        else if (r->stream)
            chapter = hb_stream_chapter( r->stream );

        if( chapter < 0 )
        {
            hb_log( "reader: end of the title reached" );
            break;
        }
        if( chapter > chapter_end )
        {
            hb_log( "reader: end of chapter %d (media %d) reached at media chapter %d",
                    r->job->chapter_end, chapter_end, chapter );
            break;
        }

        if (r->bd)
        {
          if( (buf = hb_bd_read( r->bd )) == NULL )
          {
              break;
          }
        }
        else if (r->dvd)
        {
          if( (buf = hb_dvd_read( r->dvd )) == NULL )
          {
              break;
          }
        }
        else if (r->stream)
        {
          if ( (buf = hb_stream_read( r->stream )) == NULL )
          {
            break;
          }
          if ( r->start_found == 2 )
          {
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if ( buf->start > 0 && buf->start < r->job->pts_to_start )
            {
                r->job->pts_to_start -= buf->start;
            }
            else if ( buf->start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
                r->start_found = 1;
            }
          }
        }

        if( r->job->indepth_scan )
        {
            /*
             * Need to update the progress during a subtitle scan
             */
            hb_state_t state;

#define p state.param.working

            state.state = HB_STATE_WORKING;
            p.progress = (double)chapter / (double)r->job->chapter_end;
            if( p.progress > 1.0 )
            {
                p.progress = 1.0;
            }
            p.rate_avg = 0.0;
            p.hours    = -1;
            p.minutes  = -1;
            p.seconds  = -1;
            hb_set_state( r->job->h, &state );
        }

        (hb_demux[r->title->demuxer])( buf, list, &r->demux );

        while( ( buf = hb_list_item( list, 0 ) ) )
        {
            hb_list_rem( list, buf );
            fifos = GetFifoForId( r->job, buf->id );

            if ( fifos && ! r->saw_video && !r->job->indepth_scan )
            {
                // The first data packet with a PTS from an audio or video stream
                // that we're decoding defines 'time zero'. Discard packets until
                // we get one.
                if ( buf->start != -1 && buf->renderOffset != -1 &&
                     ( buf->id == r->title->video_id || is_audio( r, buf->id ) ) )
                {
                    // force a new scr offset computation
                    r->scr_changes = r->demux.scr_changes - 1;
                    // create a stream state if we don't have one so the
                    // offset will get computed correctly.
                    id_to_st( r, buf, 1 );
                    r->saw_video = 1;
                    hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
                            r->demux.last_scr, buf->id, buf->renderOffset );
                }
                else
                {
                    fifos = NULL;
                }
            }
            if( fifos )
            {
                if ( buf->renderOffset != -1 )
                {
                    if ( r->scr_changes != r->demux.scr_changes )
                    {
                        // This is the first audio or video packet after an SCR
                        // change. Compute a new scr offset that would make this
                        // packet follow the last of this stream with the 
                        // correct average spacing.
                        stream_timing_t *st = id_to_st( r, buf, 0 );

                        // if this is the video stream and we don't have
                        // audio yet or this is an audio stream
                        // generate a new scr
                        if ( st->is_audio ||
                             ( st == r->stream_timing && !r->saw_audio ) )
                        {
                            new_scr_offset( r, buf );
                        }
                        else
                        {
                            // defer the scr change until we get some
                            // audio since audio has a timestamp per
                            // frame but video & subtitles don't. Clear
                            // the timestamps so the decoder will generate
                            // them from the frame durations.
                            buf->start = -1;
                            buf->renderOffset = -1;
                        }
                    }
                }
                if ( buf->start != -1 )
                {
                    int64_t start = buf->start - r->scr_offset;
                    if ( !r->start_found )
                        UpdateState( r, start );

                    if ( !r->start_found &&
                        start >= r->pts_to_start )
                    {
                        // pts_to_start point found
                        r->start_found = 1;
                    }
                    // This log is handy when you need to debug timing problems
                    //hb_log("id %x scr_offset %ld start %ld --> %ld", 
                    //        buf->id, r->scr_offset, buf->start, 
                    //        buf->start - r->scr_offset);
                    buf->start -= r->scr_offset;
                }
                if ( buf->renderOffset != -1 )
                {
                    if ( r->scr_changes == r->demux.scr_changes )
                    {
                        // This packet is referenced to the same SCR as the last.
                        // Adjust timestamp to remove the System Clock Reference
                        // offset then update the average inter-packet time
                        // for this stream.
                        buf->renderOffset -= r->scr_offset;
                        update_ipt( r, buf );
                    }
                }
                if ( !r->start_found )
                {
                    hb_buffer_close( &buf );
                    continue;
                }

                buf->sequence = r->sequence++;
                /* if there are multiple output fifos, send a copy of the
                 * buffer down all but the first (we have to not ship the
                 * original buffer or we'll race with the thread that's
                 * consuming the buffer & inject garbage into the data stream). */
                for( n = 1; fifos[n] != NULL; n++)
                {
                    hb_buffer_t *buf_copy = hb_buffer_init( buf->size );
                    hb_buffer_copy_settings( buf_copy, buf );
                    memcpy( buf_copy->data, buf->data, buf->size );
                    push_buf( r, fifos[n], buf_copy );
                }
                push_buf( r, fifos[0], buf );
            }
            else
            {
                hb_buffer_close( &buf );
            }
        }
    }

    // send empty buffers downstream to video & audio decoders to signal we're done.
    if( !*r->die && !r->job->done )
    {
        push_buf( r, r->job->fifo_mpeg2, hb_buffer_init(0) );

        hb_audio_t *audio;
        for( n = 0; (audio = hb_list_item( r->job->title->list_audio, n)); ++n )
        {
            if ( audio->priv.fifo_in )
                push_buf( r, audio->priv.fifo_in, hb_buffer_init(0) );
        }

        hb_subtitle_t *subtitle;
        for( n = 0; (subtitle = hb_list_item( r->job->title->list_subtitle, n)); ++n )
        {
            if ( subtitle->fifo_in && subtitle->source == VOBSUB)
                push_buf( r, subtitle->fifo_in, hb_buffer_init(0) );
        }
    }

    hb_list_empty( &list );
    if (r->bd)
    {
        hb_bd_stop( r->bd );
        hb_bd_close( &r->bd );
    }
    else if (r->dvd)
    {
        hb_dvd_stop( r->dvd );
        hb_dvd_close( &r->dvd );
    }
    else if (r->stream)
    {
        hb_stream_close(&r->stream);
    }

    if ( r->stream_timing )
    {
        free( r->stream_timing );
    }

    hb_log( "reader: done. %d scr changes", r->demux.scr_changes );
    if ( r->demux.dts_drops )
    {
        hb_log( "reader: %d drops because DTS out of range", r->demux.dts_drops );
    }

    free( r );
    _r = NULL;
}
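ReaderFunc() hands buffers to the downstream fifos through push_buf(), which is not included in this excerpt. Based on how it is called above, a plausible sketch is shown below; the waiting strategy and the exact signature are assumptions (hb_fifo_is_full(), hb_fifo_push(), hb_snooze() and hb_buffer_close() are real libhb calls), so treat it as an illustration rather than HandBrake's actual implementation:

static void push_buf( const hb_reader_t *r, hb_fifo_t *fifo, hb_buffer_t *buf )
{
    // Wait for room in the fifo, but give up if the job is cancelled or done
    // so the reader thread can exit instead of blocking forever.
    while ( !*r->die && !r->job->done && hb_fifo_is_full( fifo ) )
    {
        hb_snooze( 50 );
    }
    if ( !*r->die && !r->job->done )
    {
        hb_fifo_push( fifo, buf );
    }
    else
    {
        hb_buffer_close( &buf );
    }
}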
Example #6
0
int64_t
GetVideoMetadata(const char *path, char *name)
{
	struct stat file;
	int ret, i;
	struct tm *modtime;
	AVFormatContext *ctx = NULL;
	AVCodecContext *ac = NULL, *vc = NULL;
	int audio_stream = -1, video_stream = -1;
	enum audio_profiles audio_profile = PROFILE_AUDIO_UNKNOWN;
	char fourcc[4];
	int64_t album_art = 0;
	char nfo[MAXPATHLEN], *ext;
	struct song_metadata video;
	metadata_t m;
	uint32_t free_flags = 0xFFFFFFFF;
	char *path_cpy, *basepath;

	memset(&m, '\0', sizeof(m));
	memset(&video, '\0', sizeof(video));

	//DEBUG DPRINTF(E_DEBUG, L_METADATA, "Parsing video %s...\n", name);
	if ( stat(path, &file) != 0 )
		return 0;
	strip_ext(name);
	//DEBUG DPRINTF(E_DEBUG, L_METADATA, " * size: %jd\n", file.st_size);

	ret = lav_open(&ctx, path);
	if( ret != 0 )
	{
		char err[128];
		av_strerror(ret, err, sizeof(err));
		DPRINTF(E_WARN, L_METADATA, "Opening %s failed! [%s]\n", path, err);
		return 0;
	}
	//dump_format(ctx, 0, NULL, 0);
	for( i=0; i<ctx->nb_streams; i++)
	{
		if( audio_stream == -1 &&
		    ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
		{
			audio_stream = i;
			ac = ctx->streams[audio_stream]->codec;
			continue;
		}
		else if( video_stream == -1 &&
			 ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
		{
			video_stream = i;
			vc = ctx->streams[video_stream]->codec;
			continue;
		}
	}
	path_cpy = strdup(path);
	basepath = basename(path_cpy);
	if( !vc )
	{
		/* This must not be a video file. */
		lav_close(ctx);
		if( !is_audio(path) )
			DPRINTF(E_DEBUG, L_METADATA, "File %s does not contain a video stream.\n", basepath);
		free(path_cpy);
		return 0;
	}

	if( ac )
	{
		aac_object_type_t aac_type = AAC_INVALID;
		switch( ac->codec_id )
		{
			case CODEC_ID_MP3:
				audio_profile = PROFILE_AUDIO_MP3;
				break;
			case CODEC_ID_AAC:
				if( !ac->extradata_size ||
				    !ac->extradata )
				{
					DPRINTF(E_DEBUG, L_METADATA, "No AAC type\n");
				}
				else
				{
					uint8_t data;
					memcpy(&data, ac->extradata, 1);
					aac_type = data >> 3;
				}
				switch( aac_type )
				{
					/* AAC Low Complexity variants */
					case AAC_LC:
					case AAC_LC_ER:
						if( ac->sample_rate < 8000 ||
						    ac->sample_rate > 48000 )
						{
							DPRINTF(E_DEBUG, L_METADATA, "Unsupported AAC: sample rate is not 8000 < %d < 48000\n",
								ac->sample_rate);
							break;
						}
						/* AAC @ Level 1/2 */
						if( ac->channels <= 2 &&
						    ac->bit_rate <= 576000 )
							audio_profile = PROFILE_AUDIO_AAC;
						else if( ac->channels <= 6 &&
							 ac->bit_rate <= 1440000 )
							audio_profile = PROFILE_AUDIO_AAC_MULT5;
						else
							DPRINTF(E_DEBUG, L_METADATA, "Unhandled AAC: %d channels, %d bitrate\n",
								ac->channels,
								ac->bit_rate);
						break;
					default:
						DPRINTF(E_DEBUG, L_METADATA, "Unhandled AAC type [%d]\n", aac_type);
						break;
				}
				break;
			case CODEC_ID_AC3:
			case CODEC_ID_DTS:
				audio_profile = PROFILE_AUDIO_AC3;
				break;
			case CODEC_ID_WMAV1:
			case CODEC_ID_WMAV2:
				/* WMA Baseline: stereo, up to 48 KHz, up to 192,999 bps */
				if ( ac->bit_rate <= 193000 )
					audio_profile = PROFILE_AUDIO_WMA_BASE;
				/* WMA Full: stereo, up to 48 KHz, up to 385 Kbps */
				else if ( ac->bit_rate <= 385000 )
					audio_profile = PROFILE_AUDIO_WMA_FULL;
				break;
			#if LIBAVCODEC_VERSION_INT > ((51<<16)+(50<<8)+1)
			case CODEC_ID_WMAPRO:
				audio_profile = PROFILE_AUDIO_WMA_PRO;
				break;
			#endif
			case CODEC_ID_MP2:
				audio_profile = PROFILE_AUDIO_MP2;
				break;
			case CODEC_ID_AMR_NB:
				audio_profile = PROFILE_AUDIO_AMR;
				break;
			default:
				if( (ac->codec_id >= CODEC_ID_PCM_S16LE) &&
				    (ac->codec_id < CODEC_ID_ADPCM_IMA_QT) )
					audio_profile = PROFILE_AUDIO_PCM;
				else
					DPRINTF(E_DEBUG, L_METADATA, "Unhandled audio codec [0x%X]\n", ac->codec_id);
				break;
		}
		xasprintf(&m.frequency, "%u", ac->sample_rate);
		#if LIBAVCODEC_VERSION_INT < (52<<16)
		xasprintf(&m.bps, "%u", ac->bits_per_sample);
		#else
		xasprintf(&m.bps, "%u", ac->bits_per_coded_sample);
		#endif
		xasprintf(&m.channels, "%u", ac->channels);
	}
	if( vc )
	{
		int off;
		int duration, hours, min, sec, ms;
		ts_timestamp_t ts_timestamp = NONE;
		DPRINTF(E_DEBUG, L_METADATA, "Container: '%s' [%s]\n", ctx->iformat->name, basepath);
		xasprintf(&m.resolution, "%dx%d", vc->width, vc->height);
		if( ctx->bit_rate > 8 )
			xasprintf(&m.bitrate, "%u", ctx->bit_rate / 8);
		if( ctx->duration > 0 ) {
			duration = (int)(ctx->duration / AV_TIME_BASE);
			hours = (int)(duration / 3600);
			min = (int)(duration / 60 % 60);
			sec = (int)(duration % 60);
			ms = (int)(ctx->duration / (AV_TIME_BASE/1000) % 1000);
			xasprintf(&m.duration, "%d:%02d:%02d.%03d", hours, min, sec, ms);
		}

		/* NOTE: The DLNA spec only provides for ASF (WMV), TS, PS, and MP4 containers.
		 * Skip DLNA parsing for everything else. */
		if( strcmp(ctx->iformat->name, "avi") == 0 )
		{
			xasprintf(&m.mime, "video/x-msvideo");
			if( vc->codec_id == CODEC_ID_MPEG4 )
			{
				fourcc[0] = vc->codec_tag     & 0xff;
				fourcc[1] = vc->codec_tag>>8  & 0xff;
				fourcc[2] = vc->codec_tag>>16 & 0xff;
				fourcc[3] = vc->codec_tag>>24 & 0xff;
				if( memcmp(fourcc, "XVID", 4) == 0 ||
				    memcmp(fourcc, "DX50", 4) == 0 ||
				    memcmp(fourcc, "DIVX", 4) == 0 )
					xasprintf(&m.creator, "DiVX");
			}
		}
Example #7
0
/* And our main album art functions */
void
update_if_album_art(const char *path)
{
	char *dir;
	char *match;
	char file[MAXPATHLEN];
	char fpath[MAXPATHLEN];
	char dpath[MAXPATHLEN];
	int ncmp = 0;
	int album_art;
	DIR *dh;
	struct dirent *dp;
	enum file_types type = TYPE_UNKNOWN;
	int64_t art_id = 0;
	int ret;

	strncpyt(fpath, path, sizeof(fpath));
	match = basename(fpath);
	/* Check if this file name matches a specific audio or video file */
	if( ends_with(match, ".cover.jpg") )
	{
		ncmp = strlen(match)-10;
	}
	else
	{
		ncmp = strrchr(match, '.') - match;
	}
	/* Check if this file name matches one of the default album art names */
	album_art = is_album_art(match);

	strncpyt(dpath, path, sizeof(dpath));
	dir = dirname(dpath);
	dh = opendir(dir);
	if( !dh )
		return;
	while ((dp = readdir(dh)) != NULL)
	{
		switch( dp->d_type )
		{
			case DT_REG:
				type = TYPE_FILE;
				break;
			case DT_LNK:
			case DT_UNKNOWN:
				snprintf(file, sizeof(file), "%s/%s", dir, dp->d_name);
				type = resolve_unknown_type(file, ALL_MEDIA);
				break;
			default:
				type = TYPE_UNKNOWN;
				break;
		}
		if( type != TYPE_FILE )
			continue;
		if( (*(dp->d_name) != '.') &&
		    (is_video(dp->d_name) || is_audio(dp->d_name)) &&
		    (album_art || strncmp(dp->d_name, match, ncmp) == 0) )
		{
			DPRINTF(E_DEBUG, L_METADATA, "New file %s looks like cover art for %s\n", path, dp->d_name);
			snprintf(file, sizeof(file), "%s/%s", dir, dp->d_name);
			art_id = find_album_art(file, NULL, 0);
			ret = sql_exec(db, "UPDATE DETAILS set ALBUM_ART = %lld where PATH = '%q'", (long long)art_id, file);
			if( ret != SQLITE_OK )
				DPRINTF(E_WARN, L_METADATA, "Error setting %s as cover art for %s\n", match, dp->d_name);
		}
	}
	closedir(dh);
}
Example #8
0
static int reader_work( hb_work_object_t * w, hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_work_private_t  * r = w->private_data;
    hb_fifo_t         ** fifos;
    hb_buffer_t        * buf;
    hb_buffer_list_t     list;
    int                  ii, chapter = -1;

    hb_buffer_list_clear(&list);

    if (r->bd)
        chapter = hb_bd_chapter( r->bd );
    else if (r->dvd)
        chapter = hb_dvd_chapter( r->dvd );
    else if (r->stream)
        chapter = hb_stream_chapter( r->stream );

    if( chapter < 0 )
    {
        hb_log( "reader: end of the title reached" );
        reader_send_eof(r);
        return HB_WORK_DONE;
    }
    if( chapter > r->chapter_end )
    {
        hb_log("reader: end of chapter %d (media %d) reached at media chapter %d",
                r->job->chapter_end, r->chapter_end, chapter);
        reader_send_eof(r);
        return HB_WORK_DONE;
    }

    if (r->bd)
    {
        if( (buf = hb_bd_read( r->bd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->dvd)
    {
        if( (buf = hb_dvd_read( r->dvd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->stream)
    {
        if ( (buf = hb_stream_read( r->stream )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }

    (hb_demux[r->title->demuxer])(buf, &list, &r->demux);

    while ((buf = hb_buffer_list_rem_head(&list)) != NULL)
    {
        fifos = GetFifoForId( r, buf->s.id );

        if (fifos && r->stream && r->start_found == 2 )
        {
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if (buf->s.start != AV_NOPTS_VALUE &&
                buf->s.start < r->job->pts_to_start)
            {
                r->job->pts_to_start -= buf->s.start;
            }
            else if ( buf->s.start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
            }
            r->start_found = 1;
        }

        if ( fifos && ! r->saw_video && !r->job->indepth_scan )
        {
            // The first data packet with a PTS from an audio or video stream
            // that we're decoding defines 'time zero'. Discard packets until
            // we get one.
            if (buf->s.start != AV_NOPTS_VALUE &&
                buf->s.renderOffset != AV_NOPTS_VALUE &&
                 (buf->s.id == r->title->video_id ||
                  is_audio( r, buf->s.id)))
            {
                // force a new scr offset computation
                r->scr_changes = r->demux.scr_changes - 1;
                // create a stream state if we don't have one so the
                // offset will get computed correctly.
                id_to_st( r, buf, 1 );
                r->saw_video = 1;
                hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
                        r->demux.last_scr, buf->s.id, buf->s.renderOffset );
            }
            else
            {
                fifos = NULL;
            }
        }

        if ( r->job->indepth_scan || fifos )
        {
            if ( buf->s.renderOffset != AV_NOPTS_VALUE )
            {
                if ( r->scr_changes != r->demux.scr_changes )
                {
                    // This is the first audio or video packet after an SCR
                    // change. Compute a new scr offset that would make this
                    // packet follow the last of this stream with the
                    // correct average spacing.
                    stream_timing_t *st = id_to_st( r, buf, 0 );

                    // if this is the video stream and we don't have
                    // audio yet or this is an audio stream
                    // generate a new scr
                    if ( st->is_audio ||
                         ( st == r->stream_timing && !r->saw_audio ) )
                    {
                        new_scr_offset( r, buf );
                        r->sub_scr_set = 0;
                    }
                    else
                    {
                        // defer the scr change until we get some
                        // audio since audio has a timestamp per
                        // frame but video & subtitles don't. Clear
                        // the timestamps so the decoder will generate
                        // them from the frame durations.
                        if (is_subtitle(r, buf->s.id) &&
                            buf->s.start != AV_NOPTS_VALUE)
                        {
                            if (!r->sub_scr_set)
                            {
                                // We can't generate timestamps in the
                                // subtitle decoder as we can for
                                // audio & video.  So we need to make
                                // the closest guess that we can
                                // for the subtitles start time here.
                                int64_t last = r->stream_timing[0].last;
                                r->scr_offset = buf->s.start - last;
                                r->sub_scr_set = 1;
                            }
                        }
                        else
                        {
                            buf->s.start = AV_NOPTS_VALUE;
                            buf->s.renderOffset = AV_NOPTS_VALUE;
                        }
                    }
                }
            }
            if ( buf->s.start != AV_NOPTS_VALUE )
            {
                int64_t start = buf->s.start - r->scr_offset;

                if (!r->start_found || r->job->indepth_scan)
                {
                    UpdateState( r, start );
                }

                if (r->job->indepth_scan && r->job->pts_to_stop &&
                    start >= r->pts_to_start + r->job->pts_to_stop)
                {
                    // sync normally would terminate p-to-p
                    // but sync doesn't run during indepth scan
                    hb_log("reader: reached pts %"PRId64", exiting early", start);
                    reader_send_eof(r);
                    hb_buffer_list_close(&list);
                    return HB_WORK_DONE;
                }

                if (!r->start_found && start >= r->pts_to_start)
                {
                    // pts_to_start point found
                    // Note that this code path only gets executed for
                    // media where we have not performed an initial seek
                    // to get close to the start time. So the 'start' time
                    // is the time since the first frame.

                    if (r->stream)
                    {
                        // libav multi-threaded decoders can get into
                        // a bad state if the initial data is not
                        // decodable.  So try to improve the chances of
                        // a good start by waiting for an initial iframe
                        hb_stream_set_need_keyframe(r->stream, 1);
                        hb_buffer_close( &buf );
                        continue;
                    }
                    r->start_found = 1;
                    // sync.c also pays attention to job->pts_to_start
                    // It eats up the 10 second slack that we build in
                    // to the start time here in reader (so that video
                    // decode is clean at the start time).
                    // sync.c expects pts_to_start to be relative to the
                    // first timestamp it sees.
                    if (r->job->pts_to_start > start)
                    {
                        r->job->pts_to_start -= start;
                    }
                    else
                    {
                        r->job->pts_to_start = 0;
                    }
                }
                // This log is handy when you need to debug timing problems
                //hb_log("id %x scr_offset %"PRId64
                //       " start %"PRId64" --> %"PRId64"",
                //        buf->s.id, r->scr_offset, buf->s.start,
                //        buf->s.start - r->scr_offset);
                buf->s.start -= r->scr_offset;
                if ( buf->s.stop != AV_NOPTS_VALUE )
                {
                    buf->s.stop -= r->scr_offset;
                }
            }
            if ( buf->s.renderOffset != AV_NOPTS_VALUE )
            {
                // This packet is referenced to the same SCR as the last.
                // Adjust timestamp to remove the System Clock Reference
                // offset then update the average inter-packet time
                // for this stream.
                buf->s.renderOffset -= r->scr_offset;
                update_ipt( r, buf );
            }
#if 0
            // JAS: This was added to fix a rare "audio time went backward"
            // sync error I found in one sample.  But it has a bad side
            // effect on DVDs, causing frequent "adding silence" sync
            // errors. So I am disabling it.
            else
            {
                update_ipt( r, buf );
            }
#endif
        }
        buf = splice_discontinuity(r, buf);
        if( fifos && buf != NULL )
        {
            if ( !r->start_found )
            {
                hb_buffer_close( &buf );
                continue;
            }

            buf->sequence = r->sequence++;
            /* if there are multiple output fifos, send a copy of the
             * buffer down all but the first (we have to not ship the
             * original buffer or we'll race with the thread that's
             * consuming the buffer & inject garbage into the data stream). */
            for (ii = 1; fifos[ii] != NULL; ii++)
            {
                hb_buffer_t *buf_copy = hb_buffer_init(buf->size);
                buf_copy->s = buf->s;
                memcpy(buf_copy->data, buf->data, buf->size);
                push_buf(r, fifos[ii], buf_copy);
            }
            push_buf(r, fifos[0], buf);
            buf = NULL;
        }
        else
        {
            hb_buffer_close(&buf);
        }
    }

    hb_buffer_list_close(&list);
    return HB_WORK_OK;
}
Example #9
0
/*
 * determine_payload_type_from_rtp - determine which protocol we're dealing with
 * in the rtp session.  Set various values for the sync task as well...
 */
int CPlayerMedia::determine_payload_type_from_rtp(void)
{
  uint8_t payload_type, temp;
  format_list_t *fmt;
  uint64_t tickpersec;

  if (m_head != NULL) {
    payload_type = m_head->rtp_pak_pt;
  } else {
    payload_type = atoi(m_media_info->fmt_list->fmt);
  }
  fmt = m_media_info->fmt_list;
  while (fmt != NULL) {
    // rtp payloads are all numeric
    temp = atoi(fmt->fmt);
    if (temp == payload_type) {
      m_media_fmt = fmt;
      if (fmt->rtpmap_name != NULL) {
	tickpersec = fmt->rtpmap_clock_rate;
      } else {
	if (payload_type >= 96) {
	  media_message(LOG_ERR, "Media %s, rtp payload type of %u, no rtp map",
			m_media_info->media, payload_type);
	  return (FALSE);
	} else {
	  // generic payload type.  between 0 and 23 are audio - most
	  // are 8000
	  // all video (above 25) are 90000
	  tickpersec = 90000;
	  // this will handle the >= 0 case as well.
	  if (payload_type <= 23) {
	    tickpersec = 8000;
	    if (payload_type == 6) {
	      tickpersec = 16000;
	    } else if (payload_type == 10 || payload_type == 11) {
	      tickpersec = 44100;
	    } else if (payload_type == 14) 
	      tickpersec = 90000;
	  }
	}
      }

      create_rtp_byte_stream(payload_type,
			     tickpersec,
			     fmt);
      uint64_t start_time = (uint64_t)(m_play_start_time * 1000.0);
      m_rtp_byte_stream->play(start_time);
      m_byte_stream = m_rtp_byte_stream;
      if (is_audio()) {
	m_rtp_byte_stream->set_sync(m_parent);
      } else {
	m_parent->synchronize_rtp_bytestreams(NULL);
      }
#if 1
      media_message(LOG_DEBUG, "media %s - rtp tps %u ntp per rtp ",
			   m_media_info->media,
			   m_rtptime_tickpersec);
#endif

      return (TRUE);
    }
    fmt = fmt->next;
  }
  media_message(LOG_ERR, "Payload type %d not in format list for media %s", 
		payload_type, get_name());
  return (FALSE);
}
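The nested branches above reproduce part of the static payload-type table from RFC 3551: static audio payload types (0-23) default to an 8000 Hz RTP clock, with exceptions for DVI4 at 16 kHz (type 6), L16 (types 10 and 11) and MPA (type 14), while the static video types use 90000. A table-driven sketch of the same mapping is shown below; the function name is hypothetical and is not part of CPlayerMedia:

static uint64_t static_pt_clock_rate (uint8_t pt)
{
  // Exceptions among the static audio payload types 0-23.
  switch (pt) {
  case 6:                 // DVI4 at 16 kHz
    return 16000;
  case 10:                // L16, stereo
  case 11:                // L16, mono
    return 44100;
  case 14:                // MPA (MPEG audio elementary stream)
    return 90000;
  default:
    // Remaining audio types keep the 8000 Hz default; static video types use 90000.
    return pt <= 23 ? 8000 : 90000;
  }
}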
Example #10
0
void CPlayerMedia::create_rtp_byte_stream (uint8_t rtp_pt,
					   uint64_t tps,
					   format_list_t *fmt)
{
  int codec;
  rtp_check_return_t plugin_ret;
  rtp_plugin_t *rtp_plugin;
  int stream_ondemand;
  rtp_plugin = NULL;
  plugin_ret = check_for_rtp_plugins(fmt, rtp_pt, &rtp_plugin, &config);
  
  stream_ondemand = 0;
  if (m_stream_ondemand == 1 &&
      get_range_from_media(m_media_info) != NULL) {
    // m_stream_ondemand == 1 means we're using RTSP, and having a range 
    // in the SDP means that we have an ondemand presentation; otherwise, we
    // want to treat it like a broadcast session, and use the RTCP.
    stream_ondemand = 1;
  }
  if (plugin_ret != RTP_PLUGIN_NO_MATCH) {
    switch (plugin_ret) {
    case RTP_PLUGIN_MATCH:
      player_debug_message("Starting rtp bytestream %s from plugin", 
			   rtp_plugin->name);
      m_rtp_byte_stream = new CPluginRtpByteStream(rtp_plugin,
						 fmt,
						 rtp_pt,
						 stream_ondemand,
						 tps,
						 &m_head,
						 &m_tail,
						 m_rtsp_base_seq_received,
						 m_rtp_base_seq,
						 m_rtsp_base_ts_received,
						 m_rtp_base_ts,
						 m_rtcp_received,
						 m_rtcp_ntp_frac,
						 m_rtcp_ntp_sec,
						 m_rtcp_rtp_ts);
      return;
    case RTP_PLUGIN_MATCH_USE_VIDEO_DEFAULT:
      // just fall through...
      break; 
    case RTP_PLUGIN_MATCH_USE_AUDIO_DEFAULT:
      m_rtp_byte_stream = 
	new CAudioRtpByteStream(rtp_pt, 
				fmt, 
				stream_ondemand,
				tps,
				&m_head,
				&m_tail,
				m_rtsp_base_seq_received,
				m_rtp_base_seq,
				m_rtsp_base_ts_received,
				m_rtp_base_ts,
				m_rtcp_received,
				m_rtcp_ntp_frac,
				m_rtcp_ntp_sec,
				m_rtcp_rtp_ts);
      if (m_rtp_byte_stream != NULL) {
	player_debug_message("Starting generic audio byte stream");
	return;
      }

    case RTP_PLUGIN_NO_MATCH:
    default:
      break;
    }
  } else {
    if (is_audio() == false && (rtp_pt == 32)) {
      codec = VIDEO_MPEG12;
      m_rtp_byte_stream = new CMpeg3RtpByteStream(rtp_pt,
						  fmt, 
						  stream_ondemand,
						  tps,
						  &m_head,
						  &m_tail,
						  m_rtsp_base_seq_received,
						  m_rtp_base_seq,
						  m_rtsp_base_ts_received,
						  m_rtp_base_ts,
						  m_rtcp_received,
						  m_rtcp_ntp_frac,
						  m_rtcp_ntp_sec,
						  m_rtcp_rtp_ts);
      if (m_rtp_byte_stream != NULL) {
	return;
      }
    } else {
      if (rtp_pt == 14) {
	codec = MPEG4IP_AUDIO_MP3;
      } else if (rtp_pt <= 23) {
	codec = MPEG4IP_AUDIO_GENERIC;
      }  else {
	if (fmt->rtpmap_name == NULL) return;

	codec = lookup_audio_codec_by_name(fmt->rtpmap_name);
	if (codec < 0) {
	  codec = MPEG4IP_AUDIO_NONE; // fall through everything to generic
	}
      }
      switch (codec) {
      case MPEG4IP_AUDIO_MP3: {
	m_rtp_byte_stream = 
	  new CAudioRtpByteStream(rtp_pt, fmt, 
				  stream_ondemand,
				  tps,
				  &m_head,
				  &m_tail,
				  m_rtsp_base_seq_received,
				  m_rtp_base_seq,
				  m_rtsp_base_ts_received,
				  m_rtp_base_ts,
				  m_rtcp_received,
				  m_rtcp_ntp_frac,
				  m_rtcp_ntp_sec,
				  m_rtcp_rtp_ts);
	if (m_rtp_byte_stream != NULL) {
	  m_rtp_byte_stream->set_skip_on_advance(4);
	  player_debug_message("Starting mp3 2250 audio byte stream");
	  return;
	}
      }
	break;
      case MPEG4IP_AUDIO_MP3_ROBUST:
	m_rtp_byte_stream = 
	  new CRfc3119RtpByteStream(rtp_pt, fmt, 
				    stream_ondemand,
				    tps,
				    &m_head,
				    &m_tail,
				    m_rtsp_base_seq_received,
				    m_rtp_base_seq,
				    m_rtsp_base_ts_received,
				    m_rtp_base_ts,
				    m_rtcp_received,
				    m_rtcp_ntp_frac,
				    m_rtcp_ntp_sec,
				    m_rtcp_rtp_ts);
	if (m_rtp_byte_stream != NULL) {
	  player_debug_message("Starting mpa robust byte stream");
	  return;
	}
	break;
      case MPEG4IP_AUDIO_GENERIC:
	m_rtp_byte_stream = 
	  new CAudioRtpByteStream(rtp_pt, fmt, 
				  stream_ondemand,
				  tps,
				  &m_head,
				  &m_tail,
				  m_rtsp_base_seq_received,
				  m_rtp_base_seq,
				  m_rtsp_base_ts_received,
				  m_rtp_base_ts,
				  m_rtcp_received,
				  m_rtcp_ntp_frac,
				  m_rtcp_ntp_sec,
				  m_rtcp_rtp_ts);
	if (m_rtp_byte_stream != NULL) {
	  player_debug_message("Starting generic audio byte stream");
	  return;
	}
      default:
	break;
      }
    }
  }
  if (m_rtp_byte_stream == NULL) 
    m_rtp_byte_stream = new CRtpByteStream(fmt->media->media,
					   fmt, 
					   rtp_pt,
					   stream_ondemand,
					   tps,
					   &m_head,
					   &m_tail,
					   m_rtsp_base_seq_received,
					   m_rtp_base_seq,
					   m_rtsp_base_ts_received,
					   m_rtp_base_ts,
					   m_rtcp_received,
					   m_rtcp_ntp_frac,
					   m_rtcp_ntp_sec,
					   m_rtcp_rtp_ts);
}
Example #11
0
bool
mmg_track_t::is_webm_compatible() {
  static wxRegEx re_valid_webm_codecs(wxT("VP8|Vorbis"), wxRE_ICASE);

  return (is_audio() || is_video()) && re_valid_webm_codecs.Matches(ctype);
}
Example #12
0
File: fb.c Project: gettler/mvpmc
static void 
recurse_find_audio(char* cur, char** item, int* nextitem)
{
	DIR *d = NULL;
	struct dirent *de = NULL;
	struct stat64 sb;
	int i;
        char comp1[PATH_SIZE];
        char comp2[PATH_SIZE];
	char curdir[PATH_SIZE];

	// Append a trailing slash to the current path
	snprintf(curdir, PATH_SIZE, "%s/", cur);
	printf("Recursing directory %s for audio files\n", curdir);

	d = opendir(curdir);
	if (d == NULL) {
		printf("Not a directory: %s", curdir);
		return;
	}

	int noitems = 0;
	char** contents = alloca(sizeof(char*) * MAX_PLAYLIST_ENTRIES);
	while ( (de = readdir(d)) != NULL && noitems < MAX_PLAYLIST_ENTRIES) {

		// Ignore ./.. or blank name directories
		if (*de->d_name == '.' || *de->d_name == '/' || *de->d_name == '\0') {
			continue;
		}

		char path[PATH_SIZE];
		snprintf(path, PATH_SIZE, "%s/%s", cur, de->d_name);

		if (stat64(path, &sb) != 0) {
			printf("Couldn't stat file: %s\n", path);
			return;
		}
		else if (S_ISDIR(sb.st_mode)) {
			// It's a directory, recurse it
			printf("Found directory %s, recursing...\n", path);
			recurse_find_audio(path, item, nextitem);
		}
		else if (is_audio(de->d_name)) {
			// We have an audio file, add it to the list
			printf("Found audio file %s, adding as item %d\n", path, noitems);
			contents[noitems++] = strdup(path);
			printf("Next item is %d\n",  noitems);
		}
		else {
			printf("Skipping non-audio file %s\n", path);
		}
	}
	closedir(d);

        // Sort the directory contents via bubblesort (space efficient - 
        // everything else inefficient :-)
        printf("Sorting directory contents\n");
        int sorted = 0;
        while (!sorted) {
            sorted = 1;
            for (i = 0; i < noitems-1; i++) {
               
                // Create a copy of our two items to compare. If a name
                // starts with a single digit on its own, prepend a zero
                // for the alphanumeric comparison - we're guessing the
                // filename starts with a track number and want tracks
                // < 10 to sort before tracks >= 10.
		char* fname1 = (char*) strrchr(contents[i], '/');
		char* fname2 = (char*) strrchr(contents[i+1], '/');
		fname1++;
		fname2++;

                if (isdigit(*fname1) && !isdigit(*(fname1 + 1)))
                    sprintf(comp1, "0%s", fname1);
                else
                    strcpy(comp1, fname1);
                if (isdigit(*fname2) && !isdigit(*(fname2 + 1)))
                    sprintf(comp2, "0%s", fname2);
                else
                    strcpy(comp2, fname2);
            
                if (strcmp(comp1, comp2) > 0) {
                    printf("SWAP: %s with %s\n", comp1, comp2);
                    char* tmp = contents[i];
                    contents[i] = contents[i+1];
                    contents[i+1] = tmp;
                    sorted = 0;
                }
                else {
                    printf("NOSWAP: %s with %s\n", comp1, comp2);
                }
            }
        }

        // Append the sorted contents to our list of items
        printf("Adding sorted contents to item list\n");
        for (i = 0; i < noitems; i++) {

            // If we've hit the limit of the playlist size, free up
            // directory entries from now on as we can't reassign them
            // to the playlist (where they would be freed)
            if (*nextitem == MAX_PLAYLIST_ENTRIES) {
                free(contents[i]);
                continue;
            }

            // Add the item to the playlist
            item[*nextitem] = contents[i];
            printf("playlist item %d: %s\n", *nextitem, contents[i]);
            (*nextitem)++;

        }

}
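Example #12's bubble sort orders the directory entries by basename, prefixing a "0" to a lone leading digit so that track 2 sorts before track 10. The same ordering can be expressed as a qsort() comparator; the sketch below is an alternative formulation, not code from mvpmc, and it assumes the usual <ctype.h>/<stdio.h>/<string.h>/<stdlib.h> includes plus the PATH_SIZE constant used above:

static void normalize_track_name(const char *path, char *out)
{
	// Compare by basename, zero-padding a single leading digit.
	const char *fname = strrchr(path, '/');
	fname = fname ? fname + 1 : path;
	if (isdigit((unsigned char)*fname) && !isdigit((unsigned char)*(fname + 1)))
		snprintf(out, PATH_SIZE, "0%s", fname);
	else
		snprintf(out, PATH_SIZE, "%s", fname);
}

static int track_name_cmp(const void *a, const void *b)
{
	char c1[PATH_SIZE], c2[PATH_SIZE];

	normalize_track_name(*(const char * const *)a, c1);
	normalize_track_name(*(const char * const *)b, c2);
	return strcmp(c1, c2);
}

/* usage: qsort(contents, noitems, sizeof(char *), track_name_cmp); */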
Example #13
0
File: fb.c Project: gettler/mvpmc
static void
select_callback(mvp_widget_t *widget, char *item, void *key)
{
	char path[1024], *ptr;
	struct stat64 sb;

	sprintf(path, "%s/%s", cwd, item);
	if (stat64(path, &sb)!=0) {
		printf("Could not stat %s error %d\n",item,errno);
		if (strcmp(item,"../")==0 ) {
			// probably lost the network - put the user back in the root directory
			strcpy(cwd,"/");
			strcpy(path,"/");
			stat64(path, &sb);
		}
	}

	if (current_pl && !is_playlist(item)) {
		free(current_pl);
		current_pl = NULL;
	}

	if (current_pl && (playlist == NULL)) {
		free(current_pl);
		current_pl = NULL;
	}

	printf("%s(): path '%s'\n", __FUNCTION__, path);

	if (current && (strcmp(path, current) == 0)) {
		printf("selected current item\n");
		if (is_video(item) || (is_streaming(item) > 100)) {
			mvpw_hide(widget);
			mvpw_hide(fb_progress);
			av_move(0, 0, 0);
			screensaver_disable();
			return;
		}
	}

	if (current_pl && (strcmp(path, current_pl) == 0)) {
		if (is_playlist(item)) {
			mvpw_show(fb_progress);
			mvpw_set_timer(fb_progress, fb_osd_update, 500);
			mvpw_hide(widget);
			printf("Show playlist menu\n");
			mvpw_show(playlist_widget);
			mvpw_focus(playlist_widget);
			return;
		}
	}

	if (S_ISDIR(sb.st_mode)) {
		if (strcmp(item, "../") == 0) {
			strcpy(path, cwd);
			if (path[strlen(path)-1] == '/')
				path[strlen(path)-1] = '\0';
			if ((ptr=strrchr(path, '/')) != NULL)
				*ptr = '\0';
			if (path[0] == '\0')
				sprintf(path, "/");
		} else {
			if ((ptr=strrchr(path, '/')) != NULL)
				*ptr = '\0';
		}
		if (strstr(path,"/uPnP")!=NULL && strstr(cwd,"/uPnP")==NULL ){
			mount_djmount(path);
				
		} else if (strstr(path,"/uPnP")==NULL && strstr(cwd,"/uPnP")!=NULL ) { 
			unmount_djmount();
		}
		strncpy(cwd, path, sizeof(cwd));

		while ((cwd[0] == '/') && (cwd[1] == '/'))
			memmove(cwd, cwd+1, strlen(cwd));

		mvpw_clear_menu(widget);
		mvpw_set_menu_title(widget, cwd);

		busy_start();
		add_dirs(widget);
		add_files(widget);
		busy_end();

		mvpw_expose(widget);
	} else {
		switch_hw_state(MVPMC_STATE_FILEBROWSER);

		if (current)
			free(current);
		current = NULL;
		audio_stop = 1;
		pthread_kill(audio_thread, SIGURG);

		while (audio_playing)
			usleep(1000);

		current = strdup(path);

		if (is_streaming(item) > 100) {
			// Use VLC callbacks for streaming items
			video_functions = &vlc_functions;
			// Allow broadcast messages to be sent so
			// we can tell VLC to start the stream
			vlc_broadcast_enabled = 1;
		} else {
			video_functions = &file_functions;
		}

		add_osd_widget(fb_program_widget, OSD_PROGRAM,
			       osd_settings.program, NULL);

		mvpw_set_text_str(fb_name, item);

		/*
		 * This code sends the currently playing file name to the display.
		 */
		snprintf(display_message, sizeof(display_message),
			 "File:%s\n", item);
		display_send(display_message);

		audio_clear();
		video_clear();
		playlist_clear();

		if (is_video(item)) {
			if (key != NULL) {
				mvpw_hide(widget);
				mvpw_hide(fb_progress);
				av_move(0, 0, 0);
			} else {
				mvpw_show(fb_progress);
			}
			mvpw_set_timer(fb_progress, fb_osd_update, 500);
			video_play(NULL);
			mvpw_show(root);
			mvpw_expose(root);
			mvpw_focus(root);
		} else if (is_audio(item) || is_streaming(item)>=0 ) {
			mvpw_show(fb_progress);
			mvpw_set_timer(fb_progress, fb_osd_update, 500);
			audio_play(NULL);
		} else if (is_image(item)) {
			mvpw_hide(widget);
			printf("Displaying image '%s'\n", path);
			if (mvpw_load_image_jpeg(iw, path) == 0) {
				mvpw_show_image_jpeg(iw);
				av_wss_update_aspect(WSS_ASPECT_UNKNOWN);
			} else {
				mvpw_set_image(iw, path);
			}
			mvpw_show(iw);
			mvpw_focus(iw);
			loaded_offset = 0;
			loaded_status = 0;
			fb_next_image(1);
		} else if (is_playlist(item)) {
			if (current_pl)
				free(current_pl);
			current_pl = strdup(path);
			mvpw_show(fb_progress);
			mvpw_set_timer(fb_progress, fb_osd_update, 500);
			mvpw_hide(widget);
			printf("Show playlist menu\n");
			mvpw_show(playlist_widget);
			mvpw_focus(playlist_widget);
			playlist_clear();
			playlist_play(NULL);
		}
	}
}
Example #14
0
/* And our main album art functions */
void
update_if_album_art(const char * path)
{
	char * dir;
	char * match = NULL;
	char * file = NULL;
	int ncmp = 0;
	struct album_art_name_s * album_art_name;
	DIR * dh;
	struct dirent *dp;
	enum file_types type = TYPE_UNKNOWN;
	sqlite_int64 art_id = 0;

	match = strdup(basename((char *)path));
	/* Check if this file name matches a specific audio or video file */
	if( ends_with(match, ".cover.jpg") )
	{
		ncmp = strlen(match)-10;
	}
	else
	{
		ncmp = strrchr(match, '.')-match;
	}
	/* Check if this file name matches one of the default album art names */
	for( album_art_name = album_art_names; album_art_name; album_art_name = album_art_name->next )
	{
		if( strcmp(album_art_name->name, match) == 0 )
			break;
	}

	dir = dirname(strdup(path));
	dh = opendir(dir);
	if( !dh )
		return;
	while ((dp = readdir(dh)) != NULL)
	{
		switch( dp->d_type )
		{
			case DT_REG:
				type = TYPE_FILE;
				break;
			case DT_LNK:
			case DT_UNKNOWN:
				asprintf(&file, "%s/%s", dir, dp->d_name);
				type = resolve_unknown_type(file, ALL_MEDIA);
				free(file);
				break;
			default:
				type = TYPE_UNKNOWN;
				break;
		}
		if( type != TYPE_FILE )
			continue;
		if( (*(dp->d_name) != '.') &&
		    (is_video(dp->d_name) || is_audio(dp->d_name)) &&
		    (album_art_name || strncmp(dp->d_name, match, ncmp) == 0) )
		{
			DPRINTF(E_DEBUG, L_METADATA, "New file %s looks like cover art for %s\n", path, dp->d_name);
			asprintf(&file, "%s/%s", dir, dp->d_name);
			art_id = find_album_art(file, NULL, 0);
			if( sql_exec(db, "UPDATE DETAILS set ALBUM_ART = %lld where PATH = '%q'", art_id, file) != SQLITE_OK )
				DPRINTF(E_WARN, L_METADATA, "Error setting %s as cover art for %s\n", match, dp->d_name);
			free(file);
		}
	}
	closedir(dh);
	
	free(dir);
	free(match);
}
Example #15
0
/*
 * Main decode thread.
 */
int CPlayerMedia::decode_thread (void) 
{
  //  uint32_t msec_per_frame = 0;
  int ret = 0;
  int thread_stop = 0, decoding = 0;
  uint32_t decode_skipped_frames = 0;
  frame_timestamp_t ourtime, lasttime;
      // Tell bytestream we're starting the next frame - they'll give us
      // the time.
  uint8_t *frame_buffer;
  uint32_t frame_len;
  void *ud = NULL;
  
  uint32_t frames_decoded;
  uint64_t start_decode_time = 0;
  uint64_t last_decode_time = 0;
  bool have_start_time = false;
  bool have_frame_ts = false;
  bool found_out_of_range_ts = false;
  uint64_t bytes_decoded;

  lasttime.msec_timestamp = 0;
  frames_decoded = 0;
  bytes_decoded = 0;


  while (thread_stop == 0) {
    // waiting here for decoding or thread stop
    ret = SDL_SemWait(m_decode_thread_sem);
#ifdef DEBUG_DECODE
    media_message(LOG_DEBUG, "%s Decode thread awake",
		  get_name());
#endif
    parse_decode_message(thread_stop, decoding);

    if (decoding == 1) {
      // We've been told to start decoding - if we don't have a codec, 
      // create one
      m_sync->set_wait_sem(m_decode_thread_sem);
      if (m_plugin == NULL) {
	switch (m_sync_type) {
	case VIDEO_SYNC:
	  create_video_plugin(NULL, 
			      STREAM_TYPE_RTP,
			      NULL,
			      -1,
			      -1,
			      m_media_fmt,
			      NULL,
			      m_user_data,
			      m_user_data_size);
	  break;
	case AUDIO_SYNC:
	  create_audio_plugin(NULL,
			      STREAM_TYPE_RTP,
			      NULL,
			      -1, 
			      -1, 
			      m_media_fmt,
			      NULL,
			      m_user_data,
			      m_user_data_size);
	  break;
	case TIMED_TEXT_SYNC:
	  create_text_plugin(NULL,
			     STREAM_TYPE_RTP, 
			     NULL, 
			     m_media_fmt, 
			     m_user_data, 
			     m_user_data_size);
	  break;
	}
	if (m_plugin_data == NULL) {
	  m_plugin = NULL;
	} else {
	  media_message(LOG_DEBUG, "Starting %s codec from decode thread",
			m_plugin->c_name);
	}
      }
      if (m_plugin != NULL) {
	m_plugin->c_do_pause(m_plugin_data);
      } else {
	while (thread_stop == 0 && decoding) {
	  SDL_Delay(100);
	  if (m_rtp_byte_stream) {
	    m_rtp_byte_stream->flush_rtp_packets();
	  }
	  parse_decode_message(thread_stop, decoding);
	}
      }
    }
    /*
     * this is our main decode loop
     */
#ifdef DEBUG_DECODE
    media_message(LOG_DEBUG, "%s Into decode loop", get_name());
#endif
    while ((thread_stop == 0) && decoding) {
      parse_decode_message(thread_stop, decoding);
      if (thread_stop != 0)
	continue;
      if (decoding == 0) {
	m_plugin->c_do_pause(m_plugin_data);
	have_frame_ts = false;
	continue;
      }
      if (m_byte_stream->eof()) {
	media_message(LOG_INFO, "%s hit eof", get_name());
	if (m_sync) m_sync->set_eof();
	decoding = 0;
	continue;
      }
      if (m_byte_stream->have_frame() == false) {
	// Indicate that we're waiting, and wait for a message from RTP
	// task.
	wait_on_bytestream();
	continue;
      }

      frame_buffer = NULL;
      bool have_frame;
      memset(&ourtime, 0, sizeof(ourtime));
      have_frame = m_byte_stream->start_next_frame(&frame_buffer, 
						   &frame_len,
						   &ourtime,
						   &ud);
      if (have_frame == false) continue;
      /*
       * If we're decoding video, see if we're playing - if so, check
       * if we've fallen significantly behind the audio
       */
      if (get_sync_type() == VIDEO_SYNC &&
	  (m_parent->get_session_state() == SESSION_PLAYING) &&
	  have_frame_ts) {
	int64_t ts_diff = ourtime.msec_timestamp - lasttime.msec_timestamp;

	if (ts_diff > TO_D64(1000) ||
	    ts_diff < TO_D64(-1000)) {
	  // out of range timestamp - we'll want to not skip here
	  found_out_of_range_ts = true;
	  media_message(LOG_INFO, "found out of range ts "U64" last "U64" "D64,
			ourtime.msec_timestamp, 
			lasttime.msec_timestamp,
			ts_diff);
	} else {
	  uint64_t current_time = m_parent->get_playing_time();
	  if (found_out_of_range_ts) {
	    ts_diff = current_time - ourtime.msec_timestamp;
	    if (ts_diff > TO_D64(0) && ts_diff < TO_D64(5000)) {
	      found_out_of_range_ts = false;
	      media_message(LOG_INFO, 
			    "ts back in playing range "U64" "D64,
			    ourtime.msec_timestamp, ts_diff);
	    }
	  } else {
	    // regular time
	    if (current_time >= ourtime.msec_timestamp) {
	      media_message(LOG_INFO, 
			    "Candidate for skip decode "U64" our "U64, 
			    current_time, ourtime.msec_timestamp);
	      // If the bytestream can skip ahead, let's do so
	      if (m_byte_stream->can_skip_frame() != 0) {
		int ret;
		int hassync;
		int count;
		current_time += 200; 
		count = 0;
		// Skip up to the current time + 200 msec
		ud = NULL;
		do {
		  if (ud != NULL) free(ud);
		  frame_buffer = NULL;
		  ret = m_byte_stream->skip_next_frame(&ourtime, 
						       &hassync,
						       &frame_buffer, 
						       &frame_len,
						       &ud);
		  decode_skipped_frames++;
		} while (ret != 0 &&
			 !m_byte_stream->eof() && 
			 current_time > ourtime.msec_timestamp);
		if (m_byte_stream->eof() || ret == 0) continue;
		media_message(LOG_INFO, "Skipped ahead "U64 " to "U64, 
			      current_time - 200, ourtime.msec_timestamp);
		/*
		 * Ooh - fun - try to match to the next sync value - if not, 
		 * 15 frames
		 */
		do {
		  if (ud != NULL) free(ud);
		  ret = m_byte_stream->skip_next_frame(&ourtime, 
						       &hassync,
						       &frame_buffer, 
						       &frame_len,
						       &ud);
		  if (hassync < 0) {
		    uint64_t diff = ourtime.msec_timestamp - current_time;
		    if (diff > TO_U64(200)) {
		      hassync = 1;
		    }
		  }
		  decode_skipped_frames++;
		  count++;
		} while (ret != 0 &&
			 hassync <= 0 &&
			 count < 30 &&
			 !m_byte_stream->eof());
		if (m_byte_stream->eof() || ret == 0) continue;
#ifdef DEBUG_DECODE
		media_message(LOG_INFO, 
			      "Matched ahead - count %d, sync %d time "U64,
			      count, hassync, ourtime.msec_timestamp);
#endif
	      }
	    }
	  } // end regular time
	}
      }
      lasttime = ourtime;
      have_frame_ts = true;
#ifdef DEBUG_DECODE
      media_message(LOG_DEBUG, "Decoding %s frame " U64, get_name(),
		    ourtime.msec_timestamp);
#endif
      if (frame_buffer != NULL && frame_len != 0) {
	int sync_frame;
	ret = m_plugin->c_decode_frame(m_plugin_data,
				       &ourtime,
				       m_streaming,
				       &sync_frame,
				       frame_buffer, 
				       frame_len,
				       ud);
#ifdef DEBUG_DECODE
	media_message(LOG_DEBUG, "Decoding %s frame return %d", 
		      get_name(), ret);
#endif
	if (ret > 0) {
	  frames_decoded++;
	  if (have_start_time == false) {
	    have_start_time = true;
	    start_decode_time = ourtime.msec_timestamp;
	  }
	  last_decode_time = ourtime.msec_timestamp;
	  m_byte_stream->used_bytes_for_frame(ret);
	  bytes_decoded += ret;
	} else {
	  m_byte_stream->used_bytes_for_frame(frame_len);
	}

      }
    }
    // calculate frame rate for session
  }
  if (is_audio() == false)
    media_message(LOG_NOTICE, "Video decoder skipped %u frames", 
		  decode_skipped_frames);
  if (last_decode_time > start_decode_time) {
    double fps, bps;
    double secs;
    uint64_t total_time = last_decode_time - start_decode_time;
    secs = UINT64_TO_DOUBLE(total_time);
    secs /= 1000.0;
#if 0
    media_message(LOG_DEBUG, "last time "U64" first "U64, 
		  last_decode_time, start_decode_time);
#endif
    fps = frames_decoded;
    fps /= secs;
    bps = UINT64_TO_DOUBLE(bytes_decoded);
    bps *= 8.0 / secs;
    media_message(LOG_NOTICE, "%s - bytes "U64", seconds %g, fps %g bps %g",
		  get_name(),
		  bytes_decoded, secs, 
		  fps, bps);
  }
  if (m_plugin) {
    m_plugin->c_close(m_plugin_data);
    m_plugin_data = NULL;
  }
  return (0);
}