Example #1
File: avio.c Project: 1c0n/xbmc
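/* Loops transfer_func() until at least size_min bytes have been transferred.
 * Once the fast retries are used up, av_gettime() (microseconds) records when
 * the EAGAIN wait began, so h->rw_timeout can abort the loop with EIO. */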
static inline int retry_transfer_wrapper(URLContext *h, unsigned char *buf, int size, int size_min,
                                         int (*transfer_func)(URLContext *h, unsigned char *buf, int size))
{
    int ret, len;
    int fast_retries = 5;
    int64_t wait_since = 0;

    len = 0;
    while (len < size_min) {
        ret = transfer_func(h, buf+len, size-len);
        if (ret == AVERROR(EINTR))
            continue;
        if (h->flags & AVIO_FLAG_NONBLOCK)
            return ret;
        if (ret == AVERROR(EAGAIN)) {
            ret = 0;
            if (fast_retries) {
                fast_retries--;
            } else {
                if (h->rw_timeout) {
                    if (!wait_since)
                        wait_since = av_gettime();
                    else if (av_gettime() > wait_since + h->rw_timeout)
                        return AVERROR(EIO);
                }
                av_usleep(1000);
            }
        } else if (ret < 1)
            return ret < 0 ? ret : len;
        if (ret)
            fast_retries = FFMAX(fast_retries, 2);
        len += ret;
        if (len < size && ff_check_interrupt(&h->interrupt_callback))
            return AVERROR_EXIT;
    }
    return len;
}
Example #2
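/* Shared-memory grabber: s->time_frame advances by 1000000 ticks per frame,
 * and time_frame * av_q2d(time_base) is the due wall-clock time in
 * microseconds compared against av_gettime(); frames more than one frame
 * period late are dropped by skipping ahead. */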
static int
shmgrab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    shm_grab_t *s = s1->priv_data;
    struct sembuf ssbuf;

    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for(;;) {
        curtime = av_gettime();
        delay = s->time_frame * av_q2d(s->time_base) - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * av_q2d(s->time_base)) {
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    pkt->pts = curtime;

    /* lock */
    ssbuf.sem_num = 0;
    ssbuf.sem_op = -1;
    ssbuf.sem_flg = 1;      /* flag == 1 indicates blocking access */
    semop(s->semid, &ssbuf, 1);

    /* XXX: avoid memcpy */
    memcpy(pkt->data, s->shmdata, s->frame_size);

    /* unlock */
    ssbuf.sem_num = 0;
    ssbuf.sem_op = 1;
    ssbuf.sem_flg = 1;      /* flag == 1 indicates blocking access */
    semop(s->semid, &ssbuf, 1);

    return s->frame_size;
}
Example #3
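/* Playback position in seconds: start/stop times and av_gettime() are all in
 * microseconds, hence the division by 1000000. */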
static guint
player_av_get_position (Player *self)
{
    PlayerAVPrivate *priv = PLAYER_AV (self)->priv;

    if (priv->start_time != -1) {
        if (priv->stop_time != -1) {
            return (priv->stop_time - priv->start_time) / 1000000;
        } else {
            return (av_gettime () - priv->start_time) / 1000000;
        }
    } else {
        return 0;
    }
}
Example #4
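/* Writes the BWF peak envelope ("levl") chunk; unless bitexact output is
 * requested, av_gettime() supplies a millisecond-resolution wall-clock
 * timestamp for the fixed 28-byte ASCII field. */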
static int peak_write_chunk(AVFormatContext *s)
{
    WAVMuxContext *wav = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    int64_t peak = ff_start_tag(s->pb, "levl");
    int64_t now0;
    time_t now_secs;
    char timestamp[28];

    /* Peak frame of incomplete block at end */
    if (wav->peak_block_pos)
        peak_write_frame(s);

    memset(timestamp, 0, sizeof(timestamp));
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        struct tm tmpbuf;
        av_log(s, AV_LOG_INFO, "Writing local time and date to Peak Envelope Chunk\n");
        now0 = av_gettime();
        now_secs = now0 / 1000000;
        if (strftime(timestamp, sizeof(timestamp), "%Y:%m:%d:%H:%M:%S:", localtime_r(&now_secs, &tmpbuf))) {
            av_strlcatf(timestamp, sizeof(timestamp), "%03d", (int)((now0 / 1000) % 1000));
        } else {
            av_log(s, AV_LOG_ERROR, "Failed to write timestamp\n");
            return -1;
        }
    }

    avio_wl32(pb, 1);                           /* version */
    avio_wl32(pb, wav->peak_format);            /* 8 or 16 bit */
    avio_wl32(pb, wav->peak_ppv);               /* positive and negative */
    avio_wl32(pb, wav->peak_block_size);        /* frames per value */
    avio_wl32(pb, enc->channels);               /* number of channels */
    avio_wl32(pb, wav->peak_num_frames);        /* number of peak frames */
    avio_wl32(pb, wav->peak_pos_pop);           /* audio sample frame index */
    avio_wl32(pb, 128);                         /* equal to size of header */
    avio_write(pb, timestamp, 28);              /* ASCII time stamp */
    ffio_fill(pb, 0, 60);

    avio_write(pb, wav->peak_output, wav->peak_outbuf_bytes);

    ff_end_tag(pb, peak);

    if (!wav->data)
        wav->data = peak;

    return 0;
}
Example #5
HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(
    IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
{

    frameCount++;

    // Handle Video Frame
    if (videoFrame) {
        BMDTimeValue frameTime;
        BMDTimeValue frameDuration;
        int64_t pts;
        videoFrame->GetStreamTime(&frameTime, &frameDuration,
                                  video_st->time_base.den);

        pts = frameTime / video_st->time_base.num;

        if (initial_video_pts == AV_NOPTS_VALUE) {
            initial_video_pts = pts;
        }

        pts -= initial_video_pts;

        write_video_packet(videoFrame, pts, frameDuration);

        if (serial_fd > 0) {
            char line[8] = {0};
            int count = read(serial_fd, line, 7);
            if (count > 0)
                fprintf(stderr, "read %d bytes: %s  \n", count, line);
            else line[0] = ' ';
            write_data_packet(line, 7, pts);
        }

        if (wallclock) {
            int64_t t = av_gettime();
            char line[20];
            snprintf(line, sizeof(line), "%" PRId64, t);
            write_data_packet(line, strlen(line), pts);
        }
    }

    // Handle Audio Frame
    if (audioFrame)
        write_audio_packet(audioFrame);


    return S_OK;
}
Example #6
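/* Re-sends the SAP announcement at most once every 5 seconds (5000000
 * microseconds of av_gettime() wall time) before chaining the packet to the
 * underlying RTP muxer. */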
static int sap_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVFormatContext *rtpctx;
    struct SAPState *sap = s->priv_data;
    int64_t now = av_gettime();

    if (!sap->last_time || now - sap->last_time > 5000000) {
        int ret = ffurl_write(sap->ann_fd, sap->ann, sap->ann_size);
        /* Don't abort even if we get "Destination unreachable" */
        if (ret < 0 && ret != AVERROR(ECONNREFUSED))
            return ret;
        sap->last_time = now;
    }
    rtpctx = s->streams[pkt->stream_index]->priv_data;
    return ff_write_chained(rtpctx, 0, pkt, s);
}
Example #7
/*	Update volume filter
 */
void BarPlayerSetVolume (player_t * const player) {
	assert (player != NULL);

	if (player->mode != PLAYER_PLAYING) {
		return;
	}

	int ret;
#ifdef HAVE_AVFILTER_GRAPH_SEND_COMMAND
	/* ffmpeg and libav disagree on the type of this option (string vs. double)
	 * -> print to string and let them parse it again */
	char strbuf[16];
	snprintf (strbuf, sizeof (strbuf), "%fdB",
			player->settings->volume + player->gain);
	assert (player->fgraph != NULL);
	if ((ret = avfilter_graph_send_command (player->fgraph, "volume", "volume",
					strbuf, NULL, 0, 0)) < 0) {
#else
	/* convert from decibel */
	const double volume = pow (10, (player->settings->volume + player->gain) / 20);
	/* libav does not provide other means to set this right now. it might not
	 * even work everywhere. */
	assert (player->fvolume != NULL);
	if ((ret = av_opt_set_double (player->fvolume->priv, "volume", volume,
			0)) != 0) {
#endif
		printError (player->settings, "Cannot set volume", ret);
	}
}

#define softfail(msg) \
	printError (player->settings, msg, ret); \
	return false;

#ifndef HAVE_AV_TIMEOUT
/*	interrupt callback for libav, which lacks a timeout option
 *
 *	obviously calling ping() a lot of times and then calling av_gettime here
 *	again is rather inefficient.
 */
static int intCb (void * const data) {
	player_t * const player = data;
	assert (player != NULL);
	/* 10 seconds timeout (usec) */
	return (av_gettime () - player->ping) > 10*1000000;
}
Example #8
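/* Frame-rate cap: av_gettime() returns microseconds, so the delta is divided
 * by 1000.0 to get milliseconds; frameTime is presumably in the same unit. */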
static boolean I_AVCapFrameRate(void)
{
    uint64_t curTics = av_gettime();
    double elapsed = (double)(curTics - currentPts) / 1000.0;

    if(elapsed < frameTime)
    {
        if(frameTime - elapsed > 3.0)
        {
            // give up a small timeslice
            I_Sleep(2);
        }
        
        return true;
    }
    
    return false;
}
Example #9
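/* OSS capture: the pts is av_gettime() minus the time represented by the
 * bytes still queued in the driver fifo (SNDCTL_DSP_GETISPACE), converted
 * via the sample rate and channel count. */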
static int audio_read_packet(AVFormatContext * s1, AVPacket * pkt)
{
	AudioData *s = s1->priv_data;
	int ret, bdelay;
	int64_t cur_time;
	struct audio_buf_info abufi;

	if ((ret = av_new_packet(pkt, s->frame_size)) < 0)
		return ret;

	ret = read(s->fd, pkt->data, pkt->size);
	if (ret <= 0) {
		av_free_packet(pkt);
		pkt->size = 0;
		if (ret < 0)
			return AVERROR(errno);
		else
			return AVERROR_EOF;
	}
	pkt->size = ret;

	/* compute pts of the start of the packet */
	cur_time = av_gettime();
	bdelay = ret;
	if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
		bdelay += abufi.bytes;
	}
	/* subtract time represented by the number of bytes in the audio fifo */
	cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);

	/* convert to wanted units */
	pkt->pts = cur_time;

	if (s->flip_left && s->channels == 2) {
		int i;
		short *p = (short *) pkt->data;

		for (i = 0; i < ret; i += 4) {
			*p = ~*p;
			p += 2;
		}
	}
	return 0;
}
Example #10
void fill_audio(void *udata, Uint8 *stream, int len) // CLS_DlgStreamPusher::
{
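	/* SDL audio callback: stamp the entry wall time in microseconds, then
	 * copy up to `len` bytes of decoded audio into `stream`. */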
	struct_stream_info* struct_stream = (struct_stream_info*)udata;
	if (struct_stream == NULL){
		TRACE("struct_stream == NULL \n");
		return;
	}
	audio_callback_time = av_gettime();

	int frame_size = av_samples_get_buffer_size(NULL, struct_stream->m_audio_tgt.channels, 1, struct_stream->m_audio_tgt.fmt, 1);
	int len1 = 0;

	if (audio_len == 0)	/* Only play if we have data left */
		return;
	len = (len > audio_len ? audio_len : len);	/* Mix as much data as possible */

	while (len > 0 && audio_len > 0){
		if (struct_stream->m_audio_buf_index >= struct_stream->m_audio_buf_size){
			if (struct_stream->m_aduio_pkt_size < 0){
				struct_stream->m_audio_buf = struct_stream->m_silence_buf;
				struct_stream->m_audio_buf_size = sizeof(struct_stream->m_silence_buf) / frame_size * frame_size;
			}
			else{
				if (struct_stream->m_show_mode == SHOW_MODE_WAVES){
					UpdateSampleDisplay(struct_stream, (int16_t *)struct_stream->m_audio_buf, struct_stream->m_aduio_pkt_size);
				}
				struct_stream->m_audio_buf_size = struct_stream->m_aduio_pkt_size;
			}
		}

		len1 = struct_stream->m_audio_buf_size - struct_stream->m_audio_buf_index;
		if (len1 > len)
			len1 = len;
		memcpy(stream, (uint8_t *)struct_stream->m_audio_buf + struct_stream->m_audio_buf_index, len1);
		audio_len -= len;
		len -= len1;
		stream += len1;
		struct_stream->m_audio_buf_index += len1;
	}
	audio_len = 0;

	struct_stream->m_audio_write_buf_size = struct_stream->m_audio_buf_size - struct_stream->m_audio_buf_index;
}
Example #11
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for(;;) {
        curtime = av_gettime();
        delay = s->time_frame  * s->frame_rate_base / s->frame_rate - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
                /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    pkt->pts = curtime;

    /* read one frame */
    if (s->aiw_enabled) {
        return aiw_read_picture(s, pkt->data);
    } else if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return AVERROR(EIO);
        return s->frame_size;
    }
}
Example #12
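/* setpts filter setup: VAR_RTCSTART latches the wall-clock time once at
 * configuration so PTS expressions can use it together with RTCTIME. */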
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
        av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}
Example #13
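/* After flushing the MediaCodec instance, first_buffer_at is re-armed with
 * av_gettime(), presumably so elapsed-time checks on the first output buffer
 * restart from the flush. */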
int ff_mediacodec_dec_flush(AVCodecContext *avctx, MediaCodecDecContext *s)
{
    FFAMediaCodec *codec = s->codec;
    int status;

    s->queued_buffer_nb = 0;
    s->dequeued_buffer_nb = 0;

    s->flushing = 0;

    status = ff_AMediaCodec_flush(codec);
    if (status < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to flush MediaCodec %p\n", codec);
        return AVERROR_EXTERNAL;
    }

    s->first_buffer = 0;
    s->first_buffer_at = av_gettime();

    return 0;
}
Example #14
static int mediacodec_dec_flush_codec(AVCodecContext *avctx, MediaCodecDecContext *s)
{
    FFAMediaCodec *codec = s->codec;
    int status;

    s->dequeued_buffer_nb = 0;

    s->draining = 0;
    s->flushing = 0;
    s->eos = 0;

    status = ff_AMediaCodec_flush(codec);
    if (status < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to flush codec\n");
        return AVERROR_EXTERNAL;
    }

    s->first_buffer = 0;
    s->first_buffer_at = av_gettime();

    return 0;
}
Example #15
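/* Polling timer thread: each registered timer fires its callback once
 * av_gettime() shows at least mInterval microseconds since its last entry. */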
void*	UTimer::timerThread(void* data){

		utimer_link cur;
		int64_t	now;

		ulog_info("timerThread enter");

		UPlayer* player = (UPlayer*)data;

		assert(player);
#if PLATFORM_DEF == ANDROID_PLATFORM
		player->mCrashHandler.registerTid();
#endif

		while(timer_thread_started){
			usleep(UTIMER_MIN_PRECISION);
			lock();
			cur = timer_head;

			while(cur){
				if(cur->timer->mIsRunning){
					if(( (now=av_gettime()) - cur->timer->mLastEntryTime ) >= cur->timer->mInterval){
						cur->timer->mLastEntryTime = now;
						cur->timer->mFNTimer(cur->timer->mData);
					}
				}
				cur = cur->next;
			}
			unlock();
		}
#if PLATFORM_DEF == ANDROID_PLATFORM
		player->mCrashHandler.unRegisterTid();
#endif

		ulog_info("timerThread exit");

		return (void *)0;
}
Example #16
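/* ALSA capture: the dts is av_gettime() minus the time covered by the frames
 * still in the device (snd_pcm_delay) plus those just read, then smoothed by
 * the timefilter. */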
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    AlsaData *s  = s1->priv_data;
    int res;
    int64_t dts;
    snd_pcm_sframes_t delay = 0;

    if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) {
        return AVERROR(EIO);
    }

    while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) {
        if (res == -EAGAIN) {
            av_packet_unref(pkt);

            return AVERROR(EAGAIN);
        }
        if (ff_alsa_xrun_recover(s1, res) < 0) {
            av_log(s1, AV_LOG_ERROR, "ALSA read error: %s\n",
                   snd_strerror(res));
            av_packet_unref(pkt);

            return AVERROR(EIO);
        }
        ff_timefilter_reset(s->timefilter);
    }

    dts = av_gettime();
    snd_pcm_delay(s->h, &delay);
    dts -= av_rescale(delay + res, 1000000, s->sample_rate);
    pkt->pts = ff_timefilter_update(s->timefilter, dts, s->last_period);
    s->last_period = res;

    pkt->size = res * s->frame_size;

    return 0;
}
Example #17
int bandwidth_measure_get_bandwidth(void *band, int *fast_band, int *mid_band, int *avg_band)
{
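	/* Reports fast/mid/average bandwidth in bits per second; the average
	 * uses the total elapsed wall time, av_gettime() - init_time_us. */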
	struct ratesdata *rates=(struct ratesdata *)band;
	int64_t time_us;
	
	time_us=rates->latest_f_duration_us;
	if(time_us>0 && rates->latest_f_bytes>0)
		*fast_band=(int64_t)rates->latest_f_bytes*8*1000*1000/time_us;/*bits per seconds*/
	else
		*fast_band=0;

	time_us=rates->latest_m_duration_us;
	if(time_us>0 && rates->latest_m_bytes>0)
		*mid_band=(int64_t)rates->latest_m_bytes*8*1000*1000/time_us;/*bits per seconds*/
	else
		*mid_band=0;

	time_us=av_gettime()-rates->init_time_us;
	if(time_us>0)
		*avg_band=rates->total_bytes*8*1000*1000/time_us;/*bits per seconds*/
	else
		*avg_band=0;
	return 0;
}
Example #18
int stream_component_open(VideoState *is, int stream_index) {
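    /* Opens and configures the decoder for one stream; for video it seeds
     * frame_timer and video_current_pts_time from av_gettime() for A/V sync. */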

    AVFormatContext *pFormatCtx = is->pFormatCtx;
    AVCodecContext *codecCtx = NULL;
    AVCodec *codec = NULL;
    AVDictionary *optionsDict = NULL;
    SDL_AudioSpec wanted_spec, spec;

    if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
        return -1;
    }

    // Get a pointer to the codec context for the video stream
    codecCtx = pFormatCtx->streams[stream_index]->codec;

    if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
        // Set audio settings from codec info
        wanted_spec.freq = codecCtx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = codecCtx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = audio_callback;
        wanted_spec.userdata = is;

        if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }

        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(codecCtx->codec_id);

    if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch(codecCtx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            is->audioStream = stream_index;
            is->audio_st = pFormatCtx->streams[stream_index];
            is->audio_buf_size = 0;
            is->audio_buf_index = 0;

            /* averaging filter for audio sync */
            is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
            is->audio_diff_avg_count = 0;
            /* Correct audio only if larger error than this */
            is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;

            memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
            packet_queue_init(&is->audioq);
            SDL_PauseAudio(0);
            break;

        case AVMEDIA_TYPE_VIDEO:
            is->videoStream = stream_index;
            is->video_st = pFormatCtx->streams[stream_index];

            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->frame_last_delay = 40e-3;
            is->video_current_pts_time = av_gettime();

            packet_queue_init(&is->videoq);
            is->video_tid = SDL_CreateThread(video_thread, is);
            is->sws_ctx =
                sws_getContext
                (
                    is->video_st->codec->width,
                    is->video_st->codec->height,
                    is->video_st->codec->pix_fmt,
                    is->video_st->codec->width,
                    is->video_st->codec->height,
                    PIX_FMT_YUV420P,
                    SWS_BILINEAR,
                    NULL,
                    NULL,
                    NULL
                );
            codecCtx->get_buffer = our_get_buffer;

            break;

        default:
            break;
    }

    return 0;
}
Example #19
void video_refresh_timer(void *userdata) {
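    /* Displays the next queued picture and schedules the following refresh:
     * frame_timer accumulates inter-frame delays in seconds and is compared
     * against av_gettime() / 1000000.0 to get the real wait. */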

    VideoState *is = (VideoState *)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(is->pictq_size == 0) {
            schedule_refresh(is, 1);

        } else {
            vp = &is->pictq[is->pictq_rindex];

            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            delay = vp->pts - is->frame_last_pts; /* the pts from last time */

            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }

            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to sync to audio if not master source */
            if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* Skip or repeat the frame. Take delay into account
                   FFPlay still doesn't "know if this is the best guess." */
                sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;

                if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if(diff <= -sync_threshold) {
                        delay = 0;

                    } else if(diff >= sync_threshold) {
                        delay = 2 * delay;
                    }
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);

            if(actual_delay < 0.010) {
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }

            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

            /* show the picture! */
            video_display(is);

            /* update queue for next picture! */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }

    } else {
        schedule_refresh(is, 100);
    }
}
Example #20
double get_external_clock(VideoState *is) {
    return av_gettime() / 1000000.0;
}
Example #21
double get_video_clock(VideoState *is) {
    double delta;

    delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    return is->video_current_pts + delta;
}
Example #22
void video_refresh_timer(void *userdata) {

    VideoState *is = (VideoState *)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(is->pictq_size == 0) {
            fprintf(stderr, "%s pictq_size is 0, schedule another refresh\n", __FUNCTION__);
            schedule_refresh(is, 1);
        } else {
            vp = &is->pictq[is->pictq_rindex];

            delay = vp->pts - is->frame_last_pts; /* the pts from last time */
            fprintf(stderr, "delay 1: %.8f\n", delay);
            if(delay <= 0 || delay >= 1.0) { // larger than 1 second or smaller than 0
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
                fprintf(stderr, "delay 2: %.8f\n", delay);
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = vp->pts - ref_clock;
            fprintf(stderr, "audio video diff: %.8f\n", diff);

            /* Skip or repeat the frame. Take delay into account
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;
                }
            }
            is->frame_timer += delay;
            /* compute the REAL delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            fprintf(stderr, "actual_delay %.8f\n", actual_delay);
            if(actual_delay < 0.010) { //smaller than 10 ms
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            // Why add 0.5 here? I see many 0.5s in multimedia framework code, such as stagefright.
            fprintf(stderr, "%s, delay: %.8f\n", __FUNCTION__, actual_delay*1000+0.5);
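            // (adding 0.5 before the int cast rounds to the nearest millisecond)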
            // Video is faster than audio, so we need a delay render.
            // after we show a frame, we figure out when the next frame should be shown
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5)); 
            /* show the picture! */
            video_display(is);
            fprintf(stderr, "\n---------------------------------------------------------------------\n");

            /* update queue for next picture! */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        fprintf(stderr, "%s, schedule_refresh for another 100 ms\n", __FUNCTION__);
        schedule_refresh(is, 100);
    }
}
Example #23
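/* RTP muxer setup: av_gettime() plus the pid seeds the PRNG so the random
 * base timestamp and SSRC differ between runs. */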
static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int payload_type, max_packet_size, n;
    AVStream *st;

    if (s1->nb_streams != 1)
        return -1;
    st = s1->streams[0];

    payload_type = ff_rtp_get_payload_type(st->codec);
    if (payload_type < 0)
        payload_type = RTP_PT_PRIVATE; /* private payload type */
    if (s1->rtp_pt > 0)
        payload_type = s1->rtp_pt; /* private payload type */
    s->payload_type = payload_type;

// The following two FIXMEs could be seeded from the current time; there is
// normally no info leak, as RTP is likely to be transmitted immediately.
    if (random_state.index == 0)
        av_init_random(av_gettime() + (getpid() << 16), &random_state);
    //s->base_timestamp = 0; /* FIXME: was random(), what should this be? */
    s->base_timestamp = av_random(&random_state);
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    //s->ssrc = 0; /* FIXME: was random(), what should this be? */
    s->ssrc = av_random(&random_state);
    s->first_packet = 1;
    s->first_rtcp_ntp_time = AV_NOPTS_VALUE;

    max_packet_size = url_fget_max_packet_size(s1->pb);
    if (max_packet_size <= 12)
        return AVERROR(EIO);
    s->buf = av_malloc(max_packet_size);
    if (s->buf == NULL) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = max_packet_size - 12;

    s->max_frames_per_packet = 0;
    if (s1->max_delay) {
        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
            if (st->codec->frame_size == 0) {
                av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n");
            } else {
                s->max_frames_per_packet = av_rescale_rnd(s1->max_delay, st->codec->sample_rate, AV_TIME_BASE * st->codec->frame_size, AV_ROUND_DOWN);
            }
        }
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            /* FIXME: We should round down here... */
            s->max_frames_per_packet = av_rescale_q(s1->max_delay, (AVRational){1, 1000000}, st->codec->time_base);
        }
    }

    av_set_pts_info(st, 32, 1, 90000);
    switch(st->codec->codec_id) {
    case CODEC_ID_MP2:
    case CODEC_ID_MP3:
        s->buf_ptr = s->buf + 4;
        break;
    case CODEC_ID_MPEG1VIDEO:
    case CODEC_ID_MPEG2VIDEO:
        break;
    case CODEC_ID_MPEG2TS:
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        s->buf_ptr = s->buf;
        break;
    case CODEC_ID_AAC:
        s->num_frames = 0;
    default:
        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
            av_set_pts_info(st, 32, 1, st->codec->sample_rate);
        }
        s->buf_ptr = s->buf;
        break;
    }

    memset(s->stock_buf, 0, sizeof(s->stock_buf));
    s->stock_len = 0;

    return 0;
}
Example #24
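// Opens the media file, finds the first audio and video stream indices, and
// records the wall-clock start time in milliseconds (av_gettime() / 1000).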
int CCDataManager::OpenFile(const std::string& mediaUrl,
                 AVFormatContext** ppFormatCtx,
                 int* pASIndex,
                 int* pVSIndex)
{
    *pASIndex = -1;
    *pVSIndex = -1;
    if (avformat_open_input(ppFormatCtx, mediaUrl.c_str(), NULL, NULL) != 0)
    {
        std::cout << "can not open media file" << std::endl;
        return FAILURE;
    }
    else
    {
        std::cout << "open media file" << std::endl;
    }

    if (avformat_find_stream_info(*ppFormatCtx, NULL) < 0)
    {
        std::cout << "can not retrieve file info" << std::endl;
        return FAILURE;
    }
    else
    {
        std::cout << "retrieve file info" << std::endl;
    }

    av_dump_format(*ppFormatCtx, 0, mediaUrl.c_str(), 0);

    for (unsigned i = 0; i < (*ppFormatCtx)->nb_streams; i++)
    {
        if (*pASIndex != -1 && *pVSIndex != -1)
        {
            // We have found both streams
            break;
        }

        if (*pASIndex == -1
                && (*ppFormatCtx)->streams[i]->codec->codec_type
                    == AVMEDIA_TYPE_AUDIO)
        {
            *pASIndex = i;
        }

        if (*pVSIndex == -1
                && (*ppFormatCtx)->streams[i]->codec->codec_type
                    == AVMEDIA_TYPE_VIDEO)
        {
            *pVSIndex = i;
        }
    }

    int64_t realStartTime = av_gettime() / 1000;
    CCSystemAlarm::GetInstance()->SetRealStartTime(realStartTime);

    //std::cout << "The total time is : " << (*ppFormatCtx)->duration / 1000000 << std::endl;
    //(*ppFormatCtx)->start_time = av_gettime() / 1000.0;
    //std::cout << "The start time is :" << (*ppFormatCtx)->start_time << std::endl;
    //std::cout << "The total file size is : " << avio_size((*ppFormatCtx)->pb) << std::endl;
    //std::cout << "The Bit_rate is : " << avio_size((*ppFormatCtx)->pb) / ((*ppFormatCtx)->duration / 1000000) << std::endl;
    //std::cout << "The real bit rate is: " << (*ppFormatCtx)->bit_rate / 8 << std::endl;

    //avio_seek((*ppFormatCtx)->pb, 0, SEEK_END);
    //std::cout << "The total file size is : " << avio_tell((*ppFormatCtx)->pb) << std::endl;

    return 0;
}
Example #25
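/* X11 grabber: same 1000000-ticks-per-frame pacing as the grabbers above;
 * additionally re-centers or edge-tracks the grab region when follow_mouse
 * is enabled. */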
/**
 * Grab a frame from x11 (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @param pkt Packet holding the grabbed frame
 * @return frame size in bytes
 */
static int
x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct x11_grab *s = s1->priv_data;
    Display *dpy = s->dpy;
    XImage *image = s->image;
    int x_off = s->x_off;
    int y_off = s->y_off;

    int screen;
    Window root;
    int follow_mouse = s->follow_mouse;

    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for(;;) {
        curtime = av_gettime();
        delay = s->time_frame * av_q2d(s->time_base) - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * av_q2d(s->time_base)) {
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    av_init_packet(pkt);
    pkt->data = image->data;
    pkt->size = s->frame_size;
    pkt->pts = curtime;

    screen = DefaultScreen(dpy);
    root = RootWindow(dpy, screen);
    if (follow_mouse) {
        int screen_w, screen_h;
        int pointer_x, pointer_y, _;
        Window w;

        screen_w = DisplayWidth(dpy, screen);
        screen_h = DisplayHeight(dpy, screen);
        XQueryPointer(dpy, root, &w, &w, &pointer_x, &pointer_y, &_, &_, &_);
        if (follow_mouse == -1) {
            // follow the mouse, put it at center of grabbing region
            x_off += pointer_x - s->width  / 2 - x_off;
            y_off += pointer_y - s->height / 2 - y_off;
        } else {
            // follow the mouse, but only move the grabbing region when mouse
            // reaches within certain pixels to the edge.
            if (pointer_x > x_off + s->width - follow_mouse) {
                x_off += pointer_x - (x_off + s->width - follow_mouse);
            } else if (pointer_x < x_off + follow_mouse)
                x_off -= (x_off + follow_mouse) - pointer_x;
            if (pointer_y > y_off + s->height - follow_mouse) {
                y_off += pointer_y - (y_off + s->height - follow_mouse);
            } else if (pointer_y < y_off + follow_mouse)
                y_off -= (y_off + follow_mouse) - pointer_y;
        }
        // adjust grabbing region position if it goes out of screen.
        s->x_off = x_off = FFMIN(FFMAX(x_off, 0), screen_w - s->width);
        s->y_off = y_off = FFMIN(FFMAX(y_off, 0), screen_h - s->height);

        if (s->show_region && s->region_win)
            XMoveWindow(dpy, s->region_win,
                        s->x_off - REGION_WIN_BORDER,
                        s->y_off - REGION_WIN_BORDER);
    }

    if (s->show_region) {
        if (s->region_win) {
            XEvent evt;
            // clean up the events, and do the initial draw or redraw.
            for (evt.type = NoEventMask; XCheckMaskEvent(dpy, ExposureMask | StructureNotifyMask, &evt); );
            if (evt.type)
                x11grab_draw_region_win(s);
        } else {
            x11grab_region_win_init(s);
        }
    }

    if(s->use_shm) {
        if (!XShmGetImage(dpy, root, image, x_off, y_off, AllPlanes)) {
            av_log (s1, AV_LOG_INFO, "XShmGetImage() failed\n");
        }
    } else {
        if (!xget_zpixmap(dpy, root, image, x_off, y_off)) {
            av_log (s1, AV_LOG_INFO, "XGetZPixmap() failed\n");
        }
    }

    if (s->draw_mouse) {
        paint_mouse_pointer(image, s);
    }

    return s->frame_size;
}
Example #26
/**
 * Initialize the x11 grab device demuxer (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @return <ul>
 *          <li>AVERROR(ENOMEM) no memory left</li>
 *          <li>AVERROR(EIO) other failure case</li>
 *          <li>0 success</li>
 *         </ul>
 */
static int
x11grab_read_header(AVFormatContext *s1)
{
    struct x11_grab *x11grab = s1->priv_data;
    Display *dpy;
    AVStream *st = NULL;
    enum PixelFormat input_pixfmt;
    XImage *image;
    int x_off = 0;
    int y_off = 0;
    int screen;
    int use_shm;
    char *dpyname, *offset;
    int ret = 0;
    AVRational framerate;

    dpyname = av_strdup(s1->filename);
    offset = strchr(dpyname, '+');
    if (offset) {
        sscanf(offset, "%d,%d", &x_off, &y_off);
        x11grab->draw_mouse = !strstr(offset, "nomouse");
        *offset= 0;
    }

    if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
        goto out;
    }
    if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate);
        goto out;
    }
    av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
           s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);

    dpy = XOpenDisplay(dpyname);
    av_freep(&dpyname);
    if(!dpy) {
        av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
        ret = AVERROR(EIO);
        goto out;
    }

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto out;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    screen = DefaultScreen(dpy);

    if (x11grab->follow_mouse) {
        int screen_w, screen_h;
        Window w;

        screen_w = DisplayWidth(dpy, screen);
        screen_h = DisplayHeight(dpy, screen);
        XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off, &ret, &ret, &ret);
        x_off -= x11grab->width / 2;
        y_off -= x11grab->height / 2;
        x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width);
        y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height);
        av_log(s1, AV_LOG_INFO, "followmouse is enabled, resetting grabbing region to x: %d y: %d\n", x_off, y_off);
    }

    use_shm = XShmQueryExtension(dpy);
    av_log(s1, AV_LOG_INFO, "shared memory extension%s found\n", use_shm ? "" : " not");

    if(use_shm) {
        int scr = XDefaultScreen(dpy);
        image = XShmCreateImage(dpy,
                                DefaultVisual(dpy, scr),
                                DefaultDepth(dpy, scr),
                                ZPixmap,
                                NULL,
                                &x11grab->shminfo,
                                x11grab->width, x11grab->height);
        x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
                                        image->bytes_per_line * image->height,
                                        IPC_CREAT|0777);
        if (x11grab->shminfo.shmid == -1) {
            av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
            ret = AVERROR(ENOMEM);
            goto out;
        }
        x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
        x11grab->shminfo.readOnly = False;

        if (!XShmAttach(dpy, &x11grab->shminfo)) {
            av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
            /* needs some better error subroutine :) */
            ret = AVERROR(EIO);
            goto out;
        }
    } else {
        image = XGetImage(dpy, RootWindow(dpy, screen),
                          x_off,y_off,
                          x11grab->width, x11grab->height,
                          AllPlanes, ZPixmap);
    }

    switch (image->bits_per_pixel) {
    case 8:
        av_log (s1, AV_LOG_DEBUG, "8 bit palette\n");
        input_pixfmt = PIX_FMT_PAL8;
        break;
    case 16:
        if (       image->red_mask   == 0xf800 &&
                   image->green_mask == 0x07e0 &&
                   image->blue_mask  == 0x001f ) {
            av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n");
            input_pixfmt = PIX_FMT_RGB565;
        } else if (image->red_mask   == 0x7c00 &&
                   image->green_mask == 0x03e0 &&
                   image->blue_mask  == 0x001f ) {
            av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n");
            input_pixfmt = PIX_FMT_RGB555;
        } else {
            av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
            ret = AVERROR(EIO);
            goto out;
        }
        break;
    case 24:
        if (        image->red_mask   == 0xff0000 &&
                    image->green_mask == 0x00ff00 &&
                    image->blue_mask  == 0x0000ff ) {
            input_pixfmt = PIX_FMT_BGR24;
        } else if ( image->red_mask   == 0x0000ff &&
                    image->green_mask == 0x00ff00 &&
                    image->blue_mask  == 0xff0000 ) {
            input_pixfmt = PIX_FMT_RGB24;
        } else {
            av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
            ret = AVERROR(EIO);
            goto out;
        }
        break;
    case 32:
        input_pixfmt = PIX_FMT_0RGB32;
        break;
    default:
        av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
        ret = AVERROR(EINVAL);
        goto out;
    }

    x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8;
    x11grab->dpy = dpy;
    x11grab->time_base  = (AVRational) {
        framerate.den, framerate.num
    };
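    /* express "now" in time_base ticks; the pacing loop in the read_packet
     * callback advances this by 1000000 per frame and rescales it to compare
     * with av_gettime() */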
    x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
    x11grab->x_off = x_off;
    x11grab->y_off = y_off;
    x11grab->image = image;
    x11grab->use_shm = use_shm;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width  = x11grab->width;
    st->codec->height = x11grab->height;
    st->codec->pix_fmt = input_pixfmt;
    st->codec->time_base = x11grab->time_base;
    st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8;

out:
    return ret;
}
Example #27
// currently, SASDL_seek() will return -1 on both EOF and error.
// so the user should directly stop the video loop when receiving -1.
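// On success, start_time is rewound so that av_gettime() - start_time equals
// the new playback position in microseconds while the video is playing.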
static int _SASDL_seek_internal(SASDLContext *sasdl_ctx,
                                double seek_dst, int accurate)
{
     if(seek_dst >= SASDL_get_video_duration(sasdl_ctx))
          return SASDL_stop(sasdl_ctx);
     
     if(seek_dst < 0.0f)
          seek_dst = 0.0f;

     int ret = SA_seek(sasdl_ctx->sa_ctx, seek_dst);
     if(ret < 0)
          return ret;

     SDL_mutexP(sasdl_ctx->ap_lock);
     SAAudioPacket *ap = sasdl_ctx->ap;
     if(ap != NULL)
     {
          SA_free_ap(ap);
          sasdl_ctx->ap = NULL;
          sasdl_ctx->audio_buf_index = 0;
     }

     SAVideoPacket *vp = SA_get_vp(sasdl_ctx->sa_ctx);
     if(vp == NULL) {
          sasdl_ctx->video_eof = TRUE;
          sasdl_ctx->frame_next = NULL;

          // *FIXME*: is releasing the lock here proper?
          //          (this line is a hack for saya)
          SDL_mutexV(sasdl_ctx->ap_lock);
          
          return -1;
     } else {
          sasdl_ctx->frame_next = vp->frame_ptr;
          sasdl_ctx->frame_next_pts = vp->pts;
          SA_free_vp(vp);

          if(seek_dst != 0.0f)
               _SASDL_convert_frame_next_to_cur(sasdl_ctx);
          else
               _SASDL_fill_frame_cur_black(sasdl_ctx);
     }

     if (accurate) {
          while (sasdl_ctx->frame_next_pts < seek_dst) {
              vp = SA_get_vp(sasdl_ctx->sa_ctx);
              if (vp == NULL) {
                  sasdl_ctx->video_eof = TRUE;
                  sasdl_ctx->frame_next = NULL;

                  // FIXME: if the mutexV() out of the if-accurate statement
                  //        is modified, change here as well.
                  SDL_mutexV(sasdl_ctx->ap_lock);

                  return -1;
              } else {
                  if (vp->pts > seek_dst) {
                      _SASDL_convert_frame_next_to_cur(sasdl_ctx);
                  }
                  sasdl_ctx->frame_next = vp->frame_ptr;
                  sasdl_ctx->frame_next_pts = vp->pts;
                  SA_free_vp(vp);
              }
          }
     } else {
          // the real position we have reached.
          seek_dst = sasdl_ctx->frame_next_pts;
     }

     ap = SA_get_ap(sasdl_ctx->sa_ctx);
     if(ap == NULL) {
          sasdl_ctx->audio_eof = TRUE;
     } else {
          while(ap->pts < seek_dst) {
               SA_free_ap(ap);
               ap = SA_get_ap(sasdl_ctx->sa_ctx);
               if(ap == NULL) {
                    sasdl_ctx->audio_eof = TRUE;
                    break;
               }
          }
          sasdl_ctx->ap = ap;
     }
     SDL_mutexV(sasdl_ctx->ap_lock);

     if(sasdl_ctx->status == SASDL_is_playing)
          sasdl_ctx->start_time = av_gettime() - (int64_t)(seek_dst * 1000000.0f);
     else {
          sasdl_ctx->video_restart_at = seek_dst;
          if(sasdl_ctx->status == SASDL_is_stopped)
               sasdl_ctx->status = SASDL_is_paused;
     }

     if(seek_dst < SASDL_get_video_duration(sasdl_ctx)) {
          sasdl_ctx->video_eof = sasdl_ctx->audio_eof = FALSE;
     } else {
          sasdl_ctx->video_eof = sasdl_ctx->audio_eof = TRUE;
          SASDL_stop(sasdl_ctx);
     }

     return ret;
}
Example #28
static int rtsp_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RTSPState *rt = s->priv_data;
    int ret;
    RTSPMessageHeader reply1, *reply = &reply1;
    char cmd[1024];

retry:
    if (rt->server_type == RTSP_SERVER_REAL) {
        int i;

        for (i = 0; i < s->nb_streams; i++)
            rt->real_setup[i] = s->streams[i]->discard;

        if (!rt->need_subscription) {
            if (memcmp (rt->real_setup, rt->real_setup_cache,
                        sizeof(enum AVDiscard) * s->nb_streams)) {
                snprintf(cmd, sizeof(cmd),
                         "Unsubscribe: %s\r\n",
                         rt->last_subscription);
                ff_rtsp_send_cmd(s, "SET_PARAMETER", rt->control_uri,
                                 cmd, reply, NULL);
                if (reply->status_code != RTSP_STATUS_OK)
                    return AVERROR_INVALIDDATA;
                rt->need_subscription = 1;
            }
        }

        if (rt->need_subscription) {
            int r, rule_nr, first = 1;

            memcpy(rt->real_setup_cache, rt->real_setup,
                   sizeof(enum AVDiscard) * s->nb_streams);
            rt->last_subscription[0] = 0;

            snprintf(cmd, sizeof(cmd),
                     "Subscribe: ");
            for (i = 0; i < rt->nb_rtsp_streams; i++) {
                rule_nr = 0;
                for (r = 0; r < s->nb_streams; r++) {
                    if (s->streams[r]->id == i) {
                        if (s->streams[r]->discard != AVDISCARD_ALL) {
                            if (!first)
                                av_strlcat(rt->last_subscription, ",",
                                           sizeof(rt->last_subscription));
                            ff_rdt_subscribe_rule(
                                rt->last_subscription,
                                sizeof(rt->last_subscription), i, rule_nr);
                            first = 0;
                        }
                        rule_nr++;
                    }
                }
            }
            av_strlcatf(cmd, sizeof(cmd), "%s\r\n", rt->last_subscription);
            ff_rtsp_send_cmd(s, "SET_PARAMETER", rt->control_uri,
                             cmd, reply, NULL);
            if (reply->status_code != RTSP_STATUS_OK)
                return AVERROR_INVALIDDATA;
            rt->need_subscription = 0;

            if (rt->state == RTSP_STATE_STREAMING)
                rtsp_read_play (s);
        }
    }

    ret = ff_rtsp_fetch_packet(s, pkt);
    if (ret < 0) {
        if (ret == AVERROR(ETIMEDOUT) && !rt->packets) {
            if (rt->lower_transport == RTSP_LOWER_TRANSPORT_UDP &&
                rt->lower_transport_mask & (1 << RTSP_LOWER_TRANSPORT_TCP)) {
                RTSPMessageHeader reply1, *reply = &reply1;
                av_log(s, AV_LOG_WARNING, "UDP timeout, retrying with TCP\n");
                if (rtsp_read_pause(s) != 0)
                    return -1;
                // TEARDOWN is required on Real-RTSP, but might make
                // other servers close the connection.
                if (rt->server_type == RTSP_SERVER_REAL)
                    ff_rtsp_send_cmd(s, "TEARDOWN", rt->control_uri, NULL,
                                     reply, NULL);
                rt->session_id[0] = '\0';
                if (resetup_tcp(s) == 0) {
                    rt->state = RTSP_STATE_IDLE;
                    rt->need_subscription = 1;
                    if (rtsp_read_play(s) != 0)
                        return -1;
                    goto retry;
                }
            }
        }
        return ret;
    }
    rt->packets++;

    /* send dummy request to keep TCP connection alive */
    if ((av_gettime() - rt->last_cmd_time) / 1000000 >= rt->timeout / 2 ||
         rt->auth_state.stale) {
        if (rt->server_type == RTSP_SERVER_WMS ||
           (rt->server_type != RTSP_SERVER_REAL &&
            rt->get_parameter_supported)) {
            ff_rtsp_send_cmd_async(s, "GET_PARAMETER", rt->control_uri, NULL);
        } else {
            ff_rtsp_send_cmd_async(s, "OPTIONS", "*", NULL);
        }
        /* The stale flag should be reset when creating the auth response in
         * ff_rtsp_send_cmd_async, but reset it here just in case we never
         * called the auth code (if we didn't have any credentials set). */
        rt->auth_state.stale = 0;
    }

    return 0;
}
Example #29
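/* Parses an Apple HTTP Live Streaming playlist; last_load_time is stamped
 * with av_gettime() so the caller can decide when to reload it. */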
static int parse_playlist(URLContext *h, const char *url)
{
    AppleHTTPContext *s = h->priv_data;
    AVIOContext *in;
    int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    char line[1024];
    const char *ptr;

    if ((ret = avio_open(&in, url, AVIO_FLAG_READ)) < 0)
        return ret;

    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U"))
        return AVERROR_INVALIDDATA;

    free_segment_list(s);
    s->finished = 0;
    while (!in->eof_reached) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            s->target_duration = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            s->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            s->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            is_segment = 1;
            duration = atoi(ptr);
        } else if (av_strstart(line, "#", NULL)) {
            continue;
        } else if (line[0]) {
            if (is_segment) {
                struct segment *seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&s->segments, &s->n_segments, seg);
                is_segment = 0;
            } else if (is_variant) {
                struct variant *var = av_malloc(sizeof(struct variant));
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                var->bandwidth = bandwidth;
                ff_make_absolute_url(var->url, sizeof(var->url), url, line);
                dynarray_add(&s->variants, &s->n_variants, var);
                is_variant = 0;
            }
        }
    }
    s->last_load_time = av_gettime();

fail:
    avio_close(in);
    return ret;
}
Example #30
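/* NTP timestamp helper: truncates av_gettime() to millisecond precision,
 * then shifts from the Unix epoch by NTP_OFFSET_US. */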
static uint64_t ntp_time(void)
{
  return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
}