Example #1
static struct task_struct* pfair_schedule(struct task_struct * prev)
{
	struct pfair_state* state = &__get_cpu_var(pfair_state);
	int blocks;
	struct task_struct* next = NULL;

	raw_spin_lock(&pfair_lock);

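	/* Did the previous task block? (real-time but no longer runnable) */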
	blocks  = is_realtime(prev) && !is_running(prev);

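	/* Run the locally assigned subtask only if it is safe to schedule it on this CPU. */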
	if (state->local && safe_to_schedule(state->local, state->cpu))
		next = state->local;

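	/* Keep track of which CPU (if any) each task is scheduled on. */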
	if (prev != next) {
		tsk_rt(prev)->scheduled_on = NO_CPU;
		if (next)
			tsk_rt(next)->scheduled_on = state->cpu;
	}

	raw_spin_unlock(&pfair_lock);

	if (next)
		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
			   tsk_pfair(next)->release, pfair_time, litmus_clock());
	else if (is_realtime(prev))
		TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock());

	return next;
}
Example #2
inline static void __simple_on_unscheduled(struct task_struct* t)
{
	BUG_ON(!t);

	if (is_realtime(t) && budget_precisely_tracked(t))
		cancel_enforcement_timer(t);
}
Example #3
void sched_state_will_schedule(struct task_struct* tsk)
{
	/* Litmus hack: we only care about processor-local invocations of
	 * set_tsk_need_resched(). We can't reliably set the flag remotely
	 * since it might race with other updates to the scheduling state.  We
	 * can't rely on the runqueue lock protecting updates to the sched
	 * state since processors do not acquire the runqueue locks for all
	 * updates to the sched state (to avoid acquiring two runqueue locks at
	 * the same time). Further, if tsk is residing on a remote processor,
	 * then that processor doesn't actually know yet that it is going to
	 * reschedule; it still must receive an IPI (unless a local invocation
	 * races).
	 */
	if (likely(task_cpu(tsk) == smp_processor_id())) {
		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
			set_sched_state(PICKED_WRONG_TASK);
		else
			set_sched_state(WILL_SCHEDULE);
	} else
		/* Litmus tasks should never be subject to a remote
		 * set_tsk_need_resched(). */
		BUG_ON(is_realtime(tsk));
	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
		   __builtin_return_address(0));
}
Example #4
static void pfair_release_at(struct task_struct* task, lt_t start)
{
	unsigned long flags;
	quanta_t release;

	BUG_ON(!is_realtime(task));

	raw_spin_lock_irqsave(&pfair_lock, flags);
	release_at(task, start);
	release = time2quanta(start, CEIL);

	if (release - pfair_time >= PFAIR_MAX_PERIOD)
		release = pfair_time + PFAIR_MAX_PERIOD;

	TRACE_TASK(task, "sys release at %lu\n", release);

	drop_all_references(task);
	prepare_release(task, release);
	pfair_add_release(task);

	/* Clear sporadic release flag, since this release subsumes any
	 * sporadic release on wake.
	 */
	tsk_pfair(task)->sporadic_release = 0;

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
}
Example #5
int cancel_enforcement_timer(struct task_struct* t)
{
	struct enforcement_timer* et;
	int ret = 0;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;

	TRACE_TASK(t, "canceling enforcement timer.\n");

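	/* Check 'armed' twice: once without the lock for the common case, and
	 * again with the lock held to close the race with a concurrently
	 * firing timer. */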
	if (et->armed) {
		raw_spin_lock_irqsave(&et->lock, flags);
		if (et->armed) {
			ret = hrtimer_try_to_cancel(&et->timer);
			if (ret < 0)
				TRACE_TASK(t, "timer already running. failed to cancel.\n");
			else {
				TRACE_TASK(t, "canceled timer with %lld ns remaining.\n",
					ktime_to_ns(hrtimer_expires_remaining(&et->timer)));
				et->armed = 0;
			}
		}
		else
			TRACE_TASK(t, "timer was not armed (race).\n");
		raw_spin_unlock_irqrestore(&et->lock, flags);
	}
	else
		TRACE_TASK(t, "timer was not armed.\n");

	return ret;
}
Example #6
inline static void arm_enforcement_timer(struct task_struct* t, int force)
{
	struct enforcement_timer* et;
	lt_t when_to_fire, remaining_budget;
	lt_t now;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;
	if (et->armed) {
		TRACE_TASK(t, "timer already armed!\n");
		return;
	}

	if (!force) {
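		/* Do not arm the timer if there is nothing left for it to do:
		 * enforcement is either disabled or already flagged as exhausted,
		 * and signalling is either disabled or the signal was already sent. */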
		if ( (!budget_enforced(t) ||
				(budget_enforced(t) &&
					bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
				&&
			(!budget_signalled(t) ||
				(budget_signalled(t) &&
					bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) {
			TRACE_TASK(t,
					"trying to arm timer when budget "
					"has already been exhausted.\n");
			return;
		}
	}

	TRACE_TASK(t, "arming enforcement timer.\n");

	/* __hrtimer_start_range_ns() cancels the timer
	 * anyway, so we don't have to check whether it is still armed */
	raw_spin_lock_irqsave(&et->lock, flags);

	if (et->armed) {
		TRACE_TASK(t, "timer already armed (race)!\n");
		goto out;
	}

	now = litmus_clock();
	remaining_budget = budget_remaining(t);
	when_to_fire = now + remaining_budget;

	TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n",
					remaining_budget, when_to_fire);

	__hrtimer_start_range_ns(&et->timer,
				 ns_to_ktime(when_to_fire),
				 0 /* delta */,
				 HRTIMER_MODE_ABS_PINNED,  /* TODO: need to use non-pinned? */
				 0 /* no wakeup */);
	et->armed = 1;

out:
	raw_spin_unlock_irqrestore(&et->lock, flags);
}
Example #7
static void psnedf_task_block(struct task_struct *t)
{
	/* only running tasks can block, thus t is in no queue */
	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);

	BUG_ON(!is_realtime(t));
	BUG_ON(is_queued(t));
}
Example #8
static void psnedf_tick(struct task_struct *t)
{
	psnedf_domain_t *pedf = local_pedf;

	/* Check for inconsistency. We don't need the lock for this since
	 * ->scheduled is only changed in schedule, which obviously is not
	 *  executing in parallel on this CPU
	 */
	BUG_ON(is_realtime(t) && t != pedf->scheduled);

	if (is_realtime(t) &&
		tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
		budget_exhausted(t)) {
		TRACE_TASK(t, "budget exhausted\n");
		budget_state_machine2(t,on_exhausted,!IN_SCHEDULE);
	}
}
Example #9
/**
 * Main entry point; start worker threads, setup signal handling, wait for threads to exit, exit
 * @param argc - Num args
 * @param argv - Args
 * @returns 0 on success, error code on failure
 * NOTE: NEVER USE exit(3)! Instead call Thread_QuitProgram
 */
int main(int argc, char ** argv)
{

	// Open log before calling ParseArguments (since ParseArguments may call the Log functions)
	openlog("mctxserv", LOG_PID | LOG_PERROR, LOG_USER);

	ParseArguments(argc, argv); // Setup the g_options structure from program arguments

	Log(LOGINFO, "Server started");

	#ifdef REALTIME_VERSION

	if (is_realtime())
	{
		Log(LOGDEBUG, "Running under realtime kernel");
	}
	else
	{
		Fatal("Not running under realtime kernel");
	}
	struct sched_param param;
	param.sched_priority = 49;
	if (sched_setscheduler(0, SCHED_FIFO, &param) < 0)
		Fatal("sched_setscheduler failed - %s", strerror(errno));
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		Fatal("mlockall failed - %s", strerror(errno));
	stack_prefault();
	#endif //REALTIME_VERSION

	Pin_Init();

	// Try and start things
	//const char *ret;
	//if ((ret = Control_SetMode(CONTROL_START, "test")) != NULL)
	//	Fatal("Control_SetMode failed with '%s'", ret);

	// run request thread in the main thread
	FCGI_RequestLoop(NULL);

	Control_SetMode(CONTROL_STOP, NULL);
	//if ((ret = Control_SetMode(CONTROL_STOP, "test")) != NULL)
	//	Fatal("Control_SetMode failed with '%s'", ret);

	//Sensor_StopAll();
	//Actuator_StopAll();

	Pin_Close();

	Cleanup();
	return 0;
}
Example #10
/* pfair_tick - this function is called for every local timer
 * interrupt.
 */
static void pfair_tick(struct task_struct* t)
{
	struct pfair_state* state = &__get_cpu_var(pfair_state);
	quanta_t time, cur;
	int retry = 10;

	do {
		cur  = current_quantum(state);
		PTRACE("q %lu at %llu\n", cur, litmus_clock());

		/* Attempt to advance time. First CPU to get here
		 * will prepare the next quantum.
		 */
		time = cmpxchg(&pfair_time,
			       cur - 1,   /* expected */
			       cur        /* next     */
			);
		if (time == cur - 1) {
			/* exchange succeeded */
			wait_for_quantum(cur - 1, state);
			schedule_next_quantum(cur);
			retry = 0;
		} else if (time_before(time, cur - 1)) {
			/* the whole system missed a tick !? */
			catchup_quanta(time, cur, state);
			retry--;
		} else if (time_after(time, cur)) {
			/* our timer lagging behind!? */
			TRACE("BAD pfair_time:%lu > cur:%lu\n", time, cur);
			retry--;
		} else {
			/* Some other CPU already started scheduling
			 * this quantum. Let it do its job and then update.
			 */
			retry = 0;
		}
	} while (retry);

	/* Spin locally until time advances. */
	wait_for_quantum(cur, state);

	/* copy assignment */
	/* FIXME: what if we race with a future update? Corrupted state? */
	state->local      = state->linked;
	/* signal that we are done */
	mb();
	state->local_tick = state->cur_tick;

	if (state->local != current
	    && (is_realtime(current) || is_present(state->local)))
		set_tsk_need_resched(current);
}
Example #11
feather_callback void do_sched_trace_task_switch_away(unsigned long id,
						      unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_AWAY, t);
		if (rec) {
			rec->data.switch_away.when      = now();
			rec->data.switch_away.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}
Example #12
// processes an incoming byte in the midi state machine
void midi_msg_proc(uint8_t b)
{
    bool is_status = ((b & 0x80) != 0);

    // handle status byte
    if (is_status)  {
        if (is_realtime(b)) {
            // real-time, no data, process immediately
            msg_handle_fn(b, NULL, 0);
        } else {
            // non real-time
            idx = 0;
            len = get_len(b);
            status = b;
            if (len == 0) {
                // no data, process immediately
                msg_handle_fn(status, NULL, len);
                status = next_status(status);
            }
        }
        return;
    }

    // handle data byte
    if ((status > 0) && (len > 0)) {
        if (idx < len) {
            // store data byte
            data[idx++] = b;
            if (idx == len) {
                idx = 0;
                // process it
                msg_handle_fn(status, data, len);
                // prepare for next message
                status = next_status(status);
            }
        } else {
            // overrun, should never happen
        }
    } else {
        // unexpected data, ignore it
    }
}
Example #13
static void pfair_task_exit(struct task_struct * t)
{
	unsigned long flags;

	BUG_ON(!is_realtime(t));

	/* Remove task from release or ready queue, and ensure
	 * that it is not the scheduled task for ANY CPU. We
	 * do this blanket check because occasionally when
	 * tasks exit while blocked, the task_cpu of the task
	 * might not be the same as the CPU that the PFAIR scheduler
	 * has chosen for it.
	 */
	raw_spin_lock_irqsave(&pfair_lock, flags);

	TRACE_TASK(t, "RIP, state:%d\n", t->state);
	drop_all_references(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);

	kfree(t->rt_param.pfair);
	t->rt_param.pfair = NULL;
}
Example #14
static int pfair_higher_prio(struct task_struct* first,
			     struct task_struct* second)
{
	return  /* first task must exist */
		first && (
		/* Does the second task exist and is it a real-time task?  If
		 * not, the first task (which is a RT task) has higher
		 * priority.
		 */
		!second || !is_realtime(second)  ||

		/* Is the (subtask) deadline of the first task earlier?
		 * Then it has higher priority.
		 */
		time_before(cur_deadline(first), cur_deadline(second)) ||

		/* Do we have a deadline tie?
		 * Then break by B-bit.
		 */
		(cur_deadline(first) == cur_deadline(second) &&
		 (cur_overlap(first) > cur_overlap(second) ||

		/* Do we have a B-bit tie?
		 * Then break by group deadline.
		 */
		(cur_overlap(first) == cur_overlap(second) &&
		 (time_after(cur_group_deadline(first),
			     cur_group_deadline(second)) ||

		/* Do we have a group deadline tie?
		 * Then break by PID, which are unique.
		 */
		(cur_group_deadline(first) ==
		 cur_group_deadline(second) &&
		 first->pid < second->pid))))));
}
Example #15
static struct task_struct* psnedf_schedule(struct task_struct * prev)
{
	psnedf_domain_t* 	pedf = local_pedf;
	rt_domain_t*		edf  = &pedf->domain;
	struct task_struct*	next;

	int 			out_of_time, sleep, preempt,
				np, exists, blocks, resched;

	raw_readyq_lock(&pedf->slock);

	/* sanity checking
	 * unlike gedf, when a task exits (dead)
	 * pedf->scheduled may be null and prev _is_ realtime
	 */
	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
	BUG_ON(pedf->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists      = pedf->scheduled != NULL;
	blocks      = exists && !is_running(pedf->scheduled);
	out_of_time = exists &&
				  budget_enforced(pedf->scheduled) &&
				  bt_flag_is_set(pedf->scheduled, BTF_BUDGET_EXHAUSTED);
	np 	    = exists && is_np(pedf->scheduled);
	sleep	    = exists && is_completed(pedf->scheduled);
	preempt     = edf_preemption_needed(edf, prev);

	/* If we need to preempt do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* Do budget stuff */
	if (blocks)
		budget_state_machine(prev,on_blocked);
	else if (sleep)
		budget_state_machine(prev,on_sleep);
	else if (preempt)
		budget_state_machine(prev,on_preempt);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(pedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep) && !blocks) {
		job_completion(pedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (pedf->scheduled && !blocks)
			requeue(pedf->scheduled, edf);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	pedf->scheduled = next;
	sched_state_task_picked();
	raw_readyq_unlock(&pedf->slock);

	return next;
}
Example #16
feather_callback void save_timestamp_task(unsigned long event,
					  unsigned long t_ptr)
{
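	/* Tag the timestamp with the task class: real-time (TSK_RT) or best-effort (TSK_BE). */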
	int rt = is_realtime((struct task_struct *) t_ptr);
	__save_timestamp(event, rt ? TSK_RT : TSK_BE);
}
Example #17
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() ||
			       rt_task(task) || is_realtime(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		   (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
Example #18
static struct task_struct* demo_schedule(struct task_struct * prev)
{
        struct demo_cpu_state *local_state = local_cpu_state();

        /* next == NULL means "schedule background work". */
        struct task_struct *next = NULL;

        /* prev's task state */
        int exists, out_of_time, job_completed, self_suspends, preempt, resched;

        raw_spin_lock(&local_state->local_queues.ready_lock);

        BUG_ON(local_state->scheduled && local_state->scheduled != prev);
        BUG_ON(local_state->scheduled && !is_realtime(prev));

        exists = local_state->scheduled != NULL;
        self_suspends = exists && !is_current_running();
        out_of_time = exists && budget_enforced(prev) && budget_exhausted(prev);
        job_completed = exists && is_completed(prev);

        /* preempt is true if task `prev` has lower priority than something on
         * the ready queue. */
        preempt = edf_preemption_needed(&local_state->local_queues, prev);

        /* check all conditions that make us reschedule */
        resched = preempt;

        /* if `prev` suspends, it CANNOT be scheduled anymore => reschedule */
        if (self_suspends) {
                resched = 1;
        }

        /* also check for (in-)voluntary job completions */
        if (out_of_time || job_completed) {
                demo_job_completion(prev, out_of_time);
                resched = 1;
        }

        if (resched) {
                /* First check if the previous task goes back onto the ready
                 * queue, which it does if it did not self_suspend.
                 */
                if (exists && !self_suspends) {
                        demo_requeue(prev, local_state);
                }
                next = __take_ready(&local_state->local_queues);
        } else {
                /* No preemption is required. */
                next = local_state->scheduled;
        }

        local_state->scheduled = next;
        if (exists && prev != next) {
                TRACE_TASK(prev, "descheduled.\n");
        }
        if (next) {
                TRACE_TASK(next, "scheduled.\n");
        }

        /* This is mandatory. It triggers a transition in the LITMUS^RT remote
         * preemption state machine. Call this AFTER the plugin has made a local
         * scheduling decision.
         */
        sched_state_task_picked();

        raw_spin_unlock(&local_state->local_queues.ready_lock);
        return next;
}
Example #19
void QFFmpegPlayer::run()
{
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int64_t stream_start_time;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;
    QMutex *wait_mutex = new QMutex();
    int scan_all_pmts_set = 0;
    int64_t pkt_ts;
    float max_frame_duration;
    int realtime;

    AVFormatContext *formatCtx = avformat_alloc_context();
    AVFrame *frame = av_frame_alloc();

    int video_stream = -1;
    int audio_stream = -1;
    int subtitle_stream = -1;
    int eof = 0;

    formatCtx->interrupt_callback.callback = DecodeInterruptCallback;
    formatCtx->interrupt_callback.opaque = this;

    memset(st_index, -1, sizeof(st_index));
    ret = avformat_open_input(&formatCtx, "D:/Music/mww.mp4",NULL, NULL);
    if (ret < 0) {
        qDebug("avformat_open_input fail!");
        return;
    }

    // av_dump_format(formatCtx, 0, 0, 0);

    av_format_inject_global_side_data(formatCtx);

    orig_nb_streams = formatCtx->nb_streams;

    if (formatCtx->pb)
        formatCtx->pb->eof_reached = 0;

    max_frame_duration = (formatCtx->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;

    realtime = is_realtime(formatCtx);

    for (i = 0; i < formatCtx->nb_streams; i++) {
        AVStream *st = formatCtx->streams[i];
        AVMediaType type = st->codec->codec_type;
        //st->discard = AVDISCARD_ALL;
        if (st_index[type] == -1)
            st_index[type] = i;
    }

    for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
        if (st_index[i] == -1) {
            st_index[i] = INT_MAX;
        }
    }


    st_index[AVMEDIA_TYPE_VIDEO] =
        av_find_best_stream(formatCtx, AVMEDIA_TYPE_VIDEO,
                            st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);

    st_index[AVMEDIA_TYPE_AUDIO] =
        av_find_best_stream(formatCtx, AVMEDIA_TYPE_AUDIO,
                            st_index[AVMEDIA_TYPE_AUDIO],
                            st_index[AVMEDIA_TYPE_VIDEO],
                            NULL, 0);

    st_index[AVMEDIA_TYPE_SUBTITLE] =
        av_find_best_stream(formatCtx, AVMEDIA_TYPE_SUBTITLE,
                            st_index[AVMEDIA_TYPE_SUBTITLE],
                            (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                             st_index[AVMEDIA_TYPE_AUDIO] :
                             st_index[AVMEDIA_TYPE_VIDEO]),
                            NULL, 0);
    video_stream=st_index[AVMEDIA_TYPE_VIDEO];
    audio_stream=st_index[AVMEDIA_TYPE_AUDIO];

    qDebug("video:%d audio:%d subtitle:%d",st_index[AVMEDIA_TYPE_VIDEO],st_index[AVMEDIA_TYPE_AUDIO],st_index[AVMEDIA_TYPE_SUBTITLE]);


    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        AVStream *st = formatCtx->streams[st_index[AVMEDIA_TYPE_VIDEO]];
        AVCodecContext *avctx = st->codec;
        AVRational sar = av_guess_sample_aspect_ratio(formatCtx, st, NULL);
        if (avctx->width)
        {
            qDebug("window size:%d*%d SAR:%d/%d",avctx->width,avctx->height,sar.num,sar.den);
        }

    }


    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        AVCodecContext *mAudioCtx = formatCtx->streams[st_index[AVMEDIA_TYPE_AUDIO]]->codec;

        AVCodec *mAudioCodec = avcodec_find_decoder(mAudioCtx->codec_id);
        if(mAudioCodec->capabilities & CODEC_CAP_DR1)
            mAudioCtx->flags |= CODEC_FLAG_EMU_EDGE;

        ret = avcodec_open2(mAudioCtx, mAudioCodec, NULL);
        if(ret<0)
            return;

        formatCtx->streams[st_index[AVMEDIA_TYPE_AUDIO]]->discard = AVDISCARD_DEFAULT;

        //sample_rate    = mAudioCtx->sample_rate;
        //nb_channels    = mAudioCtx->channels;
        //channel_layout = mAudioCtx->channel_layout;

        // start the audio decoder
        mAudioDecoder->setCodecCtx(mAudioCtx);
        mAudioDecoder->setStream(formatCtx->streams[st_index[AVMEDIA_TYPE_AUDIO]]);
        mAudioDecoder->start();
    }


    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        AVCodecContext *mVideoCtx = formatCtx->streams[st_index[AVMEDIA_TYPE_VIDEO]]->codec;

        AVCodec *mVideoCodec = avcodec_find_decoder(mVideoCtx->codec_id);
        //if(mVideoCodec->capabilities & CODEC_CAP_DR1)
        //    mVideoCodec->flags |= CODEC_FLAG_EMU_EDGE;

        ret = avcodec_open2(mVideoCtx, mVideoCodec, NULL);
        if(ret<0)
            return;
        mVideoDecoder->setFormatCtx(formatCtx);
        mVideoDecoder->setCodecCtx(mVideoCtx);
        mVideoDecoder->setStream(formatCtx->streams[st_index[AVMEDIA_TYPE_VIDEO]]);
        mVideoDecoder->start();

    }


    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {


    }

    while (true)
    {
        if (abort)
        {
            qDebug("++++++++++");
            //this->deleteLater();

            return;
        }

        if(mAudioDecoder->getQueueLength() >10 && mVideoDecoder->getQueueLength() >10)
        {
            QThread::msleep(20);
            continue;
        }

        ret = av_read_frame(formatCtx, pkt);
        //qDebug("av_read_frame:%d",pkt->stream_index);

        if (ret < 0)
        {
            if ((ret == AVERROR_EOF || avio_feof(formatCtx->pb)) && !eof)
            {
                /*if (mVideoState->video_stream >= 0)
                mVideoState->videoq.packet_queue_put_nullpacket(mVideoState->video_stream);
                if (mVideoState->audio_stream >= 0)
                mVideoState->audioq.packet_queue_put_nullpacket(mVideoState->audio_stream);
                if (mVideoState->subtitle_stream >= 0)
                mVideoState->subtitleq.packet_queue_put_nullpacket(mVideoState->subtitle_stream);*/
                eof = 1;
            }
            if (formatCtx->pb && formatCtx->pb->error)
                return;

        }
        else
        {

            eof = 0;

            int64_t stream_start_time = formatCtx->streams[pkt->stream_index]->start_time;
            pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
            //qDebug(".............:%d",pkt->stream_index);

            if (pkt->stream_index == audio_stream )
            {
                mAudioDecoder->addPacket(*pkt);
            }
            else if (pkt->stream_index == video_stream)
                //&& !(formatCtx->streams[pkt->stream_index]->disposition & AV_DISPOSITION_ATTACHED_PIC))
            {
                mVideoDecoder->addPacket(*pkt);


            }
            else if (pkt->stream_index == subtitle_stream)
            {
                av_free_packet(pkt);
            }
            else
            {
                av_free_packet(pkt);
            }

            //qDebug("else end");
        }

    }

}
Example #20
static void pfair_task_block(struct task_struct *t)
{
	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "blocks at %llu, state:%d\n",
		   litmus_clock(), t->state);
}