/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;
	struct pt_regs *regs = get_irq_regs();

	trace_mark(kernel_irq_entry, "irq_id %u kernel_mode %u", irq,
		(regs)?(!user_mode(regs)):(1));

	handle_dynamic_tick(action);

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	trace_mark(kernel_irq_exit, MARK_NOARGS);

	return retval;
}
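
The kernel_irq_entry marker above only produces output once a probe is connected to it. A minimal consumer-side sketch, assuming the markers API of the same kernel generation (marker_probe_register()/marker_probe_unregister() as described in Documentation/markers.txt); the probe body and module wrappers are illustrative assumptions, not part of the example above:

#include <linux/module.h>
#include <linux/marker.h>

static void probe_irq_entry(void *probe_private, void *call_private,
			    const char *fmt, va_list *args)
{
	/* Argument order matches the "irq_id %u kernel_mode %u" format. */
	unsigned int irq_id = va_arg(*args, unsigned int);
	unsigned int kernel_mode = va_arg(*args, unsigned int);

	(void)irq_id;
	(void)kernel_mode;	/* record or relay the event here */
}

static int __init irq_probe_init(void)
{
	return marker_probe_register("kernel_irq_entry",
				     "irq_id %u kernel_mode %u",
				     probe_irq_entry, NULL);
}

static void __exit irq_probe_exit(void)
{
	marker_probe_unregister("kernel_irq_entry",
				probe_irq_entry, NULL);
}

module_init(irq_probe_init);
module_exit(irq_probe_exit);
MODULE_LICENSE("GPL");
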
Example #2
static void xnsched_watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnpod_current_sched();
	struct xnthread *thread = sched->curr;

	if (likely(xnthread_test_state(thread, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNSHADOW) &&
	    !xnthread_amok_p(thread)) {
		trace_mark(xn_nucleus, watchdog_signal,
			   "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- signaling runaway thread "
			 "'%s'\n", xnthread_name(thread));
		xnthread_set_info(thread, XNAMOK | XNKICKED);
		xnshadow_send_sig(thread, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		trace_mark(xn_nucleus, watchdog, "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
			 xnthread_name(thread));
		xnpod_delete_thread(thread);
	}
	xnsched_reset_watchdog(sched);
}
Example #3
static int my_open(struct inode *inode, struct file *file)
{
	int i;

	trace_mark(samples, subsystem_event, "integer %d string %s", 123,
		"example string");
	for (i = 0; i < 10; i++)
		trace_mark(samples, subsystem_eventb, MARK_NOARGS);
	return -EPERM;
}
Example #4
int
main (void)
{
  /* Some code to make sure that breakpoints on `main' and `ust/bar' marker
     are set at different addresses.  */
  int a = 0;
  int b = a;

  trace_mark(ust, bar, "str %s", "FOOBAZ");
  trace_mark(ust, bar2, "number1 %d number2 %d", 53, 9800);

  end ();
  return 0;
}
Example #5
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}
	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
Example #6
int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
{
	struct xnpholder *holder;
	int status;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
		   synch, reason);

	status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;

	while ((holder = getpq(&synch->pendq)) != NULL) {
		struct xnthread *sleeper = link2thread(holder, plink);
		xnthread_set_info(sleeper, reason);
		sleeper->wchan = NULL;
		xnpod_resume_thread(sleeper, XNPEND);
	}

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		xnsynch_clear_boost(synch, synch->owner);
		status = XNSYNCH_RESCHED;
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return status;
}
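
The XNSYNCH_DONE/XNSYNCH_RESCHED return value is meant to drive a rescheduling decision in the caller. A hedged caller-side sketch (my_synch is a placeholder wait object):

/* Flush all waiters with a "resource removed" notice, then
 * reschedule only if somebody was actually readied. */
if (xnsynch_flush(&my_synch, XNRMID) == XNSYNCH_RESCHED)
	xnpod_schedule();
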
Example #7
xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
			   xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread();
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else /* i.e. priority-sorted */
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
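
Callers typically map the returned information bits onto error codes. A hedged caller-side sketch in the style of the Xenomai skins; my_synch, my_timeout and the specific errno mapping are assumptions:

int err = 0;
xnflags_t info = xnsynch_sleep_on(&my_synch, my_timeout, XN_RELATIVE);
if (info & XNRMID)
	err = -EIDRM;		/* wait object deleted while sleeping */
else if (info & XNTIMEO)
	err = -ETIMEDOUT;	/* timeout elapsed before wakeup */
else if (info & XNBREAK)
	err = -EINTR;		/* sleep was forcibly unblocked */
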
Example #8
/*!
 * \fn int xnintr_detach (xnintr_t *intr)
 * \brief Detach an interrupt object.
 *
 * Detach an interrupt object previously attached by
 * xnintr_attach(). After this operation is completed, no more IRQs
 * are directed to the object's ISR, but the interrupt object itself
 * remains valid. A detached interrupt object can be attached again by
 * a subsequent call to xnintr_attach().
 *
 * @param intr The descriptor address of the interrupt object to
 * detach.
 *
 * @return 0 is returned on success. Otherwise:
 *
 * - -EINVAL is returned if a low-level error occurred while detaching
 * the interrupt, or if the interrupt object was not attached. In both
 * cases, no action is performed.
 *
 * @note The caller <b>must not</b> hold nklock when invoking this service;
 * doing so would cause deadlocks.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Kernel-based task
 *
 * Rescheduling: never.
 */
int xnintr_detach(xnintr_t *intr)
{
	int ret;

	trace_mark(xn_nucleus, irq_detach, "irq %u", intr->irq);

	down(&intrlock);

	if (!__testbits(intr->flags, XN_ISR_ATTACHED)) {
		ret = -EINVAL;
		goto out;
	}

	__clrbits(intr->flags, XN_ISR_ATTACHED);

	ret = xnintr_irq_detach(intr);
	if (ret)
		goto out;

	xnintr_stat_counter_dec();
 out:
	up(&intrlock);

	return ret;
}
Example #9
void xntbase_start(xntbase_t *base)
{
	xnticks_t start_date;
	spl_t s;

	if (base == &nktbase || xntbase_enabled_p(base))
		return;

	trace_mark(xn_nucleus, tbase_start, "base %s", base->name);

	xnlock_get_irqsave(&nklock, s);

	start_date = xnarch_get_cpu_time();

	/* Only synchronise non-isolated time bases on the master base. */
	if (!xntbase_isolated_p(base)) {
		base->wallclock_offset = xntbase_ns2ticks(base,
			start_date + nktbase.wallclock_offset);
		__setbits(base->status, XNTBSET);
	}

	start_date += base->tickvalue;
	__setbits(base->status, XNTBRUN);

	xnlock_put_irqrestore(&nklock, s);

	xntslave_start(base2slave(base), start_date, base->tickvalue);
}
Example #10
void trace_mark_begin(
		const char* title,
		const char* args) {
	if (trace_mark_enabled()) {
		trace_mark('B', current->tgid, title, args, current->comm);
	}
}
Example #11
int xnintr_attach(xnintr_t *intr, void *cookie)
{
	int ret;

	trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
		   intr->irq, intr->name);

	intr->cookie = cookie;
	memset(&intr->stat, 0, sizeof(intr->stat));

#ifdef CONFIG_SMP
	xnarch_set_irq_affinity(intr->irq, nkaffinity);
#endif /* CONFIG_SMP */

	down(&intrlock);

	if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
		ret = -EBUSY;
		goto out;
	}

	ret = xnintr_irq_attach(intr);
	if (ret)
		goto out;

	__setbits(intr->flags, XN_ISR_ATTACHED);
	xnintr_stat_counter_inc();
out:
	up(&intrlock);

	return ret;
}
Example #12
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);
	trace_mark(xn_nucleus, synch_wakeup_all,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);
	xnpod_resume_thread(thread, XNPEND);

	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
Example #13
struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	holder = getpq(&synch->pendq);
	if (holder) {
		thread = link2thread(holder, plink);
		thread->wchan = NULL;
		trace_mark(xn_nucleus, synch_wakeup_one,
			   "thread %p thread_name %s synch %p",
			   thread, xnthread_name(thread), synch);
		xnpod_resume_thread(thread, XNPEND);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return thread;
}
Example #14
/*!
 * \fn int xnintr_detach (xnintr_t *intr)
 * \brief Detach an interrupt object.
 *
 * Detach an interrupt object previously attached by
 * xnintr_attach(). After this operation is completed, no more IRQs
 * are directed to the object's ISR, but the interrupt object itself
 * remains valid. A detached interrupt object can be attached again by
 * a subsequent call to xnintr_attach().
 *
 * @param intr The descriptor address of the interrupt object to
 * detach.
 *
 * @return 0 is returned on success. Otherwise:
 *
 * - -EINVAL is returned if a low-level error occurred while detaching
 * the interrupt, or if the interrupt object was not attached. In both
 * cases, no action is performed.
 *
 * @note The caller <b>must not</b> hold nklock when invoking this service;
 * doing so would cause deadlocks.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Kernel-based task
 *
 * Rescheduling: never.
 */
int xnintr_detach(xnintr_t *intr)
{
	int ret;
	spl_t s;

	trace_mark(xn_nucleus, irq_detach, "irq %u", intr->irq);

	xnlock_get_irqsave(&intrlock, s);

	if (!__testbits(intr->flags, XN_ISR_ATTACHED)) {
		ret = -EINVAL;
		goto out;
	}

	__clrbits(intr->flags, XN_ISR_ATTACHED);

	ret = xnintr_irq_detach(intr);
	if (ret)
		goto out;

	xnintr_stat_counter_dec();
 out:
	xnlock_put_irqrestore(&intrlock, s);

	return ret;
}
Example #15
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
Example #16
void probe_block_rq_requeue(void *data, struct request_queue *q, struct request *rq)
{
	int rw = rq->cmd_flags & 0x03;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		trace_mark_tp(block, rq_requeue_pc, block_rq_requeue,
			probe_block_rq_requeue,
			"data_len %u rw %d errors %d",
			blk_rq_bytes(rq), rw, rq->errors);
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both rq_requeue_pc and
		 * rq_requeue_fs markers to have the rq_requeue_fs marker
		 * enabled.
		 */
		trace_mark(block, rq_requeue_fs,
			"hard_sector %llu "
			"rw %d errors %d", (unsigned long long)blk_rq_pos(rq),
			rw, rq->errors);
	}
}
Example #17
void xntimer_freeze(void)
{
	int nr_cpus, cpu;
	spl_t s;

	trace_mark(xn_nucleus, timer_freeze, MARK_NOARGS);

	xnlock_get_irqsave(&nklock, s);

	nr_cpus = xnarch_num_online_cpus();

	for (cpu = 0; cpu < nr_cpus; cpu++) {

		xntimerq_t *timerq = &xnpod_sched_slot(cpu)->timerqueue;
		xntimerh_t *holder;

		while ((holder = xntimerq_head(timerq)) != NULL) {
			__setbits(aplink2timer(holder)->status, XNTIMER_DEQUEUED);
			xntimerq_remove(timerq, holder);
		}

		/* Dequeuing all timers from the master time base
		 * freezes all slave time bases the same way, so there
		 * is no need to handle anything more here. */
	}

	xnlock_put_irqrestore(&nklock, s);
}
Example #18
void xnintr_affinity(xnintr_t *intr, xnarch_cpumask_t cpumask)
{
	trace_mark(xn_nucleus, irq_affinity, "irq %u %lu",
		   intr->irq, *(unsigned long *)&cpumask);

	xnarch_set_irq_affinity(intr->irq, cpumask);
}
Example #19
static notrace void
ltt_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)
			&& !atomic_read(&system_trace_refcount)))
		return;
	trace_mark(ftrace_entry, "ip 0x%lX parent_ip 0x%lX", ip, parent_ip);
}
Example #20
void xntbase_stop(xntbase_t *base)
{
	if (base == &nktbase || !xntbase_enabled_p(base))
		return;

	xntslave_stop(base2slave(base));
	__clrbits(base->status, XNTBRUN | XNTBSET);

	trace_mark(xn_nucleus, tbase_stop, "base %s", base->name);
}
Example #21
int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	int ret = 0;

	J_ASSERT(ext4_journal_current_handle() == NULL);

	trace_mark(ext4_sync_file, "dev %s datasync %d ino %ld parent %ld",
		   inode->i_sb->s_id, datasync, inode->i_ino,
		   dentry->d_parent->d_inode->i_ino);

	/*
	 * data=writeback:
	 *  The caller's filemap_fdatawrite()/wait will sync the data.
	 *  sync_inode() will sync the metadata
	 *
	 * data=ordered:
	 *  The caller's filemap_fdatawrite() will write the data and
	 *  sync_inode() will write the inode if it is dirty.  Then the caller's
	 *  filemap_fdatawait() will wait on the pages.
	 *
	 * data=journal:
	 *  filemap_fdatawrite won't do anything (the buffers are clean).
	 *  ext4_force_commit will write the file data into the journal and
	 *  will wait on that.
	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
	 *  (they were dirtied by commit).  But that's OK - the blocks are
	 *  safe in-journal, which is all fsync() needs to ensure.
	 */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		goto out;
	}

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	/*
	 * The VFS has written the file data.  If the inode is unaltered
	 * then we need not start a commit.
	 */
	if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 0, /* sys_fsync did this */
		};
		ret = sync_inode(inode, &wbc);
		if (journal && (journal->j_flags & JBD2_BARRIER))
			blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
	}
out:
	return ret;
}
Example #22
void trace_mark_finish(
		unsigned char* name,
		unsigned long cookie) {
	if (trace_mark_enabled()) {
		char args[TRACE_MARK_STR_LEN];

		snprintf(args, TRACE_MARK_STR_LEN, "%lu", cookie);

		trace_mark('F', current->tgid, name, args, current->comm);
	}
}
Example #23

static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
			     u32 pid)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap, **last_primary;

	ASSERT_RTNL();

	if (!ifa->ifa_local) {
		inet_free_ifa(ifa);
		return 0;
	}

	ifa->ifa_flags &= ~IFA_F_SECONDARY;
	last_primary = &in_dev->ifa_list;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
		    ifa->ifa_scope <= ifa1->ifa_scope)
			last_primary = &ifa1->ifa_next;
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa)) {
			if (ifa1->ifa_local == ifa->ifa_local) {
				inet_free_ifa(ifa);
				return -EEXIST;
			}
			if (ifa1->ifa_scope != ifa->ifa_scope) {
				inet_free_ifa(ifa);
				return -EINVAL;
			}
			ifa->ifa_flags |= IFA_F_SECONDARY;
		}
		trace_mark(net_insert_ifa_ipv4, "label %s address #4u%lu",
			ifa->ifa_label,
			(unsigned long)ifa->ifa_address);
	}

	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
		net_srandom(ifa->ifa_local);
		ifap = last_primary;
	}

	ifa->ifa_next = *ifap;
	*ifap = ifa;

	/* Send message first, then call notifier.
	   Notifier will trigger FIB update, so that
	   listeners of netlink will know about new ifaddr */
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	return 0;
}
Example #24

asmlinkage int printk(const char *fmt, ...)
{
    va_list args;
    int r;

    va_start(args, fmt);
    trace_mark(kernel_printk, "ip %p", __builtin_return_address(0));
    r = vprintk(fmt, args);
    va_end(args);

    return r;
}
Example #25
int xntimer_start_aperiodic(xntimer_t *timer,
			    xnticks_t value, xnticks_t interval,
			    xntmode_t mode)
{
	xnticks_t date, now;

	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u",
		   timer, xntimer_base(timer)->name, value, interval, mode);

	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_aperiodic(timer);

	now = xnarch_get_cpu_tsc();

	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
	switch (mode) {
	case XN_RELATIVE:
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		date = xnarch_ns_to_tsc(value) + now;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		value -= nktbase.wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		date = xnarch_ns_to_tsc(value);
		if ((xnsticks_t)(date - now) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntimerh_date(&timer->aplink) = date;

	timer->interval = XN_INFINITE;
	if (interval != XN_INFINITE) {
		timer->interval = xnarch_ns_to_tsc(interval);
		timer->pexpect = date;
		__setbits(timer->status, XNTIMER_PERIODIC);
	}

	xntimer_enqueue_aperiodic(timer);
	if (xntimer_heading_p(timer)) {
		if (xntimer_sched(timer) != xnpod_current_sched())
			xntimer_next_remote_shot(xntimer_sched(timer));
		else
			xntimer_next_local_shot(xntimer_sched(timer));
	}

	return 0;
}
Example #26
void trace_mark_int(
		unsigned long ppid,
		const char* name,
		unsigned long value,
		const char* category) {
	if (trace_mark_enabled()) {
		char args[TRACE_MARK_STR_LEN];

		snprintf(args, TRACE_MARK_STR_LEN, "%lu", value);

		trace_mark('C', ppid, name, args, category);
	}
}
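
The three helpers above (Examples #10, #22 and #26) emit what look like Android systrace-style 'B' (begin), 'F' (finish) and 'C' (counter) events. A hedged usage sketch; the section name, counter name, category, cookie and counter value are made-up placeholders:

unsigned long pending = 3;	/* arbitrary counter value */

trace_mark_begin("render_frame", "");
/* ... do the work being measured ... */
trace_mark_int(current->tgid, "frames_pending", pending, "gfx");
/* the name parameter is declared unsigned char *, hence the cast */
trace_mark_finish((unsigned char *)"render_frame", 0);
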
Example #27
/* Must be called with nklock locked, interrupts off. */
void xnsched_zombie_hooks(struct xnthread *thread)
{
	XENO_BUGON(NUCLEUS, thread->sched->zombie != NULL);
	thread->sched->zombie = thread;

	trace_mark(xn_nucleus, sched_finalize,
		   "thread_out %p thread_out_name %s",
		   thread, xnthread_name(thread));

	xnpod_run_hooks(&nkpod->tdeleteq, thread, "DELETE");

	xnsched_forget(thread);
}
Example #28
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
Example #29
/**
 * Migrate a timer.
 *
 * This call migrates a timer to another cpu. In order to avoid pathological
 * cases, it must be called from the CPU to which @a timer is currently
 * attached.
 *
 * @param timer The address of the timer object to be migrated.
 *
 * @param sched The address of the destination CPU xnsched_t structure.
 *
 * @retval -EINVAL if @a timer is queued on a CPU other than the current one;
 * @retval 0 otherwise.
 *
 */
int xntimer_migrate(xntimer_t *timer, xnsched_t *sched)
{
	int err = 0;
	int queued;
	spl_t s;

	trace_mark(xn_nucleus, timer_migrate, "timer %p cpu %d",
		   timer, (int)xnsched_cpu(sched));

	xnlock_get_irqsave(&nklock, s);

	if (sched == timer->sched)
		goto unlock_and_exit;

	queued = !testbits(timer->status, XNTIMER_DEQUEUED);

	/* Avoid the pathological case where the timer interrupt has not
	   occurred yet for the current date on the timer source CPU,
	   whereas we are trying to migrate the timer to a CPU where the
	   timer interrupt has already occurred. This would not be a
	   problem in aperiodic mode. */

	if (queued) {

		if (timer->sched != xnpod_current_sched()) {
			err = -EINVAL;
			goto unlock_and_exit;
		}

#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->stop_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_stop_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
	}

	timer->sched = sched;

	if (queued)
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->move_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_move_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
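
A hedged usage sketch; my_timer and dst_cpu are placeholders, and per the constraint documented above the call is assumed to run on the CPU the timer is currently queued on:

int err = xntimer_migrate(&my_timer, xnpod_sched_slot(dst_cpu));
if (err == -EINVAL)
	/* the timer is queued on some other CPU; migrate from there */
	xnprintf("timer is queued on another CPU\n");
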
Example #30
void xntimer_stop_aperiodic(xntimer_t *timer)
{
	int heading;

	trace_mark(xn_nucleus, timer_stop, "timer %p", timer);

	heading = xntimer_heading_p(timer);
	xntimer_dequeue_aperiodic(timer);

	/* If we removed the heading timer, reprogram the next shot if
	   any. If the timer was running on another CPU, let it tick. */
	if (heading && xntimer_sched(timer) == xnpod_current_sched())
		xntimer_next_local_shot(xntimer_sched(timer));
}