Example No. 1
void PracticeBCHandling::user_init_dirichlet_bcs( libMesh::FEMSystem* /*system*/,
                                                   libMesh::DofMap& dof_map,
                                                   BoundaryID bc_id,
                                                   BCType /*bc_type*/ ) const
{
  std::set<BoundaryID> dbc_ids;
  dbc_ids.insert(bc_id);

  std::vector<VariableIndex> dbc_vars;
  dbc_vars.push_back(_c_var);
  if( _has_zc )
    dbc_vars.push_back(_zc_var);
  if( _has_fc )
    dbc_vars.push_back(_fc_var);
  if( _has_auxc )
    dbc_vars.push_back(_aux_c_var);
  if( _has_auxzc )
    dbc_vars.push_back(_aux_zc_var);
  if( _has_auxfc )
    dbc_vars.push_back(_aux_fc_var);

  libMesh::ConstFunction<libMesh::Number> c_func(this->get_dirichlet_bc_value(bc_id));

  libMesh::DirichletBoundary c_dbc( dbc_ids, dbc_vars, &c_func );

  dof_map.add_dirichlet_boundary( c_dbc );
}
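As a point of comparison, here is a minimal sketch of how a homogeneous version of the same boundary condition could be registered, using libMesh's built-in ZeroFunction instead of a ConstFunction. The helper name and the BoundaryID/VariableIndex typedefs are assumptions carried over from the example above, not part of the original code.

// Hypothetical helper, not part of the original class: pin one variable to
// zero on a boundary using libMesh's built-in ZeroFunction.
#include <set>
#include <vector>
#include "libmesh/zero_function.h"
#include "libmesh/dirichlet_boundaries.h"

void add_homogeneous_dirichlet_bc( libMesh::DofMap& dof_map,
                                   BoundaryID bc_id,
                                   VariableIndex var )
{
  std::set<BoundaryID> dbc_ids;
  dbc_ids.insert(bc_id);

  std::vector<VariableIndex> dbc_vars;
  dbc_vars.push_back(var);

  // add_dirichlet_boundary() stores its own copy of the boundary object,
  // which is why these stack locals (like c_func above) may safely go out
  // of scope after registration.
  libMesh::ZeroFunction<libMesh::Number> zero;
  libMesh::DirichletBoundary zero_dbc( dbc_ids, dbc_vars, &zero );
  dof_map.add_dirichlet_boundary( zero_dbc );
}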
Example No. 2
static void
sctp_handle_tick(int delta)
{
	sctp_os_timer_t *c;
	void (*c_func)(void *);
	void *c_arg;

	SCTP_TIMERQ_LOCK();
	/* update our tick count */
	ticks += delta;
	c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
	while (c) {
		if (c->c_time <= ticks) {
			sctp_os_timer_next = TAILQ_NEXT(c, tqe);
			TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
			c_func = c->c_func;
			c_arg = c->c_arg;
			c->c_flags &= ~SCTP_CALLOUT_PENDING;
			SCTP_TIMERQ_UNLOCK();
			c_func(c_arg);
			SCTP_TIMERQ_LOCK();
			c = sctp_os_timer_next;
		} else {
			c = TAILQ_NEXT(c, tqe);
		}
	}
	sctp_os_timer_next = NULL;
	SCTP_TIMERQ_UNLOCK();
}
void *user_sctp_timer_iterate(void * threadname)
{
    sctp_os_timer_t *c;
    void (*c_func)(void *);
    void *c_arg;
    sctp_os_timer_t *sctp_os_timer_next = NULL;
    /*
     * The MSEC_TO_TICKS conversion depends on hz. The to_ticks in
     * sctp_os_timer_start also depends on hz. E.g. if hz=1000 then
     * for multiple INIT the to_ticks is 2000, 4000, 8000, 16000, 32000, 60000
     *  and further to_ticks level off at 60000 i.e. 60 seconds.
     * If hz=100 then for multiple INIT the to_ticks are 200, 400, 800 and so-on.
     */
    int time_to_ticks = MSEC_TO_TICKS(TIMEOUT_INTERVAL); 
    __suseconds_t timeout_interval = TIMEOUT_INTERVAL  * 1000; /* in microseconds */

    struct timeval timeout;
    struct timeval *timeout_ptr;
    fd_set read_fds;
    int fd = 23; /* what should this value be? */

    while (1) {
        /* select() modifies both the fd set and the timeout,
         * so re-arm them on every iteration. */
        FD_ZERO(&read_fds);
        FD_SET(fd, &read_fds);
        timeout.tv_sec  = 0;
        timeout.tv_usec = timeout_interval;
        timeout_ptr = &timeout;

        select(FD_SIZE, &read_fds, NULL, NULL, timeout_ptr);

        /* update our tick count */
        uticks += time_to_ticks;
        SCTP_TIMERQ_LOCK();
        c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
        while (c) {
            if (c->c_time <= uticks) {
                sctp_os_timer_next = TAILQ_NEXT(c, tqe);
                TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
                c_func = c->c_func;
                c_arg = c->c_arg;
                c->c_flags &= ~SCTP_CALLOUT_PENDING;
                SCTP_TIMERQ_UNLOCK();
                c_func(c_arg);
                SCTP_TIMERQ_LOCK();
                c = sctp_os_timer_next;
            } else {
                c = TAILQ_NEXT(c, tqe);
            }
        }

        SCTP_TIMERQ_UNLOCK();
    }
    return NULL; /* not reached */
}
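As the comment in user_sctp_timer_iterate notes, the tick values scale with hz: ticks = msec * hz / 1000. The standalone sketch below reproduces the numbers quoted in that comment; the MSEC_TO_TICKS macro here is a local stand-in for illustration, not the definition from the SCTP headers.

#include <stdio.h>

/* Local stand-in; the real macro lives in the SCTP user-space headers. */
#define MSEC_TO_TICKS(ms, hz) (((ms) * (hz)) / 1000)

int main(void)
{
    /* RTO doubling for repeated INIT: 2s, 4s, 8s, ... capped at 60s. */
    int rto_msec[] = { 2000, 4000, 8000, 16000, 32000, 60000 };
    int hzs[] = { 1000, 100 };

    for (int h = 0; h < 2; h++) {
        printf("hz = %d:", hzs[h]);
        for (int i = 0; i < 6; i++)
            printf(" %d", MSEC_TO_TICKS(rto_msec[i], hzs[h]));
        printf("\n");
    }
    return 0;
}

With hz = 1000 this prints 2000 4000 8000 16000 32000 60000 ticks, and with hz = 100 it prints 200 400 800 1600 3200 6000, matching the comment's examples.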
Example No. 4
/**
 * sensor_convert_from_raw: convert raw sensor values to interpreted values.
 * It can be used to convert raw data, raw thresholds or raw hysteresis of a sensor
 * to human readable format.
 *
 * @sensor: sensor.
 * @val: raw sensor value.
 * @result: the interpreted value
 *
 * Return value: SA_OK if conversion possible
 **/
SaErrorT 
sensor_convert_from_raw (SaHpiSensorRecT *sensor,
                         SaHpiUint32T     val,
                         SaHpiFloat32T   *result)
{
        SaHpiFloat32T m, b, b_exp, r_exp, fval;
        linearizer c_func;
        SaHpiSensorFactorsT *factors = &sensor->DataFormat.Factors;

        if ( factors->Linearization == SAHPI_SL_NONLINEAR )
                c_func = c_linear;
        else if ( factors->Linearization <= 11 )
                c_func = linearize[factors->Linearization];
        else
                return SA_ERR_HPI_INVALID_DATA;

        val &= 0xff;

        m     = (SaHpiFloat32T)factors->M_Factor;
        b     = (SaHpiFloat32T)factors->B_Factor;
        r_exp = (SaHpiFloat32T)factors->ExpR;
        b_exp = (SaHpiFloat32T)factors->ExpB;

        switch( sensor->DataFormat.SignFormat ) {
        case SAHPI_SDF_UNSIGNED:
                fval = (SaHpiFloat32T)val;
                break;

        case SAHPI_SDF_1S_COMPLEMENT:
        {
                /* val is unsigned, so keep the sign-extended result in a
                 * signed temporary; otherwise the < 0 test can never fire. */
                SaHpiInt32T sval = sign_extend( val, 8 );
                if ( sval < 0 )
                        sval += 1;

                fval = (SaHpiFloat32T)sval;
                break;
        }

        case SAHPI_SDF_2S_COMPLEMENT:
                fval = (SaHpiFloat32T)sign_extend( val, 8 );
                break;

        default:
                return SA_ERR_HPI_INVALID_DATA;
        }

        *result = c_func(   ((m * fval) + (b * pow(10, b_exp)))
                          * pow(10, r_exp) );

        return SA_OK;
}
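For reference, the conversion applied above is result = (M * raw + B * 10^ExpB) * 10^ExpR, optionally passed through one of the linearization functions. Below is a hedged usage sketch: the sensor record contents, the raw value 100, and the helper name example_conversion are illustrative assumptions (relying on the same SaHpi A-spec headers this file compiles against), not code from the original project.

#include <stdio.h>
#include <string.h>

/* Illustrative only: a linear, unsigned 8-bit sensor with M=2, B=5,
 * ExpB=0, ExpR=-1, i.e. value = (2*raw + 5) / 10. */
static void example_conversion(void)
{
        SaHpiSensorRecT rec;
        SaHpiFloat32T value;

        memset(&rec, 0, sizeof(rec));
        rec.DataFormat.Factors.Linearization = SAHPI_SL_LINEAR;
        rec.DataFormat.Factors.M_Factor = 2;
        rec.DataFormat.Factors.B_Factor = 5;
        rec.DataFormat.Factors.ExpB = 0;
        rec.DataFormat.Factors.ExpR = -1;
        rec.DataFormat.SignFormat = SAHPI_SDF_UNSIGNED;

        if (sensor_convert_from_raw(&rec, 100, &value) == SA_OK)
                printf("interpreted reading: %f\n", value);  /* 20.5 */
}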
Example No. 5
 void dlsodar_constraint_compat(const int *neq, const double *t_, const double *y, const int *ng, double *gout){
   c_func(gout, *t_, y, data);
 }
Example No. 6
void EKF::correct(VectorXd state)
{
    c_func(error_out,state);
}
Example No. 7
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	void (*c_func)(void *);
	void *c_arg;
	int mpsafe = 1;

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & callwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}
			if (c->c_flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we 
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			sc->running = c;
			c_func = c->c_func;
			c_arg = c->c_arg;
			c->c_func = NULL;
			KKASSERT(c->c_flags & CALLOUT_DID_INIT);
			c->c_flags &= ~CALLOUT_PENDING;
			crit_exit();
			c_func(c_arg);
			crit_enter();
			sc->running = NULL;
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
Example No. 8
		;
		goto err;
	}
	sprintf(did, "%s%d", (strlen(ID) < 1) ? "eicon" : ID, pci_cards);
	if (
Example No. 9
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
#ifdef MAXHE_TODO
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#else
				c_func(c_arg);
#endif // MAXHE_TODO
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					mtx_unlock_spin(&callout_lock);
					wakeup(&callout_wait);
					mtx_lock_spin(&callout_lock);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
#ifdef MAXHE_TODO
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif // MAXHE_TODO
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
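Every softclock variant in these examples indexes the callout wheel with bucket = &callwheel[curticks & callwheelmask], which relies on the wheel size being a power of two so that the mask is equivalent to a modulo. A small standalone sketch of that bucket hashing follows; the target size 256 is an arbitrary assumption, not a value taken from the kernels above.

#include <stdio.h>

int main(void)
{
	/* Round the wheel size up to a power of two, as the callout setup
	 * code does at boot, so (ticks & mask) picks a bucket in O(1). */
	int callwheelsize = 1;
	while (callwheelsize < 256)        /* 256 is an arbitrary target */
		callwheelsize <<= 1;
	int callwheelmask = callwheelsize - 1;

	for (int tick = 1020; tick < 1030; tick++)
		printf("c_time %d -> bucket %d\n", tick, tick & callwheelmask);
	return 0;
}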
Example No. 10
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	int c_flags;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	{
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	c_func(c_arg);
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);

	CC_LOCK(cc);
	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
	cc->cc_curr = NULL;

	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}
Example No. 11
void
COT_clock(struct callout_block *cb)
{
	struct callout *c;
	struct callout_tailq *bucket;
	clock_t curticks;
	int steps;      /* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS	100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;

	while (cb->softticks != cb->ticks) {
		cb->softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cb->softticks;
		bucket = &cb->callwheel[curticks & cb->callwheelmask];
		c = VTAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = VTAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cb->nextsoftcheck = c;
					c = cb->nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;

				cb->nextsoftcheck = VTAILQ_NEXT(c, c_links.tqe);
				VTAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_flags = (c->c_flags & ~CALLOUT_PENDING);
				mpcalls++;
				if (callout_debug)
					fprintf(stdout,
					    "callout mpsafe %p func %p "
					    "arg %p", c, c_func, c_arg);
				c_func(c_arg);
				if (callout_debug)
					fprintf(stdout,
					    "callout %p finished", c);
				steps = 0;
				c = cb->nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cb->nextsoftcheck = NULL;
}
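The avg_* += (x * 1000 - avg_*) >> 8 updates above maintain exponentially weighted running averages in fixed point: values are stored in thousandths and each new sample is blended in with weight 1/256, which avoids floating point in the clock path. A minimal standalone sketch of the update rule, using made-up sample values:

#include <stdio.h>

int main(void)
{
	int avg_depth = 0;               /* stored in thousandths */
	int samples[] = { 3, 3, 4, 10, 2, 3, 3, 3 };

	for (int i = 0; i < 8; i++) {
		/* new_avg = old_avg + (sample*1000 - old_avg) / 256 */
		avg_depth += (samples[i] * 1000 - avg_depth) >> 8;
		printf("sample %2d -> avg_depth %d (%.3f)\n",
		    samples[i], avg_depth, avg_depth / 1000.0);
	}
	return 0;
}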
Example No. 12
void c_func(void);   /* provided by another translation unit */

int main() {
  c_func();
  return 0;
}
Example No. 13
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	struct callout slotimer;
	int mpsafe = 1;
	int flags;

	/*
	 * Setup pcpu slow clocks which we want to run from the callout
	 * thread.
	 */
	callout_init_mp(&slotimer);
	callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	/*
	 * Loop critical section against ipi operations to this cpu.
	 */
	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & cwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}

			flags = c->c_flags;
			if (flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we 
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}

			/*
			 * Queue protection only exists while we hold the
			 * critical section uninterrupted.
			 *
			 * Adjust sc->next when removing (c) from the queue,
			 * note that an IPI on this cpu may make further
			 * adjustments to sc->next.
			 */
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			KASSERT((c->c_flags & CALLOUT_ARMED) &&
				(c->c_flags & CALLOUT_PENDING) &&
				CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
				mycpu->gd_cpuid,
				("callout %p: bad flags %08x", c, c->c_flags));

			/*
			 * Once CALLOUT_PENDING is cleared, sc->running
			 * protects the callout structure's existence but
			 * only until we call c_func().  A callout_stop()
			 * or callout_reset() issued from within c_func()
			 * will not block.  The callout can also be kfree()d
			 * by c_func().
			 *
			 * We set EXECUTED before calling c_func() so a
			 * callout_stop() issued from within c_func() returns
			 * the correct status.
			 */
			if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
			    (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
				void (*c_func)(void *);
				void *c_arg;
				struct lock *c_lk;
				int error;

				/*
				 * NOTE: sc->running must be set prior to
				 *	 CALLOUT_PENDING being cleared to
				 *	 avoid missed CANCELs and *_stop()
				 *	 races.
				 */
				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_lk = c->c_lk;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				error = lockmgr(c_lk, LK_EXCLUSIVE |
						      LK_CANCELABLE);
				if (error == 0) {
					atomic_set_int(&c->c_flags,
						       CALLOUT_EXECUTED);
					crit_exit();
					c_func(c_arg);
					crit_enter();
					lockmgr(c_lk, LK_RELEASE);
				}
			} else if (flags & CALLOUT_ACTIVE) {
				void (*c_func)(void *);
				void *c_arg;

				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				atomic_set_int(&c->c_flags, CALLOUT_EXECUTED);
				crit_exit();
				c_func(c_arg);
				crit_enter();
			} else {
				flags = callout_unpend_disarm(c);
			}

			/*
			 * Read and clear sc->running.  If bit 0 was set,
			 * a callout_stop() is likely blocked waiting for
			 * the callback to complete.
			 *
			 * The sigclear above also cleared CALLOUT_WAITING
			 * and returns the contents of flags prior to clearing
			 * any bits.
			 *
			 * Interlock wakeup any _stop's waiting on us.  Note
			 * that once c_func() was called, the callout
			 * structure (c) pointer may no longer be valid.  It
			 * can only be used for the wakeup.
			 */
			if ((atomic_readandclear_ptr(&sc->running) & 1) ||
			    (flags & CALLOUT_WAITING)) {
				wakeup(c);
			}
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}

	/*
	 * Don't leave us holding the MP lock when we deschedule ourselves.
	 */
	if (mpsafe == 0) {
		mpsafe = 1;
		rel_mplock();
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
Example No. 14
#define FSCK_FATAL -1
#define FSCK_INFO -2

#define FOREACH_MSG_ID(FUNC) \
	/* fatal errors */ \
	FUNC(NUL_IN_HEADER, FATAL) \
	FUNC(UNTERMINATED_HEADER, FATAL) \
	/* errors */ \
	FUNC(BAD_DATE, ERROR) \
	FUNC(BAD_DATE_OVERFLOW, ERROR) \
	FUNC(BAD_EMAIL, ERROR) \
	FUNC(BAD_NAME, ERROR) \
	FUNC(BAD_OBJECT_SHA1, ERROR) \
	FUNC(BAD_PARENT_SHA1, ERROR) \
	FUNC(BAD_TAG_OBJECT, ERROR) \
	FUNC(BAD_TIMEZONE, ERROR) \
	FUNC(BAD_TREE, ERROR) \
	FUNC(BAD_TREE_SHA1, ERROR) \
	FUNC(BAD_TYPE, ERROR) \
	FUNC(DUPLICATE_ENTRIES, ERROR) \
	FUNC(MISSING_AUTHOR, ERROR) \
	FUNC(MISSING_COMMITTER, ERROR) \
	FUNC(MISSING_EMAIL, ERROR) \
	FUNC(MISSING_GRAFT, ERROR) \
	FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
	FUNC(MISSING_OBJECT, ERROR) \
	FUNC(MISSING_PARENT, ERROR) \
	FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
	FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
	FUNC(MISSING_TAG, ERROR) \
	FUNC(MISSING_TAG_ENTRY, ERROR) \
	FUNC(MISSING_TAG_OBJECT, ERROR) \
	FUNC(MISSING_TREE, ERROR) \
	FUNC(MISSING_TYPE, ERROR) \
	FUNC(MISSING_TYPE_ENTRY, ERROR) \
	FUNC(MULTIPLE_AUTHORS, ERROR) \
	FUNC(TAG_OBJECT_NOT_TAG, ERROR) \
	FUNC(TREE_NOT_SORTED, ERROR) \
	FUNC(UNKNOWN_TYPE, ERROR) \
	FUNC(ZERO_PADDED_DATE, ERROR) \
	/* warnings */ \
	FUNC(BAD_FILEMODE, WARN) \
	FUNC(EMPTY_NAME, WARN) \
	FUNC(FULL_PATHNAME, WARN) \
	FUNC(HAS_DOT, WARN) \
	FUNC(HAS_DOTDOT, WARN) \
	FUNC(HAS_DOTGIT, WARN) \
	FUNC(NULL_SHA1, WARN) \
	FUNC(ZERO_PADDED_FILEMODE, WARN) \
	/* infos (reported as warnings, but ignored by default) */ \
	FUNC(BAD_TAG_NAME, INFO) \
	FUNC(MISSING_TAGGER_ENTRY, INFO)

#define MSG_ID(id, msg_type) FSCK_MSG_##id,
enum fsck_msg_id {
	FOREACH_MSG_ID(MSG_ID)
	FSCK_MSG_MAX
};
#undef MSG_ID

#define STR(x) #x
#define MSG_ID(id, msg_type) { STR(id), NULL, FSCK_##msg_type },
static struct {
	const char *id_string;
Example No. 15
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
Example No. 16
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock()
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int s;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	s = splhigh();
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					splx(s);
					s = splhigh();
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				splx(s);
				c_func(c_arg);
				s = splhigh();
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	splx(s);
}
Example No. 17
int foo::caller() {
    c_func(this);
    cpp_func(this);
    return 0;
}
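foo::caller() above mixes a call to c_func with a call to cpp_func; when c_func is really implemented in a C translation unit, the C++ side needs to see a declaration with C linkage so its name is not mangled. A minimal sketch follows; both signatures are assumptions for illustration, since the examples do not show them.

// Hypothetical declarations (signatures assumed): the C function is declared
// with C linkage, while cpp_func is an ordinary C++ function.
extern "C" void c_func(void *obj);   // compiled as C, name not mangled
void cpp_func(foo *obj);             // C++ linkage, name mangled as usual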
Example No. 18
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}