Example #1
void
cpu_intr(struct trapframe *tf)
{
	struct intr_event *ie;
	uint64_t eirr, eimr;
	int i;

	critical_enter();

	/* find a list of enabled interrupts */
	eirr = read_c0_eirr64();
	eimr = read_c0_eimr64();
	eirr &= eimr;
	
	if (eirr == 0) { 
		critical_exit();
		return;
	}
	/*
	 * No need to clear the EIRR here as the handler writes to
	 * compare which ACKs the interrupt.
	 */
	if (eirr & (1 << IRQ_TIMER)) {
		intr_event_handle(xlr_interrupts[IRQ_TIMER].ie, tf);
		critical_exit();
		return;
	}
	
	/* FIXME: sched_pin? lock? */
	for (i = sizeof(eirr) * 8 - 1; i >= 0; i--) {
		if ((eirr & (1ULL << i)) == 0)
			continue;

		ie = xlr_interrupts[i].ie;
		/* Don't account special IRQs */
		switch (i) {
		case IRQ_IPI:
		case IRQ_MSGRING:
			break;
		default:
			mips_intrcnt_inc(mips_intr_counters[i]);
		}

		/* Ack the IRQ on the CPU */
		write_c0_eirr64(1ULL << i);
		if (intr_event_handle(ie, tf) != 0) {
			printf("stray interrupt %d\n", i);
		}
	}
	critical_exit();
}
Example #2
// REFRESH UI for all parameters
void hub_ui_refresh(t_hub *x, t_symbol*, long, t_atom*)
{
	subscriberList *subscriber = x->subscriber;	// head of the linked list
	
	subscriberIterator i;
	t_subscriber* t;
	critical_enter(0);
	for(i = subscriber->begin(); i != subscriber->end(); ++i) {
		t = *i;
		if(t->type == jps_subscribe_parameter)
			object_method_typed(t->object, jps_ui_slash_refresh, 0, 0L, NULL);
	}
	critical_exit(0);
}
Example #3
void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	td->td_pcb->pcb_full_iret = 1;
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}
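The %gs counterpart follows the same pattern; a sketch by analogy with the fsbase code above, assuming a per-CPU gs32p descriptor pointer (not part of the original listing):

void
update_gdt_gsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	td->td_pcb->pcb_full_iret = 1;
	critical_enter();
	/* Stay on this CPU while the per-CPU GDT entry is rewritten. */
	sd = PCPU_GET(gs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}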
Example #4
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		pil = intr_disable();
		td->td_md.md_saved_pil = pil;
	}
	td->td_md.md_spinlock_count++;
	critical_enter();
}
Example #5
/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

	critical_exit();
}
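The allocation side uses the same critical-section pattern to update per-CPU counters; a simplified sketch, with the mts_memalloced/mts_numallocs fields assumed by analogy with the free path above (not taken verbatim from the listing):

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	/* curcpu cannot change inside the critical section, so the
	 * per-CPU statistics bucket is updated without a lock. */
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memalloced += size;
	mtsp->mts_numallocs++;
	critical_exit();
}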
Example #6
SpiFlashOpResult IRAM spi_flash_erase_sector(uint16_t sec)
{
    CHECK_PARAM_RET (sec < flashchip->chip_size / flashchip->sector_size, SPI_FLASH_RESULT_ERR);

    critical_enter ();
    Cache_Read_Disable();

    SpiFlashOpResult ret = SPIEraseSector (sec);

    Cache_Read_Enable(0, 0, 1);
    critical_exit ();

    return ret;
}
Example #7
/*
 * A very short dispatch, to try and maximise assembler code use
 * between all exception types. Maybe 'true' interrupts should go
 * here, and the trap code can come in separately
 */
void
powerpc_interrupt(struct trapframe *framep)
{
	struct thread *td;
	struct trapframe *oldframe;
	register_t ee;

	td = curthread;

	CTR2(KTR_INTR, "%s: EXC=%x", __func__, framep->exc);

	switch (framep->exc) {
	case EXC_EXI:
		critical_enter();
		PIC_DISPATCH(root_pic, framep);
		critical_exit();
		break;

	case EXC_DECR:
		critical_enter();
		atomic_add_int(&td->td_intr_nesting_level, 1);
		oldframe = td->td_intr_frame;
		td->td_intr_frame = framep;
		decr_intr(framep);
		td->td_intr_frame = oldframe;
		atomic_subtract_int(&td->td_intr_nesting_level, 1);
		critical_exit();
		break;

	default:
		/* Re-enable interrupts if applicable. */
		ee = framep->srr1 & PSL_EE;
		if (ee != 0)
			mtmsr(mfmsr() | ee);
		trap(framep);
	}	        
}
Example #8
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
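For context, a sketch of the matching spinlock_exit that unwinds the state saved above, using the same per-thread fields as this example (not part of the original listing); interrupts are restored only when the outermost spin lock is released:

void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}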
Example #9
void cb_free(CircBuff_t * cb) {
	if (cb->invalid) return;

	critical_enter(&cb->mutex);
	free((void *) cb->buffer);
	cb->invalid = 1;

	if (cb->is_waiting) mutex_signal(&cb->locker);

	critical_leave(&cb->mutex);

	mutex_free(&cb->locker);
	mutex_free(&cb->mutex);
}
Example #10
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
Example #11
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		pil = rdpr(pil);
		wrpr(pil, 0, PIL_TICK);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_pil = pil;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
Example #12
setval_t set_lookup(set_t *s, setkey_t k)
{
    node_t *n;
    setval_t v = NULL;
    ptst_t *ptst;

    k = CALLER_TO_INTERNAL_KEY(k);

    ptst = critical_enter();

    n = weak_find(&s->root, k);
    if ( n != NULL ) v = GET_VALUE(n);

    critical_exit(ptst);
    return v;
}
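A hypothetical caller sketch (set_contains and my_set are illustrative names, not from the original source); the ptst_t handle returned by critical_enter() scopes an epoch for safe memory reclamation, which is why set_lookup() passes it back to critical_exit() before returning:

/* Returns nonzero if key is present; reclamation is handled inside set_lookup(). */
static int set_contains(set_t *my_set, setkey_t key)
{
    return set_lookup(my_set, key) != NULL;
}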
Example #13
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tp = (register_t)tls_base;
	if (td == curthread) {
		critical_enter();
#ifdef ARM_TP_ADDRESS
		*(register_t *)ARM_TP_ADDRESS = (register_t)tls_base;
#else
		set_tls((void *)tls_base);
#endif
		critical_exit();
	}
	return (0);
}