Example #1
status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
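The shape of this function — validate the arguments, disable interrupts, grab the per-slot spinlock, copy the value out, then release everything in reverse order — is the canonical read path for shared kernel state in these examples. A minimal caller sketch (the `sem` variable and the message text are illustrative, not from the source):

/* Hypothetical caller of get_sem_count(); assumes `sem` holds a valid sem_id. */
int32 count;
if (get_sem_count(sem, &count) == B_OK)
	dprintf("sem %ld has a count of %ld\n", sem, count);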
Example #2
/*
 * softintr_dispatch:
 *
 *	Process pending software interrupts on the specified queue.
 *
 *	NOTE: We must already be at the correct interrupt priority level.
 */
void
softintr_dispatch(int si)
{
	struct soft_intrq *siq = &soft_intrq[si];
	struct soft_intrhand *sih;
	int oldirqstate;

	siq->siq_evcnt.ev_count++;
	for (;;) {
		oldirqstate = disable_interrupts(I32_bit);
		sih = TAILQ_FIRST(&siq->siq_list);
		if (sih == NULL) {
			restore_interrupts(oldirqstate);
			break;
		}

		TAILQ_REMOVE(&siq->siq_list, sih, sih_list);
		sih->sih_pending = 0;

		uvmexp.softs++;

		restore_interrupts(oldirqstate);

		(*sih->sih_func)(sih->sih_arg);
	}
}
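The dispatcher above pops handlers with interrupts disabled but runs each handler with interrupts restored, so the handlers themselves may be interrupted. The enqueue side has to mirror that locking discipline. A hedged sketch of it (the sih_siq back-pointer and the way the soft interrupt is actually raised are assumptions; NetBSD's real softintr_schedule() differs in detail):

void
softintr_schedule(void *cookie)
{
	struct soft_intrhand *sih = cookie;
	struct soft_intrq *siq = sih->sih_siq;	/* assumed back-pointer */
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	if (sih->sih_pending == 0) {
		TAILQ_INSERT_TAIL(&siq->siq_list, sih, sih_list);
		sih->sih_pending = 1;
		/* ...raise the software interrupt for this queue here... */
	}
	restore_interrupts(oldirqstate);
}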
Example #3
/*!	Finds a free message and gets it.
	NOTE: has the side effect of disabling interrupts;
	the return value is the former interrupt state.
*/
static cpu_status
find_free_message(struct smp_msg** msg)
{
	cpu_status state;

	TRACE(("find_free_message: entry\n"));

retry:
	while (sFreeMessageCount <= 0) {
		state = disable_interrupts();
		process_all_pending_ici(smp_get_current_cpu());
		restore_interrupts(state);
		PAUSE();
	}
	state = disable_interrupts();
	acquire_spinlock(&sFreeMessageSpinlock);

	if (sFreeMessageCount <= 0) {
		// someone grabbed one while we were getting the lock,
		// go back to waiting for it
		release_spinlock(&sFreeMessageSpinlock);
		restore_interrupts(state);
		goto retry;
	}

	*msg = sFreeMessages;
	sFreeMessages = (*msg)->next;
	sFreeMessageCount--;

	release_spinlock(&sFreeMessageSpinlock);

	TRACE(("find_free_message: returning msg %p\n", *msg));

	return state;
}
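Because find_free_message() returns with interrupts still disabled, the caller owns the saved state until it calls restore_interrupts() itself; Example #7 below shows exactly that. The inverse operation would look roughly like this sketch, which assumes the same sFreeMessages list and spinlock (Haiku's actual return_free_message() may differ):

static void
return_free_message(struct smp_msg* msg)
{
	/* interrupts are assumed to already be disabled here */
	acquire_spinlock(&sFreeMessageSpinlock);
	msg->next = sFreeMessages;
	sFreeMessages = msg;
	sFreeMessageCount++;
	release_spinlock(&sFreeMessageSpinlock);
}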
Example #4
int32
tx_cleanup_thread(void *us)
{
	PLM_PACKET pPacket;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK)(us);
	struct be_b57_dev *pUmDevice = (struct be_b57_dev *)(us);
	struct B_UM_PACKET *pUmPacket;
	cpu_status cpu;

	while (1) {
		cpu = disable_interrupts();
		acquire_spinlock(&pUmDevice->lock);

		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);

		release_spinlock(&pUmDevice->lock);
		restore_interrupts(cpu);
		if (pPacket == 0)
			break;
		pUmPacket = (struct B_UM_PACKET *)(pPacket);
		chunk_pool_put(pUmPacket->data);
		pUmPacket->data = NULL;

		cpu = disable_interrupts();
		acquire_spinlock(&pUmDevice->lock);
		QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
		release_spinlock(&pUmDevice->lock);
		restore_interrupts(cpu);
	}
	return LM_STATUS_SUCCESS;
}
Example #5
void
midi_interrupt_op(
	int32 op,
	void * data)
{
	midi_dev * port = (midi_dev *)data;
	ddprintf(("port = %p\n", port));
	if (op == B_MPU_401_ENABLE_CARD_INT) {
		cpu_status cp;
		ddprintf(("cmedia_pci: B_MPU_401_ENABLE_CARD_INT\n"));
		cp = disable_interrupts();
		acquire_spinlock(&port->card->hardware);
		increment_interrupt_handler(port->card);
		set_direct(port->card, 0x01, 0x00, 0x80);
		set_indirect(port->card, 0x2A, 0x04, 0xff);
		release_spinlock(&port->card->hardware);
		restore_interrupts(cp);
	}
	else if (op == B_MPU_401_DISABLE_CARD_INT) {
		/* turn off MPU interrupts */
		cpu_status cp;
		ddprintf(("cmedia_pci: B_MPU_401_DISABLE_CARD_INT\n"));
		cp = disable_interrupts();
		acquire_spinlock(&port->card->hardware);
		set_direct(port->card, 0x01, 0x80, 0x80);
		/* remove interrupt handler if necessary */
		decrement_interrupt_handler(port->card);
		release_spinlock(&port->card->hardware);
		restore_interrupts(cp);
	}
	ddprintf(("cmedia_pci: midi_interrupt_op() done\n"));
}
Example #6
static status_t
b57_read(void *cookie,off_t pos,void *data,size_t *numBytes)
{
	struct be_b57_dev *pUmDevice = (struct be_b57_dev *)cookie;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	PLM_PACKET pPacket;
	struct B_UM_PACKET *pUmPacket;
	cpu_status cpu;

	if (pUmDevice->block)
		acquire_sem(pUmDevice->packet_release_sem);
	else {
		/* Decrement the receive sem anyway, but don't block;
		   this is a horrible hack, but it works. */
		acquire_sem_etc(pUmDevice->packet_release_sem, 1, B_RELATIVE_TIMEOUT, 0);
	}

	cpu = disable_interrupts();
	acquire_spinlock(&pUmDevice->lock);

	pPacket = (PLM_PACKET)
		QQ_PopHead(&pUmDevice->RxPacketReadQ.Container);

	release_spinlock(&pUmDevice->lock);
	restore_interrupts(cpu);

	if (pPacket == 0) {
		*numBytes = -1;
		return B_ERROR;
	}

	pUmPacket = (struct B_UM_PACKET *) pPacket;
	if (pPacket->PacketStatus != LM_STATUS_SUCCESS
		|| pPacket->PacketSize > 1518) {
		cpu = disable_interrupts();
		acquire_spinlock(&pUmDevice->lock);

		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);

		release_spinlock(&pUmDevice->lock);
		restore_interrupts(cpu);
		*numBytes = -1;
		return B_ERROR;
	}

	if ((pPacket->PacketSize) < *numBytes)
		*numBytes = pPacket->PacketSize;

	memcpy(data,pUmPacket->data,*numBytes);
	cpu = disable_interrupts();
	acquire_spinlock(&pUmDevice->lock);

	QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);

	release_spinlock(&pUmDevice->lock);
	restore_interrupts(cpu);

	return B_OK;
}
Example #7
void
smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2,
	addr_t data3, void* dataPointer, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, "
		"data3 0x%lx, ptr %p, flags 0x%lx\n", targetCPU, message, data, data2,
		data3, dataPointer, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();
		if (targetCPU == currentCPU) {
			return_free_message(msg);
			restore_interrupts(state);
			return; // can't send an ICI to ourselves
		}

		// set up the message
		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = dataPointer;
		msg->ref_count = 1;
		msg->flags = flags;
		msg->done = false;

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&sCPUMessageSpinlock[targetCPU]);
		msg->next = sCPUMessages[targetCPU];
		sCPUMessages[targetCPU] = msg;
		release_spinlock(&sCPUMessageSpinlock[targetCPU]);

		arch_smp_send_ici(targetCPU);

		if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
			// wait for the other cpu to finish processing it;
			// for sync messages the interrupt handler drops the ref
			// count below 0 after removing the message from the mailbox
			while (msg->done == false) {
				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}
}
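The mailbox is a simple LIFO: messages are pushed at the head under the per-CPU spinlock. The receiving side pops under the same lock; a fragment-level sketch of that step (Haiku's real process_all_pending_ici() does considerably more bookkeeping around it):

/* Sketch only: pop one message from this CPU's mailbox. */
acquire_spinlock_nocheck(&sCPUMessageSpinlock[currentCPU]);
msg = sCPUMessages[currentCPU];
if (msg != NULL)
	sCPUMessages[currentCPU] = msg->next;
release_spinlock(&sCPUMessageSpinlock[currentCPU]);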
Example #8
static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
	} else
		panic("sem %ld has no owner", id);

	RELEASE_SEM_LIST_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);

	SpinLocker schedulerLocker(gSchedulerLock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	restore_interrupts(state);

	free(name);
	return B_OK;
}
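The checkPermission flag exists so that kernel-internal callers can bypass the ownership check while the syscall path enforces it. The two entry points would reduce to wrappers like this sketch (Haiku's actual wrappers add syscall plumbing):

status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}

status_t
_user_delete_sem(sem_id id)
{
	return delete_sem_internal(id, true);
}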
Example #9
status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
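The infoLocation walk is the classic pointer-to-pointer unlink: instead of tracking a previous node, it advances the address of the link itself, so removing the head element needs no special case. The same idiom in isolation (head and target assumed declared elsewhere):

select_info** link = &head;
while (*link != NULL && *link != target)
	link = &(*link)->next;
if (*link == target)
	*link = target->next;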
Example #10
static int
omaprtc_settime(todr_chip_handle_t tch, struct clock_ymdhms *dt)
{
	struct omaprtc_softc *sc = tch->cookie;
	int s;

	s = disable_interrupts(I32_bit);

	while (rtc_is_busy()) {
		;
	}

	/* It's ok to write these without stopping the
	 * RTC, because the BUSY mechanism lets us guarantee
	 * that we're not in the middle of, e.g., rolling
	 * seconds into minutes.
	 */

	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  YEARS_REG, TOBCD(dt->dt_year - BASEYEAR));
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  MONTHS_REG, TOBCD(dt->dt_mon));
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  WEEKS_REG, TOBCD(dt->dt_wday & 0x0f));
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  DAYS_REG, TOBCD(dt->dt_day));
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  SECONDS_REG, TOBCD(dt->dt_sec));
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  HOURS_REG, TOBCD(dt->dt_hour));
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  MINUTES_REG, TOBCD(dt->dt_min));
	restore_interrupts(s);
	return 0;
}
Example #11
/* kmode function for kprintf */
int kmode(int m)
{
	if ( m == KPOLLED )
		return(disable_interrupts());
	else
		return(restore_interrupts(m));
}
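A typical call sequence, shown as an assumption about how kprintf's polled mode is used rather than as code from this kernel:

int old = kmode(KPOLLED);	/* interrupts off; returns the prior state */
kprintf("polled console output is now safe\n");
kmode(old);			/* hand the saved state back to restore it */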
Example #12
void
arch_smp_send_ici(int32 target_cpu)
{
	uint32 config;
	uint32 timeout;
	cpu_status state;

	state = disable_interrupts();

	config = apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK;
	apic_write(APIC_INTR_COMMAND_2, config | sCPUAPICIds[target_cpu] << 24);

	config = apic_read(APIC_INTR_COMMAND_1) & APIC_INTR_COMMAND_1_MASK;
	apic_write(APIC_INTR_COMMAND_1, config | 0xfd | APIC_DELIVERY_MODE_FIXED
		| APIC_INTR_COMMAND_1_ASSERT
		| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
		| APIC_INTR_COMMAND_1_DEST_FIELD);

	timeout = 100000000;
	// wait for message to be sent
	while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0 && --timeout != 0)
		asm volatile ("pause;");

	if (timeout == 0)
		panic("arch_smp_send_ici: timeout, target_cpu %" B_PRId32, target_cpu);

	restore_interrupts(state);
}
Example #13
/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}
Example #14
status_t
delete_timer(timer_id id)
{
	cpu_status cpu;
	bool deleted;
	int i;

	deleted = false;
	
	cpu = disable_interrupts();
	acquire_spinlock(&sTimerSpinlock);
	
	for (i = 0; i < sTimerCount; i++) {
		if (sTimerData[i].id == id) {
			if (i != (sTimerCount - 1) && sTimerCount != 1) {
				memcpy(&sTimerData[i], &sTimerData[sTimerCount - 1], sizeof(struct timer_info));
			}
			sTimerCount--;
			deleted = true;
			break;
		}
	}
	
	release_spinlock(&sTimerSpinlock);
	restore_interrupts(cpu);

	if (!deleted)
		return B_ERROR;
		
	release_sem_etc(sTimerSem, 1, B_DO_NOT_RESCHEDULE);
	return B_OK;
}
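delete_timer() keeps sTimerData dense by moving the last element into the vacated slot, so insertion is just an append under the same lock. A sketch, where MAX_TIMERS and newTimer are hypothetical names:

cpu = disable_interrupts();
acquire_spinlock(&sTimerSpinlock);
if (sTimerCount < MAX_TIMERS)
	sTimerData[sTimerCount++] = newTimer;	/* append at the end */
release_spinlock(&sTimerSpinlock);
restore_interrupts(cpu);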
Example #15
void
X86PagingStructures32Bit::Init(page_directory_entry* virtualPageDir,
	phys_addr_t physicalPageDir, page_directory_entry* kernelPageDir)
{
	pgdir_virt = virtualPageDir;
	pgdir_phys = physicalPageDir;

	// zero out the bottom portion of the new pgdir
	memset(pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
		NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));

	// insert this new map into the map list
	{
		int state = disable_interrupts();
		acquire_spinlock(&sPagingStructuresListLock);

		// copy the top portion of the page dir from the kernel page dir
		if (kernelPageDir != NULL) {
			memcpy(pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
				kernelPageDir + FIRST_KERNEL_PGDIR_ENT,
				NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
		}

		sPagingStructuresList.Add(this);

		release_spinlock(&sPagingStructuresListLock);
		restore_interrupts(state);
	}
}
Example #16
/**************************************************************************
* scrolls the text on the screen up by one line.
* console - The address of the selected console.
**************************************************************************/
void video_scroll_console(console_t *console) {
	u32int flags;

	disable_and_save_interrupts(flags);

    // The cursor has moved past the last row, so we need to scroll up
    if((console->cursor_y) >= crt_height)
    {
        // move every line up by one row
        s32int i;
        for (i = 0*crt_width; i < ((crt_height - 1) * crt_width); i++) {
        	(console->vid_buffer)[i] = (console->vid_buffer)[i+crt_width];
        }

        // The cursor should now be on the last line.
        (console->cursor_y) = (crt_height - 1);

        //blank the new line
        for (i =  0; i < crt_width; i++) {
        	//(console->vid_buffer)[i] = blank;
        	video_place_char(i, console->cursor_y, ' ', console);
        }

    }
    restore_interrupts(flags);
}
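disable_and_save_interrupts() is a macro here, since it writes through its argument. One plausible x86 definition consistent with the usage above (an assumption; the kernel's own header is authoritative):

/* Assumed definition, not taken from this kernel's source. */
#define disable_and_save_interrupts(flags) \
	do { asm volatile("pushf; pop %0; cli" : "=r" (flags)); } while (0)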
Example #17
void scrToggleTestPin (void)
{
    u_int16_t  seqReg;
#ifdef SHARK
    u_int savedints;

    savedints = disable_interrupts(I32_bit | F32_bit);    
#endif

    sequoiaRead(SEQUOIA_2GPIO,&seqReg);

    if (testPin)
    {
        testPin = 0;
        CLR(seqReg,SCR_BUGA);
    } 
    else
    {
        SET(seqReg,SCR_BUGA);
        testPin = 1;
    }
    sequoiaWrite(SEQUOIA_2GPIO,seqReg);
#ifdef SHARK
    restore_interrupts(savedints);
#endif
}
Example #18
/* routines to read/write to sequoia registers */
void sequoiaWrite(int reg,u_int16_t  seqReg)     
{   
#ifdef SHARK
    u_int savedints;

    savedints = disable_interrupts(I32_bit | F32_bit);    
#endif

    /*
       On SHARK, the fiq comes from the pmi/smi.  After receiving
       a FIQ, the SMI must be cleared.  The SMI gets cleared by
       changing to sleep mode, thereby lowering PC[4].
       Need to do the right thing with the cache if this is going to work.
     */
    if (reg == PMC_FOMPCR_REG) {
      bus_space_write_2(&isa_io_bs_tag,sequoia_ioh,SEQUOIA_INDEX_OFFSET,reg);
      bus_space_write_2(&isa_io_bs_tag,sequoia_ioh,SEQUOIA_DATA_OFFSET,
			seqReg | (FOMPCR_M_PCON4));
      bus_space_write_2(&isa_io_bs_tag,sequoia_ioh,SEQUOIA_INDEX_OFFSET,
			PMC_SLPMPCR_REG);
      bus_space_write_2(&isa_io_bs_tag,sequoia_ioh,SEQUOIA_DATA_OFFSET,
			seqReg & ~(SLPMPCR_M_PCSLP4));
      sequoia_index_cache = PMC_SLPMPCR_REG;
    } else {
      /* normal case: just do the write */
      if(sequoia_index_cache != reg)
      {
        sequoia_index_cache = reg;
        bus_space_write_2(&isa_io_bs_tag,sequoia_ioh,SEQUOIA_INDEX_OFFSET,reg);
      }
      bus_space_write_2(&isa_io_bs_tag,sequoia_ioh,SEQUOIA_DATA_OFFSET,seqReg);
    }
#ifdef SHARK
    restore_interrupts(savedints);
#endif
}
Example #19
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	status_t status = B_OK;
	cpu_status state;
	int32 i, count;

	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	state = disable_interrupts();
	acquire_spinlock(&sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			status = B_NOT_ALLOWED;
	}

	if (status == B_OK)
		gCPU[cpu].disabled = !enabled;

	release_spinlock(&sSetCpuLock);
	restore_interrupts(state);
	return status;
}
Example #20
void
M68KPagingStructures040::Init(page_root_entry* virtualPageRoot,
	phys_addr_t physicalPageRoot, page_root_entry* kernelPageRoot)
{
	pgroot_virt = virtualPageRoot;
	pgroot_phys = physicalPageRoot;

	// zero out the bottom portion of the new pgroot
	memset(pgroot_virt + FIRST_USER_PGROOT_ENT, 0,
		NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));

	// insert this new map into the map list
	{
		int state = disable_interrupts();
		acquire_spinlock(&sPagingStructuresListLock);

		// copy the top portion of the page dir from the kernel page dir
		if (kernelPageRoot != NULL) {
			memcpy(pgroot_virt + FIRST_KERNEL_PGROOT_ENT,
				kernelPageRoot + FIRST_KERNEL_PGROOT_ENT,
				NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));
		}

		sPagingStructuresList.Add(this);

		release_spinlock(&sPagingStructuresListLock);
		restore_interrupts(state);
	}
}
Example #21
void *slab_alloc(struct slab_allocator *sa)
{
    void *object = 0;
    int old_flags;

    old_flags = disable_interrupts();
    acquire_spinlock(&sa->lock);
    if (sa->free_list)
    {
        // Grab freed object
        object = sa->free_list;
        sa->free_list = *((void**) object);
    }
    else
    {
        // If there is no wilderness, or the slab is full, create a new
        // wilderness slab
        if (sa->wilderness_slab == 0
                || sa->wilderness_offset + sa->object_size > sa->slab_size)
        {
            sa->wilderness_slab = kmalloc(sa->slab_size);
            sa->wilderness_offset = 0;
        }

        object = (void*)((char*) sa->wilderness_slab + sa->wilderness_offset);
        sa->wilderness_offset += sa->object_size;
    }

    release_spinlock(&sa->lock);
    restore_interrupts(old_flags);

    return object;
}
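Since the free list is threaded through the freed objects themselves (the *((void**) object) trick above), freeing is symmetric and O(1). A hedged sketch of the matching slab_free(), assuming the same structure:

void slab_free(struct slab_allocator *sa, void *object)
{
    int old_flags = disable_interrupts();

    acquire_spinlock(&sa->lock);
    *((void**) object) = sa->free_list;    /* link object into the free list */
    sa->free_list = object;
    release_spinlock(&sa->lock);
    restore_interrupts(old_flags);
}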
Example #22
void
x86_hardware_interrupt(struct iframe* frame)
{
	int32 vector = frame->vector - ARCH_INTERRUPT_BASE;
	bool levelTriggered = false;
	Thread* thread = thread_get_current_thread();

	if (sCurrentPIC->is_spurious_interrupt(vector)) {
		TRACE(("got spurious interrupt at vector %ld\n", vector));
		return;
	}

	levelTriggered = sCurrentPIC->is_level_triggered_interrupt(vector);

	if (!levelTriggered) {
		// if it's not handled by the current pic then it's an apic generated
		// interrupt like local interrupts, msi or ipi.
		if (!sCurrentPIC->end_of_interrupt(vector))
			apic_end_of_interrupt();
	}

	int_io_interrupt_handler(vector, levelTriggered);

	if (levelTriggered) {
		if (!sCurrentPIC->end_of_interrupt(vector))
			apic_end_of_interrupt();
	}

	cpu_status state = disable_interrupts();
	if (thread->cpu->invoke_scheduler) {
		SpinLocker schedulerLocker(thread->scheduler_lock);
		scheduler_reschedule(B_THREAD_READY);
		schedulerLocker.Unlock();
		restore_interrupts(state);
	} else if (thread->post_interrupt_callback != NULL) {
		void (*callback)(void*) = thread->post_interrupt_callback;
		void* data = thread->post_interrupt_data;

		thread->post_interrupt_callback = NULL;
		thread->post_interrupt_data = NULL;

		restore_interrupts(state);

		callback(data);
	}
}
Example #23
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);

    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
    restore_interrupts((cpu_status)pState->uOldCpuState);
    pState->uOldCpuState = 0;
}
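On this port, "preemption disabled" is implemented as "interrupts disabled". The matching disable call saves the state that the restore above consumes; a sketch under that assumption (the exact assertions in VirtualBox's r0drv code may differ):

RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);

    /* Save the interrupt state for the later RTThreadPreemptRestore(). */
    pState->uOldCpuState = disable_interrupts();
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}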
Example #24
void ledSetDebug(int command)
{
    u_int16_t  seqReg;
#ifdef SHARK
    u_int savedints;

    savedints = disable_interrupts(I32_bit | F32_bit);    
#endif
    sequoiaRead (PMC_FOMPCR_REG, &seqReg);

    switch (command) 
    {
        case LED_DEBUG_STATE_0:    
            CLR(seqReg,LED_DEBUG_YELLOW_BIT);
            CLR(seqReg,LED_DEBUG_GREEN_BIT);
            break;

        case LED_DEBUG_STATE_1:
            SET(seqReg,LED_DEBUG_YELLOW_BIT);
            CLR(seqReg,LED_DEBUG_GREEN_BIT);
            break;

        case LED_DEBUG_STATE_2:
            CLR(seqReg,LED_DEBUG_YELLOW_BIT);
            SET(seqReg,LED_DEBUG_GREEN_BIT);
            break;

        case LED_DEBUG_STATE_3:
            SET(seqReg,LED_DEBUG_YELLOW_BIT);
            SET(seqReg,LED_DEBUG_GREEN_BIT);
            break;

        case LED_DEBUG_YELLOW_ON:
            SET(seqReg,LED_DEBUG_YELLOW_BIT);
            break;

        case LED_DEBUG_YELLOW_OFF:
            CLR(seqReg,LED_DEBUG_YELLOW_BIT);
            break;

        case LED_DEBUG_GREEN_ON:
            SET(seqReg,LED_DEBUG_GREEN_BIT);
            break;

        case LED_DEBUG_GREEN_OFF:
            CLR(seqReg,LED_DEBUG_GREEN_BIT);
            break;

        default:
            panic("ledSetDebug: invalid command %d\n",command);
            break;
    }
    sequoiaWrite(PMC_FOMPCR_REG, seqReg);
#ifdef SHARK
    restore_interrupts(savedints);
#endif
}
Example #25
/*
 * cpu_initclocks:
 *
 *	Initialize the clocks and get them going.
 */
void
cpu_initclocks(void)
{
	u_int oldirqstate;

#if 0
	if (hz < 50 || COUNTS_PER_SEC % hz) {
		printf("Cannot get %d Hz clock; using 100 Hz\n", hz);
		hz = 100;
	}
#endif

	/*
	 * We only have one timer available; stathz and profhz are
	 * always left as 0 (the upper-layer clock code deals with
	 * this situation).
	 */
	if (stathz != 0)
		printf("Cannot get %d Hz statclock\n", stathz);
	stathz = 0;

	if (profhz != 0)
		printf("Cannot get %d Hz profclock\n", profhz);
	profhz = 0;

	/* Report the clock frequency. */
	aprint_normal("clock: hz=%d stathz=%d profhz=%d\n", hz, stathz, profhz);

	oldirqstate = disable_interrupts(I32_bit);

	/* Hook up the clock interrupt handler. */
	clock_ih = becc_intr_establish(ICU_TIMERA, IPL_CLOCK,
	    clockhandler, NULL);
	if (clock_ih == NULL)
		panic("cpu_initclocks: unable to register timer interrupt");

	/* Set up the new clock parameters. */

	/* Stop timer, clear interrupt */
	BECC_CSR_WRITE(BECC_TSCRA, TSCRx_TIF);

	counts_per_hz = COUNTS_PER_SEC / hz;

	/* Set the timer preload value. */
	BECC_CSR_WRITE(BECC_TPRA, counts_per_hz - 1);

	/* ...and start it in motion. */
	BECC_CSR_WRITE(BECC_TSCRA, TSCRx_TE | TSCRx_CM);

#ifdef __HAVE_FAST_SOFTINTS
	/* register soft interrupt handler as well */
	becc_intr_establish(ICU_SOFT, IPL_SOFTCLOCK, becc_softint, NULL);
#endif

	restore_interrupts(oldirqstate);

	tc_init(&becc_timecounter);
}
Example #26
void
M68KVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
Example #27
static int
omaprtc_gettime(todr_chip_handle_t tch, struct clock_ymdhms *dt)
{
	struct omaprtc_softc *sc = tch->cookie;
	int s;

	/* Wait for RTC_STATUS_REG:BUSY to go low to
	 * guarantee our read is correct.  BUSY will
	 * only be high for one 32kHz period (30.5us)
	 * each second, so we'll usually pass right
	 * through.
	 *
	 * Watch RTC_CTRL_REG:STOP_RTC as well so
	 * we don't spin forever if someone disables the RTC.
	 *
	 * Turn interrupts off, because we are only allowed
	 * to read/write the registers for 1/2 of a 32kHz
	 * clock period (= 15us) after detecting that BUSY
	 * is low.
	 */

	s = disable_interrupts(I32_bit);

	while (rtc_is_busy()) {
		;
	}

	dt->dt_year =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 YEARS_REG)) + BASEYEAR;
	dt->dt_mon =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 MONTHS_REG));
	dt->dt_wday =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 WEEKS_REG) & 0x0f);
	dt->dt_day =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 DAYS_REG));
	dt->dt_sec =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 SECONDS_REG));
	dt->dt_hour =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 HOURS_REG));
	dt->dt_min =
		FROMBCD(bus_space_read_1(sc->sc_iot,
					 sc->sc_ioh,
					 MINUTES_REG));
	restore_interrupts(s);
	return 0;
}
Example #28
/*
 * cpu_initclocks:
 *
 *	Initialize the clocks and get them going.
 */
void
cpu_initclocks(void)
{
    struct ixpclk_softc* sc = ixpclk_sc;
    struct resource *irq;
    device_t dev = sc->sc_dev;
    u_int oldirqstate;
    int rid = 0;
    void *ihl;

    if (hz < 50 || COUNTS_PER_SEC % hz) {
        printf("Cannot get %d Hz clock; using 100 Hz\n", hz);
        hz = 100;
    }
    tick = 1000000 / hz;	/* number of microseconds between interrupts */

    /*
     * We only have one timer available; stathz and profhz are
     * always left as 0 (the upper-layer clock code deals with
     * this situation).
     */
    if (stathz != 0)
        printf("Cannot get %d Hz statclock\n", stathz);
    stathz = 0;

    if (profhz != 0)
        printf("Cannot get %d Hz profclock\n", profhz);
    profhz = 0;

    /* Report the clock frequency. */

    oldirqstate = disable_interrupts(I32_bit);

    irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, IXP425_INT_TMR0,
                             IXP425_INT_TMR0, 1, RF_ACTIVE);
    if (!irq)
        panic("Unable to setup the clock irq handler.\n");
    else
        bus_setup_intr(dev, irq, INTR_TYPE_CLK, ixpclk_intr, NULL,
                       NULL, &ihl);

    /* Set up the new clock parameters. */

    /* clear interrupt */
    bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS,
                      OST_WARM_RESET | OST_WDOG_INT | OST_TS_INT |
                      OST_TIM1_INT | OST_TIM0_INT);

    counts_per_hz = COUNTS_PER_SEC / hz;

    /* reload value & Timer enable */
    bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_TIM0_RELOAD,
                      (counts_per_hz & TIMERRELOAD_MASK) | OST_TIMER_EN);

    tc_init(&ixp425_timer_timecounter);
    restore_interrupts(oldirqstate);
    rid = 0;
}
Example #29
void
enable_irq(int irq)
{
	u_int oldirqstate; 

	oldirqstate = disable_interrupts(I32_bit);
	current_mask |= (1 << irq);
	irq_setmasks();
	restore_interrupts(oldirqstate);
}  
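The matching disable path clears the bit inside the same interrupt-off window; a sketch assuming the same current_mask and irq_setmasks() globals:

void
disable_irq(int irq)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	current_mask &= ~(1 << irq);
	irq_setmasks();
	restore_interrupts(oldirqstate);
}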
Example #30
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    //dprintf("%s(%p)\n", __FUNCTION__, pState);

    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
    //RELEASE_THREAD_LOCK();
    restore_interrupts((cpu_status)pState->uOldCpuState);
    pState->uOldCpuState = 0;
}