Example #1
0
char 
SORAAPI 
remove_thread_safe_enlist_head(struct thread_safe_enlist* tslist, 
	void** data,
	char lock) {

	KIRQL irql;
	acquire_spinlock(lock, 
		tslist->m_sync,
		irql);	
	if (IsListEmpty(&tslist->m_head.m_entry)) {
		release_spinlock(lock, 
			tslist->m_sync,
			irql);
		return false;
	}
	struct LIST_ENTRY_EX* entry;
	entry = (LIST_ENTRY_EX*)RemoveHeadList(&tslist->m_head.m_entry);
	*data = entry->m_value;
	ExFreeToNPagedLookasideList(&tslist->m_lookaside,
		entry);
	InterlockedDecrement(&tslist->m_count);
	release_spinlock(lock, 
		tslist->m_sync,
		irql);
	return true;
}
Example #2
0
int32
tx_cleanup_thread(void *us)
{
	PLM_PACKET pPacket;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK)(us);
	struct be_b57_dev *pUmDevice = (struct be_b57_dev *)(us);
	struct B_UM_PACKET *pUmPacket;
	cpu_status cpu;

	while (1) {
		cpu = disable_interrupts();
		acquire_spinlock(&pUmDevice->lock);

		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);

		release_spinlock(&pUmDevice->lock);
		restore_interrupts(cpu);
		if (pPacket == 0)
			break;
		pUmPacket = (struct B_UM_PACKET *)(pPacket);
		chunk_pool_put(pUmPacket->data);
		pUmPacket->data = NULL;

		cpu = disable_interrupts();
		acquire_spinlock(&pUmDevice->lock);
		QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
		release_spinlock(&pUmDevice->lock);
		restore_interrupts(cpu);
	}
	return LM_STATUS_SUCCESS;
}
Example #3
0
void
midi_interrupt_op(
	int32 op,
	void * data)
{
	midi_dev * port = (midi_dev *)data;
	ddprintf(("port = %p\n", port));
	if (op == B_MPU_401_ENABLE_CARD_INT) {
		cpu_status cp;
		ddprintf(("cmedia_pci: B_MPU_401_ENABLE_CARD_INT\n"));
		cp = disable_interrupts();
		acquire_spinlock(&port->card->hardware);
		increment_interrupt_handler(port->card);
		set_direct(port->card, 0x01, 0x00, 0x80);
		set_indirect(port->card, 0x2A, 0x04, 0xff);
		release_spinlock(&port->card->hardware);
		restore_interrupts(cp);
	}
	else if (op == B_MPU_401_DISABLE_CARD_INT) {
		/* turn off MPU interrupts */
		cpu_status cp;
		ddprintf(("cmedia_pci: B_MPU_401_DISABLE_CARD_INT\n"));
		cp = disable_interrupts();
		acquire_spinlock(&port->card->hardware);
		set_direct(port->card, 0x01, 0x80, 0x80);
		/* remove interrupt handler if necessary */
		decrement_interrupt_handler(port->card);
		release_spinlock(&port->card->hardware);
		restore_interrupts(cp);
	}
	ddprintf(("cmedia_pci: midi_interrupt_op() done\n"));
}
Example #4
0
static struct smp_msg *smp_check_for_message(int curr_cpu, int *source_mailbox)
{
    struct smp_msg *msg;

    acquire_spinlock_nocheck(&cpu_msg_spinlock[curr_cpu]);
    msg = smp_msgs[curr_cpu];
    if(msg != 0) {
        smp_msgs[curr_cpu] = msg->next;
        release_spinlock(&cpu_msg_spinlock[curr_cpu]);
        //		dprintf(" found msg 0x%x in cpu mailbox\n", msg);
        *source_mailbox = MAILBOX_LOCAL;
    } else {
        // try getting one from the broadcast mailbox

        release_spinlock(&cpu_msg_spinlock[curr_cpu]);
        acquire_spinlock_nocheck(&broadcast_msg_spinlock);

        msg = smp_broadcast_msgs;
        while(msg != 0) {
            if(CHECK_BIT(msg->proc_bitmap, curr_cpu) != 0) {
                // we have handled this one already
                msg = msg->next;
                continue;
            }

            // mark it so we won't try to process this one again
            msg->proc_bitmap = SET_BIT(msg->proc_bitmap, curr_cpu);
            *source_mailbox = MAILBOX_BCAST;
            break;
        }
        release_spinlock(&broadcast_msg_spinlock);
        //		dprintf(" found msg 0x%x in broadcast mailbox\n", msg);
    }
    return msg;
}
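CHECK_BIT and SET_BIT are not defined anywhere in this listing. A plausible sketch, assuming proc_bitmap is an ordinary per-CPU bitmask and SET_BIT returns the updated mask (these definitions are an illustration, not the original macros):

/* Assumed definitions for illustration only: one bit per CPU. */
#define CHECK_BIT(a, b)  ((a) & (1 << (b)))
#define SET_BIT(a, b)    ((a) | (1 << (b)))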
Example #5
0
/*!	Finds a free message and gets it.
	NOTE: has side effect of disabling interrupts
	return value is the former interrupt state
*/
static cpu_status
find_free_message(struct smp_msg** msg)
{
	cpu_status state;

	TRACE(("find_free_message: entry\n"));

retry:
	while (sFreeMessageCount <= 0) {
		state = disable_interrupts();
		process_all_pending_ici(smp_get_current_cpu());
		restore_interrupts(state);
		PAUSE();
	}
	state = disable_interrupts();
	acquire_spinlock(&sFreeMessageSpinlock);

	if (sFreeMessageCount <= 0) {
		// someone grabbed one while we were getting the lock,
		// go back to waiting for it
		release_spinlock(&sFreeMessageSpinlock);
		restore_interrupts(state);
		goto retry;
	}

	*msg = sFreeMessages;
	sFreeMessages = (*msg)->next;
	sFreeMessageCount--;

	release_spinlock(&sFreeMessageSpinlock);

	TRACE(("find_free_message: returning msg %p\n", *msg));

	return state;
}
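A minimal caller sketch of the contract described in the comment above: find_free_message() returns with interrupts disabled, so the caller must later pass the returned state to restore_interrupts(). It mirrors the usage in Example #26 below; the function name and the elided queuing step are assumptions for illustration.

static void
use_free_message_sketch(int32 message)
{
	struct smp_msg* msg;

	// returns with interrupts disabled; 'state' is the previous state
	cpu_status state = find_free_message(&msg);

	msg->message = message;
	msg->ref_count = 1;
	msg->done = false;

	// ... queue msg into a mailbox and send the ICI here ...

	// the caller is responsible for restoring the saved interrupt state
	restore_interrupts(state);
}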
Example #6
0
int signal_local_semaphore( struct thread* tr, int sem_id )
{
	struct sem_link *sl;
	struct thread *target;
	struct process *proc;

	proc = tr->process;
	
	acquire_spinlock( & (proc->sems_lock) );
		
		if ( proc->sems[ sem_id ].sem_id != sem_id ) 
		{
			release_spinlock( & (proc->sems_lock) );
			return -1;
		}
		
		proc->sems[ sem_id ].count -= 1;
			
			// wake up any waiting threads
			sl = proc->sems[ sem_id ].waiting_list;	
			if ( sl != NULL )
			{
				target = find_thread_with_id( tr->process, sl->tid );
				if ( target != NULL )
						set_thread_state( target, THREAD_RUNNING );

				proc->sems[ sem_id ].waiting_list = sl->next;
				free( sl );
			}

	release_spinlock( & ( proc->sems_lock ) );
	return 0;
}
Example #7
0
static status_t
b57_read(void *cookie,off_t pos,void *data,size_t *numBytes)
{
	struct be_b57_dev *pUmDevice = (struct be_b57_dev *)cookie;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	PLM_PACKET pPacket;
	struct B_UM_PACKET *pUmPacket;
	cpu_status cpu;

	if (pUmDevice->block)
		acquire_sem(pUmDevice->packet_release_sem);
	else {
		/* Decrement the receive sem anyway, but don't block;
		   this is a horrible hack, but it works. */
		acquire_sem_etc(pUmDevice->packet_release_sem, 1, B_RELATIVE_TIMEOUT, 0);
	}

	cpu = disable_interrupts();
	acquire_spinlock(&pUmDevice->lock);

	pPacket = (PLM_PACKET)
		QQ_PopHead(&pUmDevice->RxPacketReadQ.Container);

	release_spinlock(&pUmDevice->lock);
	restore_interrupts(cpu);

	if (pPacket == 0) {
		*numBytes = -1;
		return B_ERROR;
	}

	pUmPacket = (struct B_UM_PACKET *) pPacket;
	if (pPacket->PacketStatus != LM_STATUS_SUCCESS
		|| pPacket->PacketSize > 1518) {
		cpu = disable_interrupts();
		acquire_spinlock(&pUmDevice->lock);

		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);

		release_spinlock(&pUmDevice->lock);
		restore_interrupts(cpu);
		*numBytes = -1;
		return B_ERROR;
	}

	if ((pPacket->PacketSize) < *numBytes)
		*numBytes = pPacket->PacketSize;

	memcpy(data,pUmPacket->data,*numBytes);
	cpu = disable_interrupts();
	acquire_spinlock(&pUmDevice->lock);

	QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);

	release_spinlock(&pUmDevice->lock);
	restore_interrupts(cpu);

	return B_OK;
}
Example #8
0
void rhine_xmit(rhine *r, const char *ptr, ssize_t len)
{
#if 0
	PANIC_UNIMPLEMENTED();
#if 0
	int i;
#endif

//restart:
	sem_acquire(r->tx_sem, 1);
	mutex_lock(&r->lock);

#if 0
	dprintf("XMIT %d %x (%d)\n",r->txbn, ptr, len);

	dprintf("dumping packet:");
	for(i=0; i<len; i++) {
		if(i%8 == 0)
			dprintf("\n");
		dprintf("0x%02x ", ptr[i]);
	}
	dprintf("\n");
#endif

	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);

#if 0
	/* wait for clear-to-send */
	if(!(RTL_READ_32(r, RT_TXSTATUS0 + r->txbn*4) & RT_TX_HOST_OWNS)) {
		dprintf("rhine_xmit: no txbuf free\n");
		rhine_dumptxstate(r);
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		sem_release(r->tx_sem, 1);
		goto restart;
	}
#endif

	memcpy((void*)(r->txbuf + r->txbn * 0x800), ptr, len);
	if(len < ETHERNET_MIN_SIZE)
		len = ETHERNET_MIN_SIZE;

	RTL_WRITE_32(r, RT_TXSTATUS0 + r->txbn*4, len | 0x80000);
	if(++r->txbn >= 4)
		r->txbn = 0;

	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();

	mutex_unlock(&r->lock);
#endif
}
Example #9
0
err_code resume_thread(thread_id id) {
  err_code err = ERR_NONE;
  CAST_TO_THREAD(thrd, id);

  acquire_spinlock(&inactive.lock);
  if (thrd->state == THREAD_STATE_PAUSED) {
    thrd->state = THREAD_STATE_UNKNOWN;
    if (inactive.tail == thrd)
      inactive.tail = thrd->next;
    if (thrd->next)
      thrd->next->prev = thrd->prev;
    if (thrd->prev)
      thrd->prev->next = thrd->next;
    inactive.total_threads--;
  }
  else
    err = ERR_BAD_STATE;
  release_spinlock(&inactive.lock);

  if (!err) {
    thrd->real_priority = thrd->priority;
    thrd->quantum = 0;
    update_priority_quantum(thrd);

    struct cpu_task task = { .type = CPU_TASK_RESUME, .thread = thrd };
    run_cpu_task(find_least_loaded_cpu(thrd->affinity), &task);
  }

  return err;
}
Example #10
0
void free_single_page(region_t r, struct page *p)
/* Assumes freepages_lock held */
{
	/* pthread_t pt = pthread_self(); */
#ifndef NMEMDEBUG
	ASSERT_INUSE(p, r);
	set_page_region(MAPNB(p), PAGENB(p), FREEPAGE);
#endif /* ifndef NMEMDEBUG */
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);
	p->next                     = single_pages[list_id].pages;
	single_pages[list_id].pages = p;
	single_pages[list_id].page_count++;
	release_spinlock(&single_pages[list_id].lock);
	/*acquire_spinlock1( &single_pages[p->list_id].lock );*/
	/*p->next = single_pages[p->list_id].pages;*/
	/*single_pages[p->list_id].pages = p;*/
	/*single_pages[p->list_id].page_count++;*/
	/*release_spinlock( &single_pages[p->list_id].lock );*/

	/*p->next = single_pages[Hash(pt)%MAXLISTS].pages;*/
	/*single_pages[Hash(pt)%MAXLISTS].pages = p;*/
	/*single_pages[Hash(pt)%MAXLISTS].page_count++;*/
}
Example #11
0
struct page* alloc_single_page(struct page *next)
{
	struct page *p = NULL;

	/* pthread_t pt = pthread_self(); */

	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);
	/*if( single_pages[Hash(pt)%MAXLISTS].page_count == 0 ){*/
	if (single_pages[list_id % MAXLISTS].page_count == 0)
		p = alloc_new(PAGE_GROUP_SIZE, NULL);
	add_single_pages(p);
	/*p = single_pages[Hash(pt)%MAXLISTS].pages;*/
	p = single_pages[list_id % MAXLISTS].pages;
	/*single_pages[Hash(pt)%MAXLISTS].pages = p->next;*/
	single_pages[list_id % MAXLISTS].pages = p->next;
	p->next                                = next;
	/*single_pages[Hash(pt)%MAXLISTS].page_count--;*/
	single_pages[list_id % MAXLISTS].page_count--;
	/*release_spinlock( &single_pages[Hash(pt)%MAXLISTS].lock );*/
	release_spinlock(&single_pages[list_id % MAXLISTS].lock);
	/*list_id++;*/

	return p;
}
Example #12
0
struct page* alloc_pages(int n, struct page *next)
{
	/* pthread_t pt = pthread_self(); */
	struct page *ret_val, *p = NULL;

	assert(n >= K);
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);
	/*if( n > single_pages[Hash(pt)%MAXLISTS].page_count ){*/
	if (n > single_pages[list_id % MAXLISTS].page_count)
		p = alloc_new(n + PAGE_GROUP_SIZE, NULL);
	add_single_pages(p);
	/*ret_val = single_pages[Hash(pt)%MAXLISTS].pages;*/
	/*single_pages[Hash(pt)%MAXLISTS].pages =*/
	/*single_pages[Hash(pt)%MAXLISTS].pages->next;*/
	ret_val = single_pages[list_id % MAXLISTS].pages;
	single_pages[list_id % MAXLISTS].pages =
	        single_pages[list_id % MAXLISTS].pages->next;
	ret_val->next = next;
	/*single_pages[Hash(pt)%MAXLISTS].page_count -= n;*/
	single_pages[list_id % MAXLISTS].page_count -= n;
	/*release_spinlock( &single_pages[Hash(pt)%MAXLISTS].lock );*/
	release_spinlock(&single_pages[list_id % MAXLISTS].lock);
	/*list_id++;*/

	return ret_val;
}
Example #13
static void rtl8169_int(void* data)
{
    int rc = INT_NO_RESCHEDULE;
    rtl8169 *r = (rtl8169 *)data;
    uint16 istat;

    acquire_spinlock(&r->reg_spinlock);

    istat = RTL_READ_16(r, REG_ISR);
    SHOW_FLOW(3, "rtl8169_int: istat 0x%x\n", istat);
    if (istat == 0)
        goto done;

    if (istat & (IMR_ROK|IMR_RER|IMR_RDU|IMR_RXOVL)) {
        rc |= rtl8169_rxint(r, istat);
    }
    if (istat & (IMR_TOK|IMR_TER|IMR_TDU)) {
        rc |= rtl8169_txint(r, istat);
    }

    RTL_WRITE_16(r, REG_ISR, istat);

done:
    release_spinlock(&r->reg_spinlock);

    // TODO req reschedule if needed?
    (void) rc;
    //return rc;
}
Example #14
0
void
smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
	addr_t data, addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
{
	if (!sICIEnabled)
		return;

	TRACE(("smp_send_broadcast_ici_interrupts_disabled: cpu %ld mess 0x%lx, "
		"data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
		currentCPU, message, data, data2, data3, dataPointer, flags));

	struct smp_msg *msg;
	find_free_message_interrupts_disabled(currentCPU, &msg);

	msg->message = message;
	msg->data = data;
	msg->data2 = data2;
	msg->data3 = data3;
	msg->data_ptr = dataPointer;
	msg->ref_count = sNumCPUs - 1;
	msg->flags = flags;
	msg->proc_bitmap = SET_BIT(0, currentCPU);
	msg->done = false;

	TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: inserting msg %p "
		"into broadcast mbox\n", currentCPU, msg));

	// stick it in the appropriate cpu's mailbox
	acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
	msg->next = sBroadcastMessages;
	sBroadcastMessages = msg;
	release_spinlock(&sBroadcastMessageSpinlock);

	arch_smp_send_broadcast_ici();

	TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: sent interrupt\n",
		currentCPU));

	if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
		// wait for the other cpus to finish processing it
		// the interrupt handler will ref count it to <0
		// if the message is sync after it has removed it from the mailbox
		TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: waiting for "
			"ack\n", currentCPU));

		while (msg->done == false) {
			process_all_pending_ici(currentCPU);
			PAUSE();
		}

		TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: returning "
			"message to free list\n", currentCPU));

		// for SYNC messages, it's our responsibility to put it
		// back into the free list
		return_free_message(msg);
	}

	TRACE(("smp_send_broadcast_ici_interrupts_disabled: done\n"));
}
Example #15
0
void *slab_alloc(struct slab_allocator *sa)
{
    void *object = 0;
    int old_flags;

    old_flags = disable_interrupts();
    acquire_spinlock(&sa->lock);
    if (sa->free_list)
    {
        // Grab freed object
        object = sa->free_list;
        sa->free_list = *((void**) object);
    }
    else
    {
        // If there is no wilderness, or the slab is full, create a new
        // wilderness slab
        if (sa->wilderness_slab == 0
                || sa->wilderness_offset + sa->object_size > sa->slab_size)
        {
            sa->wilderness_slab = kmalloc(sa->slab_size);
            sa->wilderness_offset = 0;
        }

        object = (void*)((char*) sa->wilderness_slab + sa->wilderness_offset);
        sa->wilderness_offset += sa->object_size;
    }

    release_spinlock(&sa->lock);
    restore_interrupts(old_flags);

    return object;
}
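slab_alloc() pops recycled objects from an intrusive free list that stores the next pointer in the object's first word. Under that same assumption, a hypothetical slab_free() counterpart (not part of the original listing) would push with the same interrupt and spinlock discipline:

static void slab_free_sketch(struct slab_allocator *sa, void *object)
{
    // Assumed counterpart for illustration; the real free path is not shown.
    int old_flags = disable_interrupts();
    acquire_spinlock(&sa->lock);

    // Store the current free-list head in the object's first word and make
    // the object the new head.
    *((void**) object) = sa->free_list;
    sa->free_list = object;

    release_spinlock(&sa->lock);
    restore_interrupts(old_flags);
}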
Example #16
void
X86PagingStructures32Bit::Init(page_directory_entry* virtualPageDir,
	phys_addr_t physicalPageDir, page_directory_entry* kernelPageDir)
{
	pgdir_virt = virtualPageDir;
	pgdir_phys = physicalPageDir;

	// zero out the bottom portion of the new pgdir
	memset(pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
		NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));

	// insert this new map into the map list
	{
		int state = disable_interrupts();
		acquire_spinlock(&sPagingStructuresListLock);

		// copy the top portion of the page dir from the kernel page dir
		if (kernelPageDir != NULL) {
			memcpy(pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
				kernelPageDir + FIRST_KERNEL_PGDIR_ENT,
				NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
		}

		sPagingStructuresList.Add(this);

		release_spinlock(&sPagingStructuresListLock);
		restore_interrupts(state);
	}
}
Example #17
void
M68KPagingStructures040::Init(page_root_entry* virtualPageRoot,
	phys_addr_t physicalPageRoot, page_root_entry* kernelPageRoot)
{
	pgroot_virt = virtualPageRoot;
	pgroot_phys = physicalPageRoot;

	// zero out the bottom portion of the new pgroot
	memset(pgroot_virt + FIRST_USER_PGROOT_ENT, 0,
		NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));

	// insert this new map into the map list
	{
		int state = disable_interrupts();
		acquire_spinlock(&sPagingStructuresListLock);

		// copy the top portion of the page dir from the kernel page dir
		if (kernelPageRoot != NULL) {
			memcpy(pgroot_virt + FIRST_KERNEL_PGROOT_ENT,
				kernelPageRoot + FIRST_KERNEL_PGROOT_ENT,
				NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));
		}

		sPagingStructuresList.Add(this);

		release_spinlock(&sPagingStructuresListLock);
		restore_interrupts(state);
	}
}
Example #18
0
status_t
delete_timer(timer_id id)
{
	cpu_status cpu;
	bool deleted;
	int i;

	deleted = false;
	
	cpu = disable_interrupts();
	acquire_spinlock(&sTimerSpinlock);
	
	for (i = 0; i < sTimerCount; i++) {
		if (sTimerData[i].id == id) {
			if (i != (sTimerCount - 1) && sTimerCount != 1) {
				memcpy(&sTimerData[i], &sTimerData[sTimerCount - 1], sizeof(struct timer_info));
			}
			sTimerCount--;
			deleted = true;
			break;
		}
	}
	
	release_spinlock(&sTimerSpinlock);
	restore_interrupts(cpu);

	if (!deleted)
		return B_ERROR;
		
	release_sem_etc(sTimerSem, 1, B_DO_NOT_RESCHEDULE);
	return B_OK;
}
Example #19
0
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	status_t status = B_OK;
	cpu_status state;
	int32 i, count;

	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	state = disable_interrupts();
	acquire_spinlock(&sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			status = B_NOT_ALLOWED;
	}

	if (status == B_OK)
		gCPU[cpu].disabled = !enabled;

	release_spinlock(&sSetCpuLock);
	restore_interrupts(state);
	return status;
}
Example #20
0
int create_global_semaphore( int pid, int capacity )
{
	int i;
	int sem_id = -1;

	if ( capacity < 0 ) return -1;

	acquire_spinlock( & global_sems_lock );
	
		for (  i = 0; i < GLOBAL_SEM_COUNT; i++)
		 if ( global_sems[i].sem_id == -1 )
		 {
		 	global_sems[i].sem_id 	= i;
		 	global_sems[i].capacity = capacity;
		 	global_sems[i].count 	= 0;
		 	global_sems[i].pid 		= pid;
		 	global_sems[i].waiting_list 	= NULL;
			sem_id = i;
			break;
		 }

	release_spinlock( & global_sems_lock );

	return sem_id;
}
Example #21
0
int create_local_semaphore( struct process *proc, int capacity )
{
	int i;
	int sem_id = -1;

	if ( capacity < 0 ) return -1;

	acquire_spinlock( & (proc->sems_lock) );

		for (  i = 0; i < LOCAL_SEM_COUNT; i++)
		 if ( proc->sems[i].sem_id == -1 )
		 {
		 	proc->sems[i].sem_id 	= i;
		 	proc->sems[i].capacity 	= capacity;
		 	proc->sems[i].count 	= 0;
		 	proc->sems[i].pid = proc->pid;	// Hmm, redundant. tid rather?
		 	proc->sems[i].waiting_list = NULL;
			sem_id = i;
			break;
		 }

	release_spinlock( & (proc->sems_lock) );
		
	return sem_id;
}
Example #22
0
int appf_warm_reset(void)
{
    int ret;

    struct appf_cpu *cpu;
    struct appf_cluster *cluster;
    int cpu_index, cluster_index;

	writel(0,0xC810001C);
	writel(0,0xC8100020);
	writel(0,0xC110990C);

	//**********************//
	//writel(readl(0xDA00434c)&(~(0x1<<29)),0xDA00434c);// Disable GPO filter for 32k
//	writel(0x3600000,0xDA00434c);// Disable GPO filter for 32k
	//**********************//
    
    cpu_index = appf_platform_get_cpu_index();
    cluster_index = appf_platform_get_cluster_index();

    cluster = main_table.cluster_table + cluster_index;
    cpu = cluster->cpu_table + cpu_index;

    get_spinlock(cpu_index, cluster->context->lock);

    appf_platform_restore_context(cluster, cpu);
	
    ret = appf_platform_leave_cstate(cpu_index, cpu, cluster);
    
    release_spinlock(cpu_index, cluster->context->lock);

	pwr_delay(10);
		
    return ret;
}
Example #23
0
/** Returns the number of objects in the queue */
size_t aqueue_count( struct aqueue *aq )
{
	size_t rc;
	acquire_spinlock( &(aq->lock) );
	rc = (aq->last_position - aq->position);
	release_spinlock( &(aq->lock) );
	return rc;
}
Example #24
0
int irq_ack( struct thread *t, int irq, int status )
{
	acquire_spinlock( &irq_lock );

	dmesg("%!ACK with status %i for %i\n", status, irq );

		if ( status == 0 )
		{
			unmask_irq( irq );
			release_spinlock( &irq_lock );
			return 0;
		}

	dmesg("%!Unhandled IRQ %i\n", irq );
	release_spinlock( &irq_lock );
	return 0;
}
Example #25
0
/*free a block.*/
void kcache_free(struct kcache_struct *pcs,void *ptr)
{
	size_t p=(size_t)ptr;
	size_t i=(p-(size_t)pcs->c_bpool)/pcs->c_bsize;
	get_spinlock(pcs->c_bmaplck);
	test1andset(i,pcs->c_bmap);
	release_spinlock(pcs->c_bmaplck);
}
Example #26
0
void
smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2,
	addr_t data3, void* dataPointer, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, "
		"data3 0x%lx, ptr %p, flags 0x%lx\n", targetCPU, message, data, data2,
		data3, dataPointer, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();
		if (targetCPU == currentCPU) {
			return_free_message(msg);
			restore_interrupts(state);
			return; // nope, can't do that
		}

		// set up the message
		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = dataPointer;
		msg->ref_count = 1;
		msg->flags = flags;
		msg->done = false;

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&sCPUMessageSpinlock[targetCPU]);
		msg->next = sCPUMessages[targetCPU];
		sCPUMessages[targetCPU] = msg;
		release_spinlock(&sCPUMessageSpinlock[targetCPU]);

		arch_smp_send_ici(targetCPU);

		if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
			// wait for the other cpu to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
			while (msg->done == false) {
				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}
}
Example #27
0
static void return_free_message(struct smp_msg *msg)
{
    //	dprintf("return_free_message: returning msg 0x%x\n", msg);
    acquire_spinlock_nocheck(&free_msg_spinlock);
    msg->next = free_msgs;
    free_msgs = msg;
    free_msg_count++;
    release_spinlock(&free_msg_spinlock);
}
Example #28
0
int install_interrupt(uint8_t num, isr_t function) {
    acquire_spinlock(&idt_dispatch_table_lock);
    if (idt_dispatch_table[num] != NULL) {
        return -1;
    }
    idt_dispatch_table[num] = function;
    release_spinlock(&idt_dispatch_table_lock);
    return 0;
}
Example #29
0
int destroy_global_semaphore( int pid, int sem_id )
{
	struct sem_link *sl		= NULL;
	struct sem_link *tmp	= NULL;
	struct process *proc	= NULL;
	struct thread *tr		= NULL;

	if ( sem_id < 0 ) return -1;
	if ( sem_id >= GLOBAL_SEM_COUNT ) return -1;
	
	acquire_spinlock( & global_sems_lock );

		if ( ( global_sems[ sem_id ].sem_id != sem_id ) ||
			 ( global_sems[ sem_id ].pid 	!= pid ) )
		{
			// Invalid or not allowed.
			release_spinlock( & global_sems_lock );
			return -1;
		}
	
		// Tell the waiting guys to go away.
		sl = global_sems[ sem_id ].waiting_list;
		while ( sl != NULL )
		{
			tmp = sl;
			
			proc = checkout_process( sl->pid, WRITER );
			if ( proc != NULL ) 
			{
				tr = find_thread_with_id( proc, sl->tid );
				if ( tr != NULL ) 
						set_thread_state( tr, THREAD_RUNNING );
				commit_process( proc );
			}
			sl = sl->next;
			free( tmp );
		}

	 	global_sems[ sem_id ].waiting_list = NULL;
		global_sems[ sem_id ].sem_id 	= -1;	// DELETED!

	release_spinlock( & global_sems_lock );
	return 0;
}
Example #30
0
/** Unlocks all the CPUs after a sched_lock_all call. */
void sched_unlock_all()
{
	int i;
	for ( i = 0; i < cpu_count; i++ )
	{
		sched_unlock( i );
	}
	release_spinlock( & sched_global_lock );

}
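For context, a sketch of what the matching sched_lock_all() might look like, assuming it simply takes the locks in the reverse of the release order above (the real implementation is not shown in this listing):

/* Hypothetical counterpart, assumed for illustration only. */
void sched_lock_all()
{
	int i;
	acquire_spinlock( & sched_global_lock );
	for ( i = 0; i < cpu_count; i++ )
	{
		sched_lock( i );
	}
}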