Example #1
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
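The examples that follow all funnel through the same primitive: wait_queue_init prepares a queue that threads can later block on and be woken from, and each higher-level object (semaphore, mutex, event, rwlock) embeds one. As a rough userspace analogue of that contract (a minimal sketch using pthreads; the wq_* names are illustrative, not XNU's API):

#include <pthread.h>

/* Illustrative userspace analogue of a kernel wait queue. */
struct wq {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
};

static void wq_init(struct wq *q)
{
	pthread_mutex_init(&q->lock, NULL);
	pthread_cond_init(&q->cond, NULL);
}

/* Block until the caller-supplied predicate holds. */
static void wq_wait(struct wq *q, int (*ready)(void *), void *arg)
{
	pthread_mutex_lock(&q->lock);
	while (!ready(arg))
		pthread_cond_wait(&q->cond, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

/* Wake one waiter, comparable to wakeup_one() in Example #23. */
static void wq_wake_one(struct wq *q)
{
	pthread_mutex_lock(&q->lock);
	pthread_cond_signal(&q->cond);
	pthread_mutex_unlock(&q->lock);
}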
Example #2
/* TODO: put lock name as argument
 * and reconstruct each wait_queue name */
error_t rwlock_init(struct rwlock_s *rwlock)
{
	//spinlock_init(&rwlock->lock,"RWLOCK");
	mcs_lock_init(&rwlock->lock, "RWLOCK");
	rwlock->signature = RWLOCK_ID;
	rwlock->count     = 0;
	wait_queue_init(&rwlock->rd_wait_queue, "RWLOCK: Readers");
	wait_queue_init(&rwlock->wr_wait_queue, "RWLOCK: Writers");

	return 0;
}
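One way to resolve the TODO above is to take the lock name as an argument and derive both queue names from it. A minimal sketch, assuming wait_queue_init copies the string it is given (if it only stores the pointer, the buffers below would have to outlive the lock):

#include <stdio.h>

error_t rwlock_init_named(struct rwlock_s *rwlock, const char *name)
{
	char rd_name[32], wr_name[32];

	/* Derive the per-queue names from the caller-supplied lock name. */
	snprintf(rd_name, sizeof(rd_name), "%s: Readers", name);
	snprintf(wr_name, sizeof(wr_name), "%s: Writers", name);

	mcs_lock_init(&rwlock->lock, name);
	rwlock->signature = RWLOCK_ID;
	rwlock->count     = 0;
	wait_queue_init(&rwlock->rd_wait_queue, rd_name);
	wait_queue_init(&rwlock->wr_wait_queue, wr_name);

	return 0;
}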
Example #3
/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    wait_queue_init(&m->queue);
    m->recursion = 0;
    blocker_init(&m->blocker);
    corelock_init(&m->cl);
}
Example #4
void sem_init_with_address(semaphore_t *sem, uintptr_t addr, int value) {
	sem->value = value;
	sem->addr = addr;
	sem->valid = 1;
	set_sem_count(sem, 0);
	wait_queue_init(&(sem->wait_queue));
}
Example #5
File: sem.c Project: spinlock/ucore
void
sem_init(semaphore_t *sem, int value) {
    sem->value = value;
    sem->valid = 1;
    set_sem_count(sem, 0);
    wait_queue_init(&(sem->wait_queue));
}
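sem_init above only records state; the blocking and waking happen in the wait/post paths, which use the embedded wait queue. A userspace sketch of the counting-semaphore contract (pthread-based; the csem_* names are illustrative, not ucore's code):

#include <pthread.h>

struct csem {
	int             value;
	pthread_mutex_t lock;
	pthread_cond_t  nonzero;
};

static void csem_init(struct csem *s, int value)
{
	s->value = value;
	pthread_mutex_init(&s->lock, NULL);
	pthread_cond_init(&s->nonzero, NULL);
}

static void csem_wait(struct csem *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->value == 0)              /* sleep on the "wait queue" */
		pthread_cond_wait(&s->nonzero, &s->lock);
	s->value--;
	pthread_mutex_unlock(&s->lock);
}

static void csem_post(struct csem *s)
{
	pthread_mutex_lock(&s->lock);
	s->value++;
	pthread_cond_signal(&s->nonzero);  /* wake one waiter */
	pthread_mutex_unlock(&s->lock);
}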
Example #6
// swap_init - init swap fs, two swap lists, alloc memory & init for swap_entry record array mem_map
//           - init the hash list.
void
swap_init(void) {
    swapfs_init();
    swap_list_init(&active_list);
    swap_list_init(&inactive_list);

    if (!(512 <= max_swap_offset && max_swap_offset < MAX_SWAP_OFFSET_LIMIT)) {
        panic("bad max_swap_offset %08x.\n", max_swap_offset);
    }

    mem_map = kmalloc(sizeof(short) * max_swap_offset);
    assert(mem_map != NULL);

    size_t offset;
    for (offset = 0; offset < max_swap_offset; offset ++) {
        mem_map[offset] = SWAP_UNUSED;
    }

    int i;
    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        list_init(hash_list + i);
    }

    sem_init(&swap_in_sem, 1);

    check_swap();
    check_mm_swap();
    check_mm_shm_swap();

    wait_queue_init(&kswapd_done);
    swap_init_ok = 1;
}
Example #7
/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t			task,
	semaphore_t		*new_semaphore,
	int				policy,
	int				value)
{
	semaphore_t		 s = SEMAPHORE_NULL;

	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	s = (semaphore_t) zalloc (semaphore_zone);

	if (s == SEMAPHORE_NULL) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE; 
	}

	wait_queue_init(&s->wait_queue, policy); /* also inits lock */
	s->count = value;
	s->ref_count = 1;

	/*
	 *  Create and initialize the semaphore port
	 */
	s->port	= ipc_port_alloc_kernel();
	if (s->port == IP_NULL) {	
		/* This will deallocate the semaphore */	
		semaphore_dereference(s);
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 *
	 *  Associate the task with the new semaphore by having the
	 *  semaphore's task pointer point to the owning task's structure.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	s->owner = task;
	s->active = TRUE;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
Example #8
/* Initialize the semaphore object.
 * max = maximum up count the semaphore may assume (max >= 1)
 * start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    wait_queue_init(&s->queue);
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
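Per the KERNEL_ASSERT, max must be at least 1 and start must lie in [0, max]. A usage sketch (the caller and variable names are hypothetical):

static struct semaphore request_sem;

static void queue_setup(void)
{
    /* Binary semaphore, initially available: max = 1, start = 1. */
    semaphore_init(&request_sem, 1, 1);

    /* A 4-slot resource pool starting empty would be
     * semaphore_init(&pool_sem, 4, 0); start = 5 would trip the assert. */
}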
Example #9
File: sem.c Project: TySag/project
void sem_init(semaphore_t * sem, int value)
{
	sem->value = value;
	sem->valid = 1;
#ifdef UCONFIG_BIONIC_LIBC
	sem->addr = 0;		// not for futex
#endif //UCONFIG_BIONIC_LIBC
	set_sem_count(sem, 0);
	wait_queue_init(&(sem->wait_queue));
}
Example #10
File: mutex.c Project: dankex/lk
/**
 * @brief  Initialize a mutex_t
 */
void mutex_init(mutex_t *m)
{
#if MUTEX_CHECK
	m->magic = MUTEX_MAGIC;
	m->holder = 0; // In good code, release is only called if acquire was successful
#endif

	m->count = 0;
	wait_queue_init(&m->wait);
}
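The MUTEX_CHECK block stamps the object with a magic number so that later operations can detect use-before-init and memory corruption; the same pattern appears as EVENT_CHECK in Example #12. In its general form (a self-contained sketch; names and the magic value are illustrative):

#include <assert.h>
#include <stdint.h>

#define OBJ_MAGIC 0x4f424a4du  /* arbitrary illustrative value */

struct checked_obj {
	uint32_t magic;
	/* ... real fields ... */
};

static void checked_obj_init(struct checked_obj *o)
{
	o->magic = OBJ_MAGIC;
}

static void checked_obj_use(struct checked_obj *o)
{
	/* Fires on use-before-init, stale reuse, or overwrite. */
	assert(o->magic == OBJ_MAGIC);
	/* ... operate on o ... */
}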
Example #11
error_t thread_dup(struct task_s *task,
                   struct thread_s *dst,
                   struct cpu_s *dst_cpu,
                   struct cluster_s *dst_clstr,
                   struct thread_s *src)
{
    register uint_t sched_policy;
    register uint_t cpu_lid;
    register uint_t cid;
    struct page_s *page;

    sched_policy = sched_getpolicy(src);
    cpu_lid      = dst_cpu->lid;
    cid          = dst_clstr->id;
    page         = dst->info.page;

    // Duplicate
    page_copy(page, src->info.page);

    // Initialize dst thread
    spinlock_init(&dst->lock, "Thread");
    dst->flags                           = 0;
    thread_clear_joinable(dst);
    dst->locks_count                     = 0;
    dst->ticks_nr                        = 0;
    thread_set_current_cpu(dst, dst_cpu);
    sched_setpolicy(dst, sched_policy);
    dst->task                            = task;
    dst->type                            = PTHREAD;
    dst->info.sched_nr                   = 0;
    dst->info.ppm_last_cid               = cid;
    dst->info.migration_cntr             = 0;
    dst->info.migration_fail_cntr        = 0;
    dst->info.tm_exec                    = 0;
    dst->info.tm_tmp                     = 0;
    dst->info.tm_usr                     = 0;
    dst->info.tm_sys                     = 0;
    dst->info.tm_sleep                   = 0;
    dst->info.tm_wait                    = 0;
    signal_init(dst);
    dst->info.join                       = NULL;
    wait_queue_init(&dst->info.wait_queue, "Join/Exit Sync");
    dst->info.attr.sched_policy          = sched_policy;
    dst->info.attr.cid                   = cid;
    dst->info.attr.cpu_lid               = cpu_lid;
    dst->info.attr.cpu_gid               = dst_cpu->gid;
    dst->info.attr.tid                   = (uint_t) dst;
    dst->info.attr.pid                   = task->pid;
    dst->info.kstack_addr                = (uint_t*)dst;
    dst->info.page                       = page;
    dst->signature                       = THREAD_ID;

    return 0;
}
Example #12
/**
 * @brief  Initialize an event object
 *
 * @param e        Event object to initialize
 * @param initial  Initial value for "signaled" state
 * @param flags    0 or EVENT_FLAG_AUTOUNSIGNAL
 */
void event_init(event_t *e, bool initial, uint flags)
{
#if EVENT_CHECK
//	ASSERT(e->magic != EVENT_MAGIC);
#endif

	e->magic = EVENT_MAGIC;
	e->signalled = initial;
	e->flags = flags;
	wait_queue_init(&e->wait);
}
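With EVENT_FLAG_AUTOUNSIGNAL, a successful wakeup consumes the signal, so exactly one waiter proceeds per signal; without it, the event stays signalled until explicitly cleared. A pthread-based sketch of the auto-unsignal case (the auto_event_* names are illustrative, not lk's API):

#include <pthread.h>
#include <stdbool.h>

struct auto_event {
	bool            signalled;
	pthread_mutex_t lock;
	pthread_cond_t  cond;
};

static void auto_event_init(struct auto_event *e, bool initial)
{
	e->signalled = initial;
	pthread_mutex_init(&e->lock, NULL);
	pthread_cond_init(&e->cond, NULL);
}

static void auto_event_wait(struct auto_event *e)
{
	pthread_mutex_lock(&e->lock);
	while (!e->signalled)
		pthread_cond_wait(&e->cond, &e->lock);
	e->signalled = false;  /* auto-unsignal: this wakeup consumes the event */
	pthread_mutex_unlock(&e->lock);
}

static void auto_event_signal(struct auto_event *e)
{
	pthread_mutex_lock(&e->lock);
	e->signalled = true;
	pthread_cond_signal(&e->cond);  /* wake at most one waiter */
	pthread_mutex_unlock(&e->lock);
}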
Example #13
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	kern_return_t			result;
	thread_t			thread;
	int				i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[INTERNAL_CALL_COUNT];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #14
static void
stdin_device_init(struct device *dev) {
    memset(dev, 0, sizeof(*dev));
    dev->d_blocks = 0;
    dev->d_blocksize = 1;
    dev->d_open = stdin_open;
    dev->d_close = stdin_close;
    dev->d_io = stdin_io;
    dev->d_ioctl = stdin_ioctl;

    p_rpos = p_wpos = 0;
    wait_queue_init(wait_queue);
}
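The memset-then-assign sequence can also be written with C99 designated initializers, which zero every member not named, making the memset implicit. A sketch against the same struct device fields used above (the _alt suffix marks it as a hypothetical variant, not ucore's code):

static void
stdin_device_init_alt(struct device *dev) {
    /* Members not listed are zero-initialized, replacing the memset. */
    *dev = (struct device) {
        .d_blocks    = 0,
        .d_blocksize = 1,
        .d_open      = stdin_open,
        .d_close     = stdin_close,
        .d_io        = stdin_io,
        .d_ioctl     = stdin_ioctl,
    };

    p_rpos = p_wpos = 0;
    wait_queue_init(wait_queue);
}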
Example #15
/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
	ipc_mqueue_t	mqueue,
	boolean_t	is_set)
{
	if (is_set) {
		wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST);
	} else {
		wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_fullwaiters = FALSE;
	}
}
Example #16
/*
 *	Routine:		   wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the Mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) zalloc(_wait_queue_zone);
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_zone, wq);
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
Example #17
/*
 *	Routine:		   wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the Mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
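Examples #16 and #17 are the same routine built over two allocators (a dedicated zone vs. general-purpose kalloc); both follow the allocate, initialize, roll-back-on-failure shape. Stripped of the Mach types, the idiom is (widget names are illustrative):

#include <stdlib.h>

struct widget { int ready; };

static int widget_init(struct widget *w)
{
	w->ready = 1;
	return 0;  /* nonzero would mean the init step failed */
}

struct widget *widget_alloc(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (w != NULL && widget_init(w) != 0) {
		free(w);  /* undo the allocation when init fails */
		w = NULL;
	}
	return w;
}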
Example #18
static void
wait_queues_init(void)
{
	uint32_t	i, whsize;
	kern_return_t	kret;

	whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
	num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;

	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues, whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

	if (kret != KERN_SUCCESS || wait_queues == NULL)
		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

	for (i = 0; i < num_wait_queues; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
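Once the table is sized and every slot initialized, a blocking event is mapped to a slot by hashing its address. A sketch of that index step (the shift, the names, and the power-of-two table size are assumptions; XNU's actual wait_hash differs):

#include <stdint.h>

/* Map an event address to a wait-queue slot; assumes nqueues is a
 * power of two so the mask yields a valid index. */
static inline uint32_t wait_hash(const void *event, uint32_t nqueues)
{
	uintptr_t x = (uintptr_t)event;

	x >>= 4;  /* drop low alignment bits that carry little entropy */
	return (uint32_t)(x & (nqueues - 1));
}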
Example #19
/*
 *	Routine:        wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_prepost = TRUE;
	else 
		wqset->wqs_wait_queue.wq_prepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	queue_init(&wqset->wqs_preposts);
	return KERN_SUCCESS;
}
Example #20
static void
thread_call_group_setup(
		thread_call_group_t 		group, 
		thread_call_priority_t		pri,
		uint32_t			target_thread_count,
		boolean_t			parallel)
{
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
	timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

	group->target_thread_count = target_thread_count;
	group->pri = thread_call_priority_to_sched_pri(pri);

	group->sched_call = sched_call_thread; 
	if (parallel) {
		group->flags |= TCG_PARALLEL;
		group->sched_call = NULL;
	} 
}
Example #21
void sem_init(semaphore_t *sem, int value)
{
	sem->value = value;
	wait_queue_init(&(sem->wait_queue));
}
Example #22
void
event_box_init(event_t *event_box) {
    wait_queue_init(&(event_box->wait_queue));
}
Example #23
void* thread_idle(void *arg)
{
	extern uint_t __ktext_start;
	register uint_t id;
	register uint_t cpu_nr;
	register struct thread_s *this;
	register struct cpu_s *cpu;
	struct thread_s *thread;
	register struct page_s *reserved_pg;
	register uint_t reserved;
	kthread_args_t *args;
	bool_t isBSCPU;
	uint_t tm_now;
	uint_t count;
	error_t err;

	this    = current_thread;
	cpu     = current_cpu;
	id      = cpu->gid;
	cpu_nr  = arch_onln_cpu_nr();
	args    = (kthread_args_t*) arg;
	isBSCPU = (cpu == cpu->cluster->bscpu);

	cpu_trace_write(cpu, thread_idle_func);

	if(isBSCPU)
		pmm_tlb_flush_vaddr((vma_t)&__ktext_start, PMM_UNKNOWN);

	cpu_set_state(cpu, CPU_ACTIVE);
	rt_timer_read(&tm_now);
	this->info.tm_born = tm_now;      
	this->info.tm_tmp  = tm_now;
	// Reset stats
	cpu_time_reset(cpu);

	mcs_barrier_wait(&boot_sync);

	printk(INFO, "INFO: Starting Thread Idle On Core %d\tOK\n", cpu->gid);

	if(isBSCPU && (id == args->val[2]))
	{
		for(reserved = args->val[0]; reserved < args->val[1]; reserved += PMM_PAGE_SIZE)
		{
			reserved_pg = ppm_ppn2page(&cpu->cluster->ppm, reserved >> PMM_PAGE_SHIFT);
			page_state_set(reserved_pg, PGINIT);       
			ppm_free_pages(reserved_pg);
		}
	}

	thread = kthread_create(this->task, 
				&thread_event_manager, 
				NULL, 
				cpu->cluster->id, 
				cpu->lid);

	if(thread == NULL)
		PANIC("Failed to create default events handler Thread for CPU %d\n", id);

	thread->task   = this->task;
	cpu->event_mgr = thread;
	wait_queue_init(&thread->info.wait_queue, "Events");

	err = sched_register(thread);
	assert(err == 0);

	sched_add_created(thread);

	if(isBSCPU)
	{
		dqdt_update();
#if 0
		thread = kthread_create(this->task, 
					&cluster_manager_thread,
					cpu->cluster, 
					cpu->cluster->id, 
					cpu->lid);

		if(thread == NULL)
		{
			PANIC("Failed to create cluster manager thread, cid %d, cpu %d\n", 
			      cpu->cluster->id, 
			      cpu->gid);
		}

		thread->task          = this->task;
		cpu->cluster->manager = thread;
		wait_queue_init(&thread->info.wait_queue, "Cluster-Mgr");

		err = sched_register(thread);
		assert(err == 0);

		sched_add_created(thread);

#endif

		if(clusters_tbl[cpu->cluster->id].flags & CLUSTER_IO)
		{
			thread = kthread_create(this->task, 
						&kvfsd, 
						NULL, 
						cpu->cluster->id, 
						cpu->lid);
       
			if(thread == NULL)
			{
				PANIC("Failed to create KVFSD on cluster %d, cpu %d\n", 
				      cpu->cluster->id, 
				      cpu->gid);
			}

			thread->task  = this->task;
			wait_queue_init(&thread->info.wait_queue, "KVFSD");
			err           = sched_register(thread);
			assert(err == 0);
			sched_add_created(thread);
			printk(INFO,"INFO: kvfsd has been created\n");
		}
	}

	cpu_set_state(cpu,CPU_IDLE);

	while (true)
	{
		cpu_disable_all_irq(NULL);
     
		if((event_is_pending(&cpu->re_listner)) || (event_is_pending(&cpu->le_listner)))
		{
			wakeup_one(&cpu->event_mgr->info.wait_queue, WAIT_ANY);
		}
 
		sched_idle(this);

		count = sched_runnable_count(&cpu->scheduler);

		cpu_enable_all_irq(NULL);

		if(count != 0)
			sched_yield(this);
     
		//arch_set_power_state(cpu, ARCH_PWR_IDLE);
	}

	return NULL;
}
Example #24
error_t thread_create(struct task_s *task, pthread_attr_t *attr, struct thread_s **new_thread)
{
	kmem_req_t req;
	struct cluster_s *cluster;
	struct cpu_s *cpu;
	register struct thread_s *thread;
	struct page_s *page;

	//FIXME
	//cluster = cluster_cid2ptr(attr->cid);
	cluster = current_cluster;
	//cpu     = cpu_gid2ptr(attr->cpu_gid);
	cpu     = cpu_lid2ptr(attr->cpu_lid);

	// New thread resources allocation
	req.type  = KMEM_PAGE;
	req.size  = ARCH_THREAD_PAGE_ORDER;
	req.flags = AF_KERNEL | AF_ZERO | AF_REMOTE;
	req.ptr   = cluster;

#if CONFIG_THREAD_LOCAL_ALLOC
	req.ptr   = current_cluster;
#endif

	page      = kmem_alloc(&req);

	if(page == NULL) return EAGAIN;

	thread = (struct thread_s*) ppm_page2addr(page);

	thread_init(thread);

	// Initialize new thread
	thread_set_current_cpu(thread,cpu);

	sched_setpolicy(thread, attr->sched_policy);
	thread->task = task;
	thread->type = PTHREAD;

	if(attr->flags & PT_ATTR_DETACH)
		thread_clear_joinable(thread);
	else
		thread_set_joinable(thread);

	signal_init(thread);
	attr->tid = (uint_t)thread;
	attr->pid = task->pid;
	memcpy(&thread->info.attr, attr, sizeof(*attr));
	thread->info.page = page;
	thread->info.ppm_last_cid = attr->cid;

	wait_queue_init(&thread->info.wait_queue, "Join/Exit Sync");
	cpu_context_init(&thread->pws, thread); 

	// Set referenced thread pointer to new thread address
	*new_thread = thread;

	thread_dmsg(1, "%s: thread %x of task %u created on cluster %u by cpu %u\n",
		    __FUNCTION__, thread->info.attr.tid, thread->info.attr.pid,
		    thread->info.attr.cid, thread->info.attr.cpu_lid);

	return 0;
}
Example #25
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */      
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t 	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t 	size;
	int 		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE; 

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {	
		kfree(lock_set, size);
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set  = lock_set;
		ulock->holder	 = THREAD_NULL;
		ulock->blocked   = FALSE;
		ulock->unstable	 = FALSE;
		ulock->ho_wait	 = FALSE;
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
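The n_ulocks - 1 in the size computation exists because ulock_list is evidently a one-element trailing array (the pre-C99 "struct hack"). A C99 flexible array member expresses the same layout without the off-by-one adjustment; a sketch with illustrative types:

#include <stdlib.h>

struct ulock_x { int state; };

struct lock_set_x {
	int            n_ulocks;
	struct ulock_x ulock_list[];  /* C99 flexible array member */
};

static struct lock_set_x *lock_set_alloc_x(int n_ulocks)
{
	struct lock_set_x *ls =
	    malloc(sizeof(*ls) + sizeof(struct ulock_x) * (size_t)n_ulocks);

	if (ls != NULL)
		ls->n_ulocks = n_ulocks;  /* no (n - 1) correction needed */
	return ls;
}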
Example #26
int main()
{
	char buf[64], *v[SZ];
	struct alias *a;
	int n, i, r, skip_actions;
	FILE *f;
	pthread_t rv, ks;

	ui_init();
	ui_animation(0);

	f = fopen("commands.desc", "r");
	if(f == NULL) {
		ui_print("failed to open commands.desc\n");
		goto end;
	}

	// parse command list
	while(fgets(buf, 64, f)) {
		for(i = 0, v[i] = strtok(buf, " \n");
			 v[i]; v[++i] = strtok(0, " \n"));
		parse_line(i, v);
	}
	fclose(f);

	// init bin
	for(i = 0;; ) {
		a = get_alias("bin", i++);
		if(!a) break;
		ui_bin_add_tag(a->str);
	}
	ui_bin_add_tag("timeout");

	// init options tip
	ui_tip_update(1, "Press the corresponding key to perform that function:");
	ui_tip_update(3, "<S> Handler Speed");
	ui_tip_update(4, "<R> Reset");
	ui_tip_update(5, "<Q> Quit");

	// init io
	r = io_init();
	ptsname_r(r, buf, 64);
	ui_print("pts name is: %s (%d)\n", buf, r);

	// init wait queue
	wait_queue_init();

	// init receiver
	pthread_create(&rv, NULL, receiver, NULL);

	// init keyserver
	pthread_create(&ks, NULL, keyserver, NULL);

again:
	ui_print("\n\nnew routine:\n");
	skip_actions = 0;
	for(wait_queue_init(), i = 0;
	 i < lscount; i++) {
		if(skip_actions && (!strcmp(lines[i].argv[0], "send")
					  || !strcmp(lines[i].argv[0], "wait")))
			continue;
		if(exec_line(lines[i].argc, lines[i].argv) < 0)
			skip_actions = 1;
	}
	wait_queue_deinit();
	if(!stop) goto again;

end:
	ui_deinit();
	io_deinit();
}