Example #1
void init_scheduler()
{
	if(num_Thread==0)
	{
		th_t *mainThread, *scheduleThread;
		
		if((mainThread = thread_alloc()) == NULL)
			abort();
			
		if((scheduleThread = thread_alloc()) == NULL)
			abort();
		init_sigaction();	
		
		if((ready_queueHead = queueHead_alloc()) == NULL)
			abort();
			
		if((sched_queueHead = queueHead_alloc()) == NULL)
			abort();
			
		th_queue_init(ready_queueHead);
		th_queue_init(sched_queueHead);
		
		//th_queue_init(&kernel_queue);
		mainThread->mctx.status = TH_WAITING;
		mainThread->mctx.stackAddr = NULL;
		mainThread->tid = num_Thread++;
		scheduleThread->mctx.status = TH_SCHED;
		//scheduleThread->tid = num_kernel_thread++;
		main_kernel_id = scheduleThread->tid = getpid();
		num_kernel_thread++;
		

		if((scheduleThread->mctx.stackAddr = stack_alloc()) == NULL)
			abort();
			
		if((mainThread->mctx.stackAddr = stack_alloc()) == NULL)
			abort();
			
		//create machine context
		void (*fnptr)(void*) = (void(*)(void*))scheduler;
		mctx_create(&(scheduleThread->mctx), fnptr, NULL, scheduleThread->mctx.stackAddr, STACK_SIZE);

	//		mctx_create(&mctx_list[0],fnptr,NULL,mctx_list[0].stackAddr,STACK_SIZE);

		th_queue_insert(sched_queueHead, PRIORITY_SCHEDULER, scheduleThread);
		
		th_queue_insert(ready_queueHead, PRIORITY_NORMAL, mainThread);		
		
		init_mutex();	/* init_sigaction() was already called above */
		//th_queue_insert(&kernel_queue, PRIORITY_SCHEDULER, scheduleThread);
		switch_to_scheduler();	//jump to scheduler
	}
}
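/*
 * Usage sketch (not part of the original source): init_scheduler() is
 * invoked lazily from dispatch_to_scheduler() in Example #8 the first time
 * a thread is submitted, so a caller only allocates a thread and dispatches
 * it. Names follow the examples above; anything else is assumed.
 */
int demo_spawn(void)
{
	th_t *worker;

	if ((worker = thread_alloc()) == NULL)
		abort();

	/* the first call also bootstraps the scheduler (num_Thread == 0) */
	dispatch_to_scheduler(worker);
	return 0;
}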
Example #2
struct thread *thread_create(unsigned int flags, void *(*run)(void *), void *arg) {
	struct thread *t;
	int priority;

	/* check mutually exclusive flags */
	if ((flags & THREAD_FLAG_PRIORITY_LOWER)
			&& (flags & THREAD_FLAG_PRIORITY_HIGHER)) {
		return err_ptr(EINVAL);
	}

	if ((flags & THREAD_FLAG_NOTASK) && !(flags & THREAD_FLAG_SUSPENDED)) {
		return err_ptr(EINVAL);
	}

	/* check that a valid entry function was supplied */
	if (!run) {
		return err_ptr(EINVAL);
	}

	/* Calculate the thread's priority. It can be changed later with the
	 * thread_set_priority() function.
	 */
	priority = thread_priority_by_flags(flags);

	/* Below we work with thread instances, so we lock the scheduler
	 * (disable scheduling) so that our structures cannot be corrupted.
	 */
	sched_lock();
	{
		/* allocate memory */
		if (!(t = thread_alloc())) {
			t = err_ptr(ENOMEM);
			goto out_unlock;
		}

		/* initialize internal thread structure */
		thread_init(t, priority, run, arg);

		/* link with task if needed */
		if (!(flags & THREAD_FLAG_NOTASK)) {
			task_thread_register(task_self(), t);
		}

		thread_cancel_init(t);

		if (!(flags & THREAD_FLAG_SUSPENDED)) {
			thread_launch(t);
		}

		if (flags & THREAD_FLAG_DETACHED) {
			thread_detach(t);
		}

	}
out_unlock:
	sched_unlock();

	return t;
}
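/*
 * Hedged usage sketch: creating a detached, lower-priority worker with the
 * THREAD_FLAG_* constants checked above. The err() test for an err_ptr()
 * value is assumed from the example's error convention, not shown in it.
 */
static void *worker_run(void *arg)
{
	/* ... thread body ... */
	return NULL;
}

static int spawn_worker(void)
{
	struct thread *t;

	t = thread_create(THREAD_FLAG_DETACHED | THREAD_FLAG_PRIORITY_LOWER,
			worker_run, NULL);
	if (err(t)) {
		return err(t); /* EINVAL or ENOMEM, per the checks above */
	}
	return 0;
}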
Example #3
VALUE
rb_thread_alloc(VALUE klass)
{
    VALUE self = thread_alloc(klass);
    ruby_thread_init(self);
    return self;
}
/*
 * Returns a copy of the entire string with leading and trailing spaces
 * trimmed.
 */
static char *
trimline(struct replay_thread *thr, const char *str)
{
	size_t len;
	char *p;

	/* skip leading space */
	while (*str && *str == ' ')
		++str;

	/* seek to end of string */
	for (len = 0; str[len]; ++len)
		 /* nothing */ ;

	/* trim trailing space */
	while (len && str[len - 1] == ' ')
		--len;

	/* copy and return */
	if ((p = thread_alloc(thr, len + 1)) == NULL)
		return (NULL);
	memcpy(p, str, len);
	p[len] = '\0';
	return (p);
}
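/*
 * Illustration (assumed, not from the source): given "  GET /  ",
 * trimline() returns a thread-allocated copy containing "GET /".
 */
static void trimline_demo(struct replay_thread *thr)
{
	char *line;

	if ((line = trimline(thr, "  GET /  ")) == NULL)
		return;	/* thread_alloc() failed */
	/* use line ... */
}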
Example #5
int thread_new(void) {
	struct thread *thread = thread_alloc();

	if (!thread) {
		debug_printf("error: thread table full\n");
		return -1;
	}

	return thread->id;
}
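/*
 * Calling convention, sketched (illustration only): a non-negative return
 * is the new thread's id; -1 means the thread table is full.
 */
void thread_new_demo(void)
{
	int id = thread_new();

	if (id < 0) {
		/* no free slot; back off or report the failure */
		return;
	}
	/* use id ... */
}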
Example #6
static void
thread_init(void *dcontext)
{
    fuzz_pass_context_t *fp = thread_alloc(dcontext, sizeof(fuzz_pass_context_t),
                                           HEAPSTAT_MISC);
    memset(fp, 0, sizeof(fuzz_pass_context_t));
    fp->dcontext = dcontext;
    fp->thread_state = create_fault_state(dcontext);
    drmgr_set_tls_field(dcontext, tls_idx_fuzzer, (void *) fp);
}
void* thread_realloc(void* addr, size_t size)
{
    if (addr == NULL) {
        return thread_alloc(size);
    }
    alloc_block_header* blk = (alloc_block_header*)addr - 1;
    size_t new_size = ((size + BLOCK_ALIGNMENT-1) & ~(BLOCK_ALIGNMENT-1)) + sizeof(alloc_block_header);
    if (blk->size >= new_size) {
        return addr;
    }
    if (blk->size > MAX_BLOCK_SIZE) {
        alloc_block_header* new_blk = (alloc_block_header*)realloc(blk, new_size);
        if (new_blk == NULL) {
            return NULL;    /* the old block is still valid on failure */
        }
        new_blk->size = new_size;
        return (alloc_block_header*)new_blk + 1;
    } else {
        void* new_addr = thread_alloc(size);
        if (new_addr == NULL) {
            return NULL;
        }
        memcpy(new_addr, addr, blk->size - sizeof(alloc_block_header));
        thread_free(addr);
        return new_addr;
    }
}
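/*
 * The size computation above rounds the payload up to the allocator's
 * alignment with the usual power-of-two mask trick. A standalone check,
 * assuming the alignment is a power of two (16 here, for illustration):
 */
#include <assert.h>
#include <stddef.h>

#define DEMO_ALIGNMENT 16

static size_t demo_round_up(size_t size)
{
    return (size + DEMO_ALIGNMENT - 1) & ~(size_t)(DEMO_ALIGNMENT - 1);
}

int main(void)
{
    assert(demo_round_up(1)  == 16);
    assert(demo_round_up(16) == 16);    /* already aligned: unchanged */
    assert(demo_round_up(33) == 48);
    return 0;
}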
Example #8
void dispatch_to_scheduler(th_t* thread)
{
	int pri = PRIORITY_NORMAL;	/* default; `pri` was previously passed to th_queue_insert() uninitialized */
	int currentPri;
	th_queue_t* foundQueueTh;
	int flag = 1;
	
	if(num_Thread ==0)
	{
		init_scheduler();
	}
	
	thread->tid = num_Thread++;
	thread->mctx.status = TH_WAITING;
	th_queue_insert(ready_queueHead, pri, thread);
	printf("*** %d thread insert ***\n", num_Thread);
	
	//Choose kernel thread to be added
	if(num_kernel_thread < maxKernelThreads)
	{
		//Create a new kernel thread
		//Create scheduler stack for new kernel thread
		
		th_t *scheduleThread;
		int pid;
		if((scheduleThread = thread_alloc()) == NULL)
			abort();
		
		scheduleThread->mctx.status = TH_SCHED;
		pid = scheduleThread->tid = getpid()+1;
		scheduleThread->current_tid = 0;
		num_kernel_thread++;

		if((scheduleThread->mctx.stackAddr = stack_alloc()) == NULL)
			abort();
		th_queue_insert(sched_queueHead, PRIORITY_SCHEDULER, scheduleThread);
		
		rfork_thread(RFPROC | RFNOTEG|RFMEM, scheduleThread->mctx.stackAddr, (int(*)(void*))start_kernel_thread, pid);
	}
	return;
	/*
	if(pri > currentPri)
	{
		foundQueueTh->thread->mctx.status = TH_WAITING;
		currentTid = thread->tid;
		thread->mctx.status = TH_RUNNING;

		enable_timer();
		mctx_switch(&(foundQueueTh->thread->mctx), &(thread->mctx));
	}
	else
		switch_to_scheduler();
	*/
}
Example #9
static struct thd_info *
enter()
{
    /* Flag to avoid recursion while getting the thread id: */
    static int Prof_getting_id = 0;
    static int ident;
    register int *thread_id;
    register struct thd_info *thdp;

    if (!Prof_initialized) {
	prof_init();
    }

    if (Prof_getting_id) {
	return NULL;
    }
    Prof_getting_id = 1;
    thread_id = (int *) thread_alloc(&ident, sizeof(int));
    Prof_getting_id = 0;

    /* When initialized, the global variable referred to by `ident'
     * contains the thread number plus one.
     */
    if (*thread_id == 0) {
	/* first time called within this thread */
	if (Prof_nthreads < MAXTHREADS) {
	    *thread_id = ++Prof_nthreads;

	    /* initialize thread info structure */
	    thdp = &thread_info[*thread_id - 1];
	    thdp->thd_here = 0;
	    thdp->thd_funcs = NULL;
	    thdp->thd_stack = NULL;
	    mu_init(&thdp->thd_mu);
	} else {
	    thdp = NULL;
	}
    } else {
	thdp = &thread_info[*thread_id - 1];
    }

    if (thdp != NULL) {
	if (!thdp->thd_here) {
	    thdp->thd_here = 1;
	    mu_lock(&thdp->thd_mu);
	} else {
	    thdp = NULL;
	}
    }

    return thdp;
}
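/*
 * Sketch of the counterpart the profiler would need (assumed; not in the
 * source): enter() marks the thread busy and takes thd_mu, so the matching
 * exit path presumably clears thd_here and releases the mutex. mu_unlock()
 * is assumed to mirror the mu_lock()/mu_init() calls above.
 */
static void
leave(struct thd_info *thdp)
{
    if (thdp != NULL) {
	thdp->thd_here = 0;
	mu_unlock(&thdp->thd_mu);
    }
}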
Example #10
struct thread *thread_send(struct thread *image, pid_t target, portid_t port, struct msg *msg) {
	struct process *p_targ;
	struct thread *new_image;

	/* find target process */
	p_targ = process_get(target);

	/* check process */
	if (!p_targ || !p_targ->entry) {
		return image;
	}

	/* create new thread */
	new_image = thread_alloc();
	thread_bind(new_image, p_targ);

	new_image->ds      = 0x23;
	new_image->cs      = 0x1B;
	new_image->ss      = 0x23;
	new_image->eflags  = 0;
	new_image->useresp = new_image->stack + SEGSZ;
	new_image->proc    = p_targ;
	new_image->eip     = p_targ->entry;

	/* set up registers in new thread */
	new_image->ebx     = 0;
	new_image->ecx     = (msg) ? msg->count : 0;
	new_image->edx     = port;
	new_image->esi     = (image) ? image->proc->pid : 0;
	new_image->edi     = 0;
	new_image->msg     = msg;

	/* set new thread's user id */
	new_image->user = (!image || p_targ->user) ? p_targ->user : image->user;

	/* insert new thread into scheduler */
	schedule_insert(new_image);

	/* return new thread */
	return new_image;
}
thread_t *
thread_attach (void)
{
  thread_t *thr;
  int rc;

  thr = thread_alloc ();
  thr->thr_stack_size = (unsigned long) -1;
  thr->thr_attached = 1;
  if (thr->thr_cv == NULL)
    goto failed;

  *((pthread_t *) thr->thr_handle) = pthread_self ();

  rc = pthread_setspecific (_key_current, thr);
  CKRET (rc);

  /* Store the context so we can easily restart a dead thread */
  setjmp (thr->thr_init_context);

  thr->thr_status = RUNNING;
  _thread_init_attributes (thr);
  thr->thr_stack_base = 0;

  return thr;

failed:
  if (thr->thr_sem)
    semaphore_free (thr->thr_sem);
  if (thr->thr_schedule_sem)
    semaphore_free (thr->thr_schedule_sem);
  if (thr->thr_handle)
    dk_free (thr->thr_handle, sizeof (pthread_t));
  dk_free (thr, sizeof (thread_t));
  return NULL;
}
thread_t *
thread_create (
    thread_init_func initial_function,
    unsigned long stack_size,
    void *initial_argument)
{
  thread_t *thr;
  int rc;

  assert (_main_thread != NULL);

  if (stack_size == 0)
    stack_size = THREAD_STACK_SIZE;

#if (SIZEOF_VOID_P == 8)
  stack_size *= 2;
#endif
#if defined (__x86_64 ) && defined (SOLARIS)
  /*GK: the LDAP on that platform requires that */
  stack_size *= 2;
#endif
#ifdef HPUX_ITANIUM64
  stack_size += 8 * 8192;
#endif

  stack_size = ((stack_size / 8192) + 1) * 8192;

#if defined (PTHREAD_STACK_MIN)
  if (stack_size < PTHREAD_STACK_MIN)
    {
      stack_size = PTHREAD_STACK_MIN;
    }
#endif
  /* Any free threads with the right stack size? */
  Q_LOCK ();
  for (thr = (thread_t *) _deadq.thq_head.thr_next;
       thr != (thread_t *) &_deadq.thq_head;
       thr = (thread_t *) thr->thr_hdr.thr_next)
    {
      /* if (thr->thr_stack_size >= stack_size) */
	break;
    }
  Q_UNLOCK ();

  /* No free threads, create a new one */
  if (thr == (thread_t *) &_deadq.thq_head)
    {
#ifndef OLD_PTHREADS
      size_t os_stack_size = stack_size;
#endif
      thr = thread_alloc ();
      thr->thr_initial_function = initial_function;
      thr->thr_initial_argument = initial_argument;
      thr->thr_stack_size = stack_size;
      if (thr->thr_cv == NULL)
	goto failed;

#ifdef HPUX_ITANIUM64
      if (stack_size > PTHREAD_STACK_MIN)
        {
	  size_t s, rses;
          pthread_attr_getstacksize (&_thread_attr, &s);
	  pthread_attr_getrsestacksize_np (&_thread_attr, &rses);
	  log_error ("default rses=%d stack=%d : %m", rses,s);
	}
#endif


#ifndef OLD_PTHREADS
# if  defined(HAVE_PTHREAD_ATTR_SETSTACKSIZE)
      rc = pthread_attr_setstacksize (&_thread_attr, stack_size);
      if (rc)
	{
          log_error ("Failed setting the OS thread stack size to %d : %m", stack_size);
	}
# endif

#if defined(HAVE_PTHREAD_ATTR_GETSTACKSIZE)
      if (0 == pthread_attr_getstacksize (&_thread_attr, &os_stack_size))
	{
	  if (os_stack_size > 4 * 8192)
	    stack_size = thr->thr_stack_size = ((unsigned long) os_stack_size) - 4 * 8192;
	}
#endif
#ifdef HPUX_ITANIUM64
      if (stack_size > PTHREAD_STACK_MIN)
        {
	  size_t rsestack_size = stack_size / 2;
          rc = pthread_attr_setrsestacksize_np (&_thread_attr, rsestack_size);
	  if (rc)
	    {
	      log_error ("Failed setting the OS thread 'rse' stack size to %d (plain stack size set to %d) : %m", rsestack_size, stack_size);
	    }
	  thr->thr_stack_size /= 2;
	}
#endif

      rc = pthread_create ((pthread_t *) thr->thr_handle, &_thread_attr,
	  _thread_boot, thr);
      CKRET (rc);

      /* rc = pthread_detach (*(pthread_t *) thr->thr_handle); */
      /* CKRET (rc); */

#else /* OLD_PTHREAD */
      rc = pthread_attr_setstacksize (&_thread_attr, stack_size);
      CKRET (rc);

      rc = pthread_create ((pthread_t *) thr->thr_handle, _thread_attr,
	  _thread_boot, thr);
      CKRET (rc);

      /* rc = pthread_detach ((pthread_t *) thr->thr_handle); */
      /* CKRET (rc); */
#endif

      _thread_num_total++;
#if 0
      if (DO_LOG(LOG_THR))
	log_info ("THRD_0 OS threads create (%i)", _thread_num_total);
#endif
      thread_set_priority (thr, NORMAL_PRIORITY);
    }
  else
    {
      Q_LOCK ();
      thread_queue_remove (&_deadq, thr);
      _thread_num_dead--;
      Q_UNLOCK ();
      assert (thr->thr_status == DEAD);
      /* Set new context for the thread and resume it */
      thr->thr_initial_function = initial_function;
      thr->thr_initial_argument = initial_argument;
      thr->thr_status = RUNNABLE;
      rc = pthread_cond_signal ((pthread_cond_t *) thr->thr_cv);
      CKRET (rc);
/*    if (DO_LOG(LOG_THR))
	log_info ("THRD_3 OS threads reuse. Info threads - total (%ld) wait (%ld) dead (%ld)",
            _thread_num_total, _thread_num_wait, _thread_num_dead);*/
    }

  return thr;

failed:
  if (thr->thr_status == RUNNABLE)
    {
      _thread_free_attributes (thr);
      dk_free (thr, sizeof (thread_t));
    }
  return NULL;
}
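/*
 * Note on the rounding above: ((stack_size / 8192) + 1) * 8192 always
 * advances to the *next* 8192-byte boundary, even when stack_size is
 * already a multiple of 8192. A standalone check (illustration only):
 */
#include <assert.h>

int main(void)
{
  unsigned long a = 100000, b = 81920;

  assert(((a / 8192) + 1) * 8192 == 106496);
  assert(((b / 8192) + 1) * 8192 == 90112);	/* not left at 81920 */
  return 0;
}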
Example #13
int
fork1(struct thread *td, struct fork_req *fr)
{
	struct proc *p1, *newproc;
	struct thread *td2;
	struct vmspace *vm2;
	struct file *fp_procdesc;
	vm_ooffset_t mem_charged;
	int error, nprocs_new, ok;
	static int curfail;
	static struct timeval lastfail;
	int flags, pages;

	flags = fr->fr_flags;
	pages = fr->fr_pages;

	if ((flags & RFSTOPPED) != 0)
		MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL);
	else
		MPASS(fr->fr_procp == NULL);

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

	if ((flags & RFPROCDESC) != 0) {
		/* Can't ask for a process descriptor without creating a process. */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (fr->fr_pd_fd == NULL)
			return (EINVAL);

		/* Check if we are using supported flags. */
		if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0)
			return (EINVAL);
	}

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (fr->fr_procp != NULL)
			*fr->fr_procp = NULL;
		else if (fr->fr_pidp != NULL)
			*fr->fr_pidp = 0;
		return (fork_norfproc(td, flags));
	}

	fp_procdesc = NULL;
	newproc = NULL;
	vm2 = NULL;

	/*
	 * Increment the nprocs resource before allocations occur.
	 * Although process entries are dynamically created, we still
	 * keep a global limit on the maximum number we will
	 * create. There are hard-limits as to the number of processes
	 * that can run, established by the KVA and memory usage for
	 * the process data.
	 *
	 * Don't allow a nonprivileged user to use the last ten
	 * processes; don't let root exceed the limit.
	 */
	nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
	if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) {
		error = EAGAIN;
		sx_xlock(&allproc_lock);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("maxproc limit exceeded by uid %u (pid %d); "
			    "see tuning(7) and login.conf(5)\n",
			    td->td_ucred->cr_ruid, p1->p_pid);
		}
		sx_xunlock(&allproc_lock);
		goto fail2;
	}

	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong. We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd,
		    fr->fr_pd_flags, fr->fr_pd_fcaps);
		if (error != 0)
			goto fail2;
	}

	mem_charged = 0;
	if (pages == 0)
		pages = kstack_pages;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		proc_linkup(newproc, td2);
	} else {
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				error = ENOMEM;
				goto fail2;
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed. The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			error = ENOMEM;
			goto fail2;
		}
	} else
		vm2 = NULL;

	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 *      per-cred resource counters.
	 */
	proc_set_cred_init(newproc, crhold(td->td_ucred));

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);
	if (error != 0) {
		error = EAGAIN;
		goto fail1;
	}

#ifdef MAC
	mac_proc_init(newproc);
#endif
	newproc->p_klist = knlist_alloc(&newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);
	sx_xlock(&allproc_lock);

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(td, RLIMIT_NPROC));
	}
	if (ok) {
		do_fork(td, fr, newproc, td2, vm2, fp_procdesc);
		return (0);
	}

	error = EAGAIN;
	sx_sunlock(&proctree_lock);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
	racct_proc_exit(newproc);
fail1:
	crfree(newproc->p_ucred);
	newproc->p_ucred = NULL;
fail2:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td, fp_procdesc, *fr->fr_pd_fd);
		fdrop(fp_procdesc, td);
	}
	atomic_add_int(&nprocs, -1);
	pause("fork", hz / 2);
	return (error);
}
Example #14
int
thread_setup(struct thread *self, int _priority)
{
    int ret = 1;
    int r;
    unsigned long r_l;
    struct pd *pd;
    unsigned long priority = 100;

    assert(self != NULL);

    pd = self->owner;

    /* Note: These don't allocate any resources */
#if defined(CONFIG_SESSION)
    session_p_list_init(&self->client_sessions);
    session_p_list_init(&self->server_sessions);
#endif
    if (_priority != -1) {
        priority = (unsigned long)_priority;
    }

    self->magic = THREAD_MAGIC;
#if defined(CONFIG_EAS)
    self->eas = NULL;
#endif
    r = thread_alloc(self); /* Allocate a thread id, ALLOC #1 */
    if (r != 0) {
        /* Can't allocate a new thread ID */
        return 1;
    }

    /* Activate new thread */
    {
#ifdef NO_UTCB_RELOCATE
        self->utcb = (void *)-1UL;
#endif
        r_l = thread_new(self, pd_l4_space(pd), self->id, IGUANA_SERVER, IGUANA_SERVER);

        if (r_l != 1) {
            if (L4_ErrorCode() == L4_ErrNoMem || 
                L4_ErrorCode() == L4_ErrUtcbArea) {
                /*
                 * L4 has run out of memory... this is probably very bad, but
                 * we want to keep going for as long as we can 
                 */
                goto thread_error_state;
            } else {
                ERROR_PRINT_L4;
                assert(!"This shouldn't happen");
            }
        } else {
            ret = 0;
        }
        /* Set Priority */
        r = L4_Set_Priority(self->id, priority);
        assert(r != 0);
    }

    return ret;

  thread_error_state:
    /* Here we clean up anything we have allocated */
    thread_free(self->id);
    return 1;
}
Example #15
/*
 * Create a kernel thread.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newtdp is the return value pointing to the thread's struct thread.
 *  ** XXX fix this --> flags are flags to fork1 (in unistd.h) 
 * fmt and following will be *printf'd into (*newtd)->td_name (for ps, etc.).
 */
int
kthread_add(void (*func)(void *), void *arg, struct proc *p,
    struct thread **newtdp, int flags, int pages, const char *fmt, ...)
{
	va_list ap;
	struct thread *newtd, *oldtd;

	if (!proc0.p_stats)
		panic("kthread_add called too soon");

	/* If no process supplied, put it on proc0 */
	if (p == NULL)
		p = &proc0;

	/* Initialize our new td  */
	newtd = thread_alloc(pages);
	if (newtd == NULL)
		return (ENOMEM);

	PROC_LOCK(p);
	oldtd = FIRST_THREAD_IN_PROC(p);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	/* set up arg0 for 'ps', et al */
	va_start(ap, fmt);
	vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
	va_end(ap);

	newtd->td_proc = p;  /* needed for cpu_set_upcall */

	/* XXX optimise this probably? */
	/* On x86 (and probably the others too) it is way too full of junk */
	/* Needs a better name */
	cpu_set_upcall(newtd, oldtd);
	/* put the designated function(arg) as the resume context */
	cpu_set_fork_handler(newtd, func, arg);

	newtd->td_pflags |= TDP_KTHREAD;
	thread_cow_get_proc(newtd, p);

	/* this code almost the same as create_thread() in kern_thr.c */
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	thread_lock(oldtd);
	/* let the scheduler know about these things. */
	sched_fork_thread(oldtd, newtd);
	TD_SET_CAN_RUN(newtd);
	thread_unlock(oldtd);
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	/* Avoid inheriting affinity from a random parent. */
	cpuset_setthread(newtd->td_tid, cpuset_root);

	/* Delay putting it on the run queue until now. */
	if (!(flags & RFSTOPPED)) {
		thread_lock(newtd);
		sched_add(newtd, SRQ_BORING); 
		thread_unlock(newtd);
	}
	if (newtdp)
		*newtdp = newtd;
	return 0;
}
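/*
 * Typical call, sketched from the signature above (names are hypothetical):
 * attach a looping service thread to proc0 and let kthread_add() put it on
 * the run queue (flags == 0, default kstack pages).
 */
static void
my_service(void *arg)
{
	for (;;) {
		/* periodic work, then sleep/wait */
	}
}

static int
start_my_service(void)
{
	struct thread *td;

	return (kthread_add(my_service, NULL, NULL /* proc0 */, &td, 0, 0,
	    "myservice"));
}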
Example #16
static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* There is a race condition here, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	PROC_LOCK(td->td_proc);
	error = racct_add(p, RACCT_NTHR, 1);
	PROC_UNLOCK(td->td_proc);
	if (error != 0)
		return (EPROCLIM);
#endif

	/* Initialize our td */
	newtd = thread_alloc(0);
	if (newtd == NULL) {
		error = ENOMEM;
		goto fail;
	}

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * Here we copy out tid to two places, one for child and one
	 * for parent, because pthread can create a detached thread,
	 * if parent wants to safely access child tid, it has to provide 
	 * its storage, because child thread may exit quickly and
	 * memory is freed before parent thread can access it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Setup user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	thread_link(newtd, p); 
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	PROC_LOCK(p);
	racct_sub(p, RACCT_NTHR, 1);
	PROC_UNLOCK(p);
#endif
	return (error);
}
int video_generator_init(video_generator_settings* cfg, video_generator* g) {

    int i = 0;
    int dx = 0;
    int max_els = RXS_MAX_CHARS * 8; /* members per char */
    video_generator_char* c = NULL;
    int num_frames; /* used for bip/bop calculations. */

    if (!g) {
        return -1;
    }
    if (!cfg) {
        return -2;
    }
    if (!cfg->width) {
        return -3;
    }
    if (!cfg->height) {
        return -4;
    }
    if (!cfg->fps) {
        return -5;
    }

    /* initialize members */
    g->frame = 0;
    g->ybytes = cfg->width * cfg->height;
    g->ubytes = (cfg->width * 0.5) * (cfg->height * 0.5);
    g->vbytes = g->ubytes;
    g->nbytes = g->ybytes + g->ubytes + g->vbytes;
    g->width = cfg->width;
    g->height = cfg->height;
    g->fps = (1.0 / cfg->fps) * 1000 * 1000;

    g->y = (uint8_t*)malloc(g->nbytes);
    g->u = g->y + g->ybytes;
    g->v = g->y + (g->ybytes + g->ubytes);

    g->planes[0] = g->y;
    g->planes[1] = g->u;
    g->planes[2] = g->v;

    g->strides[0] = cfg->width;
    g->strides[1] = cfg->width * 0.5;
    g->strides[2] = cfg->width * 0.5;

    g->step = (1.0 / (5 * cfg->fps)); /* move the bar in 5 seconds from top to bottom */
    g->perc = 0.0;
    g->fps_num = 1;
    g->fps_den = cfg->fps;

    /* initialize the characters */
    while (i < max_els) {
        c = &g->chars[dx];
        c->id = numbersfont_char_data[i++];
        c->x = numbersfont_char_data[i++];
        c->y = numbersfont_char_data[i++];
        c->width = numbersfont_char_data[i++];
        c->height = numbersfont_char_data[i++];
        c->xoffset = numbersfont_char_data[i++];
        c->yoffset = numbersfont_char_data[i++];
        c->xadvance = numbersfont_char_data[i++];
        dx++;
    }

    /* bitmap font specifics */
    g->font_w = 264;
    g->font_h = 50;
    g->font_line_height = 63;

    /* default audio settings. */
    g->audio_bip_frequency = 0;
    g->audio_bop_frequency = 0;
    g->audio_nchannels = 0;
    g->audio_samplerate = 0;
    g->audio_nbytes = 0;
    g->audio_buffer = NULL;
    g->audio_callback = NULL;
    g->audio_thread = NULL;

    /* initialize audio */
    if (NULL != cfg->audio_callback) {

        if (0 == cfg->bip_frequency) {
            printf("Error: audio_callback set but no bip_frequency set. Use e.g. 500.");
            return -6;
        }

        if (0 == cfg->bop_frequency) {
            printf("Error: audio_callback set but no bop_frequency set. Use e.g. 1500.");
            return -7;
        }

        /* we allocate a buffer up to 4 seconds. */
        g->audio_bip_frequency = cfg->bip_frequency;
        g->audio_bop_frequency = cfg->bop_frequency;
        g->audio_bip_millis = 100;
        g->audio_bop_millis = 100;
        g->audio_nchannels = 2;
        g->audio_samplerate = 44100;
        g->audio_nsamples = 1024;
        g->audio_nseconds = 4;
        g->audio_nbytes = sizeof(int16_t) * g->audio_samplerate * g->audio_nchannels * g->audio_nseconds;
        g->audio_callback = cfg->audio_callback;

        /* alloc the buffer. */
        g->audio_buffer = (int16_t*)malloc(g->audio_nbytes);
        if (!g->audio_buffer) {
            printf("Error while allocating the audio buffer.");
            g->audio_buffer = NULL;
            return -7;
        }

        /* fill with silence */
        memset((uint8_t*)g->audio_buffer, 0x00, g->audio_nbytes);

        /* bip */
        dx = 0;
        num_frames = (g->audio_bip_millis/1000.0) * g->audio_samplerate;
        for (i = g->audio_samplerate;  i < (g->audio_samplerate + num_frames); ++i) {
            dx = i * 2;
            g->audio_buffer[dx + 0] = 10000 * sin( (6.28318530718/g->audio_samplerate) * g->audio_bip_frequency * i);
            g->audio_buffer[dx + 1] = g->audio_buffer[dx + 0];
        }

        /* bop */
        num_frames = (g->audio_bop_millis/1000.0) * g->audio_samplerate;
        for (i = (g->audio_samplerate * 3); i < (g->audio_samplerate * 3 + num_frames); ++i) {
            dx = i * 2;
            g->audio_buffer[dx + 0] = 10000 * sin( (6.28318530718/g->audio_samplerate) * g->audio_bop_frequency * i);
            g->audio_buffer[dx + 1] = g->audio_buffer[dx + 0];
        }

        /* init mutex. */
        if (0 != mutex_init(&g->audio_mutex)) {
            printf("Error: cannot initialize the audio mutex!");
            free(g->audio_buffer);
            g->audio_buffer = NULL;
            return -8;
        }

        /* start audio thread. */
        g->audio_thread = thread_alloc(audio_thread, (void*)g);
        if (NULL == g->audio_thread) {
            printf("Error: cannot create audio thread.\n");
            free(g->audio_buffer);
            g->audio_buffer = NULL;
            return -9;
        }
    }

    return 0;
}
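/*
 * Hedged usage sketch, touching only the fields video_generator_init()
 * reads above; values are illustrative. Pass an audio_callback plus
 * bip_frequency/bop_frequency to enable the audio thread.
 */
static int demo_init(video_generator* gen)
{
    video_generator_settings cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.width  = 640;
    cfg.height = 480;
    cfg.fps    = 25;
    cfg.audio_callback = NULL;    /* no audio in this sketch */

    if (video_generator_init(&cfg, gen) < 0) {
        return -1;   /* negative codes identify the failing check */
    }
    return 0;
}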