Example #1
void __assertion_handle_failure(const struct __assertion_point *point) {
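	/* Per-CPU recursion guard: if this CPU is already handling an assertion failure, skip the reporting and go straight to shutdown. */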
	if (cpudata_var(assert_recursive_lock)) {
		printk("\nrecursion detected on CPU %d\n",
				cpu_get_id());
		goto out;
	}
	cpudata_var(assert_recursive_lock) = 1;

	spin_lock_ipl_disable(&assert_lock);

	print_oops();
	printk(
		" ASSERTION FAILED on CPU %d\n"
		LOCATION_FUNC_FMT("\t", "\n") "\n"
		"%s\n",

		cpu_get_id(),
		LOCATION_FUNC_ARGS(&point->location),
		point->expression);

	if (*__assertion_message_buff)
		printk("\n\t(%s)\n", __assertion_message_buff);

	whereami();

	spin_unlock(&assert_lock);  /* leave IRQs off */

out:
	arch_shutdown(ARCH_SHUTDOWN_MODE_ABORT);
	/* NOTREACHED */
}
Example #2
static void cpu_default_irq_handler(struct irq_action_s *action)
{
	unsigned int irq_num = (unsigned int) action->data;

	isr_dmsg(WARNING, "WARNING: No registered handler for IRQ %d on CPU %d\n", irq_num, cpu_get_id());
	cpu_disable_single_irq(irq_num, NULL);
	isr_dmsg(WARNING, "WARNING: IRQ %d on CPU %d has been masked\n", irq_num, cpu_get_id());
}
Example #3
int platform_init(struct sof *sof)
{
	int ret;
	struct dai *esai;

	clock_init();
	scheduler_init();

	platform_timer_start(platform_timer);
	sa_init(sof);

	clock_set_freq(CLK_CPU(cpu_get_id()), CLK_MAX_CPU_HZ);

	/* init DMA */
	ret = edma_init();
	if (ret < 0)
		return -ENODEV;

	/* initialize the host IPC mechanism */
	ipc_init(sof);

	ret = dai_init();
	if (ret < 0)
		return -ENODEV;

	esai = dai_get(SOF_DAI_IMX_ESAI, 0, DAI_CREAT);
	if (!esai)
		return -ENODEV;

	dai_probe(esai);

	return 0;
}
Example #4
int idle_thread_create(void) {
	struct thread *t;

	t = thread_create(THREAD_FLAG_NOTASK | THREAD_FLAG_SUSPENDED, idle_run, NULL);
	if (err(t)) {
		log_error("Couldn't create thread, err=%d", err(t));
		return err(t);
	}

	task_thread_register(task_kernel_task(), t);
	schedee_priority_set(&t->schedee, SCHED_PRIORITY_MIN);
	log_debug("idle_schedee = %#x", &t->schedee);

	cpu_init(cpu_get_id(), t);
	thread_launch(t);

	return 0;
}
Example #5
//inline 
error_t remote_fifo_put(struct remote_fifo_s *remote_fifo, cid_t cid, void *item)
{
	size_t wridx;
	size_t rdidx;
	size_t total_slot_nbr;
	uint_t irq_state;

#if RF_PRINT
	uint32_t start;
	uint32_t end;

	start = cpu_time_stamp(); //cpu_get_ticks(current_cpu);
#endif

	total_slot_nbr  = remote_lw((void*)&remote_fifo->slot_nbr, cid);

	//assert(size); // if the message is bigger than a cache line, it could wrap around the fifo => the message would not be contiguous

	mcs_lock_remote(&remote_fifo->lock, cid, &irq_state);

	wridx = remote_lw((void*)&remote_fifo->wridx, cid);
	rdidx = remote_lw((void*)&remote_fifo->rdidx, cid);

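	/* The FIFO is full when advancing the write index by one slot would collide with the read index. */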
	if(((wridx + 1) % total_slot_nbr) == rdidx)
	{
		mcs_unlock_remote(&remote_fifo->lock, cid, irq_state);
		return EAGAIN;
	}

	item_set(remote_fifo, cid, item, wridx);

	remote_sw((void*)&remote_fifo->wridx, cid, 
				(wridx + 1) % total_slot_nbr);
	
	mcs_unlock_remote(&remote_fifo->lock, cid, irq_state);

#if RF_PRINT
	end = cpu_time_stamp();

	printk(INFO, "[%d] %s: posting in cid %d at %d\n", 
		cpu_get_id(), __FUNCTION__, cid, end-start);
#endif
	return 0;
}
Example #6
int sys_mkfifo (char *pathname, uint_t mode)
{
	register error_t err = 0;
	struct task_s *task = current_task;

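	/* Not implemented yet: report ENOSYS; the vfs_mkfifo() path below is currently unreachable. */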
	current_thread->info.errno = ENOSYS;
	return -1;

	if((err = vfs_mkfifo(task->vfs_cwd, pathname, mode)))
	{
		printk(INFO, "INFO: sys_mkfifo: Thread %x, CPU %d, Error Code %d\n", 
		       current_thread, 
		       cpu_get_id(), 
		       err);

		return -1;
	}
   
	return 0;
}
Example #7
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
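    /* Verify that the user range [uaddr, uaddr + size) is addressable; on failure, flag CPU_DTRACE_BADADDR and record the faulting address. */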

    if (dtrace_here) {
        printk("copycheck: uaddr=%p kaddr=%p size=%d\n", (void *) uaddr, (void*) kaddr, (int) size);
    }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
    if (__range_not_ok(uaddr, size)) {
#else
    if (!addr_valid(uaddr) || !addr_valid(uaddr + size)) {
#endif
//printk("uaddr=%p size=%d\n", uaddr, size);
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = uaddr;
        return (0);
    }
    return (1);
}
# endif

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
              volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
               volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) uaddr, (void *) kaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
                 volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
                  volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

uint8_t
dtrace_fuword8(void *uaddr)
{
    extern uint8_t dtrace_fuword8_nocheck(void *);
    if (!access_ok(VERIFY_READ, uaddr, 1)) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        printk("dtrace_fuword8: uaddr=%p CPU_DTRACE_BADADDR\n", uaddr);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
    extern uint16_t dtrace_fuword16_nocheck(void *);
    if (!access_ok(VERIFY_READ, uaddr, 2)) {
        printk("dtrace_fuword16: uaddr=%p CPU_DTRACE_BADADDR\n", uaddr);
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
    extern uint32_t dtrace_fuword32_nocheck(void *);
    if (!addr_valid(uaddr)) {
        HERE2();
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
    extern uint64_t dtrace_fuword64_nocheck(void *);
    if (!addr_valid(uaddr)) {
        HERE2();
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword64_nocheck(uaddr));
}
Example #8
/*ARGSUSED*/
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
    printk("need to do this dtrace_getufpstack\n");
# if 0
    klwp_t *lwp = ttolwp(curthread);
    proc_t *p = ttoproc(curthread);
    struct regs *rp;
    uintptr_t pc, sp, oldcontext;
    volatile uint8_t *flags =
        (volatile uint8_t *)&cpu_core[cpu_get_id()].cpuc_dtrace_flags;
    size_t s1, s2;

    if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
        return;

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    *pcstack++ = (uint64_t)p->p_pid;
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = rp->r_pc;
    sp = rp->r_fp;
    oldcontext = lwp->lwp_oldcontext;

    if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = 0;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            return;

        if (dtrace_data_model(p) == DATAMODEL_NATIVE)
            pc = dtrace_fulword((void *)rp->r_sp);
        else
            pc = dtrace_fuword32((void *)rp->r_sp);
    }

    while (pc != 0 && sp != 0) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = sp;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            break;

        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        } else {
            if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
                struct frame *fr = (struct frame *)sp;

                pc = dtrace_fulword(&fr->fr_savpc);
                sp = dtrace_fulword(&fr->fr_savfp);
            } else {
                struct frame32 *fr = (struct frame32 *)sp;

                pc = dtrace_fuword32(&fr->fr_savpc);
                sp = dtrace_fuword32(&fr->fr_savfp);
            }
        }

        /*
         * This is totally bogus:  if we faulted, we're going to clear
         * the fault and break.  This is to deal with the apparently
         * broken Java stacks on x86.
         */
        if (*flags & CPU_DTRACE_FAULT) {
            *flags &= ~CPU_DTRACE_FAULT;
            break;
        }
    }

    while (pcstack_limit-- > 0)
        *pcstack++ = NULL;
# endif
}
Example #9
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{   uint64_t *pcstack_end = pcstack + pcstack_limit;
    volatile uint8_t *flags =
        (volatile uint8_t *)&cpu_core[cpu_get_id()].cpuc_dtrace_flags;
    unsigned long *sp;
    unsigned long *bos;

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    *pcstack++ = (uint64_t)current->pid;

    if (pcstack >= pcstack_end)
        return;

    /***********************************************/
    /*   Linux provides a built in function which  */
    /*   is  good  because  stack walking is arch  */
    /*   dependent.            (save_stack_trace)  */
    /*   					       */
    /*   Unfortunately  this is options dependent  */
    /*   (CONFIG_STACKTRACE) so we cannot use it.  */
    /*   And  it's GPL  anyhow, so we cannot copy  */
    /*   it.				       */
    /*   					       */
    /*   What's worse is that we might be compiled */
    /*   with a frame pointer (only on x86-32) so  */
    /*   we have three scenarios to handle.	       */
    /***********************************************/

    /***********************************************/
    /*   Ye  gods! The world is an awful place to  */
    /*   live. The target process, may or may not  */
    /*   have   frame  pointers.  In  fact,  some  */
    /*   frames  may have it and some may not (eg  */
    /*   different   libraries  may  be  compiled  */
    /*   differently).			       */
    /*   					       */
    /*   Looks like distro owners don't care about */
    /*   debuggability,  and  give  us  no  frame  */
    /*   pointers.				       */
    /*   					       */
    /*   This  function  is  really important and  */
    /*   useful.  On  modern  Linux  systems, gdb  */
    /*   (and  pstack) contain all the smarts. In  */
    /*   fact,  pstack  is often a wrapper around  */
    /*   gdb  -  i.e. it's so complex we cannot do */
    /*   this.				       */
    /***********************************************/

    /***********************************************/
    /*   Bear  in  mind  that  user stacks can be  */
    /*   megabytes  in  size,  vs  kernel  stacks  */
    /*   which  are  limited  to a few K (4 or 8K  */
    /*   typically).			       */
    /***********************************************/

//	sp = current->thread.rsp;
# if defined(__i386)
    bos = sp = KSTK_ESP(current);
#	define	ALIGN_MASK	3
# else
    /***********************************************/
    /*   KSTK_ESP() doesn't exist for x86_64 (it's */
    /*   set to -1).			       */
    /***********************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
#  if defined(KSTK_EIP)
    /***********************************************/
    /*   Handles ARM and is more kernel-independent, */
    /*   but might not exist.		       */
    /***********************************************/
    bos = sp = (unsigned long *) KSTK_EIP(current);
#  else
    bos = sp = (unsigned long *) task_pt_regs(current)->sp;
#  endif
#else
    bos = sp = task_pt_regs(current)->rsp;
#endif
#	define	ALIGN_MASK	7
#endif

    /***********************************************/
    /*   Walk  the  stack.  We  cannot  rely on a  */
    /*   frame  pointer  at  each  level,  and we  */
    /*   really  want to avoid probing every word  */
    /*   in  the  stack  - a large stack will eat  */
    /*   cpu  looking at thousands of entries. So  */
    /*   try  and  heuristically see if we have a  */
    /*   likely  frame  pointer  to jump over the  */
    /*   frame,  but, if not, just go one word at  */
    /*   a time.				       */
    /*   					       */
    /*   Try  and be careful we don't walk outside */
    /*   the  stack  or  walk  backwards  in  the  */
    /*   stack, too.			       */
    /***********************************************/
    {   uintptr_t *spend = sp + 1024;
        struct vm_area_struct *vma = find_vma(current->mm, (unsigned long) sp);
        if (vma)
            spend = (uintptr_t *) (vma->vm_end - sizeof(int *));

        /*printk("xbos=%p %p\n", bos, spend);*/

        /***********************************************/
        /*   Have  you ever looked at the output from  */
        /*   GCC in amd64 mode? Things like:	       */
        /*   					       */
        /*   push %r12				       */
        /*   push %rbp				       */
        /*   					       */
        /*   will make you come out in a cold sweat -  */
        /*   no   way  to  find  the  frame  pointer,  */
        /*   without doing what GDB does (ie read the  */
        /*   DWARF  stack  unwind info). So, for now,  */
        /*   you  get  some  false  positives  in the  */
        /*   output - but we try to be conservative.   */
        /***********************************************/
        while (sp >= bos && sp < spend && validate_ptr(sp)) {
            /*printk("  %p %d: %p %d\n", sp, validate_ptr(sp), sp[0], validate_ptr(sp[0]));*/
            if (validate_ptr((void *) sp[0])) {
                uintptr_t p = sp[-1];
                /***********************************************/
                /*   Try  and  avoid false positives in stack  */
                /*   entries   -   we  want  this  to  be  an  */
                /*   executable instruction.		       */
                /***********************************************/
                if (((unsigned long *) sp[0] < bos || (unsigned long *) sp[0] > spend) &&
                        (vma = find_vma(current->mm, sp[0])) != NULL &&
                        vma->vm_flags & VM_EXEC) {
                    *pcstack++ = sp[0];
                    if (pcstack >= pcstack_end)
                        break;
                }
                if (((int) p & ALIGN_MASK) == 0 && p > (uintptr_t) sp && p < (uintptr_t) spend)
                    sp = (unsigned long *) p;
            }
            sp++;
        }
    }

    /***********************************************/
    /*   Erase  anything  else  in  the buffer to  */
    /*   avoid confusion.			       */
    /***********************************************/
    while (pcstack < pcstack_end)
        *pcstack++ = (pc_t) NULL;
}
Example #10
int sys_fork(uint_t flags, uint_t cpu_gid)
{
	fork_info_t info;
	struct dqdt_attr_s attr;
	struct thread_s *this_thread;
	struct task_s *this_task;
	struct thread_s *child_thread;
	struct task_s *child_task;
	uint_t irq_state;
	uint_t cpu_lid;
	uint_t cid;
	error_t err;
	uint_t tm_start;
	uint_t tm_end;
	uint_t tm_bRemote;
	uint_t tm_aRemote;

	tm_start = cpu_time_stamp();

	fork_dmsg(1, "%s: cpu %d, started [%d]\n",
		  __FUNCTION__, 
		  cpu_get_id(),
		  tm_start);

	this_thread = current_thread;
	this_task   = this_thread->task;
	info.current_clstr = current_cluster;

	err = atomic_add(&this_task->childs_nr, 1);
  
	if(err >= CONFIG_TASK_CHILDS_MAX_NR)
	{
		err = EAGAIN;
		goto fail_childs_nr;
	}

	fork_dmsg(1, "%s: task of pid %d can fork a child [%d]\n",
		  __FUNCTION__, 
		  this_task->pid,
		  cpu_time_stamp());

	info.isDone      = false;
	info.this_thread = this_thread;
	info.this_task   = this_task;
	info.flags       = flags;

	cpu_disable_all_irq(&irq_state);
	cpu_restore_irq(irq_state);
  
	if(current_cpu->fpu_owner == this_thread)
	{
		fork_dmsg(1, "%s: going to save FPU\n", __FUNCTION__);
		cpu_fpu_context_save(&this_thread->uzone);
	}

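	/* Placement: either pin the child on the CPU requested by the caller, or ask the DQDT for a target cluster and CPU. */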
	if(flags & PT_FORK_USE_TARGET_CPU)
	{
		cpu_gid       = cpu_gid % arch_onln_cpu_nr();
		cpu_lid       = arch_cpu_lid(cpu_gid);
		cid           = arch_cpu_cid(cpu_gid);
		attr.cid      = cid;
		attr.cpu_id   = arch_cpu_lid(cpu_gid);
		info.isPinned = true;
	}
	else
	{
		info.isPinned = false;
		dqdt_attr_init(&attr, NULL);
		err = dqdt_task_placement(dqdt_root, &attr);
	}

	info.cpu      = cpu_lid2ptr(attr.cpu_id);
	info.cid_exec = attr.cid_exec;

	/* Keep the first two processes on the current cluster. This is used by cluster zero to keep
	 * the "sh" process on this cluster. Init is forced on current_cluster in the
	 * task_load_init() function.
	 */
	if(this_task->pid < PID_MIN_GLOBAL + 2)
		info.cid_exec = current_cid;

	fork_dmsg(1, "%s: new task will be placed on cluster %d, cpu %d. Task will be moved to cluster %u on exec()\n",
		  __FUNCTION__, attr.cid, attr.cpu_id, info.cid_exec);

	tm_bRemote = cpu_time_stamp();
	err = do_fork(&info);
	tm_aRemote = cpu_time_stamp();

	if(err)
		goto fail_do_fork;

	child_thread = info.child_thread;
	child_task   = info.child_task;

	spinlock_lock(&this_task->lock);

	list_add(&this_task->children, &child_task->list);
	spinlock_unlock(&this_task->lock);

	fork_dmsg(1, "%s: childs (task & thread) have been registered in their parents lists [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());
  
	fork_dmsg(1, "%s: going to add child to target scheduler\n", __FUNCTION__);
	sched_add_created(child_thread);
	tm_end = cpu_time_stamp();
    
	fork_dmsg(1, "%s: cpu %d, pid %d, done [s:%u, bR:%u, aR:%u, e:%u, d:%u, t:%u, r:%u]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       this_task->pid,
	       tm_start,
	       tm_bRemote,
	       tm_aRemote,
	       tm_end,
	       attr.tm_request,
	       tm_end - tm_start,
	       info.tm_event);

	return child_task->pid;

fail_do_fork:
fail_childs_nr:
	atomic_add(&this_task->childs_nr, -1);
	this_thread->info.errno = err;
	return -1;
}
Example #11
error_t do_fork(fork_info_t *info)
{
	kmem_req_t req;
	struct dqdt_attr_s attr;
	struct thread_s *child_thread;
	struct task_s *child_task;
	struct page_s *page;
	uint_t cid;
	error_t err;
	sint_t order;
  
	fork_dmsg(1, "%s: cpu %d, started [%d]\n", 
		  __FUNCTION__, 
		  cpu_get_id(),
		  cpu_time_stamp());
  
	child_thread = NULL;
	child_task   = NULL;
	page         = NULL;
	cid	      = info->cpu->cluster->id;
	attr.cid      = cid;
	attr.cpu_id   = 0;
        attr.cid_exec = info->cid_exec;

	//dqdt_update_threads_number(attr.cluster->levels_tbl[0], attr.cpu->lid, 1);
	dqdt_update_threads_number(cid, attr.cpu_id, 1);

        //attr.cluster = info->current_clstr;
        attr.cid = cid;
	err = task_create(&child_task, &attr, CPU_USR_MODE);
  
        //attr.cluster = info->cpu->cluster;
        attr.cid = cid;

	if(err) goto fail_task;

	fork_dmsg(1, "%s: cpu %d, ppid %d, task @0x%x, pid %d, task @0x%x [%d]\n",
		  __FUNCTION__, 
		  cpu_get_id(), 
		  info->this_task->pid, 
		  info->this_task,
		  child_task->pid,
		  child_task,
		  cpu_time_stamp());
  
	req.type  = KMEM_PAGE;
	req.size  = ARCH_THREAD_PAGE_ORDER;
	req.flags = AF_KERNEL | AF_REMOTE;
	req.ptr   = info->cpu->cluster;
	req.ptr   = info->current_clstr;

	page = kmem_alloc(&req);

	if(page == NULL) 
		goto fail_mem;

	fork_dmsg(1, "%s: child pid will be %d on cluster %d, cpu %d [%d]\n", 
		  __FUNCTION__, 
		  child_task->pid, 
		  child_task->cpu->cluster->id, 
		  child_task->cpu->gid,
		  cpu_time_stamp());

	err = task_dup(child_task, info->this_task);
  
	if(err) goto fail_task_dup;

	signal_manager_destroy(child_task);
	signal_manager_init(child_task);
  
	fork_dmsg(1, "%s: parent task has been duplicated [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());

	child_task->current_clstr = info->current_clstr;

	err = vmm_dup(&child_task->vmm, &info->this_task->vmm);

	if(err) goto fail_vmm_dup;
  
	fork_dmsg(1, "%s: parent vmm has been duplicated [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());

	child_thread = (struct thread_s*) ppm_page2addr(page);

	/* Set the child page before calling thread_dup */
	child_thread->info.page = page;

	err = thread_dup(child_task,
			 child_thread,
			 info->cpu,
			 info->cpu->cluster,
			 info->this_thread);

	if(err) goto fail_thread_dup;

	/* Adjust child_thread attributes */
	if(info->flags & PT_FORK_USE_AFFINITY)
	{
		child_thread->info.attr.flags |= (info->flags & ~(PT_ATTR_LEGACY_MASK));

		if(!(info->flags & PT_ATTR_MEM_PRIO))
			child_thread->info.attr.flags &= ~(PT_ATTR_MEM_PRIO);

		if(!(info->flags & PT_ATTR_AUTO_MGRT))
			child_thread->info.attr.flags &= ~(PT_ATTR_AUTO_MGRT);

		if(!(info->flags & PT_ATTR_AUTO_NXTT))
			child_thread->info.attr.flags &= ~(PT_ATTR_AUTO_NXTT);
	}

	fork_dmsg(1, "%s: parent current thread has been duplicated, tid %x [%d]\n",
		  __FUNCTION__, 
		  child_thread, 
		  cpu_time_stamp());
	
	if(info->isPinned)
		thread_migration_disabled(child_thread);
	else
		thread_migration_enabled(child_thread);
	
	list_add_last(&child_task->th_root, &child_thread->rope);
	child_task->threads_count = 1;
	child_task->threads_nr ++;
	child_task->state = TASK_READY;

	order = bitmap_ffs2(child_task->bitmap, 0, sizeof(child_task->bitmap));

	if(order == -1) goto fail_order;

	bitmap_clear(child_task->bitmap, order);
	child_thread->info.attr.key = order;
	child_thread->info.order = order;
	child_task->next_order = order + 1;
	child_task->max_order = order;
	child_task->uid = info->this_task->uid;
	child_task->parent = info->this_task->pid;

	err = sched_register(child_thread);
  
	assert(err == 0);
    
	cpu_context_set_tid(&child_thread->info.pss, (reg_t)child_thread);
	cpu_context_set_pmm(&child_thread->info.pss, &child_task->vmm.pmm);
	cpu_context_dup_finlize(&child_thread->pws, &child_thread->info.pss);
  
	child_thread->info.retval = 0;
	child_thread->info.errno = 0;

	info->child_thread = child_thread;
	info->child_task = child_task;
	return 0;

fail_order:
fail_thread_dup:
fail_vmm_dup:
fail_task_dup:
	printk(WARNING, "WARNING: %s: destroy child thread\n", __FUNCTION__);
	req.ptr = page;
	kmem_free(&req);

fail_mem:
fail_task:
	//FIXME
	//dqdt_update_threads_number(attr.cluster->levels_tbl[0], attr.cpu->lid, -1);
	dqdt_update_threads_number(attr.cid, attr.cpu_id, -1);

	printk(WARNING, "WARNING: %s: destroy child task\n", __FUNCTION__);

	if(child_task != NULL)
		task_destroy(child_task);

	printk(WARNING, "WARNING: %s: fork err %d [%d]\n", 
	       __FUNCTION__, 
	       err, 
	       cpu_time_stamp());

	return err;
}
Example #12
static void barrier_do_broadcast(struct barrier_s *barrier)
{
	register uint_t tm_first;
	register uint_t tm_last;
	register uint_t tm_start;
	register uint_t wqdbsz;
	register uint_t tm_end;
	register uint_t ticket;
	register uint_t index;
	register uint_t count;
	register uint_t event;
	register void  *listner;
	register wqdb_t *wqdb;
	register uint_t i;
 
	tm_start = cpu_time_stamp();
	tm_first = barrier->tm_first;
	tm_last  = barrier->tm_last;
	wqdbsz   = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
	ticket   = 0;

#if ARCH_HAS_BARRIERS
	count    = barrier->count;
#else
	count    = barrier->count - 1;	/* last don't sleep */
#endif

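	/* Walk the wqdb tables and wake every registered listener until 'count' waiters have been signalled. */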
	for(index = 0; ((index < BARRIER_WQDB_NR) && (ticket < count)); index++)
	{
		wqdb = barrier->wqdb_tbl[index];

		for(i = 0; ((i < wqdbsz) && (ticket < count)); i++)
		{

#if CONFIG_BARRIER_BORADCAST_UREAD
			event   = cpu_uncached_read(&wqdb->tbl[i].event);
			listner = (void*) cpu_uncached_read(&wqdb->tbl[i].listner);
#else
			event   = wqdb->tbl[i].event;
			listner = wqdb->tbl[i].listner;
#endif

			if(listner != NULL)
			{
				wqdb->tbl[i].listner = NULL;
#if CONFIG_USE_SCHED_LOCKS
				sched_wakeup((struct thread_s*) listner);
#else
				sched_event_send(listner, event);
#endif
				ticket ++;
			}
		}
	}

	tm_end = cpu_time_stamp();

	printk(INFO, "INFO: %s: cpu %d [F: %d, L: %d, B: %d, E: %d, T: %d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       tm_first, 
	       tm_last, 
	       tm_start,
	       tm_end,
	       tm_end - tm_first);
}
Example #13
error_t barrier_init(struct barrier_s *barrier, uint_t count, uint_t scope)
{
	struct page_s *page;
	uint_t wqdbsz;
	uint_t i;
	error_t err;
	kmem_req_t req;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
  
	if(count == 0) 
		return EINVAL;

	if(current_task->threads_limit > (BARRIER_WQDB_NR*wqdbsz))
	{
		printk(INFO, "INFO: %s: pid %d, cpu %d, task threads limit exceed barrier ressources of %d\n",
		       __FUNCTION__,
		       current_task->pid,
		       cpu_get_id(),
		       BARRIER_WQDB_NR*wqdbsz);

		return ENOMEM;
	}
    
	if(count > BARRIER_WQDB_NR*wqdbsz) 
		return ENOMEM;

	barrier->owner = (scope == BARRIER_INIT_PRIVATE) ? current_task : NULL;

#if ARCH_HAS_BARRIERS
	barrier->cluster = current_cluster;
	event_set_handler(&barrier->event, &barrier_broadcast_event);
	event_set_argument(&barrier->event, barrier);
	barrier->hwid = arch_barrier_init(barrier->cluster, &barrier->event, count);
    
	if(barrier->hwid < 0)
		return ENOMEM;		/* TODO: we can use software barrier instead */
#else
	if(barrier->owner != NULL)
		atomic_init(&barrier->waiting, count);
	else
	{
		spinlock_init(&barrier->lock, "barrier");
		barrier->index = 0;
	}

#endif	/* ARCH_HAS_BARRIERS */

	req.type  = KMEM_PAGE;
	req.size  = 0;
	req.flags = AF_USER | AF_ZERO;
	err       = 0;

	for(i = 0; i < BARRIER_WQDB_NR; i++)
	{
		page = kmem_alloc(&req);
    
		if(page == NULL)
		{ 
			err = ENOMEM;
			break;
		}
    
		barrier->wqdb_tbl[i]  = ppm_page2addr(page);
		barrier->pages_tbl[i] = page;
	}

	if(err)
	{
		err = i;
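		/* err now holds the number of pages successfully allocated; free exactly those before bailing out. */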
    
		for(i = 0; i < err; i++)
		{
			req.ptr = barrier->pages_tbl[i];
			kmem_free(&req);
		}
    
		return ENOMEM;
	}

	barrier->count     = count;
	barrier->signature = BARRIER_ID;
	barrier->state[0]  = 0;
	barrier->state[1]  = 0;
	barrier->phase     = 0;
	barrier->name      = "Barrier-Sync";
  
	return 0;
}
Example #14
void* kvfsd(void *arg)
{
	uint_t tm_now, cntr;
	struct task_s *task;
	struct thread_s *this;
	struct cpu_s *cpu;
	struct alarm_info_s info;
	struct event_s event;
	uint_t fs_type;
	error_t err;
	
	cpu_enable_all_irq(NULL);

	printk(INFO, "INFO: Starting KVFSD on CPU %d [ %d ]\n", cpu_get_id(), cpu_time_stamp());

	task    = current_task;
	fs_type = VFS_TYPES_NR;

#if CONFIG_ROOTFS_IS_EXT2
	fs_type = VFS_EXT2_TYPE;
#endif
 
#if CONFIG_ROOTFS_IS_VFAT

#if CONFIG_ROOTFS_IS_EXT2
#error More than one root fs has been selected
#endif

	fs_type = VFS_VFAT_TYPE;
#endif  /* CONFIG_ROOTFS_IS_VFAT */
  
	err = vfs_init(__sys_blk,
		       fs_type,
		       VFS_MAX_NODE_NUMBER,
		       VFS_MAX_FILE_NUMBER,
		       &task->vfs_root);

	task->vfs_cwd = task->vfs_root;

	printk(INFO, "INFO: Virtual File System (VFS) Is Ready\n");

	sysconf_init();

	if(err == 0)
	{
		if((err = task_load_init(task)))
		{
			printk(WARNING, "WARNING: failed to load user process, err %d [%u]\n", 
			       err,
			       cpu_time_stamp());
		}
	}

#if CONFIG_DEV_VERSION
	if(err != 0)
	{
		struct thread_s *thread;

		printk(INFO, "INFO: Creating kernel level terminal\n"); 

		thread = kthread_create(task, 
					&kMiniShelld, 
					NULL, 
					current_cluster->id,
					current_cpu->lid);
		thread->task = task;
		list_add_last(&task->th_root, &thread->rope);
		err = sched_register(thread);
		assert(err == 0);
		sched_add_created(thread);
	}
#endif

	this = current_thread;
	cpu  = current_cpu;

	event_set_senderId(&event, this);
	event_set_priority(&event, E_FUNC);
	event_set_handler(&event, &kvfsd_alarm_event_handler);
  
	info.event = &event;
	cntr       = 0;

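	/* Periodic housekeeping: wait on the alarm, sync all pages, and dump the DQDT summary every fourth iteration. */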
	while(1)
	{
		alarm_wait(&info, 10);
		sched_sleep(this);
		tm_now = cpu_time_stamp();
		printk(INFO, "INFO: System Current TimeStamp %u\n", tm_now);
		sync_all_pages();

		if((cntr % 4) == 0)
			dqdt_print_summary(dqdt_root);

		cntr ++;
	}
	return NULL;
}
Example #15
/*
 * FIXME: define spinlock_rdlock() so all locking on task->th_lock
 * becomes rdlock except on join/detach/destroy.
 */
int sys_thread_wakeup(pthread_t tid, pthread_t *tid_tbl, uint_t count)
{
	struct task_s *task;
	struct thread_s *this;
	struct thread_s *target;
	pthread_t tbl[100];
	void *listner;
	uint_t event;
	sint_t i;
	error_t err;

	this = current_thread;
	task = this->task;
	i = -1;

	if(tid_tbl != NULL)
	{
		if((((uint_t)tid_tbl + (count*sizeof(pthread_t))) >= CONFIG_KERNEL_OFFSET) || 
		   (count == 0) || (count > 100))
		{
			err = -1;
			goto fail_tid_tbl;
		}

		if((err = cpu_uspace_copy(&tbl[0], tid_tbl, sizeof(pthread_t) * count)))
			goto fail_uspace;

		if(tbl[0] != tid)
		{
			err = -2;
			goto fail_first_tid;
		}
	}
	else
	{
		count = 1;
		tbl[0] = tid;
	}

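	/* For each tid in tbl[], look up the target thread in the task's thread table and send it a UWAKEUP scheduling event. */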
	for(i = 0; i < count; i++)
	{
		tid = tbl[i];

		if(tid > task->max_order)
		{
			err = -3;
			goto fail_tid;
		}

		target = task->th_tbl[tid];
   
		if((target == NULL) || (target->signature != THREAD_ID))
		{
			err = -4;
			goto fail_target;
		}

		listner = sched_get_listner(target, SCHED_OP_UWAKEUP);
		event = sched_event_make(target,SCHED_OP_UWAKEUP);
    
		if(this->info.isTraced == true)
		{
			printk(INFO,"%s: tid %d --> tid %d [%d][%d]\n", 
			       __FUNCTION__, 
			       this->info.order, 
			       tid, 
			       cpu_time_stamp(),
			       i);
		}

		sched_event_send(listner,event);
		cpu_wbflush();
	}

	return 0;

fail_target:
fail_tid:
fail_first_tid:
fail_uspace:
fail_tid_tbl:

	printk(INFO, "%s: cpu %d, pid %d, tid %x, i %d, count %d, ttid %x, request has failed with err %d [%d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       task->pid,
	       this,
	       i,
	       count,
	       tid,
	       err,
	       cpu_time_stamp());
  
	this->info.errno = EINVAL;
	return -1;
}