Example #1
int main(int argc, char** argv)
{
	size_t window_size[] = {TEX_WIDTH, TEX_HEIGHT};
	cl_int err;

        GLInfo* glinfo = GLInfo::instance();

	if (glinfo->initialize(argc,argv, window_size, 
                               "OpenCL-OpenGL interop test") != 0){
		std::cerr << "Failed to initialize GL" << std::endl;
		exit(1);
	} else { 
		std::cout << "Initialized GL succesfully" << std::endl;
	}

        CLInfo* clinfo = CLInfo::instance();

	if (clinfo->initialize() != CL_SUCCESS){
		std::cerr << "Failed to initialize CL" << std::endl;
		exit(1);
	} else { 
		std::cout << "Initialized CL succesfully" << std::endl;
	}

	clinfo->print_info();

	////////////// Create gl_tex and buf /////////////////
	gl_tex = create_tex_gl(TEX_WIDTH,TEX_HEIGHT);
	gl_buf = create_buf_gl(1024);
	print_gl_tex_2d_info(gl_tex);
	///////////// Create empty cl_mem
	if (create_empty_cl_mem(CL_MEM_READ_WRITE, 1024, &cl_buf_mem)) {
		std::cerr << "*** Error creating OpenCL buffer" << std::endl;
	} else {
		std::cout << "Created OpenCL buffer successfully" << std::endl;
		print_cl_mem_info(cl_buf_mem);
	}

	////////////// Create cl_mem from gl_tex
	if (create_cl_mem_from_gl_tex(gl_tex, &cl_tex_mem))
		exit(1);
	print_cl_image_2d_info(cl_tex_mem);
	
	if (init_cl_kernel("src/kernel/clgl-test.cl", "Grad", &clkernelinfo)){
		std::cerr << "Failed to initialize CL kernel" << std::endl;
		exit(1);
	} else {
		std::cerr << "Initialized CL kernel succesfully" << std::endl;
	}

	clkernelinfo.work_dim = 2;
	clkernelinfo.arg_count = 2;
	clkernelinfo.global_work_size[0] = TEX_WIDTH;
	clkernelinfo.global_work_size[1] = TEX_HEIGHT;

	std::cout << "Setting texture mem object argument for kernel" << std::endl;
	err = clSetKernelArg(clkernelinfo.kernel,0,sizeof(cl_mem),&cl_tex_mem);
	if (error_cl(err, "clSetKernelArg 0"))
		exit(1);

	glutKeyboardFunc(gl_key);
	glutDisplayFunc(gl_loop);
	glutIdleFunc(gl_loop);

#ifdef __linux__
	clock_gettime(CLOCK_MONOTONIC, &tp);
#elif defined _WIN32
	tp = snap_time();
#endif

///////////////////////////// Extra thread

	std::cout << "Creating thread!\n";
        int ret;
        ret = pthread_create( &extra_thread, NULL, extra_thread_function, NULL);
        ret = pthread_barrier_init(&thread_barrier, NULL, 2);
        
	std::cout << std::endl;
	glutMainLoop();	

	return 0;
}
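Note: extra_thread_function is referenced but not shown in this example. A minimal sketch of what it might look like, assuming (from the commented-out dispatch code in gl_loop(), Example #3) that the worker runs the CL kernel between the two barrier waits:

/* Hypothetical sketch only: extra_thread_function is not in the listing.
 * It pairs with the two pthread_barrier_wait() calls in gl_loop();
 * execute_cl() and clkernelinfo come from the example, but this division
 * of work is an assumption. Requires <pthread.h>. */
void *extra_thread_function(void *arg)
{
	(void)arg;
	for (;;) {
		/* Wait until gl_loop() has acquired the GL texture for CL. */
		pthread_barrier_wait(&thread_barrier);
		execute_cl(clkernelinfo);	/* run the "Grad" kernel */
		/* Tell gl_loop() the kernel is done so it can release the
		 * texture back to GL and draw the quad. */
		pthread_barrier_wait(&thread_barrier);
	}
	return NULL;
}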
Example #2
static void
kerext_process_create(void *data) {
	struct kerargs_process_create *kap = data;
	pid_t							pid;
	PROCESS							*prp, *parent;
	THREAD							*act = actives[KERNCPU];
	int								status, i;
	struct _cred_info				info;

	if((parent = lookup_pid(kap->parent_pid)) == NULL) {
		kererr(act, ESRCH);
		return;
	}

	if (parent->num_processes >= parent->rlimit_vals_soft[RLIMIT_NPROC]) {
		kererr(act, EAGAIN);
		return;
	}

	lock_kernel();

	// Check that we haven't run out of process vector entries
	// The process index & PID_MASK should never be all zeros
	// or all ones. All ones could cause SYNC_*() to return
	// valid looking numbers from an uninitialized sync.
	if((process_vector.nentries - process_vector.nfree) >= PID_MASK - 1) {
		kererr(act, EAGAIN);
		return;
	}

	// Alloc a process entry.
	if((prp = object_alloc(NULL, &process_souls)) == NULL) {
		kererr(act, ENOMEM);
		return;
	}

	if(kap->parent_pid) {
		prp->flags = _NTO_PF_LOADING | _NTO_PF_NOZOMBIE | _NTO_PF_RING0;
		prp->lcp = kap->lcp;
	}
	snap_time(&prp->start_time, 1);

	MUTEX_INIT(prp, &prp->mpartlist_lock);
	MUTEX_INIT(prp, &prp->spartlist_lock);

	CRASHCHECK((kap->extra == NULL) || (kap->extra->mpart_list == NULL) || 
				((kap->extra->spart_list == NULL) && SCHEDPART_INSTALLED()));
	{
		part_list_t  *mpart_list = kap->extra->mpart_list;
		part_list_t  *spart_list = kap->extra->spart_list;

		/* first thing is to associate with all specified partitions */
		for (i=0; i<mpart_list->num_entries; i++)
		{
			if ((status = MEMPART_ASSOCIATE(prp, mpart_list->i[i].id, mpart_list->i[i].flags)) != EOK)
			{
				(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
				(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
				(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
				object_free(NULL, &process_souls, prp);
				kererr(act, status);
				return;
			}
		}
		if (SCHEDPART_INSTALLED())
		{
			for (i=0; i<spart_list->num_entries; i++)
			{
				if ((status = SCHEDPART_ASSOCIATE(prp, spart_list->i[i].id, spart_list->i[i].flags)) != EOK)
				{
					(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
					(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
					(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
					object_free(NULL, &process_souls, prp);
					kererr(act, status);
					return;
				}
			}
		}
	}

	// Allocate a vector for 1 thread but don't get a thread entry.
	if(vector_add(&prp->threads, NULL, 1) == -1) {
		(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
		(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
		object_free(NULL, &process_souls, prp);
		kererr(act, ENOMEM);
		return;
	}

	// Add process to the process table vector.
	if((pid = vector_add(&process_vector, prp, 0)) == -1) {
		(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
		(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
		vector_free(&prp->threads);
		object_free(NULL, &process_souls, prp);
		kererr(act, ENOMEM);
		return;
	}

	prp->boundry_addr = VM_KERN_SPACE_BOUNDRY;
	prp->pid = pid | pid_unique; 	// adjust pid_unique during process destroy
	SIGMASK_SPECIAL(&prp->sig_queue);

	// Call out to allow memory manager to initialize the address space
	if((status = memmgr.mcreate(prp)) != EOK) {
		(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
		(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
		vector_rem(&process_vector, PINDEX(pid));
		vector_free(&prp->threads);
		object_free(NULL, &process_souls, prp);
		kererr(act, status);
		return;
	}

	// Inherit parent's information
	info = parent->cred->info;
	info.sgid = parent->cred->info.egid;
	info.suid = 0;			// The loader will set to euid after loading...
	cred_set(&prp->cred, &info);
	prp->seq = 1;

	// inherit setrlimit/getrlimit settings 
	for(i=0; i < RLIM_NLIMITS; i++) {
		prp->rlimit_vals_soft[i] = parent->rlimit_vals_soft[i];
		prp->rlimit_vals_hard[i] = parent->rlimit_vals_hard[i];
	}
	prp->max_cpu_time = parent->max_cpu_time;

	// stop core file generation if RLIMIT_CORE is 0
	if (prp->rlimit_vals_soft[RLIMIT_CORE] == 0) {
		prp->flags |= _NTO_PF_NOCOREDUMP;
	}

	// Inherit default scheduling partition
	// from creating thread.
	prp->default_dpp = SELECT_DPP(act, prp, schedpart_getid(prp));

	prp->pgrp = parent->pgrp;
	prp->umask = parent->umask;
	prp->sig_ignore = parent->sig_ignore;
	SIGMASK_NO_KILLSTOP(&prp->sig_ignore);

	if((prp->limits = lookup_limits(parent->cred->info.euid))  ||
	   (prp->limits = parent->limits)) {
		prp->limits->links++;
	}

	if((prp->session = parent->session)) {
		atomic_add(&prp->session->links, 1);
	}

	// Link the new process in as a child of its creator.
	prp->child = NULL;
	prp->parent = parent;
	prp->sibling = parent->child;
	parent->child = prp;
	++parent->num_processes;
	_TRACE_PR_EMIT_CREATE(prp);
	SETKSTATUS(act, prp);
}
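Every error path above repeats the same unwind sequence: disassociate the partitions, destroy the two mutexes, free the process soul, report the error. A conventional way to express that once is the staged goto-cleanup idiom; the following self-contained sketch is illustrative only, with plain allocations standing in for the partition, mutex, and soul setup:

#include <stdlib.h>

/* Staged goto-cleanup: each failure jumps to the label that undoes
 * exactly what has been set up so far, so the unwind exists once. */
static int create_thing(void)
{
	int status;
	char *parts, *locks, *soul;

	if ((parts = malloc(16)) == NULL) { status = -1; goto fail; }
	if ((locks = malloc(16)) == NULL) { status = -1; goto fail_parts; }
	if ((soul  = malloc(16)) == NULL) { status = -1; goto fail_locks; }

	/* ... the rest of the setup would go here ... */
	free(soul); free(locks); free(parts);	/* toy resources only */
	return 0;

fail_locks:
	free(locks);
fail_parts:
	free(parts);
fail:
	return status;
}

int main(void)
{
	return create_thing() ? EXIT_FAILURE : EXIT_SUCCESS;
}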
Example #3
void gl_loop()
{
	static int i = 0;
	static int dir = 1;
	
	//////////// CL STUFF
	acquire_gl_tex(cl_tex_mem);
	// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	// First barrier: the texture is acquired, so the extra thread may run the kernel.
	pthread_barrier_wait(&thread_barrier);

	// cl_int arg = i%STEPS;
	// err = clSetKernelArg(clkernelinfo.kernel,1,sizeof(cl_int),&arg);
	// if (error_cl(err, "clSetKernelArg 1"))
	// 	exit(1);
	// execute_cl(clkernelinfo);

	// ////////////////// Immediate mode textured quad
	// release_gl_tex(cl_tex_mem);
	// Second barrier: the kernel has finished; release the texture back to GL and draw.
	pthread_barrier_wait(&thread_barrier);
	release_gl_tex(cl_tex_mem);
	glBindTexture(GL_TEXTURE_2D, gl_tex);

	glBegin(GL_TRIANGLE_STRIP);

	glTexCoord2f(1.0,1.0);
	glVertex2f(1.0,1.0);

	glTexCoord2f(1.0,0.0);
	glVertex2f(1.0,-1.0);

	glTexCoord2f(0.0,1.0);
	glVertex2f(-1.0,1.0);

	glTexCoord2f(0.0,0.0);
	glVertex2f(-1.0,-1.0);

	glEnd();
	////////////////////////////////////////////

	i += dir;
	if (!(i % (STEPS-1))){
		dir *= -1;
#ifdef __linux__
		timespec _tp;
		clock_gettime(CLOCK_MONOTONIC, &_tp);
		double msec = compute_diff(tp, _tp);
#elif defined _WIN32
		__int64 _tp;
		_tp = snap_time();
		double msec = compute_diff(tp, _tp);
#endif
		std::cout << "Time elapsed: "
			  << msec << " milliseconds "
			  << "\t("
			  << int(STEPS / (msec/1000))
			  << " FPS)          \r";
		std::flush(std::cout);

		tp = _tp;
	}
	glutSwapBuffers();
	// std::cout << "Main thread reporting to barrier!" << std::endl;
        // pthread_barrier_wait(&thread_barrier);
	// std::cout << "Main thread exiting barrier!" << std::endl;
}
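compute_diff() is external to this listing. Judging from the Linux call sites, it converts two CLOCK_MONOTONIC snapshots into milliseconds; a minimal sketch under that assumption (the signature is inferred from the calls above, not taken from the source):

#include <time.h>

/* Assumed helper: milliseconds between two CLOCK_MONOTONIC snapshots. */
static double compute_diff(struct timespec start, struct timespec end)
{
	return (end.tv_sec  - start.tv_sec)  * 1000.0 +
	       (end.tv_nsec - start.tv_nsec) / 1.0e6;
}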
Example #4
/**
//
// This routine is used to start a thread. It will block the parent
// to hold off reporting that the thread was created until all the user
// parameters are verified (e.g. stack was created, attributes are valid).
// It is called from kernel mode, so it can't examine any of the user
// attributes itself. It just sets the WAAA flag and lets thread_specret()
// do the looking while it can fault...
//*/
int rdecl
thread_create(THREAD *act, PROCESS *prp, const struct sigevent *evp, unsigned thread_create_flags) {
// thread_create_flags are an or-ing of kermacros.h : THREAD_CREATE_*_FLAG
	THREAD	*thp;
	int		tid;
	int		align;

	chk_lock();
	// Allocate a thread object.
	if((thp = object_alloc(prp, &thread_souls)) == NULL) {
		return EAGAIN;
	}
	thp->process = prp;
	thp->type = TYPE_THREAD;
	thp->last_chid = -1;
	thp->syscall = -1;
	thp->client = 0;
	snap_time(&thp->start_time, 1);
	//no need to init thp->timestamp_since_block since we start READY

	// If this is not the system manager process, inherit the address space
	// of the calling thread; for the first thread of a process, initialize it to prp.
	if(prp->pid == SYSMGR_PID) {
		thp->aspace_prp = NULL;
	} else {
		thp->aspace_prp = act->process == prp ? act->aspace_prp : prp;
	}

	// Inherit signal mask and thread priv.
	thp->sig_blocked = act->sig_blocked;
	SIGMASK_SPECIAL(&thp->sig_blocked);
	SIGMASK_NO_KILLSTOP(&thp->sig_blocked);

	// Start the new thread at the current threads priority.
	// Because we can block active, we don't want priority
	// inversion if the new thread is a lower priority.
	thp->priority = thp->real_priority = act->real_priority;
	thp->policy = act->policy;

	/*
	 *  Inherit scheduler partition.
	 *
	 *  If the creating thread (act) and child thread (thp) are in the same
	 *  process, the child inherits both the dynamic partition (->dpp) and
	 *  the home partition (->orig_dpp) from the creating thread.
	 *
	 *  Otherwise, we assume we are Proc and either:
	 *  1) Proc is creating the main thread of a process (result of msg send), or
	 *  2) Proc is creating the terminator thread in a process (result of pulse).
	 *  So we set both dynamic and home partitions of the child to the
	 *  default partition of the parent's process.
	 */
	if (prp == act->process) {
		thp->dpp = act->dpp;
		thp->orig_dpp = act->orig_dpp;
	} else {
		thp->dpp = prp->default_dpp;
		thp->orig_dpp = prp->default_dpp;
	}

	// Set the child to start running critical if triggered by a critical
	// sigev_thread. The child's critical state lasts only until it becomes
	// receive-blocked.
	thp->sched_flags = 0;
	if ( thread_create_flags & THREAD_CREATE_APS_CRITICAL_FLAG) AP_MARK_THREAD_CRITICAL(thp);

	// Inherit parent runmask
	thp->runmask = thp->default_runmask = act->default_runmask;

	/*
	 * Add a new schedinfo.ss_info object for every thread we create,
	 * even if it isn't needed (delete later). We are temporarily
	 * inheriting the priority, so it only makes sense.
	 */
	if(IS_SCHED_SS(thp)) {
		if(!(thp->schedinfo.ss_info = object_alloc(prp, &ssinfo_souls))) {
			object_free(prp, &thread_souls, thp);
			return EAGAIN;
		}
		memset(thp->schedinfo.ss_info, 0, sizeof(*thp->schedinfo.ss_info));
		//Transfer the inherited attributes across, may be reset later
		thp->schedinfo.ss_info->init_budget = act->schedinfo.ss_info->init_budget;
		thp->schedinfo.ss_info->repl_period = act->schedinfo.ss_info->repl_period;
		thp->schedinfo.ss_info->max_repl = act->schedinfo.ss_info->max_repl;
		thp->schedinfo.ss_info->low_priority = act->schedinfo.ss_info->low_priority;
		thp->schedinfo.ss_info->replenishment.thp = thp;

		//Ensure that we have enough information set-up to allow us to run with a valid
 		//sporadic configuration (ie non-zero execution budget, snapped activation time)
		//Leaving the other fields (repl_count, consumed) as 0 is fine
		snap_time(&thp->schedinfo.ss_info->activation_time, 0);
		thp->schedinfo.ss_info->curr_budget = thp->schedinfo.ss_info->init_budget;
	} else {
		RR_RESET_TICK(thp);
	}

	// Add the object to the process thread vector.
	if((tid = vector_add(&prp->threads, thp, 0)) == -1) {
		object_free(prp, &thread_souls, thp);
		return EAGAIN;
	}
	if(prp->valid_thp == NULL) {
		prp->valid_thp = thp;
	}
	thp->tid = tid;


	/* Give the vendor extension a chance to take first dibs */
	if ( kerop_thread_create_hook != NULL ) {
		int r = kerop_thread_create_hook(act, prp, evp, thread_create_flags, thp);
		if ( r != EOK ) {
			object_free(prp, &thread_souls, thp);
			return r;
		}
	}

	// Figure out the default alignment
	if(prp != act->process) {
		align = align_fault;
	} else if (act->flags & _NTO_TF_ALIGN_FAULT) {
		align = +1;
	} else {
		align = -1;
	}
	cpu_thread_init(act, thp, align);
	thp->flags |= (act->flags & _NTO_TF_IOPRIV);

	// Stash some stuff for use in the WAAA special return code
	thp->args.wa.not_sigev_thread = (thread_create_flags & THREAD_CREATE_BLOCK_FLAG)!=0;
	thp->args.wa.attr = evp->sigev_notify_attributes;
	thp->args.wa.real_attr = thp->args.wa.attr;
	SETKIP_FUNC(thp, evp->sigev_notify_function);
	thp->args.wa.arg = evp->sigev_value.sival_ptr;

	// We cannot initialize the thread private data at this time because
	// we may not have addressability to it. We set a flag which will
	// initialize it when it runs for the first time (its birth cry).
	thp->flags |= _NTO_TF_WAAA | _NTO_TF_DETACHED;

	if(prp->num_active_threads == 0) {
		//get a default value for process/termination priority
		prp->terming_priority = act->priority;
		prp->process_priority = 0;	//Set later on in the specret
	}

	++prp->num_active_threads;

	// Make thread lookups invalid
	vector_flag(&prp->threads, thp->tid, 1);
#ifdef _mt_LTT_TRACES_	/* PDB */
	mt_trace_task_create(prp->pid, tid, thp->priority);
#endif
	_TRACE_TH_EMIT_CREATE(thp);
	if(thread_create_flags & THREAD_CREATE_BLOCK_FLAG) {
		// If the parent needs to know if the create succeeded, we block it.
		act->state = STATE_WAITTHREAD;
		_TRACE_TH_EMIT_STATE(act, WAITTHREAD);
		block_and_ready(thp);
		act->blocked_on = thp;
		SETKSTATUS(act, tid + 1);
		thp->join = act;
	} else {
		ready(thp);
	}

	return ENOERROR;
}
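The evp fields consumed above (sigev_notify_function, sigev_value, sigev_notify_attributes) are the standard POSIX SIGEV_THREAD fields. The following user-side sketch is illustrative, not from the QNX source; it shows how those fields map onto what thread_create() stashes for the WAAA return path:

#include <signal.h>

/* What a SIGEV_THREAD event looks like from user space: the kernel code
 * above reads sigev_notify_function as the entry point, sigev_value as
 * its argument, and sigev_notify_attributes as the pthread attributes. */
static void thread_body(union sigval arg)
{
	/* arg.sival_ptr arrives via thp->args.wa.arg */
	(void)arg;
}

static void setup_event(struct sigevent *evp)
{
	evp->sigev_notify            = SIGEV_THREAD;
	evp->sigev_notify_function   = thread_body;
	evp->sigev_value.sival_ptr   = NULL;	/* argument for thread_body */
	evp->sigev_notify_attributes = NULL;	/* becomes thp->args.wa.attr */
}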
Example #5
int main(int argc, char *argv[])
{
    (void) argc;
    (void) argv;

    hash_table ht;
    ht_init(&ht, HT_KEY_CONST | HT_VALUE_CONST, 0.05);

    char *s1 = (char*)"teststring 1";
    char *s2 = (char*)"teststring 2";
    char *s3 = (char*)"teststring 3";

    ht_insert(&ht, s1, strlen(s1)+1, s2, strlen(s2)+1);

    int contains = ht_contains(&ht, s1, strlen(s1)+1);
    test(contains, "Checking for key \"%s\"", s1);

    size_t value_size;
    char *got = ht_get(&ht, s1, strlen(s1)+1, &value_size);

    fprintf(stderr, "Value size: %zu\n", value_size);
    fprintf(stderr, "Got: {\"%s\": -----\"%s\"}\n", s1, got);

    test(value_size == strlen(s2)+1,
            "Value size was %zu (desired %zu)",
            value_size, strlen(s2)+1);

    fprintf(stderr, "Replacing {\"%s\": \"%s\"} with {\"%s\": \"%s\"}\n", s1, s2, s1, s3);
    ht_insert(&ht, s1, strlen(s1)+1, s3, strlen(s3)+1);

    unsigned int num_keys;
    void **keys;

    keys = ht_keys(&ht, &num_keys);
    test(num_keys == 1, "HashTable has %u keys", num_keys);
    test(keys != NULL, "Keys is not null");
    if(keys)
      free(keys);
    got = ht_get(&ht, s1, strlen(s1)+1, &value_size);

    fprintf(stderr, "Value size: %zu\n", value_size);
    fprintf(stderr, "Got: {\"%s\": \"%s\"}\n", s1, got);

    test(value_size == strlen(s3)+1,
            "Value size was %zu (desired %zu)",
            value_size, strlen(s3)+1);

    fprintf(stderr, "Removing entry with key \"%s\"\n", s1);
    ht_remove(&ht, s1, strlen(s1)+1);

    contains = ht_contains(&ht, s1, strlen(s1)+1);
    test(!contains, "Checking for removal of key \"%s\"", s1);

    keys = ht_keys(&ht, &num_keys);
    test(num_keys == 0, "HashTable has %u keys", num_keys);
    if(keys)
      free(keys);

    fprintf(stderr, "Stress test");
    int key_count = 1000000;
    int i;
    int *many_keys = malloc(key_count * sizeof(*many_keys));
    int *many_values = malloc(key_count * sizeof(*many_values));

    srand(time(NULL));

    for(i = 0; i < key_count; i++)
    {
        many_keys[i] = i;
        many_values[i] = rand();
    }

    struct timespec t1;
    struct timespec t2;

    t1 = snap_time();

    for(i = 0; i < key_count; i++)
    {
        ht_insert(&ht, &(many_keys[i]), sizeof(many_keys[i]), &(many_values[i]), sizeof(many_values[i]));
    }

    t2 = snap_time();

    fprintf(stderr, "Inserting %d keys took %.2f seconds\n", key_count, get_elapsed(t1, t2));
    fprintf(stderr, "Checking inserted keys\n");

    int ok_flag = 1;
    for(i = 0; i < key_count; i++)
    {
        if(ht_contains(&ht, &(many_keys[i]), sizeof(many_keys[i])))
        {
            size_t value_size;
            int value;

            value = *(int*)ht_get(&ht, &(many_keys[i]), sizeof(many_keys[i]), &value_size);

            if(value != many_values[i])
            {
                fprintf(stderr, "Key value mismatch. Got {%d: %d} expected: {%d: %d}\n",
                        many_keys[i], value, many_keys[i], many_values[i]);
                ok_flag = 0;
                break;
            }
        }
        else
        {
            fprintf(stderr, "Missing key-value pair {%d: %d}\n", many_keys[i], many_values[i]);
            ok_flag = 0;
            break;
        }
    }


    test(ok_flag == 1, "Result was %d", ok_flag);
    ht_clear(&ht);
    ht_resize(&ht, 4194304);
    t1 = snap_time();

    for(i = 0; i < key_count; i++)
    {
        ht_insert(&ht, &(many_keys[i]), sizeof(many_keys[i]), &(many_values[i]), sizeof(many_values[i]));
    }

    t2 = snap_time();

    fprintf(stderr, "Inserting %d keys (on preallocated table) took %.2f seconds\n", key_count, get_elapsed(t1, t2));
    for(i = 0; i < key_count; i++)
    {
        ht_remove(&ht, &(many_keys[i]), sizeof(many_keys[i]));
    }
    test(ht_size(&ht) == 0, "%d keys remaining", ht_size(&ht));
    ht_destroy(&ht);
    free(many_keys);
    free(many_values);

    return report_results();
}
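snap_time() and get_elapsed() are not part of the test file. From the call sites, snap_time() returns a struct timespec and get_elapsed() the difference in seconds (printed with %.2f). A plausible pair of helpers, assuming CLOCK_MONOTONIC is the intended clock:

#include <time.h>

/* Assumed helpers matching the call sites above. */
static struct timespec snap_time(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts;
}

static double get_elapsed(struct timespec t1, struct timespec t2)
{
	return (double)(t2.tv_sec - t1.tv_sec) +
	       (t2.tv_nsec - t1.tv_nsec) / 1.0e9;
}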
Example #6
int kdecl
ker_msg_receivev(THREAD *act, struct kerargs_msg_receivev *kap) {
	CHANNEL		*chp;
	CONNECT		*cop;
	THREAD		*thp;
	THREAD		**owner;
	int			 tid, chid;
	unsigned	 tls_flags;
	VECTOR		*chvec;

	chid = act->last_chid = kap->chid;		// Used for priority boost
	chvec = &act->process->chancons;

	if(chid & _NTO_GLOBAL_CHANNEL) {
		chid &= ~_NTO_GLOBAL_CHANNEL;
		chvec = &chgbl_vector;
	}
	if((chp = vector_lookup(chvec, chid)) == NULL  ||
	   chp->type != TYPE_CHANNEL) {
		lock_kernel();
		return ESRCH;
	}

	if(kap->info) {
		WR_VERIFY_PTR(act, kap->info, sizeof(*kap->info));
		// NOTE:
		// Make sure the receive info pointer is valid. Note that we need some
		// extra checks in the mainline when filling in the rcvinfo (this is no
		// longer done in specret).
		//
		// Note: we don't probe the whole buffer, rather touch start and end,
		// which is faster and sufficient
		//
		WR_PROBE_INT(act, kap->info, 1);
		WR_PROBE_INT(act, &kap->info->reserved, 1);
	}

	if(chp->flags & (_NTO_CHF_ASYNC | _NTO_CHF_GLOBAL)) {
		if(chp->flags & _NTO_CHF_GLOBAL) {
			cop = NULL;
			if(kap->coid) {
				if((cop = lookup_connect(kap->coid)) == NULL  ||  cop->type != TYPE_CONNECTION) {
					return EBADF;
				}
			}

			return msgreceive_gbl(act, (CHANNELGBL*) chp, kap->rmsg, -kap->rparts, kap->info, cop, kap->coid);
		} else {
			return msgreceive_async(act, (CHANNELASYNC*) chp, kap->rmsg, kap->rparts);
		}
	}

	/*
	 * Validate incoming IOVs and calculate receive length
	 */
 	if(kap->rparts >= 0) {
		int len = 0;
		int len_last = 0;
		IOV *iov = kap->rmsg;
		int rparts = kap->rparts;

		if (kap->rparts != 0) {
			if (!WITHIN_BOUNDRY((uintptr_t)iov, (uintptr_t)(&iov[rparts]), act->process->boundry_addr)) {
				return EFAULT;
			}
		}

		// Calculate receive length -- even if not requested, we use it for msginfo
		// Do boundary check
		while(rparts) {
			uintptr_t base, last;

			len += GETIOVLEN(iov);
			if (len < len_last) {
				/* overflow: excessively long user IOV, possibly overlaid. pr62575 */
				return EOVERFLOW;
			}
			len_last = len;
			base = (uintptr_t)GETIOVBASE(iov);
			last = base + GETIOVLEN(iov) - 1;
			if(((base > last) || !WITHIN_BOUNDRY(base, last, act->process->boundry_addr)) && (GETIOVLEN(iov) != 0)) {
				return EFAULT;
			}
			++iov;
			--rparts;
		}
		act->args.ms.srcmsglen = len;
	} else {
		// Single part -- validate receive address
		uintptr_t base, last;
		base = (uintptr_t) kap->rmsg;
		last = base + (-kap->rparts) - 1;
		if((base > last) || !WITHIN_BOUNDRY(base, last, act->process->boundry_addr)) {
			// We know length is non-zero from test above
			return EFAULT;
		}
		act->args.ms.srcmsglen = -kap->rparts;
	}


restart:
	// Was there a waiting thread or pulse on the channel?
	thp = pril_first(&chp->send_queue);
restart2:
	if(thp) {
		int xferstat;
		unsigned	type = TYPE_MASK(thp->type);

		// Yes. There is a waiting message.
		if((type == TYPE_PULSE) || (type == TYPE_VPULSE)) {
			PULSE *pup = (PULSE *)(void *)thp;

			act->restart = NULL;
			xferstat = xferpulse(act, kap->rmsg, kap->rparts, pup->code, pup->value, pup->id);

			if(type == TYPE_VPULSE) {
				thp = (THREAD *)pup->id;
				get_rcvinfo(thp, -1, thp->blocked_on, kap->info);
			}

			lock_kernel();
			act->timeout_flags = 0;

			// By default the receiver runs with message driven priority.
			// RUSH: Fix for partition inheritance
			if(act->priority != pup->priority  &&  (chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
				adjust_priority(act, pup->priority, act->process->default_dpp, 1);
				act->real_priority = act->priority;
			} else if(act->dpp != act->process->default_dpp) {
				adjust_priority(act, act->priority, act->process->default_dpp, 1);
			}

			pulse_remove(chp->process, &chp->send_queue, pup);

			if((thp = act->client) != 0) {
				/* need to clear client's server field */
				act->client = 0;
				thp->args.ms.server = 0;
			}

			if(xferstat) {
				return EFAULT;
			}
			_TRACE_COMM_IPC_RET(act);

			return EOK;
		}

		// If the receive request was for a pulse only, keep checking the list..
		if(KTYPE(act) == __KER_MSG_RECEIVEPULSEV) {
			thp = thp->next.thread;
			goto restart2;
		}

#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
		// If thp is in the xfer status in another CPU, try next one
		if(thp->internal_flags & _NTO_ITF_MSG_DELIVERY) {
			thp = thp->next.thread;
			goto restart2;
		}
#endif

		// If an immediate timeout was specified we unblock the sender.
		if(IMTO(thp, STATE_REPLY)) {
			lock_kernel();
			force_ready(thp, ETIMEDOUT);
			unlock_kernel();
			KER_PREEMPT(act, ENOERROR);
			goto restart;
		}

		if(thp->flags & _NTO_TF_BUFF_MSG) {
			xferstat = xfer_cpy_diov(act, kap->rmsg, thp->args.msbuff.buff, kap->rparts, thp->args.msbuff.msglen);
		} else {
			act->args.ri.rmsg = kap->rmsg;
			act->args.ri.rparts = kap->rparts;

			START_SMP_XFER(act, thp);

			xferstat = xfermsg(act, thp, 0, 0);

			lock_kernel();
			END_SMP_XFER(act, thp);

#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
			if(thp->internal_flags & _NTO_ITF_MSG_FORCE_RDY) {
				force_ready(thp,KSTATUS(thp));
				thp->internal_flags &= ~_NTO_ITF_MSG_FORCE_RDY;
				KERCALL_RESTART(act);
				act->restart = 0;
				return ENOERROR;
			}
			if(act->flags & (_NTO_TF_SIG_ACTIVE | _NTO_TF_CANCELSELF)) {
				KERCALL_RESTART(act);
				act->restart = 0;
				return ENOERROR;
			}
#endif
		}

		if(xferstat) {
			lock_kernel();

			// Only a send fault will unblock the sender.
			if(xferstat & XFER_SRC_FAULT) {
				// Let sender know it faulted and restart receive.
				force_ready(thp, EFAULT);
				unlock_kernel();
				KER_PREEMPT(act, ENOERROR);
				goto restart;
			}

			if((thp = act->client) != 0) {
				/* need to clear client's server field */
				act->client = 0;
				thp->args.ms.server = 0;
			}

			// Let receiver and sender know reason for fault.
			act->timeout_flags = 0;
			return EFAULT;
		}

		if(TYPE_MASK(thp->type) == TYPE_VTHREAD) {
			tid = thp->args.ri.rparts;
		} else {
			tid = thp->tid;
		}
		cop = thp->blocked_on;
		if(thp->args.ms.srcmsglen == ~0U) {
			// This should never occur with the new code
			crash();
			/* NOTREACHED */
			thp->args.ms.srcmsglen = thp->args.ms.msglen;
		}

		// If the receive specified an info buffer stuff it as well.
		// thp->args.ms.msglen was set by xfermsg
		if(kap->info) {
		//	get_rcvinfo(thp, -1, cop, kap->info);
			STUFF_RCVINFO(thp, cop, kap->info);
			if(thp->flags & _NTO_TF_BUFF_MSG) {
				if(kap->info->msglen > act->args.ms.srcmsglen) kap->info->msglen = act->args.ms.srcmsglen;
			}
		}

		lock_kernel();
		_TRACE_COMM_IPC_RET(act);
		act->timeout_flags = 0;
		act->restart = NULL;

		// Because _NTO_TF_RCVINFO and _NTO_TF_SHORT_MSG will not be set, set this to NULL
		thp->restart = NULL;

		if(act->client != 0) {
			/* need to clear client's server field */
			act->client->args.ms.server = 0;
		}
		thp->args.ms.server = act;
		act->client = thp;

		pril_rem(&chp->send_queue, thp);
		if(thp->state == STATE_SEND) {
			thp->state = STATE_REPLY;
			snap_time(&thp->timestamp_last_block,0);
			_TRACE_TH_EMIT_STATE(thp, REPLY);
			SETKSTATUS(act, (tid << 16) | cop->scoid);
		} else {
			thp->state = STATE_NET_REPLY;
			_TRACE_TH_EMIT_STATE(thp, NET_REPLY);
			SETKSTATUS(act, -((tid << 16) | cop->scoid));
		}
		LINKPRIL_BEG(chp->reply_queue, thp, THREAD);

		// By default the receiver runs with message driven priority.
		// RUSH: Fix for partition inheritance
		if((act->priority != thp->priority || act->dpp != thp->dpp) &&  (chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
			AP_INHERIT_CRIT(act, thp);
			adjust_priority(act, thp->priority, thp->dpp, 1);
			if(act->real_priority != act->priority) act->real_priority = act->priority;
		} else {
			AP_CLEAR_CRIT(act);
		}

		return ENOERROR;
	}

	// No-one waiting for a msg so block
	tls_flags = act->un.lcl.tls->__flags;
	lock_kernel();
	_TRACE_COMM_IPC_RET(act);

	if((thp = act->client) != 0) {
		/* need to clear client's server field */
		act->client = 0;
		thp->args.ms.server = 0;
	}

	if(IMTO(act, STATE_RECEIVE)) {
		return ETIMEDOUT;
	}

	if(PENDCAN(tls_flags)) {
		SETKIP_FUNC(act, act->process->canstub);
		return ENOERROR;
	}

	// Can't call block() here, because act may not be actives[KERNCPU]
	// anymore - if the sender faulted, we call force_ready() above and
	// that might change actives[KERNCPU]
	unready(act, STATE_RECEIVE);

	// End inheritance of partition and critical state. This must be after block() so that we microbill
	// the partition we were running in before we reset to the original partition. PR26990
	act->dpp = act->orig_dpp;
	AP_CLEAR_CRIT(act);

	act->blocked_on = chp;
	act->args.ri.rmsg = kap->rmsg;
	act->args.ri.rparts = kap->rparts;
	act->args.ri.info = kap->info;

	// Add to the receive queue, put pulse only receives at the end of
	// the list so the ker_msg_send() only has to check the head of the list
	owner = &chp->receive_queue;
	if(KTYPE(act) == __KER_MSG_RECEIVEPULSEV) {
		act->internal_flags |= _NTO_ITF_RCVPULSE;
		for( ;; ) {
			thp = *owner;
			if(thp == NULL) break;
			if(thp->internal_flags & _NTO_ITF_RCVPULSE) break;
			owner = &thp->next.thread;
		}
	}
	LINKPRIL_BEG(*owner, act, THREAD);
	return ENOERROR;
}
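The IOV validation loop above, like its twins in ker_msg_sendv() (Example #7), detects overflow of the summed lengths by checking whether the running total ever decreases. The same guard lifted into a stand-alone helper, with simplified types in place of the kernel's IOV accessors:

#include <errno.h>

/* Simplified stand-in for the kernel IOV; GETIOVBASE/GETIOVLEN in the
 * source hide the real layout. */
typedef struct { void *base; unsigned len; } iov_t;

/* Sum IOV lengths, returning EOVERFLOW if the signed total wraps
 * (the pr62575 guard above), 0 otherwise with the total in *out. */
static int sum_iov_len(const iov_t *iov, int parts, int *out)
{
	int len = 0, len_last = 0;

	while (parts-- > 0) {
		len += (int)iov->len;
		if (len < len_last)
			return EOVERFLOW;	/* wrapped: IOVs too long */
		len_last = len;
		++iov;
	}
	*out = len;
	return 0;
}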
Example #7
int kdecl
ker_msg_sendv(THREAD *act, struct kerargs_msg_sendv *kap) {
	CONNECT		*cop;
	CHANNEL		*chp;
	int			 type = KTYPE(act);
	THREAD		*thp;
	THREAD		*sender;
	PROCESS		*actprp = act->process;
	unsigned	th_flags = 0;
	uint32_t	net_srcmsglen = -1U;


	/*
	 * These are the usual incoming checks
	 *  - validate connection
	 *  - get channel pointer
	 *  - check for cancellation
	 */

	// Lookup src connect.
	if((cop = inline_lookup_connect(actprp, kap->coid)) == NULL || cop->type != TYPE_CONNECTION) {
		return EBADF;
	}

	// Get dst channel.
	if((chp = cop->channel) == NULL) {
		return EBADF;
	}

	_TRACE_COMM_EMIT_SMSG(act, cop, (act->tid << 16) | cop->scoid);

	if(PENDCAN(act->un.lcl.tls->__flags) && (type != __KER_MSG_SENDVNC)) {
		lock_kernel();
		SETKIP_FUNC(act, act->process->canstub);
		return ENOERROR;
	}

	/*
	 * The base conditions are now met. If this is a netcon or async channel,
	 * we handle separately
	 */
	if(chp->flags & (_NTO_CHF_ASYNC | _NTO_CHF_GLOBAL)) {
		if(chp->flags & _NTO_CHF_GLOBAL) {
			return msgsend_gbl(act, cop, kap->smsg, -kap->sparts, (unsigned)-kap->rparts, kap->coid);
		} else {
			return msgsend_async(act, cop);
		}
	}

	sender = act;

	// Store incoming args
	if(cop->flags & COF_NETCON) {
		RD_PROBE_INT(act, kap->rmsg, sizeof(struct _vtid_info) / sizeof(int));
		sender = (THREAD *)(void *)net_send1(kap->rparts, (struct _vtid_info *)(void *)kap->rmsg);
		if(sender == NULL) {
			return EINVAL;
		}
		if(sender->state != STATE_STOPPED) crash();
		sender->args.ms.rmsg = kap->rmsg;
		sender->args.ms.rparts = kap->rparts;
		act->args.ms.smsg = kap->smsg;
		act->args.ms.sparts = kap->sparts;
		// Do this up-front while we have addressability
		net_srcmsglen = ((struct _vtid_info *)(void *)kap->rmsg)->srcmsglen;
	} else {
		sender->args.ms.coid = kap->coid;
		sender->args.ms.rmsg = kap->rmsg;
		sender->args.ms.rparts = kap->rparts;
	}

	sender->flags &= ~_NTO_TF_BUFF_MSG;
	// Make sure the SPECRET_PENDING bit isn't set when we don't need it.
	sender->internal_flags &= ~_NTO_ITF_SPECRET_PENDING;

	// Validate incoming IOVs - override for QNET case - rparts/rmsg have special meaning
	if(cop->flags & COF_NETCON) {
		sender->args.ms.dstmsglen = ((struct _vtid_info *)(void *)kap->rmsg)->dstmsglen;
	} else if(kap->rparts >= 0) {
		int len = 0;
		int len_last = 0;
		IOV *iov = kap->rmsg;
		int rparts = kap->rparts;
		int niov = 0;

		// Incoming reply IOV -- make copy of reply IOVs
		// Calculate reply length -- even if not requested, it is almost free
		// Also do boundary check
		while(rparts) {
			uintptr_t base, last;

			len += GETIOVLEN(iov);
			if (len < len_last) {
				/* overflow: excessively long user IOV, possibly overlaid. pr62575 */
				return EOVERFLOW;
			}
			len_last = len;
			base = (uintptr_t)GETIOVBASE(iov);
			last = base + GETIOVLEN(iov) - 1;
			if(((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) && (GETIOVLEN(iov) != 0)) {
				return EFAULT;
			}
			// Keep copy of IOV
			if(niov < _NUM_CACHED_REPLY_IOV) {
			//	sender->args.ms.riov[niov] = *iov;
			}
			++iov;
			++niov;
			--rparts;
		}
		sender->args.ms.dstmsglen = len;
	} else {
		// Single part -- validate and store reply address
		uintptr_t base, last;
		base = (uintptr_t) kap->rmsg;
		last = base + (-kap->rparts) - 1;
		if((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) {
			// We know length is non-zero from test above
			return EFAULT;
		}
		sender->args.ms.dstmsglen = -kap->rparts;
	}


	/* Send IOVs */
	if(kap->sparts < 0) {
		// Single part -- do the boundary check and copy if short message
		uintptr_t base, last;
		int	len;

		base = (uintptr_t) kap->smsg;
		len = -kap->sparts;
		last = base + len - 1;
		if((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) {
			// We know length is non-zero from test above
			return EFAULT;
		}
		sender->args.ms.srcmsglen = len;

		if(len <= sizeof(sender->args.msbuff.buff)) {
			(void)__inline_xfer_memcpy(sender->args.msbuff.buff, (char *)base, sender->args.msbuff.msglen = len);
			th_flags = _NTO_TF_BUFF_MSG;
		}
	} else if(kap->sparts == 1) {
		// Single IOV -- do the boundary check and copy if short message
		uintptr_t base, last, len;

		base = (uintptr_t)GETIOVBASE(kap->smsg);
		len = GETIOVLEN(kap->smsg);
		last = base + len - 1;
		if(((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) && (len != 0)) {
			return EFAULT;
		}
		sender->args.ms.srcmsglen = len;
		if(len <= sizeof(sender->args.msbuff.buff)) {
			(void)__inline_xfer_memcpy(sender->args.msbuff.buff, (char *)base, sender->args.ms.msglen = len);
			th_flags = _NTO_TF_BUFF_MSG;
		}
	} else {
		// Multi IOV case
		int len = 0;
		int len_last =0;
		IOV *iov = kap->smsg;
		int sparts = kap->sparts;

		// Calculate send length -- even if not requested, it is almost free
		// Also do boundary check
		while(sparts) {
			uintptr_t base, last;

			len += GETIOVLEN(iov);
			if (len < len_last) {
				/* overflow: excessively long user IOV, possibly overlaid. pr62575 */
				return EOVERFLOW;
			}
			len_last = len;
			base = (uintptr_t)GETIOVBASE(iov);
			last = base + GETIOVLEN(iov) - 1;
			if(((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) && (GETIOVLEN(iov) != 0)) {
				return EFAULT;
			}
			++iov;
			--sparts;
			// Keep copy of IOV -- NYI, only really need if no receiver
			//if(niov < _NUM_CACHED_SEND_IOV) {
			//	sender->args.ms.siov[niov] = *iov;
			//}
		}
		sender->args.ms.srcmsglen = len;
		if(len <= sizeof(sender->args.msbuff.buff)) {
			int pos = 0;
			iov = kap->smsg;
			sparts = kap->sparts;
			// Multi-IOV incoming message that is short
			// FIXME -- need memcpy_siov for efficiency
			while(sparts) {
				int ilen = GETIOVLEN(iov);
				__inline_xfer_memcpy(&sender->args.msbuff.buff[pos], GETIOVBASE(iov), ilen);

				pos += ilen;
				iov++;
				sparts--;
			}
			sender->args.ms.msglen = len;
			th_flags = _NTO_TF_BUFF_MSG;
		}
	}

	// Now that the up-front business is done, we do the actual copy. If
	// this was identified as a short message, we have copied the message into the msgbuff area.

	// Was there a waiting thread on the channel?

	thp = chp->receive_queue;
#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
	while((thp != NULL) && (thp->internal_flags & _NTO_ITF_MSG_DELIVERY)) {
		thp = thp->next.thread;
	}
#endif
	if((thp != NULL) && !(thp->internal_flags & _NTO_ITF_RCVPULSE) ) {

		int xferstat;
		// If an immediate timeout was specified we return immediately.
		if(IMTO(act, STATE_REPLY)) {
			sender->flags &= ~_NTO_TF_BUFF_MSG;
			return ETIMEDOUT;
		}

		// Is this a long message?
		if(th_flags == 0) {
			sender->args.ms.smsg = kap->smsg;
			sender->args.ms.sparts = kap->sparts;
			START_SMP_XFER(act, thp);
			// Yes. Transfer the data.
			xferstat = xfermsg(thp, act, 0, 0);
			sender->args.ms.msglen = act->args.ms.msglen;

			lock_kernel();
			END_SMP_XFER(act, thp);

#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
			if(thp->internal_flags & _NTO_ITF_MSG_FORCE_RDY) {
				force_ready(thp,KSTATUS(thp));
				thp->internal_flags &= ~_NTO_ITF_MSG_FORCE_RDY;
				KERCALL_RESTART(act);
				act->restart = 0;
				return ENOERROR;
			}
			if(act->flags & (_NTO_TF_SIG_ACTIVE | _NTO_TF_CANCELSELF)) {
				/* send is a cancelation point */
				KERCALL_RESTART(act);
				act->restart = 0;
				return ENOERROR;
			}
#endif

			if(xferstat) {
				lock_kernel();
				// If sender faulted let him know and abort the operation
				// without waking up the receiver.
				if(xferstat & XFER_SRC_FAULT) {
					goto send_fault;
				}
				// If receiver faulted, wake him up with an error and fail the
				// send.
				goto rcv_fault;
			}
		} else {

			// Short message. We do the following:
			// - switch aspace to receiver
			if(thp->aspace_prp && thp->aspace_prp != aspaces_prp[KERNCPU]) {
				/*
				 * Lock/unlock kernel if necessary before calling memmgr.aspace
				 */
				SWITCH_ASPACE(thp->aspace_prp, &aspaces_prp[KERNCPU], act);
			}
			// - copy message and handle errors
			if((xferstat = xfer_cpy_diov(thp, thp->args.ri.rmsg, sender->args.msbuff.buff, thp->args.ri.rparts, sender->args.msbuff.msglen))) {
				lock_kernel();
				// Has to be a receiver fault;
				goto rcv_fault;
			}
			sender->flags |= _NTO_TF_BUFF_MSG;
			// Note: below this point, we should NOT reference kap anywhere
			// as kap points to the original aspace
		}


		// If the receive specified an info buffer stuff it as well.
		// However, we are not in the address space of the destination
		// thread, we switch now
		thp->restart = NULL;

		if(thp->args.ri.info)  {
			struct _msg_info *repp = thp->args.ri.info;
			// Fill in rcvinfo
			// Switch to aspace of receiver. It's already adjusted if short msg.
			if(th_flags == 0) {
				if(thp->aspace_prp && thp->aspace_prp != aspaces_prp[KERNCPU]) {
					/*
					 * Kernel is already locked so we don't need SWITCH_ASPACE
					 */
					memmgr.aspace(thp->aspace_prp,&aspaces_prp[KERNCPU]);
				}
				if(cop->flags & COF_NETCON) {
					// Note: have to adjust srcmsglen before stuffing rcvinfo!
					sender->args.ms.srcmsglen = net_srcmsglen;
				}
			}
			// We can use a fast inline version as we know the thread does not
			// have an unblock pending
			STUFF_RCVINFO(sender, cop, thp->args.ri.info);

			// RUSH: Adjust msglen in better fashion...
			if(thp->args.ms.srcmsglen < repp->msglen) {
				repp->msglen = thp->args.ms.srcmsglen;
			}
		}

		lock_kernel();
		SETKSTATUS(thp, (sender->tid << 16) | cop->scoid);

		// Unlink receive thread from the receive queue.
		LINKPRIL_REM(thp);

		sender->args.ms.server = thp;
		thp->client = sender;

		// Check fast path conditions - no timeouts, no QNET, no sporadic.
		// We can inline the block_and_ready()
		if((sender->timeout_flags == 0) &&
			(thp->timeout_flags == 0) &&
			!(cop->flags & COF_NETCON) &&
			!(chp->flags & _NTO_CHF_FIXED_PRIORITY) &&
			!IS_SCHED_SS(sender)) {

			// By default the receiver runs with message driven priority.
			thp->real_priority = thp->priority = sender->priority;
			thp->dpp = sender->dpp;
			AP_INHERIT_CRIT(thp, sender);

			sender->state = STATE_REPLY;	// Must be set before calling block_and_ready()
			snap_time(&sender->timestamp_last_block,0);
			_TRACE_TH_EMIT_STATE(sender, REPLY);
#if defined(INLINE_BLOCKANDREADY)
			// This is an inline version of block an ready
			// We can use this for non-SMP (no runmask).
			// This also works for AP as we inherit the partition
			thp->next.thread = NULL;
			thp->prev.thread = NULL;
#ifdef _mt_LTT_TRACES_	/* PDB */
			//mt_TRACE_DEBUG("PDB 4.2");
			//mt_trace_var_debug(actives[KERNCPU]->process->pid, actives[KERNCPU]->tid, actives[KERNCPU]);
			mt_trace_task_suspend(actives[KERNCPU]->process->pid, actives[KERNCPU]->tid);
#endif
			//thp->restart = NULL;
			actives[KERNCPU] = thp;
			thp->state = STATE_RUNNING;
			//@@@ Hmm. This inline version of block_and_ready() may cause a small inaccuracy with APS.
			//thp->runcpu = KERNCPU;
#ifdef _mt_LTT_TRACES_	/* PDB */
			//mt_TRACE_DEBUG("PDB 4.3");
			//mt_trace_var_debug(thp->process->pid, thp->tid, thp);
			mt_trace_task_resume(thp->process->pid, thp->tid);
#endif
			_TRACE_TH_EMIT_STATE(thp, RUNNING);
#else
			block_and_ready(thp);
#endif
		} else {
			if((chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
				// By default the receiver runs with message driven priority.
				thp->real_priority = thp->priority = sender->priority;
				thp->dpp = sender->dpp;
				AP_INHERIT_CRIT(thp, sender);
			}
			sender->state = STATE_REPLY;	// Must be set before calling block_and_ready()
			_TRACE_TH_EMIT_STATE(sender, REPLY);

			if(cop->flags & COF_NETCON) {
				SETKSTATUS(act, 1);
				if((sender->flags & _NTO_TF_BUFF_MSG) == 0) {
					// #### Note: use net_srcmsglen saved above before we switch aspace
					sender->args.ms.srcmsglen = net_srcmsglen;
				}

				SETKSTATUS(thp, (sender->args.ms.rparts << 16) | cop->scoid);
				ready(thp);
			} else {
				block_and_ready(thp);
			}

			if(thp->timeout_flags & _NTO_TIMEOUT_REPLY) {
				// arm the timeout for reply block
				timeout_start(thp);
			}
		}

		// Block the active thread and ready the receiver thread
		sender->blocked_on = cop;

		// Link the now reply blocked sending thread in the reply queue
		LINKPRIL_BEG(chp->reply_queue, sender, THREAD);
		++cop->links;

		return ENOERROR;
	}

	// No-one waiting for a msg
	// If a normal thread
	//     Block the active thread
	//     Link the now send blocked thread into the reply queue
	// If a network thread send
	//     Link the passed vthread into the reply queue
	// Boost the servers priority to the clients if needed.
	if(th_flags == 0) {
		sender->args.ms.smsg = kap->smsg;
		sender->args.ms.sparts = kap->sparts;
			// FUTURE: Make copy of send IOVs
	} else {
		sender->flags |= _NTO_TF_BUFF_MSG;
	}


	if(IMTO(sender, STATE_SEND)) {
		sender->flags &= ~_NTO_TF_BUFF_MSG;
		return ETIMEDOUT;
	}

	lock_kernel();

	// Incoming network Send.
	// We use vtid passed in kap->rparts and _vtid_info passed in kap->rmsg
	if(cop->flags & COF_NETCON) {
		if(sender->flags & _NTO_TF_BUFF_MSG) {
			SETKSTATUS(act, 1);
		} else {
			// Return zero telling the network manager we still need the send data.
			// A _PULSE_CODE_NET_ACK will be sent later when the receive completes.
			sender->args.ms.srcmsglen = net_srcmsglen;
			SETKSTATUS(act, 0);
		}
		sender->state = STATE_SEND;
		snap_time(&sender->timestamp_last_block,0);
		_TRACE_TH_EMIT_STATE(sender, SEND);
	} else {
		//
		// Don't allow any MsgSend's to processes that are dying.
		// Only have to check here because of code in nano_signal.c
		// - check the comment where we turn on the _NTO_PF_COREDUMP
		// flag.
		//
		if(chp->process->flags & (_NTO_PF_TERMING | _NTO_PF_ZOMBIE | _NTO_PF_COREDUMP)) {
			return ENXIO;
		}
		// Can't use block(), because 'sender' might not actually be the
		// actives[KERNCPU] anymore...
		unready(sender, STATE_SEND);
	}

	sender->blocked_on = cop;
	pril_add(&chp->send_queue, sender);
	++cop->links;

	// To prevent priority inversion, boost all threads in the server
	//
	// for non-APS scheduling: raise prio of thread who last used this channel to at least that of the sender
	//
	// for APS scheduling: also cause the out-of-budget threads to inherit the budget of the sender,
	// but do not inherit the critical state.
	if((chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
		int i;

		for(i = 0 ; i < chp->process->threads.nentries ; ++i) {
			if(VECP(thp, &chp->process->threads, i) &&  thp->last_chid == chp->chid) {
				short may_run = may_thread_run(thp);
				if ( thp->priority < sender->priority ) {
					adjust_priority(thp, sender->priority, may_run ? thp->dpp : sender->dpp, 1 );
					thp->real_priority = thp->priority;
				} else {
					if (!may_run) {
						// server threads are higher prio, but have no budget. So inherit budget only
						adjust_priority(thp, thp->priority, sender->dpp, 1);
					}
				}
			}
		}
	}

	return ENOERROR;

send_fault:
	sender->flags &= ~_NTO_TF_BUFF_MSG;

	return EFAULT;

rcv_fault:
	sender->flags &= ~_NTO_TF_BUFF_MSG;
	kererr(thp, EFAULT);
	LINKPRIL_REM(thp);
	ready(thp);

	/* Restart the kernel call - same behavior as receive path */
	KERCALL_RESTART(act);

	return ENOERROR;
}
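For orientation, the user-level calls that enter these two kernel paths: a client's MsgSend() arrives in ker_msg_sendv() and leaves the caller SEND- and then REPLY-blocked, while a server's MsgReceive() arrives in ker_msg_receivev(). A minimal client/server sketch using the public QNX API (error handling trimmed; the channel and connection ids are assumed valid):

#include <sys/neutrino.h>

/* Client side: enters ker_msg_sendv(); blocks until the server replies. */
void client(int coid)
{
	char msg[] = "hello", reply[16];

	MsgSend(coid, msg, sizeof(msg), reply, sizeof(reply));
}

/* Server side: enters ker_msg_receivev(); rcvid identifies the
 * REPLY-blocked sender for the later MsgReply(). */
void server(int chid)
{
	char buf[16];
	struct _msg_info info;
	int rcvid = MsgReceive(chid, buf, sizeof(buf), &info);

	if (rcvid > 0)
		MsgReply(rcvid, 0, buf, sizeof(buf));
}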