示例#1
0
文件: client.c 项目: kvap/raft
/*
 * query - send msg and wait for answer, retrying until success or timeout.
 *
 * timeout_ms < 0 means retry forever, giving each attempt a fresh 100 ms
 * budget; otherwise the whole operation is bounded by timeout_ms.  After
 * every failed attempt the current leader connection is dropped so the
 * next try can re-resolve/reconnect.
 *
 * Returns true on success, false once the overall timeout expires.
 */
static bool query(Message *msg, Message *answer, int timeout_ms) {
	timeout_t timeout;

	if (timeout_ms < 0) {
		/* Unbounded mode: never falls out of this loop. */
		for (;;) {
			timeout_start(&timeout, 100);
			if (try_query(msg, answer, &timeout))
				return true;
			disconnect_leader();
		}
	}

	/* Bounded mode: keep retrying while the overall budget lasts. */
	timeout_start(&timeout, timeout_ms);
	TIMEOUT_LOOP_START(&timeout); {
		if (try_query(msg, answer, &timeout))
			return true;
		disconnect_leader();
	} TIMEOUT_LOOP_END(&timeout);

	shout("query failed after %d ms\n", timeout_elapsed_ms(&timeout));
	return false;
}
示例#2
0
文件: atben.c 项目: cfriedt/ben-wpan
/*
 * atben_interrupt_wait - poll the ATBEN device until an interrupt with a
 * non-zero IRQ status is observed, or the timeout expires.
 *
 * Timeout modes as the code stands:
 *   timeout_ms > 0  : wait at most timeout_ms milliseconds.
 *   timeout_ms == 0 : wait forever, sleeping 1 ms between polls.
 *   timeout_ms < 0  : busy-poll forever without sleeping.
 *     NOTE(review): the < 0 case spins the CPU (usleep is skipped) --
 *     confirm this "busy wait" mode is intentional and not an inverted test.
 *
 * Returns the latched IRQ status byte (non-zero), or 0 on timeout.
 */
int atben_interrupt_wait(void *handle, int timeout_ms)
{
	struct timeout to;
	int timedout = 0;
	uint8_t irq;

	if (timeout_ms > 0)
		timeout_start(&to, timeout_ms);
	while (1) {
		/* Sample expiry first so one final poll still happens after the
		 * deadline passes (timedout is acted on only after polling). */
		if (timeout_ms > 0)
			timedout = timeout_reached(&to);
		if (atben_interrupt(handle)) {
			irq = atben_reg_read(handle, REG_IRQ_STATUS);
			if (irq)
				return irq;
			/* interrupt line asserted but no cause latched in the chip */
			fprintf(stderr, "ignoring stray interrupt\n");
		}
		if (timedout)
			return 0;
		if (timeout_ms >= 0)
			usleep(1000);
	}

	/* not reached: the loop above only exits via return */
	return 0;
}
示例#3
0
文件: uart_state.c 项目: baertschi/WK
/*
 * uart_process_data - feed one received byte into the NTAG UART protocol
 * state machine.
 *
 * Frame layout driven by this machine: <cmd> <count> <data...> <checksum>.
 * Global state used: uartState (machine state), uartCmd (current command),
 * uartByteCnt (expected payload length), uartByteInc (bytes received so
 * far), uartData (payload buffer), uartCheckSum (received checksum).
 *
 * cmd: the next byte from the UART.
 */
void uart_process_data(u8 cmd)
{
    if(uartState == UART_IDLE)
    {
        /* First byte of a frame: must be a known command code. */
        if(cmd >= NTAG_SCAN_CMD && cmd <= NTAG_VERSION_CMD)
        {
            timeout_start(4);
            uartCmd = cmd;
            uartState = UART_CNT;
        }
        else if(cmd == NTAG_CMD_ERROR || cmd == NTAG_CNT_ERROR || cmd == NTAG_CHK_ERROR)
        {
            /* Peer reported an error with our last frame: resend it. */
            uartState = UART_RESEND;
        }
        else
        {
            uartState = UART_CMD_TIMEOUT; // command error
            timeout_start(3);
        }
    }
    else if(uartState == UART_CNT)
    {
        /* Second byte: payload length; validated per command. */
        uartByteCnt = cmd;
        if(uartCmd == NTAG_SCAN_CMD)
        {
            if(uartByteCnt != 0)
            {
                uartState = UART_CNT_TIMEOUT;
//                timeout_start(2);
            }
            else
            {
                /* SCAN carries no payload: checksum comes next. */
                uartState = UART_CHECK;
            }
        }
        else if(uartCmd == NTAG_SELECT_CMD)
        {
            if(uartByteCnt != 9)
            {
                uartState = UART_CNT_TIMEOUT;
//                timeout_start(2);
            }
            else
            {
                uartState = UART_DATA;
//                timeout_start(2);
                uartByteInc = 0;
            }
        }
        else if(uartCmd == NTAG_READ_CMD)
        {
            if(uartByteCnt != 11)
            {
                uartState = UART_CNT_TIMEOUT;
//                timeout_start(2);
            }
            else
            {
                uartState = UART_DATA;
//                timeout_start(2);
                uartByteInc = 0;
            }
        }
        else if(uartCmd == NTAG_WRITE_CMD)
        {
            /* BUG FIX: was `uartByteCnt < 14 && uartByteCnt > 42`, which no
             * byte can satisfy, so bad write lengths were never rejected.
             * A write payload is valid only when it is 14..42 bytes. */
            if(uartByteCnt < 14 || uartByteCnt > 42)
            {
                uartState = UART_CNT_TIMEOUT;
//                timeout_start(2);
            }
            else
            {
                uartState = UART_DATA;
//                timeout_start(3);
                uartByteInc = 0;
            }
        }
        else if(uartCmd == NTAG_VERSION_CMD)
        {
            if(uartByteCnt != 0)
            {
                uartState = UART_CNT_TIMEOUT;
//                timeout_start(2);
            }
            else
            {
                uartState = UART_CHECK;
            }
        }
    }
    else if(uartState == UART_DATA)
    {
        /* Payload bytes: accumulate until uartByteCnt bytes have arrived. */
        if(uartByteCnt)
        {
            uartData[uartByteInc] = cmd;
            uartByteInc++;
            if(uartByteCnt == uartByteInc)  // time-out to be added for this condition being not met
            {
                uartState = UART_CHECK;
                uartByteInc = 0;
            }
        }
    }
    else if(uartState == UART_CHECK)
    {
        /* Final byte: checksum.  Frame complete, stop the guard timer. */
        uartCheckSum = cmd;
        uartState = UART_CAL;  // null state to temperory reject incoming data
        timeout_stop(); // all data received
    }
}
示例#4
0
/*
 * http_connect - open a TCP connection to addr->host:addr->port, using a
 * non-blocking connect bounded by `timeout`, then restore blocking mode.
 *
 * addr:    host (optionally "host:port") and port; an out-of-range port is
 *          replaced by DEF_PORT.
 * timeout: connect timeout passed to timeout_start(); values > 60 are
 *          mapped to 0 (presumably "wait indefinitely" -- TODO confirm).
 *
 * Returns the connected socket descriptor (caller must close() it), or -1
 * on any error.
 *
 * Fixes relative to the previous revision:
 *  - removed a stray `break;` between timeout_start() and timeout_end()
 *    that made the entire status-check block unreachable, so connect
 *    errors and timeouts were silently ignored;
 *  - close(connfd) on the error paths that previously leaked the socket;
 *  - check the calloc() result.
 */
int http_connect(struct http_addr *addr, unsigned int timeout)
{
	if(addr->port <= 0 || addr->port >= 0XFFFF)
	{
		_WARN("Error port: %d, set by DEF_PORT.", addr->port);
		addr->port = DEF_PORT;
	}

	int connfd, flag;
	struct hostent *he;
	struct sockaddr_in server;

	char *tmp;

	/* Copy the host up to (and including) any ':'; the trailing ':' is then
	 * stripped so a "host:port" spec resolves on the bare host name. */
	tmp = calloc(strlen(addr->host) + 1, 1);
	if(tmp == NULL)
	{
		_ERROR("Out of memory copying host name.");
		return -1;
	}
	memccpy(tmp, addr->host, ':', strlen(addr->host));

	/* remove port */
	if(strchr(tmp, ':'))
	{
		*(tmp + strlen(tmp) - 1) = '\0';
	}

	if(he = gethostbyname(tmp), he == NULL)
	{
		_ERROR("Get host(%s) info error.", addr->host);

		free(tmp);

		return -1;
	}

	free(tmp);

	/* init server's address information */
	memset(&server, 0, sizeof(struct sockaddr_in));
	server.sin_family = AF_INET;
	server.sin_port = htons(addr->port);
	server.sin_addr = *((struct in_addr *)he->h_addr);

	/* create tcp connection */
	connfd = socket(AF_INET, SOCK_STREAM, 0);
	if(connfd < 0)
	{
		_ERROR("Create TCP connection error: %s", strerror(errno));
		return -1;
	}

	/* set non-blocking so connect() returns immediately with EINPROGRESS */
	if(flag = fcntl(connfd, F_GETFL), flag < 0)
	{
		_ERROR("Get connection file descriptor flag error: %s", strerror(errno));

		close(connfd);
		return -1;
	}

	if(fcntl(connfd, F_SETFL, flag | O_NONBLOCK) < 0)
	{
		_ERROR("Set connection file descriptor flag error: %s", strerror(errno));

		close(connfd);
		return -1;
	}

	int ret, status;

	/* connect to server with timeout */
	ret = connect( connfd, (struct sockaddr *)&server, sizeof(struct sockaddr) );

	if(ret < 0)
	{
		/* In-progress: wait for writability (connect completion) with a
		 * timeout.  NOTE(review): timeout_start/timeout_end are assumed to
		 * bracket a select/poll on connfd reporting through `status` --
		 * verify against their definitions. */
		if(errno == EINPROGRESS)
		{
			while(1)
			{
				timeout_start(connfd, timeout > 60 ? 0 : timeout, TIMEOUT_WRITE, &status);
				timeout_end();

				if(status == EINTR)
					continue;

				else if(status < 0)
				{
					_ERROR("Connect to server(%s) error: %s", addr->host, strerror(status));
					close(connfd);
					return -1;
				}
				else if(status == 0)
				{
					_ERROR("Connect to server(%s) timeout.", addr->host);
					close(connfd);
					return -1;
				}

				/* connection is ok */
				break;
			}
		}
		else
		{
			_ERROR("Connect to server(%s) error: %s", addr->host, strerror(errno));
			close(connfd);
			return -1;
		}
	}

	/* restore blocking mode for the caller */
	if(fcntl(connfd, F_SETFL, flag) < 0)
	{
		_ERROR("Set connfd blocking is error: %s", strerror(errno));
		close(connfd);
		return -1;
	}

	return connfd;
}
示例#5
0
/*
 * ker_msg_sendv - kernel entry point for MsgSendv(): deliver a message
 * from the active thread to the channel behind connection kap->coid.
 *
 * Flow as visible in this function:
 *  - validate the connection and channel; honour pending cancellation
 *    (unless the NC variant __KER_MSG_SENDVNC was used);
 *  - divert to msgsend_gbl()/msgsend_async() for global/async channels;
 *  - for QNET connections (COF_NETCON) the logical sender is a vthread
 *    looked up via net_send1() from the _vtid_info passed in kap->rmsg,
 *    with kap->rparts carrying the vtid;
 *  - validate reply IOVs and send IOVs (length-overflow and address
 *    boundary checks); sends that fit in sender->args.msbuff.buff are
 *    copied up-front and flagged _NTO_TF_BUFF_MSG ("short message");
 *  - if a receiver is waiting on the channel: transfer the data (or copy
 *    the buffered short message), optionally fill in the receiver's
 *    _msg_info, REPLY-block the sender and ready the receiver -- with an
 *    inlined fast path replacing block_and_ready() when there are no
 *    timeouts, no QNET, no fixed-priority channel and no sporadic
 *    scheduling;
 *  - otherwise SEND-block the sender, queue it on the channel's send
 *    queue, and boost priority/budget of the server's threads that last
 *    used this channel to avoid priority inversion.
 *
 * Returns ENOERROR on success or kernel-call restart, else an errno:
 * EBADF, EINVAL, EFAULT, EOVERFLOW, ETIMEDOUT, ENXIO.
 */
int kdecl
ker_msg_sendv(THREAD *act, struct kerargs_msg_sendv *kap) {
	CONNECT		*cop;
	CHANNEL		*chp;
	int			 type = KTYPE(act);
	THREAD		*thp;
	THREAD		*sender;
	PROCESS		*actprp = act->process;
	unsigned	th_flags = 0;
	uint32_t	net_srcmsglen = -1U;


	/*
	 * These are the usual incoming checks
	 *  - validate connection
	 *  - get channel pointer
	 *  - check for cancellation
	 */

	// Lookup src connect.
	if((cop = inline_lookup_connect(actprp, kap->coid)) == NULL || cop->type != TYPE_CONNECTION) {
		return EBADF;
	}

	// Get dst channel.
	if((chp = cop->channel) == NULL) {
		return EBADF;
	}

	 _TRACE_COMM_EMIT_SMSG(act, cop, (act->tid << 16) | cop->scoid);

	// Pending cancellation: redirect the thread to its cancel stub instead
	// of sending (the NC variant is exempt, by design).
	if(PENDCAN(act->un.lcl.tls->__flags) && (type != __KER_MSG_SENDVNC)) {
		lock_kernel();
		SETKIP_FUNC(act, act->process->canstub);
		return ENOERROR;
	}

	/*
	 * The base conditions are now met. If this is a netcon or async channel,
	 * we handle separately
	 */
	 if(chp->flags & (_NTO_CHF_ASYNC | _NTO_CHF_GLOBAL)) {
		if(chp->flags & _NTO_CHF_GLOBAL) {
			// Note the negated sparts/rparts encoding passed down here.
			return msgsend_gbl(act, cop, kap->smsg, -kap->sparts, (unsigned)-kap->rparts, kap->coid);
		} else {
			return msgsend_async(act, cop);
		}
	 }

	 sender = act;

	// Store incoming args
	if(cop->flags & COF_NETCON) {
		// QNET: kap->rmsg is really a _vtid_info, kap->rparts the vtid.
		RD_PROBE_INT(act, kap->rmsg, sizeof(struct _vtid_info) / sizeof(int));
		sender = (THREAD *)(void *)net_send1(kap->rparts, (struct _vtid_info *)(void *)kap->rmsg);
		if(sender == NULL) {
			return EINVAL;
		}
		if(sender->state != STATE_STOPPED) crash();
		sender->args.ms.rmsg = kap->rmsg;
		sender->args.ms.rparts = kap->rparts;
		act->args.ms.smsg = kap->smsg;
		act->args.ms.sparts = kap->sparts;
		// Do this up-front while we have addressabilty
		net_srcmsglen = ((struct _vtid_info *)(void *)kap->rmsg)->srcmsglen;
	} else {
		sender->args.ms.coid = kap->coid;
		sender->args.ms.rmsg = kap->rmsg;
		sender->args.ms.rparts = kap->rparts;
	}

	sender->flags &= ~_NTO_TF_BUFF_MSG;
	// Make sure the SPECRET_PENDING bit isn't set when we don't need it.
	sender->internal_flags &= ~_NTO_ITF_SPECRET_PENDING;

	// Validate incoming IOVs - override for QNET case - rparts/rmsg have special meaning
	if(cop->flags & COF_NETCON) {
		sender->args.ms.dstmsglen = ((struct _vtid_info *)(void *)kap->rmsg)->dstmsglen;
	} else if(kap->rparts >= 0) {
		// rparts >= 0: rmsg is an IOV array of rparts entries.
		int len = 0;
		int len_last = 0;
		IOV *iov = kap->rmsg;
		int rparts = kap->rparts;
		int niov = 0;

		// Incoming reply IOV -- make copy of reply IOVs
		// Calculate reply length -- even if not requested, it is almost free
		// Also do boundary check
		while(rparts) {
			uintptr_t base, last;

			len += GETIOVLEN(iov);
			if (len <len_last ) {
				/*overflow. excessively long user IOV, possibly overlayed. pr62575 */
				return EOVERFLOW;
			}
			len_last=len;
			base = (uintptr_t)GETIOVBASE(iov);
			last = base + GETIOVLEN(iov) - 1;
			if(((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) && (GETIOVLEN(iov) != 0)) {
				return EFAULT;
			}
			// Keep copy of IOV
			if(niov < _NUM_CACHED_REPLY_IOV) {
			//	sender->args.ms.riov[niov] = *iov;
			}
			++iov;
			++niov;
			--rparts;
		}
		sender->args.ms.dstmsglen = len;
	} else {
		// Single part -- validate and store reply address
		// (rparts < 0: rmsg is a flat buffer of -rparts bytes).
		uintptr_t base, last;
		base = (uintptr_t) kap->rmsg;
		last = base + (-kap->rparts) - 1;
		if((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) {
			// We know length is non-zero from test above
			return EFAULT;
		}
		sender->args.ms.dstmsglen = -kap->rparts;
	}


	/* Send IOVs */
	if(kap->sparts < 0) {
		// Single part -- do the boundary check and copy if short message
		uintptr_t base, last;
		int	len;

		base = (uintptr_t) kap->smsg;
		len = -kap->sparts;
		last = base + len - 1;
		if((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) {
			// We know length is non-zero from test above
			return EFAULT;
		}
		sender->args.ms.srcmsglen = len;

		// Short message: stash the payload in the per-thread buffer now.
		if(len <= sizeof(sender->args.msbuff.buff)) {
			(void)__inline_xfer_memcpy(sender->args.msbuff.buff, (char *)base, sender->args.msbuff.msglen = len);
			th_flags = _NTO_TF_BUFF_MSG;
		}
	} else if(kap->sparts == 1) {
		// Single IOV -- do the boundary check and copy if short message
		uintptr_t base, last, len;

		base = (uintptr_t)GETIOVBASE(kap->smsg);
		len = GETIOVLEN(kap->smsg);
		last = base + len - 1;
		if(((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) && (len != 0)) {
			return EFAULT;
		}
		sender->args.ms.srcmsglen = len;
		if(len <= sizeof(sender->args.msbuff.buff)) {
			(void)__inline_xfer_memcpy(sender->args.msbuff.buff, (char *)base, sender->args.ms.msglen = len);
			th_flags = _NTO_TF_BUFF_MSG;
		}
	} else {
		// Multi IOV case
		int len = 0;
		int len_last =0;
		IOV *iov = kap->smsg;
		int sparts = kap->sparts;

		// Calculate send length -- even if not requested, it is almost free
		// Also do boundary check
		while(sparts) {
			uintptr_t base, last;

			len += GETIOVLEN(iov);
			if (len <len_last ) {
				/*overflow. excessively long user IOV, possibly overlayed. pr62575 */
				return EOVERFLOW;
			}
			len_last = len;
			base = (uintptr_t)GETIOVBASE(iov);
			last = base + GETIOVLEN(iov) - 1;
			if(((base > last) || !WITHIN_BOUNDRY(base, last, sender->process->boundry_addr)) && (GETIOVLEN(iov) != 0)) {
				return EFAULT;
			}
			++iov;
			--sparts;
			// Keep copy of IOV -- NYI, only really need if no receiver
			//if(niov < _NUM_CACHED_SEND_IOV) {
			//	sender->args.ms.siov[niov] = *iov;
			//}
		}
		sender->args.ms.srcmsglen = len;
		if(len <= sizeof(sender->args.msbuff.buff)) {
			int pos = 0;
			iov = kap->smsg;
			sparts = kap->sparts;
			// Multi-IOV incoming message that is short
			// FIXME -- need memcpy_siov for efficiency
			while(sparts) {
				int ilen = GETIOVLEN(iov);
				__inline_xfer_memcpy(&sender->args.msbuff.buff[pos], GETIOVBASE(iov), ilen);

				pos += ilen;
				iov++;
				sparts--;
			}
			sender->args.ms.msglen = len;
			th_flags = _NTO_TF_BUFF_MSG;
		}
	}

	// Now that the up-front business is done, we do the actual copy. If
	// this was identified as a short message, we have copied the message into the msgbuff area.

	// Was there was a waiting thread on the channel?

	thp = chp->receive_queue;
#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
	// Skip receivers that another CPU is currently delivering to.
	while((thp != NULL) && (thp->internal_flags & _NTO_ITF_MSG_DELIVERY)) {
		thp = thp->next.thread;
	}
#endif
	if((thp != NULL) && !(thp->internal_flags & _NTO_ITF_RCVPULSE) ) {

		int xferstat;
		// If an immediate timeout was specified we return immediately.
		if(IMTO(act, STATE_REPLY)) {
			sender->flags &= ~_NTO_TF_BUFF_MSG;
			return ETIMEDOUT;
		}

		// Is this a long message?
		if(th_flags == 0) {
			sender->args.ms.smsg = kap->smsg;
			sender->args.ms.sparts = kap->sparts;
			START_SMP_XFER(act, thp);
			// Yes. Transfer the data.
			xferstat = xfermsg(thp, act, 0, 0);
			sender->args.ms.msglen = act->args.ms.msglen;

			lock_kernel();
			END_SMP_XFER(act, thp);

#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
			if(thp->internal_flags & _NTO_ITF_MSG_FORCE_RDY) {
				force_ready(thp,KSTATUS(thp));
				thp->internal_flags &= ~_NTO_ITF_MSG_FORCE_RDY;
				KERCALL_RESTART(act);
				act->restart = 0;
				return ENOERROR;
			}
			if(act->flags & (_NTO_TF_SIG_ACTIVE | _NTO_TF_CANCELSELF)) {
				/* send is a cancelation point */
				KERCALL_RESTART(act);
				act->restart = 0;
				return ENOERROR;
			}
#endif

			if(xferstat) {
				lock_kernel();
				// If sender faulted let him know and abort the operation
				// without waking up the receiver.
				if(xferstat & XFER_SRC_FAULT) {
					goto send_fault;
				}
				// If receiver faulted, wake him up with an error and fail the
				// send.
				goto rcv_fault;
			}
		} else {

			// Short message. We do the following:
			// - switch aspace to receiver
			if(thp->aspace_prp && thp->aspace_prp != aspaces_prp[KERNCPU]) {
				/*
				 * Lock/unlock kernel if necessary before calling memmgr.aspace
				 */
				SWITCH_ASPACE(thp->aspace_prp, &aspaces_prp[KERNCPU], act);
			}
			// - copy message and handle errors
			if((xferstat = xfer_cpy_diov(thp, thp->args.ri.rmsg, sender->args.msbuff.buff, thp->args.ri.rparts, sender->args.msbuff.msglen))) {
				lock_kernel();
				// Has to be a receiver fault;
				goto rcv_fault;
			}
			sender->flags |= _NTO_TF_BUFF_MSG;
			// Note: below this point, we should NOT reference kap anywhere
			// as kap points to the original aspace
		}


		// If the receive specified an info buffer stuff it as well.
		// However, we are not in the address space of the destination
		// thread, we switch now
		thp->restart = NULL;

		if(thp->args.ri.info)  {
			struct _msg_info *repp = thp->args.ri.info;
			// Fill in rcvinfo
			// Switch to aspace of receiver. It's already adjusted if short msg.
			if(th_flags == 0) {
				if(thp->aspace_prp && thp->aspace_prp != aspaces_prp[KERNCPU]) {
					/*
					 * Kernel is already locked so we don't need SWITCH_ASPACE
					 */
					memmgr.aspace(thp->aspace_prp,&aspaces_prp[KERNCPU]);
				}
				if(cop->flags & COF_NETCON) {
					// Note: have to adjust srcmsglen before stuffing rcvinfo!
					sender->args.ms.srcmsglen = net_srcmsglen;
				}
			}
			// We can use a fast inline version as we know the thread does not
			// have an unblock pending
			STUFF_RCVINFO(sender, cop, thp->args.ri.info);

			// RUSH: Adjust msglen in better fashion...
			// NOTE(review): this clamps against thp (the receiver)'s
			// args.ms.srcmsglen rather than the sender's -- presumably the
			// args unions of client/server alias the same slot here; verify.
			if(thp->args.ms.srcmsglen < repp->msglen) {
				repp->msglen = thp->args.ms.srcmsglen;
			}
		}

		lock_kernel();
		SETKSTATUS(thp, (sender->tid << 16) | cop->scoid);

		// Unlink receive thread from the receive queue.
		LINKPRIL_REM(thp);

		sender->args.ms.server = thp;
		thp->client = sender;

		// Check fast path conditions - no timeouts, no QNET, no sporadic.
		// We can inline the block_and_ready()
		if((sender->timeout_flags == 0) &&
			(thp->timeout_flags == 0) &&
			!(cop->flags & COF_NETCON) &&
			!(chp->flags & _NTO_CHF_FIXED_PRIORITY) &&
			!IS_SCHED_SS(sender)) {

			// By default the receiver runs with message driven priority.
			thp->real_priority = thp->priority = sender->priority;
			thp->dpp = sender->dpp;
			AP_INHERIT_CRIT(thp, sender);

			sender->state = STATE_REPLY;	// Must be set before calling block_and_ready()
			snap_time(&sender->timestamp_last_block,0);
			_TRACE_TH_EMIT_STATE(sender, REPLY);
#if defined(INLINE_BLOCKANDREADY)
			// This is an inline version of block an ready
			// We can use this for non-SMP (no runmask).
			// This also works for AP as we inherit the partition
			thp->next.thread = NULL;
			thp->prev.thread = NULL;
#ifdef _mt_LTT_TRACES_	/* PDB */
			//mt_TRACE_DEBUG("PDB 4.2");
			//mt_trace_var_debug(actives[KERNCPU]->process->pid, actives[KERNCPU]->tid, actives[KERNCPU]);
			mt_trace_task_suspend(actives[KERNCPU]->process->pid, actives[KERNCPU]->tid);
#endif
			//thp->restart = NULL;
			actives[KERNCPU] = thp;
			thp->state = STATE_RUNNING;
			//@@@ Hmm. This inline version of block_and_ready() may cause a small inaccuacy with APS.
			//thp->runcpu = KERNCPU;
#ifdef _mt_LTT_TRACES_	/* PDB */
			//mt_TRACE_DEBUG("PDB 4.3");
			//mt_trace_var_debug(thp->process->pid, thp->tid, thp);
			mt_trace_task_resume(thp->process->pid, thp->tid);
#endif
			_TRACE_TH_EMIT_STATE(thp, RUNNING);
#else
			block_and_ready(thp);
#endif
		} else {
			// Slow path: timeouts, QNET, fixed-priority or sporadic cases.
			if((chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
				// By default the receiver runs with message driven priority.
				thp->real_priority = thp->priority = sender->priority;
				thp->dpp = sender->dpp;
				AP_INHERIT_CRIT(thp, sender);
			}
			sender->state = STATE_REPLY;	// Must be set before calling block_and_ready()
			_TRACE_TH_EMIT_STATE(sender, REPLY);

			if(cop->flags & COF_NETCON) {
				SETKSTATUS(act, 1);
				if((sender->flags & _NTO_TF_BUFF_MSG) == 0) {
					// #### Note: use net_srcmsglen saved above before we switch aspace
					sender->args.ms.srcmsglen = net_srcmsglen;
				}

				SETKSTATUS(thp, (sender->args.ms.rparts << 16) | cop->scoid);
				ready(thp);
			} else {
				block_and_ready(thp);
			}

			if(thp->timeout_flags & _NTO_TIMEOUT_REPLY) {
				// arm the timeout for reply block
				timeout_start(thp);
			}
		}

		// Block the active thread and ready the receiver thread
		sender->blocked_on = cop;

		// Link the now reply blocked sending thread in the reply queue
		LINKPRIL_BEG(chp->reply_queue, sender, THREAD);
		++cop->links;

		return ENOERROR;
	}

	// No-one waiting for a msg
	// If a normal thread
	//     Block the active thread
	//     Link the now send blocked thread into the reply queue
	// If a network thread send
	//     Link the passed vthread into the reply queue
	// Boost the servers priority to the clients if needed.
	if(th_flags == 0) {
		sender->args.ms.smsg = kap->smsg;
		sender->args.ms.sparts = kap->sparts;
			// FUTURE: Make copy of send IOVs
	} else {
		sender->flags |= _NTO_TF_BUFF_MSG;
	}


	if(IMTO(sender, STATE_SEND)) {
		sender->flags &= ~_NTO_TF_BUFF_MSG;
		return ETIMEDOUT;
	}

	lock_kernel();

	// Incoming network Send.
	// We use vtid passed in kap->rparts and _vtid_info passed in kap->rmsg
	if(cop->flags & COF_NETCON) {
		if(sender->flags & _NTO_TF_BUFF_MSG) {
			SETKSTATUS(act, 1);
		} else {
			// Return zero telling the network manager we still need the send data.
			// A _PULSE_CODE_NET_ACK will be sent later when the receive completes.
			sender->args.ms.srcmsglen = net_srcmsglen;
			SETKSTATUS(act, 0);
		}
		sender->state = STATE_SEND;
		snap_time(&sender->timestamp_last_block,0);
		_TRACE_TH_EMIT_STATE(sender, SEND);
	} else {
		//
		// Don't allow any MsgSend's to processes that are dying.
		// Only have to check here because of code in nano_signal.c
		// - check the comment where we turn on the _NTO_PF_COREDUMP
		// flag.
		//
		if(chp->process->flags & (_NTO_PF_TERMING | _NTO_PF_ZOMBIE | _NTO_PF_COREDUMP)) {
			return ENXIO;
		}
		// Can't use block(), because 'sender' might not actually be the
		// actives[KERNCPU] anymore...
		unready(sender, STATE_SEND);
	}

	sender->blocked_on = cop;
	pril_add(&chp->send_queue, sender);
	++cop->links;

	// To prevent priority inversion, boost all threads in the server
	//
	// for non-APS scheduling: raise prio of thread who last used this channel to at least that of the sender
	//
	// for APS scheduling: also cause the out-of-budget threads to inherit the budget of the sender,
	// but do not inherit the critical state.
	if((chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
		int i;

		for(i = 0 ; i < chp->process->threads.nentries ; ++i) {
			if(VECP(thp, &chp->process->threads, i) &&  thp->last_chid == chp->chid) {
				short may_run = may_thread_run(thp);
				if ( thp->priority < sender->priority ) {
					adjust_priority(thp, sender->priority, may_run ? thp->dpp : sender->dpp, 1 );
					thp->real_priority = thp->priority;
				} else {
					if (!may_run) {
						// server threads are higher prio, but have no budget. So inherit budget only
						adjust_priority(thp, thp->priority, sender->dpp, 1);
					}
				}
			}
		}
	}

	return ENOERROR;

send_fault:
	sender->flags &= ~_NTO_TF_BUFF_MSG;

	return EFAULT;

rcv_fault:
	sender->flags &= ~_NTO_TF_BUFF_MSG;
	kererr(thp, EFAULT);
	LINKPRIL_REM(thp);
	ready(thp);

	/* Restart the kernel call - same behavior as receive path */
	KERCALL_RESTART(act);

	return ENOERROR;
}