/* Release every argument subtree owned by a function/closure node.
 * Constants, variables and 0-arity entries own no subtrees and are
 * left untouched.  Note that a closure's opaque context pointer is
 * stored at parameters[arity] and is NOT freed here. */
void te_free_parameters(te_expr *n) {
    int count = 0;

    if (!n) return;

    /* Map the node type onto the number of owned parameter slots. */
    switch (TYPE_MASK(n->type)) {
        case TE_FUNCTION7: case TE_CLOSURE7: count = 7; break;
        case TE_FUNCTION6: case TE_CLOSURE6: count = 6; break;
        case TE_FUNCTION5: case TE_CLOSURE5: count = 5; break;
        case TE_FUNCTION4: case TE_CLOSURE4: count = 4; break;
        case TE_FUNCTION3: case TE_CLOSURE3: count = 3; break;
        case TE_FUNCTION2: case TE_CLOSURE2: count = 2; break;
        case TE_FUNCTION1: case TE_CLOSURE1: count = 1; break;
    }

    /* Free highest index first — same order as the original
     * fall-through switch produced. */
    while (count > 0) {
        te_free(n->parameters[--count]);
    }
}
/* Evaluate the compiled expression tree rooted at n.
 * Returns NAN for a NULL node or an unrecognized node type / arity.
 * M(e) evaluates the parameter subtree at index e; TE_FUN(...) casts
 * n->function to the matching function-pointer type (both macros are
 * defined elsewhere in this file). */
double te_eval(const te_expr *n) {
    if (!n) return NAN;
    switch(TYPE_MASK(n->type)) {
        case TE_CONSTANT: return n->value;   /* literal stored in the node */
        case TE_VARIABLE: return *n->bound;  /* read through the bound pointer */

        /* Plain functions: dispatch on arity, children evaluated via M(). */
        case TE_FUNCTION0: case TE_FUNCTION1: case TE_FUNCTION2: case TE_FUNCTION3:
        case TE_FUNCTION4: case TE_FUNCTION5: case TE_FUNCTION6: case TE_FUNCTION7:
            switch(ARITY(n->type)) {
                case 0: return TE_FUN(void)();
                case 1: return TE_FUN(double)(M(0));
                case 2: return TE_FUN(double, double)(M(0), M(1));
                case 3: return TE_FUN(double, double, double)(M(0), M(1), M(2));
                case 4: return TE_FUN(double, double, double, double)(M(0), M(1), M(2), M(3));
                case 5: return TE_FUN(double, double, double, double, double)(M(0), M(1), M(2), M(3), M(4));
                case 6: return TE_FUN(double, double, double, double, double, double)(M(0), M(1), M(2), M(3), M(4), M(5));
                case 7: return TE_FUN(double, double, double, double, double, double, double)(M(0), M(1), M(2), M(3), M(4), M(5), M(6));
                default: return NAN;
            }

        /* Closures: the opaque context pointer lives in parameters[arity]
         * (stored there by base()) and is passed as the first argument. */
        case TE_CLOSURE0: case TE_CLOSURE1: case TE_CLOSURE2: case TE_CLOSURE3:
        case TE_CLOSURE4: case TE_CLOSURE5: case TE_CLOSURE6: case TE_CLOSURE7:
            switch(ARITY(n->type)) {
                case 0: return TE_FUN(void*)(n->parameters[0]);
                case 1: return TE_FUN(void*, double)(n->parameters[1], M(0));
                case 2: return TE_FUN(void*, double, double)(n->parameters[2], M(0), M(1));
                case 3: return TE_FUN(void*, double, double, double)(n->parameters[3], M(0), M(1), M(2));
                case 4: return TE_FUN(void*, double, double, double, double)(n->parameters[4], M(0), M(1), M(2), M(3));
                case 5: return TE_FUN(void*, double, double, double, double, double)(n->parameters[5], M(0), M(1), M(2), M(3), M(4));
                case 6: return TE_FUN(void*, double, double, double, double, double, double)(n->parameters[6], M(0), M(1), M(2), M(3), M(4), M(5));
                case 7: return TE_FUN(void*, double, double, double, double, double, double, double)(n->parameters[7], M(0), M(1), M(2), M(3), M(4), M(5), M(6));
                default: return NAN;
            }

        default: return NAN;
    }
}
static void pn (const te_expr *n, int depth) { int i, arity; printf("%*s", depth, ""); switch(TYPE_MASK(n->type)) { case TE_CONSTANT: printf("%f\n", n->value); break; case TE_VARIABLE: printf("bound %p\n", n->bound); break; case TE_FUNCTION0: case TE_FUNCTION1: case TE_FUNCTION2: case TE_FUNCTION3: case TE_FUNCTION4: case TE_FUNCTION5: case TE_FUNCTION6: case TE_FUNCTION7: case TE_CLOSURE0: case TE_CLOSURE1: case TE_CLOSURE2: case TE_CLOSURE3: case TE_CLOSURE4: case TE_CLOSURE5: case TE_CLOSURE6: case TE_CLOSURE7: arity = ARITY(n->type); printf("f%d", arity); for(i = 0; i < arity; i++) { printf(" %p", n->parameters[i]); } printf("\n"); for(i = 0; i < arity; i++) { pn(n->parameters[i], depth + 1); } break; } }
/* Parse one <base> production and return its (heap-allocated) node.
 * On a syntax error s->type is set to TOK_ERROR; a node is still
 * returned so the caller can free a consistent tree. */
static te_expr *base(state *s) {
    /* <base>  =  <constant> | <variable> | <function-0> {"(" ")"} | <function-1> <power>
     *          | <function-X> "(" <expr> {"," <expr>} ")" | "(" <list> ")" */
    te_expr *ret;
    int arity;

    switch (TYPE_MASK(s->type)) {
        case TOK_NUMBER:
            ret = new_expr(TE_CONSTANT, 0);
            ret->value = s->value;
            next_token(s);
            break;

        case TOK_VARIABLE:
            ret = new_expr(TE_VARIABLE, 0);
            ret->bound = s->bound;
            next_token(s);
            break;

        /* Zero-argument function/closure: the "()" is optional. */
        case TE_FUNCTION0:
        case TE_CLOSURE0:
            ret = new_expr(s->type, 0);
            ret->function = s->function;
            /* Closure context is stowed one slot past the arguments. */
            if (IS_CLOSURE(s->type)) ret->parameters[0] = s->context;
            next_token(s);
            if (s->type == TOK_OPEN) {
                next_token(s);
                if (s->type != TOK_CLOSE) {
                    s->type = TOK_ERROR;
                } else {
                    next_token(s);
                }
            }
            break;

        /* One-argument function/closure: argument binds like a power,
         * so no parentheses are required (e.g. "sin x"). */
        case TE_FUNCTION1:
        case TE_CLOSURE1:
            ret = new_expr(s->type, 0);
            ret->function = s->function;
            if (IS_CLOSURE(s->type)) ret->parameters[1] = s->context;
            next_token(s);
            ret->parameters[0] = power(s);
            break;

        /* Multi-argument function/closure: mandatory parenthesized,
         * comma-separated argument list of exactly `arity` expressions. */
        case TE_FUNCTION2: case TE_FUNCTION3: case TE_FUNCTION4:
        case TE_FUNCTION5: case TE_FUNCTION6: case TE_FUNCTION7:
        case TE_CLOSURE2: case TE_CLOSURE3: case TE_CLOSURE4:
        case TE_CLOSURE5: case TE_CLOSURE6: case TE_CLOSURE7:
            arity = ARITY(s->type);
            ret = new_expr(s->type, 0);
            ret->function = s->function;
            if (IS_CLOSURE(s->type)) ret->parameters[arity] = s->context;
            next_token(s);
            if (s->type != TOK_OPEN) {
                s->type = TOK_ERROR;
            } else {
                int i;
                for(i = 0; i < arity; i++) {
                    next_token(s);
                    ret->parameters[i] = expr(s);
                    /* No separator after this argument: stop collecting. */
                    if(s->type != TOK_SEP) {
                        break;
                    }
                }
                /* Valid only if we stopped on ")" having consumed exactly
                 * `arity` arguments (loop broke at i == arity - 1). */
                if(s->type != TOK_CLOSE || i != arity - 1) {
                    s->type = TOK_ERROR;
                } else {
                    next_token(s);
                }
            }
            break;

        /* Parenthesized sub-expression list. */
        case TOK_OPEN:
            next_token(s);
            ret = list(s);
            if (s->type != TOK_CLOSE) {
                s->type = TOK_ERROR;
            } else {
                next_token(s);
            }
            break;

        /* Unexpected token: emit an error placeholder node. */
        default:
            ret = new_expr(0, 0);
            s->type = TOK_ERROR;
            ret->value = NAN;
            break;
    }

    return ret;
}
/* Advance the lexer: scan s->next and set s->type (plus s->value,
 * s->bound, s->function or s->context as appropriate) to the next
 * token.  Whitespace is skipped by looping until a real token (or
 * TOK_ERROR / TOK_END) is produced. */
void next_token(state *s) {
    s->type = TOK_NULL;

    if (!*s->next){
        s->type = TOK_END;
        return;
    }

    do {
        /* Try reading a number. */
        if ((s->next[0] >= '0' && s->next[0] <= '9') || s->next[0] == '.') {
            s->value = strtod(s->next, (char**)&s->next);
            s->type = TOK_NUMBER;
        } else {
            /* Look for a variable or builtin function call. */
            if (s->next[0] >= 'a' && s->next[0] <= 'z') {
                const char *start;
                start = s->next;
                /* Identifiers are lowercase letters and digits only. */
                while ((s->next[0] >= 'a' && s->next[0] <= 'z') || (s->next[0] >= '0' && s->next[0] <= '9')) s->next++;

                /* User-supplied lookup table takes precedence over builtins. */
                const te_variable *var = find_lookup(s, start, s->next - start);
                if (!var) var = find_builtin(start, s->next - start);

                if (!var) {
                    s->type = TOK_ERROR;
                } else {
                    switch(TYPE_MASK(var->type)) {
                        case TE_VARIABLE:
                            s->type = TOK_VARIABLE;
                            s->bound = var->address;
                            break;

                        case TE_CLOSURE0: case TE_CLOSURE1: case TE_CLOSURE2: case TE_CLOSURE3:
                        case TE_CLOSURE4: case TE_CLOSURE5: case TE_CLOSURE6: case TE_CLOSURE7:
                            s->context = var->context;
                            /* fall through: closures also record type+function */

                        case TE_FUNCTION0: case TE_FUNCTION1: case TE_FUNCTION2: case TE_FUNCTION3:
                        case TE_FUNCTION4: case TE_FUNCTION5: case TE_FUNCTION6: case TE_FUNCTION7:
                            s->type = var->type;
                            s->function = var->address;
                            break;
                    }
                }
            } else {
                /* Look for an operator or special character. */
                switch (s->next++[0]) {
                    case '+': s->type = TOK_INFIX; s->function = add; break;
                    case '-': s->type = TOK_INFIX; s->function = sub; break;
                    case '*': s->type = TOK_INFIX; s->function = mul; break;
                    case '/': s->type = TOK_INFIX; s->function = divide; break;
                    case '^': s->type = TOK_INFIX; s->function = pow; break;
                    case '%': s->type = TOK_INFIX; s->function = fmod; break;
                    case '(': s->type = TOK_OPEN; break;
                    case ')': s->type = TOK_CLOSE; break;
                    case ',': s->type = TOK_SEP; break;
                    /* Whitespace: leave TOK_NULL so the loop re-scans. */
                    case ' ': case '\t': case '\n': case '\r': break;
                    default: s->type = TOK_ERROR; break;
                }
            }
        }
    } while (s->type == TOK_NULL);
}
/* Kernel worker: fill kap->data with a debug snapshot of channel
 * kap->chp (identity fields plus receive/reply/send/pulse queue
 * depths).  The work is broken into DC_* stages recorded in
 * kap->state so the routine can return early on preemption and be
 * re-entered where it left off; each completed stage falls through
 * to the next.  NOTE(review): lock_kernel() is called again on
 * re-entry at DC_COUNT_SEND_CONTINUE — presumably required after a
 * preemption return; confirm against the kernel's locking rules. */
static void kerext_debug_channel(void *args) {
    THREAD *tp;
    struct kerargs_debug_channel *kap = args;
    CHANNEL *chp = kap->chp;
    debug_channel_t *p = kap->data;
    THREAD *act = actives[KERNCPU];

    switch(kap->state) {
    case DC_INIT:
        /* Copy the channel's identity fields. */
        p->chid = chp->chid;
        p->type = chp->type;
        p->zero = chp->zero;
        p->flags = chp->flags;
        kap->state = DC_COUNT_RECEIVE;
        // fall through

    case DC_COUNT_RECEIVE:
        /* Count threads blocked waiting to receive. */
        p->receive_queue_depth = 0;
        for(tp = chp->receive_queue; tp != NULL; tp = tp->next.thread) {
            p->receive_queue_depth++;
        }
        kap->state = DC_COUNT_REPLY;
        // fall through

    case DC_COUNT_REPLY:
        /* Count threads blocked waiting for a reply. */
        p->reply_queue_depth = 0;
        for(tp = chp->reply_queue; tp != NULL; tp = tp->next.thread) {
            p->reply_queue_depth++;
        }
        kap->state = DC_COUNT_SEND_START;
        // fall through

    case DC_COUNT_SEND_START:
        /* Register a cursor (kap->up) on the send queue so the pril
         * code can fix it up if entries are deleted while we are
         * preempted during the walk below. */
        lock_kernel();
        p->send_queue_depth = 0;
        p->pulse_queue_depth = 0;
        kap->up.pril = pril_first(&chp->send_queue);
        pril_update_register(&chp->send_queue, &kap->up);
        kap->state = DC_COUNT_SEND_CONTINUE;
        // fall through

    case DC_COUNT_SEND_CONTINUE:
        lock_kernel();
        pril_update_unregister(&chp->send_queue, &kap->up);
        /* Walk the send queue, classifying pulses vs. send-blocked
         * threads.  Pulses contribute their count, not 1. */
        for(tp = (THREAD *)kap->up.pril; tp != NULL; tp = pril_next(tp)) {
            switch(TYPE_MASK(tp->type)) {
            case TYPE_PULSE:
            case TYPE_VPULSE:
                p->pulse_queue_depth += ((PULSE *)tp)->count;
                break;
            default:
                p->send_queue_depth++;
                break;
            }
            if(NEED_PREEMPT(act)) {
                // Tell the pril routines to update the up.pril pointer
                // if somebody deletes the entry while we're preempted
                kap->up.pril = pril_next(tp);
                pril_update_register(&chp->send_queue, &kap->up);
                return;
            }
        }
        kap->state = DC_DONE;
        break;

    default:
        /* Unknown state: internal inconsistency. */
        crash();
    }
}
/* Kernel entry for MsgReceivev()/MsgReceivePulsev(): receive a message
 * or pulse on channel kap->chid into the IOVs at kap->rmsg.
 *
 * act  - the calling (receiving) thread
 * kap  - kernel-call arguments: chid, rmsg/rparts (rparts < 0 means a
 *        single buffer of length -rparts), optional info buffer, coid
 *
 * Returns EOK/ENOERROR on success paths, or ESRCH/EBADF/EFAULT/
 * EOVERFLOW/ETIMEDOUT on error.  If no sender is waiting, the caller
 * is blocked in STATE_RECEIVE and queued on the channel. */
int kdecl ker_msg_receivev(THREAD *act, struct kerargs_msg_receivev *kap) {
    CHANNEL *chp;
    CONNECT *cop;
    THREAD *thp;
    THREAD **owner;
    int tid, chid;
    unsigned tls_flags;
    VECTOR *chvec;

    chid = act->last_chid = kap->chid; // Used for priority boost

    /* Global channels live in a separate, system-wide vector. */
    chvec = &act->process->chancons;
    if(chid & _NTO_GLOBAL_CHANNEL) {
        chid &= ~_NTO_GLOBAL_CHANNEL;
        chvec = &chgbl_vector;
    }

    if((chp = vector_lookup(chvec, chid)) == NULL || chp->type != TYPE_CHANNEL) {
        lock_kernel();
        return ESRCH;
    }

    if(kap->info) {
        WR_VERIFY_PTR(act, kap->info, sizeof(*kap->info));
        // NOTE:
        // Make sure the receive info pointer is valid. Note that we need some
        // extra checks in the mainline when filling in the rcvinfo (this is no
        // longer done in specret).
        //
        // Note: we don't probe the whole buffer, rather touch start and end,
        // which is faster and sufficient
        //
        WR_PROBE_INT(act, kap->info, 1);
        WR_PROBE_INT(act, &kap->info->reserved, 1);
    }

    /* Async and global channels are handled by dedicated paths. */
    if(chp->flags & (_NTO_CHF_ASYNC | _NTO_CHF_GLOBAL)) {
        if(chp->flags & _NTO_CHF_GLOBAL) {
            cop = NULL;
            if(kap->coid) {
                if((cop = lookup_connect(kap->coid)) == NULL || cop->type != TYPE_CONNECTION) {
                    return EBADF;
                }
            }
            return msgreceive_gbl(act, (CHANNELGBL*) chp, kap->rmsg, -kap->rparts, kap->info, cop, kap->coid);
        } else {
            return msgreceive_async(act, (CHANNELASYNC*) chp, kap->rmsg, kap->rparts);
        }
    }

    /*
     * Validate incoming IOVs and calculate receive length
     */
    if(kap->rparts >= 0) {
        int len = 0;
        int len_last = 0;
        IOV *iov = kap->rmsg;
        int rparts = kap->rparts;

        /* The IOV array itself must lie within the caller's space. */
        if (kap->rparts != 0) {
            if (!WITHIN_BOUNDRY((uintptr_t)iov, (uintptr_t)(&iov[rparts]), act->process->boundry_addr)) {
                return EFAULT;
            }
        }

        // Calculate receive length -- even if not requested, we use it for msginfo
        // Do boundary check
        while(rparts) {
            uintptr_t base, last;

            len += GETIOVLEN(iov);
            if (len < len_last) {
                /* overflow: excessively long user IOV, possibly overlayed (pr62575) */
                return EOVERFLOW;
            }
            len_last = len;
            base = (uintptr_t)GETIOVBASE(iov);
            last = base + GETIOVLEN(iov) - 1;
            /* Zero-length IOVs are exempt from the boundary check. */
            if(((base > last) || !WITHIN_BOUNDRY(base, last, act->process->boundry_addr)) && (GETIOVLEN(iov) != 0)) {
                return EFAULT;
            }
            ++iov;
            --rparts;
        }
        act->args.ms.srcmsglen = len;
    } else {
        // Single part -- validate receive address
        uintptr_t base, last;

        base = (uintptr_t) kap->rmsg;
        last = base + (-kap->rparts) - 1;
        if((base > last) || !WITHIN_BOUNDRY(base, last, act->process->boundry_addr)) {
            // We know length is non-zero from test above
            return EFAULT;
        }
        act->args.ms.srcmsglen = -kap->rparts;
    }

restart:
    // Was there was a waiting thread or pulse on the channel?
    thp = pril_first(&chp->send_queue);
restart2:
    if(thp) {
        int xferstat;
        unsigned type = TYPE_MASK(thp->type);

        // Yes. There is a waiting message.
        if((type == TYPE_PULSE) || (type == TYPE_VPULSE)) {
            PULSE *pup = (PULSE *)(void *)thp;

            act->restart = NULL;
            xferstat = xferpulse(act, kap->rmsg, kap->rparts, pup->code, pup->value, pup->id);

            /* A vpulse carries the originating thread in pup->id; its
             * rcvinfo is filled from that thread. */
            if(type == TYPE_VPULSE) {
                thp = (THREAD *)pup->id;
                get_rcvinfo(thp, -1, thp->blocked_on, kap->info);
            }

            lock_kernel();
            act->timeout_flags = 0;

            // By default the receiver runs with message driven priority.
            // RUSH: Fix for partition inheritance
            if(act->priority != pup->priority && (chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
                adjust_priority(act, pup->priority, act->process->default_dpp, 1);
                act->real_priority = act->priority;
            } else if(act->dpp != act->process->default_dpp) {
                adjust_priority(act, act->priority, act->process->default_dpp, 1);
            }

            pulse_remove(chp->process, &chp->send_queue, pup);

            if((thp = act->client) != 0) {
                /* need to clear client's server field */
                act->client = 0;
                thp->args.ms.server = 0;
            }

            if(xferstat) {
                return EFAULT;
            }
            _TRACE_COMM_IPC_RET(act);
            return EOK;
        }

        // If the receive request was for a pulse only, keep checking the list..
        if(KTYPE(act) == __KER_MSG_RECEIVEPULSEV) {
            thp = thp->next.thread;
            goto restart2;
        }

#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
        // If thp is in the xfer status in another CPU, try next one
        if(thp->internal_flags & _NTO_ITF_MSG_DELIVERY) {
            thp = thp->next.thread;
            goto restart2;
        }
#endif

        // If an immediate timeout was specified we unblock the sender.
        if(IMTO(thp, STATE_REPLY)) {
            lock_kernel();
            force_ready(thp, ETIMEDOUT);
            unlock_kernel();
            KER_PREEMPT(act, ENOERROR);
            goto restart;
        }

        if(thp->flags & _NTO_TF_BUFF_MSG) {
            /* Short message was buffered in the kernel: plain copy. */
            xferstat = xfer_cpy_diov(act, kap->rmsg, thp->args.msbuff.buff, kap->rparts, thp->args.msbuff.msglen);
        } else {
            /* Full address-space transfer from sender to receiver. */
            act->args.ri.rmsg = kap->rmsg;
            act->args.ri.rparts = kap->rparts;

            START_SMP_XFER(act, thp);

            xferstat = xfermsg(act, thp, 0, 0);

            lock_kernel();
            END_SMP_XFER(act, thp);

#if defined(VARIANT_smp) && defined(SMP_MSGOPT)
            if(thp->internal_flags & _NTO_ITF_MSG_FORCE_RDY) {
                /* Another CPU forced the sender ready mid-transfer;
                 * restart this kernel call. */
                force_ready(thp,KSTATUS(thp));
                thp->internal_flags &= ~_NTO_ITF_MSG_FORCE_RDY;
                KERCALL_RESTART(act);
                act->restart = 0;
                return ENOERROR;
            }
            if(act->flags & (_NTO_TF_SIG_ACTIVE | _NTO_TF_CANCELSELF)) {
                KERCALL_RESTART(act);
                act->restart = 0;
                return ENOERROR;
            }
#endif
        }

        if(xferstat) {
            lock_kernel();

            // Only a send fault will unblock the sender.
            if(xferstat & XFER_SRC_FAULT) {
                // Let sender know it faulted and restart receive.
                force_ready(thp, EFAULT);
                unlock_kernel();
                KER_PREEMPT(act, ENOERROR);
                goto restart;
            }

            if((thp = act->client) != 0) {
                /* need to clear client's server field */
                act->client = 0;
                thp->args.ms.server = 0;
            }

            // Let receiver and sender know reason for fault.
            act->timeout_flags = 0;
            return EFAULT;
        }

        /* Virtual (network) threads carry the real tid in rparts. */
        if(TYPE_MASK(thp->type) == TYPE_VTHREAD) {
            tid = thp->args.ri.rparts;
        } else {
            tid = thp->tid;
        }
        cop = thp->blocked_on;
        if(thp->args.ms.srcmsglen == ~0U) {
            // This should never occur with the new code
            crash();
            /* NOTREACHED */
            thp->args.ms.srcmsglen = thp->args.ms.msglen;
        }

        // If the receive specified an info buffer stuff it as well.
        // thp->args.ms.msglen was set by xfermsg
        if(kap->info) {
            // get_rcvinfo(thp, -1, cop, kap->info);
            STUFF_RCVINFO(thp, cop, kap->info);

            /* For buffered messages, clamp the reported length to what
             * the receiver could actually accept. */
            if(thp->flags & _NTO_TF_BUFF_MSG) {
                if(kap->info->msglen > act->args.ms.srcmsglen) kap->info->msglen = act->args.ms.srcmsglen;
            }
        }

        lock_kernel();
        _TRACE_COMM_IPC_RET(act);

        act->timeout_flags = 0;
        act->restart = NULL;

        // Because _NTO_TF_RCVINFO and _NTO_TF_SHORT_MSG will not be set, set this to NULL
        thp->restart = NULL;

        if(act->client != 0) {
            /* need to clear client's server field */
            act->client->args.ms.server = 0;
        }
        /* Pair up server (act) and client (thp). */
        thp->args.ms.server = act;
        act->client = thp;

        /* Move the sender from the send queue to the reply queue. */
        pril_rem(&chp->send_queue, thp);
        if(thp->state == STATE_SEND) {
            thp->state = STATE_REPLY;
            snap_time(&thp->timestamp_last_block,0);
            _TRACE_TH_EMIT_STATE(thp, REPLY);
            /* Status for the receiver: rcvid = (tid << 16) | scoid. */
            SETKSTATUS(act, (tid << 16) | cop->scoid);
        } else {
            thp->state = STATE_NET_REPLY;
            _TRACE_TH_EMIT_STATE(thp, NET_REPLY);
            /* Negative rcvid marks a network reply. */
            SETKSTATUS(act, -((tid << 16) | cop->scoid));
        }
        LINKPRIL_BEG(chp->reply_queue, thp, THREAD);

        // By default the receiver runs with message driven priority.
        // RUSH: Fix for partition inheritance
        if((act->priority != thp->priority || act->dpp != thp->dpp) && (chp->flags & _NTO_CHF_FIXED_PRIORITY) == 0) {
            AP_INHERIT_CRIT(act, thp);
            adjust_priority(act, thp->priority, thp->dpp, 1);
            if(act->real_priority != act->priority) act->real_priority = act->priority;
        } else {
            AP_CLEAR_CRIT(act);
        }

        return ENOERROR;
    }

    // No-one waiting for a msg so block
    tls_flags = act->un.lcl.tls->__flags;
    lock_kernel();
    _TRACE_COMM_IPC_RET(act);

    if((thp = act->client) != 0) {
        /* need to clear client's server field */
        act->client = 0;
        thp->args.ms.server = 0;
    }

    /* Immediate timeout: fail rather than block. */
    if(IMTO(act, STATE_RECEIVE)) {
        return ETIMEDOUT;
    }

    /* Pending cancellation: divert to the cancellation stub. */
    if(PENDCAN(tls_flags)) {
        SETKIP_FUNC(act, act->process->canstub);
        return ENOERROR;
    }

    // Can't call block() here, because act may not be actives[KERNCPU]
    // anymore - if the sender faulted, we call force_ready() above and
    // that might change actives[KERNCPU]
    unready(act, STATE_RECEIVE);

    // End inheritance of partition and critical state. This must be after block() so that we microbill
    // the partition we where running in before we reset to the original partition. PR26990
    act->dpp = act->orig_dpp;
    AP_CLEAR_CRIT(act);

    /* Record what we are blocked on and where to deliver. */
    act->blocked_on = chp;
    act->args.ri.rmsg = kap->rmsg;
    act->args.ri.rparts = kap->rparts;
    act->args.ri.info = kap->info;

    // Add to the receive queue, put pulse only receives at the end of
    // the list so the ker_msg_send() only has to check the head of the list
    owner = &chp->receive_queue;
    if(KTYPE(act) == __KER_MSG_RECEIVEPULSEV) {
        act->internal_flags |= _NTO_ITF_RCVPULSE;
        for( ;; ) {
            thp = *owner;
            if(thp == NULL) break;
            if(thp->internal_flags & _NTO_ITF_RCVPULSE) break;
            owner = &thp->next.thread;
        }
    }
    LINKPRIL_BEG(*owner, act, THREAD);

    return ENOERROR;
}
/* Kernel entry for MsgError(): complete a reply-blocked sender's
 * transaction with an error (or, for ERESTART, arrange for its kernel
 * call to be re-issued) instead of transferring reply data.
 *
 * act - the calling (server) thread
 * kap - rcvid identifying the blocked client, and err to deliver
 *
 * Returns EOK on success, ESRCH if the client is not reply-blocked on
 * a matching connection, or ENOERROR when rcvid lookup already set a
 * status. */
int kdecl ker_msg_error(THREAD *act, struct kerargs_msg_error *kap) {
    CONNECT *cop;
    THREAD *thp;

    if((cop = lookup_rcvid((KERARGS *)(void *)kap, kap->rcvid, &thp)) == NULL) {
        return ENOERROR;
    }

    // Make sure thread is replied blocked on a channel owned by this process.
    if(thp->state != STATE_REPLY && thp->state != STATE_NET_REPLY) {
        return ESRCH;
    }

    // Verify that the message has been fully received, and that the receiving
    // thread has completed any specialret() processing that needs to be done.
    if ( thp->internal_flags & _NTO_ITF_SPECRET_PENDING ) {
        return ESRCH;
    }

    /* The client may actually be blocked on a virtual connection that
     * wraps the one we looked up; accept that case only. */
    if(thp->blocked_on != cop) {
        CONNECT *cop1 = cop;

        cop = thp->blocked_on;
        if((cop->flags & COF_VCONNECT) == 0 || cop->un.lcl.cop != cop1) {
            return ESRCH;
        }
    }

    /* Drop any queued unblock pulse for this rcvid. */
    if(thp->internal_flags & _NTO_ITF_UNBLOCK_QUEUED) {
        remove_unblock(thp, cop, kap->rcvid);
    }

    lock_kernel();

    /* Sever the client/server pairing set up at receive time. */
    if(thp->args.ms.server != 0) {
        thp->args.ms.server->client = 0;
        thp->args.ms.server = 0;
    }

    thp->flags &= ~(_NTO_TF_BUFF_MSG | _NTO_TF_UNBLOCK_REQ);
    if(kap->err == ERESTART) {
        /* Rewind the client's instruction pointer so it re-executes
         * the kernel call entry. */
        CRASHCHECK(TYPE_MASK(thp->type) != TYPE_THREAD);
        SETKIP(thp, KIP(thp) - KER_ENTRY_SIZE);
    } else {
        if ( _TRACE_GETSYSCALL(thp->syscall) == __KER_CHANNEL_DESTROY ) {
            /* you may think we're crazy... but... ok, we ARE, but it's for the best, really.
             *
             * The only way we could be doing a MsgError on a thread that is really calling ChannelDestroy is
             * if we were handling the destruction of channel which has off-node connections, and net_send2
             * had to deal with the qnet manager. It's now doing a MsgError to let us know the status of
             * that message. Unfortunately, the status field will overwrite the syscall type field of the
             * thread, which will mean that he will restart his kernel call with the wrong kernel call number!
             *
             * Aiieeeeee!
             */
            if ( kap->err != EOK ) {
                KerextSlogf( _SLOG_SETCODE( _SLOGC_QNET, 0 ), _SLOG_INFO, "Qnet ChannelDestroy failed %d", kap->err);
            }
        } else {
            /* Deliver the error as the client's kernel-call status. */
            kererr(thp, kap->err);
        }
    }

    _TRACE_COMM_EMIT_ERROR(thp, cop, thp->tid+1);

    /* Remove the client from the reply queue and release our link on
     * the connection (detaching it if this was the last link). */
    LINKPRIL_REM(thp);
    if(--cop->links == 0) {
        connect_detach(cop, thp->priority);
    }
    ready(thp);

    act->restart = NULL;
    return EOK;
}