static inline int get_dir(const char* path, char* buf, int* pi, int* bi)
{
    const char* head = NULL;
    const char* tail = NULL;
    int len = 0;
    int path_len = 0;
    int ret = 0;

    if (IS_END(path))
        return END;

    path += *pi;
    buf += *bi;
    path_len = strlen(path);
    head = path;
    tail = (path_len - 1) >= 0 ? memchr(path + 1, '/', path_len - 1) : NULL;

    if (!tail) { // no further '/', so it must be the last dir
        len = path_len;
        if (IS_RELATIVE(head)) {
            buf -= drop_last_dir(buf);
            buf[0] = '\0';
            return END;
        }
        buf[len] = '\0'; // terminate fixed string
        ret = LAST;
    }
    else if (IS_RELATIVE(head)) {
        *bi -= drop_last_dir(buf);
        *pi += 3; // length of "/.."
        return RELATIVE;
    }
    else { // not a relative component
        len = tail - head;
        ret = NORMAL;
    }

    memcpy(buf, head, len);
    *bi += len;
    *pi += len;
    return ret;
}
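/*
 * A minimal, self-contained sketch of the technique get_dir() implements:
 * walk a path one "/segment" at a time and collapse each "/.." by dropping
 * the segment most recently emitted into the output buffer.  The names
 * below (canonize, drop_last_seg) are hypothetical stand-ins; the real
 * code's IS_END/IS_RELATIVE/drop_last_dir helpers are defined elsewhere.
 */
#include <stdio.h>
#include <string.h>

/* Drop the last "/name" written to buf[0..len); return the new length. */
static int drop_last_seg(char *buf, int len)
{
    while (len > 0 && buf[--len] != '/')
        ;
    buf[len] = '\0';
    return len;
}

/* Collapse "/.." segments, e.g. "/a/b/../c" -> "/a/c". */
static void canonize(const char *path, char *buf)
{
    int bi = 0;
    buf[0] = '\0';
    while (*path) {
        const char *next = strchr(path + 1, '/');
        int len = next ? (int)(next - path) : (int)strlen(path);
        if (len == 3 && memcmp(path, "/..", 3) == 0)
            bi = drop_last_seg(buf, bi); /* back up one segment */
        else {
            memcpy(buf + bi, path, len); /* keep segment verbatim */
            bi += len;
            buf[bi] = '\0';
        }
        path += len;
    }
}

int main(void)
{
    char buf[256];
    canonize("/a/b/../c", buf);
    puts(buf); /* prints "/a/c" */
    return 0;
}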
//
//  COPY_VALUE_Debug: C
//
// The implementation of COPY_VALUE_CORE is designed to be fairly optimal
// (since it is being called in lieu of what would have been a memcpy() or
// plain assignment).  It is left in its raw form as an inline function to
// help convey that it is nearly as efficient as an assignment.
//
// This adds some verbose checking in the debug build to help debug cases
// where the relative information bits are incorrect.
//
void COPY_VALUE_Debug(
    REBVAL *dest,
    const RELVAL *src,
    REBCTX *specifier
) {
    assert(!IS_END(src));
    assert(!IS_TRASH_DEBUG(src));

#ifdef __cplusplus
    Assert_Cell_Writable(dest, __FILE__, __LINE__);
#endif

    if (IS_RELATIVE(src)) {
        assert(ANY_WORD(src) || ANY_ARRAY(src));
        if (specifier == SPECIFIED) {
            Debug_Fmt("Internal Error: Relative item used with SPECIFIED");
            PROBE_MSG(src, "word or array");
            PROBE_MSG(FUNC_VALUE(VAL_RELATIVE(src)), "func");
            assert(FALSE);
        }
        else if (
            VAL_RELATIVE(src) != VAL_FUNC(CTX_FRAME_FUNC_VALUE(specifier))
        ) {
            Debug_Fmt("Internal Error: Function mismatch in specific binding");
            PROBE_MSG(src, "word or array");
            PROBE_MSG(FUNC_VALUE(VAL_RELATIVE(src)), "expected func");
            PROBE_MSG(CTX_FRAME_FUNC_VALUE(specifier), "actual func");
            assert(FALSE);
        }
    }
    COPY_VALUE_CORE(dest, src, specifier);
}
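/*
 * Presumably a wrapper macro selects the unchecked fast path in release
 * builds and this checked version in debug builds.  A hypothetical sketch
 * of that standard pattern -- the exact macro wiring is an assumption and
 * is not shown in this listing:
 */
#ifdef NDEBUG
    #define COPY_VALUE(dest,src,specifier) \
        COPY_VALUE_CORE((dest), (src), (specifier))
#else
    #define COPY_VALUE(dest,src,specifier) \
        COPY_VALUE_Debug((dest), (src), (specifier))
#endif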
//
//  Bind_Relative_Inner_Loop: C
//
// Recursive function for relative function word binding.  Any word whose
// canon symbol is registered in the binder gets relatively bound to the
// (incomplete) function described by `paramlist`; ANY-ARRAY! values are
// processed deeply.
//
static void Bind_Relative_Inner_Loop(
    struct Reb_Binder *binder,
    RELVAL *head,
    REBARR *paramlist,
    REBU64 bind_types
) {
    RELVAL *value = head;

    for (; NOT_END(value); value++) {
        REBU64 type_bit = FLAGIT_KIND(VAL_TYPE(value));

        // The two-pass copy-and-then-bind should have gotten rid of all the
        // relative values to other functions during the copy.
        //
        // !!! Long term, in a single pass copy, this would have to deal
        // with relative values and run them through the specification
        // process if they were not just getting overwritten.
        //
        assert(!IS_RELATIVE(value));

        if (type_bit & bind_types) {
            REBINT n = Try_Get_Binder_Index(binder, VAL_WORD_CANON(value));
            if (n != 0) {
                //
                // Word's canon symbol is in frame.  Relatively bind it.
                // (clear out existing binding flags first).
                //
                UNBIND_WORD(value);
                SET_VAL_FLAGS(value, WORD_FLAG_BOUND | VALUE_FLAG_RELATIVE);
                INIT_WORD_FUNC(value, AS_FUNC(paramlist)); // incomplete func
                INIT_WORD_INDEX(value, n);
            }
        }
        else if (ANY_ARRAY(value)) {
            Bind_Relative_Inner_Loop(
                binder, VAL_ARRAY_AT(value), paramlist, bind_types
            );

            // Set the bits in the ANY-ARRAY! REBVAL to indicate that it is
            // relative to the function.
            //
            // !!! Technically speaking it is not necessary for an array to
            // be marked relative if it doesn't contain any relative words
            // under it.  However, for uniformity in the near term, it's
            // easiest to debug if there is a clear mark on arrays that are
            // part of a deep copy of a function body either way.
            //
            SET_VAL_FLAG(value, VALUE_FLAG_RELATIVE);
            INIT_RELATIVE(value, AS_FUNC(paramlist)); // incomplete func
        }
    }
}
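/*
 * The recursion above has a simple shape: leaves whose symbol is found in
 * the binder get tagged with (owner, index); nested arrays recurse and are
 * tagged as relative too.  A self-contained sketch of that shape, with all
 * names hypothetical and none of the Rebol cell machinery:
 */
#include <stddef.h>

enum { KIND_WORD, KIND_ARRAY };

typedef struct Cell {
    int kind;
    int symbol;          /* meaningful for KIND_WORD */
    struct Cell *items;  /* meaningful for KIND_ARRAY */
    size_t len;          /* number of items in an array */
    const void *owner;   /* set when relatively bound */
    int index;           /* parameter index when bound */
} Cell;

/* Hypothetical binder: maps symbol -> 1-based param index, 0 if absent. */
static int lookup(const int *params, size_t n_params, int symbol)
{
    for (size_t i = 0; i < n_params; ++i)
        if (params[i] == symbol)
            return (int)(i + 1);
    return 0;
}

static void bind_relative(
    Cell *head, size_t len,
    const int *params, size_t n_params,
    const void *owner
){
    for (size_t i = 0; i < len; ++i) {
        Cell *c = &head[i];
        if (c->kind == KIND_WORD) {
            int n = lookup(params, n_params, c->symbol);
            if (n != 0) {
                c->owner = owner; /* tag word as relative to the function */
                c->index = n;
            }
        }
        else if (c->kind == KIND_ARRAY) {
            bind_relative(c->items, c->len, params, n_params, owner);
            c->owner = owner;     /* mark the array itself as relative too */
        }
    }
}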
//
//  Assert_No_Relative: C
//
// Check to make sure there are no relative values in an array, maybe deeply.
//
// !!! What if you have an ANY-ARRAY! inside your array at a position N,
// but there is a relative value in the VAL_ARRAY() of that value at an
// index earlier than N?  This currently considers that an error since it
// checks the whole array...which is more conservative (asserts on more
// cases).  But should there be a flag to ask to honor the index?
//
void Assert_No_Relative(REBARR *array, REBOOL deep)
{
    RELVAL *item = ARR_HEAD(array);
    while (NOT_END(item)) {
        if (IS_RELATIVE(item)) {
            Debug_Fmt("Array contained relative item and wasn't supposed to.");
            PROBE_MSG(item, "relative item");
            Panic_Array(array);
        }
        if (!IS_VOID_OR_SAFE_TRASH(item) && ANY_ARRAY(item) && deep)
            Assert_No_Relative(VAL_ARRAY(item), deep);
        ++item;
    }
}
//
//  INIT_WORD_INDEX_Debug: C
//
void INIT_WORD_INDEX_Debug(RELVAL *v, REBCNT i)
{
    assert(ANY_WORD(v));
    assert(GET_VAL_FLAG((v), WORD_FLAG_BOUND));
    if (IS_RELATIVE(v))
        assert(
            VAL_WORD_CANON(v)
            == VAL_PARAM_CANON(FUNC_PARAM(VAL_WORD_FUNC(v), i))
        );
    else
        assert(
            VAL_WORD_CANON(v)
            == CTX_KEY_CANON(VAL_WORD_CONTEXT(KNOWN(v)), i)
        );
    v->payload.any_word.index = i;
}
//
//  Copy_Rerelativized_Array_Deep_Managed: C
//
// The invariant of copying in general is that when you are done with the
// copy, there are no relative values in that copy.  One exception to this
// is the deep copy required to make a relative function body in the first
// place (which it currently does in two passes--a normal deep copy followed
// by a relative binding).  The other exception is when a relativized
// function body is copied to make another relativized function body.
//
// This is specialized logic for the latter case.  It's constrained enough
// to be simple (all relative values are known to be relative to the same
// function), and the feature is questionable anyway.  So it's best not to
// further complicate ordinary copying with a parameterization to copy
// and change all the relative binding information from one function's
// paramlist to another.
//
REBARR *Copy_Rerelativized_Array_Deep_Managed(
    REBARR *original,
    REBACT *before, // references to `before` will be changed to `after`
    REBACT *after
){
    const REBFLGS flags = NODE_FLAG_MANAGED;

    REBARR *copy = Make_Array_For_Copy(ARR_LEN(original), flags, original);
    RELVAL *src = ARR_HEAD(original);
    RELVAL *dest = ARR_HEAD(copy);

    for (; NOT_END(src); ++src, ++dest) {
        if (not IS_RELATIVE(src)) {
            Move_Value(dest, KNOWN(src));
            continue;
        }

        // All relative values under a sub-block must be relative to the
        // same function.
        //
        assert(VAL_RELATIVE(src) == before);

        Move_Value_Header(dest, src);

        if (ANY_ARRAY_OR_PATH(src)) {
            INIT_VAL_NODE(
                dest,
                Copy_Rerelativized_Array_Deep_Managed(
                    VAL_ARRAY(src), before, after
                )
            );
            PAYLOAD(Any, dest).second = PAYLOAD(Any, src).second;
            INIT_BINDING(dest, after); // relative binding
        }
        else {
            assert(ANY_WORD(src));
            PAYLOAD(Any, dest) = PAYLOAD(Any, src);
            INIT_BINDING(dest, after);
        }
    }

    TERM_ARRAY_LEN(copy, ARR_LEN(original));
    return copy;
}
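/*
 * Stripped of the Rebol cell machinery, the copy above is an ordinary
 * recursive deep copy that retargets one binding pointer as it goes.  A
 * self-contained sketch of that pattern (all names hypothetical):
 */
#include <stdlib.h>

typedef struct Node {
    struct Node **items;  /* non-NULL for array nodes */
    size_t len;
    const void *binding;  /* what this node is "relative to" */
} Node;

/* Deep-copy n, rewriting every binding equal to `before` into `after`. */
static Node *rerelativize(const Node *n, const void *before, const void *after)
{
    Node *copy = malloc(sizeof *copy);
    *copy = *n;
    if (n->binding == before)
        copy->binding = after; /* the retargeting step */
    if (n->items) {
        copy->items = malloc(n->len * sizeof *copy->items);
        for (size_t i = 0; i < n->len; ++i)
            copy->items[i] = rerelativize(n->items[i], before, after);
    }
    return copy;
}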
//
//  Unbind_Values_Core: C
//
// Unbind words in a block, optionally unbinding only those which are
// bound to a particular context (if `context` is NULL, then all words
// will be unbound regardless of their VAL_WORD_CONTEXT).
//
void Unbind_Values_Core(RELVAL *head, REBCTX *context, REBOOL deep)
{
    RELVAL *value = head;
    for (; NOT_END(value); value++) {
        if (
            ANY_WORD(value)
            && (
                !context
                || (
                    IS_WORD_BOUND(value)
                    && !IS_RELATIVE(value)
                    && VAL_WORD_CONTEXT(KNOWN(value)) == context
                )
            )
        ) {
            UNBIND_WORD(value);
        }
        else if (ANY_ARRAY(value) && deep)
            Unbind_Values_Core(VAL_ARRAY_AT(value), context, TRUE);
    }
}
void _dlstart_c(size_t *sp, size_t *dynv)
{
    size_t i, aux[AUX_CNT], dyn[DYN_CNT];

    int argc = *sp;
    char **argv = (void *)(sp+1);

    for (i=argc+1; argv[i]; i++);
    size_t *auxv = (void *)(argv+i+1);

    for (i=0; i<AUX_CNT; i++) aux[i] = 0;
    for (i=0; auxv[i]; i+=2) if (auxv[i]<AUX_CNT)
        aux[auxv[i]] = auxv[i+1];

    for (i=0; i<DYN_CNT; i++) dyn[i] = 0;
    for (i=0; dynv[i]; i+=2) if (dynv[i]<DYN_CNT)
        dyn[dynv[i]] = dynv[i+1];

    /* If the dynamic linker is invoked as a command, its load
     * address is not available in the aux vector. Instead, compute
     * the load address as the difference between &_DYNAMIC and the
     * virtual address in the PT_DYNAMIC program header. */
    unsigned char *base = (void *)aux[AT_BASE];
    if (!base) {
        size_t phnum = aux[AT_PHNUM];
        size_t phentsize = aux[AT_PHENT];
        Phdr *ph = (void *)aux[AT_PHDR];
        for (i=phnum; i--; ph = (void *)((char *)ph + phentsize)) {
            if (ph->p_type == PT_DYNAMIC) {
                base = (void *)((size_t)dynv - ph->p_vaddr);
                break;
            }
        }
    }

    /* MIPS uses an ugly packed form for GOT relocations. Since we
     * can't make function calls yet and the code is tiny anyway,
     * it's simply inlined here. */
    if (NEED_MIPS_GOT_RELOCS) {
        size_t local_cnt = 0;
        size_t *got = (void *)(base + dyn[DT_PLTGOT]);
        for (i=0; dynv[i]; i+=2) if (dynv[i]==DT_MIPS_LOCAL_GOTNO)
            local_cnt = dynv[i+1];
        for (i=0; i<local_cnt; i++) got[i] += (size_t)base;
    }

    /* The use of the reloc_info structure and nested loops is a trick
     * to work around the fact that we can't necessarily make function
     * calls yet. Each struct in the array serves like the arguments
     * to a function call. */
    struct {
        void *rel;
        size_t size;
        size_t stride;
    } reloc_info[] = {
        { base+dyn[DT_JMPREL], dyn[DT_PLTRELSZ], 2+(dyn[DT_PLTREL]==DT_RELA) },
        { base+dyn[DT_REL], dyn[DT_RELSZ], 2 },
        { base+dyn[DT_RELA], dyn[DT_RELASZ], 3 },
        { 0, 0, 0 }
    };

    for (i=0; reloc_info[i].stride; i++) {
        size_t *rel = reloc_info[i].rel;
        size_t rel_size = reloc_info[i].size;
        size_t stride = reloc_info[i].stride;
        for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
            if (!IS_RELATIVE(rel[1])) continue;
            size_t *rel_addr = (void *)(base + rel[0]);
            size_t addend = stride==3 ? rel[2] : *rel_addr;
            *rel_addr = (size_t)base + addend;
        }
    }

    const char *strings = (void *)(base + dyn[DT_STRTAB]);
    const Sym *syms = (void *)(base + dyn[DT_SYMTAB]);

    /* Call dynamic linker stage-2, __dls2 */
    for (i=0; ;i++) {
        const char *s = strings + syms[i].st_name;
        if (s[0]=='_' && s[1]=='_' && s[2]=='d' && s[3]=='l'
         && s[4]=='s' && s[5]=='2' && !s[6]) break;
    }
    ((stage2_func)(base + syms[i].st_value))(base);

    /* Call dynamic linker stage-3, __dls3 */
    for (i=0; ;i++) {
        const char *s = strings + syms[i].st_name;
        if (s[0]=='_' && s[1]=='_' && s[2]=='d' && s[3]=='l'
         && s[4]=='s' && s[5]=='3' && !s[6]) break;
    }
    ((stage3_func)(base + syms[i].st_value))(sp);
}
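/*
 * The reloc_info[] trick generalizes: when calling a helper function is
 * not an option, encode each would-be call's arguments as a row in a table
 * and run the body once in a loop over the rows.  A self-contained sketch
 * of the same pattern in an ordinary setting (names hypothetical):
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    int a[] = {1, 2, 3}, b[] = {10, 20};

    /* Each row stands in for one "call" to a summing routine. */
    struct { int *base; size_t count; } jobs[] = {
        { a, 3 },
        { b, 2 },
        { 0, 0 } /* terminator, like reloc_info's { 0, 0, 0 } */
    };

    for (size_t i = 0; jobs[i].base; i++) {
        int sum = 0;
        for (size_t j = 0; j < jobs[i].count; j++)
            sum += jobs[i].base[j];
        printf("job %zu: %d\n", i, sum); /* prints 6, then 30 */
    }
    return 0;
}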
void _dlstart_c(size_t *sp, size_t *dynv)
{
    size_t i, aux[AUX_CNT], dyn[DYN_CNT];
    size_t *rel, rel_size, base;

    int argc = *sp;
    char **argv = (void *)(sp+1);

    for (i=argc+1; argv[i]; i++);
    size_t *auxv = (void *)(argv+i+1);

    for (i=0; i<AUX_CNT; i++) aux[i] = 0;
    for (i=0; auxv[i]; i+=2) if (auxv[i]<AUX_CNT)
        aux[auxv[i]] = auxv[i+1];

#if DL_FDPIC
    struct fdpic_loadseg *segs, fakeseg;
    size_t j;
    if (dynv) {
        /* crt_arch.h entry point asm is responsible for reserving
         * space and moving the extra fdpic arguments to the stack
         * vector where they are easily accessible from C. */
        segs = ((struct fdpic_loadmap *)(sp[-1] ? sp[-1] : sp[-2]))->segs;
    } else {
        /* If dynv is null, the entry point was started from a loader
         * that is not fdpic-aware. We can assume normal fixed-
         * displacement ELF loading was performed, but when ldso was
         * run as a command, finding the Ehdr is a heuristic: we
         * have to assume Phdrs start in the first 4k of the file. */
        base = aux[AT_BASE];
        if (!base) base = aux[AT_PHDR] & -4096;
        segs = &fakeseg;
        segs[0].addr = base;
        segs[0].p_vaddr = 0;
        segs[0].p_memsz = -1;
        Ehdr *eh = (void *)base;
        Phdr *ph = (void *)(base + eh->e_phoff);
        size_t phnum = eh->e_phnum;
        size_t phent = eh->e_phentsize;
        while (phnum-- && ph->p_type != PT_DYNAMIC)
            ph = (void *)((size_t)ph + phent);
        dynv = (void *)(base + ph->p_vaddr);
    }
#endif

    for (i=0; i<DYN_CNT; i++) dyn[i] = 0;
    for (i=0; dynv[i]; i+=2) if (dynv[i]<DYN_CNT)
        dyn[dynv[i]] = dynv[i+1];

#if DL_FDPIC
    for (i=0; i<DYN_CNT; i++) {
        if (i==DT_RELASZ || i==DT_RELSZ) continue;
        if (!dyn[i]) continue;
        for (j=0; dyn[i]-segs[j].p_vaddr >= segs[j].p_memsz; j++);
        dyn[i] += segs[j].addr - segs[j].p_vaddr;
    }
    base = 0;

    const Sym *syms = (void *)dyn[DT_SYMTAB];

    rel = (void *)dyn[DT_RELA];
    rel_size = dyn[DT_RELASZ];
    for (; rel_size; rel+=3, rel_size-=3*sizeof(size_t)) {
        if (!IS_RELATIVE(rel[1], syms)) continue;
        for (j=0; rel[0]-segs[j].p_vaddr >= segs[j].p_memsz; j++);
        size_t *rel_addr = (void *)
            (rel[0] + segs[j].addr - segs[j].p_vaddr);
        if (R_TYPE(rel[1]) == REL_FUNCDESC_VAL) {
            *rel_addr += segs[rel_addr[1]].addr
                - segs[rel_addr[1]].p_vaddr
                + syms[R_SYM(rel[1])].st_value;
            rel_addr[1] = dyn[DT_PLTGOT];
        } else {
            size_t val = syms[R_SYM(rel[1])].st_value;
            for (j=0; val-segs[j].p_vaddr >= segs[j].p_memsz; j++);
            *rel_addr = rel[2] + segs[j].addr - segs[j].p_vaddr + val;
        }
    }
#else
    /* If the dynamic linker is invoked as a command, its load
     * address is not available in the aux vector. Instead, compute
     * the load address as the difference between &_DYNAMIC and the
     * virtual address in the PT_DYNAMIC program header. */
    base = aux[AT_BASE];
    if (!base) {
        size_t phnum = aux[AT_PHNUM];
        size_t phentsize = aux[AT_PHENT];
        Phdr *ph = (void *)aux[AT_PHDR];
        for (i=phnum; i--; ph = (void *)((char *)ph + phentsize)) {
            if (ph->p_type == PT_DYNAMIC) {
                base = (size_t)dynv - ph->p_vaddr;
                break;
            }
        }
    }

    /* MIPS uses an ugly packed form for GOT relocations. Since we
     * can't make function calls yet and the code is tiny anyway,
     * it's simply inlined here. */
    if (NEED_MIPS_GOT_RELOCS) {
        size_t local_cnt = 0;
        size_t *got = (void *)(base + dyn[DT_PLTGOT]);
        for (i=0; dynv[i]; i+=2) if (dynv[i]==DT_MIPS_LOCAL_GOTNO)
            local_cnt = dynv[i+1];
        for (i=0; i<local_cnt; i++) got[i] += base;
    }

    rel = (void *)(base+dyn[DT_REL]);
    rel_size = dyn[DT_RELSZ];
    for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t)) {
        if (!IS_RELATIVE(rel[1], 0)) continue;
        size_t *rel_addr = (void *)(base + rel[0]);
        *rel_addr += base;
    }

    rel = (void *)(base+dyn[DT_RELA]);
    rel_size = dyn[DT_RELASZ];
    for (; rel_size; rel+=3, rel_size-=3*sizeof(size_t)) {
        if (!IS_RELATIVE(rel[1], 0)) continue;
        size_t *rel_addr = (void *)(base + rel[0]);
        *rel_addr = base + rel[2];
    }
#endif

    stage2_func dls2;
    GETFUNCSYM(&dls2, __dls2, base+dyn[DT_PLTGOT]);
    dls2((void *)base, sp);
}
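/*
 * What the two relocation loops above compute: for a REL entry the addend
 * is whatever already sits at the target word, for a RELA entry it is
 * carried in the record itself; either way the patched word becomes
 * base + addend.  A self-contained simulation against a fake image (layout
 * simplified to native words, r_info value unused, names hypothetical):
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t image[4] = { 0, 0, 0, 40 }; /* image[3] holds a REL-style addend */
    size_t base = (size_t)image;       /* pretend load address */

    /* RELA-style record: { r_offset, r_info, r_addend } */
    size_t rela[3] = { 1 * sizeof(size_t), 8 /* R_*_RELATIVE */, 24 };
    size_t *addr = (size_t *)((char *)base + rela[0]);
    *addr = base + rela[2];            /* addend comes from the record */

    /* REL-style record: { r_offset, r_info }; addend read in place */
    size_t rel[2] = { 3 * sizeof(size_t), 8 /* R_*_RELATIVE */ };
    addr = (size_t *)((char *)base + rel[0]);
    *addr += base;                     /* addend was stored at the target */

    printf("%zu %zu\n", image[1] - base, image[3] - base); /* prints 24 40 */
    return 0;
}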
//
//  Compose_Any_Array_Throws: C
//
// Compose a block from a block of un-evaluated values and GROUP! arrays that
// are evaluated.  This calls into Do_Core, so if 'into' is provided, then its
// series must be protected from garbage collection.
//
//     deep - recurse into sub-blocks
//     only - parens that return blocks are kept as blocks
//
// Writes result value at address pointed to by out.
//
REBOOL Compose_Any_Array_Throws(
    REBVAL *out,
    const REBVAL *any_array,
    REBOOL deep,
    REBOOL only,
    REBOOL into
) {
    REBDSP dsp_orig = DSP;

    Reb_Enumerator e;
    PUSH_SAFE_ENUMERATOR(&e, any_array); // evaluating could disrupt any_array

    while (NOT_END(e.value)) {
        UPDATE_EXPRESSION_START(&e); // informs the error delivery better

        if (IS_GROUP(e.value)) {
            //
            // We evaluate here, but disable lookahead so it only evaluates
            // the GROUP! and doesn't trigger errors on what's after it.
            //
            REBVAL evaluated;
            DO_NEXT_REFETCH_MAY_THROW(&evaluated, &e, DO_FLAG_NO_LOOKAHEAD);

            if (THROWN(&evaluated)) {
                *out = evaluated;
                DS_DROP_TO(dsp_orig);
                DROP_SAFE_ENUMERATOR(&e);
                return TRUE;
            }

            if (IS_BLOCK(&evaluated) && !only) {
                //
                // compose [blocks ([a b c]) merge] => [blocks a b c merge]
                //
                RELVAL *push = VAL_ARRAY_AT(&evaluated);
                while (NOT_END(push)) {
                    //
                    // `evaluated` is known to be specific, but its specifier
                    // may be needed to derelativize its children.
                    //
                    DS_PUSH_RELVAL(push, VAL_SPECIFIER(&evaluated));
                    push++;
                }
            }
            else if (!IS_VOID(&evaluated)) {
                //
                // compose [(1 + 2) inserts as-is] => [3 inserts as-is]
                // compose/only [([a b c]) unmerged] => [[a b c] unmerged]
                //
                DS_PUSH(&evaluated);
            }
            else {
                //
                // compose [(print "Voids *vanish*!")] => []
                //
            }
        }
        else if (deep) {
            if (IS_BLOCK(e.value)) {
                //
                // compose/deep [does [(1 + 2)] nested] => [does [3] nested]

                REBVAL specific;
                COPY_VALUE(&specific, e.value, e.specifier);

                REBVAL composed;
                if (Compose_Any_Array_Throws(
                    &composed, &specific, TRUE, only, into
                )) {
                    *out = composed;
                    DS_DROP_TO(dsp_orig);
                    DROP_SAFE_ENUMERATOR(&e);
                    return TRUE;
                }

                DS_PUSH(&composed);
            }
            else {
                if (ANY_ARRAY(e.value)) {
                    //
                    // compose [copy/(orig) (copy)] => [copy/(orig) (copy)]
                    // !!! path and second group are copies, first group isn't
                    //
                    REBARR *copy = Copy_Array_Shallow(
                        VAL_ARRAY(e.value),
                        IS_RELATIVE(e.value)
                            ? e.specifier // use parent specifier if relative...
                            : VAL_SPECIFIER(const_KNOWN(e.value)) // else child's
                    );
                    DS_PUSH_TRASH;
                    Val_Init_Array_Index(
                        DS_TOP, VAL_TYPE(e.value), copy, VAL_INDEX(e.value)
                    ); // ...manages
                }
                else
                    DS_PUSH_RELVAL(e.value, e.specifier);
            }
            FETCH_NEXT_ONLY_MAYBE_END(&e);
        }
        else {
            //
            // compose [[(1 + 2)] (reverse "wollahs")] => [[(1 + 2)] "shallow"]
            //
            DS_PUSH_RELVAL(e.value, e.specifier);
            FETCH_NEXT_ONLY_MAYBE_END(&e);
        }
    }

    if (into)
        Pop_Stack_Values_Into(out, dsp_orig);
    else
        Val_Init_Array(out, VAL_TYPE(any_array), Pop_Stack_Values(dsp_orig));

    DROP_SAFE_ENUMERATOR(&e);
    return FALSE;
}
//
//  Next_Path_Throws: C
//
// Evaluate next part of a path.
//
REBOOL Next_Path_Throws(REBPVS *pvs)
{
    REBPEF dispatcher;

    // Path must have dispatcher, else return:
    dispatcher = Path_Dispatch[VAL_TYPE(pvs->value)];
    if (!dispatcher) return FALSE; // unwind, then check for errors

    pvs->item++;

    //Debug_Fmt("Next_Path: %r/%r", pvs->path-1, pvs->path);

    // Determine the "selector".  See notes on pvs->selector_temp for why
    // a local variable can't be used for the temporary space.
    //
    if (IS_GET_WORD(pvs->item)) { // e.g. object/:field
        pvs->selector
            = GET_MUTABLE_VAR_MAY_FAIL(pvs->item, pvs->item_specifier);

        if (IS_VOID(pvs->selector))
            fail (Error_No_Value_Core(pvs->item, pvs->item_specifier));

        SET_TRASH_IF_DEBUG(&pvs->selector_temp);
    }
    // object/(expr) case:
    else if (IS_GROUP(pvs->item)) {
        if (Do_At_Throws(
            &pvs->selector_temp,
            VAL_ARRAY(pvs->item),
            VAL_INDEX(pvs->item),
            IS_RELATIVE(pvs->item)
                ? pvs->item_specifier // if relative, use parent specifier...
                : VAL_SPECIFIER(const_KNOWN(pvs->item)) // ...else use child's
        )) {
            *pvs->store = pvs->selector_temp;
            return TRUE;
        }

        pvs->selector = &pvs->selector_temp;
    }
    else {
        // object/word and object/value case:
        //
        COPY_VALUE(&pvs->selector_temp, pvs->item, pvs->item_specifier);
        pvs->selector = &pvs->selector_temp;
    }

    switch (dispatcher(pvs)) {
    case PE_OK:
        break;

    case PE_SET_IF_END:
        if (pvs->opt_setval && IS_END(pvs->item + 1)) {
            *pvs->value = *pvs->opt_setval;
            pvs->opt_setval = NULL;
        }
        break;

    case PE_NONE:
        SET_BLANK(pvs->store);
        // falls through
    case PE_USE_STORE:
        pvs->value = pvs->store;
        pvs->value_specifier = SPECIFIED;
        break;

    default:
        assert(FALSE);
    }

    if (NOT_END(pvs->item + 1)) return Next_Path_Throws(pvs);

    return FALSE;
}
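/*
 * Path_Dispatch is a classic type-indexed dispatch table: an array of
 * function pointers keyed by value type, with NULL marking types that
 * cannot be path-dispatched.  A self-contained sketch of the pattern
 * (names hypothetical):
 */
#include <stdio.h>

enum { T_BLANK, T_OBJECT, T_BLOCK, T_MAX };

typedef int (*dispatcher_fn)(int selector);

static int object_pick(int selector) { return selector * 10; }
static int block_pick(int selector)  { return selector + 100; }

/* NULL entries mean "no dispatcher; caller reports an error". */
static dispatcher_fn dispatch[T_MAX] = {
    NULL,        /* T_BLANK  */
    object_pick, /* T_OBJECT */
    block_pick,  /* T_BLOCK  */
};

int main(void)
{
    int type = T_OBJECT;
    dispatcher_fn fn = dispatch[type];
    if (!fn)
        puts("no dispatcher for this type");
    else
        printf("%d\n", fn(3)); /* prints 30 */
    return 0;
}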
//
//  Do_Path_Throws_Core: C
//
// Evaluate an ANY_PATH! REBVAL, starting from the index position of that
// path value and continuing to the end.
//
// The evaluator may throw because GROUP! is evaluated, e.g. `foo/(throw 1020)`
//
// If `label_out` is passed in as non-NULL, then the caller is implying
// readiness to process a path which may be a function with refinements.
// These refinements will be left in order on the data stack in the case
// that `out` comes back as IS_FUNCTION().
//
// If `opt_setval` is given, the path operation will be done as a "SET-PATH!"
// if the path evaluation did not throw or error.  HOWEVER the set value
// is NOT put into `out`.  This provides more flexibility on performance in
// the evaluator, which may already have the `val` where it wants it, and
// so the extra assignment would just be overhead.
//
// !!! Path evaluation is one of the parts of R3-Alpha that has not been
// vetted very heavily by Ren-C, and needs a review and overhaul.
//
REBOOL Do_Path_Throws_Core(
    REBVAL *out,
    REBSTR **label_out,
    const RELVAL *path,
    REBCTX *specifier,
    REBVAL *opt_setval
) {
    REBPVS pvs;
    REBDSP dsp_orig = DSP;

    assert(ANY_PATH(path));

    // !!! There is a bug in the dispatch such that if you are running a
    // set path, it does not always assign the output, because it "thinks you
    // aren't going to look at it".  This presumably originated from before
    // parens were allowed in paths, and neglects cases like:
    //
    //     foo/(throw 1020): value
    //
    // We always have to check to see if a throw occurred.  Until this is
    // streamlined, we have to at minimum set it to something that is *not*
    // thrown so that we aren't testing uninitialized memory.  A safe trash
    // will do, which is unset in release builds.
    //
    if (opt_setval)
        SET_TRASH_SAFE(out);

    // None of the values passed in can live on the data stack, because
    // they might be relocated during the path evaluation process.
    //
    assert(!IN_DATA_STACK_DEBUG(out));
    assert(!IN_DATA_STACK_DEBUG(path));
    assert(!opt_setval || !IN_DATA_STACK_DEBUG(opt_setval));

    // Not currently robust for reusing passed in path or value as the output
    assert(out != path && out != opt_setval);

    assert(!opt_setval || !THROWN(opt_setval));

    // Initialize REBPVS -- see notes in %sys-do.h
    //
    pvs.opt_setval = opt_setval;
    pvs.store = out;
    pvs.orig = path;
    pvs.item = VAL_ARRAY_AT(pvs.orig); // may not be starting at head of PATH!

    // The path value that's coming in may be relative (in which case it
    // needs to use the specifier passed in).  Or it may be specific already,
    // in which case we should use the specifier in the value to process
    // its array contents.
    //
    if (IS_RELATIVE(path)) {
    #if !defined(NDEBUG)
        assert(specifier != SPECIFIED);

        if (VAL_RELATIVE(path) != VAL_FUNC(CTX_FRAME_FUNC_VALUE(specifier))) {
            Debug_Fmt("Specificity mismatch found in path dispatch");
            PROBE_MSG(path, "the path being evaluated");
            PROBE_MSG(FUNC_VALUE(VAL_RELATIVE(path)), "expected func");
            PROBE_MSG(CTX_FRAME_FUNC_VALUE(specifier), "actual func");
            assert(FALSE);
        }
    #endif
        pvs.item_specifier = specifier;
    }
    else
        pvs.item_specifier = VAL_SPECIFIER(const_KNOWN(path));

    // Seed the path evaluation process by looking up the first item (to
    // get a datatype to dispatch on for the later path items)
    //
    if (IS_WORD(pvs.item)) {
        pvs.value = GET_MUTABLE_VAR_MAY_FAIL(pvs.item, pvs.item_specifier);
        pvs.value_specifier = SPECIFIED;
        if (IS_VOID(pvs.value))
            fail (Error_No_Value_Core(pvs.item, pvs.item_specifier));
    }
    else {
        // !!! Ideally there would be some way to deal with writes to
        // temporary locations, like this pvs.value...if a set-path sets
        // it, then it will be discarded.
        //
        COPY_VALUE(pvs.store, VAL_ARRAY_AT(pvs.orig), pvs.item_specifier);
        pvs.value = pvs.store;
        pvs.value_specifier = SPECIFIED;
    }

    // Start evaluation of path:
    if (IS_END(pvs.item + 1)) {
        // If it was a single element path, return the value rather than
        // try to dispatch it (would cause a crash at time of writing)
        //
        // !!! Is this the desired behavior, or should it be an error?
    }
    else if (Path_Dispatch[VAL_TYPE(pvs.value)]) {
        REBOOL threw = Next_Path_Throws(&pvs);

        // !!! See comments about why the initialization of out is necessary.
        // Without it this assertion can trigger on some things:
        //
        //     t: now
        //     t/time: 10:20:03
        //
        // (It thinks pvs.value has its THROWN bit set when it completed
        // successfully.  It was a PE_USE_STORE case where pvs.value was
        // reset to pvs.store, and pvs.store has its thrown bit set.
        // Valgrind does not catch any uninitialized variables.)
        //
        // There are other cases that do trip valgrind when omitting the
        // initialization, though not as clearly reproducible.
        //
        assert(threw == THROWN(pvs.value));

        if (threw) return TRUE;

        // Check for errors:
        if (NOT_END(pvs.item + 1) && !IS_FUNCTION(pvs.value)) {
            //
            // Only function refinements should get by this line:

            REBVAL specified_orig;
            COPY_VALUE(&specified_orig, pvs.orig, specifier);

            REBVAL specified_item;
            COPY_VALUE(&specified_item, pvs.item, specifier);

            fail (Error(RE_INVALID_PATH, &specified_orig, &specified_item));
        }
    }
    else if (!IS_FUNCTION(pvs.value)) {
        REBVAL specified;
        COPY_VALUE(&specified, pvs.orig, specifier);
        fail (Error(RE_BAD_PATH_TYPE, &specified, Type_Of(pvs.value)));
    }

    if (opt_setval) {
        // If SET then we don't return anything
        assert(IS_END(pvs.item + 1));
        return FALSE;
    }

    // If storage was not used, then copy final value back to it:
    if (pvs.value != pvs.store)
        COPY_VALUE(pvs.store, pvs.value, pvs.value_specifier);

    assert(!THROWN(out));

    // Return 0 if not function or is :path/word...
    if (!IS_FUNCTION(pvs.value)) {
        assert(IS_END(pvs.item + 1));
        return FALSE;
    }

    if (label_out) {
        REBVAL refinement;

        // When a function is hit, path processing stops as soon as the
        // processed sub-path resolves to a function.  The path is still
        // sitting on the position of the last component of that sub-path.
        // Usually, this last component in the sub-path is a word naming
        // the function.
        //
        if (IS_WORD(pvs.item)) {
            *label_out = VAL_WORD_SPELLING(pvs.item);
        }
        else {
            // In rarer cases, the final component (completing the sub-path
            // to the function to call) is not a word.  Such as when you use
            // a path to pick by index out of a block of functions:
            //
            //     functions: reduce [:add :subtract]
            //     functions/1 10 20
            //
            // Or when you have an immediate function value in a path with
            // a refinement.  Tricky to make, but possible:
            //
            //     do reduce [
            //         to-path reduce [:append 'only] [a] [b]
            //     ]
            //
            // !!! When a function was not invoked through looking up a word
            // (or a word in a path) to use as a label, there were once three
            // different alternate labels used.  One was SYM__APPLY_, another
            // was ROOT_NONAME, and another was to be the type of the
            // function being executed.  None are fantastic, we do the type
            // for now.

            *label_out = Canon(SYM_FROM_KIND(VAL_TYPE(pvs.value)));
        }

        // Move on to the refinements (if any)
        ++pvs.item;

        // !!! Currently, the mainline path evaluation "punts" on refinements.
        // When it finds a function, it stops the path evaluation and leaves
        // the position pvs.path before the list of refinements.
        //
        // A more elegant solution would be able to process and notice (for
        // instance) that `:APPEND/ONLY` should yield a function value that
        // has been specialized with a refinement.  Path chaining should thus
        // be able to effectively do this and give the refined function
        // object back to the evaluator or other client.
        //
        // If a label_out is passed in, we recognize that a function dispatch
        // is going to be happening.  We do not want to pay to generate the
        // new series that would be needed to make a temporary function that
        // will be invoked and immediately GC'd.  So we gather the
        // refinements on the data stack.
        //
        // This code simulates that path-processing-to-data-stack, but it
        // should really be something in dispatch itself.  In any case, we
        // put refinements on the data stack...and the caller knows the
        // refinements are from dsp_orig to DSP (thanks to accounting, all
        // other operations should balance!)

        for (; NOT_END(pvs.item); ++pvs.item) { // "the refinements"
            if (IS_VOID(pvs.item)) continue;

            if (IS_GROUP(pvs.item)) {
                //
                // Note it is not legal to use the data stack directly as the
                // output location for a DO (might be resized)

                if (Do_At_Throws(
                    &refinement,
                    VAL_ARRAY(pvs.item),
                    VAL_INDEX(pvs.item),
                    IS_RELATIVE(pvs.item)
                        ? pvs.item_specifier // if relative, use parent's
                        : VAL_SPECIFIER(const_KNOWN(pvs.item)) // else embedded
                )) {
                    *out = refinement;
                    DS_DROP_TO(dsp_orig);
                    return TRUE;
                }
                if (IS_VOID(&refinement)) continue;
                DS_PUSH(&refinement);
            }
            else if (IS_GET_WORD(pvs.item)) {
                DS_PUSH_TRASH;
                *DS_TOP = *GET_OPT_VAR_MAY_FAIL(pvs.item, pvs.item_specifier);
                if (IS_VOID(DS_TOP)) {
                    DS_DROP;
                    continue;
                }
            }
            else
                DS_PUSH_RELVAL(pvs.item, pvs.item_specifier);

            // Whatever we were trying to use as a refinement should now be
            // on the top of the data stack, and only words are legal ATM
            //
            if (!IS_WORD(DS_TOP))
                fail (Error(RE_BAD_REFINE, DS_TOP));

            // Go ahead and canonize the word symbol so we don't have to
            // do it each time in order to get a case-insensitive compare
            //
            INIT_WORD_SPELLING(DS_TOP, VAL_WORD_CANON(DS_TOP));
        }

        // To make things easier for processing, reverse the refinements on
        // the data stack (we needed to evaluate them in forward order).
        // This way we can just pop them as we go, and know if they weren't
        // all consumed if it doesn't get back to `dsp_orig` by the end.

        if (dsp_orig != DSP) {
            REBVAL *bottom = DS_AT(dsp_orig + 1);
            REBVAL *top = DS_TOP;
            while (top > bottom) {
                refinement = *bottom;
                *bottom = *top;
                *top = refinement;
                top--;
                bottom++;
            }
        }
    }
    else {
        // !!! Historically this just ignores a result indicating this is a
        // function with refinements, e.g. ':append/only'.  However that
        // ignoring seems unwise.  It should presumably create a modified
        // function in that case which acts as if it has the refinement.
        //
        // If the caller did not pass in a label pointer we assume they are
        // likely not ready to process any refinements.
        //
        if (NOT_END(pvs.item + 1))
            fail (Error(RE_TOO_LONG)); // !!! Better error or add feature
    }

    return FALSE;
}
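/*
 * The refinement reversal above is the standard two-pointer in-place swap
 * over a contiguous range; there it runs over data stack slots, using a
 * spare REBVAL as the swap temporary.  The same loop on a plain array:
 */
#include <stdio.h>

int main(void)
{
    int refs[] = {1, 2, 3, 4};
    int *bottom = refs;
    int *top = refs + 3;
    while (top > bottom) {
        int tmp = *bottom; /* same role as `refinement` above */
        *bottom = *top;
        *top = tmp;
        top--;
        bottom++;
    }
    printf("%d %d %d %d\n", refs[0], refs[1], refs[2], refs[3]); /* 4 3 2 1 */
    return 0;
}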