// // PD_Vector: C // // Path dispatch acts like PICK for GET-PATH! and POKE for SET-PATH! // REBINT PD_Vector(REBPVS *pvs) { if (pvs->opt_setval) { Poke_Vector_Fail_If_Locked( KNOWN(pvs->value), pvs->selector, pvs->opt_setval ); return PE_OK; } Pick_Vector(pvs->store, KNOWN(pvs->value), pvs->selector); return PE_USE_STORE; }
// // Specializer_Dispatcher: C // // The evaluator does not do any special "running" of a specialized frame. // All of the contribution that the specialization had to make was taken care // of when Eval_Core() used f->special to fill from the exemplar. So all this // does is change the phase and binding to match the function this layer wa // specializing. // REB_R Specializer_Dispatcher(REBFRM *f) { REBARR *details = ACT_DETAILS(FRM_PHASE(f)); REBVAL *exemplar = KNOWN(ARR_HEAD(details)); assert(IS_FRAME(exemplar)); INIT_FRM_PHASE(f, VAL_PHASE(exemplar)); FRM_BINDING(f) = VAL_BINDING(exemplar); return R_REDO_UNCHECKED; // redo uses the updated phase and binding }
// // INIT_WORD_INDEX_Debug: C // void INIT_WORD_INDEX_Debug(RELVAL *v, REBCNT i) { assert(ANY_WORD(v)); assert(GET_VAL_FLAG((v), WORD_FLAG_BOUND)); if (IS_RELATIVE(v)) assert( VAL_WORD_CANON(v) == VAL_PARAM_CANON(FUNC_PARAM(VAL_WORD_FUNC(v), i)) ); else assert( VAL_WORD_CANON(v) == CTX_KEY_CANON(VAL_WORD_CONTEXT(KNOWN(v)), i) ); v->payload.any_word.index = i; }
// // Copy_Rerelativized_Array_Deep_Managed: C // // The invariant of copying in general is that when you are done with the // copy, there are no relative values in that copy. One exception to this // is the deep copy required to make a relative function body in the first // place (which it currently does in two passes--a normal deep copy followed // by a relative binding). The other exception is when a relativized // function body is copied to make another relativized function body. // // This is specialized logic for the latter case. It's constrained enough // to be simple (all relative values are known to be relative to the same // function), and the feature is questionable anyway. So it's best not to // further complicate ordinary copying with a parameterization to copy // and change all the relative binding information from one function's // paramlist to another. // REBARR *Copy_Rerelativized_Array_Deep_Managed( REBARR *original, REBACT *before, // references to `before` will be changed to `after` REBACT *after ){ const REBFLGS flags = NODE_FLAG_MANAGED; REBARR *copy = Make_Array_For_Copy(ARR_LEN(original), flags, original); RELVAL *src = ARR_HEAD(original); RELVAL *dest = ARR_HEAD(copy); for (; NOT_END(src); ++src, ++dest) { if (not IS_RELATIVE(src)) { Move_Value(dest, KNOWN(src)); continue; } // All relative values under a sub-block must be relative to the // same function. // assert(VAL_RELATIVE(src) == before); Move_Value_Header(dest, src); if (ANY_ARRAY_OR_PATH(src)) { INIT_VAL_NODE( dest, Copy_Rerelativized_Array_Deep_Managed( VAL_ARRAY(src), before, after ) ); PAYLOAD(Any, dest).second = PAYLOAD(Any, src).second; INIT_BINDING(dest, after); // relative binding } else { assert(ANY_WORD(src)); PAYLOAD(Any, dest) = PAYLOAD(Any, src); INIT_BINDING(dest, after); } } TERM_ARRAY_LEN(copy, ARR_LEN(original)); return copy; }
// // Rebind_Values_Deep: C // // Rebind all words that reference src target to dst target. // Rebind is always deep. // void Rebind_Values_Deep( REBCTX *src, REBCTX *dst, RELVAL *head, struct Reb_Binder *opt_binder ) { RELVAL *value = head; for (; NOT_END(value); value++) { if (ANY_ARRAY(value)) { Rebind_Values_Deep(src, dst, VAL_ARRAY_AT(value), opt_binder); } else if ( ANY_WORD(value) && GET_VAL_FLAG(value, WORD_FLAG_BOUND) && !GET_VAL_FLAG(value, VALUE_FLAG_RELATIVE) && VAL_WORD_CONTEXT(KNOWN(value)) == src ) { INIT_WORD_CONTEXT(value, dst); if (opt_binder != NULL) { INIT_WORD_INDEX( value, Try_Get_Binder_Index(opt_binder, VAL_WORD_CANON(value)) ); } } else if (IS_FUNCTION(value) && IS_FUNCTION_INTERPRETED(value)) { // // !!! Extremely questionable feature--walking into function // bodies and changing them. This R3-Alpha concept was largely // broken (didn't work for closures) and created a lot of extra // garbage (inheriting an object's methods meant making deep // copies of all that object's method bodies...each time). // Ren-C has a different idea in the works. // Rebind_Values_Deep( src, dst, VAL_FUNC_BODY(value), opt_binder ); } } }
// // Unbind_Values_Core: C // // Unbind words in a block, optionally unbinding those which are // bound to a particular target (if target is NULL, then all // words will be unbound regardless of their VAL_WORD_CONTEXT). // void Unbind_Values_Core(RELVAL *head, REBCTX *context, REBOOL deep) { RELVAL *value = head; for (; NOT_END(value); value++) { if ( ANY_WORD(value) && ( !context || ( IS_WORD_BOUND(value) && !IS_RELATIVE(value) && VAL_WORD_CONTEXT(KNOWN(value)) == context ) ) ) { UNBIND_WORD(value); } else if (ANY_ARRAY(value) && deep) Unbind_Values_Core(VAL_ARRAY_AT(value), context, TRUE); } }
GRAPHSTUDIO_NAMESPACE_START // cf stdafx.h for explanation // For filter DLL building, allow GUID functionality and its dependencies to be suppressed #ifndef GUID_LIST_SUPRESS_GUIDS #define KNOWN(x) { x, _T( # x ) } struct GuidPair { GUID guid; LPCTSTR name; }; const GuidPair KnownGuidList[] = { // filters KNOWN(CLSID_AsyncReader), KNOWN(GUID_NULL), // Media Types KNOWN(MEDIATYPE_AnalogAudio), KNOWN(MEDIATYPE_AnalogVideo), KNOWN(MEDIATYPE_Audio), KNOWN(MEDIATYPE_AUXLine21Data), KNOWN(MEDIATYPE_AUXTeletextPage), KNOWN(MEDIATYPE_CC_CONTAINER), KNOWN(MEDIATYPE_DTVCCData), KNOWN(MEDIATYPE_DVD_ENCRYPTED_PACK), KNOWN(MEDIATYPE_DVD_NAVIGATION), KNOWN(MEDIATYPE_File), KNOWN(MEDIATYPE_Interleaved), KNOWN(MEDIATYPE_LMRT),
// Merge the channel-layout sets `a` and `b` into their intersection.
//
// Returns the merged set (which may reuse one of the inputs' storage and
// refs), or NULL on failure / empty intersection.  On success, ownership
// of `a`'s and `b`'s references is folded into the returned set via
// MERGE_REF.  "Generic" entries (all_layouts/all_counts, or
// FF_COUNT2LAYOUT counts) match any layout with the same channel count;
// KNOWN() entries are concrete layout masks.
//
AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
                                                 AVFilterChannelLayouts *b)
{
    AVFilterChannelLayouts *ret = NULL;
    unsigned a_all = a->all_layouts + a->all_counts;
    unsigned b_all = b->all_layouts + b->all_counts;
    int ret_max, ret_nb = 0, i, j, round;

    if (a == b) return a;

    /* Put the most generic set in a, to avoid doing everything twice */
    if (a_all < b_all) {
        FFSWAP(AVFilterChannelLayouts *, a, b);
        FFSWAP(unsigned, a_all, b_all);
    }
    if (a_all) {
        if (a_all == 1 && !b_all) {
            /* keep only known layouts in b; works also for b_all = 1 */
            for (i = j = 0; i < b->nb_channel_layouts; i++)
                if (KNOWN(b->channel_layouts[i]))
                    b->channel_layouts[j++] = b->channel_layouts[i];
            /* Not optimal: the unknown layouts of b may become known after
               another merge. */
            if (!j)
                return NULL;
            b->nb_channel_layouts = j;
        }
        /* a accepts everything b has kept, so b is the intersection. */
        MERGE_REF(b, a, channel_layouts, AVFilterChannelLayouts, fail);
        return b;
    }

    /* Upper bound on the intersection size; each pass below appends at
       most one entry per element of a. */
    ret_max = a->nb_channel_layouts + b->nb_channel_layouts;
    if (!(ret = av_mallocz(sizeof(*ret))) ||
        !(ret->channel_layouts = av_malloc(sizeof(*ret->channel_layouts) *
                                           ret_max)))
        goto fail;

    /* a[known] intersect b[known] */
    for (i = 0; i < a->nb_channel_layouts; i++) {
        if (!KNOWN(a->channel_layouts[i]))
            continue;
        for (j = 0; j < b->nb_channel_layouts; j++) {
            if (a->channel_layouts[i] == b->channel_layouts[j]) {
                ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
                /* zero out both so later passes don't re-match them */
                a->channel_layouts[i] = b->channel_layouts[j] = 0;
            }
        }
    }
    /* 1st round: a[known] intersect b[generic]
       2nd round: a[generic] intersect b[known] */
    for (round = 0; round < 2; round++) {
        for (i = 0; i < a->nb_channel_layouts; i++) {
            uint64_t fmt = a->channel_layouts[i], bfmt;
            if (!fmt || !KNOWN(fmt))
                continue;
            /* a generic entry matching this layout's channel count */
            bfmt = FF_COUNT2LAYOUT(av_get_channel_layout_nb_channels(fmt));
            for (j = 0; j < b->nb_channel_layouts; j++)
                if (b->channel_layouts[j] == bfmt)
                    ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
        }
        /* 1st round: swap to prepare 2nd round; 2nd round: put it back */
        FFSWAP(AVFilterChannelLayouts *, a, b);
    }
    /* a[generic] intersect b[generic] */
    for (i = 0; i < a->nb_channel_layouts; i++) {
        if (KNOWN(a->channel_layouts[i]))
            continue;
        for (j = 0; j < b->nb_channel_layouts; j++)
            if (a->channel_layouts[i] == b->channel_layouts[j])
                ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
    }

    ret->nb_channel_layouts = ret_nb;
    if (!ret->nb_channel_layouts)
        goto fail;
    /* NOTE(review): if the second MERGE_REF fails, refs already merged by
       the first are freed below via ret->refs -- presumably MERGE_REF's
       fail path leaves things consistent; verify against its definition. */
    MERGE_REF(ret, a, channel_layouts, AVFilterChannelLayouts, fail);
    MERGE_REF(ret, b, channel_layouts, AVFilterChannelLayouts, fail);
    return ret;

fail:
    if (ret) {
        av_freep(&ret->refs);
        av_freep(&ret->channel_layouts);
    }
    av_freep(&ret);
    return NULL;
}
//
//  Do_Breakpoint_Throws: C
//
// A call to Do_Breakpoint_Throws does delegation to a hook in the host, which
// (if registered) will generally start an interactive session for probing the
// environment at the break.  The RESUME native cooperates by being able to
// give back a value (or give back code to run to produce a value) that the
// call to breakpoint returns.
//
// RESUME has another feature, which is to be able to actually unwind and
// simulate a return /AT a function *further up the stack*.  (This may be
// switched to a feature of a "step out" command at some point.)
//
// Parameters:
//   out           - output cell; on return holds the thrown value
//   interrupted   - TRUE if entered via Ctrl-C rather than a BREAKPOINT
//   default_value - value (or code block, if do_default) used when the
//                   resume instruction supplies no /DO or /WITH override
//   do_default    - if TRUE, default_value is code to evaluate
//
// Always returns TRUE (every exit path is a throw of some kind).
//
REBOOL Do_Breakpoint_Throws(
    REBVAL *out,
    REBOOL interrupted, // Ctrl-C (as opposed to a BREAKPOINT)
    const REBVAL *default_value,
    REBOOL do_default
) {
    REBVAL *target = BLANK_VALUE;

    REBVAL temp;

    if (!PG_Breakpoint_Quitting_Hook) {
        //
        // Host did not register any breakpoint handler, so raise an error
        // about this as early as possible.
        //
        fail (Error(RE_HOST_NO_BREAKPOINT));
    }

    // We call the breakpoint hook in a loop, in order to keep running if any
    // inadvertent FAILs or THROWs occur during the interactive session.
    // Only a conscious call of RESUME speaks the protocol to break the loop.
    //
    while (TRUE) {
        struct Reb_State state;
        REBCTX *error;

    push_trap:
        PUSH_TRAP(&error, &state);

        // The host may return a block of code to execute, but cannot
        // while evaluating do a THROW or a FAIL that causes an effective
        // "resumption".  Halt is the exception, hence we PUSH_TRAP and
        // not PUSH_UNHALTABLE_TRAP.  QUIT is also an exception, but a
        // desire to quit is indicated by the return value of the breakpoint
        // hook (which may or may not decide to request a quit based on the
        // QUIT command being run).
        //
        // The core doesn't want to get involved in presenting UI, so if
        // an error makes it here and wasn't trapped by the host first that
        // is a bug in the host.  It should have done its own PUSH_TRAP.
        //
        if (error) {
        #if !defined(NDEBUG)
            REBVAL error_value;
            Val_Init_Error(&error_value, error);
            PROBE_MSG(&error_value, "Error not trapped during breakpoint:");
            Panic_Array(CTX_VARLIST(error));
        #endif

            // In release builds, if an error managed to leak out of the
            // host's breakpoint hook somehow...just re-push the trap state
            // and try it again.
            //
            goto push_trap;
        }

        // Call the host's breakpoint hook.
        //
        if (PG_Breakpoint_Quitting_Hook(&temp, interrupted)) {
            //
            // If a breakpoint hook returns TRUE that means it wants to quit.
            // The value should be the /WITH value (as in QUIT/WITH), so
            // not actually a "resume instruction" in this case.
            //
            assert(!THROWN(&temp));
            *out = *NAT_VALUE(quit);
            CONVERT_NAME_TO_THROWN(out, &temp);
            return TRUE; // TRUE = threw
        }

        // If a breakpoint handler returns FALSE, then it should have passed
        // back a "resume instruction" triggered by a call like:
        //
        //     resume/do [fail "This is how to fail from a breakpoint"]
        //
        // So now that the handler is done, we will allow any code handed back
        // to do whatever FAIL it likes vs. trapping that here in a loop.
        //
        DROP_TRAP_SAME_STACKLEVEL_AS_PUSH(&state);

        // Decode and process the "resume instruction"
        {
            REBFRM *frame;
            REBVAL *mode;
            REBVAL *payload;

        #if !defined(NDEBUG)
            REBOOL found = FALSE;
        #endif

            assert(IS_GROUP(&temp));
            assert(VAL_LEN_HEAD(&temp) == RESUME_INST_MAX);

            // The instruction was built from raw material, non-relative
            //
            mode = KNOWN(VAL_ARRAY_AT_HEAD(&temp, RESUME_INST_MODE));
            payload = KNOWN(VAL_ARRAY_AT_HEAD(&temp, RESUME_INST_PAYLOAD));
            target = KNOWN(VAL_ARRAY_AT_HEAD(&temp, RESUME_INST_TARGET));

            assert(IS_FRAME(target));

            // The first thing we need to do is determine if the target we
            // want to return to has another breakpoint sandbox blocking
            // us.  If so, what we need to do is actually retransmit the
            // resume instruction so it can break that wall, vs. transform
            // it into an EXIT/FROM that would just get intercepted.
            //
            for (frame = FS_TOP; frame != NULL; frame = frame->prior) {
                if (NOT(Is_Any_Function_Frame(frame)))
                    continue;
                if (Is_Function_Frame_Fulfilling(frame))
                    continue;

                if (
                    frame != FS_TOP
                    && (
                        FUNC_DISPATCHER(frame->func) == &N_pause
                        || FUNC_DISPATCHER(frame->func) == &N_breakpoint
                    )
                ) {
                    // We hit a breakpoint (that wasn't this call to
                    // breakpoint, at the current FS_TOP) before finding
                    // the sought after target.  Retransmit the resume
                    // instruction so that level will get it instead.
                    //
                    *out = *NAT_VALUE(resume);
                    CONVERT_NAME_TO_THROWN(out, &temp);
                    return TRUE; // TRUE = thrown
                }

                // If the frame were the one we were looking for, it would be
                // reified (so it would have a context to match)
                //
                if (frame->varlist == NULL)
                    continue;

                if (VAL_CONTEXT(target) == AS_CONTEXT(frame->varlist)) {
                    //
                    // Found a match before hitting any breakpoints, so no
                    // need to retransmit.
                    //
                #if !defined(NDEBUG)
                    found = TRUE;
                #endif
                    break;
                }
            }

            // RESUME should not have been willing to use a target that
            // is not on the stack.
            //
        #if !defined(NDEBUG)
            assert(found);
        #endif

            if (IS_BLANK(mode)) {
                //
                // If the resume instruction had no /DO or /WITH of its own,
                // then it doesn't override whatever the breakpoint provided
                // as a default.  (If neither the breakpoint nor the resume
                // provided a /DO or a /WITH, result will be void.)
                //
                goto return_default; // heeds `target`
            }

            assert(IS_LOGIC(mode));

            if (VAL_LOGIC(mode)) {
                // mode TRUE = /DO: evaluate the payload block now.
                //
                if (DO_VAL_ARRAY_AT_THROWS(&temp, payload)) {
                    //
                    // Throwing is not compatible with /AT currently.
                    //
                    if (!IS_BLANK(target))
                        fail (Error_No_Catch_For_Throw(&temp));

                    // Just act as if the BREAKPOINT call itself threw
                    //
                    *out = temp;
                    return TRUE; // TRUE = thrown
                }

                // Ordinary evaluation result...
            }
            else
                temp = *payload; // mode FALSE = /WITH: use payload directly
        }

        // The resume instruction will be GC'd.
        //
        goto return_temp;
    }

    DEAD_END;

return_default:

    if (do_default) {
        if (DO_VAL_ARRAY_AT_THROWS(&temp, default_value)) {
            //
            // If the code throws, we're no longer in the sandbox...so we
            // bubble it up.  Note that breakpoint runs this code at its
            // level... so even if you request a higher target, any throws
            // will be processed as if they originated at the BREAKPOINT
            // frame.  To do otherwise would require the EXIT/FROM protocol
            // to add support for DO-ing at the receiving point.
            //
            *out = temp;
            return TRUE; // TRUE = thrown
        }
    }
    else
        temp = *default_value; // generally void if no /WITH

return_temp:
    //
    // If the target is a function, then we're looking to simulate a return
    // from something up the stack.  This uses the same mechanic as
    // definitional returns--a throw named by the function or closure frame.
    //
    // !!! There is a weak spot in definitional returns for FUNCTION! that
    // they can only return to the most recent invocation; which is a weak
    // spot of FUNCTION! in general with stack relative variables.  Also,
    // natives do not currently respond to definitional returns...though
    // they can do so just as well as FUNCTION! can.
    //
    Make_Thrown_Exit_Value(out, target, &temp, NULL);

    return TRUE; // TRUE = thrown
}
// // Clonify: C // // Clone the series embedded in a value *if* it's in the given set of types // (and if "cloning" makes sense for them, e.g. they are not simple scalars). // // Note: The resulting clones will be managed. The model for lists only // allows the topmost level to contain unmanaged values...and we *assume* the // values we are operating on here live inside of an array. // void Clonify( REBVAL *v, REBFLGS flags, REBU64 types ){ if (C_STACK_OVERFLOWING(&types)) Fail_Stack_Overflow(); // !!! It may be possible to do this faster/better, the impacts on higher // quoting levels could be incurring more cost than necessary...but for // now err on the side of correctness. Unescape the value while cloning // and then escape it back. // REBCNT num_quotes = VAL_NUM_QUOTES(v); Dequotify(v); enum Reb_Kind kind = cast(enum Reb_Kind, KIND_BYTE_UNCHECKED(v)); assert(kind < REB_MAX_PLUS_MAX); // we dequoted it (pseudotypes ok) if (types & FLAGIT_KIND(kind) & TS_SERIES_OBJ) { // // Objects and series get shallow copied at minimum // REBSER *series; if (ANY_CONTEXT(v)) { INIT_VAL_CONTEXT_VARLIST( v, CTX_VARLIST(Copy_Context_Shallow_Managed(VAL_CONTEXT(v))) ); series = SER(CTX_VARLIST(VAL_CONTEXT(v))); } else { if (IS_SER_ARRAY(VAL_SERIES(v))) { series = SER( Copy_Array_At_Extra_Shallow( VAL_ARRAY(v), 0, // !!! what if VAL_INDEX() is nonzero? VAL_SPECIFIER(v), 0, NODE_FLAG_MANAGED ) ); INIT_VAL_NODE(v, series); // copies args // If it was relative, then copying with a specifier // means it isn't relative any more. // INIT_BINDING(v, UNBOUND); } else { series = Copy_Sequence_Core( VAL_SERIES(v), NODE_FLAG_MANAGED ); INIT_VAL_NODE(v, series); } } // If we're going to copy deeply, we go back over the shallow // copied series and "clonify" the values in it. 
// if (types & FLAGIT_KIND(kind) & TS_ARRAYS_OBJ) { REBVAL *sub = KNOWN(ARR_HEAD(ARR(series))); for (; NOT_END(sub); ++sub) Clonify(sub, flags, types); } } else if (types & FLAGIT_KIND(kind) & FLAGIT_KIND(REB_ACTION)) { // // !!! While Ren-C has abandoned the concept of copying the body // of functions (they are black boxes which may not *have* a // body), it would still theoretically be possible to do what // COPY does and make a function with a new and independently // hijackable identity. Assume for now it's better that the // HIJACK of a method for one object will hijack it for all // objects, and one must filter in the hijacking's body if one // wants to take more specific action. // assert(false); } else { // We're not copying the value, so inherit the const bit from the // original value's point of view, if applicable. // if (NOT_CELL_FLAG(v, EXPLICITLY_MUTABLE)) v->header.bits |= (flags & ARRAY_FLAG_CONST_SHALLOW); } Quotify(v, num_quotes); }
// // Make_Vector_Spec: C // // Make a vector from a block spec. // // make vector! [integer! 32 100] // make vector! [decimal! 64 100] // make vector! [unsigned integer! 32] // Fields: // signed: signed, unsigned // datatypes: integer, decimal // dimensions: 1 - N // bitsize: 1, 8, 16, 32, 64 // size: integer units // init: block of values // REBVAL *Make_Vector_Spec(RELVAL *bp, REBCTX *specifier, REBVAL *value) { REBINT type = -1; // 0 = int, 1 = float REBINT sign = -1; // 0 = signed, 1 = unsigned REBINT dims = 1; REBINT bits = 32; REBCNT size = 1; REBSER *vect; REBVAL *iblk = 0; // UNSIGNED if (IS_WORD(bp) && VAL_WORD_SYM(bp) == SYM_UNSIGNED) { sign = 1; bp++; } // INTEGER! or DECIMAL! if (IS_WORD(bp)) { if (SAME_SYM_NONZERO(VAL_WORD_SYM(bp), SYM_FROM_KIND(REB_INTEGER))) type = 0; else if ( SAME_SYM_NONZERO(VAL_WORD_SYM(bp), SYM_FROM_KIND(REB_DECIMAL)) ){ type = 1; if (sign > 0) return 0; } else return 0; bp++; } if (type < 0) type = 0; if (sign < 0) sign = 0; // BITS if (IS_INTEGER(bp)) { bits = Int32(KNOWN(bp)); if ( (bits == 32 || bits == 64) || (type == 0 && (bits == 8 || bits == 16)) ) bp++; else return 0; } else return 0; // SIZE if (NOT_END(bp) && IS_INTEGER(bp)) { if (Int32(KNOWN(bp)) < 0) return 0; size = Int32(KNOWN(bp)); bp++; } // Initial data: if (NOT_END(bp) && (IS_BLOCK(bp) || IS_BINARY(bp))) { REBCNT len = VAL_LEN_AT(bp); if (IS_BINARY(bp) && type == 1) return 0; if (len > size) size = len; iblk = KNOWN(bp); bp++; } VAL_RESET_HEADER(value, REB_VECTOR); // Index offset: if (NOT_END(bp) && IS_INTEGER(bp)) { VAL_INDEX(value) = (Int32s(KNOWN(bp), 1) - 1); bp++; } else VAL_INDEX(value) = 0; if (NOT_END(bp)) return 0; vect = Make_Vector(type, sign, dims, bits, size); if (!vect) return 0; if (iblk) Set_Vector_Row(vect, iblk); INIT_VAL_SERIES(value, vect); MANAGE_SERIES(vect); // index set earlier return value; }