/* for sv_dup on new threads */
struct mro_meta*
Perl_mro_meta_dup(pTHX_ struct mro_meta* smeta, CLONE_PARAMS* param)
{
    struct mro_meta* newmeta;

    PERL_ARGS_ASSERT_MRO_META_DUP;

    Newx(newmeta, 1, struct mro_meta);
    Copy(smeta, newmeta, 1, struct mro_meta);

    if (newmeta->mro_linear_dfs) {
        newmeta->mro_linear_dfs
            = MUTABLE_AV(SvREFCNT_inc(sv_dup((const SV *)newmeta->mro_linear_dfs, param)));
        /* This is just acting as a shortcut pointer, and will be
           automatically updated on the first get.  */
        newmeta->mro_linear_c3 = NULL;
    } else if (newmeta->mro_linear_c3) {
        /* Only the current MRO is stored, so this owns the data.  */
        newmeta->mro_linear_c3
            = MUTABLE_AV(SvREFCNT_inc(sv_dup((const SV *)newmeta->mro_linear_c3, param)));
    }

    if (newmeta->mro_nextmethod)
        newmeta->mro_nextmethod
            = MUTABLE_HV(SvREFCNT_inc(sv_dup((const SV *)newmeta->mro_nextmethod, param)));
    if (newmeta->isa)
        newmeta->isa
            = MUTABLE_HV(SvREFCNT_inc(sv_dup((const SV *)newmeta->isa, param)));

    return newmeta;
}
SV *
Perl_mro_set_private_data(pTHX_ struct mro_meta *const smeta,
                          const struct mro_alg *const which, SV *const data)
{
    PERL_ARGS_ASSERT_MRO_SET_PRIVATE_DATA;

    if (!smeta->mro_linear_dfs) {
        if (smeta->mro_which == which) {
            /* If all we need to store is the current MRO's data, then don't
               use memory on a hash with 1 element - store it direct, and
               signal this by leaving the would-be-hash NULL.  */
            smeta->mro_linear_c3 = MUTABLE_AV(data);
            return data;
        } else {
            HV *const hv = newHV();
            /* Start with 2 buckets. It's unlikely we'll need more.  */
            HvMAX(hv) = 1;
            smeta->mro_linear_dfs = MUTABLE_AV(hv);

            if (smeta->mro_linear_c3) {
                /* If we were storing something directly, put it in the hash
                   before we lose it.  */
                Perl_mro_set_private_data(aTHX_ smeta, smeta->mro_which,
                                          MUTABLE_SV(smeta->mro_linear_c3));
            }
        }
    }

    /* We get here if we're storing more than one linearisation for this
       stash, or the linearisation we are storing is not that of its current
       MRO.  */

    if (smeta->mro_which == which) {
        /* If we've been asked to store the private data for the current MRO,
           then cache it.  */
        smeta->mro_linear_c3 = MUTABLE_AV(data);
    }

    if (!Perl_hv_common(aTHX_ MUTABLE_HV(smeta->mro_linear_dfs), NULL,
                        which->name, which->length, which->kflags,
                        HV_FETCH_ISSTORE, data, which->hash)) {
        Perl_croak(aTHX_ "panic: hv_store() failed in set_mro_private_data() "
                   "for '%.*s' %d", (int) which->length, which->name,
                   which->kflags);
    }

    return data;
}
AV *
Perl_newAV(pTHX)
{
    return MUTABLE_AV(newSV_type(SVt_PVAV));
    /* sv_upgrade does AvREAL_only():
    AvALLOC(av) = 0;
    AvARRAY(av) = NULL;
    AvMAX(av) = AvFILLp(av) = -1;
    */
}
SV *
Perl_mro_get_private_data(pTHX_ struct mro_meta *const smeta,
                          const struct mro_alg *const which)
{
    SV **data;
    PERL_ARGS_ASSERT_MRO_GET_PRIVATE_DATA;

    data = (SV **)Perl_hv_common(aTHX_ MUTABLE_HV(smeta->mro_linear_dfs), NULL,
                                 which->name, which->length, which->kflags,
                                 HV_FETCH_JUST_SV, NULL, which->hash);
    if (!data)
        return NULL;

    /* If we've been asked to look up the private data for the current MRO,
       then cache it.  */
    if (smeta->mro_which == which)
        smeta->mro_linear_c3 = MUTABLE_AV(*data);

    return *data;
}
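/* The two routines above are the storage side of the pluggable-MRO API.
   What follows is a hedged, illustrative sketch (not part of this file) of
   how a third-party linearisation might use them.  It assumes the usual core
   headers and the struct mro_alg layout implied above (a resolve callback
   plus the name/length/kflags/hash members); the "example" algorithm, its
   name and its trivial resolve body are hypothetical.  */
#if 0   /* illustration only */
static AV* S_mro_get_linear_isa_example(pTHX_ HV *stash, U32 level);

static const struct mro_alg example_alg =
    {S_mro_get_linear_isa_example, "example", 7, 0, 0};

static AV*
S_mro_get_linear_isa_example(pTHX_ HV *stash, U32 level)
{
    struct mro_meta *const meta = HvMROMETA(stash);
    AV *retval;
    PERL_UNUSED_ARG(level);

    /* Return the per-stash cache if we already linearised this stash.  */
    if ((retval = MUTABLE_AV(MRO_GET_PRIVATE_DATA(meta, &example_alg))))
        return retval;

    /* A real resolver would walk @ISA here; this sketch stores just the
       stash's own name.  */
    retval = newAV();
    av_push(retval, newSVhek(HvNAME_HEK(stash)));
    SvREADONLY_on(retval);

    /* Hand the new AV to the per-stash cache, which takes over the reference
       created by newAV() above and returns the same SV.  */
    return MUTABLE_AV(Perl_mro_set_private_data(aTHX_ meta, &example_alg,
                                                MUTABLE_SV(retval)));
}
/* A plugin would then register itself once at boot time, e.g. with
   Perl_mro_register(aTHX_ &example_alg);  */
#endif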
/*
=for apidoc mro_get_linear_isa_dfs

Returns the Depth-First Search linearization of @ISA
of the given stash.  The return value is a read-only AV*.
C<level> should be 0 (it is used internally in this
function's recursion).

You are responsible for C<SvREFCNT_inc()> on the
return value if you plan to store it anywhere
semi-permanently (otherwise it might be deleted
out from under you the next time the cache is
invalidated).

=cut
*/
static AV*
S_mro_get_linear_isa_dfs(pTHX_ HV *stash, U32 level)
{
    AV* retval;
    GV** gvp;
    GV* gv;
    AV* av;
    const HEK* stashhek;
    struct mro_meta* meta;
    SV *our_name;
    HV *stored;

    PERL_ARGS_ASSERT_MRO_GET_LINEAR_ISA_DFS;
    assert(HvAUX(stash));

    stashhek = HvNAME_HEK(stash);
    if (!stashhek)
        Perl_croak(aTHX_ "Can't linearize anonymous symbol table");

    if (level > 100)
        Perl_croak(aTHX_ "Recursive inheritance detected in package '%s'",
                   HEK_KEY(stashhek));

    meta = HvMROMETA(stash);

    /* return cache if valid */
    if((retval = MUTABLE_AV(MRO_GET_PRIVATE_DATA(meta, &dfs_alg)))) {
        return retval;
    }

    /* not in cache, make a new one */

    retval = MUTABLE_AV(sv_2mortal(MUTABLE_SV(newAV())));
    /* We use this later in this function, but don't need a reference to it
       beyond the end of this function, so reference count is fine.  */
    our_name = newSVhek(stashhek);
    av_push(retval, our_name); /* add ourselves at the top */

    /* fetch our @ISA */
    gvp = (GV**)hv_fetchs(stash, "ISA", FALSE);
    av = (gvp && (gv = *gvp) && isGV_with_GP(gv)) ? GvAV(gv) : NULL;

    /* "stored" is used to keep track of all of the classnames we have added
       to the MRO so far, so we can do a quick exists check and avoid adding
       duplicate classnames to the MRO as we go.
       It's then retained to be re-used as a fast lookup for ->isa(), by
       adding our own name and "UNIVERSAL" to it.  */

    stored = MUTABLE_HV(sv_2mortal(MUTABLE_SV(newHV())));

    if(av && AvFILLp(av) >= 0) {

        SV **svp = AvARRAY(av);
        I32 items = AvFILLp(av) + 1;

        /* foreach(@ISA) */
        while (items--) {
            SV* const sv = *svp++;
            HV* const basestash = gv_stashsv(sv, 0);
            SV *const *subrv_p;
            I32 subrv_items;

            if (!basestash) {
                /* if no stash exists for this @ISA member,
                   simply add it to the MRO and move on */
                subrv_p = &sv;
                subrv_items = 1;
            }
            else {
                /* otherwise, recurse into ourselves for the MRO
                   of this @ISA member, and append their MRO to ours.
                   The recursive call could throw an exception, which
                   has memory management implications here, hence the use of
                   the mortal.  */
                const AV *const subrv
                    = mro_get_linear_isa_dfs(basestash, level + 1);

                subrv_p = AvARRAY(subrv);
                subrv_items = AvFILLp(subrv) + 1;
            }
            while(subrv_items--) {
                SV *const subsv = *subrv_p++;
                /* LVALUE fetch will create a new undefined SV if necessary
                 */
                HE *const he = hv_fetch_ent(stored, subsv, 1, 0);
                assert(he);
                if(HeVAL(he) != &PL_sv_undef) {
                    /* It was newly created.  Steal it for our new SV, and
                       replace it in the hash with the "real" thing.  */
                    SV *const val = HeVAL(he);
                    HEK *const key = HeKEY_hek(he);

                    HeVAL(he) = &PL_sv_undef;
                    /* Save copying by making a shared hash key scalar. We
                       inline this here rather than calling
                       Perl_newSVpvn_share because we already have the
                       scalar, and we already have the hash key.  */
                    assert(SvTYPE(val) == SVt_NULL);
                    sv_upgrade(val, SVt_PV);
                    SvPV_set(val, HEK_KEY(share_hek_hek(key)));
                    SvCUR_set(val, HEK_LEN(key));
                    SvREADONLY_on(val);
                    SvFAKE_on(val);
                    SvPOK_on(val);
                    if (HEK_UTF8(key))
                        SvUTF8_on(val);

                    av_push(retval, val);
                }
            }
        }
    }

    (void) hv_store_ent(stored, our_name, &PL_sv_undef, 0);
    (void) hv_store(stored, "UNIVERSAL", 9, &PL_sv_undef, 0);

    SvREFCNT_inc_simple_void_NN(stored);
    SvTEMP_off(stored);
    SvREADONLY_on(stored);

    meta->isa = stored;

    /* now that we're past the exception dangers, grab our own reference to
       the AV we're about to use for the result. The reference owned by the
       mortals' stack will be released soon, so everything will balance.  */
    SvREFCNT_inc_simple_void_NN(retval);
    SvTEMP_off(retval);

    /* we don't want anyone modifying the cache entry but us,
       and we do so by replacing it completely */
    SvREADONLY_on(retval);

    return MUTABLE_AV(Perl_mro_set_private_data(aTHX_ meta, &dfs_alg,
                                                MUTABLE_SV(retval)));
}
/*
=for apidoc mro_get_linear_isa_dfs

Returns the Depth-First Search linearization of C<@ISA>
of the given stash.  The return value is a read-only AV*.
C<level> should be 0 (it is used internally in this
function's recursion).

You are responsible for C<SvREFCNT_inc()> on the
return value if you plan to store it anywhere
semi-permanently (otherwise it might be deleted
out from under you the next time the cache is
invalidated).

=cut
*/
static AV*
S_mro_get_linear_isa_dfs(pTHX_ HV *stash, U32 level)
{
    AV* retval;
    GV** gvp;
    GV* gv;
    AV* av;
    const HEK* stashhek;
    struct mro_meta* meta;
    SV *our_name;
    HV *stored = NULL;

    PERL_ARGS_ASSERT_MRO_GET_LINEAR_ISA_DFS;
    assert(HvAUX(stash));

    stashhek = HvAUX(stash)->xhv_name_u.xhvnameu_name && HvENAME_HEK_NN(stash)
             ? HvENAME_HEK_NN(stash)
             : HvNAME_HEK(stash);

    if (!stashhek)
        Perl_croak(aTHX_ "Can't linearize anonymous symbol table");

    if (level > 100)
        Perl_croak(aTHX_
                   "Recursive inheritance detected in package '%"HEKf"'",
                   HEKfARG(stashhek));

    meta = HvMROMETA(stash);

    /* return cache if valid */
    if((retval = MUTABLE_AV(MRO_GET_PRIVATE_DATA(meta, &dfs_alg)))) {
        return retval;
    }

    /* not in cache, make a new one */

    retval = MUTABLE_AV(sv_2mortal(MUTABLE_SV(newAV())));
    /* We use this later in this function, but don't need a reference to it
       beyond the end of this function, so reference count is fine.  */
    our_name = newSVhek(stashhek);
    av_push(retval, our_name); /* add ourselves at the top */

    /* fetch our @ISA */
    gvp = (GV**)hv_fetchs(stash, "ISA", FALSE);
    av = (gvp && (gv = *gvp) && isGV_with_GP(gv)) ? GvAV(gv) : NULL;

    /* "stored" is used to keep track of all of the classnames we have added
       to the MRO so far, so we can do a quick exists check and avoid adding
       duplicate classnames to the MRO as we go.
       It's then retained to be re-used as a fast lookup for ->isa(), by
       adding our own name and "UNIVERSAL" to it.  */

    if(av && AvFILLp(av) >= 0) {

        SV **svp = AvARRAY(av);
        I32 items = AvFILLp(av) + 1;

        /* foreach(@ISA) */
        while (items--) {
            SV* const sv = *svp ? *svp : &PL_sv_undef;
            HV* const basestash = gv_stashsv(sv, 0);
            SV *const *subrv_p;
            I32 subrv_items;
            svp++;

            if (!basestash) {
                /* if no stash exists for this @ISA member,
                   simply add it to the MRO and move on */
                subrv_p = &sv;
                subrv_items = 1;
            }
            else {
                /* otherwise, recurse into ourselves for the MRO
                   of this @ISA member, and append their MRO to ours.
                   The recursive call could throw an exception, which
                   has memory management implications here, hence the use of
                   the mortal.  */
                const AV *const subrv
                    = mro_get_linear_isa_dfs(basestash, level + 1);

                subrv_p = AvARRAY(subrv);
                subrv_items = AvFILLp(subrv) + 1;
            }
            if (stored) {
                while(subrv_items--) {
                    SV *const subsv = *subrv_p++;
                    /* LVALUE fetch will create a new undefined SV if
                       necessary */
                    HE *const he = hv_fetch_ent(stored, subsv, 1, 0);
                    assert(he);
                    if(HeVAL(he) != &PL_sv_undef) {
                        /* It was newly created.  Steal it for our new SV,
                           and replace it in the hash with the "real"
                           thing.  */
                        SV *const val = HeVAL(he);
                        HEK *const key = HeKEY_hek(he);

                        HeVAL(he) = &PL_sv_undef;
                        sv_sethek(val, key);
                        av_push(retval, val);
                    }
                }
            } else {
                /* We are the first (or only) parent. We can short cut the
                   complexity above, because our @ISA is simply us prepended
                   to our parent's @ISA, and our ->isa cache is simply our
                   parent's, with our name added.  */
                /* newSVsv() is slow. This code is only faster if we can
                   avoid it by ensuring that SVs in the arrays are shared
                   hash key scalar SVs, because we can "copy" them very
                   efficiently.  Although to be fair, we can't *ensure* this,
                   as a reference to the internal array is returned by
                   mro::get_linear_isa(), so we'll have to be defensive just
                   in case someone faffed with it.  */
                if (basestash) {
                    SV **svp;
                    stored = MUTABLE_HV(sv_2mortal((SV*)newHVhv(HvMROMETA(basestash)->isa)));
                    av_extend(retval, subrv_items);
                    AvFILLp(retval) = subrv_items;
                    svp = AvARRAY(retval);
                    while(subrv_items--) {
                        SV *const val = *subrv_p++;
                        *++svp = SvIsCOW_shared_hash(val)
                            ? newSVhek(SvSHARED_HEK_FROM_PV(SvPVX(val)))
                            : newSVsv(val);
                    }
                } else {
                    /* They have no stash.  So create ourselves an ->isa
                       cache as if we'd copied it from what theirs should
                       be.  */
                    stored = MUTABLE_HV(sv_2mortal(MUTABLE_SV(newHV())));
                    (void) hv_store(stored, "UNIVERSAL", 9, &PL_sv_undef, 0);
                    av_push(retval,
                            newSVhek(HeKEY_hek(hv_store_ent(stored, sv,
                                                            &PL_sv_undef,
                                                            0))));
                }
            }
        }
    } else {
        /* We have no parents.  */
        stored = MUTABLE_HV(sv_2mortal(MUTABLE_SV(newHV())));
        (void) hv_store(stored, "UNIVERSAL", 9, &PL_sv_undef, 0);
    }

    (void) hv_store_ent(stored, our_name, &PL_sv_undef, 0);

    SvREFCNT_inc_simple_void_NN(stored);
    SvTEMP_off(stored);
    SvREADONLY_on(stored);

    meta->isa = stored;

    /* now that we're past the exception dangers, grab our own reference to
       the AV we're about to use for the result. The reference owned by the
       mortals' stack will be released soon, so everything will balance.  */
    SvREFCNT_inc_simple_void_NN(retval);
    SvTEMP_off(retval);

    /* we don't want anyone modifying the cache entry but us,
       and we do so by replacing it completely */
    SvREADONLY_on(retval);

    return MUTABLE_AV(Perl_mro_set_private_data(aTHX_ meta, &dfs_alg,
                                                MUTABLE_SV(retval)));
}
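/* A hedged caller-side sketch of the refcount contract spelled out in the
   apidoc above: anyone holding on to a linearisation beyond the current
   scope must take their own reference, because the read-only AV belongs to
   the MRO cache and may go away when the cache is invalidated.  It uses the
   public mro_get_linear_isa() wrapper from the API (assumed available
   here); the file-scope cached_isa slot is purely hypothetical.  */
#if 0   /* illustration only */
static AV *cached_isa;  /* hypothetical semi-permanent home for the result */

static void
S_remember_linearisation(pTHX_ HV *stash)
{
    AV *const isa_lin = mro_get_linear_isa(stash);

    /* Drop whatever we cached before, then claim our own reference to the
       new linearisation before stashing it away.  */
    SvREFCNT_dec(MUTABLE_SV(cached_isa));
    cached_isa = MUTABLE_AV(SvREFCNT_inc(MUTABLE_SV(isa_lin)));
}
#endif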
static AV*
S_mro_get_linear_isa_c3(pTHX_ HV* stash, U32 level)
{
    AV* retval;
    GV** gvp;
    GV* gv;
    AV* isa;
    const HEK* stashhek;
    struct mro_meta* meta;

    PERL_ARGS_ASSERT_MRO_GET_LINEAR_ISA_C3;
    assert(HvAUX(stash));

    stashhek = HvNAME_HEK(stash);
    if (!stashhek)
        Perl_croak(aTHX_ "Can't linearize anonymous symbol table");

    if (level > 100)
        Perl_croak(aTHX_ "Recursive inheritance detected in package '%s'",
                   HEK_KEY(stashhek));

    meta = HvMROMETA(stash);

    /* return cache if valid */
    if((retval = meta->mro_linear_c3)) {
        return retval;
    }

    /* not in cache, make a new one */

    gvp = (GV**)hv_fetchs(stash, "ISA", FALSE);
    isa = (gvp && (gv = *gvp) && isGV_with_GP(gv)) ? GvAV(gv) : NULL;

    if ( isa && ! SvAVOK(isa) ) {
        Perl_croak(aTHX_ "@ISA is not an array but %s", Ddesc((SV*)isa));
    }

    /* For a better idea how the rest of this works, see the much clearer
       pure perl version in Algorithm::C3 0.01:
       http://search.cpan.org/src/STEVAN/Algorithm-C3-0.01/lib/Algorithm/C3.pm
       (later versions go about it differently than this code for speed
       reasons)
    */

    if(isa && AvFILLp(isa) >= 0) {
        SV** seqs_ptr;
        I32 seqs_items;
        HV* const tails = MUTABLE_HV(sv_2mortal(MUTABLE_SV(newHV())));
        AV *const seqs = MUTABLE_AV(sv_2mortal(MUTABLE_SV(newAV())));
        I32* heads;

        /* This builds @seqs, which is an array of arrays.
           The members of @seqs are the MROs of
           the members of @ISA, followed by @ISA itself.
        */
        I32 items = AvFILLp(isa) + 1;
        SV** isa_ptr = AvARRAY(isa);
        while(items--) {
            SV* const isa_item = *isa_ptr++;
            if ( ! SvPVOK(isa_item) ) {
                Perl_croak(aTHX_ "@ISA element which is not a plain value");
            }
            {
                HV* const isa_item_stash = gv_stashsv(isa_item, 0);
                if(!isa_item_stash) {
                    /* if no stash, make a temporary fake MRO
                       containing just itself */
                    AV* const isa_lin = newAV();
                    av_push(isa_lin, newSVsv(isa_item));
                    av_push(seqs, (SV*)isa_lin);
                }
                else {
                    /* recursion */
                    AV* const isa_lin
                        = mro_get_linear_isa_c3(isa_item_stash, level + 1);
                    av_push(seqs, SvREFCNT_inc_NN((SV*)isa_lin));
                }
            }
        }
        av_push(seqs, SvREFCNT_inc_NN((SV*)isa));

        /* This builds "heads", which is an array of integer array
           indices, one per seq, which point at the virtual "head"
           of the seq (initially zero) */
        Newxz(heads, AvFILLp(seqs)+1, I32);

        /* This builds %tails, which has one key for every class
           mentioned in the tail of any sequence in @seqs (tail meaning
           everything after the first class, the "head").  The value
           is how many times this key appears in the tails of @seqs.
        */
        seqs_ptr = AvARRAY(seqs);
        seqs_items = AvFILLp(seqs) + 1;
        while(seqs_items--) {
            AV *const seq = MUTABLE_AV(*seqs_ptr++);
            I32 seq_items = AvFILLp(seq);
            if(seq_items > 0) {
                SV** seq_ptr = AvARRAY(seq) + 1;
                while(seq_items--) {
                    SV* const seqitem = *seq_ptr++;
                    /* LVALUE fetch will create a new undefined SV if
                       necessary */
                    HE* const he = hv_fetch_ent(tails, seqitem, 1, 0);
                    if(he) {
                        SV* const val = HeVAL(he);
                        /* This will increment undef to 1, which is what we
                           want for a newly created entry.  */
                        sv_inc(val);
                    }
                }
            }
        }

        /* Initialize retval to build the return value in */
        retval = newAV();
        av_push(retval, newSVhek(stashhek)); /* us first */

        /* This loop won't terminate until we either finish building
           the MRO, or get an exception. */
        while(1) {
            SV* cand = NULL;
            SV* winner = NULL;
            int s;

            /* "foreach $seq (@seqs)" */
            SV** const avptr = AvARRAY(seqs);
            for(s = 0; s <= AvFILLp(seqs); s++) {
                SV** svp;
                AV * const seq = MUTABLE_AV(avptr[s]);
                SV* seqhead;
                if(!seq) continue; /* skip empty seqs */
                svp = av_fetch(seq, heads[s], 0);
                seqhead = *svp; /* seqhead = head of this seq */
                if(!winner) {
                    HE* tail_entry;
                    SV* val;
                    /* if we haven't found a winner for this round yet,
                       and this seqhead is not in tails (or the count
                       for it in tails has dropped to zero), then this
                       seqhead is our new winner, and is added to the
                       final MRO immediately */
                    cand = seqhead;
                    if((tail_entry = hv_fetch_ent(tails, cand, 0, 0))
                       && (val = HeVAL(tail_entry))
                       && (SvIV(val) > 0))
                           continue;
                    winner = newSVsv(cand);
                    av_push(retval, winner);
                    /* note however that even when we find a winner,
                       we continue looping over @seqs to do housekeeping */
                }
                if(!sv_cmp(seqhead, winner)) {
                    /* Once we have a winner (including the iteration
                       where we first found him), inc the head ptr
                       for any seq which had the winner as a head,
                       NULL out any seq which is now empty,
                       and adjust tails for consistency */

                    const int new_head = ++heads[s];
                    if(new_head > AvFILLp(seq)) {
                        SvREFCNT_dec(avptr[s]);
                        avptr[s] = NULL;
                    }
                    else {
                        HE* tail_entry;
                        SV* val;
                        /* Because we know this new seqhead used to be
                           a tail, we can assume it is in tails and has
                           a positive value, which we need to dec */
                        svp = av_fetch(seq, new_head, 0);
                        seqhead = *svp;
                        tail_entry = hv_fetch_ent(tails, seqhead, 0, 0);
                        val = HeVAL(tail_entry);
                        sv_dec(val);
                    }
                }
            }

            /* if we found no candidates, we are done building the MRO.
               !cand means no seqs have any entries left to check */
            if(!cand) {
                Safefree(heads);
                break;
            }

            /* If we had candidates, but nobody won, then the @ISA
               hierarchy is not C3-consistent */
            if(!winner) {
                SV *errmsg;
                I32 i;

                errmsg = Perl_newSVpvf(aTHX_
                           "Inconsistent hierarchy during C3 merge of class '%s':\n\t"
                           "current merge results [\n",
                           HEK_KEY(stashhek));
                for (i = 0; i <= av_len(retval); i++) {
                    SV **elem = av_fetch(retval, i, 0);
                    Perl_sv_catpvf(aTHX_ errmsg, "\t\t%"SVf",\n",
                                   SVfARG(*elem));
                }
                Perl_sv_catpvf(aTHX_ errmsg,
                               "\t]\n\tmerging failed on '%"SVf"'",
                               SVfARG(cand));

                /* we have to do some cleanup before we croak */

                SvREFCNT_dec(MUTABLE_SV(retval));
                Safefree(heads);

                Perl_croak(aTHX_ "%"SVf, SVfARG(errmsg));
            }
        }
    }
    else { /* @ISA was undefined or empty */
        /* build a retval containing only ourselves */
        retval = newAV();
        av_push(retval, newSVhek(stashhek));
    }

    /* we don't want anyone modifying the cache entry but us,
       and we do so by replacing it completely */
    SvREADONLY_on(retval);

    meta->mro_linear_c3 = retval;
    return retval;
}
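/* A hedged sketch (illustration only) of consuming a linearisation such as
   the one the C3 merge above produces: a read-only AV of class-name SVs
   with the class itself at index 0.  It goes through the public
   mro_get_linear_isa() wrapper (assumed available), which dispatches to the
   stash's current MRO, and walks the array the same way the code above does
   with AvARRAY()/AvFILLp().  */
#if 0   /* illustration only */
static void
S_dump_linearisation(pTHX_ HV *stash)
{
    AV *const isa_lin = mro_get_linear_isa(stash);
    SV **svp = AvARRAY(isa_lin);
    I32 items = AvFILLp(isa_lin) + 1;

    /* foreach(@linearised_isa) */
    while (items--) {
        SV *const classname = *svp++;
        PerlIO_printf(Perl_debug_log, "  %"SVf"\n", SVfARG(classname));
    }
}
#endif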