void frame_gettree_recur(framestruc **fin, int ord, framestruc ***fout)
{
    framestruc **x;

    if (!fin)
        return;
    if (ord == 0)       /* ord==0: append the whole sibling group before descending */
        for (x = fin; *x; x++)
            *fout = alist_append(*fout, *x);
    for (x = fin; *x; x++) {
        if (ord == 1)   /* ord==1: pre-order */
            *fout = alist_append(*fout, *x);
        if (FRTREEMARK(*x) == 11) {
            PROGERROR("Probably cycled recursion in frames!");
            return;
        }
        FRTREEMARK(*x) = 11;    /* mark to detect cycles during the descent */
        frame_gettree_recur(FRSONS(*x), ord, fout);
        FRTREEMARK(*x) = 0;
        if (ord == 2)   /* ord==2: post-order */
            *fout = alist_append(*fout, *x);
    }
}
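/*
 * A minimal sketch of a caller for the routine above, assuming the
 * new_alist() constructor used by the other alist helpers in this section;
 * the helper name frame_collect_preorder and the capacity hint are
 * illustrative, not part of the original code.
 */
static framestruc **frame_collect_preorder(framestruc **roots)
{
    framestruc **out = new_alist(16);       /* assumed alist constructor */

    frame_gettree_recur(roots, 1, &out);    /* 1 = pre-order visit */
    return out;     /* caller disposes of the list when done */
}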
bool
ld_map_dv_entry(Mapfile *mf, Sdf_desc *sdf, bool require, const char *version)
{
    Sdv_desc sdv;

    sdv.sdv_name = version;
    sdv.sdv_ref = mf->mf_name;
    sdv.sdv_flags = 0;

    if (require) {
        /*
         * Add a VERNEED entry for the specified version
         * from this object:
         *
         *    MapfileVersion    Syntax
         *    ----------------------------------------
         *    1                 obj - $ADDVERS=version;
         *    2                 DEPENDENCY obj { REQUIRE=version };
         */
        sdf->sdf_flags |= FLG_SDF_ADDVER;

        if (alist_append(&sdf->sdf_verneed, &sdv, sizeof (Sdv_desc),
            AL_CNT_SDF_VERSIONS) == NULL)
            return (false);
    } else {        /* Allow */
        /*
         * Allow linking to symbols found in this version, or
         * from the versions it inherits from.
         *
         *    MapfileVersion    Syntax
         *    ----------------------------------------
         *    1                 obj - version;
         *    2                 DEPENDENCY obj { ALLOW=version };
         */
        sdf->sdf_flags |= FLG_SDF_SELECT;

        if (alist_append(&sdf->sdf_vers, &sdv, sizeof (Sdv_desc),
            AL_CNT_SDF_VERSIONS) == NULL)
            return (false);
    }

    //DBG_CALL(Dbg_map_dv_entry(mf->mf_ofl->ofl_lml, mf->mf_lineno,
    //    require, version));

    return (true);
}
/*
 * Clean up (free) an audit descriptor.  First, gather a list of all handles,
 * and then close each one down.  This is done rather than using the handles
 * directly from the auditors, as the audit list can be torn down as a result
 * of the dlclose.  In other words, what you're pointing at can be removed
 * while you're still pointing at it.
 */
void audit_desc_cleanup(Audit_desc *adp, Rt_map *clmp)
{
    Audit_list *alp;
    Listnode *lnp, *olnp;
    Alist *ghalp = 0;

    if (adp == 0)
        return;
    if (adp->ad_name)
        free(adp->ad_name);

    olnp = 0;
    for (LIST_TRAVERSE(&(adp->ad_list), lnp, alp)) {
        (void) alist_append(&ghalp, &(alp->al_ghp), sizeof (Grp_hdl *),
            AL_CNT_GROUPS);
        if (olnp)
            free(olnp);
        olnp = lnp;
    }
    if (olnp)
        free(olnp);

    if (ghalp) {
        Grp_hdl **ghpp;
        Aliste off;

        for (ALIST_TRAVERSE(ghalp, off, ghpp))
            (void) dlclose_intn(*ghpp, clmp);
        free(ghalp);
    }
    free(adp);
}
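/*
 * The cleanup above uses a common defensive pattern: snapshot the handles
 * into a private list before running destructive callbacks, because each
 * dlclose may tear down parts of the list being walked.  A minimal
 * self-contained sketch of the same pattern in plain C follows; the
 * node/close_handle names are illustrative, not part of the code above.
 */
#include <stdlib.h>

struct node { struct node *next; void *handle; };

extern void close_handle(void *);   /* may free arbitrary nodes */

static void close_all(struct node *head)
{
    size_t n = 0, i;
    struct node *p;
    void **snap;

    for (p = head; p; p = p->next)  /* count, then snapshot */
        n++;
    if (n == 0 || (snap = malloc(n * sizeof (void *))) == NULL)
        return;
    for (p = head, i = 0; p; p = p->next)
        snap[i++] = p->handle;
    for (i = 0; i < n; i++)         /* now safe to run destructive calls */
        close_handle(snap[i]);
    free(snap);
}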
void frame_setson_ext(framestruc *fr, framestruc *parent, framestruc *where)
{
    framestruc *pold, **rr;

    if (!fr)
        return;
    pold = FRPARENT(fr);
    if (pold) {     /* removing from the son-list of its old parent */
        if (!FRSONS(pold))
            rr = NULL;
        else
            for (rr = FRSONS(pold); *rr && *rr != fr; rr++)
                ;
        if (rr ? (*rr == fr) : 0) {
            alist_delete(rr);
            FRNUMSONS(pold)--;
        } else {
            PROGERROR("Probably broken list of sons (%p) in the parent %p.", fr, pold);
        }
    }
    FRPARENT(fr) = parent;
    if (parent) {   /* the new parent */
        if (!where) {
            FRSONS_XD(parent) = alist_append(FRSONS(parent), fr);
        } else {
            if (!FRSONS(parent))
                rr = NULL;
            else
                for (rr = FRSONS(parent); *rr && *rr != where; rr++)
                    ;
            if (rr ? (*rr != where) : 1) {
                PROGERROR("Nonexistent \"where\" position given.");
            }
            FRSONS_XD(parent) = alist_insert(FRSONS(parent), rr, fr);
        }
        FRNUMSONS(parent)++;
    }
}
void* alist_applist(void *ls, void *apl)
{
    void **x;

    if (!ls)
        return apl;
    for (x = apl; x ? *x : 0; x++)
        ls = alist_append(ls, *x);
    if (apl)
        alist_free(apl);
    return ls;
}
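/*
 * alist_applist() concatenates: it appends every element of apl onto ls and
 * then frees the shell of apl itself.  A hedged usage sketch, assuming the
 * new_alist()/alist_append() helpers shown in this section; merge_example
 * is an illustrative name only.
 */
static void *merge_example(void *a, void *b)
{
    void *ls = new_alist(8);
    void *ap = new_alist(8);

    ls = alist_append(ls, a);
    ap = alist_append(ap, b);
    /* After this call ap must not be used again: its elements now live
     * in ls and its own storage has been freed. */
    ls = alist_applist(ls, ap);
    return ls;
}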
void* alist_copy(void *ls)
{
    void **x, **st, **ln;

    if (!ls) {
        PROGERROR("Cannot copy NULL list!");
        return ls;
    }
    ln = new_alist(20 + alist_getlength(ls));
    st = alist_getstart(ls);
    for (x = st; *x; x++)
        ln = alist_append(ln, *x);
    /* return the copy shifted to the caller's offset within the original list */
    return ln + ((void**)ls - st);
}
alist alist_copy(alist src)
{
    alist al;
    aentry ae;

    assert(src != NULL);

    al = alist_new();
    for (ae = src->head; ae != NULL; ae = ae->next)
        alist_append(al, ae->p);

    return al;
}
void frame_addoption(framestruc *fr, optionstruc *op, int li)
{
    int i, k;
    char *s;

    FROPTIONS(fr) = alist_append(FROPTIONS(fr), op);
    FRLASTOPTI(fr) = li;

    for (i = k = 0; k < 4 && optdescdefs[i].name; i++) {
        s = optdescdefs[i].name;
        if (k >= 0 && strncmp(OPTNAME(op), s, strlen(s)) == 0)
            k = 1;
        if (OPTISNAME(op, s))
            k = 2;
        if (k >= 2)
            k = (optdescdefs[i].numpar < 0 ||
                OPTPARNUM(op) == optdescdefs[i].numpar) ? 4 : -1;
    }
    if (k <= 0) {
        USERERROR("Unknown option '%s' or wrong number of option values (%d).",
            OPTNAME(op), OPTPARNUM(op));
    }
}
static historyp make_variable_history(void)
{
    alist al;
    historyp hp;
    hpair *pair;

    al = htable_list(var_table);
    hp = new_history(FALSE);
    for (pair = alist_first(al); pair != NULL; pair = alist_next(al))
        alist_append(hp->completions, zstrdup(pair->key));
    alist_delete(al);

    return hp;
}
static void foo()
{
    (void) _crle_msg((Msg)&__crle_msg[0]);
    (void) alist_append(NULL, NULL, 0, 0);
    alist_delete_by_offset(NULL, NULL);
    (void) alist_insert_by_offset(NULL, NULL, 0, 0, 0);
    alist_reset(NULL);
    (void) aplist_delete_value(NULL, NULL);
    aplist_reset(NULL);
    (void) aplist_test(NULL, NULL, 0);
}
void* alist_insert(void *ls, void *ins, void *ai)
{
    void **et;
    long di;

    if (!ins || !ai) {
        PROGERROR("Cannot insert NULL element or NULL position to a list!");
        return ls;
    }
    if (!ls)
        ls = alist_getstart(ins);
    di = (void**)ins - (void**)ls;  /* (must keep the difference, since ls may change next) */
    ls = alist_append(ls, ai);
    et = alist_getend(ls);
    et--;
    if (di < 0 || di > et - (void**)ls) {
        PROGERROR("The insert position %p (%ld) is not in the list %p-%p?!", ins, di, ls, et);
    }
    for (; et - (void**)ls > di; et--)
        et[0] = et[-1];
    et[0] = ai;     /* the inserting position is now prepared */
    return ls;
}
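/*
 * The routine above implements insertion as append-then-shift: grow the list
 * by one with alist_append(), then slide the tail right until the target slot
 * opens up.  The same idea on a plain array, as a minimal standalone sketch
 * (array_insert is an illustrative name; the caller ensures capacity > len
 * and pos <= len).
 */
#include <string.h>

static void array_insert(void **arr, size_t len, size_t pos, void *v)
{
    /* shift the tail right by one, then drop the new element in */
    memmove(&arr[pos + 1], &arr[pos], (len - pos) * sizeof (void *));
    arr[pos] = v;
}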
int struct_matgirth_ext(int ch, ematrix *e, int cc, ematrix ***depout)
{
    ematrix **mat, **x, *y;
    int i, j, k, r;

    if (!e) {
        PROGERROR("The matrix e must be given here!");
        return -1;
    }
    if (depout ? *depout : 0) {
        PROGERROR("The list depout must be given initialized to NULL!");
    }
    DEBUG(CURDLEV+1, "Computing the shortest cycle (girth) of %p(%dx%d) [%s], cc=%d ...\n",
        e, ROWSM(e), COLSM(e), EMNAME(e) ? EMNAME(e) : "", cc);
    if (ROWSM(e) <= 0)
        return 0;
    if (COLSM(e) <= 0)
        return 9999;
    if (cc > ROWSM(e)+1)
        return -1;
    /**
     * We look for the shortest cycle(s) by brute force - trying all
     * k-element subsets of e and computing their rank in the matroid.
     * Whenever a cycle is found, we stop further search -- no bigger k.
     * We also collect the shortest cycles in *depout if requested.
    **/
    r = j = -1;
    for (k = 1; (k <= cc || cc < 0); k++) {
        if (k > ROWSM(e)+1) {
            PROGERROR("something wrong - no cycle found?! k=%d, cc=%d, rows=%d", k, cc, ROWSM(e));
            break;
        }
        mat = ematrix_submatrices_sub(e, k);
        DEBUG(CURDLEV+2, "Trying list of %d subsets of size %d...\n", alist_getlength(mat), k);
        for (x = mat; x ? *x : 0; x++) {
            i = ematrix_setrank(e, *x);
            if (i >= k)
                continue;
            if (i < k-1) {
                PROGERROR("something wrong - missed a shorter cycle?! k=%d,i=%d", k, i);
            }
            if (r <= 0)
                r = k;
            if (depout && r == k)
                *depout = alist_append(*depout, ematrix_copy(*x));
            else
                break;
        }
        if (mat)
            dispose_alist_mats(mat);
        if (r > 0)
            break;
    }
    if (cc > 0 && r < 0)
        r = cc;     /* (if no cycle shorter than cc found above) */
#ifndef FASTPROG
    /* paranoid testing of the cycle computation: */
    if (IFRANDDEBUGLESS(222) && ch >= 0) {
        y = ematrix_copy(e);
        for (k = 0; k < 6; k++) {   /* (extra testing with pivoted matrix) */
            i = RANDOM() % ROWSM(y);
            j = RANDOM() % COLSM(y);
            if (SIGNM(y,i,j) != 0)
                ematrix_pivot(y, i, j);
        }
        if (r != struct_matgirth_ext(-1, y, cc, NULL)) {
            PROGERROR("incorrect computation of shortest cycle, ret=%d", r);
        }
        dispose_ematrix(y);
    }
    DEBUG(CURDLEV+1, " - shortest cycle (girth) computed %s %d\n",
        (cc < 0 ? "exactly" : (r >= 0 ? "" : "greater than")), r);
#endif
    return r;   /* the return value k, cc, or -1 computed above */
}
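/*
 * The girth search above enumerates k-element subsets for increasing k and
 * stops at the first dependent set (rank < k).  The same search shape over a
 * generic rank oracle, as a standalone sketch - rank_of() is a hypothetical
 * callback, and n is assumed to be at most 64 here.
 */
static int girth_bruteforce(int n, int (*rank_of)(const int *idx, int k))
{
    int idx[64], i, j, k;

    for (k = 1; k <= n; k++) {
        for (i = 0; i < k; i++)     /* first combination: 0,1,...,k-1 */
            idx[i] = i;
        for (;;) {
            if (rank_of(idx, k) < k)
                return k;           /* dependent set found: girth is k */
            i = k - 1;              /* advance to the next lexicographic combination */
            while (i >= 0 && idx[i] == n - k + i)
                i--;
            if (i < 0)
                break;
            idx[i]++;
            for (j = i + 1; j < k; j++)
                idx[j] = idx[j-1] + 1;
        }
    }
    return -1;                      /* independent throughout - no cycle */
}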
static Audit_client *
_audit_create_head_client(Rt_map *hlmp, Rt_map *almp)
{
    Audit_client ac, *acp;
    Lm_list *hlml = LIST(hlmp);

    ac.ac_lmp = almp;
    ac.ac_cookie = (uintptr_t)hlmp;
    ac.ac_flags = 0;

    if ((acp = alist_append(&(hlml->lm_aud_cookies), &ac,
        sizeof (Audit_client), AL_CNT_COOKIES)) == NULL)
        return (NULL);

    return (acp);
}
bool
ld_map_seg_ent_files(Mapfile *mf, Ent_desc *enp, Elf64_Word ecf_type,
    const char *str)
{
    Ent_desc_file edf;

    /*
     * The v1 sysv syntax can let an empty string get in, consisting of
     * just a '*' where the '*' is interpreted as 'basename'.
     */
    if (str[0] == '\0') {
        mf_fatal0(mf, (MSG_MAP_MALFORM));
        return (false);
    }

    /* Basename or objname string must not contain a path separator (/) */
    if ((ecf_type != TYP_ECF_PATH) && (strchr(str, '/') != NULL)) {
        const char *msg = (ecf_type == TYP_ECF_BASENAME) ?
            (MSG_MAP_BADBNAME) : (MSG_MAP_BADONAME);

        mf_fatal(mf, msg, str);
        return (false);
    }

    edf.edf_flags = ecf_type;
    edf.edf_name = str;
    edf.edf_name_len = strlen(edf.edf_name);

    /* Does it have an archive member suffix? */
    if ((edf.edf_name[edf.edf_name_len - 1] == ')') &&
        (strrchr(edf.edf_name, '(') != NULL))
        edf.edf_flags |= FLG_ECF_ARMEMBER;

    if (alist_append(&enp->ec_files, &edf, sizeof (edf),
        AL_CNT_EC_FILES) == NULL)
        return (false);

    /*
     * Note that an entrance criteria requiring file name matching exists
     * in the system.  This is used by ld_place_path_info_init() to
     * skip Place_pathinfo initialization in cases where there are
     * no entrance criteria that will use the results.
     */
    mf->mf_ofl->ofl_flags |= FLG_OF_EC_FILES;

    return (true);
}
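/*
 * The archive-member test above flags names of the form "libfoo.a(bar.o)".
 * The same check in isolation, as a small self-contained sketch
 * (has_ar_member_suffix is an illustrative name):
 */
#include <stdbool.h>
#include <string.h>

static bool has_ar_member_suffix(const char *name)
{
    size_t len = strlen(name);

    /* true for names ending in "(member)", e.g. "libfoo.a(bar.o)" */
    return (len > 0) && (name[len - 1] == ')') &&
        (strrchr(name, '(') != NULL);
}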
/*
 * Add a section name to the output section sort list for the given
 * segment.
 *
 * entry:
 *    mf - Mapfile descriptor
 *    sgp - Segment in question
 *    sec_name - Name of section to be added.
 *
 * exit:
 *    Returns true for success, false for failure.
 */
bool
ld_map_seg_os_order_add(Mapfile *mf, Sg_desc *sgp, const char *sec_name)
{
    size_t idx;
    Sec_order *scop;

    /*
     * Make sure it's not already on the list.
     */
    for (ALIST_TRAVERSE(sgp->sg_os_order, idx, scop))
        if (strcmp(scop->sco_secname, sec_name) == 0) {
            mf_fatal(mf, (MSG_MAP_DUP_OS_ORD), sec_name);
            return (false);
        }

    scop = alist_append(&sgp->sg_os_order, NULL, sizeof (Sec_order),
        AL_CNT_SG_SECORDER);
    if (scop == NULL)
        return (false);

    scop->sco_secname = sec_name;

    //DBG_CALL(Dbg_map_seg_os_order(mf->mf_ofl->ofl_lml, sgp, sec_name,
    //    alist_nitems(sgp->sg_os_order), mf->mf_lineno));

    /*
     * Output section ordering is a relatively expensive operation,
     * and one that is generally not used.  In order to avoid needless
     * work, the FLG_OF_OS_ORDER flag must be set when it will be needed.
     * The section we just added needs this flag to be set.  However,
     * it is possible that a subsequent mapfile directive may come
     * along and clear the order list, making it unnecessary.
     *
     * Instead of setting it here, we do a final pass over the segments
     * in ld_map_finalize() and set it there if a segment with sorting
     * requirements is seen.
     */
    return (true);
}
void alist_insert(alist al, unsigned int i, void *p)
{
    if (i >= al->size) {
        i = al->size;
        alist_append(al, p);
    } else if (i == 0)
        alist_prepend(al, p);
    else {
        aentry oldae = find_aentry(al, i);
        aentry ae = aentry_new(p);

        ae->next = oldae;
        if (oldae->prev != NULL) {
            ae->prev = oldae->prev;
            ae->prev->next = ae;
        }
        oldae->prev = ae;

        al->current = ae;
        al->idx = i;
        ++al->size;
    }
}
/**
 * Spawn a new particle.
 */
void particles_particle_spawn(struct particles *em, struct anim *anim,
        particle_think_t particle_think, float x, float y, float w, float h,
        float angle, float vx, float vy, float age_max)
{
    /* Max particles reached? */
    if(em->particles_count >= em->particles_max) {
        em->particles_max_counter++;
        return;
    }

    /* Get the next available particle. */
    struct particle *p = particles_particle_next(em);
    if(p == NULL) {
        em->particles_max_counter++;
        return;
    }

    particle_init(p, anim, particle_think, x, y, w, h, angle, vx, vy, age_max);

    /* Add it to the list of alive particles. */
    alist_append(em->particles, p);
}
#include <stdio.h>
#include <stdlib.h>

#include "alist.h"  /* assumed project header declaring struct alist_node and the alist_*() calls */

int main(int argc, char *argv[])
{
    unsigned next = 0;
    struct alist_node *pool = calloc(20, sizeof(struct alist_node));
    int i;

    /* append six entries, printing the iterator counter after each */
    for (i = 0; i < 6; i++) {
        alist_append(pool, &next);
        fprintf(stdout, "ITR: %u\n", next);
    }

    alist_erase(pool, 2);
    alist_erase(pool, 4);
    alist_traverse(pool, alist_say, 1, 6, 0);

    return 0;
}
void frame_addcommand(framestruc *fr, optionstruc *op, int li)
{
    FRCOMMANDS(fr) = alist_append(FRCOMMANDS(fr), op);
    junk = li;  /* (should we use this as above???) */
}
Rt_map *
setup(char **envp, auxv_t *auxv, Word _flags, char *_platform, int _syspagsz,
    char *_rtldname, ulong_t ld_base, ulong_t interp_base, int fd, Phdr *phdr,
    char *execname, char **argv, uid_t uid, uid_t euid, gid_t gid, gid_t egid,
    void *aoutdyn, int auxflags, uint_t *hwcap)
{
    Rt_map *rlmp, *mlmp, *clmp, **tobj = NULL;
    Ehdr *ehdr;
    rtld_stat_t status;
    int features = 0, ldsoexec = 0;
    size_t eaddr, esize;
    char *str, *argvname;
    Word lmflags;
    mmapobj_result_t *mpp;
    Fdesc fdr = { 0 }, fdm = { 0 };
    Rej_desc rej = { 0 };
    APlist *ealp = NULL;

    /*
     * Now that ld.so has relocated itself, initialize our own 'environ' so
     * as to establish an address suitable for any libc requirements.
     */
    _environ = (char **)((ulong_t)auxv - sizeof (char *));
    _init();
    _environ = envp;

    /*
     * Establish a base time.  Total time diagnostics start from entering
     * ld.so.1 here; however, the base time is reset each time ld.so.1
     * is re-entered.  Note also, there will be a large time associated
     * with the first diagnostic from ld.so.1, as bootstrapping ld.so.1
     * and establishing the liblddbg infrastructure takes some time.
     */
    (void) gettimeofday(&DBG_TOTALTIME, NULL);
    DBG_DELTATIME = DBG_TOTALTIME;

    /*
     * Determine how ld.so.1 has been executed.
     */
    if ((fd == -1) && (phdr == NULL)) {
        /*
         * If we received neither the AT_EXECFD nor the AT_PHDR aux
         * vector, ld.so.1 must have been invoked directly from the
         * command line.
         */
        ldsoexec = 1;

        /*
         * AT_SUN_EXECNAME provides the most precise name, if it is
         * available, otherwise fall back to argv[0].  At this time,
         * there is no process name.
         */
        if (execname)
            rtldname = execname;
        else if (argv[0])
            rtldname = argv[0];
        else
            rtldname = (char *)MSG_INTL(MSG_STR_UNKNOWN);
    } else {
        /*
         * Otherwise, we have a standard process.  AT_SUN_EXECNAME
         * provides the most precise name, if it is available,
         * otherwise fall back to argv[0].  Provided the application
         * is already mapped, the process is the application, so
         * simplify the application name for use in any diagnostics.
         */
        if (execname)
            argvname = execname;
        else if (argv[0])
            argvname = execname = argv[0];
        else
            argvname = execname = (char *)MSG_INTL(MSG_STR_UNKNOWN);

        if (fd == -1) {
            if ((str = strrchr(argvname, '/')) != NULL)
                procname = ++str;
            else
                procname = argvname;
        }

        /*
         * At this point, we don't know the runtime linker's full path
         * name.  The _rtldname passed to us is the SONAME of the
         * runtime linker, which is typically /lib/ld.so.1 no matter
         * what the full path is.  Use this for now; we'll reset the
         * runtime linker's name once the application is analyzed.
         */
        if (_rtldname) {
            if ((str = strrchr(_rtldname, '/')) != NULL)
                rtldname = ++str;
            else
                rtldname = _rtldname;
        } else
            rtldname = (char *)MSG_INTL(MSG_STR_UNKNOWN);

        /* exec() brought in two objects for us.  Count the second one. */
        cnt_map++;
    }

    /*
     * Initialize any global variables.
     */
    at_flags = _flags;

    if ((org_scapset->sc_plat = _platform) != NULL)
        org_scapset->sc_platsz = strlen(_platform);

    if (org_scapset->sc_plat == NULL)
        platform_name(org_scapset);
    if (org_scapset->sc_mach == NULL)
        machine_name(org_scapset);

    /*
     * If pagesize is unspecified, find its value.
     */
    if ((syspagsz = _syspagsz) == 0)
        syspagsz = _sysconfig(_CONFIG_PAGESIZE);

    /*
     * Add the unused portion of the last data page to the free space list.
     * The page size must be set before doing this.  Here, _end refers to
     * the end of the runtime linker's bss.  Note that we do not use the
     * unused data pages from any included .so's to supplement this free
     * space, as badly behaved .so's may corrupt this data space, and in so
     * doing ruin our data.
     */
    eaddr = S_DROUND((size_t)&_end);
    esize = eaddr % syspagsz;
    if (esize) {
        esize = syspagsz - esize;
        addfree((void *)eaddr, esize);
    }

    /*
     * Establish initial link-map list flags, and link-map list alists.
     */
    if (alist_append(&lml_main.lm_lists, NULL, sizeof (Lm_cntl),
        AL_CNT_LMLISTS) == NULL)
        return (0);
    lml_main.lm_flags |= LML_FLG_BASELM;
    lml_main.lm_lmid = LM_ID_BASE;
    lml_main.lm_lmidstr = (char *)MSG_ORIG(MSG_LMID_BASE);

    if (alist_append(&lml_rtld.lm_lists, NULL, sizeof (Lm_cntl),
        AL_CNT_LMLISTS) == NULL)
        return (0);
    lml_rtld.lm_flags |= (LML_FLG_RTLDLM | LML_FLG_HOLDLOCK);
    lml_rtld.lm_tflags |= LML_TFLG_NOAUDIT;
    lml_rtld.lm_lmid = LM_ID_LDSO;
    lml_rtld.lm_lmidstr = (char *)MSG_ORIG(MSG_LMID_LDSO);

    /*
     * Determine whether we have a secure executable.
     */
    security(uid, euid, gid, egid, auxflags);

    /*
     * Make an initial pass of environment variables to pick off those
     * related to locale processing.  At the same time, collect and save
     * any LD_XXXX variables for later processing.  Note that this later
     * processing will be skipped if ld.so.1 is invoked from the command
     * line with -e LD_NOENVIRON.
     */
    if (envp && (readenv_user((const char **)envp, &ealp) == 1))
        return (0);

    /*
     * If ld.so.1 has been invoked directly, process its arguments.
     */
    if (ldsoexec) {
        /*
         * Process any arguments that are specific to ld.so.1, and
         * reorganize the process stack to effectively remove ld.so.1
         * from the stack.  Reinitialize the environment pointer, as
         * this pointer may have been shifted after skipping ld.so.1's
         * arguments.
         */
        if (rtld_getopt(argv, &envp, &auxv, &(lml_main.lm_flags),
            &(lml_main.lm_tflags), (aoutdyn != 0)) == 1) {
            eprintf(&lml_main, ERR_NONE, MSG_INTL(MSG_USG_BADOPT));
            return (0);
        }
        _environ = envp;

        /*
         * Open the object that ld.so.1 is to execute.
         */
        argvname = execname = argv[0];

        if ((fd = open(argvname, O_RDONLY)) == -1) {
            int err = errno;

            eprintf(&lml_main, ERR_FATAL, MSG_INTL(MSG_SYS_OPEN),
                argvname, strerror(err));
            return (0);
        }
    }

    /*
     * Having processed any ld.so.1 command line options, return to process
     * any LD_XXXX environment variables.
     */
    if (ealp) {
        if (((rtld_flags & RT_FL_NOENVIRON) == 0) &&
            (procenv_user(ealp, &(lml_main.lm_flags),
            &(lml_main.lm_tflags), (aoutdyn != 0)) == 1))
            return (0);
        free(ealp);
    }

    /*
     * Initialize a hardware capability descriptor for use in comparing
     * each loaded object.  The aux vector must provide AF_SUN_HWCAPVERIFY,
     * as prior to this setting any hardware capabilities that were found
     * could not be relied upon.
     */
    if (auxflags & AF_SUN_HWCAPVERIFY) {
        rtld_flags2 |= RT_FL2_HWCAP;
        org_scapset->sc_hw_1 = (Xword)hwcap[0];
        org_scapset->sc_hw_2 = (Xword)hwcap[1];
    }

    /*
     * Create a mapping descriptor for ld.so.1.  We can determine our
     * two segments' information from known symbols.
     */
    if ((mpp = calloc(2, sizeof (mmapobj_result_t))) == NULL)
        return (0);
    mpp[0].mr_addr = (caddr_t)M_PTRUNC(ld_base);
    mpp[0].mr_msize = (caddr_t)&_etext - mpp[0].mr_addr;
    mpp[0].mr_fsize = mpp[0].mr_msize;
    mpp[0].mr_prot = (PROT_READ | PROT_EXEC);
    mpp[1].mr_addr = (caddr_t)M_PTRUNC((uintptr_t)&r_debug);
    mpp[1].mr_msize = (caddr_t)&_end - mpp[1].mr_addr;
    mpp[1].mr_fsize = (caddr_t)&_edata - mpp[1].mr_addr;
    mpp[1].mr_prot = (PROT_READ | PROT_WRITE | PROT_EXEC);

    if ((fdr.fd_nname = stravl_insert(_rtldname, 0, 0, 0)) == NULL)
        return (0);
    if ((rlmp = elf_new_lmp(&lml_rtld, ALIST_OFF_DATA, &fdr,
        (Addr)mpp->mr_addr, (size_t)((uintptr_t)eaddr - (uintptr_t)ld_base),
        NULL, NULL, NULL)) == NULL)
        return (0);

    MMAPS(rlmp) = mpp;
    MMAPCNT(rlmp) = 2;
    PADSTART(rlmp) = (ulong_t)mpp[0].mr_addr;
    PADIMLEN(rlmp) = (ulong_t)mpp[0].mr_addr + (ulong_t)mpp[1].mr_addr +
        (ulong_t)mpp[1].mr_msize;

    MODE(rlmp) |= (RTLD_LAZY | RTLD_NODELETE | RTLD_GLOBAL | RTLD_WORLD);
    FLAGS(rlmp) |= (FLG_RT_ANALYZED | FLG_RT_RELOCED | FLG_RT_INITDONE |
        FLG_RT_INITCLCT | FLG_RT_FINICLCT | FLG_RT_MODESET);

    /*
     * Initialize the runtime linker's information.
     */
    interp = &_interp;
    interp->i_name = (char *)rtldname;
    interp->i_faddr = (caddr_t)ADDR(rlmp);
    ldso_plt_init(rlmp);

    /*
     * Map in the file, if exec has not already done so, or if the file
     * was passed as an argument to an explicit execution of ld.so.1 from
     * the command line.
     */
    if (fd != -1) {
        /*
         * Map the file.  Once the object is mapped we no longer need
         * the file descriptor.
         */
        (void) rtld_fstat(fd, &status);
        fdm.fd_oname = argvname;
        fdm.fd_ftp = map_obj(&lml_main, &fdm, status.st_size, argvname,
            fd, &rej);
        (void) close(fd);

        if (fdm.fd_ftp == NULL) {
            Conv_reject_desc_buf_t rej_buf;

            eprintf(&lml_main, ERR_FATAL,
                MSG_INTL(err_reject[rej.rej_type]), argvname,
                conv_reject_desc(&rej, &rej_buf, M_MACH));
            return (0);
        }

        /*
         * Finish processing the loading of the file.
         */
        if ((fdm.fd_nname = stravl_insert(argvname, 0, 0, 0)) == NULL)
            return (0);
        fdm.fd_dev = status.st_dev;
        fdm.fd_ino = status.st_ino;

        if ((mlmp = load_file(&lml_main, ALIST_OFF_DATA, NULL, &fdm,
            NULL)) == NULL)
            return (0);

        /*
         * We now have a process name for error diagnostics.
         */
        if ((str = strrchr(argvname, '/')) != NULL)
            procname = ++str;
        else
            procname = argvname;

        if (ldsoexec) {
            mmapobj_result_t *mpp = MMAPS(mlmp);
            uint_t mnum, mapnum = MMAPCNT(mlmp);
            void *brkbase = NULL;

            /*
             * Since ld.so.1 was the primary executed object, the
             * brk() base has not yet been initialized; we need to
             * initialize it.  For an executable, initialize it to
             * the end of the object.  For a shared object (ET_DYN)
             * initialize it to the first page in memory.
             */
            for (mnum = 0; mnum < mapnum; mnum++, mpp++)
                brkbase = mpp->mr_addr + mpp->mr_msize;

            if (brkbase == NULL)
                brkbase = (void *)syspagsz;

            if (_brk_unlocked(brkbase) == -1) {
                int err = errno;

                eprintf(&lml_main, ERR_FATAL, MSG_INTL(MSG_SYS_BRK),
                    argvname, strerror(err));
                return (0);
            }
        }
    } else {
        /*
         * Set up the function pointer and arguments according to the
         * file class of the executable.  (Currently only ELF and a.out
         * formats are supported.)  Then create a link map for the
         * executable.
         */
        if (aoutdyn) {
#ifdef A_OUT
            mmapobj_result_t *mpp;

            /*
             * Create a mapping structure sufficient to describe
             * two segments.  The ADDR() of the a.out is
             * established as 0, which is required by the AOUT
             * relocation code.
             */
            if ((mpp = calloc(2, sizeof (mmapobj_result_t))) == NULL)
                return (0);

            if ((fdm.fd_nname = stravl_insert(execname, 0, 0, 0)) == NULL)
                return (0);
            if ((mlmp = aout_new_lmp(&lml_main, ALIST_OFF_DATA, &fdm,
                0, 0, aoutdyn, NULL, NULL)) == NULL)
                return (0);

            /*
             * Establish the true mapping information for the a.out.
             */
            if (aout_get_mmap(&lml_main, mpp)) {
                free(mpp);
                return (0);
            }

            MSIZE(mlmp) = (size_t)(mpp[1].mr_addr + mpp[1].mr_msize) -
                S_ALIGN((size_t)mpp[0].mr_addr, syspagsz);
            MMAPS(mlmp) = mpp;
            MMAPCNT(mlmp) = 2;
            PADSTART(mlmp) = (ulong_t)mpp->mr_addr;
            PADIMLEN(mlmp) = mpp->mr_msize;

            /*
             * Disable any object configuration cache (BCP apps
             * bring in sbcp, which can benefit from any object
             * cache, but both the app and sbcp can't use the same
             * objects).
             */
            rtld_flags |= RT_FL_NOOBJALT;

            /*
             * Make sure no-direct bindings are in effect.
             */
            lml_main.lm_tflags |= LML_TFLG_NODIRECT;
#else
            eprintf(&lml_main, ERR_FATAL, MSG_INTL(MSG_ERR_REJ_UNKFILE),
                argvname);
            return (0);
#endif
        } else if (phdr) {
            Phdr *pptr;
            Off i_offset = 0;
            Addr base = 0;
            ulong_t phsize;
            mmapobj_result_t *mpp, *fmpp, *hmpp = NULL;
            uint_t mapnum = 0;
            int i;
            size_t msize;

            /*
             * Using the executable's phdr address, determine the base
             * address of the input file.  NOTE, this assumes the
             * program headers and ELF header are part of the same
             * mapped segment.  Although this has held for many
             * years now, it might be more flexible if the kernel
             * gave us the ELF header's start address, rather than
             * that of the program headers.
             *
             * Determine from the ELF header whether we've been called
             * from a shared object or dynamic executable.  If the
             * latter, then any addresses within the object are used
             * as is.  Addresses within shared objects must be added
             * to the process's base address.
             */
            ehdr = (Ehdr *)((Addr)phdr - phdr->p_offset);
            phsize = ehdr->e_phentsize;
            if (ehdr->e_type == ET_DYN)
                base = (Addr)ehdr;

            /*
             * Allocate a mapping array to retain mapped segment
             * information.
             */
            if ((fmpp = mpp = calloc(ehdr->e_phnum,
                sizeof (mmapobj_result_t))) == NULL)
                return (0);

            /*
             * Extract the needed information from the segment headers.
             */
            for (i = 0, pptr = phdr; i < ehdr->e_phnum; i++) {
                if (pptr->p_type == PT_INTERP) {
                    i_offset = pptr->p_offset;
                    interp->i_faddr = (caddr_t)interp_base;
                }
                if ((pptr->p_type == PT_LOAD) &&
                    (pptr->p_filesz || pptr->p_memsz)) {
                    int perm = (PROT_READ | PROT_EXEC);
                    size_t off;

                    if (i_offset && pptr->p_filesz &&
                        (i_offset >= pptr->p_offset) &&
                        (i_offset <= (pptr->p_memsz + pptr->p_offset))) {
                        interp->i_name = (char *)pptr->p_vaddr +
                            i_offset - pptr->p_offset + base;
                        i_offset = 0;
                    }

                    if (pptr->p_flags & PF_W)
                        perm |= PROT_WRITE;

                    /*
                     * Retain segment mapping info.  Round
                     * each segment to a page boundary, as
                     * this ensures addresses are suitable
                     * for mprotect() if required.
                     */
                    off = pptr->p_vaddr + base;
                    if (hmpp == NULL) {
                        hmpp = mpp;
                        mpp->mr_addr = (caddr_t)ehdr;
                    } else
                        mpp->mr_addr = (caddr_t)off;

                    off -= (size_t)(uintptr_t)mpp->mr_addr;
                    mpp->mr_msize = pptr->p_memsz + off;
                    mpp->mr_fsize = pptr->p_filesz + off;
                    mpp->mr_prot = perm;

                    mpp++, mapnum++;
                }

                pptr = (Phdr *)((ulong_t)pptr + phsize);
            }

            mpp--;
            msize = (size_t)(mpp->mr_addr + mpp->mr_msize) -
                S_ALIGN((size_t)fmpp->mr_addr, syspagsz);

            if ((fdm.fd_nname = stravl_insert(execname, 0, 0, 0)) == NULL)
                return (0);
            if ((mlmp = elf_new_lmp(&lml_main, ALIST_OFF_DATA, &fdm,
                (Addr)hmpp->mr_addr, msize, NULL, NULL, NULL)) == NULL)
                return (0);

            MMAPS(mlmp) = fmpp;
            MMAPCNT(mlmp) = mapnum;
            PADSTART(mlmp) = (ulong_t)fmpp->mr_addr;
            PADIMLEN(mlmp) = (ulong_t)fmpp->mr_addr +
                (ulong_t)mpp->mr_addr + (ulong_t)mpp->mr_msize;
        }
    }

    /*
     * Establish the interpreter's name as that defined within the initial
     * object (executable).  This provides for ORIGIN processing of ld.so.1
     * dependencies.  Note, the NAME() of the object remains that which was
     * passed to us as the SONAME on execution.
     */
    if (ldsoexec == 0) {
        size_t len = strlen(interp->i_name);

        if (expand(&interp->i_name, &len, 0, 0,
            (PD_TKN_ISALIST | PD_TKN_CAP), rlmp) & PD_TKN_RESOLVED)
            fdr.fd_flags |= FLG_FD_RESOLVED;
    }
    fdr.fd_pname = interp->i_name;
    (void) fullpath(rlmp, &fdr);

    /*
     * The runtime linker acts as a filtee for various dl*() functions that
     * are defined in libc (and libdl).  Make sure this standard name for
     * the runtime linker is also registered in the FullPathNode AVL tree.
     */
    (void) fpavl_insert(&lml_rtld, rlmp, _rtldname, 0);

    /*
     * Having established the true runtime linker's name, simplify the name
     * for error diagnostics.
     */
    if ((str = strrchr(PATHNAME(rlmp), '/')) != NULL)
        rtldname = ++str;
    else
        rtldname = PATHNAME(rlmp);

    /*
     * Expand the fullpath name of the application.  This typically occurs
     * as a part of loading an object, but as the kernel probably mapped
     * it in, complete this processing now.
     */
    (void) fullpath(mlmp, 0);

    /*
     * Some troublesome programs will change the value of argv[0].  Duping
     * the process string protects us, and ensures the string is left in
     * any core files.
     */
    if ((str = (char *)strdup(procname)) == NULL)
        return (0);
    procname = str;

    FLAGS(mlmp) |= (FLG_RT_ISMAIN | FLG_RT_MODESET);
    FLAGS1(mlmp) |= FL1_RT_USED;

    /*
     * It's the responsibility of MAIN (crt0) to call its _init and _fini
     * sections, therefore null out any INIT/FINI so that this object isn't
     * collected during tsort processing.  And, if the application has no
     * initarray or finiarray we can economize on establishing bindings.
     */
    INIT(mlmp) = FINI(mlmp) = NULL;
    if ((INITARRAY(mlmp) == NULL) && (FINIARRAY(mlmp) == NULL))
        FLAGS1(mlmp) |= FL1_RT_NOINIFIN;

    /*
     * Identify lddstub if necessary.
     */
    if (lml_main.lm_flags & LML_FLG_TRC_LDDSTUB)
        FLAGS1(mlmp) |= FL1_RT_LDDSTUB;

    /*
     * Retain our argument information for use in dlinfo.
     */
    argsinfo.dla_argv = argv--;
    argsinfo.dla_argc = (long)*argv;
    argsinfo.dla_envp = envp;
    argsinfo.dla_auxv = auxv;

    (void) enter(0);

    /*
     * Add our two main link-maps to the dynlm_list.
     */
    if (aplist_append(&dynlm_list, &lml_main, AL_CNT_DYNLIST) == NULL)
        return (0);
    if (aplist_append(&dynlm_list, &lml_rtld, AL_CNT_DYNLIST) == NULL)
        return (0);

    /*
     * Reset the link-map counts for both lists.  The init count is used to
     * track how many objects have pending init sections; this gets
     * incremented each time an object is relocated.  Since ld.so.1
     * relocates itself, its init count will remain zero.
     * The object count is used to track how many objects have pending fini
     * sections; as ld.so.1 handles its own fini we can zero its count.
     */
    lml_main.lm_obj = 1;
    lml_rtld.lm_obj = 0;

    /*
     * Initialize debugger information structure.  Some parts of this
     * structure were initialized statically.
     */
    r_debug.rtd_rdebug.r_map = (Link_map *)lml_main.lm_head;
    r_debug.rtd_rdebug.r_ldsomap = (Link_map *)lml_rtld.lm_head;
    r_debug.rtd_rdebug.r_ldbase = r_debug.rtd_rdebug.r_ldsomap->l_addr;
    r_debug.rtd_dynlmlst = &dynlm_list;

    /*
     * Determine the dev/inode information for the executable to complete
     * load_so() checking for those who might dlopen(a.out).
     */
    if (rtld_stat(PATHNAME(mlmp), &status) == 0) {
        STDEV(mlmp) = status.st_dev;
        STINO(mlmp) = status.st_ino;
    }

    /*
     * Initialize any configuration information.
     */
    if (!(rtld_flags & RT_FL_NOCFG)) {
        if ((features = elf_config(mlmp, (aoutdyn != 0))) == -1)
            return (0);
    }

#if defined(_ELF64)
    /*
     * If this is a 64-bit process, determine whether this process has
     * restricted the process address space to 32 bits.  Any dependencies
     * that are restricted to a 32-bit address space can only be loaded if
     * the executable has established this requirement.
     */
    if (CAPSET(mlmp).sc_sf_1 & SF1_SUNW_ADDR32)
        rtld_flags2 |= RT_FL2_ADDR32;
#endif

    /*
     * Establish any alternative capabilities, and validate this object
     * if it defines its own capabilities information.
     */
    if (cap_alternative() == 0)
        return (0);

    if (cap_check_lmp(mlmp, &rej) == 0) {
        if (lml_main.lm_flags & LML_FLG_TRC_ENABLE) {
            /* LINTED */
            (void) printf(MSG_INTL(ldd_warn[rej.rej_type]), NAME(mlmp),
                rej.rej_str);
        } else {
            /* LINTED */
            eprintf(&lml_main, ERR_FATAL,
                MSG_INTL(err_reject[rej.rej_type]), NAME(mlmp),
                rej.rej_str);
            return (0);
        }
    }

    /*
     * Establish the modes of the initial object.  These modes are
     * propagated to any preloaded objects and explicit shared library
     * dependencies.
     *
     * If we're generating a configuration file using crle(1), remove
     * any RTLD_NOW use, as we don't want to trigger any relocation
     * processing during crle(1)'s first pass (this would just be
     * unnecessary overhead).  Any filters are explicitly loaded, and thus
     * RTLD_NOW is not required to trigger filter loading.
     *
     * Note, RTLD_NOW may have been established during analysis of the
     * application had the application been built -z now.
     */
    MODE(mlmp) |= (RTLD_NODELETE | RTLD_GLOBAL | RTLD_WORLD);
    if (rtld_flags & RT_FL_CONFGEN) {
        MODE(mlmp) |= RTLD_CONFGEN;
        MODE(mlmp) &= ~RTLD_NOW;
        rtld_flags2 &= ~RT_FL2_BINDNOW;
    }
    if ((MODE(mlmp) & RTLD_NOW) == 0) {
        if (rtld_flags2 & RT_FL2_BINDNOW)
            MODE(mlmp) |= RTLD_NOW;
        else
            MODE(mlmp) |= RTLD_LAZY;
    }

    /*
     * If debugging was requested, initialize things now that any cache has
     * been established.  A user can specify LD_DEBUG=help to discover the
     * list of debugging tokens available without running the application.
     * However, don't allow this setting from a configuration file.
     *
     * Note, to prevent recursion issues caused by loading and binding the
     * debugging libraries themselves, a local debugging descriptor is
     * initialized.  Once the debugging setup has completed, this local
     * descriptor is copied to the global descriptor, which effectively
     * enables diagnostic output.
     *
     * Ignore any debugging request if we're being monitored by a process
     * that expects the old getpid() initialization handshake.
     */
    if ((rpl_debug || prm_debug) && ((rtld_flags & RT_FL_DEBUGGER) == 0)) {
        Dbg_desc _dbg_desc = {0};
        struct timeval total = DBG_TOTALTIME;
        struct timeval delta = DBG_DELTATIME;

        if (rpl_debug) {
            if (dbg_setup(rpl_debug, &_dbg_desc) == 0)
                return (0);
            if (_dbg_desc.d_extra & DBG_E_HELP_EXIT)
                rtldexit(&lml_main, 0);
        }
        if (prm_debug)
            (void) dbg_setup(prm_debug, &_dbg_desc);

        *dbg_desc = _dbg_desc;
        DBG_TOTALTIME = total;
        DBG_DELTATIME = delta;
    }

    /*
     * Now that debugging is enabled, generate any diagnostics from any
     * previous events.
     */
    if (DBG_ENABLED) {
        DBG_CALL(Dbg_cap_val(&lml_main, org_scapset, alt_scapset, M_MACH));
        DBG_CALL(Dbg_file_config_dis(&lml_main, config->c_name, features));
        DBG_CALL(Dbg_file_ldso(rlmp, envp, auxv, LIST(rlmp)->lm_lmidstr,
            ALIST_OFF_DATA));

        if (THIS_IS_ELF(mlmp)) {
            DBG_CALL(Dbg_file_elf(&lml_main, PATHNAME(mlmp), ADDR(mlmp),
                MSIZE(mlmp), LIST(mlmp)->lm_lmidstr, ALIST_OFF_DATA));
        } else {
            DBG_CALL(Dbg_file_aout(&lml_main, PATHNAME(mlmp), ADDR(mlmp),
                MSIZE(mlmp), LIST(mlmp)->lm_lmidstr, ALIST_OFF_DATA));
        }
    }

    /*
     * Enable auditing.
     */
    if (rpl_audit || prm_audit || profile_lib) {
        int ndx;
        const char *aud[3];

        aud[0] = rpl_audit;
        aud[1] = prm_audit;
        aud[2] = profile_lib;

        /*
         * Any global auditing (set using LD_AUDIT or LD_PROFILE) that
         * can't be established is non-fatal.
         */
        if ((auditors = calloc(1, sizeof (Audit_desc))) == NULL)
            return (0);

        for (ndx = 0; ndx < 3; ndx++) {
            if (aud[ndx]) {
                if ((auditors->ad_name = strdup(aud[ndx])) == NULL)
                    return (0);
                rtld_flags2 |= RT_FL2_FTL2WARN;
                (void) audit_setup(mlmp, auditors, PD_FLG_EXTLOAD, NULL);
                rtld_flags2 &= ~RT_FL2_FTL2WARN;
            }
        }
        lml_main.lm_tflags |= auditors->ad_flags;
    }
    if (AUDITORS(mlmp)) {
        /*
         * Any object-required auditing (set with a DT_DEPAUDIT dynamic
         * entry) that can't be established is fatal.
         */
        if (FLAGS1(mlmp) & FL1_RT_GLOBAUD) {
            /*
             * If this object requires global auditing, use the
             * local auditing information to set the global
             * auditing descriptor.  The effect is that a
             * DT_DEPAUDIT acts as an LD_AUDIT.
             */
            if ((auditors == NULL) && ((auditors = calloc(1,
                sizeof (Audit_desc))) == NULL))
                return (0);

            auditors->ad_name = AUDITORS(mlmp)->ad_name;
            if (audit_setup(mlmp, auditors, 0, NULL) == 0)
                return (0);
            lml_main.lm_tflags |= auditors->ad_flags;

            /*
             * Clear the local auditor information.
             */
            free((void *) AUDITORS(mlmp));
            AUDITORS(mlmp) = NULL;
        } else {
            /*
             * Establish any local auditing.
             */
            if (audit_setup(mlmp, AUDITORS(mlmp), 0, NULL) == 0)
                return (0);

            AFLAGS(mlmp) |= AUDITORS(mlmp)->ad_flags;
            lml_main.lm_flags |= LML_FLG_LOCAUDIT;
        }
    }

    /*
     * Explicitly add the initial object and ld.so.1 to those objects being
     * audited.  Note, although the ld.so.1 link-map isn't auditable,
     * establish a cookie for ld.so.1 as this may be bound to via the
     * dl*() family.
     */
    if ((lml_main.lm_tflags | AFLAGS(mlmp)) & LML_TFLG_AUD_MASK) {
        if (((audit_objopen(mlmp, mlmp) == 0) ||
            (audit_objopen(mlmp, rlmp) == 0)) &&
            (AFLAGS(mlmp) & LML_TFLG_AUD_MASK))
            return (0);
    }

    /*
     * Map in any preloadable shared objects.  Establish the caller as the
     * head of the main link-map list.  In the case of being exercised from
     * lddstub, the caller gets reassigned to the first target shared object
     * so as to provide intuitive diagnostics from ldd().
     *
     * Note, it is valid to preload a 4.x shared object with a 5.0
     * executable (or vice versa), as this functionality is required by
     * ldd(1).
     */
    clmp = mlmp;
    if (rpl_preload && (preload(rpl_preload, mlmp, &clmp) == 0))
        return (0);
    if (prm_preload && (preload(prm_preload, mlmp, &clmp) == 0))
        return (0);

    /*
     * Load all dependent (needed) objects.
     */
    if (analyze_lmc(&lml_main, ALIST_OFF_DATA, mlmp, mlmp, NULL) == NULL)
        return (0);

    /*
     * Relocate all the dependencies we've just added.
     *
     * If this process has been established via crle(1), the environment
     * variable LD_CONFGEN will have been set.  crle(1) may create this
     * process twice.  The first time, crle only needs to gather dependency
     * information.  The second time is to dldump() the images.
     *
     * If we're only gathering dependencies, relocation is unnecessary.
     * As crle(1) may be building an arbitrary family of objects, they may
     * not fully relocate either.  Hence the relocation phase is not carried
     * out now, but will be called by crle(1) once all objects have been
     * loaded.
     */
    if ((rtld_flags & RT_FL_CONFGEN) == 0) {
        DBG_CALL(Dbg_util_nl(&lml_main, DBG_NL_STD));

        if (relocate_lmc(&lml_main, ALIST_OFF_DATA, mlmp, mlmp, NULL) == 0)
            return (0);

        /*
         * Inform the debuggers that basic process initialization is
         * complete, and that the state of ld.so.1 (link-map lists,
         * etc.) is stable.  This handshake enables debuggers to
         * initialize themselves, and consequently allows the user to
         * set break points in .init code.
         *
         * Most new debuggers use librtld_db to monitor activity events.
         * Older debuggers indicated their presence by setting the
         * DT_DEBUG entry in the dynamic executable (see elf_new_lm()).
         * In this case, getpid() is called so that the debugger can
         * catch the system call.  This old mechanism has some
         * restrictions, as getpid() should not be called prior to
         * basic process initialization being completed.  This
         * restriction has become increasingly difficult to maintain,
         * as the use of auditors, LD_DEBUG, and the initialization
         * handshake with libc can result in "premature" getpid()
         * calls.  The use of this getpid() handshake is expected to
         * disappear at some point in the future, and there is intent
         * to work towards that goal.
         */
        rd_event(&lml_main, RD_DLACTIVITY, RT_CONSISTENT);
        rd_event(&lml_rtld, RD_DLACTIVITY, RT_CONSISTENT);

        if (rtld_flags & RT_FL_DEBUGGER) {
            r_debug.rtd_rdebug.r_flags |= RD_FL_ODBG;
            (void) getpid();
        }
    }

    /*
     * Indicate preinit activity, and call any auditing routines.  These
     * routines are called before initializing any threads via libc, or
     * before collecting the complete set of .inits on the primary link-map.
     * Although most libc interfaces are encapsulated in local routines
     * within libc, they have been known to escape (i.e., call a .plt).  As
     * the appcert auditor uses preinit as a trigger to establish some
     * external interfaces to the main link-map's libc, we need to activate
     * this trigger before exercising any code within libc.  Additionally,
     * I wouldn't put it past an auditor to add additional objects to the
     * primary link-map.  Hence, we collect .inits after the audit call.
     */
    rd_event(&lml_main, RD_PREINIT, 0);

    if (aud_activity || ((lml_main.lm_tflags | AFLAGS(mlmp)) &
        LML_TFLG_AUD_ACTIVITY))
        audit_activity(mlmp, LA_ACT_CONSISTENT);
    if (aud_preinit || ((lml_main.lm_tflags | AFLAGS(mlmp)) &
        LML_TFLG_AUD_PREINIT))
        audit_preinit(mlmp);

    /*
     * If we're creating initial configuration information, we're done
     * now that the auditing step has been called.
     */
    if (rtld_flags & RT_FL_CONFGEN) {
        leave(LIST(mlmp), 0);
        return (mlmp);
    }

    /*
     * Sort the .init sections of all objects we've added.  If we're
     * tracing, we only need to execute this under ldd(1) with the -i or -u
     * options.
     */
    lmflags = lml_main.lm_flags;
    if (((lmflags & LML_FLG_TRC_ENABLE) == 0) ||
        (lmflags & (LML_FLG_TRC_INIT | LML_FLG_TRC_UNREF))) {
        if ((tobj = tsort(mlmp, LIST(mlmp)->lm_init,
            RT_SORT_REV)) == (Rt_map **)S_ERROR)
            return (0);
    }

    /*
     * If we are tracing we're done.  This is the one legitimate use of a
     * direct call to rtldexit() rather than return, as we don't want to
     * return and jump to the application.
     */
    if (lmflags & LML_FLG_TRC_ENABLE) {
        unused(&lml_main);
        rtldexit(&lml_main, 0);
    }

    /*
     * Check if this instance of the linker should have a primary link
     * map.  This flag allows multiple copies of the -same- -version-
     * of the linker (and libc) to run in the same address space.
     *
     * Without this flag we only support one copy of the linker in a
     * process, because by default the linker will always try to
     * initialize at one primary link map.  The copy of libc which is
     * initialized on a primary link map will initialize global TLS
     * data which can be shared with other copies of libc in the
     * process.  The problem is that if there is more than one copy
     * of the linker, only one copy should link libc onto a primary
     * link map, otherwise libc will attempt to re-initialize global
     * TLS data.  So when a copy of the linker is loaded with this
     * flag set, it will not initialize any primary link maps, since
     * presumably another copy of the linker will do this.
     *
     * Note that this flag only allows multiple copies of the -same-
     * -version- of the linker (and libc) to coexist.  This approach
     * will not work if we are trying to load different versions of
     * the linker and libc into the same process.  The reason for
     * this is that the format of the global TLS data may not be
     * the same for different versions of libc.  In this case each
     * different version of libc must have its own primary link map
     * and be able to maintain its own TLS data.  The only way this
     * can be done is by carefully managing TLS pointers on transitions
     * between code associated with each of the different linkers.
     * Note that this is actually what is done for processes in lx
     * branded zones.  Although in the lx branded zone case, the
     * other linker and libc are actually gld and glibc.  But the
     * same general TLS management mechanism used by the lx brand
     * would apply to any attempts to run multiple versions of the
     * solaris linker and libc in a single process.
     */
    if (auxflags & AF_SUN_NOPLM)
        rtld_flags2 |= RT_FL2_NOPLM;

    /*
     * Establish any static TLS for this primary link-map.  Note, regardless
     * of whether TLS is available, an initial handshake occurs with libc to
     * indicate we're processing the primary link-map.  Having identified
     * the primary link-map, initialize threads.
     */
    if (rt_get_extern(&lml_main, mlmp) == 0)
        return (0);

    if ((rtld_flags2 & RT_FL2_NOPLM) == 0) {
        if (tls_statmod(&lml_main, mlmp) == 0)
            return (0);
        rt_thr_init(&lml_main);
        rtld_flags2 |= RT_FL2_PLMSETUP;
    } else {
        rt_thr_init(&lml_main);
    }

    /*
     * Fire all dependencies' .init sections.  Identify any unused
     * dependencies, and leave the runtime linker - effectively calling
     * the dynamic executable's entry point.
     */
    call_array(PREINITARRAY(mlmp), (uint_t)PREINITARRAYSZ(mlmp), mlmp,
        SHT_PREINIT_ARRAY);

    if (tobj)
        call_init(tobj, DBG_INIT_SORT);

    rd_event(&lml_main, RD_POSTINIT, 0);

    unused(&lml_main);

    DBG_CALL(Dbg_util_call_main(mlmp));

    rtld_flags |= (RT_FL_OPERATION | RT_FL_APPLIC);

    leave(LIST(mlmp), 0);

    return (mlmp);
}
int main(void)
{
    alist al;
    char *t1 = "def", *t2 = "abc", *t3 = "xyz";
    char *s;

    al = alist_new();
    assert(alist_count(al) == 0);
    assert(alist_current(al) == NULL);
    assert(alist_current_idx(al) == -1);

    alist_append(al, t1);
    assert(alist_count(al) == 1);
    assert(alist_current(al) == t1);
    assert(alist_current_idx(al) == 0);

    alist_append(al, t2);
    assert(alist_count(al) == 2);
    assert(alist_current(al) == t2);
    assert(alist_current_idx(al) == 1);

    s = alist_first(al);
    assert(s == t1);
    assert(alist_current(al) == t1);
    assert(alist_current_idx(al) == 0);

    s = alist_next(al);
    assert(s == t2);
    assert(alist_current(al) == t2);
    assert(alist_current_idx(al) == 1);

    s = alist_next(al);
    assert(s == NULL);
    assert(alist_current(al) == NULL);
    assert(alist_current_idx(al) == -1);

    alist_prepend(al, t3);
    assert(alist_count(al) == 3);
    assert(alist_current(al) == t3);
    assert(alist_current_idx(al) == 0);

    printf("elements:\n");
    for (s = alist_first(al); s != NULL; s = alist_next(al))
        printf("element %d: %s\n", alist_current_idx(al), s);

    alist_sort(al, sorter);
    printf("sorted elements:\n");
    for (s = alist_first(al); s != NULL; s = alist_next(al))
        printf("element %d: %s\n", alist_current_idx(al), s);

    assert(alist_at(al, 0) == t2);
    assert(alist_at(al, 1) == t1);
    assert(alist_at(al, 2) == t3);

    alist_clear(al);
    assert(alist_count(al) == 0);
    assert(alist_current(al) == NULL);
    assert(alist_current_idx(al) == -1);

    alist_insert(al, 5, t1);
    assert(alist_count(al) == 1);
    assert(alist_current(al) == t1);
    assert(alist_current_idx(al) == 0);

    alist_insert(al, 0, t2);
    assert(alist_count(al) == 2);
    assert(alist_current(al) == t2);
    assert(alist_current_idx(al) == 0);

    alist_insert(al, 1, t3);
    assert(alist_count(al) == 3);
    assert(alist_at(al, 0) == t2);
    assert(alist_at(al, 1) == t3);
    assert(alist_at(al, 2) == t1);
    assert(alist_current(al) == t3);
    assert(alist_current_idx(al) == 1);

    alist_delete(al);

    printf("alist test successful.\n");
    return 0;
}
int grepr_addline(ematrix *ex, ematrix *ed, int tr, int xpfd, ematrix ***lout)
{
    ematrix *ee, *ef, **el = NULL, **elo = NULL, **x;
    int i, j, k, p, r, ro, co, xp = -1;

    if (!ed) {
        PROGERROR("the matrix ed must be given!");
        return -1;
    }
    ro = ROWSM(ed);
    co = COLSM(ed);
    if (tr < 0)
        tr = ((ex ? ROWSM(ex) < ro : ro <= 1) ? 0 : 1);
    /* (tr need not be given - determining from matrix sizes) */
    if (ex ? (ro != ROWSM(ex)+(!tr) || co != COLSM(ex)+(!!tr)) : (ro != 1 && co != 1)) {
        PROGERROR("the matrices ex, ed have wrong sizes! tr=%d; ro=%d, co=%d - %d, %d",
            tr, ro, co, ROWSM(ex), COLSM(ex));
        return -1;
    }
    if ((ex ? ISREFMAT(ex) : 0) || ISREFMAT(ed)) {
        PROGERROR("do not give referring matrices here");
        return -1;
    }
    if (lout ? *lout : 0) {
        PROGERROR("the list lout must be given empty (null)");
        return -1;
    }
    DEBUG(CURDLEV+1, "Adding a %s to matrix %p to (%dx%d), pf=%d\n",
        tr ? "column" : "row", ex, ro, co, xpfd);
    if (ex && lout)
        EMATDEBUGS(CURDLEV+2, ex, "\t\t-\t");
    /**
     * We first try the easy extensions:
     * If ed is 1xC or Rx1, then ex has entries 0 or 1 depending on ed.
     * If some other line of ed is parallel (and nonzero) to the added line,
     * then we make ex by copying the corresponding line to append.
     * (We have to use the proper pfield xpfd for ed!)
    **/
    xp = pfield_curindex();
    if (xpfd >= 0)
        pfield_switchto_fast(xpfd);
    if (lout)
        EMATDEBUGS(CURDLEV+2, ed, "\t\t~\t");
    p = tr ? ematrix_parallel_col(ed, co-1) : ematrix_parallel_row(ed, ro-1);
    if (p >= 0)
        if (ematrix_linezero(ed, tr, p) > 0)
            p = -1;
    if (xpfd >= 0 && xp >= 0)
        pfield_switchto_fast(xp);

    if (!ex) {
        ee = new_ematrix(ro, co, ro, co);
        for (i = 0; i < ro; i++)
            for (j = 0; j < co; j++) {
                SETEXSIGMZERO(ee, i, j);
                if (SIGNM(ed,i,j) != 0)
                    SIGNM(ee,i,j) = 1;
            }
        elo = alist_append(elo, ee);
        k = 1;
        DEBUG(CURDLEV+1, "Creating a %s matrix with %s.\n",
            tr ? "1xC" : "Rx1", gener_extprintline(0, ee, tr));
    } else if (p >= 0 && ro+co >= 3) {
        ef = ematrix_copy_to(ex, ro, co);
        ef = ematrix_append_rc(ef, tr);
        if (tr) {
            for (i = 0; i < ro; i++)
                if (SIGNM(ed,i,co-1))
                    COPYEXSIGM(ef, i, co-1, ef, i, p);
        } else {
            for (i = 0; i < co; i++)
                if (SIGNM(ed,ro-1,i))
                    COPYEXSIGM(ef, ro-1, i, ef, p, i);
        }
        elo = alist_append(elo, ef);
        k = 1;
        DEBUG(CURDLEV+1, "Appending a parallel %s %s.\n",
            tr ? "column" : "row", gener_extprintline(0, ef, tr));
    } else {
        /**
         * In all other cases we use the next routine:
         * We generate all one-line extensions of ex which have the same
         * zero pattern in the appended line as ed.
         * Then we check the subdeterminants of each extension against
         * the matrix ed, and store copies of isomorphic ones.
        **/
        r = gener_matextens_zeros(ex, tr, ed, &el);
        DEBUG(CURDLEV+1, "Filtering %d (zero-respecting) extension lines...\n",
            alist_getlength(el));
        if (r < 0)
            return r;
        for (x = el, k = 0; x ? *x : 0; x++)
            if (ematrix_havesamedets_repres(*x, ed, xpfd, tr)) {
                k++;
                elo = alist_append(elo, ematrix_copy(*x));
                DEBUG(CURDLEV+1+2*(k>2), "Appending a #%d(%d) %s %s.\n",
                    k, alist_getlength(elo), tr ? "column" : "row",
                    gener_extprintline(0, *x, tr));
            }
        if (el)
            dispose_alist_mats(el);
    }
#ifndef FASTPROG
    if (IFRANDDEBUGLESS(222) && k > 0) {
        x = elo + RANDOM()%k;
        if (!ematrix_havesamedets(*x, ed, xpfd)) {
            PROGERROR("the extended matrix has not the same subdeterminants!");
        }
    }
    if (IFRANDDEBUGLESS(222) && ro > 2 && co > 2 && lout) {
        ee = ematrix_copydual(ex);
        ef = ematrix_copydual(ed);
        ematrix_swaprows(ee, 0, co-2);
        ematrix_swaprows(ef, 0, co-2);
        DEBUG(CURDLEV+1, "Recursively testing the number of added lines...\n");
        r = grepr_addline(ee, ef, (tr < 0 ? -1 : !tr), xpfd, NULL);
        if (r != k && (r >= 0 || k >= 0)) {
            PROGERROR("wrong result of a recursive call %d!=%d", r, k);
        }
    }
#endif
    if (lout)
        *lout = (*lout ? alist_applist(*lout, elo) : elo);
    else if (elo)
        dispose_alist_mats(elo);
    return k;
}
/*
 * Assign move descriptors with the associated target symbol.
 */
static uintptr_t
append_move_desc(Ofl_desc *ofl, Sym_desc *sdp, Elf64_Move *mvp, Is_desc *isp)
{
    int i, cnt = mvp->m_repeat;

    for (i = 0; i < cnt; i++) {
        size_t idx;
        Mv_desc *omdp, nmd;

        /* LINTED */
        nmd.md_len = ELF_M_SIZE(mvp->m_info);
        nmd.md_start = mvp->m_poffset + i *
            ((mvp->m_stride + 1) * nmd.md_len);
        nmd.md_move = mvp;

        /*
         * Verify that this move descriptor doesn't overlap any existing
         * move descriptors.
         */
        for (ALIST_TRAVERSE(sdp->sd_move, idx, omdp)) {
            Mv_desc *smdp, *lmdp;

            if (nmd.md_start > omdp->md_start) {
                smdp = omdp;
                lmdp = &nmd;
            } else {
                smdp = &nmd;
                lmdp = omdp;
            }

            /*
             * If this move entry is exactly the same as that of
             * a symbol that has overridden this symbol (for example
             * should two identical COMMON definitions be associated
             * with the same move data), simply ignore this move
             * element.
             */
            if ((nmd.md_start == omdp->md_start) &&
                ((nmd.md_len == smdp->md_len) &&
                sdp->sd_file != isp->is_file))
                continue;

            if ((nmd.md_start != omdp->md_start) &&
                ((smdp->md_start + smdp->md_len) <= lmdp->md_start))
                continue;

            ld_eprintf(ofl, ERR_FATAL, MSG_MOVE_OVERLAP,
                sdp->sd_file->ifl_name, EC_WORD(isp->is_scnndx),
                isp->is_name, demangle(sdp->sd_name),
                EC_XWORD(nmd.md_start), EC_XWORD(nmd.md_len),
                EC_XWORD(omdp->md_start), EC_XWORD(omdp->md_len));

            /*
             * Indicate that an error has occurred, so that
             * processing can be terminated once all move errors
             * are flushed out.
             */
            sdp->sd_flags |= FLG_SY_OVERLAP;
            return (1);
        }

        if (alist_append(&sdp->sd_move, &nmd, sizeof (Mv_desc),
            AL_CNT_SDP_MOVE) == NULL)
            return (S_ERROR);
    }
    return (1);
}
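/*
 * The overlap check above normalizes each pair into the descriptor that
 * starts first (smdp) and the one that starts last (lmdp), then treats the
 * pair as non-overlapping only when the earlier interval ends at or before
 * the start of the later one.  The same predicate in isolation, as a small
 * sketch (ranges_overlap is an illustrative name):
 */
#include <stdbool.h>
#include <stddef.h>

static bool ranges_overlap(size_t s1, size_t l1, size_t s2, size_t l2)
{
    /* half-open intervals [s1, s1+l1) and [s2, s2+l2) */
    size_t first_start = (s1 <= s2) ? s1 : s2;
    size_t first_len = (s1 <= s2) ? l1 : l2;
    size_t later_start = (s1 <= s2) ? s2 : s1;

    return first_start + first_len > later_start;
}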
static Projectile* _create_projectile(ProjArgs *args) {
    if(IN_DRAW_CODE) {
        log_fatal("Tried to spawn a projectile while in drawing code");
    }

    Projectile *p = (Projectile*)objpool_acquire(stage_object_pools.projectiles);

    p->birthtime = global.frames;
    p->pos = p->pos0 = p->prevpos = args->pos;
    p->angle = args->angle;
    p->rule = args->rule;
    p->draw_rule = args->draw_rule;
    p->shader = args->shader_ptr;
    p->blend = args->blend;
    p->sprite = args->sprite_ptr;
    p->type = args->type;
    p->color = *args->color;
    p->max_viewport_dist = args->max_viewport_dist;
    p->size = args->size;
    p->collision_size = args->collision_size;
    p->flags = args->flags;
    p->timeout = args->timeout;
    p->damage = args->damage;
    p->damage_type = args->damage_type;
    p->clear_flags = 0;

    if(args->shader_params != NULL) {
        p->shader_params = *args->shader_params;
    }

    memcpy(p->args, args->args, sizeof(p->args));

    p->ent.draw_layer = args->layer;
    p->ent.draw_func = ent_draw_projectile;

    projectile_set_prototype(p, args->proto);
    // p->collision_size *= 10;
    // p->size *= 5;

    if((p->type == PROJ_ENEMY || p->type == PROJ_PLAYER) && (creal(p->size) <= 0 || cimag(p->size) <= 0)) {
        log_fatal("Tried to spawn a projectile with invalid size %f x %f", creal(p->size), cimag(p->size));
    }

    if(!(p->ent.draw_layer & LAYER_LOW_MASK)) {
        drawlayer_low_t sublayer;

        switch(p->type) {
            case PROJ_ENEMY:
                // 1. Large projectiles go below smaller ones.
                sublayer = LAYER_LOW_MASK - (drawlayer_low_t)projectile_rect_area(p);
                sublayer = (sublayer << 4) & LAYER_LOW_MASK;
                // 2. Group by shader (hardcoded precedence).
                sublayer |= ht_get(&shader_sublayer_map, p->shader, 0) & 0xf;
                // If specific blending order is required, then you should set up the sublayer manually.
                p->ent.draw_layer |= sublayer;
                break;

            case PROJ_PARTICLE:
                // 1. Group by shader (hardcoded precedence).
                sublayer = ht_get(&shader_sublayer_map, p->shader, 0) & 0xf;
                sublayer <<= 4;
                sublayer |= 0x100;
                // If specific blending order is required, then you should set up the sublayer manually.
                p->ent.draw_layer |= sublayer;
                break;

            default:
                break;
        }
    }

    ent_register(&p->ent, ENT_PROJECTILE);

    // TODO: Maybe allow ACTION_DESTROY here?
    // But in that case, code that uses this function's return value must be careful to not dereference a NULL pointer.
    proj_call_rule(p, EVENT_BIRTH);

    alist_append(args->dest, p);
    return p;
}
int struct_hasbwidth3_ext(int ch, ematrix *em, char **decomp, int *ldecomp) { ematrix *ee, *ex,*e1,*e2, *eu, *ef,*eff; ematrix **list, **lpair, **lln,*llnst[30], **x,**y; int i,j,k, a,b, r, msz; char buf[400]; if (ldecomp!=NULL) {PROGERROR("return ldecomp[] is not implemented yet!"); ldecomp = NULL;} if (!em) {PROGERROR("the matrix must be given here"); return -1;} ee = ematrix_copy(em); msz = ROWSM(ee)+COLSM(ee); #ifndef FASTPROG if (msz<10 || (msz<14 && IFRANDDEBUGLESS(22)) || IFRANDDEBUGLESS(222)) { if (!struct_isconnected(ee,3)) {PROGERROR("Can correctly test branch-width 3 only for 3-connected matrices!");} } DEBUG(CURDLEV,"Testing branch-width 3 for the matroid %p [%s]...\n",ee,EMNAME(em)?EMNAME(em):""); #endif lln = (msz<28?llnst: MMALLOC((msz+2)*sizeof(lln[0]))); list = ematrix_submatrices_sub(ee,1); if (alist_getlength(list)!=msz) {PROGERROR("wrong list of singletons generated here ?!?");} for (x=list; x?*x:0; x++) { snprintf(buf,5," %d ",(ROWSM(*x)>0?ROWSID(*x,0):COLSID(*x,0))); EMSETNAME(*x,buf); lln[GETREFMLINE(ee,*x,0)] = *x; } /** * We start with an initial partition list of the matroid into singletons. * Then we cycle the next procedure until the list has one part only. * Trivial cases are sorted first - a matroid on <=6 elements always * has bw <=3, and if only <=3 singletons remain besides one larger * part, then we are done as well. * The supplementary array refers to those parts in list that are * still singletons (by their row/column indices in ee). **/ while (alist_getlength(list)>1) { DEBUG(CURDLEV+2," new cycle for list %d\n",alist_getlength(list)); lpair = NULL; eu = e1 = e2 = NULL; r = 0; for (x=list, a=b=0,y=NULL; *x && a<2 && b<7; x++,b++) if (ROWSM(*x)+COLSM(*x)>1) { a++; y = x; } if (a==0 && b<=6) { e1 = ematrix_refer_all(ee); eu = ematrix_refer_all(ee); r = 1; } else if (a==1 && b<=4) { e1 = *y; e2 = ematrix_refextract_xall(ee,*y); eu = ematrix_refer_all(ee); r = 2; } /** * Here we try all pairs of parts in list. * If their union is 3-separating, and not both are singletons, * then we have a new part for replacement. **/ for (x=list; *x && !r; x++) for (y=x+1; *y && !r; y++) { if ((a=ROWSM(*x)+COLSM(*x))>1 || (b=ROWSM(*y)+COLSM(*y))>1) { eu = ematrix_union(*x,*y); if (ematrix_whatsep(ee,eu)<3) { if (a>=b) { e1 = *x; e2 = *y; } else { e1 = *y; e2 = *x; } r = 3; } else { dispose_ematrix(eu); } /** * If both in the pair are singletons, then we save this pair for * later testing, and we try our luck with the line-(co)closure. * If the closure has >=4 elements among singletons, then it forms * a new part. * We save a possible closure triple for later testing, but only * if the third element is in the middle of the pair. 
**/ } else { ef = ematrix_union(*x,*y); lpair = alist_append(lpair,ef); for (k=0; k<2 && !r; k++) { eff = ematrix_closure_tr(ee,ef,k); a = 1; for (i=ROWSM(eff)+COLSM(eff)-1; i>=0; i--) { j = GETREFMLINE(ee,eff,i); if (lln[j]==NULL) { if (i<ROWSM(eff)) ematrix_remove_row(eff,i); else ematrix_remove_col(eff,i-ROWSM(eff)); } else { a = a && (j==GETREFMLINE(ee,*x,0) || j==GETREFMLINE(ee,*y,0) || (j>GETREFMLINE(ee,*x,0) && j>GETREFMLINE(ee,*y,0))); } } if (ROWSM(eff)+COLSM(eff)>3) { e1 = eff; eu = ematrix_refer_all(eff); r = 1; } else if (a && ROWSM(eff)+COLSM(eff)==3) { lpair = alist_append(lpair,eff); } else dispose_ematrix(eff); } } DEBUG(CURDLEV+3," cycle x=%d, y=%d, r=%d, pair %d\n",(int)(x-list),(int)(y-list),r,alist_getlength(lpair)); } /** * Then we have to try all pairs that take one non-singleton part * and the other as one of the above pairs or triples, * or two disjoint above pairs or triples. * Again, we check whether their union is 3-separating. **/ for (x=list; *x && !r; x++) if (ROWSM(*x)+COLSM(*x)>1) { for (y=lpair; (y?*y:0) && !r; y++) { eu = ematrix_union(*x,*y); if (ematrix_whatsep(ee,eu)<3) { e1 = *x; e2 = ematrix_refer_all(*y); r = 2; } else dispose_ematrix(eu); } } for (x=lpair; (x?*x:0) && !r; x++) { if (ROWSM(*x)+COLSM(*x)>3) {PROGERROR("only pairs and triples should be here");} for (y=x+1; *y && !r; y++) if (ematrix_isdisjoint(*x,*y)) { eu = ematrix_union(*x,*y); if (ematrix_whatsep(ee,eu)<3) { e1 = ematrix_refer_all(*x); e2 = ematrix_refer_all(*y); r = 1; } else dispose_ematrix(eu); } } if (lpair) dispose_alist_mats(lpair); /** * Finally, if we have found a union (in eu) for the new part (r>0), * then we have to remove the current parts of the union, * print this new branch for records, and store the union into the list. **/ if (r<=0) break; buf[0] = 0; for (k=0, ex=e1; ex && k<2; k++, ex=e2) { if (r>k+1) { snprintf(buf+strlen(buf),180,"(%s)",EMNAME(ex)); list = alist_delete_val(list,ex); } else { for (i=ROWSM(ex)+COLSM(ex)-1, a=0; i>=0; i--) { j = GETREFMLINE(ee,ex,i); ef = lln[j]; lln[j] = NULL; if (!ef) {PROGERROR("should find only singleton elements here"); continue;} if (!a++) snprintf(buf+strlen(buf),3,"("); if (strlen(buf)<380) snprintf(buf+strlen(buf),5,"%s",EMNAME(ef)); list = alist_delete_val(list,ef); dispose_ematrix(ef); } if (a>0) snprintf(buf+strlen(buf),3,")"); } dispose_ematrix(ex); } DEBUG(CURDLEV+1," - new branch found %s\n",buf); EMSETNAME(eu,buf); list = alist_append(list,eu); } /** * Here we prepare the return values and possible debug printing. * We also try the result recursively on an equivalent dual matrix. **/ if (alist_getlength(list)<=1) { r = 1; if (decomp && list[0]) *decomp = MSTRDUP(EMNAME(list[0])); if (ch>1) OUTPUT("A width-3 branch decomposition of [%s] is: %s\n",EMNAME(em)?EMNAME(em):"",EMNAME(list[0])); } else { r = -1; if (ch>1) OUTPUT("There is NO width-3 branch decomposition of [%s].\n",EMNAME(em)?EMNAME(em):""); } #ifndef FASTPROG if (r>=0 && ch>=0) { DEBUG(CURDLEV-1,"Found a width 3 branch-decomposition of %p [%s] here\n\t\t\t\t\t%s.\n", ee,EMNAME(em)?EMNAME(em):"",list[0]?EMNAME(list[0]):""); if (msz>1 && (list[0]?ROWSM(list[0])+COLSM(list[0])!=msz:1)) {PROGERROR("wrong final partition in computation of branch-width 3");} } else if (ch>=0) { DEBUG(CURDLEV-1,"NO width 3 branch-decomposition of %p [%s] exists.\n",ee,EMNAME(em)?EMNAME(em):""); for (x=list,a=0; x?*x:0; x++) a += ROWSM(*x)+COLSM(*x); if (a!=msz) {PROGERROR("lost or extra elements in the partition list! %d!=%d",msz,a);} }
if (IFRANDDEBUGLESS(222) && ch>=0) { ematrix_transpose(ee); /* extra debug testing with pivoted dual matrix */ for (k=0; k<4; k++) { i = RANDOM()%ROWSM(ee); j = RANDOM()%COLSM(ee); if (SIGNM(ee,i,j)!=0) ematrix_pivot(ee,i,j); } k = struct_hasbwidth3_ext(-1,ee,NULL,NULL); if (k!=r) {PROGERROR("wrong result when recursively calling r%d o%d !",k,r); EMATDEBUG(0,ee," !r!\t"); EMATDEBUG(0,em," !o!\t");} } /* (ee is modified here!!!) */ #endif dispose_alist_mats(list); dispose_ematrix(ee); if (lln && lln!=llnst) FREE(lln); return r; }
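/*
 * A minimal usage sketch for struct_hasbwidth3_ext() (this wrapper is
 * hypothetical, not part of the original source): test a 3-connected
 * matrix for branch-width <=3 and print the returned decomposition.
 * The decomposition string is MSTRDUP'ed inside the call, so the
 * caller owns it and must FREE() it.
 */
int report_bwidth3(ematrix *em)
{
	char	*decomp = NULL;
	int	r = struct_hasbwidth3_ext(1, em, &decomp, NULL);

	if (r > 0 && decomp) {
		OUTPUT("width-3 decomposition of [%s]: %s\n",
				EMNAME(em)? EMNAME(em): "", decomp);
		FREE(decomp);
	}
	return r;	/* 1 = branch-width <=3 exists, -1 = it does not */
}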
/* * Enter a mapfile defined symbol into the given version * * entry: * mf - Mapfile descriptor * ms - Information related to symbol being added to version * * exit: * On success, returns true. On failure that requires an immediate * halt, returns false. * * On failure that requires eventual halt, but for which it would * be OK to continue parsing in hopes of flushing out additional * problems, increments mv->mv_errcnt, and returns true. */ bool ld_map_sym_enter(Mapfile *mf, ld_map_ver_t *mv, ld_map_sym_t *ms) { Ofl_desc *ofl = mf->mf_ofl; Elf64_Word hash; avl_index_t where; Elf64_Sym *sym; Sym_desc *sdp; const char *conflict; /* * Add the new symbol. It should be noted that all * symbols added by the mapfile start out with global * scope, thus they will fall through the normal symbol * resolution process. Symbols defined as locals will * be reduced in scope after all input file processing. */ /* LINTED */ hash = (Elf64_Word)elf_hash(ms->ms_name); //DBG_CALL(Dbg_map_version(ofl->ofl_lml, mv->mv_name, ms->ms_name, // mv->mv_scope)); /* * Make sure that any parent or external declarations fall back to * references. */ if (ms->ms_sdflags & (FLG_SY_PARENT | FLG_SY_EXTERN)) { /* * Turn it into a reference by setting the section index * to UNDEF. */ ms->ms_shndx = SHN_UNDEF; /* * It is wrong to specify size or value for an external symbol. */ if (ms->ms_value_set || (ms->ms_size != 0)) { mf_fatal0(mf, (MSG_MAP_NOEXVLSZ)); mv->mv_errcnt++; return (true); } } if ((sdp = ld_sym_find(ms->ms_name, hash, &where, ofl)) == NULL) { if ((sym = libld_calloc(sizeof (Elf64_Sym), 1)) == NULL) return (false); sym->st_shndx = (Elf64_Half)ms->ms_shndx; sym->st_value = ms->ms_value; sym->st_size = ms->ms_size; sym->st_info = ELF_ST_INFO(STB_GLOBAL, ms->ms_type); if ((sdp = ld_sym_enter(ms->ms_name, sym, hash, ld_map_ifl(mf), ofl, 0, ms->ms_shndx, ms->ms_sdflags, &where)) == (Sym_desc *)S_ERROR) return (false); sdp->sd_flags &= ~FLG_SY_CLEAN; /* * Identify any references. FLG_SY_MAPREF is * turned off once a relocatable object with * the same symbol is found, thus the existence * of FLG_SY_MAPREF at symbol validation is * used to flag undefined/misspelled entries. */ if (sym->st_shndx == SHN_UNDEF) sdp->sd_flags |= (FLG_SY_MAPREF | FLG_SY_GLOBREF); } else { conflict = NULL; sym = sdp->sd_sym; /* * If this symbol already exists, make sure this * definition doesn't conflict with the former. * Provided it doesn't, multiple definitions * from different mapfiles can augment each * other. 
*/ if (sym->st_value) { if (ms->ms_value && (sym->st_value != ms->ms_value)) conflict = (MSG_MAP_DIFF_SYMVAL); } else { sym->st_value = ms->ms_value; } if (sym->st_size) { if (ms->ms_size && (sym->st_size != ms->ms_size)) conflict = (MSG_MAP_DIFF_SYMSZ); } else { sym->st_size = ms->ms_size; } if (ELF_ST_TYPE(sym->st_info) != STT_NOTYPE) { if ((ms->ms_type != STT_NOTYPE) && (ELF_ST_TYPE(sym->st_info) != ms->ms_type)) conflict = (MSG_MAP_DIFF_SYMTYP); } else { sym->st_info = ELF_ST_INFO(STB_GLOBAL, ms->ms_type); } if (sym->st_shndx != SHN_UNDEF) { if ((ms->ms_shndx != SHN_UNDEF) && (sym->st_shndx != ms->ms_shndx)) conflict = (MSG_MAP_DIFF_SYMNDX); } else { sym->st_shndx = sdp->sd_shndx = ms->ms_shndx; } if ((sdp->sd_flags & MSK_SY_GLOBAL) && (sdp->sd_aux->sa_overndx != VER_NDX_GLOBAL) && (mv->mv_vdp->vd_ndx != VER_NDX_GLOBAL) && (sdp->sd_aux->sa_overndx != mv->mv_vdp->vd_ndx)) { conflict = (MSG_MAP_DIFF_SYMVER); } if (conflict) { mf_fatal(mf, (MSG_MAP_SYMDEF1), demangle(ms->ms_name), sdp->sd_file->ifl_name, conflict); mv->mv_errcnt++; return (true); } /* * If this mapfile entry supplies a definition, * indicate that the symbol is now used. */ if (ms->ms_shndx != SHN_UNDEF) sdp->sd_flags |= FLG_SY_MAPUSED; } /* * A symbol declaration that defines a size but no * value is processed as a request to create an * associated backing section. The intent behind this * functionality is to provide OBJT definitions within * filters that are not ABS. ABS symbols don't allow * copy-relocations to be established to filter OBJT * definitions. */ if ((ms->ms_shndx == SHN_ABS) && ms->ms_size && !ms->ms_value_set) { /* Create backing section if not there */ if (sdp->sd_isc == NULL) { Is_desc *isp; if (ms->ms_type == STT_OBJECT) { if ((isp = ld_make_data(ofl, ms->ms_size)) == (Is_desc *)S_ERROR) return (false); } else { if ((isp = ld_make_text(ofl, ms->ms_size)) == (Is_desc *)S_ERROR) return (false); } sdp->sd_isc = isp; isp->is_file = ld_map_ifl(mf); } /* * Now that backing storage has been created, * associate the symbol descriptor. Remove the * symbol's special section tag so that it will * be assigned the correct section index as part * of update symbol processing. */ sdp->sd_flags &= ~FLG_SY_SPECSEC; ms->ms_sdflags &= ~FLG_SY_SPECSEC; } /* * Indicate the new symbol's scope. Although the * symbol's st_other field will eventually be updated as * part of writing out the final symbol, update the * st_other field here to trigger better diagnostics * during symbol validation (for example, undefined * references that are defined symbolic in a mapfile). */ if (mv->mv_scope == FLG_SCOPE_HIDD) { /* * This symbol needs to be reduced to local. */ if (ofl->ofl_flags & FLG_OF_REDLSYM) { sdp->sd_flags |= (FLG_SY_HIDDEN | FLG_SY_ELIM); sdp->sd_sym->st_other = STV_ELIMINATE; } else { sdp->sd_flags |= FLG_SY_HIDDEN; sdp->sd_sym->st_other = STV_HIDDEN; } } else if (mv->mv_scope == FLG_SCOPE_ELIM) { /* * This symbol needs to be eliminated. Note, * the symbol is also tagged as local to trigger * any necessary relocation processing prior * to the symbol being eliminated. */ sdp->sd_flags |= (FLG_SY_HIDDEN | FLG_SY_ELIM); sdp->sd_sym->st_other = STV_ELIMINATE; } else { /* * This symbol is explicitly defined to remain * global. */ sdp->sd_flags |= ms->ms_sdflags; /* * Qualify any global scope. 
*/ if (mv->mv_scope == FLG_SCOPE_SNGL) { sdp->sd_flags |= (FLG_SY_SINGLE | FLG_SY_NDIR); sdp->sd_sym->st_other = STV_SINGLETON; } else if (mv->mv_scope == FLG_SCOPE_PROT) { sdp->sd_flags |= FLG_SY_PROTECT; sdp->sd_sym->st_other = STV_PROTECTED; } else if (mv->mv_scope == FLG_SCOPE_EXPT) { sdp->sd_flags |= FLG_SY_EXPORT; sdp->sd_sym->st_other = STV_EXPORTED; } else sdp->sd_flags |= FLG_SY_DEFAULT; /* * Record the present version index for later * potential versioning. */ if ((sdp->sd_aux->sa_overndx == 0) || (sdp->sd_aux->sa_overndx == VER_NDX_GLOBAL)) sdp->sd_aux->sa_overndx = mv->mv_vdp->vd_ndx; mv->mv_vdp->vd_flags |= FLG_VER_REFER; } conflict = NULL; /* * Carry out some validity checks to ensure incompatible * symbol characteristics have not been defined. * These checks are carried out after symbols are added * or resolved, to catch both single-instance and * multi-instance definition inconsistencies. */ if ((sdp->sd_flags & (FLG_SY_HIDDEN | FLG_SY_ELIM)) && ((mv->mv_scope != FLG_SCOPE_HIDD) && (mv->mv_scope != FLG_SCOPE_ELIM))) { conflict = (MSG_MAP_DIFF_SYMLCL); } else if ((sdp->sd_flags & (FLG_SY_SINGLE | FLG_SY_EXPORT)) && ((mv->mv_scope != FLG_SCOPE_DFLT) && (mv->mv_scope != FLG_SCOPE_EXPT) && (mv->mv_scope != FLG_SCOPE_SNGL))) { conflict = (MSG_MAP_DIFF_SYMGLOB); } else if ((sdp->sd_flags & FLG_SY_PROTECT) && ((mv->mv_scope != FLG_SCOPE_DFLT) && (mv->mv_scope != FLG_SCOPE_PROT))) { conflict = (MSG_MAP_DIFF_SYMPROT); } else if ((sdp->sd_flags & FLG_SY_NDIR) && (mv->mv_scope == FLG_SCOPE_PROT)) { conflict = (MSG_MAP_DIFF_PROTNDIR); } else if ((sdp->sd_flags & FLG_SY_DIR) && (mv->mv_scope == FLG_SCOPE_SNGL)) { conflict = (MSG_MAP_DIFF_SNGLDIR); } if (conflict) { /* * Select the conflict message from either a * single instance or multi-instance definition. */ if (sdp->sd_file->ifl_name == mf->mf_name) { mf_fatal(mf, (MSG_MAP_SYMDEF2), demangle(ms->ms_name), conflict); } else { mf_fatal(mf, (MSG_MAP_SYMDEF1), demangle(ms->ms_name), sdp->sd_file->ifl_name, conflict); } mv->mv_errcnt++; return (true); } /* * Indicate that this symbol has been explicitly * contributed from a mapfile. */ sdp->sd_flags |= (FLG_SY_MAPFILE | FLG_SY_EXPDEF); /* * If we've encountered a symbol definition, simulate * that an input file has been processed - this allows * things like filters to be created purely from a * mapfile. */ if (ms->ms_type != STT_NOTYPE) ofl->ofl_objscnt++; //DBG_CALL(Dbg_map_symbol(ofl, sdp)); /* * If this symbol has an associated filtee, record the * filtee string and associate the string index with the * symbol. This is used later to associate the syminfo * information with the necessary .dynamic entry. */ if (ms->ms_filtee) { Dfltr_desc * dftp; Sfltr_desc sft; size_t idx, _idx, nitems; /* * Make sure we don't duplicate any filtee * strings, and create a new descriptor if * necessary. */ idx = nitems = alist_nitems(ofl->ofl_dtsfltrs); for (ALIST_TRAVERSE(ofl->ofl_dtsfltrs, _idx, dftp)) { if ((ms->ms_dft_flag != dftp->dft_flag) || (strcmp(dftp->dft_str, ms->ms_filtee))) continue; idx = _idx; break; } if (idx == nitems) { Dfltr_desc dft; dft.dft_str = ms->ms_filtee; dft.dft_flag = ms->ms_dft_flag; dft.dft_ndx = 0; /* * The following append puts the new * item at the offset contained in * idx, because we know idx contains * the index of the next available slot. */ if (alist_append(&ofl->ofl_dtsfltrs, &dft, sizeof (Dfltr_desc), AL_CNT_OFL_DTSFLTRS) == NULL) return (false); } /* * Create a new filter descriptor for this * symbol. 
*/ sft.sft_sdp = sdp; sft.sft_idx = idx; if (alist_append(&ofl->ofl_symfltrs, &sft, sizeof (Sfltr_desc), AL_CNT_OFL_SYMFLTRS) == NULL) return (false); } return (true); }
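/*
 * Sketch of the append-if-absent idiom used for ofl_dtsfltrs above
 * (the helper name and the exact field types are assumptions made for
 * illustration). alist_append() copies the descriptor, so passing a
 * stack-allocated Dfltr_desc is safe; on success the new entry lands
 * at the index previously reported by alist_nitems().
 */
static bool
dtsfltr_enter(Ofl_desc *ofl, const char *str, Elf64_Word flag, size_t *ret_idx)
{
	Dfltr_desc	*dftp;
	Dfltr_desc	dft;
	size_t		idx;

	for (ALIST_TRAVERSE(ofl->ofl_dtsfltrs, idx, dftp)) {
		if ((dftp->dft_flag == flag) &&
		    (strcmp(dftp->dft_str, str) == 0)) {
			*ret_idx = idx;		/* reuse the existing entry */
			return (true);
		}
	}
	dft.dft_str = str;
	dft.dft_flag = flag;
	dft.dft_ndx = 0;
	*ret_idx = alist_nitems(ofl->ofl_dtsfltrs);
	return (alist_append(&ofl->ofl_dtsfltrs, &dft,
	    sizeof (Dfltr_desc), AL_CNT_OFL_DTSFLTRS) != NULL);
}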
uintptr_t ld_group_process(Is_desc *gisc, Ofl_desc *ofl) { Ifl_desc *gifl = gisc->is_file; Shdr *sshdr, *gshdr = gisc->is_shdr; Is_desc *isc; Sym *sym; const char *str; Group_desc gd; size_t ndx; int gnu_stt_section; /* * Confirm that the sh_link points to a valid section. */ if ((gshdr->sh_link == SHN_UNDEF) || (gshdr->sh_link >= gifl->ifl_shnum) || ((isc = gifl->ifl_isdesc[gshdr->sh_link]) == NULL)) { ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_FIL_INVSHLINK), gifl->ifl_name, EC_WORD(gisc->is_scnndx), gisc->is_name, EC_XWORD(gshdr->sh_link)); return (0); } if (gshdr->sh_entsize == 0) { ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_FIL_INVSHENTSIZE), gifl->ifl_name, EC_WORD(gisc->is_scnndx), gisc->is_name, EC_XWORD(gshdr->sh_entsize)); return (0); } /* * Get the associated symbol table. Sanity check the sh_info field * (which points to the signature symbol table entry) against the size * of the symbol table. */ sshdr = isc->is_shdr; sym = (Sym *)isc->is_indata->d_buf; if ((gshdr->sh_info == SHN_UNDEF) || (gshdr->sh_info >= (Word)(sshdr->sh_size / sshdr->sh_entsize)) || ((isc = gifl->ifl_isdesc[sshdr->sh_link]) == NULL)) { ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_FIL_INVSHINFO), gifl->ifl_name, EC_WORD(gisc->is_scnndx), gisc->is_name, EC_XWORD(gshdr->sh_info)); return (0); } sym += gshdr->sh_info; /* * Get the symbol name from the associated string table. */ str = (char *)isc->is_indata->d_buf; str += sym->st_name; /* * The GNU assembler can use section symbols as the signature symbol * as described by this comment in the gold linker (found via google): * * It seems that some versions of gas will create a section group * associated with a section symbol, and then fail to give a name * to the section symbol. In such a case, use the name of the * section. * * In order to support such objects, we do the same. */ gnu_stt_section = ((sym->st_name == 0) || (*str == '\0')) && (ELF_ST_TYPE(sym->st_info) == STT_SECTION); if (gnu_stt_section) str = gisc->is_name; /* * Generate a group descriptor. */ gd.gd_isc = gisc; gd.gd_oisc = NULL; gd.gd_name = str; gd.gd_data = gisc->is_indata->d_buf; gd.gd_cnt = gisc->is_indata->d_size / sizeof (Word); /* * If this group is a COMDAT group, validate the signature symbol. */ if ((gd.gd_data[0] & GRP_COMDAT) && !gnu_stt_section && ((ELF_ST_BIND(sym->st_info) == STB_LOCAL) || (sym->st_shndx == SHN_UNDEF))) { /* If section symbol, construct a printable name for it */ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) { if (gisc->is_sym_name == NULL) (void) ld_stt_section_sym_name(gisc); if (gisc->is_sym_name != NULL) str = gisc->is_sym_name; } ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_GRP_INVALSYM), gifl->ifl_name, EC_WORD(gisc->is_scnndx), gisc->is_name, str); return (0); } /* * If the signature symbol is a name generated by the GNU compiler to * refer to a header, we need sloppy relocation. */ if (is_header_gensym(str)) { if ((ofl->ofl_flags1 & FLG_OF1_NRLXREL) == 0) ofl->ofl_flags1 |= FLG_OF1_RLXREL; DBG_CALL(Dbg_sec_gnu_comdat(ofl->ofl_lml, gisc, TRUE, (ofl->ofl_flags1 & FLG_OF1_RLXREL) != 0)); } /* * Validate the section indices within the group. If this is a COMDAT * group, mark each section as COMDAT. 
*/ for (ndx = 1; ndx < gd.gd_cnt; ndx++) { Word gndx; if ((gndx = gd.gd_data[ndx]) >= gifl->ifl_shnum) { ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_GRP_INVALNDX), gifl->ifl_name, EC_WORD(gisc->is_scnndx), gisc->is_name, ndx, gndx); return (0); } if (gd.gd_data[0] & GRP_COMDAT) gifl->ifl_isdesc[gndx]->is_flags |= FLG_IS_COMDAT; } /* * If this is a COMDAT group, determine whether this group has already * been encountered, or whether this is the first instance of the group. */ if ((gd.gd_data[0] & GRP_COMDAT) && (gpavl_loaded(ofl, &gd) == S_ERROR)) return (S_ERROR); /* * Associate the group descriptor with this input file. */ if (alist_append(&(gifl->ifl_groups), &gd, sizeof (Group_desc), AL_CNT_IFL_GROUPS) == NULL) return (S_ERROR); return (1); }
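/*
 * Sketch (hypothetical helper, not in the original source): once
 * ld_group_process() has appended a Group_desc to ifl_groups, a COMDAT
 * group can later be found by its signature name with the usual
 * ALIST_TRAVERSE idiom.
 */
static Group_desc *
ifl_find_group(Ifl_desc *ifl, const char *name)
{
	Group_desc	*gdp;
	Aliste		idx;

	for (ALIST_TRAVERSE(ifl->ifl_groups, idx, gdp)) {
		if (strcmp(gdp->gd_name, name) == 0)
			return (gdp);
	}
	return (NULL);
}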
int struct_connectivity_ext(int ch, ematrix *e, int cn, int *ixcon, ematrix ***sepout) { ematrix **mat,**x,*y; int i,j,k, r,s, mr,con; if (!e) {PROGERROR("The matrix e must be given here!"); return -1;} if (sepout?*sepout:0) {PROGERROR("The list sepout, when given, must be initialized to NULL!");} if (cn==0) {PROGERROR("Do not ask about connectivity 0 - disconnected matroids have connectivity 1!");} if (ixcon) *ixcon = 1; /* one may ask for internal (cn+1)-connectivity in addition */ DEBUG(CURDLEV+1,"Computing matroid connectivity of %p [%s], cn=%d ...\n",e,EMNAME(e)?EMNAME(e):"",cn); mr = (ROWSM(e)<COLSM(e)? ROWSM(e):COLSM(e)); if (cn>mr+1) return -1; con = mr+1; /** * We separately test connectivity and 3-connectivity by faster routines. * A disconnected matroid either has a loop or a coloop, or it has no * "connected order" of columns. * A matroid that is not 3-connected is either not simple, or it has * a nonempty all-zero submatrix whose complement has rank at most 1. * Separations found this way are automatically nontrivial. **/ if (con>=mr && (cn<0 || cn>1)) { if (ematrix_linezero_any(e)>=0) con = 1; else if (struct_connorder_col(e,NULL)<0) con = 1; } if (con>=mr && (cn<0 || cn>2)) { if (ematrix_parallel_any(e)>=0) con = 2; else { mat = ematrix_submatrices_zero(e); /* (rank 0 nonempty submatrices) */ for (x=mat; (x?*x:0) && con>2; x++) { y = ematrix_refextract_xall(e,*x); if (ematrix_ismatrank_less(y,2)) con = 2; dispose_ematrix(y); } if (mat) dispose_alist_mats(mat); } } /** * If there is an exact k-separation in the matroid (whatsep==k-1), * then one of the two complementary submatrices has rank at most (k-1)/2, * so we look for it by brute force... * Only non-trivial (>=k both sides) separations are considered. * We also collect the small separations in *sepout if requested. * If ixcon!=NULL is given, then we also have to look for separations one * larger, to test internal (cn+1)-connectivity as well. * The meaning of the variables is the following: * cn is the requested connectivity (cn==-1 to determine it exactly). * con is the best upper bound on connectivity found so far (starts at rank+1). **/ if (sepout && con<3) { con = 3; k = 1; if (ixcon) *ixcon = 0; /* (no internal connectivity tested for <3) */ } else k = 3; for (; k<con; k++) if (con>=mr && (cn<0 || k<cn+(ixcon!=NULL))) { mat = ematrix_submatrices_conn(e,(k-1)/2); for (x=mat; x?*x:0; x++) if (ROWSM(e)-ROWSM(*x)+COLSM(*x)>=k && COLSM(e)+ROWSM(*x)-COLSM(*x)>=k) { y = ematrix_refextract_xrow(e,*x); s = ematrix_whatsep_bound(e,y,k); if (s<k) con = k; /* (we have found a smaller separation) */ if (s<k-1) {PROGERROR("How is it that we have found a smaller separation than before? %d<%d",s,k-1);} if (sepout && s<k) *sepout = alist_append(*sepout,y); else dispose_ematrix(y); if (ixcon && s<k) if (ROWSM(e)-ROWSM(*x)+COLSM(*x)>k && COLSM(e)+ROWSM(*x)-COLSM(*x)>k) *ixcon = 0; if (s<k && !sepout && !ixcon) break; } if (mat) dispose_alist_mats(mat); } r = (cn<0? con: (con>=cn?1:-1)); #ifndef FASTPROG /* paranoid testing of the connectivity computation: */ if (IFRANDDEBUGLESS(222) && ch>=0) { y = ematrix_copydual(e); for (k=0; k<4; k++) { /* (extra testing with pivoted matrix) */ i = RANDOM()%ROWSM(y); j = RANDOM()%COLSM(y); if (SIGNM(y,i,j)!=0) ematrix_pivot(y,i,j); } if (r!=struct_connectivity_ext(-1,y,cn,NULL,NULL)) {PROGERROR("incorrect computation of matroid connectivity, ret=%d",r);} dispose_ematrix(y); } DEBUG(CURDLEV+1," - connectivity computed %s %d\n",(cn<0?"exactly":(r>=0?"at least":"less")),con); #endif ch = i = j = k; /* (keeps the compiler quiet about otherwise-unused variables) */ return r; /* the return value (cn<0? con: (con>=cn?1:-1)) computed above */ }
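/*
 * Convenience sketch (the wrapper name is assumed; compare the
 * struct_isconnected() call used in the branch-width test above, which
 * is presumably a similar wrapper): ask only whether the matroid
 * reaches connectivity cn, discarding both the separation list and the
 * internal-connectivity flag.
 */
int struct_isconnected_sk(ematrix *e, int cn)
{
	return struct_connectivity_ext(0, e, cn, NULL, NULL) > 0;
}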