// return true if the segments that connect to the given Edge are nearly parallel bool segments_parallel( HEEdge e ) const { HEVertex endp1 = find_endpoint(e); HEVertex endp2 = find_endpoint( g[e].twin ); // find the segments HEEdge e1 = find_segment(endp1); HEEdge e2 = find_segment(endp2); e2 = g[e2].twin; // this makes the edges oriented in the same direction double dotprod = edge_dotprod(e1,e2); return fabs(dotprod)>_dot_product_threshold; }
/*
 * Fill in the caller-supplied segment definition 'def' for the segment
 * named 'seg_name' under 'handle'.  Returns FRU_SUCCESS, or the error
 * reported by find_segment() when the segment cannot be located.
 */
static fru_errno_t
frt_get_seg_def(fru_treehdl_t handle, const char *seg_name, fru_segdef_t *def)
{
	segment_t	seg;
	int		read_only = 0;
	fru_errno_t	status;

	/* Locate the segment; 'read_only' receives its protection flag. */
	status = find_segment(handle, seg_name, &read_only, &seg);
	if (status != FRU_SUCCESS)
		return (status);

	/* Copy the fixed-width name and force NUL termination. */
	(void) memcpy(def->name, seg.name, SEG_NAME_LEN);
	def->name[SEG_NAME_LEN] = '\0';

	def->desc.raw_data = seg.descriptor;
	def->size = seg.length;
	def->address = seg.offset;

	/* Non-zero protection flag means the segment is read-only. */
	def->hw_desc.field.read_only = (read_only != 0) ? 1 : 0;

	return (FRU_SUCCESS);
}
int process_client_request(char* responseBuffer, const char* requestBuffer) { char myBuffer[512]; char* curToken; char* savePtr; int charCount = 0; strcpy(myBuffer, requestBuffer); // Use strtok_r to make it thread safe curToken = strtok_r(myBuffer, "/", &savePtr); if(strstr(curToken, "CANHAZ") != NULL) { // Am I busy? // if yes -> respond with BUSY packet if(i_am_busy()) { charCount = sprintf(responseBuffer, "BUSY"); } else { char fileName[255]; char hash[41]; int segmentNumber; // save the client info for reference curToken = strtok_r(NULL, "/", &savePtr); // parse the filename strcpy(fileName, strtok_r(NULL, "/", &savePtr)); // parse the segmentnumber segmentNumber = atoi(strtok_r(NULL, "/", &savePtr)); // parse the hash strcpy(hash, strtok_r(NULL, "/", &savePtr)); // Do I have this segment? // If yes -> send the data // If No -> send a 'no' packet char* dataBuffer = find_segment(fileName, segmentNumber, hash); if( dataBuffer != NULL ) { int numCharsWritten = sprintf(responseBuffer,"HAZ/%s/START/",hash); responseBuffer += numCharsWritten; memcpy(responseBuffer, dataBuffer, SEGMENT_SIZE ); free(dataBuffer); charCount = (numCharsWritten + SEGMENT_SIZE); } else { charCount = sprintf(responseBuffer,"HAZNOT/%s/%i/%s/",fileName, segmentNumber, hash); } } } else { printf("Another client sent a request that I cannot process.\n"); } //account for a final terminating character charCount++; return charCount; }
/*
 * Locate the ink segment bracketing 'inkval' (via find_segment, which
 * fills in *lower and *upper) and return the 16-bit interpolation point
 * of inkval within that segment: 0 at or below the lower bound, 65535 at
 * or above the upper bound, linear in between.
 */
static inline int
find_segment_and_ditherpoint(stpi_dither_channel_t *dc, unsigned inkval,
			     stpi_ink_defn_t *lower, stpi_ink_defn_t *upper)
{
  unsigned span, offset;

  find_segment(dc, inkval, lower, upper);

  /* Clamp outside the segment; these guards also make the division safe
   * when both range endpoints coincide. */
  if (inkval <= lower->range)
    return 0;
  if (inkval >= upper->range)
    return 65535;

  /* Linear interpolation scaled to 16 bits. */
  span = upper->range - lower->range;
  offset = inkval - lower->range;
  return (65535u * offset) / span;
}
/* Gets the schedule for the core with index 'core_index' at time 'time'
 * in the current global TDMA schedule. */
core_sched_p getCoreSchedule( uint core_index, ull time )
{
  const sched_p glob_sched = getSchedule();
  assert(glob_sched && core_index < glob_sched->n_cores &&
      "Internal error: Invalid data structures!" );

  /* Multi-segment schedule types need a segment lookup for 'time';
   * SCHED_TYPE_1 schedules have exactly the single segment in slot 0. */
  segment_p cur_seg;
  if ( glob_sched->type != SCHED_TYPE_1 ) {
    cur_seg = find_segment( glob_sched->seg_list,
                            glob_sched->n_segments, time );
  } else {
    cur_seg = glob_sched->seg_list[0];
  }

  /* Return the correct schedule entry for the requested core. */
  core_sched_p core_schedule = cur_seg->per_core_sched[core_index];
  assert(core_schedule && "Internal error: Invalid data structures!" );
  return core_schedule;
}
/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 *
 * NOTE(review): this is the Rel-style (implicit addend, i386) relocation
 * engine of the runtime linker.  'plt' non-zero means we are promoting an
 * RTLD_LAZY object to RTLD_NOW and only the .plt relocations are scanned.
 * Returns 0 on hard failure, otherwise the result of relocate_finish().
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
	ulong_t		_pltbgn, _pltend;
	ulong_t		dsymndx, roffset, rsymndx, psymndx = 0;
	uchar_t		rtype;
	long		value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	Syminfo		*sip;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rel		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scaning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;
		Sresult	sr;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup, and symbol result, data
		 * structures.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
		SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));

		if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
			return (1);

		symdef = sr.sr_sym;
		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;
	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and end
		 * of this table.  There are two different interpretations of
		 * the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL and
		 *	PLTRELSZ entries.
		 *
		 * Determine from our knowledege of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations, the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table, however if we're processing a shared
	 * object in lazy mode the .got addresses associated to each .plt must
	 * be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	sip = SYMINFO(lmp);

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;

		rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).
		 */
		if ((rtype == R_386_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp,
				    textrel, 0);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel, 0);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rel *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rel *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_386_JMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				relbgn = elf_reloc_relative_count(relbgn,
				    plthint, relsiz, basebgn, lmp, textrel, 0);
				plthint = 0;
				continue;
			}
			roffset += basebgn;
		}

		rsymndx = ELF_R_SYM(((Rel *)relbgn)->r_info);
		rel = (Rel *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_386_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
		    (*(ulong_t *)roffset > _pltend)))
			continue;

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		binfo = 0;

		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * If a Syminfo section is provided, determine if this
			 * symbol is deferred, and if so, skip this relocation.
			 */
			if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
			    textrel, sip, rsymndx))
				continue;

			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = NULL;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_386_TLS_DTPMOD32) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);
				} else if (rtype == R_386_TLS_TPOFF) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol, our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_386_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					/*
					 * NOTE(review): dsymndx here retains
					 * its value from the previous lookup
					 * pass -- it is only assigned in the
					 * lookup branch below.
					 */
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					Sresult		sr;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup, and
					 * symbol result, data structures.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);
					SRESULT_INIT(sr, name);
					symdef = NULL;

					if (lookup_sym(&sl, &sr, &binfo,
					    in_nfavl)) {
						name = (char *)sr.sr_name;
						_lmp = sr.sr_dmap;
						symdef = sr.sr_sym;
					}

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_386_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_386_TLS_DTPMOD32) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);
				} else if (rtype == R_386_TLS_TPOFF) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_386_TLS_DTPMOD32) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;
			name = NULL;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, 0, name));

		/*
		 * Make sure the segment is writable.
		 */
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_386_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_386_JMP_SLOT:
			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Word *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}
/*
 * Sort an array of boundary segments into contiguous groups, emitting the
 * sorted segments through make_seg() and returning a freshly allocated copy
 * of the accumulated result.  Each group is terminated by a (-1,-1,-1,-1)
 * marker segment.
 *
 * NOTE(review): relies on file-scope state -- num_segs, tmp_segs, make_seg()
 * and find_segment() -- presumably populated by a preceding boundary pass;
 * confirm against the rest of this file.  The caller owns the returned
 * g_new() array; returns NULL when nothing was produced.
 */
BoundSeg *
sort_boundary (BoundSeg *segs, gint ns, gint *num_groups)
{
  gint i;
  gint index;
  gint x, y;
  gint startx, starty;
  /* NOTE(review): tests the global num_segs, not the 'ns' parameter --
   * looks intentional (global reflects the previous pass) but verify. */
  gint empty = (num_segs == 0);
  BoundSeg *new_segs;

  index = 0;
  new_segs = NULL;

  /* Reset the visited flag on every input segment. */
  for (i = 0; i < ns; i++)
    segs[i].visited = false;

  num_segs = 0;
  *num_groups = 0;

  while (! empty)
    {
      empty = true;

      /* find the index of a non-visited segment to start a group */
      for (i = 0; i < ns; i++)
        if (segs[i].visited == false)
          {
            index = i;
            empty = false;
            i = ns;   /* terminate the scan early */
          }

      if (! empty)
        {
          /* Emit the group's first segment, then walk the chain of
           * connected segments from its far endpoint. */
          make_seg (segs[index].x1, segs[index].y1,
                    segs[index].x2, segs[index].y2,
                    segs[index].open);

          segs[index].visited = true;

          startx = segs[index].x1;
          starty = segs[index].y1;
          x = segs[index].x2;
          y = segs[index].y2;

          while ((index = find_segment (segs, ns, x, y)) != -1)
            {
              /* make sure ordering is correct: emit the segment with the
               * endpoint matching (x,y) first, possibly reversed */
              if (x == segs[index].x1 && y == segs[index].y1)
                {
                  make_seg (segs[index].x1, segs[index].y1,
                            segs[index].x2, segs[index].y2,
                            segs[index].open);
                  x = segs[index].x2;
                  y = segs[index].y2;
                }
              else
                {
                  make_seg (segs[index].x2, segs[index].y2,
                            segs[index].x1, segs[index].y1,
                            segs[index].open);
                  x = segs[index].x1;
                  y = segs[index].y1;
                }

              segs[index].visited = true;
            }

          /* A well-formed group ends back at its starting point. */
          if (x != startx || y != starty)
            g_message ("sort_boundary(): Unconnected boundary group!");

          /* Mark the end of a group */
          *num_groups = *num_groups + 1;
          make_seg (-1, -1, -1, -1, 0);
        }
    }

  /* Make a copy of the boundary accumulated in the tmp_segs scratch array. */
  if (num_segs)
    {
      new_segs = g_new (BoundSeg, num_segs);
      memcpy (new_segs, tmp_segs, (sizeof (BoundSeg) * num_segs));
    }

  /* Return the new boundary */
  return new_segs;
}
/**
 * boundary_sort:
 * @segs: unsorted input segs.
 * @num_segs: number of input segs
 * @num_groups: number of groups in the sorted segs
 *
 * This function takes an array of #BoundSeg's as returned by
 * boundary_find() and sorts it by contiguous groups. The returned
 * array contains markers consisting of -1 coordinates and is
 * @num_groups elements longer than @segs.
 *
 * Return value: the sorted segs
 **/
BoundSeg *
boundary_sort (const BoundSeg *segs,
               gint            num_segs,
               gint           *num_groups)
{
  Boundary        *boundary;
  const BoundSeg **segs_ptrs_by_xy1;
  const BoundSeg **segs_ptrs_by_xy2;
  gint             index;
  gint             x, y;
  gint             startx, starty;

  g_return_val_if_fail ((segs == NULL && num_segs == 0) ||
                        (segs != NULL && num_segs > 0), NULL);
  g_return_val_if_fail (num_groups != NULL, NULL);

  *num_groups = 0;

  if (num_segs == 0)
    return NULL;

  /* prepare arrays with BoundSeg pointers sorted by xy1 and xy2 accordingly */
  segs_ptrs_by_xy1 = g_new (const BoundSeg *, num_segs);
  segs_ptrs_by_xy2 = g_new (const BoundSeg *, num_segs);

  for (index = 0; index < num_segs; index++)
    {
      segs_ptrs_by_xy1[index] = segs + index;
      segs_ptrs_by_xy2[index] = segs + index;
    }

  qsort (segs_ptrs_by_xy1, num_segs, sizeof (BoundSeg *),
         (GCompareFunc) cmp_segptr_xy1_addr);
  qsort (segs_ptrs_by_xy2, num_segs, sizeof (BoundSeg *),
         (GCompareFunc) cmp_segptr_xy2_addr);

  /* Clear the visited flags.  The const is cast away here (and below)
   * because 'visited' is mutable bookkeeping on otherwise-const input. */
  for (index = 0; index < num_segs; index++)
    ((BoundSeg *) segs)[index].visited = FALSE;

  boundary = boundary_new (NULL);

  for (index = 0; index < num_segs; index++)
    {
      const BoundSeg *cur_seg;

      if (segs[index].visited)
        continue;

      /* Start a new group at the first unvisited segment. */
      boundary_add_seg (boundary,
                        segs[index].x1, segs[index].y1,
                        segs[index].x2, segs[index].y2,
                        segs[index].open);

      ((BoundSeg *) segs)[index].visited = TRUE;

      startx = segs[index].x1;
      starty = segs[index].y1;
      x = segs[index].x2;
      y = segs[index].y2;

      /* Follow the chain of segments connected at (x,y), using the two
       * sorted pointer arrays for the endpoint lookup. */
      while ((cur_seg = find_segment (segs_ptrs_by_xy1, segs_ptrs_by_xy2,
                                      num_segs, x, y)) != NULL)
        {
          /* make sure ordering is correct */
          if (x == cur_seg->x1 && y == cur_seg->y1)
            {
              boundary_add_seg (boundary,
                                cur_seg->x1, cur_seg->y1,
                                cur_seg->x2, cur_seg->y2,
                                cur_seg->open);
              x = cur_seg->x2;
              y = cur_seg->y2;
            }
          else
            {
              boundary_add_seg (boundary,
                                cur_seg->x2, cur_seg->y2,
                                cur_seg->x1, cur_seg->y1,
                                cur_seg->open);
              x = cur_seg->x1;
              y = cur_seg->y1;
            }

          ((BoundSeg *) cur_seg)->visited = TRUE;
        }

      /* A well-formed group closes back on its starting point. */
      if (G_UNLIKELY (x != startx || y != starty))
        g_warning ("sort_boundary(): Unconnected boundary group!");

      /* Mark the end of a group */
      *num_groups = *num_groups + 1;
      boundary_add_seg (boundary, -1, -1, -1, -1, 0);
    }

  g_free (segs_ptrs_by_xy1);
  g_free (segs_ptrs_by_xy2);

  return boundary_free (boundary, FALSE);
}
/*
 * Move data.  Apply sparse initialization to data in zeroed bss.
 *
 * NOTE(review): walks the object's move-record table (MOVETAB), writes the
 * recorded values at the computed target addresses, and returns 1 on
 * success or 0 on failure (unwritable segment or list-append failure).
 * 'alp' is file-scope state accumulating addresses of moved symbols for
 * backward-compatibility copy-relocation processing -- confirm its
 * declaration elsewhere in this file.
 */
int
move_data(Rt_map *lmp, APlist **textrel)
{
	Lm_list		*lml = LIST(lmp);
	Move		*mv = MOVETAB(lmp);
	ulong_t		num, mvnum = MOVESZ(lmp) / MOVEENT(lmp);
	int		moves;

	/*
	 * If these records are against the executable, and the executable was
	 * built prior to Solaris 8, keep track of the move record symbol.  See
	 * comment in analyze.c:lookup_sym_interpose() in regards Solaris 8
	 * objects and DT_FLAGS.
	 */
	moves = (lmp == lml->lm_head) &&
	    ((FLAGS1(lmp) & FL1_RT_DTFLAGS) == 0);

	DBG_CALL(Dbg_move_data(lmp));
	for (num = 0; num < mvnum; num++, mv++) {
		mmapobj_result_t	*mpp;
		Addr			addr, taddr;
		Half			rep, repno, stride;
		Sym			*sym;

		if ((sym = (Sym *)SYMTAB(lmp) + ELF_M_SYM(mv->m_info)) == 0)
			continue;

		/* m_stride is the gap between elements; step is stride+1. */
		stride = mv->m_stride + 1;
		addr = sym->st_value;

		/*
		 * Determine the move data target, and verify the address is
		 * writable.
		 */
		if ((FLAGS(lmp) & FLG_RT_FIXED) == 0)
			addr += ADDR(lmp);
		taddr = addr + mv->m_poffset;

		if ((mpp = find_segment((caddr_t)taddr, lmp)) == NULL) {
			elf_move_bad(lml, lmp, sym, num, taddr);
			continue;
		}
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		DBG_CALL(Dbg_move_entry2(lml, mv, sym->st_name,
		    (const char *)(sym->st_name + STRTAB(lmp))));

		/* Replicate the value m_repeat times at the recorded size. */
		for (rep = 0, repno = 0; rep < mv->m_repeat; rep++) {
			DBG_CALL(Dbg_move_expand(lml, mv, taddr));

			switch (ELF_M_SIZE(mv->m_info)) {
			case 1:
				*((char *)taddr) = (char)mv->m_value;
				taddr += stride;
				repno++;
				break;
			case 2:
				/* LINTED */
				*((Half *)taddr) = (Half)mv->m_value;
				taddr += 2 * stride;
				repno++;
				break;
			case 4:
				/* LINTED */
				*((Word *)taddr) = (Word)mv->m_value;
				taddr += 4 * stride;
				repno++;
				break;
			case 8:
				/* LINTED */
				*((unsigned long long *)taddr) = mv->m_value;
				taddr += 8 * stride;
				repno++;
				break;
			default:
				eprintf(lml, ERR_NONE, MSG_INTL(MSG_MOVE_ERR1));
				break;
			}
		}

		/*
		 * If any move records have been applied to this symbol, retain
		 * the symbol address if required for backward compatibility
		 * copy relocation processing.
		 */
		if (moves && repno &&
		    (aplist_append(&alp, (void *)addr, AL_CNT_MOVES) == NULL))
			return (0);
	}

	/*
	 * Binaries built in the early 1990's prior to Solaris 8, using the ild
	 * incremental linker are known to have zero filled move sections
	 * (presumably place holders for new, incoming move sections).  If no
	 * move records have been processed, remove the move identifier to
	 * optimize the amount of backward compatibility copy relocation
	 * processing that is needed.
	 */
	if (moves && (alp == NULL))
		FLAGS(lmp) &= ~FLG_RT_MOVE;

	return (1);
}
/*
 * Allocate a heap object of 'objsize' payload bytes.
 *
 * Small objects come from the size-class allocation pointers; oversized
 * requests fall back to sml_obj_malloc().  On exhaustion the allocator
 * tries, in order: the segment's free bitmap, another segment, a major GC,
 * and finally heap extension, before giving up with sml_fatal().
 *
 * NOTE(review): the fast path (free bit available) deliberately skips
 * sml_save_frame_pointer(); every path that may run the GC or allocate
 * externally saves it first.
 */
SML_PRIMITIVE void *
sml_alloc(unsigned int objsize, void *frame_pointer)
{
	size_t alloc_size;
	unsigned int blocksize_log2;
	struct alloc_ptr *ptr;
	void *obj;

	/* ensure that alloc_size is at least BLOCKSIZE_MIN. */
	alloc_size = ALIGNSIZE(OBJ_HEADER_SIZE + objsize, BLOCKSIZE_MIN);

	/* Oversized objects are malloc'ed outside the segmented heap. */
	if (alloc_size > BLOCKSIZE_MAX) {
		GCSTAT_ALLOC_COUNT(malloc, 0, alloc_size);
		sml_save_frame_pointer(frame_pointer);
		return sml_obj_malloc(alloc_size);
	}

	/* Pick the power-of-two size class for this request. */
	blocksize_log2 = CEIL_LOG2(alloc_size);
	ASSERT(BLOCKSIZE_MIN_LOG2 <= blocksize_log2
	       && blocksize_log2 <= BLOCKSIZE_MAX_LOG2);

	ptr = &ALLOC_PTR_SET()->alloc_ptr[blocksize_log2];

	/* Fast path: the next block in this size class is free. */
	if (!BITPTR_TEST(ptr->freebit)) {
		GCSTAT_ALLOC_COUNT(fast, blocksize_log2, alloc_size);
		BITPTR_INC(ptr->freebit);
		obj = ptr->free;
		ptr->free += ptr->blocksize_bytes;
		goto alloced;
	}

	sml_save_frame_pointer(frame_pointer);

	/* Slow path: scan the current segment's bitmap for a free block. */
	if (ptr->free != NULL) {
		obj = find_bitmap(ptr);
		if (obj) goto alloced;
	}
	/* Try another segment for this size class. */
	obj = find_segment(ptr);
	if (obj) goto alloced;

	/* Still nothing: collect, then retry; extend the heap as last resort. */
	GCSTAT_TRIGGER(blocksize_log2);
	do_gc(MAJOR);
	obj = find_segment(ptr);
	if (obj) goto alloced_major;

	extend_heap(heap_space.extend_step);
	obj = find_segment(ptr);
	if (obj) goto alloced_major;

	sml_fatal(0, "heap exceeded: intended to allocate %u bytes.",
		  ptr->blocksize_bytes);

 alloced_major:
	ASSERT(check_newobj(obj));
	/* NOTE: sml_run_finalizer may cause garbage collection. */
	obj = sml_run_finalizer(obj);
	goto finished;
 alloced:
	ASSERT(check_newobj(obj));
 finished:
	/* Fresh objects start with a zeroed header. */
	OBJ_HEADER(obj) = 0;
	return obj;
}