static void aoutg_init(FILE *fp, efunc errfunc, ldfunc ldef, evalfunc eval)
{
    aoutfp = fp;
    error = errfunc;
    evaluate = eval;
    (void)ldef;                 /* placate optimisers */
    stext.data = saa_init(1L);
    stext.head = NULL;
    stext.tail = &stext.head;
    sdata.data = saa_init(1L);
    sdata.head = NULL;
    sdata.tail = &sdata.head;
    stext.len = stext.size = sdata.len = sdata.size = sbss.len = 0;
    stext.nrelocs = sdata.nrelocs = 0;
    stext.gsyms = sdata.gsyms = sbss.gsyms = NULL;
    stext.index = seg_alloc();
    sdata.index = seg_alloc();
    sbss.index = seg_alloc();
    stext.asym = sdata.asym = sbss.asym = NULL;
    syms = saa_init((long)sizeof(struct Symbol));
    nsyms = 0;
    bsym = raa_init();
    strs = saa_init(1L);
    strslen = 0;
    fwds = NULL;
}
static void as86_init(FILE *fp, efunc errfunc, ldfunc ldef, evalfunc eval)
{
    as86fp = fp;
    error = errfunc;
    (void)ldef;                 /* placate optimisers */
    stext.data = saa_init(1L);
    stext.datalen = 0L;
    stext.head = stext.last = NULL;
    stext.tail = &stext.head;
    sdata.data = saa_init(1L);
    sdata.datalen = 0L;
    sdata.head = sdata.last = NULL;
    sdata.tail = &sdata.head;
    bsslen = stext.len = stext.datalen = stext.size =
        sdata.len = sdata.datalen = sdata.size = 0;
    stext.index = seg_alloc();
    sdata.index = seg_alloc();
    bssindex = seg_alloc();
    syms = saa_init((long)sizeof(struct Symbol));
    nsyms = 0;
    bsym = raa_init();
    strs = saa_init(1L);
    strslen = 0;
    as86_add_string(as86_module);
}
static void as86_init(void)
{
    stext.data = saa_init(1L);
    stext.datalen = 0L;
    stext.head = stext.last = NULL;
    stext.tail = &stext.head;
    sdata.data = saa_init(1L);
    sdata.datalen = 0L;
    sdata.head = sdata.last = NULL;
    sdata.tail = &sdata.head;
    bsslen = stext.len = stext.datalen = stext.size =
        sdata.len = sdata.datalen = sdata.size = 0;
    stext.index = seg_alloc();
    sdata.index = seg_alloc();
    bssindex = seg_alloc();
    syms = saa_init((int32_t)sizeof(struct Symbol));
    nsyms = 0;
    bsym = raa_init();
    strs = saa_init(1L);
    strslen = 0;

    /* as86 module name = input file minus extension */
    as86_add_string(filename_set_extension(inname, ""));
}
static long dbg_section_names(char *name, int pass, int *bits)
{
    int seg;

    /*
     * We must have an initial default: let's make it 16.
     */
    if (!name)
        *bits = 16;

    if (!name)
        fprintf(dbgf, "section_name on init: returning %d\n",
                seg = seg_alloc());
    else {
        int n = strcspn(name, " \t");
        char *sname = nasm_strndup(name, n);
        struct Section *s;

        seg = NO_SEG;
        for (s = dbgsect; s; s = s->next)
            if (!strcmp(s->name, sname))
                seg = s->number;

        if (seg == NO_SEG) {
            s = nasm_malloc(sizeof(*s));
            s->name = sname;
            s->number = seg = seg_alloc();
            s->next = dbgsect;
            dbgsect = s;
            fprintf(dbgf, "section_name %s (pass %d): returning %d\n",
                    name, pass, seg);
        }
    }
    return seg;
}
static void rdf_init(FILE *fp, efunc errfunc, ldfunc ldef, evalfunc eval)
{
    ofile = fp;
    error = errfunc;
    seg[0] = newmembuf();
    seg[1] = newmembuf();
    header = newmembuf();
    segtext = seg_alloc();
    segdata = seg_alloc();
    segbss = seg_alloc();
    if (segtext != 0 || segdata != 2 || segbss != 4)
        error(ERR_PANIC,
              "rdf segment numbers not allocated as expected (%d,%d,%d)",
              segtext, segdata, segbss);
    bsslength = 0;
}
static int coff_make_section(char *name, unsigned long flags)
{
    struct Section *s;

    s = nasm_malloc(sizeof(*s));

    if (flags != BSS_FLAGS)
        s->data = saa_init(1L);
    else
        s->data = NULL;
    s->head = NULL;
    s->tail = &s->head;
    s->len = 0;
    s->nrelocs = 0;
    if (!strcmp(name, ".text"))
        s->index = def_seg;
    else
        s->index = seg_alloc();
    strncpy(s->name, name, 8);
    s->name[8] = '\0';
    s->flags = flags;

    if (nsects >= sectlen)
        sects = nasm_realloc(sects, (sectlen += SECT_DELTA) * sizeof(*sects));
    sects[nsects++] = s;

    return nsects - 1;
}
void seg_effect_demo (void)
{
    extern segbits_t *seg_writable_page, *seg_visible_page;
    segbits_t *src, *dst;
    U8 bit0 = 0;

    seg_alloc ();
    seg_apply = seg_overlay1;
    seg_overlay_data = 0;
    seg_overlay_mask = 0x5555;
    src = seg_visible_page;
    dst = seg_writable_page;
    for (;;)
    {
        dbprintf ("Mask = %04lX\n", seg_overlay_mask);
        dbprintf ("dst = %p src = %p\n", dst, seg_visible_page);
        seg_apply_init (dst, src, sizeof (seg_page_t));
        seg_apply_loop ();
        seg_writable_page = seg_visible_page;
        seg_overlay_mask <<= 1;
        seg_overlay_mask |= bit0;
        bit0 ^= 1;
        task_sleep (TIME_100MS);
    }
}
static int elf_make_section(char *name, int type, int flags, int align)
{
    struct Section *s;

    s = nasm_malloc(sizeof(*s));

    if (type != SHT_NOBITS)
        s->data = saa_init(1L);
    else
        s->data = NULL;         /* defensive: no data SAA for NOBITS sections */
    s->head = NULL;
    s->tail = &s->head;
    s->len = s->size = 0;
    s->nrelocs = 0;
    if (!strcmp(name, ".text"))
        s->index = def_seg;
    else
        s->index = seg_alloc();
    add_sectname("", name);
    s->name = nasm_malloc(1 + strlen(name));
    strcpy(s->name, name);
    s->type = type;
    s->flags = flags;
    s->align = align;
    s->gsyms = NULL;

    if (nsects >= sectlen)
        sects = nasm_realloc(sects, (sectlen += SECT_DELTA) * sizeof(*sects));
    sects[nsects++] = s;

    return nsects - 1;
}
static void aoutb_init(FILE *fp, efunc errfunc, ldfunc ldef, evalfunc eval)
{
    bsd = TRUE;
    aoutg_init(fp, errfunc, ldef, eval);

    is_pic = 0x00;              /* may become 0x40 */

    aout_gotpc_sect = seg_alloc();
    ldef("..gotpc", aout_gotpc_sect + 1, 0L, NULL, FALSE, FALSE, &of_aoutb, error);
    aout_gotoff_sect = seg_alloc();
    ldef("..gotoff", aout_gotoff_sect + 1, 0L, NULL, FALSE, FALSE, &of_aoutb, error);
    aout_got_sect = seg_alloc();
    ldef("..got", aout_got_sect + 1, 0L, NULL, FALSE, FALSE, &of_aoutb, error);
    aout_plt_sect = seg_alloc();
    ldef("..plt", aout_plt_sect + 1, 0L, NULL, FALSE, FALSE, &of_aoutb, error);
    aout_sym_sect = seg_alloc();
    ldef("..sym", aout_sym_sect + 1, 0L, NULL, FALSE, FALSE, &of_aoutb, error);
}
static void coff_gen_init(FILE *fp, efunc errfunc)
{
    coffp = fp;
    error = errfunc;
    sects = NULL;
    nsects = sectlen = 0;
    syms = saa_init((long)sizeof(struct Symbol));
    nsyms = 0;
    bsym = raa_init();
    symval = raa_init();
    strs = saa_init(1L);
    strslen = 0;
    def_seg = seg_alloc();
}
static void as86_init(void)
{
    stext.data = saa_init(1L);
    stext.datalen = 0L;
    stext.head = stext.last = NULL;
    stext.tail = &stext.head;
    sdata.data = saa_init(1L);
    sdata.datalen = 0L;
    sdata.head = sdata.last = NULL;
    sdata.tail = &sdata.head;
    bsslen = stext.len = stext.datalen = stext.size =
        sdata.len = sdata.datalen = sdata.size = 0;
    stext.index = seg_alloc();
    sdata.index = seg_alloc();
    bssindex = seg_alloc();
    syms = saa_init((int32_t)sizeof(struct Symbol));
    nsyms = 0;
    bsym = raa_init();
    strs = saa_init(1L);
    strslen = 0;
    as86_add_string(as86_module);
}
/**
 * Execute a segment-style transition effect.
 */
static void seg_do_transition (void)
{
    seg_page_t *seg_final_page;
    seg_page_t *tmp;
    U8 iteration;

    /* Save pointer to the final page -- that which will ultimately be
    displayed.  This is the source page for all updates during the
    transition. */
    seg_final_page = seg_writable_page;

    /* Allocate a new page, kept in seg_writable_page, and initialize it
    with the current visible page.  This is the destination page for
    all updates. */
    seg_alloc ();
    memcpy (seg_writable_page, seg_visible_page, sizeof (seg_page_t));

    /* Invoke the constructor */
    if (seg_transition->init)
        seg_transition->init ();

    iteration = 0;
    while (seg_in_transition && iteration < 255)
    {
        /* Delay */
        task_sleep (seg_transition->delay);

        /* Do a partial update */
        seg_in_transition = seg_transition->update (seg_final_page, iteration);
        iteration++;

        /* Make the current destination page visible, and allocate a new
        one for the next iteration.  This can be done by a swap of the
        old page and new page pointers */
        tmp = seg_visible_page;
        seg_visible_page = seg_writable_page;
        seg_writable_page = tmp;
    }

    seg_transition = NULL;
}
static void elf_init(FILE *fp, efunc errfunc, ldfunc ldef, evalfunc eval)
{
    elffp = fp;
    error = errfunc;
    evaluate = eval;
    (void)ldef;                 /* placate optimisers */
    sects = NULL;
    nsects = sectlen = 0;
    syms = saa_init((long)sizeof(struct Symbol));
    nlocals = nglobs = 0;
    bsym = raa_init();
    strs = saa_init(1L);
    saa_wbytes(strs, "\0", 1L);
    saa_wbytes(strs, elf_module, (long)(strlen(elf_module) + 1));
    strslen = 2 + strlen(elf_module);
    shstrtab = NULL;
    shstrtablen = shstrtabsize = 0;
    add_sectname("", "");

    fwds = NULL;

    elf_gotpc_sect = seg_alloc();
    ldef("..gotpc", elf_gotpc_sect + 1, 0L, NULL, FALSE, FALSE, &of_elf, error);
    elf_gotoff_sect = seg_alloc();
    ldef("..gotoff", elf_gotoff_sect + 1, 0L, NULL, FALSE, FALSE, &of_elf, error);
    elf_got_sect = seg_alloc();
    ldef("..got", elf_got_sect + 1, 0L, NULL, FALSE, FALSE, &of_elf, error);
    elf_plt_sect = seg_alloc();
    ldef("..plt", elf_plt_sect + 1, 0L, NULL, FALSE, FALSE, &of_elf, error);
    elf_sym_sect = seg_alloc();
    ldef("..sym", elf_sym_sect + 1, 0L, NULL, FALSE, FALSE, &of_elf, error);

    def_seg = seg_alloc();
}
/********************************* UTILITIES ************************/

static int
expand_table(HTAB *hashp)
{
    HHDR       *hctl;
    SEGMENT     old_seg, new_seg;
    long        old_bucket, new_bucket;
    long        new_segnum, new_segndx;
    long        old_segnum, old_segndx;
    ELEMENT    *chain;
    BUCKET_INDEX *old, *newbi;
    register BUCKET_INDEX chainIndex, nextIndex;

#ifdef HASH_STATISTICS
    hash_expansions++;
#endif

    hctl = hashp->hctl;
    new_bucket = ++hctl->max_bucket;
    old_bucket = (hctl->max_bucket & hctl->low_mask);

    new_segnum = new_bucket >> hctl->sshift;
    new_segndx = MOD(new_bucket, hctl->ssize);

    if (new_segnum >= hctl->nsegs)
    {
        /* Allocate new segment if necessary */
        if (new_segnum >= hctl->dsize)
            dir_realloc(hashp);

        if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
            return (0);
        hctl->nsegs++;
    }

    if (new_bucket > hctl->high_mask)
    {
        /* Starting a new doubling */
        hctl->low_mask = hctl->high_mask;
        hctl->high_mask = new_bucket | hctl->low_mask;
    }

    /*
     * Relocate records to the new bucket
     */
    old_segnum = old_bucket >> hctl->sshift;
    old_segndx = MOD(old_bucket, hctl->ssize);

    old_seg = GET_SEG(hashp, old_segnum);
    new_seg = GET_SEG(hashp, new_segnum);

    old = &old_seg[old_segndx];
    newbi = &new_seg[new_segndx];
    for (chainIndex = *old;
         chainIndex != INVALID_INDEX;
         chainIndex = nextIndex)
    {
        chain = GET_BUCKET(hashp, chainIndex);
        nextIndex = chain->next;
        if (call_hash(hashp, (char *) &(chain->key), hctl->keysize)
            == old_bucket)
        {
            *old = chainIndex;
            old = &chain->next;
        }
        else
        {
            *newbi = chainIndex;
            newbi = &chain->next;
        }
        chain->next = INVALID_INDEX;
    }
    return (1);
}
static int
init_htab(HTAB *hashp, int nelem)
{
    register SEG_OFFSET *segp;
    register int nbuckets;
    register int nsegs;
    int         l2;
    HHDR       *hctl;

    hctl = hashp->hctl;

    /*
     * Divide number of elements by the fill factor and determine a
     * desired number of buckets.  Allocate space for the next greater
     * power of two number of buckets
     */
    nelem = (nelem - 1) / hctl->ffactor + 1;

    l2 = my_log2(nelem);
    nbuckets = 1 << l2;

    hctl->max_bucket = hctl->low_mask = nbuckets - 1;
    hctl->high_mask = (nbuckets << 1) - 1;

    nsegs = (nbuckets - 1) / hctl->ssize + 1;
    nsegs = 1 << my_log2(nsegs);

    if (nsegs > hctl->dsize)
        hctl->dsize = nsegs;

    /* Use two low order bits of points ???? */
    /*
     * if ( !(hctl->mem = bit_alloc ( nbuckets )) ) return(-1);
     * if ( !(hctl->mod = bit_alloc ( nbuckets )) ) return(-1);
     */

    /* allocate a directory */
    if (!(hashp->dir))
    {
        hashp->dir = (SEG_OFFSET *) hashp->alloc(hctl->dsize * sizeof(SEG_OFFSET));
        if (!hashp->dir)
            return (-1);
    }

    /* Allocate initial segments */
    for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
    {
        *segp = seg_alloc(hashp);
        if (*segp == (SEG_OFFSET) 0)
        {
            hash_destroy(hashp);
            return (0);
        }
    }

#if HASH_DEBUG
    fprintf(stderr,
            "%s\n%s%x\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
            "init_htab:",
            "TABLE POINTER   ", hashp,
            "BUCKET SIZE     ", hctl->bsize,
            "BUCKET SHIFT    ", hctl->bshift,
            "DIRECTORY SIZE  ", hctl->dsize,
            "SEGMENT SIZE    ", hctl->ssize,
            "SEGMENT SHIFT   ", hctl->sshift,
            "FILL FACTOR     ", hctl->ffactor,
            "MAX BUCKET      ", hctl->max_bucket,
            "HIGH MASK       ", hctl->high_mask,
            "LOW MASK        ", hctl->low_mask,
            "NSEGS           ", hctl->nsegs,
            "NKEYS           ", hctl->nkeys);
#endif
    return (0);
}
/*
 * segment registry
 */
static int32_t ieee_segment(char *name, int pass, int *bits)
{
    /*
     * We call the label manager here to define a name for the new
     * segment, and when our _own_ label-definition stub gets
     * called in return, it should register the new segment name
     * using the pointer it gets passed. That way we save memory,
     * by sponging off the label manager.
     */
    if (!name) {
        *bits = 16;
        if (!any_segs)
            return 0;
        return seghead->index;
    } else {
        struct ieeeSection *seg;
        int ieee_idx, attrs;
        bool rn_error;
        char *p;

        /*
         * Look for segment attributes.
         */
        attrs = 0;
        while (*name == '.')
            name++;             /* hack, but a documented one */
        p = name;
        while (*p && !nasm_isspace(*p))
            p++;
        if (*p) {
            *p++ = '\0';
            while (*p && nasm_isspace(*p))
                *p++ = '\0';
        }
        while (*p) {
            while (*p && !nasm_isspace(*p))
                p++;
            if (*p) {
                *p++ = '\0';
                while (*p && nasm_isspace(*p))
                    *p++ = '\0';
            }
            attrs++;
        }

        ieee_idx = 1;
        for (seg = seghead; seg; seg = seg->next) {
            ieee_idx++;
            if (!strcmp(seg->name, name)) {
                if (attrs > 0 && pass == 1)
                    nasm_error(ERR_WARNING, "segment attributes specified on"
                               " redeclaration of segment: ignoring");
                if (seg->use32)
                    *bits = 32;
                else
                    *bits = 16;
                return seg->index;
            }
        }

        *segtail = seg = nasm_malloc(sizeof(*seg));
        seg->next = NULL;
        segtail = &seg->next;
        seg->index = seg_alloc();
        seg->ieee_index = ieee_idx;
        any_segs = true;
        seg->name = NULL;
        seg->currentpos = 0;
        seg->align = 1;                 /* default */
        seg->use32 = *bits == 32;       /* default to user spec */
        seg->combine = CMB_PUBLIC;      /* default */
        seg->pubhead = NULL;
        seg->pubtail = &seg->pubhead;
        seg->data = NULL;
        seg->fptr = NULL;
        seg->lochead = NULL;
        seg->loctail = &seg->lochead;

        /*
         * Process the segment attributes.
         */
        p = name;
        while (attrs--) {
            p += strlen(p);
            while (!*p)
                p++;

            /*
             * `p' contains a segment attribute.
             */
            if (!nasm_stricmp(p, "private"))
                seg->combine = CMB_PRIVATE;
            else if (!nasm_stricmp(p, "public"))
                seg->combine = CMB_PUBLIC;
            else if (!nasm_stricmp(p, "common"))
                seg->combine = CMB_COMMON;
            else if (!nasm_stricmp(p, "use16"))
                seg->use32 = false;
            else if (!nasm_stricmp(p, "use32"))
                seg->use32 = true;
            else if (!nasm_strnicmp(p, "align=", 6)) {
                seg->align = readnum(p + 6, &rn_error);
                if (seg->align == 0)
                    seg->align = 1;
                if (rn_error) {
                    seg->align = 1;
                    nasm_error(ERR_NONFATAL, "segment alignment should be"
                               " numeric");
                }
                switch ((int)seg->align) {
                case 1:                /* BYTE */
                case 2:                /* WORD */
                case 4:                /* DWORD */
                case 16:               /* PARA */
                case 256:              /* PAGE */
                case 8:
                case 32:
                case 64:
                case 128:
                    break;
                default:
                    nasm_error(ERR_NONFATAL, "invalid alignment value %d",
                               seg->align);
                    seg->align = 1;
                    break;
                }
            } else if (!nasm_strnicmp(p, "absolute=", 9)) {
                seg->align = SEG_ABS + readnum(p + 9, &rn_error);
                if (rn_error)
                    nasm_error(ERR_NONFATAL, "argument to `absolute' segment"
                               " attribute should be numeric");
            }
        }

        ieee_seg_needs_update = seg;
        if (seg->align >= SEG_ABS)
            define_label(name, NO_SEG, seg->align - SEG_ABS,
                         NULL, false, false);
        else
            define_label(name, seg->index + 1, 0L,
                         NULL, false, false);
        ieee_seg_needs_update = NULL;

        if (seg->use32)
            *bits = 32;
        else
            *bits = 16;
        return seg->index;
    }
}
/*
 * Compute derived fields of hctl and build the initial directory/segment
 * arrays
 */
static bool
init_htab(HTAB *hashp, long nelem)
{
    HASHHDR    *hctl = hashp->hctl;
    HASHSEGMENT *segp;
    int         nbuckets;
    int         nsegs;

    /*
     * initialize mutex if it's a partitioned table
     */
    if (IS_PARTITIONED(hctl))
        SpinLockInit(&hctl->mutex);

    /*
     * Divide number of elements by the fill factor to determine a desired
     * number of buckets.  Allocate space for the next greater power of two
     * number of buckets
     */
    nbuckets = next_pow2_int((nelem - 1) / hctl->ffactor + 1);

    /*
     * In a partitioned table, nbuckets must be at least equal to
     * num_partitions; were it less, keys with apparently different partition
     * numbers would map to the same bucket, breaking partition independence.
     * (Normally nbuckets will be much bigger; this is just a safety check.)
     */
    while (nbuckets < hctl->num_partitions)
        nbuckets <<= 1;

    hctl->max_bucket = hctl->low_mask = nbuckets - 1;
    hctl->high_mask = (nbuckets << 1) - 1;

    /*
     * Figure number of directory segments needed, round up to a power of 2
     */
    nsegs = (nbuckets - 1) / hctl->ssize + 1;
    nsegs = next_pow2_int(nsegs);

    /*
     * Make sure directory is big enough. If pre-allocated directory is too
     * small, choke (caller screwed up).
     */
    if (nsegs > hctl->dsize)
    {
        if (!(hashp->dir))
            hctl->dsize = nsegs;
        else
            return false;
    }

    /* Allocate a directory */
    if (!(hashp->dir))
    {
        CurrentDynaHashCxt = hashp->hcxt;
        hashp->dir = (HASHSEGMENT *)
            hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
        if (!hashp->dir)
            return false;
    }

    /* Allocate initial segments */
    for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
    {
        *segp = seg_alloc(hashp);
        if (*segp == NULL)
            return false;
    }

    /* Choose number of entries to allocate at a time */
    hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);

#if HASH_DEBUG
    fprintf(stderr,
            "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n%s%ld\n",
            "TABLE POINTER   ", hashp,
            "DIRECTORY SIZE  ", hctl->dsize,
            "SEGMENT SIZE    ", hctl->ssize,
            "SEGMENT SHIFT   ", hctl->sshift,
            "FILL FACTOR     ", hctl->ffactor,
            "MAX BUCKET      ", hctl->max_bucket,
            "HIGH MASK       ", hctl->high_mask,
            "LOW MASK        ", hctl->low_mask,
            "NSEGS           ", hctl->nsegs,
            "NENTRIES        ", hctl->nentries);
#endif
    return true;
}
/*
 * Expand the table by adding one more hash bucket.
 */
static bool
expand_table(HTAB *hashp)
{
    HASHHDR    *hctl = hashp->hctl;
    HASHSEGMENT old_seg, new_seg;
    long        old_bucket, new_bucket;
    long        new_segnum, new_segndx;
    long        old_segnum, old_segndx;
    HASHBUCKET *oldlink, *newlink;
    HASHBUCKET  currElement, nextElement;

    Assert(!IS_PARTITIONED(hctl));

#ifdef HASH_STATISTICS
    hash_expansions++;
#endif

    new_bucket = hctl->max_bucket + 1;
    new_segnum = new_bucket >> hashp->sshift;
    new_segndx = MOD(new_bucket, hashp->ssize);

    if (new_segnum >= hctl->nsegs)
    {
        /* Allocate new segment if necessary -- could fail if dir full */
        if (new_segnum >= hctl->dsize)
            if (!dir_realloc(hashp))
                return false;
        if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
            return false;
        hctl->nsegs++;
    }

    /* OK, we created a new bucket */
    hctl->max_bucket++;

    /*
     * *Before* changing masks, find old bucket corresponding to same hash
     * values; values in that bucket may need to be relocated to new bucket.
     * Note that new_bucket is certainly larger than low_mask at this point,
     * so we can skip the first step of the regular hash mask calc.
     */
    old_bucket = (new_bucket & hctl->low_mask);

    /*
     * If we crossed a power of 2, readjust masks.
     */
    if ((uint32) new_bucket > hctl->high_mask)
    {
        hctl->low_mask = hctl->high_mask;
        hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
    }

    /*
     * Relocate records to the new bucket.  NOTE: because of the way the hash
     * masking is done in calc_bucket, only one old bucket can need to be
     * split at this point.  With a different way of reducing the hash value,
     * that might not be true!
     */
    old_segnum = old_bucket >> hashp->sshift;
    old_segndx = MOD(old_bucket, hashp->ssize);

    old_seg = hashp->dir[old_segnum];
    new_seg = hashp->dir[new_segnum];

    oldlink = &old_seg[old_segndx];
    newlink = &new_seg[new_segndx];

    for (currElement = *oldlink;
         currElement != NULL;
         currElement = nextElement)
    {
        nextElement = currElement->link;
        if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
        {
            *oldlink = currElement;
            oldlink = &currElement->link;
        }
        else
        {
            *newlink = currElement;
            newlink = &currElement->link;
        }
    }

    /* don't forget to terminate the rebuilt hash chains... */
    *oldlink = NULL;
    *newlink = NULL;

    return true;
}
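/*
 * For reference, a minimal sketch of the two-mask bucket calculation that
 * the relocation loop above relies on.  It mirrors the shape of dynahash's
 * calc_bucket; the name and exact layout here are illustrative, not the
 * verbatim implementation.  A hash value is first reduced with high_mask;
 * if the result lies beyond max_bucket (a bucket that has not been created
 * yet), it is reduced again with low_mask, which folds it onto the
 * not-yet-split "old" bucket.  That is why, when max_bucket advances by
 * one, only the single bucket (new_bucket & low_mask) can hold entries
 * that now belong in the new bucket.
 */
static uint32
calc_bucket_sketch(HASHHDR *hctl, uint32 hash_val)
{
    uint32      bucket;

    bucket = hash_val & hctl->high_mask;
    if (bucket > hctl->max_bucket)
        bucket = bucket & hctl->low_mask;

    return bucket;
}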
/*
 * The "normal" argument decides if we should update the local segment
 * base name or not.
 */
void define_label(const char *label, int32_t segment,
                  int64_t offset, bool normal)
{
    union label *lptr;
    bool created, changed;
    int64_t size;
    int64_t lastdef;

    /*
     * The backend may invoke this before pass 1, so treat that as
     * a special "pass".
     */
    const int64_t lpass = passn + 1;

    /*
     * Phase errors here can be one of two types: a new label appears,
     * or the offset changes. Increment global_offset_changed when that
     * happens, to tell the assembler core to make another pass.
     */
    lptr = find_label(label, true, &created);

    lastdef = lptr->defn.defined;

    if (segment) {
        /* We are actually defining this label */
        if (lptr->defn.type == LBL_EXTERN) {
            /* auto-promote EXTERN to GLOBAL */
            lptr->defn.type = LBL_GLOBAL;
            lastdef = 0;        /* We are "re-creating" this label */
        }
    } else {
        /* It's a pseudo-segment (extern, common) */
        segment = lptr->defn.segment ? lptr->defn.segment : seg_alloc();
    }

    if (lastdef || lptr->defn.type == LBL_BACKEND) {
        /*
         * We have seen this on at least one previous pass, or
         * potentially earlier in this same pass (in which case we
         * will probably error out further down.)
         */
        mangle_label_name(lptr);
        handle_herelabel(lptr, &segment, &offset);
    }

    if (ismagic(label) && lptr->defn.type == LBL_LOCAL)
        lptr->defn.type = LBL_SPECIAL;

    if (set_prevlabel(label) && normal)
        prevlabel = lptr->defn.label;

    if (lptr->defn.type == LBL_COMMON) {
        size = offset;
        offset = 0;
    } else {
        size = 0;               /* This is a hack... */
    }

    changed = created || !lastdef ||
        lptr->defn.segment != segment ||
        lptr->defn.offset != offset ||
        lptr->defn.size != size;
    global_offset_changed += changed;

    if (lastdef == lpass) {
        int32_t saved_line = 0;
        const char *saved_fname = NULL;
        int noteflags;

        /*
         * Defined elsewhere in the program, seen in this pass.
         */
        if (changed) {
            nasm_error(ERR_NONFATAL,
                       "label `%s' inconsistently redefined",
                       lptr->defn.label);
            noteflags = ERR_NOTE | ERR_HERE;
        } else {
            nasm_error(ERR_WARNING | WARN_LABEL_REDEF | ERR_PASS2,
                       "label `%s' redefined to an identical value",
                       lptr->defn.label);
            noteflags = ERR_NOTE | ERR_HERE | WARN_LABEL_REDEF | ERR_PASS2;
        }

        src_get(&saved_line, &saved_fname);
        src_set(lptr->defn.def_line, lptr->defn.def_file);
        nasm_error(noteflags, "label `%s' originally defined",
                   lptr->defn.label);
        src_set(saved_line, saved_fname);
    } else if (changed && pass0 > 1 && lptr->defn.type != LBL_SPECIAL) {
        /*
         * WARN_LABEL_LATE defaults to an error, as this should never
         * actually happen.  Just in case this is a backwards
         * compatibility problem, still make it a warning so that the
         * user can suppress or demote it.
         *
         * As a special case, LBL_SPECIAL symbols are allowed to be changed
         * even during the last pass.
         */
        nasm_error(ERR_WARNING | WARN_LABEL_LATE,
                   "label `%s' %s during code generation",
                   lptr->defn.label, created ? "defined" : "changed");
    }

    lptr->defn.segment = segment;
    lptr->defn.offset = offset;
    lptr->defn.size = size;
    lptr->defn.defined = lpass;

    if (changed || lastdef != lpass)
        src_get(&lptr->defn.def_line, &lptr->defn.def_file);

    if (lastdef != lpass)
        out_symdef(lptr);
}
/**
 * Allocate a clean page for drawing.
 */
void seg_alloc_clean (void)
{
    seg_alloc ();
    seg_erase ();
}
static bool
init_htab(HTAB *hashp, long nelem)
{
    HASHHDR    *hctl = hashp->hctl;
    HASHSEGMENT *segp;
    int         nbuckets;
    int         nsegs;

    /*
     * Divide number of elements by the fill factor to determine a desired
     * number of buckets.  Allocate space for the next greater power of
     * two number of buckets
     */
    nelem = (nelem - 1) / hctl->ffactor + 1;
    nbuckets = 1 << my_log2(nelem);

    hctl->max_bucket = hctl->low_mask = nbuckets - 1;
    hctl->high_mask = (nbuckets << 1) - 1;

    /*
     * Figure number of directory segments needed, round up to a power of
     * 2
     */
    nsegs = (nbuckets - 1) / hctl->ssize + 1;
    nsegs = 1 << my_log2(nsegs);

    /*
     * Make sure directory is big enough. If pre-allocated directory is
     * too small, choke (caller screwed up).
     */
    if (nsegs > hctl->dsize)
    {
        if (!(hashp->dir))
            hctl->dsize = nsegs;
        else
            return false;
    }

    /* Allocate a directory */
    if (!(hashp->dir))
    {
        CurrentDynaHashCxt = hashp->hcxt;
        hashp->dir = (HASHSEGMENT *)
            hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
        if (!hashp->dir)
            return false;
    }

    /* Allocate initial segments */
    for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
    {
        *segp = seg_alloc(hashp);
        if (*segp == NULL)
            return false;
    }

#if HASH_DEBUG
    fprintf(stderr,
            "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n%s%ld\n",
            "TABLE POINTER   ", hashp,
            "DIRECTORY SIZE  ", hctl->dsize,
            "SEGMENT SIZE    ", hctl->ssize,
            "SEGMENT SHIFT   ", hctl->sshift,
            "FILL FACTOR     ", hctl->ffactor,
            "MAX BUCKET      ", hctl->max_bucket,
            "HIGH MASK       ", hctl->high_mask,
            "LOW MASK        ", hctl->low_mask,
            "NSEGS           ", hctl->nsegs,
            "NENTRIES        ", hctl->nentries);
#endif
    return true;
}