Example #1
/* Split plnode along plane. We know that plane really intersects
 * plnode->poly. We also know that plnode->poly is planar and convex.
 *
 * Given these assumptions we know that plnode->poly has to be split
 * into exactly two pieces.
 */
static inline void SplitPolyNode(PolyListNode *plnode,
				 PolyListNode **front, PolyListNode **back,
				 EdgeIntersection edges[2],
				 struct obstack *scratch)
{
  const void **tagged_app = plnode->tagged_app;
  Poly *poly = plnode->poly, savedp;
  VARARRAY(savedv, Vertex *, poly->n_vertices);
  Vertex *v0, *v1, **vpos;
  int istart[2], iend[2], i, nv[2];
  Vertex *vstart[2], *vend[2];

#if BSPTREE_STATS
  ++n_tree_polys;
#endif

  vstart[0] = vstart[1] = vend[0] = vend[1] = NULL;
  istart[0] = istart[1] = iend[0] = iend[1] = -1;

  /* first point of intersection */
  if (fzero(edges[0].scp[0])) {
    v0 = poly->v[edges[0].v[0]];
    if (fpos(edges[0].scp[1])) {
      istart[0] = edges[0].v[0];
      iend[1]   = edges[0].v[0];
    } else {
      istart[1] = edges[0].v[0];
      iend[0]   = edges[0].v[0]; 
    }
  } else if (fzero(edges[0].scp[1])) {
    v0 = poly->v[edges[0].v[1]];
    if (fpos(edges[0].scp[0])) {
      istart[1] = edges[0].v[1];
      iend[0]   = edges[0].v[1];
    } else {
      istart[0] = edges[0].v[1];
      iend[1]   = edges[0].v[1]; 
    }
  } else {
    HPt3Coord mu0, mu1;
    Vertex *V0 = poly->v[edges[0].v[0]];
    Vertex *V1 = poly->v[edges[0].v[1]];
    
    v0 = obstack_alloc(scratch, sizeof(Vertex));
    mu0 = edges[0].scp[1]/(edges[0].scp[1]-edges[0].scp[0]);
#if 0
    mu1 = edges[0].scp[0]/(edges[0].scp[0]-edges[0].scp[1]);
#else
    mu1 = 1.0 - mu0;
#endif
    /* Use denormalized variant; otherwise textures may come out wrong
     * because the homogeneous divisor is used for perspective
     * corrections.
     */
    if (poly->flags & VERT_ST) {
      v0->st.s = mu0 * V0->st.s + mu1 * V1->st.s;
      v0->st.t = mu0 * V0->st.t + mu1 * V1->st.t;
      HPt3LinSumDenorm(mu0, &V0->pt, mu1, &V1->pt, &v0->pt);
    } else {
      HPt3LinSum(mu0, &V0->pt, mu1, &V1->pt, &v0->pt);
    } 
    if (!finite(v0->pt.x + v0->pt.y + v0->pt.z)){
      abort();
    }
    if (poly->flags & VERT_C) {
      CoLinSum(mu0, &V0->vcol, mu1, &V1->vcol, &v0->vcol);
    }
    if (true || (poly->flags & VERT_N)) {
      /* The averaged vertex normals do not have an orientation, so
       * try to orient them w.r.t. the polygon normal before computing
       * the linear combination.
       */
      if (Pt3Dot(&V0->vn, &poly->pn)*Pt3Dot(&V1->vn, &poly->pn) < 0) {
	Pt3Comb(-mu0, &V0->vn, mu1, &V1->vn, &v0->vn);
      } else {
	Pt3Comb(mu0, &V0->vn, mu1, &V1->vn, &v0->vn);
      }
      Pt3Unit(&v0->vn);
    }

    if (fpos(edges[0].scp[0])) {
      vstart[1] = vend[0] = v0;
      istart[1] = edges[0].v[1];
      iend[0]   = edges[0].v[0];
    } else {
      vstart[0] = vend[1] = v0;
      istart[0] = edges[0].v[1];
      iend[1]   = edges[0].v[0];
    }
  }

  /* second point of intersection */
  if (fzero(edges[1].scp[0])) {
    v1 = poly->v[edges[1].v[0]];
    if (fpos(edges[1].scp[1])) {
      istart[0] = edges[1].v[0];
      iend[1]   = edges[1].v[0];
    } else {
      istart[1] = edges[1].v[0];
      iend[0]   = edges[1].v[0]; 
    }
  } else if (fzero(edges[1].scp[1])) {
    v1 = poly->v[edges[1].v[1]];
    if (fpos(edges[1].scp[0])) {
      istart[1] = edges[1].v[1];
      iend[0]   = edges[1].v[1];
    } else {
      istart[0] = edges[1].v[1];
      iend[1]   = edges[1].v[1]; 
    }
  } else {
    HPt3Coord mu0, mu1;
    Vertex *V0 = poly->v[edges[1].v[0]];
    Vertex *V1 = poly->v[edges[1].v[1]];
    
    v1 = obstack_alloc(scratch, sizeof(Vertex));
    mu0 = edges[1].scp[1]/(edges[1].scp[1]-edges[1].scp[0]);
#if 0
    mu1 = edges[1].scp[0]/(edges[1].scp[0]-edges[1].scp[1]);
#else
    mu1 = 1.0 - mu0;
#endif
    if (poly->flags & VERT_ST) {
      v1->st.s = mu0 * V0->st.s + mu1 * V1->st.s;
      v1->st.t = mu0 * V0->st.t + mu1 * V1->st.t;
      HPt3LinSumDenorm(mu0, &V0->pt, mu1, &V1->pt, &v1->pt);
    } else {
      HPt3LinSum(mu0, &V0->pt, mu1, &V1->pt, &v1->pt);
    }
    if (!finite(v1->pt.x + v1->pt.y + v1->pt.z))
      abort();
    if (poly->flags & VERT_C) {
      CoLinSum(mu0, &V0->vcol, mu1, &V1->vcol, &v1->vcol);
    }
    if (true || (poly->flags & VERT_N)) {
      if (Pt3Dot(&V0->vn, &poly->pn)*Pt3Dot(&V1->vn, &poly->pn) < 0) {
	Pt3Comb(-mu0, &V0->vn, mu1, &V1->vn, &v1->vn);
      } else {
	Pt3Comb(mu0, &V0->vn, mu1, &V1->vn, &v1->vn);
      }
      Pt3Unit(&v1->vn);
    }

    if (fpos(edges[1].scp[0])) {
      vstart[1] = vend[0] = v1;
      istart[1] = edges[1].v[1];
      iend[0]   = edges[1].v[0];
    } else {
      vstart[0] = vend[1] = v1;
      istart[0] = edges[1].v[1];
      iend[1]   = edges[1].v[0];
    }
  }

  ListPush(*front, plnode);
  ListPush(*back, new_poly_list_node(tagged_app, scratch));

  if ((poly->flags & POLY_NONFLAT)) {
    if (!(*front)->pn) {
      /* Compute the normal on the parent element to avoid numerical
       * instabilities on increasingly degenerated polygons.
       */
      (*front)->pn = obstack_alloc(scratch, sizeof(Point3));
      PolyNormal(poly, (*front)->pn,
		 true /* 4d */, false /* evert */, NULL, NULL);
    }
    (*back)->pn = (*front)->pn;
  }

  for (i = 0; i < 2; i++) {
    nv[i] = iend[i] - istart[i] + 1;
    if (nv[i] < 0) {
      nv[i] += poly->n_vertices;
    }
    nv[i] += (vstart[i] != NULL) + (vend[i] != NULL);
  }

  if (poly->flags & POLY_SCRATCH) {

    savedp = *poly;
    memcpy(savedv, poly->v, poly->n_vertices*sizeof(Vertex *));

    if (nv[0] <= poly->n_vertices) {
      poly->n_vertices = nv[0];
      (*front)->poly = poly;
      (*back)->poly  = new_poly(nv[1], NULL, scratch);
    } else {
      if (nv[1] > poly->n_vertices) {
	abort();
      }
      poly->n_vertices = nv[1];
      (*back)->poly = poly;
      (*front)->poly  = new_poly(nv[0], NULL, scratch);
    }
    
    /* Attention: gcc had problems with this code snippet when compiled
     * with -fstrict-aliasing; the "#if 1" variant seems to work. In the
     * "#else" version gcc somehow lost the "savedp.v = savedv"
     * assignment. I think this is a compiler bug.
     */
    poly = &savedp;
#if 1
    poly->v = savedv;
#else
    savedp.v = savedv;
#endif
  } else {
    (*front)->poly  = new_poly(nv[0], NULL, scratch);
    (*back)->poly  = new_poly(nv[1], NULL, scratch);
  }

  for (i = 0; i < 2; i++) {
    PolyListNode *half = (i == 0) ? *front : *back;
    int j;

    vpos = half->poly->v;
    if (vstart[i] != NULL) {
      *vpos++ = vstart[i];
    }
    if (istart[i] <= iend[i]) {
      for (j = istart[i]; j <= iend[i] && j < poly->n_vertices; j++) {
	*vpos++ = poly->v[j];
      }
    } else {
      for (j = istart[i]; j < poly->n_vertices; j++) {
	*vpos++ = poly->v[j];
      }
      for (j = 0; j <= iend[i]; j++) {
	*vpos++ = poly->v[j];
      }
    }
    if (vend[i] != NULL) {
      *vpos++ = vend[i];
    }
    half->poly->pcol  = poly->pcol;
    half->poly->pn    = poly->pn;
    half->poly->flags = poly->flags|POLY_SCRATCH;

    check_poly(half->poly);
  }
}
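
A minimal standalone sketch of the interpolation SplitPolyNode performs at each crossing edge (the Pt3 type below is illustrative, not the Vertex/HPt3 types used above): given the signed distances d0 and d1 of the edge's endpoints from the splitting plane, the crossing point is the affine combination with mu0 = d1/(d1 - d0) and mu1 = 1 - mu0.

typedef struct { float x, y, z; } Pt3;

static Pt3
edge_plane_crossing(Pt3 p0, Pt3 p1, float d0, float d1)
{
  float mu0 = d1 / (d1 - d0); /* weight of p0; becomes 1 when d0 == 0 */
  float mu1 = 1.0f - mu0;     /* weight of p1 */
  Pt3 r;

  r.x = mu0 * p0.x + mu1 * p1.x;
  r.y = mu0 * p0.y + mu1 * p1.y;
  r.z = mu0 * p0.z + mu1 * p1.z;
  return r;
}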
Example #2
/* Compute the shift for each trie node, as well as the delta
   table and next cache for the given keyword set. */
const char *
kwsprep (kwset_t kws)
{
  struct kwset *kwset;
  int i;
  struct trie *curr;
  char const *trans;
  unsigned char delta[NCHAR];

  kwset = (struct kwset *) kws;

  /* Initial values for the delta table; will be changed later.  The
     delta entry for a given character is the smallest depth of any
     node at which an outgoing edge is labeled by that character. */
  memset(delta, kwset->mind < UCHAR_MAX ? kwset->mind : UCHAR_MAX, NCHAR);

  /* Check whether we can use the simple Boyer-Moore algorithm instead
     of the hairy Commentz-Walter algorithm. */
  if (kwset->words == 1 && kwset->trans == NULL)
    {
      char c;

      /* Looking for just one string.  Extract it from the trie. */
      kwset->target = obstack_alloc(&kwset->obstack, kwset->mind);
      if (!kwset->target)
        return _("memory exhausted");
      for (i = kwset->mind - 1, curr = kwset->trie; i >= 0; --i)
        {
          kwset->target[i] = curr->links->label;
          curr = curr->links->trie;
        }
      /* Build the Boyer-Moore delta.  Boy, that's easy compared to CW. */
      for (i = 0; i < kwset->mind; ++i)
        delta[U(kwset->target[i])] = kwset->mind - (i + 1);
      /* Find the minimal delta2 shift that we might make after
         a backwards match has failed. */
      c = kwset->target[kwset->mind - 1];
      for (i = kwset->mind - 2; i >= 0; --i)
        if (kwset->target[i] == c)
          break;
      kwset->mind2 = kwset->mind - (i + 1);
    }
  else
    {
      struct trie *fail;
      struct trie *last, *next[NCHAR];

      /* Traverse the nodes of the trie in level order, simultaneously
         computing the delta table, failure function, and shift function. */
      for (curr = last = kwset->trie; curr; curr = curr->next)
        {
          /* Enqueue the immediate descendants in the level order queue. */
          enqueue(curr->links, &last);

          curr->shift = kwset->mind;
          curr->maxshift = kwset->mind;

          /* Update the delta table for the descendants of this node. */
          treedelta(curr->links, curr->depth, delta);

          /* Compute the failure function for the descendants of this node.  */
          treefails(curr->links, curr->fail, kwset->trie);

          /* Update the shifts at each node in the current node's chain
             of fails back to the root. */
          for (fail = curr->fail; fail; fail = fail->fail)
            {
              /* If the current node has some outgoing edge that the fail
                 doesn't, then the shift at the fail should be no larger
                 than the difference of their depths. */
              if (!hasevery(fail->links, curr->links))
                if (curr->depth - fail->depth < fail->shift)
                  fail->shift = curr->depth - fail->depth;

              /* If the current node is accepting then the shift at the
                 fail and its descendants should be no larger than the
                 difference of their depths. */
              if (curr->accepting && fail->maxshift > curr->depth - fail->depth)
                fail->maxshift = curr->depth - fail->depth;
            }
        }

      /* Traverse the trie in level order again, fixing up all nodes whose
         shift exceeds their inherited maxshift. */
      for (curr = kwset->trie->next; curr; curr = curr->next)
        {
          if (curr->maxshift > curr->parent->maxshift)
            curr->maxshift = curr->parent->maxshift;
          if (curr->shift > curr->maxshift)
            curr->shift = curr->maxshift;
        }

      /* Create a vector, indexed by character code, of the outgoing links
         from the root node. */
      for (i = 0; i < NCHAR; ++i)
        next[i] = NULL;
      treenext(kwset->trie->links, next);

      if ((trans = kwset->trans) != NULL)
        for (i = 0; i < NCHAR; ++i)
          kwset->next[i] = next[U(trans[i])];
      else
        memcpy(kwset->next, next, NCHAR * sizeof(struct trie *));
    }

  /* Fix things up for any translation table. */
  if ((trans = kwset->trans) != NULL)
    for (i = 0; i < NCHAR; ++i)
      kwset->delta[i] = delta[U(trans[i])];
  else
    memcpy(kwset->delta, delta, NCHAR);

  return NULL;
}
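
A standalone sketch of the single-keyword delta table built in the Boyer-Moore branch above, following the same convention (every character defaults to the pattern length, characters in the pattern get the distance from their last occurrence to the end, so the final pattern character ends up with 0). The names below are illustrative and not part of kwset.

#include <limits.h>
#include <string.h>

#define NCHAR (UCHAR_MAX + 1)

static void
build_bm_delta (const unsigned char *pat, size_t len, unsigned char delta[NCHAR])
{
  size_t i;

  /* Default shift: the whole pattern length (capped to fit a byte). */
  memset (delta, len < UCHAR_MAX ? (int) len : UCHAR_MAX, NCHAR);
  /* Characters occurring in the pattern shift by their distance from
     the end; the last character gets 0. */
  for (i = 0; i < len; ++i)
    delta[pat[i]] = len - (i + 1);
}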
Example #3
/*
 * section_new() (for non-zerofill sections) switches to a new section, creating
 * it if needed, and creates a fresh fragment.  If it is the current section
 * nothing happens except checks to make sure the type, attributes and
 * sizeof_stub are the same.  The segment and section names will be trimmed to
 * fit in the section structure and it is the responsibility of the caller to
 * report errors if they don't fit.  For zerofill sections only the struct
 * frchain for the section is returned after possibly being created (these
 * sections are never made the current section and no frags are ever touched).
 *
 * Globals on input:
 *    frchain_now points to the (possibly none) struct frchain for the current
 *    section.
 *    frag_now points at an incomplete frag for current section.
 *    If frag_now == NULL, then there is no old, incomplete frag, so the old
 *    frag is not closed off.
 *
 * Globals on output:
 *    frchain_now points to the (possibly new) struct frchain for this section.
 *    frchain_root updated if needed (for the first section created).
 *    frag_now is set to the last (possibly new) frag in the section.
 */
frchainS *
section_new(
char *segname,
char *sectname,
unsigned long type,
unsigned long attributes,
unsigned long sizeof_stub)
{
    frchainS *frcP;
    frchainS **lastPP;
    unsigned long last_nsect;

	if(frags.chunk_size == 0)
	    /*
	     * This call is used instead of:
	     * 	 obstack_begin(&frags, 5000);
	     * the only difference is that frags are allocated on 4-byte
	     * boundaries instead of the default.  The problem with the default
	     * is that on some RISC machines the obstack uses 8 (the alignment
	     * of a double after a char in a struct) and then the common use of:
	     * 	frag_now->fr_fix = obstack_next_free(&frags) -
	     *			   frag_now->fr_literal;
	     * can pick up an extra 4 bytes that are not in the frag because of
	     * the 8-byte alignment where only 4-byte alignment is needed for
	     * frags.
	     */
	    _obstack_begin(&frags, 5000, 4,
			   obstack_chunk_alloc, obstack_chunk_free);

	/*
	 * Determine if this section has been seen.
	 */
	last_nsect = 0;
	for(frcP = *(lastPP = &frchain_root);
	    frcP != NULL;
	    frcP = *(lastPP = &frcP->frch_next)){
	    if(strncmp(frcP->frch_section.segname, segname,
		       sizeof(frcP->frch_section.segname)) == 0 &&
	       strncmp(frcP->frch_section.sectname, sectname,
		       sizeof(frcP->frch_section.sectname)) == 0)
		break;
	    last_nsect = frcP->frch_nsect;
	}

	/*
	 * If this section has been seen, make sure its type and attributes
	 * for this call are the same as when the section was created.
	 */
	if(frcP != NULL){
	    if((frcP->frch_section.flags & SECTION_TYPE) != type){
		as_warn("section type does not match previous section type");
	    }
	    if(type == S_SYMBOL_STUBS &&
	       frcP->frch_section.reserved2 != sizeof_stub){
		as_warn("section stub size does not match previous section "
			"stub size");
	    }
	    frcP->frch_section.flags |= attributes;
	}

	/*
	 * If the current section is the same as for this call there is nothing
	 * more to do.
	 */
	if(frcP != NULL && (frchain_now == frcP || type == S_ZEROFILL)){
	    return(frcP);
	}

	/*
	 * For non-zerofill sections this section will be made the current
	 * section, so deal with the current frag.
	 */
	if(type != S_ZEROFILL){
	    /*
	     * If there is any current frag in the old section close it off.
	     */
	    if(frag_now != NULL){
		frag_now->fr_fix = obstack_next_free(&frags) -
				   frag_now->fr_literal;
		frag_wane(frag_now);
	    }

	    /*
	     * We must do the obstack_finish(), so the next object we put on
	     * obstack frags will not appear to start at the fr_literal of the
	     * current frag.  Also, it ensures that the next object will begin
	     * on an address that is aligned correctly for the engine that runs
	     * the assembler.
	     */
	    obstack_finish(&frags);
	}

	/*
	 * If this section exists (and it is not the current section), switch to
	 * it by making it the current chain and create a new frag in it.
	 */
	if(frcP != NULL){
	    /*
	     * For a zerofill section no frags are created here, and since it
	     * exists just return a pointer to the section.
	     */
	    if((frcP->frch_section.flags & SECTION_TYPE) == S_ZEROFILL){
		return(frcP);
	    }
	    else{
		/*
		 * Make this section the current section.
		 */
		frchain_now = frcP;

		/*
		 * Make a fresh frag for the section.
		 */
		frag_now = (fragS *)obstack_alloc(&frags, SIZEOF_STRUCT_FRAG);
		memset(frag_now, '\0', SIZEOF_STRUCT_FRAG);
		frag_now->fr_next = NULL;

		/*
		 * Append the new frag to the existing frchain.
		 */
		frchain_now->frch_last->fr_next = frag_now;
		frchain_now->frch_last = frag_now;
	    }
	}
	else{
	    /*
	     * This section does not exist, so create a new frchainS struct,
	     * fill it in and link it into the chain.
	     */
	    frcP = (frchainS *)xmalloc(sizeof(frchainS));
	    memset(frcP, '\0', sizeof(frchainS));
	    strncpy(frcP->frch_section.segname, segname,
		    sizeof(frcP->frch_section.segname));
	    strncpy(frcP->frch_section.sectname, sectname,
		    sizeof(frcP->frch_section.sectname));
	    frcP->frch_section.flags = attributes | type;
	    frcP->frch_section.reserved2 = sizeof_stub;

	    frcP->frch_nsect = last_nsect + 1;

	    *lastPP = frcP;

	    /*
	     * For zerofill sections no frag is created here so just return.
	     * For non-zerofill sections create the section's new frag and
	     * make the section the current chain.
	     */
	    if(type == S_ZEROFILL){
		return(frcP);
	    }
	    else{
		/*
		 * Make a fresh frag for the new section.
		 */
		frag_now = (fragS *)obstack_alloc(&frags, SIZEOF_STRUCT_FRAG);
		memset(frag_now, '\0', SIZEOF_STRUCT_FRAG);
		frag_now->fr_next = NULL;

		/*
		 * Append the new frag to the new frchain.
		 */
		frcP->frch_root = frag_now;
		frcP->frch_last = frag_now;

		/*
		 * Make this section the current section.
		 */
		frchain_now = frcP;
	    }
	}
	return(frchain_now);
}
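
A hedged sketch of the measurement the alignment comment in section_new() is about: with frags on a 4-byte-aligned obstack, obstack_next_free() minus the start of the grown data counts exactly the bytes placed in the frag (this assumes the grown bytes stay in the current chunk). Only the obstack calls below are the real GNU obstack API; the rest is illustrative.

#include <stdlib.h>
#include <stddef.h>
#include "obstack.h"

#define obstack_chunk_alloc malloc
#define obstack_chunk_free  free

static struct obstack frags;

static size_t
grown_bytes(const char *bytes, size_t n)
{
	char *start;

	if(frags.chunk_size == 0)
	    /* 4-byte alignment, as in section_new() above. */
	    _obstack_begin(&frags, 5000, 4,
			   obstack_chunk_alloc, obstack_chunk_free);

	start = (char *)obstack_next_free(&frags);
	obstack_grow(&frags, bytes, n);		/* append the frag contents */
	/* The fr_fix computation quoted in the comment above; a later
	   obstack_finish(&frags) would close off the growing object. */
	return (size_t)((char *)obstack_next_free(&frags) - start);
}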
Example #4
static struct type *
java_link_class_type (struct gdbarch *gdbarch,
                      struct type *type, struct value *clas)
{
    struct value *temp;
    const char *unqualified_name;
    const char *name = TYPE_TAG_NAME (type);
    int ninterfaces, nfields, nmethods;
    int type_is_object = 0;
    struct fn_field *fn_fields;
    struct fn_fieldlist *fn_fieldlists;
    struct value *fields;
    struct value *methods;
    struct value *method = NULL;
    struct value *field = NULL;
    int i, j;
    struct objfile *objfile = get_dynamics_objfile (gdbarch);
    struct type *tsuper;

    gdb_assert (name != NULL);
    unqualified_name = strrchr (name, '.');
    if (unqualified_name == NULL)
        unqualified_name = name;

    temp = clas;
    temp = value_struct_elt (&temp, NULL, "superclass", NULL, "structure");
    if (strcmp (name, "java.lang.Object") == 0)
    {
        tsuper = get_java_object_type ();
        if (tsuper && TYPE_CODE (tsuper) == TYPE_CODE_PTR)
            tsuper = TYPE_TARGET_TYPE (tsuper);
        type_is_object = 1;
    }
    else
        tsuper = type_from_class (gdbarch, temp);

#if 1
    ninterfaces = 0;
#else
    temp = clas;
    ninterfaces = value_as_long (value_struct_elt (&temp, NULL, "interface_len",
                                 NULL, "structure"));
#endif
    TYPE_N_BASECLASSES (type) = (tsuper == NULL ? 0 : 1) + ninterfaces;
    temp = clas;
    nfields = value_as_long (value_struct_elt (&temp, NULL, "field_count",
                             NULL, "structure"));
    nfields += TYPE_N_BASECLASSES (type);
    nfields++;			/* Add one for dummy "class" field.  */
    TYPE_NFIELDS (type) = nfields;
    TYPE_FIELDS (type) = (struct field *)
                         TYPE_ALLOC (type, sizeof (struct field) * nfields);

    memset (TYPE_FIELDS (type), 0, sizeof (struct field) * nfields);

    TYPE_FIELD_PRIVATE_BITS (type) =
        (B_TYPE *) TYPE_ALLOC (type, B_BYTES (nfields));
    B_CLRALL (TYPE_FIELD_PRIVATE_BITS (type), nfields);

    TYPE_FIELD_PROTECTED_BITS (type) =
        (B_TYPE *) TYPE_ALLOC (type, B_BYTES (nfields));
    B_CLRALL (TYPE_FIELD_PROTECTED_BITS (type), nfields);

    TYPE_FIELD_IGNORE_BITS (type) =
        (B_TYPE *) TYPE_ALLOC (type, B_BYTES (nfields));
    B_CLRALL (TYPE_FIELD_IGNORE_BITS (type), nfields);

    TYPE_FIELD_VIRTUAL_BITS (type) = (B_TYPE *)
                                     TYPE_ALLOC (type, B_BYTES (TYPE_N_BASECLASSES (type)));
    B_CLRALL (TYPE_FIELD_VIRTUAL_BITS (type), TYPE_N_BASECLASSES (type));

    if (tsuper != NULL)
    {
        TYPE_BASECLASS (type, 0) = tsuper;
        if (type_is_object)
            SET_TYPE_FIELD_PRIVATE (type, 0);
    }

    i = strlen (name);
    if (i > 2 && name[i - 1] == ']' && tsuper != NULL)
    {
        /* FIXME */
        TYPE_LENGTH (type) = TYPE_LENGTH (tsuper) + 4;   /* size with "length" */
    }
    else
    {
        temp = clas;
        temp = value_struct_elt (&temp, NULL, "size_in_bytes",
                                 NULL, "structure");
        TYPE_LENGTH (type) = value_as_long (temp);
    }

    fields = NULL;
    nfields--;			/* First set up dummy "class" field.  */
    SET_FIELD_PHYSADDR (TYPE_FIELD (type, nfields), value_address (clas));
    TYPE_FIELD_NAME (type, nfields) = "class";
    TYPE_FIELD_TYPE (type, nfields) = value_type (clas);
    SET_TYPE_FIELD_PRIVATE (type, nfields);

    for (i = TYPE_N_BASECLASSES (type); i < nfields; i++)
    {
        int accflags;
        int boffset;

        if (fields == NULL)
        {
            temp = clas;
            fields = value_struct_elt (&temp, NULL, "fields", NULL, "structure");
            field = value_ind (fields);
        }
        else
        {   /* Re-use field value for next field.  */
            CORE_ADDR addr
                = value_address (field) + TYPE_LENGTH (value_type (field));

            set_value_address (field, addr);
            set_value_lazy (field, 1);
        }
        temp = field;
        temp = value_struct_elt (&temp, NULL, "name", NULL, "structure");
        TYPE_FIELD_NAME (type, i) =
            get_java_utf8_name (&objfile->objfile_obstack, temp);
        temp = field;
        accflags = value_as_long (value_struct_elt (&temp, NULL, "accflags",
                                  NULL, "structure"));
        temp = field;
        temp = value_struct_elt (&temp, NULL, "info", NULL, "structure");
        boffset = value_as_long (value_struct_elt (&temp, NULL, "boffset",
                                 NULL, "structure"));
        if (accflags & 0x0001)	/* public access */
        {
            /* ??? */
        }
        if (accflags & 0x0002)	/* private access */
        {
            SET_TYPE_FIELD_PRIVATE (type, i);
        }
        if (accflags & 0x0004)	/* protected access */
        {
            SET_TYPE_FIELD_PROTECTED (type, i);
        }
        if (accflags & 0x0008)	/* ACC_STATIC */
            SET_FIELD_PHYSADDR (TYPE_FIELD (type, i), boffset);
        else
            SET_FIELD_BITPOS (TYPE_FIELD (type, i), 8 * boffset);
        if (accflags & 0x8000)	/* FIELD_UNRESOLVED_FLAG */
        {
            TYPE_FIELD_TYPE (type, i) = get_java_object_type ();	/* FIXME */
        }
        else
        {
            struct type *ftype;

            temp = field;
            temp = value_struct_elt (&temp, NULL, "type", NULL, "structure");
            ftype = type_from_class (gdbarch, temp);
            if (TYPE_CODE (ftype) == TYPE_CODE_STRUCT)
                ftype = lookup_pointer_type (ftype);
            TYPE_FIELD_TYPE (type, i) = ftype;
        }
    }

    temp = clas;
    nmethods = value_as_long (value_struct_elt (&temp, NULL, "method_count",
                              NULL, "structure"));
    j = nmethods * sizeof (struct fn_field);
    fn_fields = (struct fn_field *)
                obstack_alloc (&objfile->objfile_obstack, j);
    memset (fn_fields, 0, j);
    fn_fieldlists = (struct fn_fieldlist *)
                    alloca (nmethods * sizeof (struct fn_fieldlist));

    methods = NULL;
    for (i = 0; i < nmethods; i++)
    {
        const char *mname;
        int k;

        if (methods == NULL)
        {
            temp = clas;
            methods = value_struct_elt (&temp, NULL, "methods",
                                        NULL, "structure");
            method = value_ind (methods);
        }
        else
        {   /* Re-use method value for next method.  */
            CORE_ADDR addr
                = value_address (method) + TYPE_LENGTH (value_type (method));

            set_value_address (method, addr);
            set_value_lazy (method, 1);
        }

        /* Get method name.  */
        temp = method;
        temp = value_struct_elt (&temp, NULL, "name", NULL, "structure");
        mname = get_java_utf8_name (&objfile->objfile_obstack, temp);
        if (strcmp (mname, "<init>") == 0)
            mname = unqualified_name;

        /* Check for an existing method with the same name.
         * This makes building the fn_fieldlists an O(nmethods**2)
         * operation.  That could be avoided by using hashing, but I doubt it
         * is worth it.  Note that we do maintain the order of methods
         * in the inferior's Method table (as long as that is grouped
         * by method name), which I think is desirable.  --PB */
        for (k = 0, j = TYPE_NFN_FIELDS (type);;)
        {
            if (--j < 0)
            {   /* No match - new method name.  */
                j = TYPE_NFN_FIELDS (type)++;
                fn_fieldlists[j].name = mname;
                fn_fieldlists[j].length = 1;
                fn_fieldlists[j].fn_fields = &fn_fields[i];
                k = i;
                break;
            }
            if (strcmp (mname, fn_fieldlists[j].name) == 0)
            {   /* Found an existing method with the same name.  */
                int l;

                if (mname != unqualified_name)
                    obstack_free (&objfile->objfile_obstack, mname);
                mname = fn_fieldlists[j].name;
                fn_fieldlists[j].length++;
                k = i - k;	/* Index of new slot.  */
                /* Shift intervening fn_fields (between k and i) down.  */
                for (l = i; l > k; l--)
                    fn_fields[l] = fn_fields[l - 1];
                for (l = TYPE_NFN_FIELDS (type); --l > j;)
                    fn_fieldlists[l].fn_fields++;
                break;
            }
            k += fn_fieldlists[j].length;
        }
        fn_fields[k].physname = "";
        fn_fields[k].is_stub = 1;
        /* FIXME */
        fn_fields[k].type = lookup_function_type
                            (builtin_java_type (gdbarch)->builtin_void);
        TYPE_CODE (fn_fields[k].type) = TYPE_CODE_METHOD;
    }

    j = TYPE_NFN_FIELDS (type) * sizeof (struct fn_fieldlist);
    TYPE_FN_FIELDLISTS (type) = (struct fn_fieldlist *)
                                obstack_alloc (&objfile->objfile_obstack, j);
    memcpy (TYPE_FN_FIELDLISTS (type), fn_fieldlists, j);

    return type;
}
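
A hedged standalone sketch of the grouping idea the comment in the method loop above describes: scan the buckets seen so far backwards; on a miss append a new bucket, on a hit just extend it. The bucket type and helper are illustrative only and omit the fn_fields shifting the real code also performs.

#include <string.h>

struct bucket { const char *name; int length; };

static int
find_or_add_bucket (struct bucket *buckets, int *nbuckets, const char *name)
{
  int j;

  for (j = *nbuckets - 1; j >= 0; j--)
    if (strcmp (name, buckets[j].name) == 0)
      {
        buckets[j].length++;	/* existing method name: extend its bucket */
        return j;
      }
  j = (*nbuckets)++;		/* no match: start a new bucket */
  buckets[j].name = name;
  buckets[j].length = 1;
  return j;
}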
Example #5
/* Add the given string to the contents of the keyword set.  Return NULL
   for success, an error message otherwise. */
const char *
kwsincr (kwset_t kws, char const *text, size_t len)
{
  struct kwset *kwset;
  struct trie *trie;
  unsigned char label;
  struct tree *link;
  int depth;
  struct tree *links[DEPTH_SIZE];
  enum { L, R } dirs[DEPTH_SIZE];
  struct tree *t, *r, *l, *rl, *lr;

  kwset = (struct kwset *) kws;
  trie = kwset->trie;
  text += len;

  /* Descend the trie (built of reversed keywords) character-by-character,
     installing new nodes when necessary. */
  while (len--)
    {
      label = kwset->trans ? kwset->trans[U(*--text)] : *--text;

      /* Descend the tree of outgoing links for this trie node,
         looking for the current character and keeping track
         of the path followed. */
      link = trie->links;
      links[0] = (struct tree *) &trie->links;
      dirs[0] = L;
      depth = 1;

      while (link && label != link->label)
        {
          links[depth] = link;
          if (label < link->label)
            dirs[depth++] = L, link = link->llink;
          else
            dirs[depth++] = R, link = link->rlink;
        }

      /* The current character doesn't have an outgoing link at
         this trie node, so build a new trie node and install
         a link in the current trie node's tree. */
      if (!link)
        {
          link = (struct tree *) obstack_alloc(&kwset->obstack,
                                               sizeof (struct tree));
          if (!link)
            return _("memory exhausted");
          link->llink = NULL;
          link->rlink = NULL;
          link->trie = (struct trie *) obstack_alloc(&kwset->obstack,
                                                     sizeof (struct trie));
          if (!link->trie)
            {
              obstack_free(&kwset->obstack, link);
              return _("memory exhausted");
            }
          link->trie->accepting = 0;
          link->trie->links = NULL;
          link->trie->parent = trie;
          link->trie->next = NULL;
          link->trie->fail = NULL;
          link->trie->depth = trie->depth + 1;
          link->trie->shift = 0;
          link->label = label;
          link->balance = 0;

          /* Install the new tree node in its parent. */
          if (dirs[--depth] == L)
            links[depth]->llink = link;
          else
            links[depth]->rlink = link;

          /* Back up the tree fixing the balance flags. */
          while (depth && !links[depth]->balance)
            {
              if (dirs[depth] == L)
                --links[depth]->balance;
              else
                ++links[depth]->balance;
              --depth;
            }

          /* Rebalance the tree by pointer rotations if necessary. */
          if (depth && ((dirs[depth] == L && --links[depth]->balance)
                        || (dirs[depth] == R && ++links[depth]->balance)))
            {
              switch (links[depth]->balance)
                {
                case (char) -2:
                  switch (dirs[depth + 1])
                    {
                    case L:
                      r = links[depth], t = r->llink, rl = t->rlink;
                      t->rlink = r, r->llink = rl;
                      t->balance = r->balance = 0;
                      break;
                    case R:
                      r = links[depth], l = r->llink, t = l->rlink;
                      rl = t->rlink, lr = t->llink;
                      t->llink = l, l->rlink = lr, t->rlink = r, r->llink = rl;
                      l->balance = t->balance != 1 ? 0 : -1;
                      r->balance = t->balance != (char) -1 ? 0 : 1;
                      t->balance = 0;
                      break;
                    default:
                      abort ();
                    }
                  break;
                case 2:
                  switch (dirs[depth + 1])
                    {
                    case R:
                      l = links[depth], t = l->rlink, lr = t->llink;
                      t->llink = l, l->rlink = lr;
                      t->balance = l->balance = 0;
                      break;
                    case L:
                      l = links[depth], r = l->rlink, t = r->llink;
                      lr = t->llink, rl = t->rlink;
                      t->llink = l, l->rlink = lr, t->rlink = r, r->llink = rl;
                      l->balance = t->balance != 1 ? 0 : -1;
                      r->balance = t->balance != (char) -1 ? 0 : 1;
                      t->balance = 0;
                      break;
                    default:
                      abort ();
                    }
                  break;
                default:
                  abort ();
                }

              if (dirs[depth - 1] == L)
                links[depth - 1]->llink = t;
              else
                links[depth - 1]->rlink = t;
            }
        }

      trie = link->trie;
    }

  /* Mark the node we finally reached as accepting, encoding the
     index number of this word in the keyword set so far. */
  if (!trie->accepting)
    trie->accepting = 1 + 2 * kwset->words;
  ++kwset->words;

  /* Keep track of the longest and shortest string of the keyword set. */
  if (trie->depth < kwset->mind)
    kwset->mind = trie->depth;
  if (trie->depth > kwset->maxd)
    kwset->maxd = trie->depth;

  return NULL;
}
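
A standalone sketch of one of the rebalancing cases above: the single left rotation applied when a node's balance reaches +2 and the insertion descended right-right. The node type here is illustrative, not grep's struct tree.

struct node { struct node *llink, *rlink; char balance; };

static struct node *
rotate_left (struct node *l)
{
  struct node *t = l->rlink;	/* t becomes the new subtree root */

  l->rlink = t->llink;		/* t's left subtree is re-attached under l */
  t->llink = l;
  t->balance = l->balance = 0;	/* both nodes end up balanced */
  return t;			/* caller re-links t into the parent */
}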
Example #6
void *
frame_obstack_alloc (unsigned long size)
{
  return obstack_alloc (&frame_cache_obstack, size);
}
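
A hedged sketch of the usual lifecycle of a cache obstack like the one wrapped above: everything allocated through it is released in one shot and the obstack is re-initialized for reuse. The names below are illustrative, not GDB's actual frame-cache code.

#include <stdlib.h>
#include "obstack.h"

#define obstack_chunk_alloc malloc
#define obstack_chunk_free  free

static struct obstack frame_cache_obstack;

static void
frame_cache_init (void)
{
  obstack_init (&frame_cache_obstack);
}

static void
frame_cache_flush (void)
{
  /* Freeing with a NULL object releases every allocation and leaves the
     obstack uninitialized, so it must be re-initialized before reuse.  */
  obstack_free (&frame_cache_obstack, NULL);
  obstack_init (&frame_cache_obstack);
}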
Example #7
struct type *
type_from_class (struct gdbarch *gdbarch, struct value *clas)
{
    struct type *type;
    char *name;
    struct value *temp;
    struct objfile *objfile;
    struct value *utf8_name;
    char *nptr;
    CORE_ADDR addr;

    type = check_typedef (value_type (clas));
    if (TYPE_CODE (type) == TYPE_CODE_PTR)
    {
        if (value_logical_not (clas))
            return NULL;
        clas = value_ind (clas);
    }
    addr = value_address (clas);

    objfile = get_dynamics_objfile (gdbarch);
    if (java_class_is_primitive (clas))
    {
        struct value *sig;

        temp = clas;
        sig = value_struct_elt (&temp, NULL, "method_count", NULL, "structure");
        return java_primitive_type (gdbarch, value_as_long (sig));
    }

    /* Get Class name.  */
    /* If the class loader is non-NULL, prepend the loader address.  FIXME */
    temp = clas;
    utf8_name = value_struct_elt (&temp, NULL, "name", NULL, "structure");
    name = get_java_utf8_name (&objfile->objfile_obstack, utf8_name);
    for (nptr = name; *nptr != 0; nptr++)
    {
        if (*nptr == '/')
            *nptr = '.';
    }

    type = java_lookup_class (name);
    if (type != NULL)
        return type;

    type = alloc_type (objfile);
    TYPE_CODE (type) = TYPE_CODE_STRUCT;
    INIT_CPLUS_SPECIFIC (type);

    if (name[0] == '[')
    {
        char *signature = name;
        int namelen = java_demangled_signature_length (signature);

        if (namelen > strlen (name))
            name = obstack_alloc (&objfile->objfile_obstack, namelen + 1);
        java_demangled_signature_copy (name, signature);
        name[namelen] = '\0';
        temp = clas;
        /* Set array element type.  */
        temp = value_struct_elt (&temp, NULL, "methods", NULL, "structure");
        deprecated_set_value_type (temp,
                                   lookup_pointer_type (value_type (clas)));
        TYPE_TARGET_TYPE (type) = type_from_class (gdbarch, temp);
    }

    ALLOCATE_CPLUS_STRUCT_TYPE (type);
    TYPE_TAG_NAME (type) = name;

    add_class_symtab_symbol (add_class_symbol (type, addr));
    return java_link_class_type (gdbarch, type, clas);
}
Example #8
/**
 * Set a table cell's internally-allocated text, or remove the cell.
 *
 * @param tbl   Cell table to set the text into.
 * @param col   Cell column to set the text into.
 * @param line  Cell line to set the text into.
 * @param text  Text to set, or NULL to remove the cell.
 */
static void
hidrd_ttbl_seta(hidrd_ttbl  *tbl,
                size_t       col,
                size_t       line,
                char        *text)
{
    size_t              row_line;
    hidrd_ttbl_row     *row;
    hidrd_ttbl_row     *new_row;
    size_t              cell_col;
    hidrd_ttbl_cell    *cell;
    hidrd_ttbl_cell    *new_cell;

    assert(tbl != NULL);

    /* Lookup row */
    for (row_line = 0, row = tbl->row;
         row->next != NULL && line > row_line + row->skip;
         row_line += 1 + row->skip, row = row->next);

    if (line > row_line)
    {
        /* Insert a new row after the found one */
        new_row = obstack_alloc(&tbl->obstack, sizeof(*new_row));
        new_row->next = row->next;
        row->next = new_row;

        /* Split/increase the skip */
        new_row->skip = new_row->next == NULL
                            ? 0 : row->skip - (line - row_line);
        row->skip = line - row_line - 1;

        /* Create the cell */
        new_cell = obstack_alloc(&tbl->obstack, sizeof(*new_cell));
        new_cell->next = NULL;
        new_cell->skip = 0;
        new_cell->text = NULL;
        new_row->cell = new_cell;

        /* Use the new row */
        row = new_row;
    }

    /* Lookup cell */
    for (cell_col = 0, cell = row->cell;
         cell->next != NULL && col > cell_col + cell->skip;
         cell_col += 1 + cell->skip, cell = cell->next);

    if (col > cell_col)
    {
        /* Insert a new cell after the found one */
        new_cell = obstack_alloc(&tbl->obstack, sizeof(*new_cell));
        new_cell->next = cell->next;
        cell->next = new_cell;

        /* Split/increase the skip */
        new_cell->skip = new_cell->next == NULL
                            ? 0 : cell->skip - (col - cell_col);
        cell->skip = col - cell_col - 1;

        /* Use the new cell */
        cell = new_cell;
    }


    /* Set the cell text */
    cell->text = text;
}
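
A standalone sketch of the skip-compressed list both lookups above walk: each node occupies one slot and is followed by `skip` empty slots, so an index is located by accumulating 1 + skip per node. The slot type and helper are illustrative only.

#include <stddef.h>

struct slot { struct slot *next; size_t skip; void *data; };

/* Find the last node starting at or before idx; return it and store its
   starting index in *start (mirrors the row/cell lookup loops above). */
static struct slot *
slot_lookup(struct slot *head, size_t idx, size_t *start)
{
    struct slot    *s = head;
    size_t          pos = 0;

    while (s->next != NULL && idx > pos + s->skip)
    {
        pos += 1 + s->skip;
        s = s->next;
    }
    *start = pos;
    return s;
}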
Example #9
File: pool.c Project: GimXu/global
/**
 * pool_malloc: allocate memory from pool
 *
 *	@param[in]	pool	POOL structure
 *	@param[in]	size	memory size
 *	@return		allocated memory
 */
void *
pool_malloc(POOL *pool, int size)
{
	return obstack_alloc(&pool->obstack, size);
}
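
A hedged usage sketch: it assumes the pool module's usual companions pool_open() and pool_close(), which are not shown above. A pool lets the caller make many small allocations and release them all at once.

#include <string.h>

static char *
copy_name_into_pool(POOL *pool, const char *name)
{
	char *copy = pool_malloc(pool, strlen(name) + 1);

	strcpy(copy, name);
	return copy;	/* released together with the pool, not individually */
}

/*
 * POOL *pool = pool_open();
 * char *copy = copy_name_into_pool(pool, "main");
 * ...
 * pool_close(pool);	-- frees every pool_malloc()ed block at once
 */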
Example #10
/* Read in and initialize the SOM export list which is present
   for all executables and shared libraries.  The import list
   consists of the symbols that are referenced in OBJFILE but
   not defined there.  (Variables that are imported are dealt
   with as "loc_indirect" vars.)
   Return value = number of import symbols read in. */
int
init_export_symbols (struct objfile *objfile)
{
  unsigned int export_list;
  unsigned int export_list_size;
  unsigned int string_table;
  unsigned int string_table_size;
  char *string_buffer;
  int i;
  int j;
  int k;
  asection *text_section;	/* section handle */
  unsigned int dl_header[12];	/* SOM executable header */

  /* A struct for an entry in the SOM export list */
  typedef struct
    {
      int next;			/* for hash table use -- we don't use this */
      int name;			/* index into string table */
      int value;		/* offset or plabel */
      int dont_care1;		/* not used */
      unsigned char type;	/* 0 = NULL, 2 = Data, 3 = Code, 7 = Storage, 13 = Plabel */
      char dont_care2;		/* not used */
      short dont_care3;		/* not used */
    }
  SomExportEntry;

  /* We read 100 entries in at a time from the disk file. */
#define SOM_READ_EXPORTS_NUM         100
#define SOM_READ_EXPORTS_CHUNK_SIZE  (sizeof (SomExportEntry) * SOM_READ_EXPORTS_NUM)
  SomExportEntry buffer[SOM_READ_EXPORTS_NUM];

  /* Initialize in case we error out */
  objfile->export_list = NULL;
  objfile->export_list_size = 0;

  /* It doesn't work, for some reason, to read in space $TEXT$;
     the subspace $SHLIB_INFO$ has to be used.  Some BFD quirk? pai/1997-08-05 */
  text_section = bfd_get_section_by_name (objfile->obfd, "$SHLIB_INFO$");
  if (!text_section)
    return 0;
  /* Get the SOM executable header */
  bfd_get_section_contents (objfile->obfd, text_section, dl_header, 0, 12 * sizeof (int));

  /* Check header version number for 10.x HP-UX */
  /* Currently we deal only with 10.x systems; on 9.x the version # is 89060912.
     FIXME: Change for future HP-UX releases and mods to the SOM executable format */
  if (dl_header[0] != 93092112)
    return 0;

  export_list = dl_header[8];
  export_list_size = dl_header[9];
  if (!export_list_size)
    return 0;
  string_table = dl_header[10];
  string_table_size = dl_header[11];
  if (!string_table_size)
    return 0;

  /* Suck in SOM string table */
  string_buffer = (char *) xmalloc (string_table_size);
  bfd_get_section_contents (objfile->obfd, text_section, string_buffer,
			    string_table, string_table_size);

  /* Allocate export list in the psymbol obstack; this has nothing
     to do with psymbols, just a matter of convenience.  We want the
     export list to be freed when the objfile is deallocated */
  objfile->export_list
    = (ExportEntry *) obstack_alloc (&objfile->objfile_obstack,
				   export_list_size * sizeof (ExportEntry));

  /* Read in the export entries, a bunch at a time */
  for (j = 0, k = 0;
       j < (export_list_size / SOM_READ_EXPORTS_NUM);
       j++)
    {
      bfd_get_section_contents (objfile->obfd, text_section, buffer,
			      export_list + j * SOM_READ_EXPORTS_CHUNK_SIZE,
				SOM_READ_EXPORTS_CHUNK_SIZE);
      for (i = 0; i < SOM_READ_EXPORTS_NUM; i++, k++)
	{
	  if (buffer[i].type != (unsigned char) 0)
	    {
	      objfile->export_list[k].name
		= (char *) obstack_alloc (&objfile->objfile_obstack, strlen (string_buffer + buffer[i].name) + 1);
	      strcpy (objfile->export_list[k].name, string_buffer + buffer[i].name);
	      objfile->export_list[k].address = buffer[i].value;
	      /* Some day we might want to record the type and other information too */
	    }
	  else
	    /* null type */
	    {
	      objfile->export_list[k].name = NULL;
	      objfile->export_list[k].address = 0;
	    }
	}
    }

  /* Get the leftovers */
  if (k < export_list_size)
    bfd_get_section_contents (objfile->obfd, text_section, buffer,
			      export_list + k * sizeof (SomExportEntry),
			  (export_list_size - k) * sizeof (SomExportEntry));
  for (i = 0; k < export_list_size; i++, k++)
    {
      if (buffer[i].type != (unsigned char) 0)
	{
	  objfile->export_list[k].name
	    = (char *) obstack_alloc (&objfile->objfile_obstack, strlen (string_buffer + buffer[i].name) + 1);
	  strcpy (objfile->export_list[k].name, string_buffer + buffer[i].name);
	  /* Some day we might want to record the type and other information too */
	  objfile->export_list[k].address = buffer[i].value;
	}
      else
	{
	  objfile->export_list[k].name = NULL;
	  objfile->export_list[k].address = 0;
	}
    }

  objfile->export_list_size = export_list_size;
  xfree (string_buffer);
  return export_list_size;
}
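
As a hedged aside, the obstack_alloc()/strcpy() pairs in both loops above can also be written with obstack_copy0(), which allocates size + 1 bytes and appends the terminating NUL itself; a minimal helper:

#include <string.h>
#include "obstack.h"

static char *
obstack_strdup_name (struct obstack *ob, const char *name)
{
  /* Copies the name plus its terminating NUL onto the obstack. */
  return (char *) obstack_copy0 (ob, name, strlen (name));
}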
Example #11
struct compile_module *
compile_object_load (const char *object_file, const char *source_file)
{
  struct cleanup *cleanups, *cleanups_free_objfile;
  bfd *abfd;
  struct setup_sections_data setup_sections_data;
  CORE_ADDR addr, func_addr, regs_addr;
  struct bound_minimal_symbol bmsym;
  long storage_needed;
  asymbol **symbol_table, **symp;
  long number_of_symbols, missing_symbols;
  struct type *dptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  unsigned dptr_type_len = TYPE_LENGTH (dptr_type);
  struct compile_module *retval;
  struct type *regs_type;
  char *filename, **matching;
  struct objfile *objfile;

  filename = tilde_expand (object_file);
  cleanups = make_cleanup (xfree, filename);

  abfd = gdb_bfd_open (filename, gnutarget, -1);
  if (abfd == NULL)
    error (_("\"%s\": could not open as compiled module: %s"),
          filename, bfd_errmsg (bfd_get_error ()));
  make_cleanup_bfd_unref (abfd);

  if (!bfd_check_format_matches (abfd, bfd_object, &matching))
    error (_("\"%s\": not in loadable format: %s"),
          filename, gdb_bfd_errmsg (bfd_get_error (), matching));

  if ((bfd_get_file_flags (abfd) & (EXEC_P | DYNAMIC)) != 0)
    error (_("\"%s\": not in object format."), filename);

  setup_sections_data.last_size = 0;
  setup_sections_data.last_section_first = abfd->sections;
  setup_sections_data.last_prot = -1;
  setup_sections_data.last_max_alignment = 1;
  bfd_map_over_sections (abfd, setup_sections, &setup_sections_data);
  setup_sections (abfd, NULL, &setup_sections_data);

  storage_needed = bfd_get_symtab_upper_bound (abfd);
  if (storage_needed < 0)
    error (_("Cannot read symbols of compiled module \"%s\": %s"),
          filename, bfd_errmsg (bfd_get_error ()));

  /* SYMFILE_VERBOSE is not passed even if FROM_TTY, user is not interested in
     "Reading symbols from ..." message for automatically generated file.  */
  objfile = symbol_file_add_from_bfd (abfd, filename, 0, NULL, 0, NULL);
  cleanups_free_objfile = make_cleanup_free_objfile (objfile);

  bmsym = lookup_minimal_symbol_text (GCC_FE_WRAPPER_FUNCTION, objfile);
  if (bmsym.minsym == NULL || MSYMBOL_TYPE (bmsym.minsym) == mst_file_text)
    error (_("Could not find symbol \"%s\" of compiled module \"%s\"."),
	   GCC_FE_WRAPPER_FUNCTION, filename);
  func_addr = BMSYMBOL_VALUE_ADDRESS (bmsym);

  /* The memory may be later needed
     by bfd_generic_get_relocated_section_contents
     called from default_symfile_relocate.  */
  symbol_table = obstack_alloc (&objfile->objfile_obstack, storage_needed);
  number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
  if (number_of_symbols < 0)
    error (_("Cannot parse symbols of compiled module \"%s\": %s"),
          filename, bfd_errmsg (bfd_get_error ()));

  missing_symbols = 0;
  for (symp = symbol_table; symp < symbol_table + number_of_symbols; symp++)
    {
      asymbol *sym = *symp;

      if (sym->flags != 0)
	continue;
      if (compile_debug)
	fprintf_unfiltered (gdb_stdout,
			    "lookup undefined ELF symbol \"%s\"\n",
			    sym->name);
      sym->flags = BSF_GLOBAL;
      sym->section = bfd_abs_section_ptr;
      if (strcmp (sym->name, "_GLOBAL_OFFSET_TABLE_") == 0)
	{
	  sym->value = 0;
	  continue;
	}
      bmsym = lookup_minimal_symbol (sym->name, NULL, NULL);
      switch (bmsym.minsym == NULL
	      ? mst_unknown : MSYMBOL_TYPE (bmsym.minsym))
	{
	case mst_text:
	  sym->value = BMSYMBOL_VALUE_ADDRESS (bmsym);
	  break;
	default:
	  warning (_("Could not find symbol \"%s\" "
		     "for compiled module \"%s\"."),
		   sym->name, filename);
	  missing_symbols++;
	}
    }
  if (missing_symbols)
    error (_("%ld symbols were missing, cannot continue."), missing_symbols);

  bfd_map_over_sections (abfd, copy_sections, symbol_table);

  regs_type = get_regs_type (objfile);
  if (regs_type == NULL)
    regs_addr = 0;
  else
    {
      /* Use read-only non-executable memory protection.  */
      regs_addr = gdbarch_infcall_mmap (target_gdbarch (),
					TYPE_LENGTH (regs_type),
					GDB_MMAP_PROT_READ);
      gdb_assert (regs_addr != 0);
      store_regs (regs_type, regs_addr);
    }

  discard_cleanups (cleanups_free_objfile);
  do_cleanups (cleanups);

  retval = xmalloc (sizeof (*retval));
  retval->objfile = objfile;
  retval->source_file = xstrdup (source_file);
  retval->func_addr = func_addr;
  retval->regs_addr = regs_addr;
  return retval;
}
Example #12
SymbolP
CreateSymbol ()
#endif
{
  return (SymbolP)obstack_alloc(MapObstack, sizeof(struct Symbol));
}
Example #13
struct compile_module *
compile_object_load (const char *object_file, const char *source_file,
		     enum compile_i_scope_types scope, void *scope_data)
{
  struct cleanup *cleanups, *cleanups_free_objfile;
  bfd *abfd;
  struct setup_sections_data setup_sections_data;
  CORE_ADDR addr, regs_addr, out_value_addr = 0;
  struct symbol *func_sym;
  struct type *func_type;
  struct bound_minimal_symbol bmsym;
  long storage_needed;
  asymbol **symbol_table, **symp;
  long number_of_symbols, missing_symbols;
  struct type *dptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  unsigned dptr_type_len = TYPE_LENGTH (dptr_type);
  struct compile_module *retval;
  struct type *regs_type, *out_value_type = NULL;
  char *filename, **matching;
  struct objfile *objfile;
  int expect_parameters;
  struct type *expect_return_type;
  struct munmap_list *munmap_list_head = NULL;

  filename = tilde_expand (object_file);
  cleanups = make_cleanup (xfree, filename);

  abfd = gdb_bfd_open (filename, gnutarget, -1);
  if (abfd == NULL)
    error (_("\"%s\": could not open as compiled module: %s"),
          filename, bfd_errmsg (bfd_get_error ()));
  make_cleanup_bfd_unref (abfd);

  if (!bfd_check_format_matches (abfd, bfd_object, &matching))
    error (_("\"%s\": not in loadable format: %s"),
          filename, gdb_bfd_errmsg (bfd_get_error (), matching));

  if ((bfd_get_file_flags (abfd) & (EXEC_P | DYNAMIC)) != 0)
    error (_("\"%s\": not in object format."), filename);

  setup_sections_data.last_size = 0;
  setup_sections_data.last_section_first = abfd->sections;
  setup_sections_data.last_prot = -1;
  setup_sections_data.last_max_alignment = 1;
  setup_sections_data.munmap_list_headp = &munmap_list_head;
  make_cleanup (munmap_listp_free_cleanup, &munmap_list_head);
  bfd_map_over_sections (abfd, setup_sections, &setup_sections_data);
  setup_sections (abfd, NULL, &setup_sections_data);

  storage_needed = bfd_get_symtab_upper_bound (abfd);
  if (storage_needed < 0)
    error (_("Cannot read symbols of compiled module \"%s\": %s"),
          filename, bfd_errmsg (bfd_get_error ()));

  /* SYMFILE_VERBOSE is not passed even if FROM_TTY, user is not interested in
     "Reading symbols from ..." message for automatically generated file.  */
  objfile = symbol_file_add_from_bfd (abfd, filename, 0, NULL, 0, NULL);
  cleanups_free_objfile = make_cleanup_free_objfile (objfile);

  func_sym = lookup_global_symbol_from_objfile (objfile,
						GCC_FE_WRAPPER_FUNCTION,
						VAR_DOMAIN);
  if (func_sym == NULL)
    error (_("Cannot find function \"%s\" in compiled module \"%s\"."),
	   GCC_FE_WRAPPER_FUNCTION, objfile_name (objfile));
  func_type = SYMBOL_TYPE (func_sym);
  if (TYPE_CODE (func_type) != TYPE_CODE_FUNC)
    error (_("Invalid type code %d of function \"%s\" in compiled "
	     "module \"%s\"."),
	   TYPE_CODE (func_type), GCC_FE_WRAPPER_FUNCTION,
	   objfile_name (objfile));

  switch (scope)
    {
    case COMPILE_I_SIMPLE_SCOPE:
      expect_parameters = 1;
      expect_return_type = builtin_type (target_gdbarch ())->builtin_void;
      break;
    case COMPILE_I_RAW_SCOPE:
      expect_parameters = 0;
      expect_return_type = builtin_type (target_gdbarch ())->builtin_void;
      break;
    case COMPILE_I_PRINT_ADDRESS_SCOPE:
    case COMPILE_I_PRINT_VALUE_SCOPE:
      expect_parameters = 2;
      expect_return_type = builtin_type (target_gdbarch ())->builtin_void;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("invalid scope %d"), scope);
    }
  if (TYPE_NFIELDS (func_type) != expect_parameters)
    error (_("Invalid %d parameters of function \"%s\" in compiled "
	     "module \"%s\"."),
	   TYPE_NFIELDS (func_type), GCC_FE_WRAPPER_FUNCTION,
	   objfile_name (objfile));
  if (!types_deeply_equal (expect_return_type, TYPE_TARGET_TYPE (func_type)))
    error (_("Invalid return type of function \"%s\" in compiled "
	    "module \"%s\"."),
	  GCC_FE_WRAPPER_FUNCTION, objfile_name (objfile));

  /* The memory may be later needed
     by bfd_generic_get_relocated_section_contents
     called from default_symfile_relocate.  */
  symbol_table = obstack_alloc (&objfile->objfile_obstack, storage_needed);
  number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
  if (number_of_symbols < 0)
    error (_("Cannot parse symbols of compiled module \"%s\": %s"),
          filename, bfd_errmsg (bfd_get_error ()));

  missing_symbols = 0;
  for (symp = symbol_table; symp < symbol_table + number_of_symbols; symp++)
    {
      asymbol *sym = *symp;

      if (sym->flags != 0)
	continue;
      sym->flags = BSF_GLOBAL;
      sym->section = bfd_abs_section_ptr;
      if (strcmp (sym->name, "_GLOBAL_OFFSET_TABLE_") == 0)
	{
	  if (compile_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"ELF symbol \"%s\" relocated to zero\n",
				sym->name);

	  /* It seems to be a GCC bug, with -mcmodel=large there should be no
	     need for _GLOBAL_OFFSET_TABLE_.  Together with -fPIE the data
	     remain PC-relative even with _GLOBAL_OFFSET_TABLE_ as zero.  */
	  sym->value = 0;
	  continue;
	}
      bmsym = lookup_minimal_symbol (sym->name, NULL, NULL);
      switch (bmsym.minsym == NULL
	      ? mst_unknown : MSYMBOL_TYPE (bmsym.minsym))
	{
	case mst_text:
	  sym->value = BMSYMBOL_VALUE_ADDRESS (bmsym);
	  if (compile_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"ELF mst_text symbol \"%s\" relocated to %s\n",
				sym->name,
				paddress (target_gdbarch (), sym->value));
	  break;
	case mst_text_gnu_ifunc:
	  sym->value = gnu_ifunc_resolve_addr (target_gdbarch (),
					       BMSYMBOL_VALUE_ADDRESS (bmsym));
	  if (compile_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"ELF mst_text_gnu_ifunc symbol \"%s\" "
				"relocated to %s\n",
				sym->name,
				paddress (target_gdbarch (), sym->value));
	  break;
	default:
	  warning (_("Could not find symbol \"%s\" "
		     "for compiled module \"%s\"."),
		   sym->name, filename);
	  missing_symbols++;
	}
    }
  if (missing_symbols)
    error (_("%ld symbols were missing, cannot continue."), missing_symbols);

  bfd_map_over_sections (abfd, copy_sections, symbol_table);

  regs_type = get_regs_type (func_sym, objfile);
  if (regs_type == NULL)
    regs_addr = 0;
  else
    {
      /* Use read-only non-executable memory protection.  */
      regs_addr = gdbarch_infcall_mmap (target_gdbarch (),
					TYPE_LENGTH (regs_type),
					GDB_MMAP_PROT_READ);
      gdb_assert (regs_addr != 0);
      munmap_list_add (&munmap_list_head, regs_addr, TYPE_LENGTH (regs_type));
      if (compile_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "allocated %s bytes at %s for registers\n",
			    paddress (target_gdbarch (),
				      TYPE_LENGTH (regs_type)),
			    paddress (target_gdbarch (), regs_addr));
      store_regs (regs_type, regs_addr);
    }

  if (scope == COMPILE_I_PRINT_ADDRESS_SCOPE
      || scope == COMPILE_I_PRINT_VALUE_SCOPE)
    {
      out_value_type = get_out_value_type (func_sym, objfile, scope);
      if (out_value_type == NULL)
	{
	  do_cleanups (cleanups);
	  return NULL;
	}
      check_typedef (out_value_type);
      out_value_addr = gdbarch_infcall_mmap (target_gdbarch (),
					     TYPE_LENGTH (out_value_type),
					     (GDB_MMAP_PROT_READ
					      | GDB_MMAP_PROT_WRITE));
      gdb_assert (out_value_addr != 0);
      munmap_list_add (&munmap_list_head, out_value_addr,
		       TYPE_LENGTH (out_value_type));
      if (compile_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "allocated %s bytes at %s for printed value\n",
			    paddress (target_gdbarch (),
				      TYPE_LENGTH (out_value_type)),
			    paddress (target_gdbarch (), out_value_addr));
    }

  discard_cleanups (cleanups_free_objfile);

  retval = xmalloc (sizeof (*retval));
  retval->objfile = objfile;
  retval->source_file = xstrdup (source_file);
  retval->func_sym = func_sym;
  retval->regs_addr = regs_addr;
  retval->scope = scope;
  retval->scope_data = scope_data;
  retval->out_value_type = out_value_type;
  retval->out_value_addr = out_value_addr;

  /* CLEANUPS will free MUNMAP_LIST_HEAD.  */
  retval->munmap_list_head = munmap_list_head;
  munmap_list_head = NULL;

  do_cleanups (cleanups);

  return retval;
}
Example #14
struct frame_info *
get_prev_frame (struct frame_info *next_frame)
{
  CORE_ADDR address = 0;
  struct frame_info *prev;
  int fromleaf = 0;
  char *name;

  /* If the requested entry is in the cache, return it.
     Otherwise, figure out what the address should be for the entry
     we're about to add to the cache. */

  if (!next_frame)
    {
#if 0
      /* This screws value_of_variable, which just wants a nice clean
         NULL return from block_innermost_frame if there are no frames.
         I don't think I've ever seen this message happen otherwise.
         And returning NULL here is a perfectly legitimate thing to do.  */
      if (!current_frame)
	{
	  error ("You haven't set up a process's stack to examine.");
	}
#endif

      return current_frame;
    }

  /* If we have the prev one, return it */
  if (next_frame->prev)
    return next_frame->prev;

  /* On some machines it is possible to call a function without
     setting up a stack frame for it.  On these machines, we
     define this macro to take two args; a frameinfo pointer
     identifying a frame and a variable to set or clear if it is
     or isn't leafless.  */

  /* Still don't want to worry about this except on the innermost
     frame.  This macro will set FROMLEAF if NEXT_FRAME is a
     frameless function invocation.  */
  if (!(next_frame->next))
    {
      fromleaf = FRAMELESS_FUNCTION_INVOCATION (next_frame);
      if (fromleaf)
	address = FRAME_FP (next_frame);
    }

  if (!fromleaf)
    {
      /* Two macros defined in tm.h specify the machine-dependent
         actions to be performed here.
         First, get the frame's chain-pointer.
         If that is zero, the frame is the outermost frame or a leaf
         called by the outermost frame.  This means that if start
         calls main without a frame, we'll return 0 (which is fine
         anyway).

         Nope; there's a problem.  This also returns when the current
         routine is a leaf of main.  This is unacceptable.  We move
         this to after the ffi test; I'd rather have backtraces from
         start go curfluy than have an abort called from main not show
         main.  */
      address = FRAME_CHAIN (next_frame);
      if (!FRAME_CHAIN_VALID (address, next_frame))
	return 0;
      address = FRAME_CHAIN_COMBINE (address, next_frame);
    }
  if (address == 0)
    return 0;

  prev = (struct frame_info *)
    obstack_alloc (&frame_cache_obstack,
		   sizeof (struct frame_info));

  /* Zero all fields by default.  */
  memset (prev, 0, sizeof (struct frame_info));

  if (next_frame)
    next_frame->prev = prev;
  prev->next = next_frame;
  prev->frame = address;

/* This change should not be needed, FIXME!  We should
   determine whether any targets *need* INIT_FRAME_PC to happen
   after INIT_EXTRA_FRAME_INFO and come up with a simple way to
   express what goes on here.

   INIT_EXTRA_FRAME_INFO is called from two places: create_new_frame
   (where the PC is already set up) and here (where it isn't).
   INIT_FRAME_PC is only called from here, always after
   INIT_EXTRA_FRAME_INFO.

   The catch is the MIPS, where INIT_EXTRA_FRAME_INFO requires the PC
   value (which hasn't been set yet).  Some other machines appear to
   require INIT_EXTRA_FRAME_INFO before they can do INIT_FRAME_PC.  Phoo.

   We shouldn't need INIT_FRAME_PC_FIRST to add more complication to
   an already overcomplicated part of GDB.   [email protected], 15Sep92.

   Assuming that some machines need INIT_FRAME_PC after
   INIT_EXTRA_FRAME_INFO, one possible scheme:

   SETUP_INNERMOST_FRAME()
   Default version is just create_new_frame (read_fp (),
   read_pc ()).  Machines with extra frame info would do that (or the
   local equivalent) and then set the extra fields.
   SETUP_ARBITRARY_FRAME(argc, argv)
   Only change here is that create_new_frame would no longer init extra
   frame info; SETUP_ARBITRARY_FRAME would have to do that.
   INIT_PREV_FRAME(fromleaf, prev)
   Replace INIT_EXTRA_FRAME_INFO and INIT_FRAME_PC.  This should
   also return a flag saying whether to keep the new frame, or
   whether to discard it, because on some machines (e.g.  mips) it
   is really awkward to have FRAME_CHAIN_VALID called *before*
   INIT_EXTRA_FRAME_INFO (there is no good way to get information
   deduced in FRAME_CHAIN_VALID into the extra fields of the new frame).
   std_frame_pc(fromleaf, prev)
   This is the default setting for INIT_PREV_FRAME.  It just does what
   the default INIT_FRAME_PC does.  Some machines will call it from
   INIT_PREV_FRAME (either at the beginning, the end, or in the middle).
   Some machines won't use it.
   [email protected], 13Apr93, 31Jan94, 14Dec94.  */

  INIT_FRAME_PC_FIRST (fromleaf, prev);

#ifdef INIT_EXTRA_FRAME_INFO
  INIT_EXTRA_FRAME_INFO (fromleaf, prev);
#endif

  /* This entry is in the frame queue now, which is good since
     FRAME_SAVED_PC may use that queue to figure out its value
     (see tm-sparc.h).  We want the pc saved in the inferior frame. */
  INIT_FRAME_PC (fromleaf, prev);

  /* If ->frame and ->pc are unchanged, we are in the process of getting
     ourselves into an infinite backtrace.  Some architectures check this
     in FRAME_CHAIN or thereabouts, but it seems like there is no reason
     this can't be an architecture-independent check.  */
  if (next_frame != NULL)
    {
      if (prev->frame == next_frame->frame
	  && prev->pc == next_frame->pc)
	{
	  next_frame->prev = NULL;
	  obstack_free (&frame_cache_obstack, prev);
	  return NULL;
	}
    }

  find_pc_partial_function (prev->pc, &name,
			    (CORE_ADDR *) NULL, (CORE_ADDR *) NULL);
  if (IN_SIGTRAMP (prev->pc, name))
    prev->signal_handler_caller = 1;

  return prev;
}
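
The unchanged-frame check near the end of get_prev_frame is the whole defense against infinite backtraces: if unwinding produced a candidate with the same frame address and PC as the frame below it, the candidate is discarded. The same test in isolation, on a hypothetical cut-down frame record:

/* Hypothetical minimal frame record; GDB's real struct frame_info
   carries many more fields.  */
struct toy_frame
{
  unsigned long frame;		/* frame (stack) address */
  unsigned long pc;		/* resume address in this frame */
};

/* Return nonzero if unwinding from NEXT to PREV made no progress, in
   which case the caller should drop PREV and stop the backtrace.  */
int
unwind_made_no_progress (const struct toy_frame *next,
			 const struct toy_frame *prev)
{
  return prev->frame == next->frame && prev->pc == next->pc;
}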
Example #15
0
static void BSPTreeCreateRecursive(BSPTreeNode *tree,
				   PolyListNode *pllist,
#if BSPTREE_STATS
				   int depth,
#endif
				   struct obstack *scratch)
{
  PolyListNode *plnode, *front, *back;
  EdgeIntersection edges[2];

  tree->front = tree->back = NULL;
  ListPop(pllist, plnode);
  tree->polylist = plnode;
  check_poly(plnode->poly);
  PolyPlane(plnode, &tree->plane);

  front = back = NULL;
  while (pllist) {    
    ListPop(pllist, plnode);
    check_poly(plnode->poly);
    switch (ClassifyPoly(&tree->plane, plnode->poly, edges)) {
    case BACKOF:
      check_poly(plnode->poly);
      ListPush(back, plnode);
      break;
    case COPLANAR:
      check_poly(plnode->poly);
      ListPush(tree->polylist, plnode);
      break;
    case INFRONTOF:
      check_poly(plnode->poly);
      ListPush(front, plnode);
      break;
    case BOTH_SIDES:
      check_poly(plnode->poly);
      SplitPolyNode(plnode, &front, &back, edges, scratch);
      break;
    }
  }
  if (front) {
    tree->front = obstack_alloc(scratch, sizeof(*tree->front));
    BSPTreeCreateRecursive(tree->front, front,
#if BSPTREE_STATS
			   depth+1,
#endif
			   scratch);
  }
  if (back) {
    tree->back = obstack_alloc(scratch, sizeof(*tree->back));
    BSPTreeCreateRecursive(tree->back, back,
#if BSPTREE_STATS
			   depth+1,
#endif
			   scratch);
  }
#if BSPTREE_STATS
  if (depth > tree_depth) {
    tree_depth = depth;
  }
#endif
}
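
BSPTreeCreateRecursive drives the partitioning entirely through the ListPush/ListPop macros, which act on an intrusive singly-linked stack of PolyListNodes. Plausible stand-in definitions are sketched below, assuming each node type carries a `next' pointer; the real macros are defined elsewhere in the source tree and may differ in detail.

/* Push NODE onto the list headed by HEAD.  */
#define ListPush(head, node)			\
  do {						\
    (node)->next = (head);			\
    (head) = (node);				\
  } while (0)

/* Pop the first node off HEAD into NODE; HEAD must be non-NULL.  */
#define ListPop(head, node)			\
  do {						\
    (node) = (head);				\
    (head) = (head)->next;			\
    (node)->next = NULL;			\
  } while (0)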
Example #16
0
void
init_error ()
{
  gcc_obstack_init (&scratch_obstack);
  scratch_firstobj = (char *)obstack_alloc (&scratch_obstack, 0);
}
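
init_error records the base of the scratch obstack so that everything grown onto it later can be thrown away wholesale. A minimal standalone sketch of that scratch-obstack idiom follows, using the plain GNU obstack API rather than GCC's gcc_obstack_init wrapper; the function names here are hypothetical.

#include <stdlib.h>
#include <obstack.h>

#define obstack_chunk_alloc malloc
#define obstack_chunk_free  free

static struct obstack scratch_obstack;
static char *scratch_firstobj;

/* Record the base of the obstack, mirroring init_error above.  */
void
init_scratch (void)
{
  obstack_init (&scratch_obstack);
  scratch_firstobj = (char *) obstack_alloc (&scratch_obstack, 0);
}

/* Discard everything grown since init_scratch without releasing the
   obstack itself, so the next user starts from a clean slate.  */
void
reset_scratch (void)
{
  obstack_free (&scratch_obstack, scratch_firstobj);
  scratch_firstobj = (char *) obstack_alloc (&scratch_obstack, 0);
}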
Example #17
0
static PolyListNode *
QuadToLinkedPolyList(Transform T, Transform Tdual, Transform TxT,
		     const void **tagged_app, PolyListNode **plistp,
		     Quad *quad, struct obstack *scratch)
{
  PolyListNode *plist = NULL;
  Poly *qpoly;
  int i, j, concave;

  (void)Tdual;
  (void)TxT;

  if (!plistp) {
    plistp = &plist;
  }

  if(!(quad->geomflags & QUAD_N)) {
    QuadComputeNormals(quad);
  }
  
  qpoly = NULL;
  for (i = 0; i < quad->maxquad; i++) {
    /* First try to create a single polygon; if that mesh-cell turns
     * out to be non-flat or concave, then sub-divide it.
     */
    if (!qpoly) {
      qpoly = new_poly(4, NULL, scratch);
      for (j = 0; j < 4; j++) {
	qpoly->v[j] = obstack_alloc(scratch, sizeof(Vertex));
      }
    }
    if (T && T != TM_IDENTITY) {
      for (j = 0; j < 4; j++) {
	memset(qpoly->v[j], 0, sizeof(Vertex));
	HPt3Transform(T, &quad->p[i][j], &qpoly->v[j]->pt);
	NormalTransform(T, &quad->n[i][j], &qpoly->v[j]->vn);
      }
    } else {
      for (j = 0; j < 4; j++) {
	memset(qpoly->v[j], 0, sizeof(Vertex));
	qpoly->v[j]->pt   = quad->p[i][j];
	qpoly->v[j]->vn   = quad->n[i][j];
      }
    }
    qpoly->flags |= PL_HASVN;
    if (quad->geomflags & QUAD_C) {
      qpoly->flags |= PL_HASVCOL;
      for (j = 0; j < 4; j++) {
	qpoly->v[j]->vcol = quad->c[i][j];
      }
    }
    if (quad->geomflags & COLOR_ALPHA) {
      qpoly->flags |= COLOR_ALPHA;
    }
    PolyNormal(qpoly, &qpoly->pn,
	       quad->geomflags & VERT_4D, false, &qpoly->flags, &concave);
    qpoly->flags |= PL_HASPN;
    if (qpoly->flags & POLY_NOPOLY) {
      /* degenerate; skip it, but reuse the memory region.  */
      qpoly->flags = 0;
    } else if (qpoly->flags & (POLY_CONCAVE|POLY_NONFLAT)) {
      /* we need to split it */
      split_quad_poly(concave, qpoly, plistp, tagged_app, scratch);
      qpoly = NULL;
    } else {
      PolyListNode *new_pn;
      
      new_pn = new_poly_list_node(tagged_app, scratch);
      new_pn->poly = qpoly;
      ListPush(*plistp, new_pn);
      qpoly = NULL;
    }
  }
  
  return *plistp;
}
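
The loop in QuadToLinkedPolyList allocates qpoly lazily and only gives it up once a cell is actually consumed; a degenerate cell leaves the allocation in place so the next iteration can overwrite it. The same reuse-on-reject pattern in isolation, with hypothetical names and plain malloc standing in for the obstack:

#include <stdlib.h>
#include <string.h>

struct work { int data[4]; };

/* Process N cells; CONSUME returns nonzero when it keeps the object.
   A rejected cell leaves the allocation in place for reuse, as the
   quad loop above does with qpoly.  */
void
process_cells (int n, int (*consume) (struct work *))
{
  struct work *w = NULL;
  int i;

  for (i = 0; i < n; i++)
    {
      if (w == NULL)
	w = malloc (sizeof (*w));
      memset (w, 0, sizeof (*w));
      /* ... fill *w from cell i ... */
      if (consume (w))
	w = NULL;		/* ownership transferred, allocate afresh */
      /* else keep w and overwrite it next time round */
    }
  free (w);			/* left-over buffer if the last cell was rejected */
}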
Example #18
0
/*
 * layout_addresses() is called after all the assembly code has been read and
 * fragments, symbols and fixups have been created.  This routine sets the
 * addresses of the fragments and symbols.  Then it does the fixups of the
 * frags and prepares the fixes so relocation entries can be created from them.
 */
void
layout_addresses(
void)
{
    struct frchain *frchainP;
    fragS *fragP;
    relax_addressT slide, tmp;
    symbolS *symbolP;
    long nbytes;	/* signed so the rs_org check below can detect overshoot */
    unsigned long fill_size, repeat_expression, partial_bytes;

	if(frchain_root == NULL)
	    return;

	/*
	 * If there is any current frag close it off.
	 */
	if(frag_now != NULL && frag_now->fr_fix == 0){
	    frag_now->fr_fix = obstack_next_free(&frags) -
			       frag_now->fr_literal;
	    frag_wane(frag_now);
	}

	/*
	 * For every section, add a last ".fill 0" frag that will later be used
	 * as the ending address of that section.
	 */
	for(frchainP = frchain_root; frchainP; frchainP = frchainP->frch_next){
	    /*
	     * We must do the obstack_finish(), so the next object we put on
	     * obstack frags will not appear to start at the fr_literal of the
	     * current frag.  Also, it ensures that the next object will begin
	     * on an address that is aligned correctly for the engine that runs
	     * the assembler.
	     */
	    obstack_finish(&frags);

	    /*
	     * Make a fresh frag for the last frag.
	     */
	    frag_now = (fragS *)obstack_alloc(&frags, SIZEOF_STRUCT_FRAG);
	    memset(frag_now, '\0', SIZEOF_STRUCT_FRAG);
	    frag_now->fr_next = NULL;
	    obstack_finish(&frags);

	    /*
	     * Append the new frag to current frchain.
	     */
	    frchainP->frch_last->fr_next = frag_now;
	    frchainP->frch_last = frag_now;
	    frag_wane(frag_now);

	}

	/*
	 * Now set the relative addresses of frags within each section by
	 * relaxing each section.  That is, all sections will start at address
	 * zero and the addresses of the frags in each section will increase from
	 * there.
	 */
	for(frchainP = frchain_root; frchainP; frchainP = frchainP->frch_next){
	    if((frchainP->frch_section.flags & SECTION_TYPE) == S_ZEROFILL)
		continue;
	    /*
	     * This is done so that if md_estimate_size_before_relax() (called
	     * by relax_section) wants to make fixSs, they are created for this
	     * section.
	     */
	    frchain_now = frchainP;

	    relax_section(frchainP->frch_root, frchainP->frch_nsect);
	}

	/*
	 * Now set the absolute addresses of all frags by sliding the frags in
	 * each non-zerofill section by the address ranges taken up by the
	 * sections before it.
	 */ 
	slide = 0;
	for(frchainP = frchain_root; frchainP; frchainP = frchainP->frch_next){
	    if((frchainP->frch_section.flags & SECTION_TYPE) == S_ZEROFILL)
		continue;
	    slide = round(slide, 1 << frchainP->frch_section.align);
	    tmp = frchainP->frch_last->fr_address;
	    if(slide != 0){
		for(fragP = frchainP->frch_root; fragP; fragP = fragP->fr_next){
		    fragP->fr_address += slide;
		}
	    }
	    slide += tmp;
	}
	/*
	 * Now with the non-zerofill section addresses set, set all of the
	 * addresses of the zerofill sections.  Coming in, the fr_address is
	 * the size of the section and going out it is the start address.  This
	 * will make layout_symbols() work out naturally.  The only funky thing
	 * is that section numbers do not end up in address order.
	 */
	for(frchainP = frchain_root; frchainP; frchainP = frchainP->frch_next){
	    if((frchainP->frch_section.flags & SECTION_TYPE) != S_ZEROFILL)
		continue;
	    slide = round(slide, 1 << frchainP->frch_section.align);

	    tmp = frchainP->frch_root->fr_address;
	    frchainP->frch_root->fr_address = slide;
	    frchainP->frch_last->fr_address = tmp + slide;
	    slide += tmp;
	}

	/*
	 * Set the symbol addresses based on their frag's address.
	 * Forward references are handled first.
	 */
	for(symbolP = symbol_rootP; symbolP; symbolP = symbolP->sy_next){
	    if(symbolP->sy_forward != NULL){
		if(symbolP->sy_nlist.n_type & N_STAB)
		    symbolP->sy_other = symbolP->sy_forward->sy_other;
		symbolP->sy_value += symbolP->sy_forward->sy_value +
				     symbolP->sy_forward->sy_frag->fr_address;
		symbolP->sy_forward = 0;
	    }
	}
	for(symbolP = symbol_rootP; symbolP; symbolP = symbolP->sy_next){
	    symbolP->sy_value += symbolP->sy_frag->fr_address;
	}

	/*
	 * At this point the addresses of the frags reflect the addresses we use
	 * in the object file and the symbol values are correct.
	 * Scan the frags, converting any ".org"s and ".align"s to ".fill"s.
	 * Also convert any machine-dependent frags using md_convert_frag().
	 */
	for(frchainP = frchain_root; frchainP; frchainP = frchainP->frch_next){
	    /*
	     * This is done so any fixes created by md_convert_frag() are for
	     * this section.
	     */
	    frchain_now = frchainP;

	    for(fragP = frchainP->frch_root; fragP; fragP = fragP->fr_next){
		switch(fragP->fr_type){
		case rs_align:
		case rs_org:
		    /* convert this frag to an rs_fill type */
		    fragP->fr_type = rs_fill;
		    /*
		     * Calculate the number of bytes the variable part of the
		     * rs_fill frag will need to fill.  Then express this as
		     * fill_size * repeat_expression + partial_bytes.
		     */
		    know(fragP->fr_next != NULL);
		    nbytes = fragP->fr_next->fr_address -
			     fragP->fr_address -
			     fragP->fr_fix;
		    if(nbytes < 0){
			as_warn("rs_org invalid, dot past value by %ld bytes",
				nbytes);
			nbytes = 0;
		    }
		    fill_size = fragP->fr_var;
		    repeat_expression = nbytes / fill_size;
		    partial_bytes = nbytes - (repeat_expression * fill_size);
		    /*
		     * Now set the fr_offset to the fill repeat_expression
		     * since this is now an rs_fill type.  The fr_var is still
		     * the fill_size.
		     */
		    fragP->fr_offset = repeat_expression;
		    /*
		     * For rs_align frags there may be partial_bytes to fill
		     * with zeros before we can fill with the fill_expression
		     * of fill_size.  When the rs_align frag was created,
		     * fill_size-1 extra bytes were reserved in the fixed part.
		     */
		    if(partial_bytes != 0){
			/* move the fill_expression bytes forward */
			memmove(fragP->fr_literal +fragP->fr_fix +partial_bytes,
				fragP->fr_literal +fragP->fr_fix,
				fragP->fr_var);
    			/* zero out the partial_bytes */
    			memset(fragP->fr_literal + fragP->fr_fix,
			       '\0',
			       partial_bytes);
			/* adjust the fixed part of the frag */
    			fragP->fr_fix += partial_bytes;
		    }
		    break;

		case rs_fill:
		    break;

		case rs_machine_dependent:
		    md_convert_frag(fragP);
		    /*
		     * After md_convert_frag() we turn the frag into a ".fill 0";
		     * md_convert_frag() should have set up any fixSs and constants
		     * required.
		     */
		    frag_wane(fragP);
		    break;

		default:
		    BAD_CASE(fragP->fr_type);
		    break;
		}
	    }
	}

	/*
	 * For each section do the fixups for the frags.
	 */
	for(frchainP = frchain_root; frchainP; frchainP = frchainP->frch_next){
	    fixup_section(frchainP->frch_fix_root, frchainP->frch_nsect);
	}
}
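
The rs_org/rs_align conversion above boils down to splitting the gap to the next frag into whole copies of the fill expression plus a zero-padded remainder. A small worked example of that arithmetic with made-up numbers:

#include <stdio.h>

/* The fill arithmetic from the rs_org/rs_align case, in isolation:
   a gap of nbytes is rewritten as repeat copies of the fill expression
   of fill_size bytes, preceded by partial zero bytes.  */
int
main (void)
{
  long nbytes = 14;		/* hypothetical gap to the next frag */
  long fill_size = 4;		/* size of one copy of the fill expression */
  long repeat = nbytes / fill_size;		/* 3 */
  long partial = nbytes - repeat * fill_size;	/* 2 */

  printf ("fill %ld bytes as %ld zero bytes + %ld x %ld\n",
	  nbytes, partial, repeat, fill_size);
  return 0;
}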