Example #1
struct page *alloc_pages(int n, struct page *next)
{
  struct page *best;
  int bestn;
  struct page *scan;

  assert(n >= K);

  scan = unused_pages;
  /* Find first fit */
  for (;;)
    {
      if (!scan)
	return alloc_new(n, next);

      if (scan->pagecount >= n) break;
      scan = scan->next;
    }

  /* Now find best fit */
  best = scan;
  bestn = scan->pagecount;
  for (;;)
    {
      scan = scan->next;
      if (!scan)
	return alloc_split(best, n, next);

      if (scan->pagecount >= n && scan->pagecount < bestn)
	{
	  best = scan;
	  bestn = scan->pagecount;
	}
    }
}
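Example #1 (and Examples #3 and #5 below) operates on a shared free list of page groups. A minimal sketch of the declarations they assume, with the field layout inferred from the snippets rather than taken from the original headers:

struct page
{
  struct page *next;      /* next group on the free list */
  __rcintptr pagecount;   /* contiguous pages in this group (type inferred from Example #5) */
};

static struct page *unused_pages;   /* free list of multi-page groups */
/* K (not shown here) is the smallest request alloc_pages() accepts, per
   the asserts; groups smaller than K only serve single-page allocations. */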
Example #2
struct page* alloc_single_page(struct page *next)
{
	struct page *p = NULL;

	/* An earlier per-thread variant hashed pthread_self() to pick a
	   fixed list; here a random list is drawn instead, retrying with
	   a fresh index while the chosen list's lock is busy. */
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);

	/* Refill this list from the underlying allocator if it is empty. */
	if (single_pages[list_id % MAXLISTS].page_count == 0) {
		p = alloc_new(PAGE_GROUP_SIZE, NULL);
		add_single_pages(p);
	}

	/* Pop the head page and link it onto the caller's chain. */
	p = single_pages[list_id % MAXLISTS].pages;
	single_pages[list_id % MAXLISTS].pages = p->next;
	p->next = next;
	single_pages[list_id % MAXLISTS].page_count--;
	release_spinlock(&single_pages[list_id % MAXLISTS].lock);

	return p;
}
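Examples #2 and #3 share a lock-striped array of free lists: a thread picks a random list and, if its spinlock is busy, retries with a fresh random index instead of blocking, which keeps contention low without a global lock. A minimal sketch of the declarations they appear to assume (names from the snippets; the lock type and exact layout are assumptions):

struct page_list
{
	spinlock_t lock;          /* per-list spinlock (assumed type) */
	struct page *pages;       /* singly linked list of free pages */
	int page_count;           /* pages currently on this list */
};

static struct page_list single_pages[MAXLISTS];
static int list_id;               /* most recently chosen list index */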
Example #3
struct page* alloc_pages(int n, struct page *next)
{
	struct page *ret_val, *p = NULL;

	assert(n >= K);

	/* As in alloc_single_page: pick a random list, retrying while
	   its lock is busy (an earlier variant hashed pthread_self()). */
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);

	/* Refill this list if it cannot cover the request. */
	if (n > single_pages[list_id % MAXLISTS].page_count) {
		p = alloc_new(n + PAGE_GROUP_SIZE, NULL);
		add_single_pages(p);
	}

	/* Pop the head group and link it onto the caller's chain. */
	ret_val = single_pages[list_id % MAXLISTS].pages;
	single_pages[list_id % MAXLISTS].pages =
	        single_pages[list_id % MAXLISTS].pages->next;
	ret_val->next = next;
	single_pages[list_id % MAXLISTS].page_count -= n;
	release_spinlock(&single_pages[list_id % MAXLISTS].lock);

	return ret_val;
}
Example #4
void			*malloc_intern(size_t size)
{
  t_list		*list;

  if ((list = best_fit(size)) != NULL)
    return (alloc_at(list, size));
  if ((list = alloc_new(size)) == NULL)
    return (NULL);
  if (list->used == TRUE)
    return (list + 1);
  return (alloc_at(list, size));
}
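Example #4 returns list + 1 when the freshly allocated node is already marked used: with a header-plus-payload layout, adding 1 to a header pointer skips exactly one header, landing on the user's block. A hypothetical layout that would make this work (the real t_list definition is not part of the snippet):

typedef struct s_list
{
  struct s_list	*next;    /* next block in g_alloc_list (assumed) */
  size_t	size;     /* payload size in bytes (assumed) */
  int		used;     /* TRUE once handed out (assumed) */
}		t_list;

/* Under this layout, (void *)(list + 1) points just past the header,
   i.e. at the payload that alloc_at() would otherwise return. */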
Example #5
void scavenge_single_pages(int n)
{
  /* Add n pages to the single_pages list */
  struct page *scan, *best;
  __rcintptr bestn;

  /* Take any group in unused_pages that is <= n or < K.
     Remember the smallest entry > n too. This is sort of equivalent
     to a best fit where we allow partial allocations to make up a whole. */
  best = NULL;
  /* Sentinel: larger than any real group's pagecount. */
  bestn = (__rcintptr)1 << (sizeof(__rcintptr) * CHAR_BIT - 2);
  scan = unused_pages;
  while (scan)
    {
      /* The pages < K can't be used for anything but single pages so we
	 might as well grab them even if they are a little too big */
      if (scan->pagecount <= n || scan->pagecount < K)
	{
	  struct page *adding = scan;

	  scan = scan->next;
	  n -= adding->pagecount;
	  unlink_page(&unused_pages, adding);
	  add_single_pages(adding);
          assert(single_pages->pagecount > 0);
	  if (n <= 0) return;
	}
      else
	{
	  if (scan->pagecount < bestn)
	    {
	      bestn = scan->pagecount;
	      best = scan;
	    }
	  scan = scan->next;
	}
    }
  /* Still not enough. Split the best block if there is one, allocate
     new pages otherwise */
  if (!best) {
    add_single_pages(alloc_new(n, NULL));
  } else if (best->pagecount - n < K) {
    unlink_page(&unused_pages, best);
    add_single_pages(best);
    assert(single_pages->pagecount > 0);
  } else {
    add_single_pages(alloc_split(best, n, NULL));
    assert(single_pages->pagecount > 0);
  }
}
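Example #5 leans on an unlink_page() helper that is not shown. A hypothetical version matching the singly linked sketch above, using the classic pointer-to-pointer walk (the real project may keep back links for O(1) removal):

static void unlink_page(struct page **list, struct page *page)
{
  /* Walk the links until *list is the pointer that refers to page,
     then splice page out by redirecting that pointer. */
  while (*list && *list != page)
    list = &(*list)->next;
  if (*list)
    *list = page->next;
}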
Example #6
ilka_off_t ilka_alloc_in(struct ilka_region *r, size_t len, size_t area)
{
    ilka_off_t off = alloc_new(&r->alloc, len, area);

    ilka_assert(off + len <= ilka_len(r), "invalid alloc offset: %p", (void *) off);
    ilka_assert(!off || off >= r->header_len, "invalid alloc offset: %p", (void *) off);

    if (ILKA_MCHECK) {
        mcheck_tag_t tag = mcheck_tag_next();
        mcheck_alloc(&r->mcheck, off, len, tag);
        off = mcheck_tag(off, tag);
    }

    if (ILKA_ALLOC_ZERO && off)
        memset(ilka_write(r, off, len), 0, len);

    if (ILKA_ALLOC_FILL_ON_ALLOC && off)
        memset(ilka_write(r, off, len), 0xFF, len);

    return off;
}
Example #7
PRIVATE int buf_put_block (HTStream * me, const char * b, int l)
{
    /*
    **  If we are in pause mode then don't write anything but return
    **  HT_PAUSE. The upper stream should then respect it and not write
    **  any more data.
    */
    if (me->state == HT_BS_PAUSE) return HT_PAUSE;

    /*
    **  Start handling the incoming data. If we are still buffering then add
    **  it to the buffer. Otherwise just pump it through. Note that we still
    **  count the length - even if we have given up buffering!
    */
    me->conlen += l;
    if (me->state != HT_BS_TRANSPARENT) {

	/*
	**  If there is still room in the existing chunk then fill it up.
	**  Otherwise create a new chunk and add it to the linked list of
	**  chunks. If the buffer fills up then either return HT_PAUSE or
	**  flush it and go transparent.
	*/
	if (me->tmp_buf && me->tmp_max-me->tmp_ind >= l) {     /* Still room */
	    memcpy(me->tmp_buf + me->tmp_ind, b, l);
	    me->tmp_ind += l;
	    return HT_OK;
	} else {

	    /*
	    **  Add the temporary buffer (if any) to the list of chunks
	    */
	    if (me->tmp_buf) append_buf(me);

	    /*
	    **  Find the right size of the next chunk. We increase the size
	    **  exponentially until we reach HT_MAX_BLOCK in order to minimize
	    **  the number of mallocs.
	    */
	    if (me->cur_size < HT_MAX_BLOCK) {
		int newsize = me->cur_size ? me->cur_size : HT_MIN_BLOCK;
		while (l > newsize && newsize < HT_MAX_BLOCK) newsize *= 2;
		me->cur_size = newsize;
	    }

	    if (alloc_new(me, me->cur_size)) {
		/* Buffer could accept the new data */
		memcpy(me->tmp_buf, b, l);
		me->tmp_ind = l;
	    } else if (me->mode & HT_BM_DELAY) {
		/* Buffer ran full and we pause */
		me->state = HT_BS_PAUSE;
		HTTRACE(STREAM_TRACE, "Buffer....... Paused\n");
		return HT_PAUSE;
	    } else {
		/* Buffer ran full and we flush and go transparent */
		int status = buf_flush(me);
		if (status != HT_OK) return status;
	    }
	}
    }

    /*
    **  If we couldn't buffer the data then check whether we should give up
    **  or pause the stream. If we are in transparent mode then put the rest
    **  of the data down the pipe.
    */
    if (me->state == HT_BS_TRANSPARENT) return PUTBLOCK(b, l);
    return HT_OK;
}
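The chunk-sizing comment in Example #7 is worth a concrete trace. The helper below isolates that policy; the HT_MIN_BLOCK and HT_MAX_BLOCK values are assumptions for illustration, not libwww's actual constants:

#define HT_MIN_BLOCK 0x100      /* assumed value, for illustration */
#define HT_MAX_BLOCK 0x2000     /* assumed value, for illustration */

static int next_chunk_size (int cur_size, int l)
{
    /* Start at the minimum, then double until the chunk covers the
    ** incoming block or the ceiling is reached. */
    int newsize = cur_size ? cur_size : HT_MIN_BLOCK;
    while (l > newsize && newsize < HT_MAX_BLOCK) newsize *= 2;
    return newsize;
}

With these constants, next_chunk_size(0, 700) steps through 256 -> 512 -> 1024 and returns 1024, so 700 bytes land in one malloc'd chunk instead of three minimum-sized ones.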
Example #8
int sol_load_vary(struct s_vary *fp, struct s_base *base)
{
    int i;

    memset(fp, 0, sizeof (*fp));

    fp->base = base;

    if (fp->base->pc)
    {
        fp->pv = (struct v_path*)calloc(fp->base->pc, sizeof (*fp->pv));
        fp->pc = fp->base->pc;

        for (i = 0; i < fp->base->pc; i++)
        {
            struct v_path *pp = fp->pv + i;
            struct b_path *pq = fp->base->pv + i;

            pp->base = pq;
            pp->f    = pq->f;
        }
    }

    if (fp->base->bc)
    {
        struct alloc mv;

        fp->bv = (struct v_body*)calloc(fp->base->bc, sizeof (*fp->bv));
        fp->bc = fp->base->bc;

        alloc_new(&mv, sizeof (*fp->mv), (void **) &fp->mv, &fp->mc);

        for (i = 0; i < fp->base->bc; i++)
        {
            struct b_body *bbody = fp->base->bv + i;
            struct v_body *vbody = fp->bv + i;
            struct v_move *vmove;

            vbody->base = bbody;

            vbody->mi = -1;
            vbody->mj = -1;

            if (bbody->pi >= 0 && (vmove = (struct v_move*)alloc_add(&mv)))
            {
                memset(vmove, 0, sizeof (*vmove));

                vbody->mi = fp->mc - 1;
                vmove->pi = bbody->pi;
            }

            if (bbody->pj == bbody->pi)
            {
                vbody->mj = vbody->mi;
            }
            else if (bbody->pj >= 0 && (vmove = (struct v_move*)alloc_add(&mv)))
            {
                memset(vmove, 0, sizeof (*vmove));

                vbody->mj = fp->mc - 1;
                vmove->pi = bbody->pj;
            }
        }
    }

    if (fp->base->hc)
    {
        fp->hv = (struct v_item*)calloc(fp->base->hc, sizeof (*fp->hv));
        fp->hc = fp->base->hc;

        for (i = 0; i < fp->base->hc; i++)
        {
            struct v_item *hp = fp->hv + i;
            struct b_item *hq = fp->base->hv + i;

            v_cpy(hp->p, hq->p);

            hp->t = hq->t;
            hp->n = hq->n;
        }
    }

    if (fp->base->xc)
    {
        fp->xv = (struct v_swch*)calloc(fp->base->xc, sizeof (*fp->xv));
        fp->xc = fp->base->xc;

        for (i = 0; i < fp->base->xc; i++)
        {
            struct v_swch *xp = fp->xv + i;
            struct b_swch *xq = fp->base->xv + i;

            xp->base = xq;
            xp->t    = xq->t;
            xp->tm   = xq->tm;
            xp->f    = xq->f;
        }
    }

    if (fp->base->uc)
    {
        fp->uv = (struct v_ball*)calloc(fp->base->uc, sizeof (*fp->uv));
        fp->uc = fp->base->uc;

        for (i = 0; i < fp->base->uc; i++)
        {
            struct v_ball *up = fp->uv + i;
            struct b_ball *uq = fp->base->uv + i;

            v_cpy(up->p, uq->p);

            up->r = uq->r;

            up->E[0][0] = up->e[0][0] = 1.0f;
            up->E[0][1] = up->e[0][1] = 0.0f;
            up->E[0][2] = up->e[0][2] = 0.0f;

            up->E[1][0] = up->e[1][0] = 0.0f;
            up->E[1][1] = up->e[1][1] = 1.0f;
            up->E[1][2] = up->e[1][2] = 0.0f;

            up->E[2][0] = up->e[2][0] = 0.0f;
            up->E[2][1] = up->e[2][1] = 0.0f;
            up->E[2][2] = up->e[2][2] = 1.0f;
        }
    }

    return 1;
}
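Example #8 uses alloc_new()/alloc_add() as a growable-array helper: alloc_new() binds a struct alloc to a caller-owned pointer and element count, and each alloc_add() returns space for one more element while bumping the count (hence vbody->mi = fp->mc - 1 right after a successful add). One plausible realloc-based implementation of that contract, sketched here as an assumption since the project's own definition is not shown:

#include <stdlib.h>

struct alloc
{
    void **data;    /* caller's array pointer, grown in place */
    int   *count;   /* caller's element count */
    int    unit;    /* element size in bytes */
};

void alloc_new(struct alloc *a, int unit, void **data, int *count)
{
    a->unit  = unit;
    a->data  = data;
    a->count = count;

    *data  = NULL;
    *count = 0;
}

void *alloc_add(struct alloc *a)
{
    /* Grow by one element; on realloc failure return NULL and leave
       the existing array untouched, matching how the example checks
       alloc_add()'s result before using it. */
    void *grown = realloc(*a->data,
                          ((size_t) *a->count + 1) * (size_t) a->unit);

    if (!grown)
        return NULL;

    *a->data = grown;
    return (char *) grown + (size_t) (*a->count)++ * (size_t) a->unit;
}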