Example #1
int
cbufp_delete(spdid_t spdid, int cbid)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	int ret = -EINVAL;

	assert(0); /* this deletion path is not expected to be exercised yet */
	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;
	cbi = cmap_lookup(&cbufs, cbid);
	if (!cbi) goto done;
	
	meta = cbi->owner.m;
	if (meta) {
		/* TODO: check if free in all components, unmap, etc... */
		memset(meta, 0, sizeof(struct cbuf_meta));
	}
	cbufp_free_unmap(spdid, cbi);

	ret = 0;
done:
	CBUFP_RELEASE();
	return ret;
}
Example #2
vaddr_t
cbuf_map_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	vaddr_t ret = (vaddr_t)NULL;
	struct cbuf_info *cbi;
	int flags;
	
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	assert(cbi);
	if (unlikely(!cbi)) goto done;
	assert(cbi->owner.spdid == s_spd);
	/*
	 * The low-order bits of d_addr are packed with the MAPPING flags (value 0/1)
	 * and a flag (value 2) that is set if valloc should not be used.
	 */
	flags = d_addr & 0x3;
	d_addr &= ~0x3;
	if (!(flags & 2) && valloc_alloc_at(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE)) goto done;
	if (cbuf_map(d_spd, d_addr, cbi->mem, cbi->size, flags & (MAPPING_READ|MAPPING_RW))) goto free;
	ret = d_addr;
	/*
	 * do not add d_spd to the meta list because the cbuf is not
	 * accessible directly. The s_spd must maintain the necessary info
	 * about the cbuf and its mapping in d_spd.
	 */
done:
	CBUF_RELEASE();
	return ret;
free:
	if (!(flags & 2)) valloc_free(s_spd, d_spd, (void*)d_addr, cbi->size);
	goto done;
}
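A minimal caller-side sketch of the packing that cbuf_map_at() expects; dest_page, s_spd, d_spd, and cbid are illustrative names, and the assumption that MAPPING_RW fits in the low flag bits comes only from the comment above.
/*
 * Sketch (assumption, not from the original source): pack the request into
 * the low bits of the destination address.  The MAPPING flag (value 0 or 1)
 * selects the mapping mode, and value 2 would ask cbuf_map_at() not to
 * reserve the range through valloc.
 */
vaddr_t arg = dest_page | MAPPING_RW;   /* writable mapping; let valloc reserve the range */
vaddr_t at  = cbuf_map_at(s_spd, cbid, d_spd, arg);
if (!at) { /* mapping failed */ }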
/* Set pixel (X,Y) to color C. */
void
bogl_pcfb_pixel (int x, int y, int c)
{
  bogl_drawing = 1;

  assert (x >= 0 && x < bogl_xres);
  assert (y >= 0 && y < bogl_yres);

  put_var (bogl_frame + y * bogl_line_len, x, cmap_lookup(c), bpp);

  bogl_drawing = 0;
}
/* Paints a vertical line from (X,Y1) to (X,Y2) in color C.  The final
   point is not painted. */
void
bogl_pcfb_vline (int x, int y1, int y2, int c)
{
  assert (x >= 0 && x < bogl_xres);
  assert (y1 >= 0 && y1 < bogl_yres);
  assert (y2 >= 0 && y2 <= bogl_yres);
  assert (y2 >= y1);

  bogl_drawing = 1;
  for (; y1 < y2; y1++)
    put_var (bogl_frame + (y1 * bogl_line_len), x, cmap_lookup(c), bpp);
  bogl_drawing = 0;
}
Example #5
int
cbufp_retrieve(spdid_t spdid, int cbid, int len)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	struct cbufp_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -1;

	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;
	cbi = cmap_lookup(&cbufs, cbid);
	if (!cbi) goto done;
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) goto done;
	meta = cbufp_meta_lookup(cci, cbid);
	if (!meta) goto done;

	map        = malloc(sizeof(struct cbufp_maps));
	if (!map) goto done;
	dest = (vaddr_t)valloc_alloc(cos_spd_id(), spdid, 1);
	if (!dest) goto free;

	map->spdid = spdid;
	map->m     = meta;
	map->addr  = dest;
	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);

	page = cbi->mem;
	assert(page);
	if (dest != (mman_alias_page(cos_spd_id(), (vaddr_t)page, spdid, dest))) {
		assert(0);
		valloc_free(cos_spd_id(), spdid, (void *)dest, 1);
	}

	meta->nfo.c.flags |= CBUFM_TOUCHED;
	meta->nfo.c.ptr    = map->addr >> PAGE_ORDER;
	ret                = 0;
done:
	CBUFP_RELEASE();

	return ret;
free:
	free(map);
	goto done;
}
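A hedged receiver-side sketch of what cbufp_retrieve() implies for the client: my_meta_lookup is a hypothetical local helper, and reading the address back out of nfo.c.ptr simply mirrors what the manager writes just above.
/* Hypothetical client-side use: ask the manager to alias the page in, then
 * read the address it stored into this component's meta entry. */
if (cbufp_retrieve(cos_spd_id(), cbid, len)) return NULL;
meta = my_meta_lookup(cbid);                       /* hypothetical local lookup */
buf  = (void *)(meta->nfo.c.ptr << PAGE_ORDER);    /* address set by the manager */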
Example #6
void cbuf_debug_cbiddump(unsigned int cbid)
{
	struct cbuf_info *cbi;
	struct cbuf_maps *m;

	printc("mgr dump cbid %u\n", cbid);
	cbi = cmap_lookup(&cbufs, cbid);
	assert(cbi);
	printc("cbid %u cbi: id %d sz %lu mem %p\n", cbid, cbi->cbid, cbi->size, cbi->mem);
	m = &cbi->owner;
	do {
		struct cbuf_meta *meta = m->m;
		printc("map: spd %d addr %lux meta %p\n", m->spdid, m->addr, m->m);
		printc("meta: nfo %lux addr %lux cbid %u\n", meta->nfo, CBUF_PTR(meta), meta->cbid_tag.cbid);
		m = FIRST_LIST(m, next, prev);
	} while(m != &cbi->owner);
}
Example #7
/*
  Look up the CMap table.
  int id: internally used CMap identifier
  CID (returned value): 16-bit unsigned integer
*/
unsigned short cmap_lookup (unsigned char hi, unsigned char lo, int id)
{
  unsigned short cid = CID_NOTDEF;
  unsigned short offset = 0;

  if (nest > CMAP_MAX_NESTING)
    ERROR("CMap nested too deeply");
  nest++;
  if (id == CMAP_IDENTITY_H || id == CMAP_IDENTITY_V) {
    cid = (hi << 8) + lo;
  } else if (id >= 0 && id < num_cmaps) {
    if (check_range(hi, lo, id) < 0) {
      if (warn_invalid) {
	fprintf(stderr, "\n** Warning: Invalid character 0x%02x%02x **\n",
		hi, lo);
      }
      if (abort_invalid)
	ERROR("cmap_lookup(): invalid character"); /* error */
    } else {
      offset = (cmaps[id].offsets)[hi];
      if (offset == 0xffff) { /* no mapping available */
	cid = CID_NOTDEF;
      } else {
	cid = (cmaps[id].map)[offset+lo];
      }
      if (cid == CID_NOTDEF && cmaps[id].use_cmap >= 0) {
	cid = cmap_lookup(hi, lo, cmaps[id].use_cmap);
      }
    }
  } else {
    fprintf(stderr, "ID: %d/%d\n", id, num_cmaps);
    ERROR("invalid CMap ID");
  }
  nest--;

  if (nest == 0 && cid == CID_NOTDEF) {
    if (warn_missing)
      fprintf(stderr, "\n** Warning: character 0x%02x%02x missing **\n",
	      hi, lo);
    if (abort_missing)
      ERROR("no character available for this code.");
  }


  return cid;
}
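A hedged caller sketch for the CMap path: feeding two-byte codes from a CID-keyed stream through cmap_lookup(); buf, len, and cmap_id are illustrative names, not part of the original code.
unsigned int i;
for (i = 0; i + 1 < len; i += 2) {
  unsigned short cid = cmap_lookup(buf[i], buf[i + 1], cmap_id);
  if (cid != CID_NOTDEF) {
    /* emit the glyph for cid */
  }
}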
/* Paint a horizontal line from (X1,Y) to (X2,Y) in color C, where X2
   >= X1.  The final point is not painted. */
void
bogl_pcfb_hline (int x1, int x2, int y, int c)
{
  assert (x1 >= 0 && x1 < bogl_xres);
  assert (x2 >= 0 && x2 <= bogl_xres);
  assert (x2 >= x1);
  assert (y >= 0 && y < bogl_yres);

  if (x1 == x2)
    return;

  bogl_drawing = 1;
  memset_var ((void*)bogl_frame
	      + (y * bogl_line_len),
	      cmap_lookup(c), x1,
	      x2 - x1, bpp);
  bogl_drawing = 0;
}
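A small sketch of how the half-open convention ("the final point is not painted") composes when outlining a w-by-h rectangle at (x, y); x, y, w, h, and c are illustrative, and the corner pixels are simply painted twice.
bogl_pcfb_hline (x, x + w, y, c);           /* top row      */
bogl_pcfb_hline (x, x + w, y + h - 1, c);   /* bottom row   */
bogl_pcfb_vline (x, y, y + h, c);           /* left column  */
bogl_pcfb_vline (x + w - 1, y, y + h, c);   /* right column */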
/* Clear the region from (X1,Y1) to (X2,Y2) to color C, not including
   the last row or column.  If C == -1 then the region's colors are
   inverted rather than set to a particular color.  */
void
bogl_pcfb_clear (int x1, int y1, int x2, int y2, int c)
{
  unsigned char *dst;

  assert (0 <= x1 && x1 <= x2 && x2 <= bogl_xres);
  assert (0 <= y1 && y1 <= y2 && y2 <= bogl_yres);

  if (x1 == x2)
    return;

  bogl_drawing = 1;
  dst = (char *) bogl_frame + (y1 * bogl_line_len);
  for (; y1 < y2; y1++)
    {
      memset_var (dst, cmap_lookup(c), x1, x2 - x1, bpp);
      dst += bogl_line_len;
    }
  bogl_drawing = 0;
}
Example #10
/* Write PIXMAP at location (XX,YY) */
void
bogl_pcfb_put (int xx, int yy, const struct bogl_pixmap *pixmap,
		const int color_map[16])
{
  char *dst;
  const unsigned char *src;
  int h;
  
  assert (xx + pixmap->width <= bogl_xres);
  assert (yy >= 0 && yy < bogl_yres);
  assert (yy + pixmap->height <= bogl_yres);
  src = pixmap->data;

  bogl_drawing = 1;

  h = pixmap->height;
  dst = (char *) bogl_frame + (yy * bogl_line_len);
  while (h--)
    {
      int w = pixmap->width;
      int offset = xx;
      while (w)
	{
	  int color = *src & 0xf;
	  int count = *src >> 4;
	  src++;
	  w -= count;

	  if (color != pixmap->transparent)
	    memset_var ((char *) dst, cmap_lookup(color_map[color]),
			offset, count, bpp);
	  offset += count;
	}
      dst += bogl_line_len;
    }

  bogl_drawing = 0;
}
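A hypothetical encoding sketch for pixmap->data that matches the decode loop above: each byte carries a run count in its high nibble and a 4-bit color index in its low nibble.
/* 3 pixels of color 2 followed by 5 pixels of color 7 (8 pixels total) */
static const unsigned char example_row[] = {
  (3 << 4) | 2,   /* count = 3, color = 2 */
  (5 << 4) | 7,   /* count = 5, color = 7 */
};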
Example #11
int
cbuf_unmap_at(spdid_t s_spd, unsigned int cbid, spdid_t d_spd, vaddr_t d_addr)
{
	struct cbuf_info *cbi;
	int ret = 0, err = 0;
	u32_t off;

	assert(d_addr);
	CBUF_TAKE();
	cbi = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) ERR_THROW(-EINVAL, done);
	if (unlikely(cbi->owner.spdid != s_spd)) ERR_THROW(-EPERM, done);
	assert(cbi->size == round_to_page(cbi->size));
	/* unmap pages in only the d_spd client */
	for (off = 0 ; off < cbi->size ; off += PAGE_SIZE)
		err |= mman_release_page(d_spd, d_addr + off, 0);
	err |= valloc_free(s_spd, d_spd, (void*)d_addr, cbi->size/PAGE_SIZE);
	if (unlikely(err)) ERR_THROW(-EFAULT, done);
	assert(!err);
done:
	CBUF_RELEASE();
	return ret;
}
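A hedged sketch of the intended pairing with cbuf_map_at() from Example #2: the owner s_spd later tears down only d_spd's view, passing back the flag-free address returned by the map call (here called at).
/* tear down only d_spd's view of the cbuf; the owner's own mapping stays */
if (cbuf_unmap_at(s_spd, cbid, d_spd, at) < 0) {
	/* -EINVAL: unknown cbid; -EPERM: s_spd is not the owner; -EFAULT: release/valloc failure */
}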
Example #12
/* Draw mouse pointer POINTER with its hotspot at (X,Y), if VISIBLE !=
   0.  Restores the previously saved background at that point, if
   VISIBLE == 0.  COLORS[] gives the color indices to paint the
   cursor.

   This routine performs full clipping on all sides of the screen. */
void 
bogl_pcfb_pointer (int visible, int x1, int y1,
		    const struct bogl_pointer *pointer,
		    int colors[2])
{
  int y_count;		/* Number of scanlines. */
  int y_ofs;		/* Number of scanlines to skip drawing. */
  int x_ofs;		/* Number of pixels to skip drawing on each line. */

  assert (pointer != NULL);

  x1 -= pointer->hx;
  y1 -= pointer->hy;
  
  if (y1 + 16 > bogl_yres)
    {
      y_count = bogl_yres - y1;
    }
  else
    y_count = 16;

  if (x1 < 0)
    {
      x_ofs = -x1;
      x1 = 0;
    }
  else
    x_ofs = 0;

  if (y1 < 0)
    {
      y_ofs = -y1;
      y1 = 0;
      y_count -= y_ofs;
    }
  else
    y_ofs = 0;

  bogl_drawing = 1;

  /* Save or restore the framebuffer contents. */
  {
    int sx_ofs = x1;
    int rowbytes = 16 * bpp / 8;

    if (sx_ofs + 16 > bogl_xres)
      {
	sx_ofs = bogl_xres - 16;
      }
    /* Avoid mouse droppings on <8-bit displays */
    else if (bpp < 8 && sx_ofs % (8 / bpp))
      rowbytes++;

    if (visible)
      {
	char *dst = save;
	char *src = (char *) bogl_frame
	  + (sx_ofs * bpp / 8)
	  + (y1 * bogl_line_len);
	int y;
	
	for (y = 0; y < y_count; y++)
	  {
	    memcpy (dst, src, rowbytes);
	    dst += rowbytes;
	    src += bogl_line_len;
	  }
      }
    else
      {
	char *dst = (char *) bogl_frame
	  + (sx_ofs * bpp / 8)
	  + (y1 * bogl_line_len);
	char *src = save;
	int y;
	
	for (y = 0; y < y_count; y++)
	  {
	    memcpy (dst, src, rowbytes);
	    dst += bogl_line_len;
	    src += rowbytes;
	  }
      }
  }

  /* Now draw it */
  if (visible)
    {
      const unsigned short *mask_p, *color_p;
      int y;
      int x_count = 16;
      
      if (x1 + 16 > bogl_xres)
	x_count = bogl_xres - x1;

      mask_p = pointer->mask + y_ofs;
      color_p = pointer->color + y_ofs;
      for (y = 0; y < y_count; y++, mask_p++, color_p++)
	{
	  unsigned char *dst;
	  unsigned short bg_bits, fg_bits;
	  int x;
	  
	  dst = (char *) bogl_frame
	    + ((y1 + y) * bogl_line_len);
	  bg_bits = *mask_p ^ *color_p;
	  fg_bits = *mask_p & *color_p;

	  for (x = 0; x < x_count; x++)
	    {
	      if (bg_bits & 0x8000)
		put_var (dst, x + x1, cmap_lookup(colors[0]), bpp);
	      else if (fg_bits & 0x8000)
		put_var (dst, x + x1, cmap_lookup(colors[1]), bpp);
	      else ; /* transparent (we hope) */
	      bg_bits <<= 1;
	      fg_bits <<= 1;
	    }
	}
    }

  bogl_drawing = 0;
}
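A hedged usage sketch that the save/restore logic above implies: hide the cursor at its old position (restoring the saved background) before drawing it at the new one; old_x, old_y, new_x, new_y, ptr, and colors are illustrative.
bogl_pcfb_pointer (0, old_x, old_y, ptr, colors);  /* restore background under the old cursor */
bogl_pcfb_pointer (1, new_x, new_y, ptr, colors);  /* save background, then draw at the new spot */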
Example #13
void
bogl_pcfb_text (int xx, int yy, const char *s, int n, int fg, int bg, int ul,
		const struct bogl_font *font)
{
  int h, k;
  wchar_t wc;

  assert (xx >= 0 && xx < bogl_xres);
  assert (yy >= 0 && yy < bogl_yres);

  bogl_drawing = 1;

  h = bogl_font_height (font);
  if (yy + h > bogl_yres)
    h = bogl_yres - yy;

  mbtowc (0, 0, 0);
  for (; (k = mbtowc (&wc, s, n)) > 0; s += k, n -= k)
    {
      char *dst = (char *) bogl_frame + (yy * bogl_line_len);

      u_int32_t *character = NULL;
      int w = bogl_font_glyph (font, wc, &character);

      int x, y, h1 = ul ? h - 1 : h;

      if (character == NULL)
	continue;
 
      if (xx + w > bogl_xres)
	w = bogl_xres - xx;
      
      for (y = 0; y < h1; y++)
	{
	  u_int32_t c = *character++;
	  
	  for (x = 0; x < w; x++)
	    {
	      if (c & 0x80000000)
		put_var (dst, xx+x, cmap_lookup(fg), bpp);
	      else if (bg != -1)
		put_var (dst, xx+x, cmap_lookup(bg), bpp);

	      c <<= 1;
	    }

	  dst += bogl_line_len;
	}

      if (ul)
        for (x = 0; x < w; x++)
          put_var (dst, xx+x, cmap_lookup(fg), bpp);
        

      xx += w;
      if (xx >= bogl_xres)
	break;
    }

  bogl_drawing = 0;
}
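A minimal call sketch, assuming fg is a palette index as in the other bogl_pcfb_* routines, bg == -1 means "no background" (as the loop above implies), and font is an already-loaded struct bogl_font.
const char *msg = "hello";
bogl_pcfb_text (10, 20, msg, strlen (msg), fg, -1, 0 /* no underline */, font);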
Example #14
int
cbufp_create(spdid_t spdid, int size, long cbid)
{
	struct cbufp_comp_info *cci;
	struct cbufp_info *cbi;
	struct cbuf_meta *meta;
	int ret = 0;

	if (unlikely(cbid < 0)) return 0;
	CBUFP_TAKE();
	cci = cbufp_comp_info_get(spdid);
	if (!cci) goto done;

	/* 
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		cbi = malloc(sizeof(struct cbufp_info));
		if (!cbi) goto done;

		/* Allocate and map in the cbuf. */
		cbid = cmap_add(&cbufs, cbi);
		cbi->cbid        = cbid;
		cbi->size        = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbufp_alloc_map(spdid, &(cbi->owner.addr), 
				    (void**)&(cbi->mem), size)) goto free;
		if (cci->cbufs.c) ADD_LIST(cci->cbufs.c, cbi, next, prev);
		else              cci->cbufs.c = cbi;
	} 
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, cbid);
		if (!cbi) goto done;
		if (cbi->owner.spdid != spdid) goto done;
	}
	meta = cbufp_meta_lookup(cci, cbid);
	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		ret = cbid * -1;
		goto done;
	}
	cbi->owner.m = meta;

	/* 
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->nfo.c.flags |= CBUFM_IN_USE | CBUFM_TOUCHED | 
		             CBUFM_OWNER  | CBUFM_WRITABLE;
	meta->nfo.c.ptr    = cbi->owner.addr >> PAGE_ORDER;
	ret = cbid;
done:
	CBUFP_RELEASE();

	return ret;
free:
	cmap_del(&cbufs, cbid);
	free(cbi);
	goto done;
}
Example #15
/*
 * For a given principal, collect any cbufs that are unreferenced and
 * not on the free list so that they can be reused.  This is the
 * garbage-collection mechanism.
 *
 * Collect cbufs and add them onto the shared component's ring buffer.
 *
 * This function is semantically complicated.  It can return no cbufs
 * even when some are available, in order to force the pool of cbufs
 * to be expanded (the client will call cbuf_create in that case).
 * Or, in the common case, it returns the number of available cbufs.
 */
int
cbuf_collect(spdid_t spdid, unsigned long size)
{
	struct cbuf_info *cbi;
	struct cbuf_comp_info *cci;
	struct cbuf_shared_page *csp;
	struct cbuf_bin *bin;
	int ret = 0;

	printl("cbuf_collect\n");

	CBUF_TAKE();
	cci  = cbuf_comp_info_get(spdid);
	/* validate cci before tracking_start()/tracking_end() dereference it */
	if (unlikely(!cci)) ERR_THROW(-ENOMEM, release);
	tracking_start(&cci->track, CBUF_COLLECT);
	if (size + cci->allocated_size <= cci->target_size) goto done;

	csp  = cci->csp;
	if (unlikely(!csp)) ERR_THROW(-EINVAL, done);

	assert(csp->ring.size == CSP_BUFFER_SIZE);
	ret = CK_RING_SIZE(cbuf_ring, &csp->ring);
	if (ret != 0) goto done;
	/* 
	 * Go through all cbufs we own, and report all of them that
	 * have no current references to them.  Unfortunately, this is
	 * O(N*M), N = min(num cbufs, PAGE_SIZE/sizeof(int)), and M =
	 * num components.
	 */
	size = round_up_to_page(size);
	bin  = cbuf_comp_info_bin_get(cci, size);
	if (!bin) ERR_THROW(0, done);
	cbi  = bin->c;
	do {
		if (!cbi) break;
		/*
		 * Skip cbufs that are on the freelist.  This coordinates with
		 * cbuf_free to detect such cbufs correctly.
		 * We must check refcnt first and then the next pointer.
		 *
		 * If we did not check refcnt: the manager could check "next" before
		 * cbuf_free runs (while it is still NULL), then be preempted by the
		 * client, which calls cbuf_free to set "next", decrease refcnt, and
		 * add the cbuf to the freelist.  Back in the manager, it would now
		 * collect this in-freelist cbuf.
		 *
		 * Furthermore, we must check refcnt before the "next" pointer:
		 * otherwise, similarly to the case above, the manager may be
		 * preempted by the client between checking "next" and refcnt; it
		 * would have seen "next" as NULL (before the free) and refcnt as 0
		 * (after it), and would collect this cbuf.  Short-circuit
		 * evaluation prevents that reordering.
		 */
		assert(cbi->owner.m);
		if (!CBUF_REFCNT(cbi->owner.m) && !CBUF_IS_IN_FREELIST(cbi->owner.m)
		    && !cbuf_referenced(cbi)) {
			struct cbuf_ring_element el = { .cbid = cbi->cbid };
			cbuf_references_clear(cbi);
			if (!CK_RING_ENQUEUE_SPSC(cbuf_ring, &csp->ring, &el)) break;
			/*
			 * Prevent other collection runs from collecting these cbufs.
			 * The manager checks whether the shared ring buffer is empty on
			 * entry and, if not, simply returns.  That alone is not enough
			 * to prevent double collection.  The corner case: after the last
			 * element in the ring buffer is dequeued, and before it is added
			 * to the free list, the manager runs again and may collect that
			 * last cbuf a second time.
			 */
			cbi->owner.m->next = (struct cbuf_meta *)1;
			if (++ret == CSP_BUFFER_SIZE) break;
		}
		cbi = FIRST_LIST(cbi, next, prev);
	} while (cbi != bin->c);
	if (ret) cbuf_thd_wake_up(cci, ret*size);

done:
	tracking_end(&cci->track, CBUF_COLLECT);
release:
	CBUF_RELEASE();
	return ret;
}
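A hedged sketch of the client-side reaction that the header comment of cbuf_collect() describes; spd and sz are illustrative, and the dequeue step is abbreviated.
int n = cbuf_collect(spd, sz);
if (n <= 0) {
	/* nothing was collected: expand the pool instead */
	int cbid = cbuf_create(spd, sz, 0);
	/* ... use cbid ... */
} else {
	/* dequeue up to n cbids from the shared ring (csp->ring) and reuse them */
}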

/* 
 * Called by cbuf_deref.
 */
int
cbuf_delete(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	int ret = -EINVAL, sz;

	printl("cbuf_delete\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_DEL);

	cci  = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cbi  = cmap_lookup(&cbufs, cbid);
	if (unlikely(!cbi)) goto done;
	meta = cbuf_meta_lookup(cci, cbid);

	/*
	 * Other threads can access the meta data simultaneously. For
	 * example, others may call cbuf2buf, which increases the refcnt.
	 */
	CBUF_REFCNT_ATOMIC_DEC(meta);
	/* Find the owner of this cbuf */
	if (cbi->owner.spdid != spdid) {
		cci = cbuf_comp_info_get(cbi->owner.spdid);
		if (unlikely(!cci)) goto done;
	}
	if (cbuf_free_unmap(cci, cbi)) 	goto done;
	if (cci->allocated_size < cci->target_size) {
		cbuf_thd_wake_up(cci, cci->target_size - cci->allocated_size);
	}
	ret = 0;
done:
	tracking_end(NULL, CBUF_DEL);
	CBUF_RELEASE();
	return ret;
}

/* 
 * Called by cbuf2buf to retrieve a given cbid.
 */
int
cbuf_retrieve(spdid_t spdid, unsigned int cbid, unsigned long size)
{
	struct cbuf_comp_info *cci, *own;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta, *own_meta;
	struct cbuf_maps *map;
	vaddr_t dest;
	void *page;
	int ret = -EINVAL, off;

	printl("cbuf_retrieve\n");

	CBUF_TAKE();
	tracking_start(NULL, CBUF_RETRV);

	cci        = cbuf_comp_info_get(spdid);
	if (!cci) {printd("no cci\n"); goto done; }
	cbi        = cmap_lookup(&cbufs, cbid);
	if (!cbi) {printd("no cbi\n"); goto done; }
	/* shouldn't cbuf2buf your own buffer! */
	if (cbi->owner.spdid == spdid) {
		printd("owner\n"); 
		goto done;
	}
	meta       = cbuf_meta_lookup(cci, cbid);
	if (!meta) {printd("no meta\n"); goto done; }
	assert(!(meta->nfo & ~CBUF_INCONSISENT));

	map        = malloc(sizeof(struct cbuf_maps));
	if (!map) {printd("no map\n"); ERR_THROW(-ENOMEM, done); }
	if (size > cbi->size) {printd("too big\n"); goto done; }
	assert(round_to_page(cbi->size) == cbi->size);
	size       = cbi->size;
	/* TODO: change to MAPPING_READ */
	if (cbuf_alloc_map(spdid, &map->addr, NULL, cbi->mem, size, MAPPING_RW)) {
		printc("cbuf mgr map fail spd %d mem %p sz %lu cbid %u\n", spdid, cbi->mem, size, cbid);
		goto free;
	}

	INIT_LIST(map, next, prev);
	ADD_LIST(&cbi->owner, map, next, prev);
	CBUF_PTR_SET(meta, map->addr);
	map->spdid          = spdid;
	map->m              = meta;
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = cbid;
	own                 = cbuf_comp_info_get(cbi->owner.spdid);
	if (unlikely(!own)) goto done;
	/*
	 * We need to inherit the relinquish bit from the sender. 
	 * Otherwise, this cbuf cannot be returned to the manager. 
	 */
	own_meta            = cbuf_meta_lookup(own, cbid);
	if (CBUF_RELINQ(own_meta)) CBUF_FLAG_ADD(meta, CBUF_RELINQ);
	ret                 = 0;
done:
	tracking_end(NULL, CBUF_RETRV);

	CBUF_RELEASE();
	return ret;
free:
	free(map);
	goto done;
}

vaddr_t
cbuf_register(spdid_t spdid, unsigned int cbid)
{
	struct cbuf_comp_info  *cci;
	struct cbuf_meta_range *cmr;
	void *p;
	vaddr_t dest, ret = 0;

	printl("cbuf_register\n");
	CBUF_TAKE();
	tracking_start(NULL, CBUF_REG);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;
	cmr = cbuf_meta_lookup_cmr(cci, cbid);
	if (cmr) ERR_THROW(cmr->dest, done);

	/* Create the mapping into the client */
	if (cbuf_alloc_map(spdid, &dest, &p, NULL, PAGE_SIZE, MAPPING_RW)) goto done;
	assert((unsigned int)p == round_to_page(p));
	cmr = cbuf_meta_add(cci, cbid, p, dest);
	assert(cmr);
	ret = cmr->dest;
done:
	tracking_end(NULL, CBUF_REG);

	CBUF_RELEASE();
	return ret;
}

static void
cbuf_shrink(struct cbuf_comp_info *cci, int diff)
{
	int i, sz;
	struct cbuf_bin *bin;
	struct cbuf_info *cbi, *next, *head;

	for (i = cci->nbin-1 ; i >= 0 ; i--) {
		bin = &cci->cbufs[i];
		sz = (int)bin->size;
		if (!bin->c) continue;
		cbi = FIRST_LIST(bin->c, next, prev);
		while (cbi != bin->c) {
			next = FIRST_LIST(cbi, next, prev);
			if (!cbuf_free_unmap(cci, cbi)) {
				diff -= sz;
				if (diff <= 0) return;
			}
			cbi = next;
		}
		if (!cbuf_free_unmap(cci, cbi)) {
			diff -= sz;
			if (diff <= 0) return;
		}
	}
	if (diff > 0) cbuf_mark_relinquish_all(cci);
}
Example #16
int
cbuf_create(spdid_t spdid, unsigned long size, int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	struct cbuf_bin *bin;
	int ret = 0;
	unsigned int id = (unsigned int)cbid;

	printl("cbuf_create\n");
	if (unlikely(cbid < 0)) return 0;
	CBUF_TAKE();
	tracking_start(NULL, CBUF_CRT);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;

	/* 
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		/* TODO: check if we have enough free memory: ask the mem manager */
		/* If memory usage exceeds the target, block this thread */
		if (size + cci->allocated_size > cci->target_size) {
			cbuf_shrink(cci, size);
			if (size + cci->allocated_size > cci->target_size) {
				cbuf_thread_block(cci, size);
				return 0;
			}
		}

 		cbi = malloc(sizeof(struct cbuf_info));

		if (unlikely(!cbi)) goto done;
		/* Allocate and map in the cbuf. Discard inconsistent cbufs */
		/* TODO: Find a better way to manage those inconsistent cbufs */
		do {
			id   = cmap_add(&cbufs, cbi);
			meta = cbuf_meta_lookup(cci, id);
		} while(meta && CBUF_INCONSISENT(meta));

		cbi->cbid        = id;
		size             = round_up_to_page(size);
		cbi->size        = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbuf_alloc_map(spdid, &(cbi->owner.addr), 
				   (void**)&(cbi->mem), NULL, size, MAPPING_RW)) {
			goto free;
		}
	} 
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, id);
		if (unlikely(!cbi)) goto done;
		if (unlikely(cbi->owner.spdid != spdid)) goto done;
	}
	meta = cbuf_meta_lookup(cci, id);

	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		ret = (int)id * -1;
		goto done;
	}
	
	/* 
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = id;
	CBUF_FLAG_ADD(meta, CBUF_OWNER);
	CBUF_PTR_SET(meta, cbi->owner.addr);
	CBUF_REFCNT_INC(meta);

	/*
	 * When creating a new cbuf, the manager should be the only
	 * one able to access the meta.
	 */
	/* TODO: a malicious client may trigger this assertion; it is here just for debugging */
	assert(CBUF_REFCNT(meta) == 1);
	assert(CBUF_PTR(meta));
	cbi->owner.m = meta;

	/*
	 * Install the cbi last.  Otherwise, after a negative cbid is
	 * returned, a collection could run and find a dangling cbi.
	 */
	bin = cbuf_comp_info_bin_get(cci, size);
	if (!bin) bin = cbuf_comp_info_bin_add(cci, size);
	if (unlikely(!bin)) goto free;
	if (bin->c) ADD_LIST(bin->c, cbi, next, prev);
	else        bin->c   = cbi;
	cci->allocated_size += size;
	ret = (int)id;
done:
	tracking_end(NULL, CBUF_CRT);
	CBUF_RELEASE();

	return ret;
free:
	cmap_del(&cbufs, id);
	free(cbi);
	goto done;
}
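A hedged client-side sketch of the handshake the negative return value implies, combining cbuf_create() above with cbuf_register() from Example #15; spd and sz are illustrative names.
int id = cbuf_create(spd, sz, 0);
if (id < 0) {
	/* the meta page for this cbid is not mapped yet: register it, then retry */
	vaddr_t meta_page = cbuf_register(spd, (unsigned int)(-id));   /* where the shared meta page lands */
	if (meta_page) id = cbuf_create(spd, sz, -id);
}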