Example #1
static void* fr_calloc ( ThreadId tid, SizeT m, SizeT szB )
{
   return new_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
}
Example #2
static void *fr_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
{
   return new_block( tid, szB, alignB, False );
}
Example #3
static void* fr_malloc ( ThreadId tid, SizeT szB )
{
   return new_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}
Example #4
static void* fr___builtin_vec_new ( ThreadId tid, SizeT szB )
{
   return new_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}
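The four Valgrind wrappers above all funnel into a single new_block() allocator; only fr_calloc() zeroes the result, and it passes m*szB through with no overflow check. A minimal sketch of the guard a calloc-style wrapper generally wants, reusing the ThreadId/SizeT types from the snippets above (the fr_calloc_checked name is illustrative, not part of the Valgrind source):

static void* fr_calloc_checked ( ThreadId tid, SizeT m, SizeT szB )
{
   /* m*szB wraps around exactly when szB != 0 and m exceeds the
      largest SizeT value divided by szB, so reject that case. */
   if (szB != 0 && m > (SizeT)-1 / szB)
      return NULL;
   return new_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
}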
Example #5
static inline MSymbol* new_symbol()
  { return (MSymbol*) new_block(sizeof(MSymbol)); }
Example #6
static inline char* new_string(const char* str)
  { char* p = (char*) new_block((strlen(str)+1 +3) & ~3);
    return strcpy(p, str); }
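The (strlen(str)+1 +3) & ~3 expression in new_string() rounds the string length, plus its NUL terminator, up to the next multiple of 4: a 5-character string needs 6 bytes and gets 8. The same trick works for any power-of-two alignment; a small illustrative helper in the same style (round_up is not part of the original source):

static inline size_t round_up(size_t n, size_t align)
  { /* align must be a power of two; e.g. round_up(6, 4) == 8 */
    return (n + align - 1) & ~(align - 1); }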
Example #7
/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and it has room for
 * another child.
 *
 * Before:
 *	  +--------+
 *	  | Parent |
 *	  +--------+
 *	     |
 *	     v
 *	+----------+
 *	| A ++++++ |
 *	+----------+
 *
 *
 * After:
 *		+--------+
 *		| Parent |
 *		+--------+
 *		  |	|
 *		  v	+------+
 *	    +---------+	       |
 *	    | A* +++  |	       v
 *	    +---------+	  +-------+
 *			  | B +++ |
 *			  +-------+
 *
 * Where A* is a shadow of A.
 */
static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
			       unsigned parent_index, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r)
		return r;

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}
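The split arithmetic above guarantees that nr_left + nr_right equals the original entry count even when it is odd (the right sibling takes the extra entry). A tiny self-check of that invariant, with illustrative names:

static void check_split_counts(uint32_t n)
{
	uint32_t nr_left = n / 2;
	uint32_t nr_right = n - nr_left;

	/* every entry lands in exactly one sibling, and the two
	 * halves differ by at most one entry */
	BUG_ON(nr_left + nr_right != n);
	BUG_ON(nr_right - nr_left > 1);
}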
Example #8
/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before:
 *	  +----------+
 *	  | A ++++++ |
 *	  +----------+
 *
 *
 * After:
 *	+------------+
 *	| A (shadow) |
 *	+------------+
 *	    |	|
 *   +------+	+----+
 *   |		     |
 *   v		     v
 * +-------+	 +-------+
 * | B +++ |	 | C +++ |
 * +-------+	 +-------+
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	r = new_block(s->info, &right);
	if (r < 0) {
		/* FIXME: put left */
		return r;
	}

	pn = dm_block_data(new_parent);
	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;

	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));

	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	/*
	 * Rejig the spine.  This is ugly, since it knows too
	 * much about the spine.
	 */
	if (s->nodes[0] != new_parent) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = new_parent;
	}
	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}
	s->count = 2;

	return 0;
}
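Both split paths end the same way: the search key is compared against the right node's first key to decide which child stays locked in the spine for the rest of the descent. The same decision distilled into an illustrative helper (pick_child is not in the original source):

static struct dm_block *pick_child(uint64_t key, uint64_t right_first_key,
				   struct dm_block *left, struct dm_block *right)
{
	/* keys below the right node's first key live in the left child */
	return (key < right_first_key) ? left : right;
}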
Example #9
/*
 * Set the block sizes of the device.
 * If the volume block size is zero, we set the max block size to what is
 * configured in the device resource, i.e. dev->device->max_block_size.
 *
 * If dev->device->max_block_size is also zero, do nothing and leave
 * dev->max_block_size as it is.
 */
void DEVICE::set_blocksizes(DCR *dcr) {

   DEVICE* dev = this;
   JCR* jcr = dcr->jcr;
   uint32_t max_bs;

   Dmsg4(100, "Device %s has dev->device->max_block_size of %u and dev->max_block_size of %u, dcr->VolMaxBlocksize is %u\n",
         dev->print_name(), dev->device->max_block_size, dev->max_block_size, dcr->VolMaxBlocksize);

   if (dcr->VolMaxBlocksize == 0 && dev->device->max_block_size != 0) {
      Dmsg2(100, "setting dev->max_block_size to dev->device->max_block_size=%u "
                 "on device %s because dcr->VolMaxBlocksize is 0\n",
            dev->device->max_block_size, dev->print_name());
      dev->min_block_size = dev->device->min_block_size;
      dev->max_block_size = dev->device->max_block_size;
   } else if (dcr->VolMaxBlocksize != 0) {
      dev->min_block_size = dcr->VolMinBlocksize;
      dev->max_block_size = dcr->VolMaxBlocksize;
   }

   /*
    * Sanity check
    */
   if (dev->max_block_size == 0) {
      max_bs = DEFAULT_BLOCK_SIZE;
   } else {
      max_bs = dev->max_block_size;
   }

   if (dev->min_block_size > max_bs) {
      Jmsg(jcr, M_ERROR_TERM, 0, _("Min block size > max on device %s\n"), dev->print_name());
   }

   if (dev->max_block_size > MAX_BLOCK_LENGTH) {
      Jmsg3(jcr, M_ERROR, 0, _("Block size %u on device %s is too large, using default %u\n"),
            dev->max_block_size, dev->print_name(), DEFAULT_BLOCK_SIZE);
      dev->max_block_size = 0;
   }

   if (dev->max_block_size % TAPE_BSIZE != 0) {
      Jmsg3(jcr, M_WARNING, 0, _("Max block size %u not multiple of device %s block size=%d.\n"),
            dev->max_block_size, dev->print_name(), TAPE_BSIZE);
   }

   if (dev->max_volume_size != 0 && dev->max_volume_size < (dev->max_block_size << 4)) {
      Jmsg(jcr, M_ERROR_TERM, 0, _("Max Vol Size < 16 * Max Block Size for device %s\n"), dev->print_name());
   }

   Dmsg3(100, "set minblocksize to %d, maxblocksize to %d on device %s\n",
         dev->min_block_size, dev->max_block_size, dev->print_name());

   /*
    * If the block length is not dev->max_block_size, create a new block with
    * the right size.  (The header is always dev->label_block_size, which is
    * preset to DEFAULT_BLOCK_SIZE.)
    */
   if (dcr->block) {
      if (dcr->block->buf_len != dev->max_block_size) {
         Dmsg2(100, "freeing block of buf_len: %u on device %s\n",
               dcr->block->buf_len, dev->print_name());
         free_block(dcr->block);
         dcr->block = new_block(dev);
         Dmsg2(100, "created new block of buf_len: %u on device %s\n",
               dcr->block->buf_len, dev->print_name());
      }
   }
}
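The precedence in set_blocksizes() is: a non-zero per-volume size (dcr->VolMaxBlocksize) wins over the size configured in the device resource, and zero everywhere leaves the current value alone. The same logic as a pure function, with illustrative names:

static uint32_t effective_max_bs(uint32_t vol_max, uint32_t res_max,
                                 uint32_t cur_max)
{
   if (vol_max != 0)
      return vol_max;       /* volume label overrides everything */
   if (res_max != 0)
      return res_max;       /* else fall back to the device resource */
   return cur_max;          /* else keep whatever is already set */
}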
Example #10
void init_queue() {
  block_t *block = new_block();
  globalTailBlock = block;
  globalHeadBlock = block;
}
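init_queue() starts the queue with head and tail sharing a single block, the usual shape for a queue built as a linked list of fixed-capacity segments. The snippet does not show block_t or new_block(); a hypothetical definition consistent with it (all field names and BLOCK_CAP are assumptions, not taken from the original source):

#define BLOCK_CAP 64

typedef struct block {
  void *items[BLOCK_CAP];     /* slots for queued elements */
  size_t head, tail;          /* consume / produce cursors */
  struct block *next;         /* next segment, if any */
} block_t;

static block_t *new_block() {
  return calloc(1, sizeof(block_t));  /* assumes <stdlib.h> */
}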
Example #11
/** Wrapper for __builtin_new(). */
static void* drd___builtin_new(ThreadId tid, SizeT n)
{
   return new_block(tid, n, VG_(clo_alignment), /*is_zeroed*/False);
}
Example #12
/** Wrapper for calloc(). */
static void* drd_calloc(ThreadId tid, SizeT nmemb, SizeT size1)
{
   return new_block(tid, nmemb*size1, VG_(clo_alignment),
                          /*is_zeroed*/True);
}
Example #13
/** Wrapper for memalign(). */
static void* drd_memalign(ThreadId tid, SizeT align, SizeT n)
{
   return new_block(tid, n, align, /*is_zeroed*/False);
}
Example #14
EP_API_EXPORT(void *) ep_palloc(struct tMemPool *a, int reqsize)
{
#ifdef ALLOC_USE_MALLOC
    int size = reqsize + CLICK_SZ;
    void *ptr;

    ep_block_alarms();
    ptr = malloc(size);
    if (ptr == NULL) {
	fputs("Ouch!  Out of memory!\n", stderr);
	exit(1);
    }
    debug_fill(ptr, size); /* might as well get uninitialized protection */
    *(void **)ptr = a->allocation_list;
    a->allocation_list = ptr;
    ep_unblock_alarms();
    return (char *)ptr + CLICK_SZ;
#else

    /* Round up requested size to an even number of alignment units (core clicks)
     */

    int nclicks = 1 + ((reqsize - 1) / CLICK_SZ);
    int size = nclicks * CLICK_SZ;

    /* First, see if we have space in the block most recently
     * allocated to this pool
     */

    union block_hdr *blok = a->last;
    char *first_avail = blok->h.first_avail;
    char *new_first_avail;

    if (reqsize <= 0)
	return NULL;

    new_first_avail = first_avail + size;

    if (new_first_avail <= blok->h.endp) {
	debug_verify_filled(first_avail, blok->h.endp,
	    "Ouch!  Someone trounced past the end of their allocation!\n");
	blok->h.first_avail = new_first_avail;
	return (void *) first_avail;
    }

    /* Nope --- get a new one that's guaranteed to be big enough */

    ep_block_alarms();

    ep_acquire_mutex(alloc_mutex);

    blok = new_block(size);
    a->last->h.next = blok;
    a->last = blok;
#ifdef POOL_DEBUG
    blok->h.owning_pool = a;
#endif

    ep_release_mutex(alloc_mutex);

    ep_unblock_alarms();

    first_avail = blok->h.first_avail;
    blok->h.first_avail += size;

    return (void *) first_avail;
#endif
}
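In the non-ALLOC_USE_MALLOC path, ep_palloc() rounds the request up to a whole number of core clicks with 1 + ((reqsize - 1) / CLICK_SZ); with CLICK_SZ == 8, for instance, a 20-byte request becomes 3 clicks, i.e. 24 bytes. A worked check of that formula (round_to_clicks is illustrative, not part of the original pool code):

static int round_to_clicks(int reqsize)
{
    /* with CLICK_SZ == 8: 1..8 -> 8, 9..16 -> 16, 20 -> 24 */
    return (1 + ((reqsize - 1) / CLICK_SZ)) * CLICK_SZ;
}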
Example #15
void
cfg_t::create(void *parent_mem_ctx, exec_list *instructions)
{
   mem_ctx = ralloc_context(NULL);
   block_list.make_empty();
   blocks = NULL;
   num_blocks = 0;
   ip = 0;
   cur = NULL;

   bblock_t *entry = new_block();
   bblock_t *cur_if = NULL, *cur_else = NULL, *cur_endif = NULL;
   bblock_t *cur_do = NULL, *cur_while = NULL;
   exec_list if_stack, else_stack, endif_stack, do_stack, while_stack;
   bblock_t *next;

   set_next_block(entry);

   entry->start = (backend_instruction *) instructions->get_head();

   foreach_list(node, instructions) {
      backend_instruction *inst = (backend_instruction *)node;

      cur->end = inst;

      /* set_next_block wants the post-incremented ip */
      ip++;

      switch (inst->opcode) {
      case BRW_OPCODE_IF:
	 /* Push our information onto a stack so we can recover from
	  * nested ifs.
	  */
	 if_stack.push_tail(cur_if->make_list(mem_ctx));
	 else_stack.push_tail(cur_else->make_list(mem_ctx));
	 endif_stack.push_tail(cur_endif->make_list(mem_ctx));

	 cur_if = cur;
	 cur_else = NULL;
	 /* Set up the block just after the endif.  Don't know when exactly
	  * it will start, yet.
	  */
	 cur_endif = new_block();

	 /* Set up our immediately following block, full of "then"
	  * instructions.
	  */
	 next = new_block();
	 next->start = (backend_instruction *)inst->next;
	 cur_if->add_successor(mem_ctx, next);

	 set_next_block(next);
	 break;

      case BRW_OPCODE_ELSE:
	 cur->add_successor(mem_ctx, cur_endif);

	 next = new_block();
	 next->start = (backend_instruction *)inst->next;
	 cur_if->add_successor(mem_ctx, next);
	 cur_else = next;

	 set_next_block(next);
	 break;

      case BRW_OPCODE_ENDIF:
	 cur_endif->start = (backend_instruction *)inst->next;
	 cur->add_successor(mem_ctx, cur_endif);
	 set_next_block(cur_endif);

	 if (!cur_else)
	    cur_if->add_successor(mem_ctx, cur_endif);

	 /* Pop the stack so we're in the previous if/else/endif */
	 cur_if = pop_stack(&if_stack);
	 cur_else = pop_stack(&else_stack);
	 cur_endif = pop_stack(&endif_stack);
	 break;

      case BRW_OPCODE_DO:
	 /* Push our information onto a stack so we can recover from
	  * nested loops.
	  */
	 do_stack.push_tail(cur_do->make_list(mem_ctx));
	 while_stack.push_tail(cur_while->make_list(mem_ctx));

	 /* Set up the block just after the while.  Don't know when exactly
	  * it will start, yet.
	  */
	 cur_while = new_block();

	 /* Set up our immediately following block: the body of the
	  * loop.
	  */
	 next = new_block();
	 next->start = (backend_instruction *)inst->next;
	 cur->add_successor(mem_ctx, next);
	 cur_do = next;

	 set_next_block(next);
	 break;

      case BRW_OPCODE_CONTINUE:
	 cur->add_successor(mem_ctx, cur_do);

	 next = new_block();
	 next->start = (backend_instruction *)inst->next;
	 if (inst->predicate)
	    cur->add_successor(mem_ctx, next);

	 set_next_block(next);
	 break;

      case BRW_OPCODE_BREAK:
	 cur->add_successor(mem_ctx, cur_while);

	 next = new_block();
	 next->start = (backend_instruction *)inst->next;
	 if (inst->predicate)
	    cur->add_successor(mem_ctx, next);

	 set_next_block(next);
	 break;

      case BRW_OPCODE_WHILE:
	 cur_while->start = (backend_instruction *)inst->next;

	 cur->add_successor(mem_ctx, cur_do);
	 set_next_block(cur_while);

	 /* Pop the stack so we're in the previous loop */
	 cur_do = pop_stack(&do_stack);
	 cur_while = pop_stack(&while_stack);
	 break;

      default:
	 break;
      }
   }