Example #1
static void
test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits)
{
	bitmap_info_t binfo_dyn;
	bitmap_info_init(&binfo_dyn, nbits);

	assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
	    "Unexpected difference between static and dynamic initialization, "
	    "nbits=%zu", nbits);
	assert_zu_eq(binfo->nbits, binfo_dyn.nbits,
	    "Unexpected difference between static and dynamic initialization, "
	    "nbits=%zu", nbits);
#ifdef BITMAP_USE_TREE
	assert_u_eq(binfo->nlevels, binfo_dyn.nlevels,
	    "Unexpected difference between static and dynamic initialization, "
	    "nbits=%zu", nbits);
	{
		unsigned i;

		for (i = 0; i < binfo->nlevels; i++) {
			assert_zu_eq(binfo->levels[i].group_offset,
			    binfo_dyn.levels[i].group_offset,
			    "Unexpected difference between static and dynamic "
			    "initialization, nbits=%zu, level=%u", nbits, i);
		}
	}
#else
	assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
	    "Unexpected difference between static and dynamic initialization");
#endif
}
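For context, this helper is driven by a caller that pairs it with a statically initialized bitmap_info_t. A minimal sketch, assuming jemalloc's BITMAP_INFO_INITIALIZER macro and the TEST_BEGIN/TEST_END harness seen in the other examples on this page:

TEST_BEGIN(test_bitmap_initializer)
{
	/* BITMAP_INFO_INITIALIZER needs a compile-time constant, hence the
	 * macro-per-size pattern rather than a loop. Sizes are illustrative. */
#define NB(nbits) {							\
		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
		test_bitmap_initializer_body(&binfo, nbits);		\
	}
	NB(1)
	NB(16)
	NB(BITMAP_MAXBITS)
#undef NB
}
TEST_END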
Example #2
void
allocator_destroy (struct allocator *a)
{
  ASSERT (a != NULL);
  ASSERT (bitmap_none (a->used_map, 0, bitmap_size (a->used_map)));
  size_t pages = (bitmap_size (a->used_map)*a->item_size + PGSIZE-1) / PGSIZE;
  palloc_free_multiple (a->items, pages);
  bitmap_destroy (a->used_map);
}
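The pages computation above is the standard ceiling-division idiom: adding PGSIZE-1 before dividing rounds any partial page up to a whole page. In generic form (an illustrative macro, not from the original source):

/* Ceiling division: chunks of size d needed to hold n units.
   E.g. with PGSIZE == 4096, (4097 + 4096-1) / 4096 == 2 pages. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))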
Example #3
static struct idset *idset_new(int num_ssid, int num_id)
{
	struct idset *set;

	set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
	if (set) {
		set->num_ssid = num_ssid;
		set->num_id = num_id;
		memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
	}
	return set;
}
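Note that this bitmap_size() is a two-argument helper local to the s390 idset code, returning a byte count for a bitmap with one bit per (ssid, id) pair; it is unrelated to the jemalloc and Pintos functions of the same name elsewhere on this page. A plausible sketch, assuming the kernel's BITS_TO_LONGS macro:

/* Hypothetical reconstruction of the idset-local helper. */
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
	return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}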
/*
 * Test failure to read bitmap from child log file
 */
void test_cbt_util_coalesce_child_no_bitmap_data_failure(void **state)
{
	int result;
	char* args[] = { "cbt-util", "coalesce", "-p", "test_parent.log", "-c", "test_child.log" };
	void *parent_meta;
	void *child_meta;
	uint64_t disk_size, file_size;

	disk_size = 2199023255552; // 2 TB
	file_size = bitmap_size(disk_size) + sizeof(struct cbt_log_metadata);

	parent_meta = malloc(file_size);
	child_meta = malloc(sizeof(struct cbt_log_metadata));
	// Initialise size in metadata file
	((struct cbt_log_metadata*)parent_meta)->size = disk_size;
	((struct cbt_log_metadata*)child_meta)->size = disk_size;
	FILE *parent_log = fmemopen((void*)parent_meta, file_size, "r");
	FILE *child_log = fmemopen((void*)child_meta,
							sizeof(struct cbt_log_metadata), "r+");

	will_return(__wrap_fopen, parent_log);
	expect_value(__wrap_fclose, fp, parent_log);
	will_return(__wrap_fopen, child_log);
	expect_value(__wrap_fclose, fp, child_log);

	result = cbt_util_coalesce(6, args);
	assert_int_equal(result, -EIO);

	free(parent_meta);
	free(child_meta);
}
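These cbt-util tests stub out file I/O entirely: fmemopen() turns heap buffers into FILE streams, and the __wrap_fopen/__wrap_fclose hooks (GNU ld --wrap) hand those streams to the code under test via cmocka's will_return()/expect_value() queues. A minimal wrapper might look like this (a sketch, assuming the binary is linked with -Wl,--wrap=fopen):

FILE *__wrap_fopen(const char *path, const char *mode)
{
	(void)path;
	(void)mode;
	/* Pop the next FILE* queued by will_return(__wrap_fopen, ...). */
	return mock_ptr_type(FILE *);
}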
Example #5
uint RepairPost::size() {
	uint sum = 0;

	sum += sizeof(uint) * ptrs_len;                        // ptrs (in bits)
	sum += sizeof(uint)*(m/32*nbits+m%32*nbits/32+1);      // compressed sequence
	sum += BRR->SpaceRequirementInBits()/8;                // Bitmap-rank BRR
	sum += (symbols_new_len*bits_sn/WW32+1)*sizeof(uint);  // c_symb[]

	//sum += sizeof(uint) * (nodes +1);                    //** lenPost array.
	sum += sizetobits();

	cerr << "\n ptrs_size =" << ptrs_len*4;
	cerr << "\n compressed seq=" << sizeof(uint)*(m/32*nbits+m%32*nbits/32+1);
	cerr << "\n Bitmap Rb (rank structs already removed)=" << (BRR->SpaceRequirementInBits()/8) << " bytes";
	cerr << "\n Rs (dict)=" << (symbols_new_len*bits_sn/WW32+1)*sizeof(uint);
	cerr << "\n LenPosting[] array (lens of each posting OLD!!) = " << 4 * (nodes +1);
	cerr << "\n LenPosting[] array (lens of each posting BITS) = " << sizetobits();
	cerr << "\n VOCAB SIZE = ptrs[] + lenList[] = " << sizetobits() + (sizeof(uint) * ptrs_len) << " bytes\n";

	uint totalBitmaps = bitmap_size();

	cerr << "\n  @size Postings as bitmaps =" << totalBitmaps << " bytes";
	cerr << "\n  @Number of Bitmaps=" << il->numBitmaps << ", bitmapLenListThreshold=" << il->lenBitmapThreshold;
	cerr << "\n  @Bytes per bitmap = " << il->ints_bitmaps_aligned * sizeof(uint) << "\n";
	cerr << "\n  @Rank struct for bitVector + bitVector " << BR_ptr_bitmap->SpaceRequirementInBits()/8;
	sum += BR_ptr_bitmap->SpaceRequirementInBits()/8;
	sum += totalBitmaps;
	fflush(stderr);
	return sum;
}
Example #6
TEST_END

TEST_BEGIN(test_bitmap_unset)
{
	size_t i;

	for (i = 1; i <= BITMAP_MAXBITS; i++) {
		bitmap_info_t binfo;
		bitmap_info_init(&binfo, i);
		{
			size_t j;
			bitmap_t *bitmap = (bitmap_t *)malloc(
			    bitmap_size(&binfo));
			bitmap_init(bitmap, &binfo);

			for (j = 0; j < i; j++)
				bitmap_set(bitmap, &binfo, j);
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");
			for (j = 0; j < i; j++)
				bitmap_unset(bitmap, &binfo, j);
			for (j = 0; j < i; j++)
				bitmap_set(bitmap, &binfo, j);
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");
			free(bitmap);
		}
	}
}
/*
 * Test failure to allocate bitmap buffer for child
 */
void test_cbt_util_coalesce_child_bitmap_malloc_failure(void **state)
{
	int result;
	uint64_t disk_size, file_size;
	char* args[] = { "cbt-util", "coalesce", "-p", "test_parent.log", "-c", "test_child.log" };
	void *log_meta;

	disk_size = 2199023255552; // 2 TB
	file_size = bitmap_size(disk_size) + sizeof(struct cbt_log_metadata);
	log_meta = malloc(file_size);
	((struct cbt_log_metadata*)log_meta)->size = disk_size;
	FILE *parent_log = fmemopen((void*)log_meta, file_size, "r");
	FILE *child_log = fmemopen((void*)log_meta, file_size, "r+");

	will_return(__wrap_fopen, parent_log);
	expect_value(__wrap_fclose, fp, parent_log);
	will_return(__wrap_fopen, child_log);
	expect_value(__wrap_fclose, fp, child_log);

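	/* Queue outcomes for the wrapped malloc: the first three allocations
	 * succeed, the fourth (per this test's purpose, the child's bitmap
	 * buffer) fails. */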
	malloc_succeeds(true);
	malloc_succeeds(true);
	malloc_succeeds(true);
	malloc_succeeds(false);

	result = cbt_util_coalesce(6, args);
	assert_int_equal(result, -ENOMEM);

	disable_malloc_mock();
	free(log_meta);
}
Example #8
/** Install I/O Permission bitmap.
 *
 * Current task's I/O permission bitmap, if any, is installed
 * in the current CPU's TSS.
 *
 * Interrupts must be disabled prior to this call.
 *
 */
void io_perm_bitmap_install(void)
{
    /* First, copy the I/O Permission Bitmap. */
    irq_spinlock_lock(&TASK->lock, false);

    size_t ver = TASK->arch.iomapver;
    size_t elements = TASK->arch.iomap.elements;

    if (elements > 0) {
        ASSERT(TASK->arch.iomap.bits);

        bitmap_t iomap;
        bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8,
                          CPU->arch.tss->iomap);
        bitmap_copy(&iomap, &TASK->arch.iomap, elements);

        /*
         * Set the trailing bits in the last byte of the map to disable
         * I/O access.
         */
        bitmap_set_range(&iomap, elements,
                         ALIGN_UP(elements, 8) - elements);

        /*
         * It is safe to set the trailing eight bits because of the
         * extra convenience byte in TSS_IOMAP_SIZE.
         */
        bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
    }

    irq_spinlock_unlock(&TASK->lock, false);

    /*
     * Second, adjust TSS segment limit.
     * Take the extra ending byte with all bits set into account.
     */
    ptr_16_64_t cpugdtr;
    gdtr_store(&cpugdtr);

    descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
    size_t size = bitmap_size(elements);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
    gdtr_load(&cpugdtr);

    /*
     * Before we load new TSS limit, the current TSS descriptor
     * type must be changed to describe inactive TSS.
     */
    tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
    tss_desc->type = AR_TSS;
    tr_load(GDT_SELECTOR(TSS_DES));

    /*
     * Update the generation count so that faults caused by
     * early accesses can be serviced.
     */
    CPU->arch.iomapver_copy = ver;
}
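The trailing-bit logic above depends on ALIGN_UP rounding elements up to the next multiple of 8. For power-of-two alignments the macro is conventionally defined along these lines (a sketch; HelenOS's exact definition may differ):

/* Round x up to a multiple of align (align must be a power of two).
 * E.g. ALIGN_UP(13, 8) == 16, so bits 13..15 of the last map byte are
 * the ones set by the first bitmap_set_range() call. */
#define ALIGN_UP(x, align) (((x) + ((align) - 1)) & ~((align) - 1))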
Example #9
TEST_END

TEST_BEGIN(test_bitmap_sfu)
{
	size_t i;

	for (i = 1; i <= BITMAP_MAXBITS; i++) {
		bitmap_info_t binfo;
		bitmap_info_init(&binfo, i);
		{
			size_t j;
			bitmap_t *bitmap = (bitmap_t *)malloc(
			    bitmap_size(&binfo));
			bitmap_init(bitmap, &binfo);

			/* Iteratively set bits starting at the beginning. */
			for (j = 0; j < i; j++) {
				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
				    "First unset bit should be just after "
				    "previous first unset bit");
			}
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");

			/*
			 * Iteratively unset bits starting at the end, and
			 * verify that bitmap_sfu() reaches the unset bits.
			 */
			for (j = i - 1; j < i; j--) { /* (i..0] */
				bitmap_unset(bitmap, &binfo, j);
				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
				    "First unset bit should the bit previously "
				    "unset");
				bitmap_unset(bitmap, &binfo, j);
			}
			assert_false(bitmap_get(bitmap, &binfo, 0),
			    "Bit should be unset");

			/*
			 * Iteratively set bits starting at the beginning, and
			 * verify that bitmap_sfu() looks past them.
			 */
			for (j = 1; j < i; j++) {
				bitmap_set(bitmap, &binfo, j - 1);
				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
				    "First unset bit should be just after the "
				    "bit previously set");
				bitmap_unset(bitmap, &binfo, j);
			}
			assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
			    "First unset bit should be the last bit");
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");
			free(bitmap);
		}
	}
}
Example #10
/* Returns true if PAGE was allocated from POOL,
   false otherwise. */
static bool
page_from_pool (const struct pool *pool, void *page) 
{
  size_t page_no = pg_no (page);
  size_t start_page = pg_no (pool->base);
  size_t end_page = start_page + bitmap_size (pool->used_map);

  return page_no >= start_page && page_no < end_page;
}
/* Frees the swap slot associated with the given swapid. */
void
swap_free (swapid_t id)
{
  ASSERT (swap_block != NULL);
  ASSERT (id < bitmap_size (free_blocks));

  lock_acquire (&swap_alloc_lock);
  bitmap_set (free_blocks, id, true);
  lock_release (&swap_alloc_lock);
}
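Unlike the jemalloc bitmap_size(binfo) seen earlier, which returns a size in bytes, the Pintos bitmap_size() used in both snippets above returns the number of bits, so page_from_pool() can treat it directly as a page count. In Pintos (lib/kernel/bitmap.c) it is essentially:

/* Returns the number of bits in B. */
size_t
bitmap_size (const struct bitmap *b)
{
  return b->bit_cnt;
}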
Example #12
/* Free the swap page with index SWAP_FRAME_NO in SWAP_TABLE */
void
swap_free (struct swap_table * swap_table, size_t swap_frame_no)
{
  ASSERT (swap_table->swap_block != NULL);
  ASSERT (swap_frame_no < bitmap_size (swap_table->bitmap));
  
  lock_acquire (&swap_table->lock_bitmap);
  bitmap_set (swap_table->bitmap, swap_frame_no, false);
  lock_release(&swap_table->lock_bitmap);
}
  static size_t allocation_size(jint statics_size, jint vtable_length) {
    size_t size = header_size() + statics_size;

#if USE_EMBEDDED_VTABLE_BITMAP
    size += bitmap_size(vtable_length);
#else
    (void)vtable_length;
#endif

    return align_allocation_size(size);
  }
Example #14
void allocator_free (struct allocator *a, void *base, size_t amount)
{
  ASSERT (a != NULL);
  if (base == NULL || amount == 0)
    return;
  ASSERT (base >= a->items);
  ASSERT (base <= item_pos (a, bitmap_size (a->used_map)-1));
  size_t pos = (uintptr_t) (base - a->items) / a->item_size;
  ASSERT (bitmap_all (a->used_map, pos, amount));
  bitmap_set_multiple (a->used_map, pos, amount, false);
}
Example #15
TEST_END

static size_t
test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
    size_t prev_size)
{
	size_t size = bitmap_size(binfo);
	assert_zu_ge(size, (nbits >> 3),
	    "Bitmap size is smaller than expected");
	assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
	return (size);
}
Example #16
static void
test_bitmap_size(void)
{
	size_t i, prev_size;

	prev_size = 0;
	for (i = 1; i <= MAXBITS; i++) {
		size_t size = bitmap_size(i);
		assert(size >= prev_size);
		prev_size = size;
	}
}
Example #17
/** Enable I/O space range for task.
 *
 * Interrupts are disabled and task is locked.
 *
 * @param task   Task.
 * @param ioaddr Starting I/O space address.
 * @param size   Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 *
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
	size_t elements = ioaddr + size;
	if (elements > IO_PORTS)
		return ENOENT;
	
	if (task->arch.iomap.elements < elements) {
		/*
		 * The I/O permission bitmap is too small and needs to be grown.
		 */
		
		void *store = malloc(bitmap_size(elements), FRAME_ATOMIC);
		if (!store)
			return ENOMEM;
		
		bitmap_t oldiomap;
		bitmap_initialize(&oldiomap, task->arch.iomap.elements,
		    task->arch.iomap.bits);
		
		bitmap_initialize(&task->arch.iomap, elements, store);
		
		/*
		 * Mark the new range inaccessible.
		 */
		bitmap_set_range(&task->arch.iomap, oldiomap.elements,
		    elements - oldiomap.elements);
		
		/*
		 * In case there really existed smaller iomap,
		 * copy its contents and deallocate it.
		 */
		if (oldiomap.bits) {
			bitmap_copy(&task->arch.iomap, &oldiomap,
			    oldiomap.elements);
			
			free(oldiomap.bits);
		}
	}
	
	/*
	 * Enable the range and we are done.
	 */
	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);
	
	/*
	 * Increment I/O Permission bitmap generation counter.
	 */
	task->arch.iomapver++;
	
	return EOK;
}
Example #18
TEST_END

static void
test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits)
{
	size_t i;
	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
	bitmap_init(bitmap, binfo);

	for (i = 0; i < nbits; i++)
		bitmap_set(bitmap, binfo, i);
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
	free(bitmap);
}
Example #19
/*
 * Set a pfn as populated, expanding the tracking structures if needed. To
 * avoid realloc()ing too excessively, the size is increased to the nearest
 * power of two large enough to contain the required pfn.
 */
static int pfn_set_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
{
    xc_interface *xch = ctx->xch;

    if ( pfn > ctx->restore.max_populated_pfn )
    {
        xen_pfn_t new_max;
        size_t old_sz, new_sz;
        unsigned long *p;

        /* Round up to the nearest power of two larger than pfn, less 1. */
        new_max = pfn;
        new_max |= new_max >> 1;
        new_max |= new_max >> 2;
        new_max |= new_max >> 4;
        new_max |= new_max >> 8;
        new_max |= new_max >> 16;
#ifdef __x86_64__
        new_max |= new_max >> 32;
#endif

        old_sz = bitmap_size(ctx->restore.max_populated_pfn + 1);
        new_sz = bitmap_size(new_max + 1);
        p = realloc(ctx->restore.populated_pfns, new_sz);
        if ( !p )
        {
            ERROR("Failed to realloc populated bitmap");
            errno = ENOMEM;
            return -1;
        }

        memset((uint8_t *)p + old_sz, 0x00, new_sz - old_sz);

        ctx->restore.populated_pfns    = p;
        ctx->restore.max_populated_pfn = new_max;
    }
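The shift-and-or cascade propagates the highest set bit of pfn into every lower bit, yielding 2^k - 1 for the smallest power of two 2^k greater than pfn. A worked example as a hypothetical standalone helper:

#include <stdint.h>

/* Smear the top set bit downward: 0x12345 -> 0x1ffff (2^17 - 1). */
static uint64_t round_up_mask(uint64_t v)
{
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v |= v >> 32;
	return v; /* every bit at or below the MSB is now set */
}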
Example #20
TEST_END

static void
test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits)
{
	size_t i;
	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
	bitmap_init(bitmap, binfo);

	for (i = 0; i < nbits; i++) {
		assert_false(bitmap_get(bitmap, binfo, i),
		    "Bit should be unset");
	}
	free(bitmap);
}
Example #21
static int writelog_create(struct tdlog_state *s)
{
  uint64_t bmsize;

  bmsize = bitmap_size(s->size);

  BDPRINTF("allocating %"PRIu64" bytes for dirty bitmap", bmsize);

  s->writelog = bitmap_alloc(s->size);
  if (!s->writelog) {
    BWPRINTF("could not allocate dirty bitmap of size %"PRIu64, bmsize);
    return -1;
  }

  return 0;
}
/*
 * Test successful coalesce
 */
void test_cbt_util_coalesce_success(void **state)
{
	int result;
	int file_size;
	char* args[] = { "cbt-util", "coalesce", "-p", "test_parent.log", "-c", "test_child.log" };
	void *parent_data;
	void *child_data;
	struct fwrite_data *output;
	uint64_t size = 4194304;

	uint64_t bmsize = bitmap_size(size);
	file_size = sizeof(struct cbt_log_metadata) + bmsize;
	parent_data = malloc(file_size);
	child_data = malloc(file_size);

	// Initialise size in metadata file
	((struct cbt_log_metadata*)parent_data)->size = size;
	// Fill bitmap with arbitrary bytes (the code of memcpy serves as a byte source)
	memcpy(parent_data + sizeof(struct cbt_log_metadata), (void*)memcpy, bmsize);
	FILE *parent_log = fmemopen((void*)parent_data, file_size, "r");

	// Initialise size in metadata file
	((struct cbt_log_metadata*)child_data)->size = size;
	// Fill bitmap with arbitrary bytes
	memcpy(child_data + sizeof(struct cbt_log_metadata), (void*)memcpy, bmsize);
	FILE *child_log = fmemopen((void*)child_data, file_size, "r");

	will_return(__wrap_fopen, parent_log);
	expect_value(__wrap_fclose, fp, parent_log);
	will_return(__wrap_fopen, child_log);
	expect_value(__wrap_fclose, fp, child_log);
	enable_mock_fwrite();
	output = setup_fwrite_mock(bmsize);

	result = cbt_util_coalesce(6, args);
	// OR the parent bitmap into the expected child output (first byte only;
	// both bitmaps contain identical data here, so one byte suffices)
	*((char *)child_data + sizeof(struct cbt_log_metadata))
				|= *((char *)parent_data + sizeof(struct cbt_log_metadata));
	assert_int_equal(result, 0);
	assert_memory_equal(output->buf,
				child_data + sizeof(struct cbt_log_metadata), bmsize);

	free_fwrite_data(output);
	free(parent_data);
	free(child_data);
}
Example #23
TEST_END

static void
test_bitmap_sfu_body(const bitmap_info_t *binfo, size_t nbits)
{
	size_t i;
	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
	bitmap_init(bitmap, binfo);

	/* Iteratively set bits starting at the beginning. */
	for (i = 0; i < nbits; i++) {
		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
		    "First unset bit should be just after previous first unset "
		    "bit");
	}
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");

	/*
	 * Iteratively unset bits starting at the end, and verify that
	 * bitmap_sfu() reaches the unset bits.
	 */
	for (i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
		bitmap_unset(bitmap, binfo, i);
		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
		    "First unset bit should the bit previously unset");
		bitmap_unset(bitmap, binfo, i);
	}
	assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");

	/*
	 * Iteratively set bits starting at the beginning, and verify that
	 * bitmap_sfu() looks past them.
	 */
	for (i = 1; i < nbits; i++) {
		bitmap_set(bitmap, binfo, i - 1);
		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
		    "First unset bit should be just after the bit previously "
		    "set");
		bitmap_unset(bitmap, binfo, i);
	}
	assert_zd_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
	    "First unset bit should be the last bit");
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
	free(bitmap);
}
Example #24
/** Enable I/O space range for task.
 *
 * Interrupts are disabled and task is locked.
 *
 * @param task	 Task.
 * @param ioaddr Starting I/O space address.
 * @param size	 Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
    if (!task->arch.iomap) {
        task->arch.iomap = malloc(sizeof(bitmap_t), 0);
        if (task->arch.iomap == NULL)
            return ENOMEM;

        void *store = malloc(bitmap_size(IO_MEMMAP_PAGES), 0);
        if (store == NULL)
            return ENOMEM;

        bitmap_initialize(task->arch.iomap, IO_MEMMAP_PAGES, store);
        bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES);
    }

    uintptr_t iopage = ioaddr / PORTS_PER_PAGE;
    size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE);
    bitmap_set_range(task->arch.iomap, iopage, size / 4);

    return EOK;
}
/*
 * Test failure to set file pointer to start of bitmap area
 */
void test_cbt_util_coalesce_set_file_pointer_failure(void **state)
{
	int result;
	int file_size;
	char* args[] = { "cbt-util", "coalesce", "-p", "test_parent.log", "-c", "test_child.log" };
	void *parent_data;
	void *child_data;
	uint64_t size = 4194304;

	uint64_t bmsize = bitmap_size(size);
	file_size = sizeof(struct cbt_log_metadata) + bmsize;
	parent_data = malloc(file_size);
	child_data = malloc(file_size);

	// Initialise size in metadata file
	((struct cbt_log_metadata*)parent_data)->size = size;
	// Fill bitmap with arbitrary bytes
	memcpy(parent_data + sizeof(struct cbt_log_metadata), (void*)memcpy, bmsize);
	FILE *parent_log = fmemopen((void*)parent_data, file_size, "w+");

	// Initialise size in metadata file
	((struct cbt_log_metadata*)child_data)->size = size;
	// Fill bitmap with arbitrary bytes
	memcpy(child_data + sizeof(struct cbt_log_metadata), (void*)memcpy, bmsize);
	FILE *child_log = fmemopen((void*)child_data, file_size, "w+");

	will_return(__wrap_fopen, parent_log);
	expect_value(__wrap_fclose, fp, parent_log);
	will_return(__wrap_fopen, child_log);
	expect_value(__wrap_fclose, fp, child_log);
	
	fail_fseek(EIO);

	result = cbt_util_coalesce(6, args);
	assert_int_equal(result, -EIO);

	free(parent_data);
	free(child_data);
}
Example #26
/*!
 * \brief Create NSEC RR set.
 *
 * \param from       Node that should contain the new RRSet
 * \param to         Node that should be pointed to from 'from'
 * \param ttl        Record TTL (SOA's minimum TTL).
 *
 * \return NSEC RR set, NULL on error.
 */
static knot_rrset_t *create_nsec_rrset(const zone_node_t *from,
                                       const zone_node_t *to,
                                       uint32_t ttl)
{
	assert(from);
	assert(to);
	knot_rrset_t *rrset = knot_rrset_new(from->owner, KNOT_RRTYPE_NSEC,
					     KNOT_CLASS_IN, NULL);
	if (!rrset) {
		return NULL;
	}

	// Create bitmap
	bitmap_t rr_types = { 0 };
	bitmap_add_node_rrsets(&rr_types, from);
	bitmap_add_type(&rr_types, KNOT_RRTYPE_NSEC);
	bitmap_add_type(&rr_types, KNOT_RRTYPE_RRSIG);
	if (node_rrtype_exists(from, KNOT_RRTYPE_SOA)) {
		bitmap_add_type(&rr_types, KNOT_RRTYPE_DNSKEY);
	}

	// Create RDATA
	assert(to->owner);
	size_t next_owner_size = knot_dname_size(to->owner);
	size_t rdata_size = next_owner_size + bitmap_size(&rr_types);
	uint8_t rdata[rdata_size];

	// Fill RDATA
	memcpy(rdata, to->owner, next_owner_size);
	bitmap_write(&rr_types, rdata + next_owner_size);

	int ret = knot_rrset_add_rdata(rrset, rdata, rdata_size, ttl, NULL);
	if (ret != KNOT_EOK) {
		knot_rrset_free(&rrset, NULL);
		return NULL;
	}

	return rrset;
}
Example #27
TEST_END

TEST_BEGIN(test_bitmap_init)
{
	size_t i;

	for (i = 1; i <= BITMAP_MAXBITS; i++) {
		bitmap_info_t binfo;
		bitmap_info_init(&binfo, i);
		{
			size_t j;
			bitmap_t *bitmap = (bitmap_t *)malloc(
			    bitmap_size(&binfo));
			bitmap_init(bitmap, &binfo);

			for (j = 0; j < i; j++) {
				assert_false(bitmap_get(bitmap, &binfo, j),
				    "Bit should be unset");
			}
			free(bitmap);
		}
	}
}
Example #28
/* Initializes the page allocator.  At most USER_PAGE_LIMIT
   pages are put into the user pool. */
void
palloc_init (size_t user_page_limit)
{
  /* Free memory starts at 1 MB and runs to the end of RAM. */
  uint8_t *free_start = ptov (1024 * 1024);
  uint8_t *free_end = ptov (init_ram_pages * PGSIZE);
  size_t free_pages = (free_end - free_start) / PGSIZE;
  size_t user_pages = free_pages / 2;
  size_t kernel_pages;
  if (user_pages > user_page_limit)
    user_pages = user_page_limit;
  kernel_pages = free_pages - user_pages;

  /* Give half of memory to kernel, half to user. */
  init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
  init_pool (&user_pool, free_start + kernel_pages * PGSIZE,
             user_pages, "user pool");

  clock_point_init = user_pool.base;
  clock_point = clock_point_init;
  clock_point_max = clock_point_init + bitmap_size(user_pool.used_map) * PGSIZE;

}
Example #29
void idset_fill(struct idset *set)
{
	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
}
Example #30
void idset_clear(struct idset *set)
{
	memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
}