Example #1
CUDA_TEST_P(Hog_var, HOG)
{
    cv::cuda::GpuMat _img(c_img);
    cv::cuda::GpuMat d_img;

    int win_stride_width = 8;
    int win_stride_height = 8;
    int win_width = 16;
    int block_width = 8;
    int block_stride_width = 4;
    int block_stride_height = 4;
    int cell_width = 4;
    int nbins = 9;

    Size win_stride(win_stride_width, win_stride_height);
    Size win_size(win_width, win_width * 2);
    Size block_size(block_width, block_width);
    Size block_stride(block_stride_width, block_stride_height);
    Size cell_size(cell_width, cell_width);

    cv::Ptr<cv::cuda::HOG> gpu_hog = cv::cuda::HOG::create(win_size, block_size, block_stride, cell_size, nbins);

    gpu_hog->setNumLevels(13);
    gpu_hog->setHitThreshold(0);
    gpu_hog->setWinStride(win_stride);
    gpu_hog->setScaleFactor(1.05);
    gpu_hog->setGroupThreshold(8);
    gpu_hog->compute(_img, d_img);

    vector<float> gpu_desc_vec;
    ASSERT_TRUE(gpu_desc_vec.empty());
    cv::Mat R(d_img);

    cv::HOGDescriptor cpu_hog(win_size, block_size, block_stride, cell_size, nbins);
    cpu_hog.nlevels = 13;
    vector<float> cpu_desc_vec;
    ASSERT_TRUE(cpu_desc_vec.empty());
    cpu_hog.compute(c_img, cpu_desc_vec, win_stride, Size(0,0));
}
Example #2
HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
                                                          HeapWord* n,
                                                          const void* addr) {
  // We're not in the normal case.  We need to handle an important subcase
  // here: LAB allocation.  An allocation previously recorded in the
  // offset table was actually a lab allocation, and was divided into
  // several objects subsequently.  Fix this situation as we answer the
  // query, by updating entries as we cross them.

  // If the first object's end is at the card boundary, start refining
  // with the corresponding card (the value of the entry will basically be
  // set to 0). If the object crosses the boundary -- start from the next card.
  size_t n_index = _array->index_for(n);
  size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
  // Calculate a consistent next boundary.  If "n" is not at the boundary
  // already, step to the boundary.
  HeapWord* next_boundary = _array->address_for_index(n_index) +
                            (n_index == next_index ? 0 : N_words);
  assert(next_boundary <= _array->_end,
         err_msg("next_boundary is beyond the end of the covered region "
                 " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
                 next_boundary, _array->_end));
  if (addr >= gsp()->top()) return gsp()->top();
  while (next_boundary < addr) {
    while (n <= next_boundary) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += block_size(q);
    }
    assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
    // [q, n) is the block that crosses the boundary.
    alloc_block_work2(&next_boundary, &next_index, q, n);
  }
  return forward_to_block_containing_addr_const(q, n, addr);
}
Example #3
void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());

      debug_only(prev_q = q);
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    } else {
      // q is not a live object.  But we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}
Example #4
void
sgen_init_internal_allocator (void)
{
    int i, size;

    for (i = 0; i < INTERNAL_MEM_MAX; ++i)
        fixed_type_allocator_indexes [i] = -1;

    for (i = 0; i < NUM_ALLOCATORS; ++i) {
        allocator_block_sizes [i] = block_size (allocator_sizes [i]);
        mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
        mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i]);
    }

    for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
        int max_size = LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2;
        /*
         * we assert that allocator_sizes contains the biggest possible object size
         * per block (4K => 4080 / 2 = 2040, 8k => 8176 / 2 = 4088, 16k => 16368 / 2 = 8184 on 64bits),
         * so that we do not get different block sizes for sizes that should go to the same one
         */
        g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
    }
}
Example #5
/*! Initializes the swap allocator. */
void swalloc_init(void)
{
    uint32_t i;

    swap_disk = block_get_role(BLOCK_SWAP);
    swap_slots = block_size(swap_disk) / PAGE_SECTORS;

    /* Initialize swap table */
    uint32_t num_pages_used = sizeof(struct swap) * swap_slots;
    num_pages_used = (uint32_t) pg_round_up((void *) num_pages_used) / PGSIZE;

    /* Get pages for swap table */
    swap_list = palloc_get_multiple(PAL_ASSERT | PAL_PAGING | PAL_ZERO, num_pages_used);

    /* Initialize list */
    list_init(open_swap_list);
    /* Initialize swap entries */
    for (i = 0; i < swap_slots; ++i)
    {
        swap_list[i].start_sector = i * PAGE_SECTORS;
        swap_list[i].in_use = false;
        list_push_back(open_swap_list, &(swap_list[i].open_elem));
    }
}
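For scale, using Pintos' usual constants (512-byte sectors, 4 KiB pages, so PAGE_SECTORS = 8): a 4 MiB swap device reports block_size(swap_disk) = 8192 sectors, giving swap_slots = 1024. If sizeof(struct swap) were, say, 16 bytes (an assumption; the struct is defined elsewhere), the swap table would occupy 16 * 1024 = 16384 bytes, which pg_round_up and the division by PGSIZE turn into num_pages_used = 4 pages.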
Example #6
char *EMalloc(unsigned long nbytes)
/* storage allocator */
/* Always returns a pointer that has 8-byte alignment (essential for our
   internal representation of an object). */
{
    unsigned char *p;
    unsigned char *temp;
    register struct block_list *list;
    int alignment;
    int min_align;

#ifdef ELINUX
#ifndef EBSD62
	return malloc(nbytes);
#else
	p = malloc( nbytes + 8 );
	if( (unsigned long)p & 7 ){
		*(int *)p = MAGIC_FILLER;
		p += 4;
	}
	else{
	       *(int  *)(p+4) = 0;
	        p += 8;
	}
	return p;
#endif
#else
#ifdef HEAP_CHECK
    long size;

    check_pool();
#endif
    nbytes += align4; // allow for possible 4-aligned malloc pointers

    if (nbytes <= MAX_CACHED_SIZE) {
	/* See if we have a block of this size in our cache.
	   Every block in the cache is 8-aligned. */

	list = pool_map[(nbytes + (RESOLUTION - 1)) >> LOG_RESOLUTION];
#ifdef HEAP_CHECK
	if (list->size < nbytes || list->size > nbytes * 2) {
	    sprintf(msg, "Alloc - size is %d, nbytes is %d", list->size, nbytes);
	    RTInternal(msg);
	}
#endif
	temp = (char *)list->first;

	if (temp != NULL) {
	    /* a cache hit */

#ifdef EXTRA_STATS
	    a_hit++;
#endif
	    list->first = ((free_block_ptr)temp)->next;
	    cache_size -= 2;

#ifdef HEAP_CHECK
	    if (cache_size > 100000000)
		RTInternal("cache size is bad");
	    p = temp;
	    if (align4 && *(int *)(p-4) == MAGIC_FILLER)
		p = p - 4;
	    if (((unsigned long)temp) & 3)
		RTInternal("unaligned address in storage cache");
	    Allocated(block_size(p));
#endif
	    return temp; /* will be 8-aligned */
	}
	else {
	    nbytes = list->size; /* better to grab bigger size
				    so it can be reused for same purpose */
#ifdef EXTRA_STATS
	    a_miss++;
#endif
	}
    }
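The EBSD62 branch above guarantees 8-byte alignment by over-allocating eight bytes and tagging the padding word, but the matching free path is not shown. A standalone sketch of the same trick (hypothetical emalloc_aligned/efree_aligned names; MAGIC_FILLER here is an assumed tag value, not the runtime's real constant) could look like this:

#include <stdint.h>
#include <stdlib.h>

#define MAGIC_FILLER 0x1234ABCDu   /* assumed tag value for illustration */

/* Return a pointer with 8-byte alignment by over-allocating 8 bytes. */
static void *emalloc_aligned(size_t nbytes)
{
    unsigned char *p = malloc(nbytes + 8);
    if (p == NULL)
        return NULL;
    if ((uintptr_t)p & 7) {             /* malloc returned a 4-aligned pointer */
        *(uint32_t *)p = MAGIC_FILLER;  /* tag the 4-byte pad */
        return p + 4;
    }
    *(uint32_t *)(p + 4) = 0;           /* already 8-aligned: skip a full 8 bytes */
    return p + 8;
}

/* Recover the original malloc pointer by inspecting the word just before the block. */
static void efree_aligned(void *q)
{
    unsigned char *p = q;
    if (*(uint32_t *)(p - 4) == MAGIC_FILLER)
        free(p - 4);
    else
        free(p - 8);
}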
Example #7
//Return footer of the current block
static inline uint32_t* get_footer(uint32_t* const bp) {
    REQUIRES(bp != NULL);
    REQUIRES(in_heap(bp));

    return bp + (block_size(get_header(bp))/WSIZE) - 2;
}
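Since bp is a uint32_t*, the arithmetic above works in 4-byte words (WSIZE is presumably 4 here): block_size(get_header(bp)) / WSIZE converts the byte size stored in the header into a word count, and the trailing - 2 accounts for bp pointing one word past the header and for the footer being the last word of the block, assuming the stored size covers header, payload and footer. For a 24-byte block, for example, the footer sits at bp + 24/4 - 2 = bp + 4.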
Example #8
/**
 * Computes the usable size of a block (total size minus management data size)
 * \param[in] b Pointer to the block
 * \return The usable size of the block in bytes
 */
static size_t usable_block_size(block_info *b)
{
	return (block_size(b) - sizeof(*b));
}
Example #9
int main()
{

    // Set the number of symbols (i.e. the generation size in RLNC
    // terminology) and the size of a symbol in bytes
    uint32_t max_symbols = 42;
    uint32_t max_symbol_size = 64;

    std::string encode_filename = "encode-file.bin";

    // Create a test file for encoding.
    std::ofstream encode_file;
    encode_file.open (encode_filename, std::ios::binary);

    uint32_t file_size = 50000;
    std::vector<char> encode_data(file_size);
    std::vector<char> decode_data;

    // Just write some bytes to the file
    for(uint32_t i = 0; i < file_size; ++i)
    {
        encode_data[i] = rand() % 255;
    }
    encode_file.write(&encode_data[0], file_size);
    encode_file.close();

    // Select the encoding and decoding algorithms
    typedef kodo::full_rlnc_encoder<fifi::binary>
        encoder_t;

    typedef kodo::full_rlnc_decoder<fifi::binary>
        decoder_t;

    // Now for the encoder we use a file_encoder with the chosen
    // encoding algorithm
    typedef kodo::file_encoder<encoder_t>
        file_encoder_t;

    // For decoding we use an object_decoder with the chosen
    // decoding algorithm
    typedef kodo::object_decoder<decoder_t>
        object_decoder_t;

    // Create the encoder factory - builds the individual encoders used
    file_encoder_t::factory encoder_factory(max_symbols, max_symbol_size);

    // Create the actual file encoder using the encoder factory and
    // the filename of the file to be encoded
    file_encoder_t file_encoder(encoder_factory, encode_filename);

    // Create the decoder factory - build the individual decoders used
    object_decoder_t::factory decoder_factory(max_symbols, max_symbol_size);

    // Create the object decoder using the decoder factory and the
    // size of the file to be decoded
    object_decoder_t object_decoder(decoder_factory, file_size);

    // Now in the following loop we go through all the encoders
    // needed to encode the entire file. We then build the corresponding
    // decoder and decode the chunk immediately. In practice, where
    // encoders and decoders are on different devices, e.g. connected
    // over a network, we would also have to pass the encoder and decoder
    // index between the source and the sink so that the correct data is
    // passed from each encoder to the corresponding decoder.
    for(uint32_t i = 0; i < file_encoder.encoders(); ++i)
    {
        auto encoder = file_encoder.build(i);
        auto decoder = object_decoder.build(i);

        // Set the encoder non-systematic
        if(kodo::has_systematic_encoder<encoder_t>::value)
            kodo::set_systematic_off(encoder);

        std::vector<uint8_t> payload(encoder->payload_size());

        while( !decoder->is_complete() )
        {
            // Encode a packet into the payload buffer
            encoder->encode( &payload[0] );

            // In practice send the payload over a network, save it to
            // a file etc. Then when needed build and pass it to the decoder

            // Pass that packet to the decoder
            decoder->decode( &payload[0] );
        }

        std::vector<uint8_t> data_out(decoder->block_size());
        decoder->copy_symbols(sak::storage(data_out));
        data_out.resize(decoder->bytes_used());

        decode_data.insert(decode_data.end(),
                           data_out.begin(),
                           data_out.end());
    }

    // Check we properly decoded the data
    if (std::equal(decode_data.begin(),
                   decode_data.end(), encode_data.begin()))
    {
        std::cout << "Data decoded correctly" << std::endl;
    }
    else
    {
        std::cout << "Unexpected failure to decode "
                  << "please file a bug report :)" << std::endl;
    }
}
Example #10
/*
 * next_block - return next block
 */
static inline void *next_block(void *bp) {
    return bp + block_size(bp);
}
Example #11
static struct dentry *
nilfs_mount(struct file_system_type *fs_type, int flags,
	     const char *dev_name, void *data)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	struct dentry *root_dentry;
	int err, s_new = false;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return ERR_CAST(sd.bdev);

	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
	if (sd.bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
		err = -EBUSY;
		goto failed;
	}
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev);
	mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		s_new = true;

		/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (err)
			goto failed_super;

		s->s_flags |= MS_ACTIVE;
	} else if (!sd.cno) {
		int busy = false;

		if (nilfs_tree_was_touched(s->s_root)) {
			busy = nilfs_try_to_shrink_tree(s->s_root);
			if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
				printk(KERN_ERR "NILFS: the device already "
				       "has a %s mount.\n",
				       (s->s_flags & MS_RDONLY) ?
				       "read-only" : "read/write");
				err = -EBUSY;
				goto failed_super;
			}
		}
		if (!busy) {
			/*
			 * Try remount to setup mount states if the current
			 * tree is not mounted and only snapshots use this sb.
			 */
			err = nilfs_remount(s, &flags, data);
			if (err)
				goto failed_super;
		}
	}

	if (sd.cno) {
		err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
		if (err)
			goto failed_super;
	} else {
		root_dentry = dget(s->s_root);
	}

	if (!s_new)
		blkdev_put(sd.bdev, mode);

	return root_dentry;

 failed_super:
	deactivate_locked_super(s);

 failed:
	if (!s_new)
		blkdev_put(sd.bdev, mode);
	return ERR_PTR(err);
}
Example #12
static ssize_t block_read_size(struct kobj_t * kobj, void * buf, size_t size)
{
	struct block_t * blk = (struct block_t *)kobj->priv;
	return sprintf(buf, "%lld", block_size(blk));
}
Example #13
/*
 * malloc - allocate a block with at least size bytes of payload
 */
void *malloc (size_t size) {
    size_t total_size; /* include prologue and epilogue */
    unsigned *list_p;
    void *bp;

    dbg_printf("want to malloc(%d)\n", (int)size);
    print_heap();

    /* initialize */
    if (!heap_head) {
       mm_init();
    }

    /* calculate total block size */
    if (size <= 0) {
        return NULL;
    } else if (size <= 3 * WSIZE) {
        total_size = 4 * WSIZE;
    } else {
        total_size = DSIZE * ((size + WSIZE + DSIZE - 1) / DSIZE);
    }

    /* get corresponding free list */
    list_p = get_list(total_size);
    if (list_p == NULL) {
        return NULL;
    }

    /* try to find a block big enough */
    while (list_p != array_tail) {
        bp = (void *)(size_t)*list_p;
        while (bp != NULL) {
            if (block_size((void *)r2a((size_t)bp)) >= total_size) {
                place((void *)r2a((size_t)bp), total_size);
                dbg_printf("want to return 0x%x from malloc(%d) after find a block big enough \n", (int)bp, (int)size);
                print_heap();
                return (void *)r2a((size_t)bp);
            }
            bp = succ_block((void *)r2a((size_t)bp));
        }
        list_p++;
    }

    /* if there is no appropriate block, then extend heap */
    /*
    if (total_size > (1 << 20)) {
        bp = extend_heap(total_size);
    } else {
        bp = extend_heap(total_size * 2);
    }*/
    //bp = extend_heap(MAX(total_size, 72));
    bp = extend_heap(total_size);
    //bp = extend_heap(MAX(total_size, (CHUNKSIZE + 7) / 8 * 8));
    dbg_printf("just after extend heap\n");
    print_heap();
    if (bp == NULL) {
        return NULL;
    }
    place(bp, total_size);

    dbg_printf("want to return 0x%x from malloc(%d) and can't find big enough block\n", (int)bp, (int)size);
    print_heap();
    return (void *)bp;
}
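To make the size rounding concrete, assuming the usual WSIZE = 4 and DSIZE = 8 for this allocator (the constants are defined elsewhere): a request of size = 13 bytes takes the general branch and gets total_size = 8 * ((13 + 4 + 8 - 1) / 8) = 24 bytes, i.e. the payload plus a word of overhead rounded up to double-word alignment, while any request of at most 3 * WSIZE = 12 bytes is padded to the 16-byte minimum block.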
Example #14
/*
* Return a clone of this object
*/
BlockCipher* Lion::clone() const
   {
   return new Lion(m_hash->clone(), m_cipher->clone(), block_size());
   }
Example #15
/* Reads the partition table in the given SECTOR of BLOCK and
   scans it for partitions of interest to Pintos.

   If SECTOR is 0, so that this is the top-level partition table
   on BLOCK, then PRIMARY_EXTENDED_SECTOR is not meaningful;
   otherwise, it should designate the sector of the top-level
   extended partition table that was traversed to arrive at
   SECTOR, for use in finding logical partitions (see the large
   comment below).

   PART_NR points to the number of non-empty primary or logical
   partitions already encountered on BLOCK.  It is incremented as
   partitions are found. */
static void
read_partition_table (struct block *block, block_sector_t sector,
                      block_sector_t primary_extended_sector,
                      int *part_nr)
{
  /* Format of a partition table entry.  See [Partitions]. */
  struct partition_table_entry
    {
      uint8_t bootable;         /* 0x00=not bootable, 0x80=bootable. */
      uint8_t start_chs[3];     /* Encoded starting cylinder, head, sector. */
      uint8_t type;             /* Partition type (see partition_type_name). */
      uint8_t end_chs[3];       /* Encoded ending cylinder, head, sector. */
      uint32_t offset;          /* Start sector offset from partition table. */
      uint32_t size;            /* Number of sectors. */
    }
  PACKED;

  /* Partition table sector. */
  struct partition_table
    {
      uint8_t loader[446];      /* Loader, in top-level partition table. */
      struct partition_table_entry partitions[4];       /* Table entries. */
      uint16_t signature;       /* Should be 0xaa55. */
    }
  PACKED;

  struct partition_table *pt;
  size_t i;

  /* Check SECTOR validity. */
  if (sector >= block_size (block))
    {
      printf ("%s: Partition table at sector %"PRDSNu" past end of device.\n",
              block_name (block), sector);
      return;
    }

  /* Read sector. */
  ASSERT (sizeof *pt == BLOCK_SECTOR_SIZE);
  pt = malloc (sizeof *pt);
  if (pt == NULL)
    PANIC ("Failed to allocate memory for partition table.");
  block_read (block, sector, pt);

  /* Check signature. */
  if (pt->signature != 0xaa55)
    {
      if (primary_extended_sector == 0)
        printf ("%s: Invalid partition table signature\n", block_name (block));
      else
        printf ("%s: Invalid extended partition table in sector %"PRDSNu"\n",
                block_name (block), sector);
      free (pt);
      return;
    }

  /* Parse partitions. */
  for (i = 0; i < sizeof pt->partitions / sizeof *pt->partitions; i++)
    {
      struct partition_table_entry *e = &pt->partitions[i];

      if (e->size == 0 || e->type == 0)
        {
          /* Ignore empty partition. */
        }
      else if (e->type == 0x05       /* Extended partition. */
               || e->type == 0x0f    /* Windows 98 extended partition. */
               || e->type == 0x85    /* Linux extended partition. */
               || e->type == 0xc5)   /* DR-DOS extended partition. */
        {
          printf ("%s: Extended partition in sector %"PRDSNu"\n",
                  block_name (block), sector);

          /* The interpretation of the offset field for extended
             partitions is bizarre.  When the extended partition
             table entry is in the master boot record, that is,
             the device's primary partition table in sector 0, then
             the offset is an absolute sector number.  Otherwise,
             no matter how deep the partition table we're reading
             is nested, the offset is relative to the start of
             the extended partition that the MBR points to. */
          if (sector == 0)
            read_partition_table (block, e->offset, e->offset, part_nr);
          else
            read_partition_table (block, e->offset + primary_extended_sector,
                                  primary_extended_sector, part_nr);
        }
      else
        {
          ++*part_nr;

          found_partition (block, e->type, e->offset + sector,
                           e->size, *part_nr);
        }
    }

  free (pt);
}
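As a concrete trace of the offset handling above: if the MBR (sector 0) contains an extended entry with offset 2048, the extended table is read at absolute sector 2048 and primary_extended_sector becomes 2048. Inside that table, a non-extended entry with offset 63 describes a logical partition starting at sector 63 + 2048 = 2111 (e->offset + sector), while a further extended entry with offset 4096 causes the next table to be read at sector 4096 + 2048 = 6144 (e->offset + primary_extended_sector).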
Example #16
/*
* Return the name of this type
*/
std::string Lion::name() const
   {
   return "Lion(" + m_hash->name() + "," +
                    m_cipher->name() + "," +
                    std::to_string(block_size()) + ")";
   }
Example #17
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
       *failures = true;
       return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify &&
        !g1->is_obj_dead(obj, this) &&
        !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                   ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}
Example #18
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}
Example #19
//Returns block pointer of the previous block
static inline uint32_t* prev_bp(uint32_t* const bp) {
    REQUIRES(bp != NULL);
    REQUIRES(in_heap(bp));

    return bp - (block_size(get_header(bp) - 1)/WSIZE);
}
Example #20
  void TrackerFile::update()
  {
    std::size_t block_size ( 1 );
    if ( !ptr_DVECTORS.empty() )
    {
      block_size = ptr_DVECTORS[ 0 ] -> size();
    }
    if ( !ptr_CVECTORS.empty() )
    {
      block_size = ptr_CVECTORS[ 0 ] -> size();
    }
    if ( !ptr_DMESH.empty() )
    {
      block_size = ptr_DMESH[ 0 ] -> get_nnodes();
    }
    if ( !ptr_CMESH.empty() )
    {
      block_size = ptr_CMESH[ 0 ] -> get_nnodes();
    }
    if ( !ptr_CCMESH.empty() )
    {
      block_size = ptr_CCMESH[ 0 ] -> get_nnodes();
    }

    for ( std::size_t line = 0; line < block_size; ++line )
    {
      write_scalar_data();
      if ( !ptr_DVECTORS.empty() )
      {
        // for each vector ptr
        for ( std::size_t i = 0; i < ptr_DVECTORS.size(); ++i )
        {
          file << ( *ptr_DVECTORS[ i ] ) [ line ] << " ";
        }
      }
      if ( !ptr_CVECTORS.empty() )
      {
        // for each vector ptr
        for ( std::size_t i = 0; i < ptr_CVECTORS.size(); ++i )
        {
          file << ( *ptr_CVECTORS[ i ] ) [ line ].real() << " ";
          file << ( *ptr_CVECTORS[ i ] ) [ line ].imag() << " ";
        }
      }
      if ( !ptr_DMESH.empty() )
      {
        // for each mesh ptr
        for ( std::size_t i = 0; i < ptr_DMESH.size(); ++i )
        {
          file << ( *ptr_DMESH[ i ] ).coord( line ) << " ";
          for ( std::size_t var = 0; var < ptr_DMESH[ i ] -> get_nvars(); ++var )
          {
            file << ( *ptr_DMESH[ i ] )( line, var ) << " ";
          }
        }
      }
      if ( !ptr_CMESH.empty() )
      {
        // for each mesh ptr
        for ( std::size_t i = 0; i < ptr_CMESH.size(); ++i )
        {
          file << ( *ptr_CMESH[ i ] ).coord( line ) << " ";
          for ( std::size_t var = 0; var < ptr_CMESH[ i ] -> get_nvars(); ++var )
          {
            file << ( *ptr_CMESH[ i ] )( line, var ).real() << " ";
            file << ( *ptr_CMESH[ i ] )( line, var ).imag() << " ";
          }
        }
      }
      if ( !ptr_CCMESH.empty() )
      {
        // for each mesh ptr
        for ( std::size_t i = 0; i < ptr_CCMESH.size(); ++i )
        {
          file << ( *ptr_CCMESH[ i ] ).coord( line ).real() << " ";
          file << ( *ptr_CCMESH[ i ] ).coord( line ).imag() << " ";
          for ( std::size_t var = 0; var < ptr_CCMESH[ i ] -> get_nvars(); ++var )
          {
            file << ( *ptr_CCMESH[ i ] )( line, var ).real() << " ";
            file << ( *ptr_CCMESH[ i ] )( line, var ).imag() << " ";
          }
        }
      }
      file << std::endl;
    }
    // flush the buffer
    file.flush();
  }
Example #21
 MemDisk::block_t MemDisk::size() const noexcept {
   // we are NOT going to round up to "support" unevenly sized
   // disks that are not created as multiples of sectors
   return (image_end_ - image_start_) / block_size();
 }
Example #22
// Return the header to the previous block
static inline uint32_t* block_prev(uint32_t* const block) {
    REQUIRES(block != NULL);
    REQUIRES(in_heap(block));

    return block - block_size(block - 1) - 2;
}
Example #23
static int
nilfs_get_sb(struct file_system_type *fs_type, int flags,
	     const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	struct the_nilfs *nilfs;
	int err, need_to_close = 1;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return PTR_ERR(sd.bdev);

	/*
	 * To get mount instance using sget() vfs-routine, NILFS needs
	 * much more information than normal filesystems to identify mount
	 * instance.  For snapshot mounts, not only a mount type (ro-mount
	 * or rw-mount) but also a checkpoint number is required.
	 */
	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	nilfs = find_or_create_nilfs(sd.bdev);
	if (!nilfs) {
		err = -ENOMEM;
		goto failed;
	}

	mutex_lock(&nilfs->ns_mount_mutex);

	if (!sd.cno) {
		/*
		 * Check if an exclusive mount exists or not.
		 * Snapshot mounts coexist with a current mount
		 * (i.e. rw-mount or ro-mount), whereas rw-mount and
		 * ro-mount are mutually exclusive.
		 */
		down_read(&nilfs->ns_super_sem);
		if (nilfs->ns_current &&
		    ((nilfs->ns_current->s_super->s_flags ^ flags)
		     & MS_RDONLY)) {
			up_read(&nilfs->ns_super_sem);
			err = -EBUSY;
			goto failed_unlock;
		}
		up_read(&nilfs->ns_super_sem);
	}

	/*
	 * Find existing nilfs_sb_info struct
	 */
	sd.sbi = nilfs_find_sbinfo(nilfs, !(flags & MS_RDONLY), sd.cno);

	/*
	 * Get super block instance holding the nilfs_sb_info struct.
	 * A new instance is allocated if no existing mount is present or
	 * existing instance has been unmounted.
	 */
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, &sd);
	if (sd.sbi)
		nilfs_put_sbinfo(sd.sbi);

	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed_unlock;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		/* New superblock instance created */
		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0,
				       nilfs);
		if (err)
			goto cancel_new;

		s->s_flags |= MS_ACTIVE;
		need_to_close = 0;
	}

	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	if (need_to_close)
		close_bdev_exclusive(sd.bdev, mode);
	simple_set_mnt(mnt, s);
	return 0;

 failed_unlock:
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
 failed:
	close_bdev_exclusive(sd.bdev, mode);

	return err;

 cancel_new:
	/* Abandoning the newly allocated superblock */
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	deactivate_locked_super(s);
	/*
	 * deactivate_locked_super() invokes close_bdev_exclusive().
	 * We must finish all post-cleaning before this call;
	 * put_nilfs() needs the block device.
	 */
	return err;
}
Example #24
u64_t block_write(struct block_t * blk, u8_t * buf, u64_t offset, u64_t count)
{
	u64_t blkno, blksz, blkcnt, capacity;
	u64_t len, tmp;
	u64_t ret = 0;
	u8_t * p;

	if(!blk || !buf || !count)
		return 0;

	blksz = block_size(blk);
	blkcnt = block_count(blk);
	if(!blksz || !blkcnt)
		return 0;

	capacity = block_capacity(blk);
	if(offset >= capacity)
		return 0;

	tmp = capacity - offset;
	if(count > tmp)
		count = tmp;

	p = malloc(blksz);
	if(!p)
		return 0;

	blkno = offset / blksz;
	tmp = offset % blksz;
	if(tmp > 0)
	{
		len = blksz - tmp;
		if(count < len)
			len = count;

		if(blk->read(blk, p, blkno, 1) != 1)
		{
			free(p);
			return ret;
		}

		memcpy((void *)(&p[tmp]), (const void *)buf, len);

		if(blk->write(blk, p, blkno, 1) != 1)
		{
			free(p);
			return ret;
		}

		buf += len;
		count -= len;
		ret += len;
		blkno += 1;
	}

	tmp = count / blksz;
	if(tmp > 0)
	{
		len = tmp * blksz;

		if(blk->write(blk, buf, blkno, tmp) != tmp)
		{
			free(p);
			return ret;
		}

		buf += len;
		count -= len;
		ret += len;
		blkno += tmp;
	}

	if(count > 0)
	{
		len = count;

		if(blk->read(blk, p, blkno, 1) != 1)
		{
			free(p);
			return ret;
		}

		memcpy((void *)(&p[0]), (const void *)buf, len);

		if(blk->write(blk, p, blkno, 1) != 1)
		{
			free(p);
			return ret;
		}

		ret += len;
	}

	free(p);
	return ret;
}
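To illustrate the three-phase split above: with a 512-byte block size, writing count = 2000 bytes at offset = 700 starts in block 1 at in-block offset 188, so the head phase read-modify-writes 324 bytes of block 1; the middle phase then writes three full blocks (1536 bytes) straight from the caller's buffer; and the tail phase read-modify-writes the remaining 140 bytes into block 5, returning 2000 in total.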
Example #25
/*
 * mm_checkheap
 */
void mm_checkheap(int verbose){
    void *bp;
    unsigned *list_p;
    unsigned *pred, *succ;
    int i;
    if (!verbose) {
        return;
    }
    bp = data_head + DSIZE;
    /* checking the heap */
    /* prologue */
    if (!(block_size(bp) == 8 && block_alloc(bp) == 1)) {
        printf("Invariant Error: prologue block\n");
    }
    /* blocks */
    bp = next_block(bp);
    while (block_size(bp) != 0) {
        if ((long)bp % DSIZE != 0) {
            printf("Invariant Error: block's address isn't aligned\n");
        }
        if (!block_alloc(bp)) {
            if (*(int *)HEAD(bp) != *(int *)FOOT(bp)) {
                printf("Invariant Error: block head and foot don't match\n");
            }
        }
        if (!block_prev_alloc(bp)) {
            if (block_prev_alloc(bp) != block_alloc(prev_block(bp))) {
                printf("Invariant Error: prev alloc bit doesn't match prev block\n");
            }
            if (block_alloc(bp) == 0) {
                printf("Invariant Error: found consecutive free blocks\n");
            }
        }
        if (block_alloc(bp) == 0 && block_alloc(next_block(bp)) == 0) {
            printf("Invariant Error: found consecutive free blocks\n");
        }
        if (block_size(bp) < 4 * WSIZE) {
            printf("Invariant Error: block is too small\n");
        }
        bp = next_block(bp);
    } 
    /* epilogue */
    if (!(block_size(bp) == 0 && block_alloc(bp) == 1)) {
        printf("Invariant Error: epilogue block\n");
    }

    /* checking the free list */
    list_p = (unsigned *)heap_head;
    for (i = 0; i < ARRAYSIZE; i++) {
        if (!*list_p) {
            continue;
        }
        bp = (unsigned *)r2a((size_t)*list_p);
        while (bp != NULL) {
            pred = pred_block(bp);
            succ = succ_block(bp);
            if (pred != NULL) {
                if (*(pred + 1) != a2r((size_t)bp)) {
                    printf("Invariant Error: inconsistent pointer\n");
                }
            }
            if (succ != NULL) {
                if (*succ != a2r((size_t)bp)) {
                    printf("Invariant Error: inconsistent pointer\n");
                }
            }
            if (get_list(block_size((void *)bp)) != list_p) {
                printf("Invariant Error: block size doesn't match list\n");
            }
            bp = succ;
        }
        list_p++;
    }
    print_heap();
}
Example #26
inline void test_basic_api(uint32_t symbols, uint32_t symbol_size)
{

    // Common setting
    typename Encoder::factory encoder_factory(symbols, symbol_size);
    auto encoder = encoder_factory.build();

    typename Decoder::factory decoder_factory(symbols, symbol_size);
    auto decoder = decoder_factory.build();

    EXPECT_TRUE(symbols == encoder_factory.max_symbols());
    EXPECT_TRUE(symbol_size == encoder_factory.max_symbol_size());
    EXPECT_TRUE(symbols == encoder->symbols());
    EXPECT_TRUE(symbol_size == encoder->symbol_size());

    EXPECT_TRUE(symbols == decoder_factory.max_symbols());
    EXPECT_TRUE(symbol_size == decoder_factory.max_symbol_size());
    EXPECT_TRUE(symbols == decoder->symbols());
    EXPECT_TRUE(symbol_size == decoder->symbol_size());

    EXPECT_TRUE(encoder->symbol_length() > 0);
    EXPECT_TRUE(decoder->symbol_length() > 0);

    EXPECT_TRUE(encoder->block_size() == symbols * symbol_size);
    EXPECT_TRUE(decoder->block_size() == symbols * symbol_size);

    EXPECT_TRUE(encoder_factory.max_payload_size() >=
                encoder->payload_size());

    EXPECT_TRUE(decoder_factory.max_payload_size() >=
                decoder->payload_size());

    EXPECT_EQ(encoder_factory.max_payload_size(),
              decoder_factory.max_payload_size());

    // Encode/decode operations
    EXPECT_EQ(encoder->payload_size(), decoder->payload_size());

    std::vector<uint8_t> payload(encoder->payload_size());

    std::vector<uint8_t> data_in = random_vector(encoder->block_size());
    std::vector<uint8_t> data_in_copy(data_in);

    sak::mutable_storage storage_in = sak::storage(data_in);
    sak::mutable_storage storage_in_copy = sak::storage(data_in_copy);

    EXPECT_TRUE(sak::equal(storage_in, storage_in_copy));

    // Only used for prime fields; let's reconsider how to implement
    // this less intrusively
    uint32_t prefix = 0;

    if(fifi::is_prime2325<typename Encoder::field_type>::value)
    {
        // This field only works for multiples of uint32_t
        assert((encoder->block_size() % 4) == 0);

        uint32_t block_length = encoder->block_size() / 4;

        fifi::prime2325_binary_search search(block_length);
        prefix = search.find_prefix(storage_in_copy);

        // Apply the negated prefix
        fifi::apply_prefix(storage_in_copy, ~prefix);
    }

    encoder->set_symbols(storage_in_copy);

    // Set the encoder non-systematic
    if(kodo::is_systematic_encoder(encoder))
        kodo::set_systematic_off(encoder);

    while( !decoder->is_complete() )
    {
        uint32_t payload_used = encoder->encode( &payload[0] );
        EXPECT_TRUE(payload_used <= encoder->payload_size());

        decoder->decode( &payload[0] );
    }

    std::vector<uint8_t> data_out(decoder->block_size(), '\0');
    decoder->copy_symbols(sak::storage(data_out));

    if(fifi::is_prime2325<typename Encoder::field_type>::value)
    {
        // Now we have to apply the negated prefix to the decoded data
        fifi::apply_prefix(sak::storage(data_out), ~prefix);
    }

    EXPECT_TRUE(std::equal(data_out.begin(),
                           data_out.end(),
                           data_in.begin()));
}
Example #27
/// @example use_cached_symbol_decoder.cpp
///
/// This example shows how to use the cached symbol decoder to "extract"
/// the symbol coding coefficients and the encoded symbol data from an
/// incoming symbol.
int main()
{
    // The finite field we will use in the example. You can try
    // with other fields by specifying e.g. fifi::binary8 for the
    // extension field 2^8
    typedef fifi::binary finite_field;

    // Set the number of symbols (i.e. the generation size in RLNC
    // terminology) and the size of a symbol in bytes
    uint32_t symbols = 8;
    uint32_t symbol_size = 160;

    // Typedefs for the encoder/decoder types we wish to use
    typedef kodo::full_rlnc_encoder<finite_field> rlnc_encoder;
    typedef kodo::full_rlnc_decoder<finite_field> rlnc_decoder;

    typedef kodo::symbol_info_decoder<finite_field> rlnc_info_decoder;

    // In the following we will make an encoder/decoder factory.
    // The factories are used to build actual encoders/decoders.
    // Each stack we use has its own factory.
    rlnc_encoder::factory encoder_factory(symbols, symbol_size);
    auto encoder = encoder_factory.build();

    rlnc_decoder::factory decoder_factory(symbols, symbol_size);
    auto decoder = decoder_factory.build();

    rlnc_info_decoder::factory info_decoder_factory(symbols, symbol_size);
    auto info_decoder = info_decoder_factory.build();

    // Allocate some storage for a "payload" the payload is what we would
    // eventually send over a network
    std::vector<uint8_t> payload(encoder->payload_size());

    // Allocate some data to encode. In this case we make a buffer
    // with the same size as the encoder's block size (the max.
    // amount a single encoder can encode)
    std::vector<uint8_t> data_in(encoder->block_size());

    // Just for fun - fill the data with random data
    for(auto &e: data_in)
        e = rand() % 256;

    // Assign the data buffer to the encoder so that we may start
    // to produce encoded symbols from it
    encoder->set_symbols(sak::storage(data_in));

    while( !decoder->is_complete())
    {
        // Encode a packet into the payload buffer
        encoder->encode( &payload[0] );

        // Here we "simulate" a packet loss of approximately 50%
        // by dropping half of the encoded packets.
        // When running this example you will notice that the initial
        // symbols are received systematically (i.e. uncoded). After
        // sending all symbols once uncoded, the encoder will switch
        // to full coding, in which case you will see the full encoding
        // vectors being sent and received.
        if((rand() % 2) == 0)
            continue;

        // Pass the encoded packet to the info decoder. After this
        // information about the coded symbol can be fetched using the
        // cached_symbol_decoder API
        info_decoder->decode( &payload[0] );

        if(!info_decoder->cached_symbol_coded())
        {
            // The symbol was uncoded so we may ask the cache which of the
            // original symbols we have received.

            std::cout << "Symbol was uncoded, index = "
                      << info_decoder->cached_symbol_index() << std::endl;

            // Now we pass the data directly into our actual decoder. This is
            // done using the "Codec API" directly, and not through the "Payload
            // API" as we would typically do.
            decoder->decode_symbol( info_decoder->cached_symbol_data(),
                                    info_decoder->cached_symbol_index());

        }
        else
        {
            // The symbol was coded so we may ask the cache to return
            // the coding coefficients used to create the encoded symbol.

            std::cout << "Symbol was coded, encoding vector = ";

            const uint8_t* c = info_decoder->cached_symbol_coefficients();

            // We loop through the coefficient buffer and print the coefficients
            for(uint32_t i = 0; i < info_decoder->symbols(); ++i)
            {
                std::cout << (uint32_t) fifi::get_value<finite_field>(c, i)
                          << " ";
            }

            std::cout << std::endl;

            // Pass that packet to the decoder, as with the uncoded symbols
            // above we pass it directly to the "Codec API"
            decoder->decode_symbol(info_decoder->cached_symbol_data(),
                                   info_decoder->cached_symbol_coefficients());

        }
    }

    // The decoder is complete, now copy the symbols from the decoder
    std::vector<uint8_t> data_out(decoder->block_size());
    decoder->copy_symbols(sak::storage(data_out));

    // Check we properly decoded the data
    if (std::equal(data_out.begin(), data_out.end(), data_in.begin()))
    {
        std::cout << "Data decoded correctly" << std::endl;
    }
    else
    {
        std::cout << "Unexpected failure to decode "
                  << "please file a bug report :)" << std::endl;
    }
}
Example #28
inline void
invoke_reuse_incomplete(uint32_t symbols, uint32_t symbol_size)
{

    bool do_complete;

    typename Encoder::factory encoder_factory(symbols, symbol_size);
    typename Decoder::factory decoder_factory(symbols, symbol_size);

    // Use factory a lot of times
    for (uint32_t i = 0; i < 100; ++i)
    {
        // Build coders
        auto encoder = encoder_factory.build();
        auto decoder = decoder_factory.build();

        // Prepare buffers
        std::vector<uint8_t> payload(encoder->payload_size());
        std::vector<uint8_t> data_in(encoder->block_size());

        // Fill with random data
        for (auto &e: data_in)
            e = rand() % 256;

        // Put data in encoder
        encoder->set_symbols(sak::storage(data_in));

        if (rand() % 100 > 90)
        {
            do_complete = false;
        }
        else
        {
            do_complete = true;
        }

        // Start encoding/decoding
        while (!decoder->is_complete())
        {
            encoder->encode(&payload[0]);

            // Lose a packet with some probability
            if (rand() % 100 > 90)
                continue;

            decoder->decode(&payload[0]);

            // Stop decoding after a while, with some probability
            if (!do_complete && decoder->rank() == symbols - 2)
                break;
        }

        // Check if completed decoders are correct
        if (decoder->is_complete())
        {
            std::vector<uint8_t> data_out(decoder->block_size());
            decoder->copy_symbols(sak::storage(data_out));

            ASSERT_TRUE(sak::equal(sak::storage(data_out),
                                   sak::storage(data_in)));
        }
    }

}
Example #29
// Return the header to the next block
static inline uint32_t* block_next(uint32_t* const block) {
    REQUIRES(block != NULL);
    REQUIRES(in_heap(block));

    return block + block_size(block) + 2;
}
Example #30
void run_test_random_annex_partial(uint32_t max_symbols,
                                 uint32_t max_symbol_size,
                                 uint32_t multiplier)
{

    uint32_t object_size = max_symbols * max_symbol_size * multiplier;
    object_size -= (rand() % object_size);

    uint32_t annex_size = kodo::max_annex_size(
        max_symbols, max_symbol_size, object_size);

    if(annex_size > 0)
    {
        // Randomize the actual annex size
        annex_size -= (rand() % annex_size);
    }

    typedef kodo::random_annex_encoder<Encoder, Partitioning>
        random_annex_encoder;

    typedef kodo::random_annex_decoder<Decoder, Partitioning>
        random_annex_decoder;

    std::vector<uint8_t> data_in = random_vector(object_size);
    std::vector<uint8_t> data_out(object_size, '\0');

    typename Encoder::factory encoder_factory(max_symbols, max_symbol_size);
    typename Decoder::factory decoder_factory(max_symbols, max_symbol_size);

    random_annex_encoder obj_encoder(
        annex_size, encoder_factory, sak::storage(data_in));

    random_annex_decoder obj_decoder(
        annex_size, decoder_factory, obj_encoder.object_size());

    EXPECT_TRUE(obj_encoder.encoders() >= 1);
    EXPECT_TRUE(obj_decoder.decoders() >= 1);
    EXPECT_TRUE(obj_encoder.encoders() == obj_decoder.decoders());

    uint32_t bytes_used = 0;

    for(uint32_t i = 0; i < obj_encoder.encoders(); ++i)
    {
        auto encoder = obj_encoder.build(i);
        typename random_annex_decoder::pointer_type decoder =
            obj_decoder.build(i);

        if(kodo::has_systematic_encoder<Encoder>::value)
            kodo::set_systematic_off(encoder);

        EXPECT_TRUE(encoder->block_size() >= encoder->bytes_used());
        EXPECT_TRUE(decoder->block_size() >= decoder->bytes_used());

        EXPECT_TRUE(encoder->block_size() == decoder->block_size());
        EXPECT_TRUE(encoder->bytes_used() == decoder->bytes_used());
        EXPECT_TRUE(encoder->payload_size() == decoder->payload_size());

        std::vector<uint8_t> payload(encoder->payload_size());

        while(!decoder->is_complete())
        {
            encoder->encode( &payload[0] );
            decoder->decode( &payload[0] );
        }

        sak::mutable_storage storage = sak::storage(
            &data_out[0] + bytes_used, decoder->bytes_used());

        decoder.unwrap()->copy_symbols(storage);

        bytes_used += decoder->bytes_used();
    }

    EXPECT_EQ(bytes_used, object_size);
    EXPECT_TRUE(std::equal(data_in.begin(),
                           data_in.end(),
                           data_out.begin()));

}