Example #1
#include <stdio.h>
#include <stdlib.h>
#include "memcheck.h"   /* pre-3.2 Valgrind client requests; see the note after this example */

int main(void)
{
   int i, sum, m;
   char* aa = calloc(100,1);
   sum = 0;

   VALGRIND_CHECK_READABLE(aa,100);

   m = VALGRIND_MAKE_WRITABLE( &aa[49], 1 );
   VALGRIND_CHECK_WRITABLE(aa,100);

   printf("m_na: returned value is %d\n", m );

   for (i = 0; i < 100; i++)
     sum += aa[i];
   printf("sum is %d\n", sum);

   m = VALGRIND_DISCARD(m);
   printf("m_rm: returned value is %d\n", m );

   for (i = 0; i < 100; i++)
     sum += aa[i];
   printf("sum is %d\n", sum);

   return 0;
}
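
The client requests above use the pre-3.2 Memcheck names. A minimal compatibility sketch, assuming a current <valgrind/memcheck.h>, mapping the legacy names used throughout these examples onto their modern equivalents:

#include <valgrind/memcheck.h>

/* Valgrind 3.2 renamed the definedness requests; older code such as the
 * examples here is commonly ported with a shim along these lines. */
#ifndef VALGRIND_MAKE_WRITABLE
#define VALGRIND_MAKE_WRITABLE(a, len)  VALGRIND_MAKE_MEM_UNDEFINED(a, len)
#define VALGRIND_MAKE_READABLE(a, len)  VALGRIND_MAKE_MEM_DEFINED(a, len)
#define VALGRIND_MAKE_NOACCESS(a, len)  VALGRIND_MAKE_MEM_NOACCESS(a, len)
#define VALGRIND_CHECK_WRITABLE(a, len) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(a, len)
#define VALGRIND_CHECK_READABLE(a, len) VALGRIND_CHECK_MEM_IS_DEFINED(a, len)
#endif
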
Example #2
static void add_single_pages(struct page *base)
/* Effects: Adds pages at base to the single_pages list */
{
  // fprintf(stderr, "## add_single_pages: base=%p, base->pagecount=%d\n",
  //         base, base->pagecount);
  assert(base->pagecount > 0);
  pageid n = base->pagecount;
  struct page *prev = base->prev_address, *basenext = base->next_address,
    *next;

  single_page_count += n;

  for (;;)
    {
      // split 'base' into single pages.
      ASSERT_FREE(base);
      VALGRIND_MAKE_WRITABLE(base, sizeof(struct page));
      base->free = 0; /* Not free so that coalesce won't steal these back */
      base->prev_address = prev;
      // enforce the invariant that 'pagecount' is always correct: make sure
      // we record that this page has size 1
      base->pagecount = 1;
      prev = base;
      base->next = single_pages;
      single_pages = base;
      if (--n == 0)
        break;
      next = (struct page *)((char *)base + RPAGESIZE);
      base->next_address = next;
      base = next;
    }
  base->next_address = basenext;
  basenext->prev_address = base;
}
Example #3
region newsubregion(region parent)
{
  // fprintf(stderr, "## newsubregion\n");
  char *first;
  region r;

  first = (char *)alloc_single_page(NULL);
  preclear(first + PAGE_HEADER_SIZE, RPAGESIZE - PAGE_HEADER_SIZE);

#ifdef STAGGER_RSTART
  /* stagger regions across cache lines a bit */
  rstart += 64;
#if RPAGESIZE < 1024
#error RPAGESIZE must be at least 1024, or change the next if.
#endif
  if (rstart >= 16 * 64) rstart = 0;
#endif
  r = (region)(first + rstart + PAGE_HEADER_SIZE);
  VALGRIND_MAKE_WRITABLE(r, sizeof(*r));
  postclear(r, sizeof *r);
  initregion(r);

  if (parent)
    link_region(r, parent);

  // fprintf(stderr, "## create mempool %p\n", r);
  VALGRIND_CREATE_MEMPOOL(r, 0, 0);
  ++num_regions_active;
  ++num_regions_created;
  return r;
}
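
newsubregion() registers each new region as a Memcheck memory pool. For reference, a minimal sketch of the mempool request lifecycle; the function and its arguments are illustrative, not part of the region library:

#include <stddef.h>
#include <valgrind/memcheck.h>

/* Illustrative only: the pool is identified by an anchor address, and each
 * chunk handed out of it is reported so Memcheck can track it like malloc(). */
static void mempool_lifecycle(void *pool, void *chunk, size_t size)
{
    VALGRIND_CREATE_MEMPOOL(pool, /*rzB=*/0, /*is_zeroed=*/0);
    VALGRIND_MEMPOOL_ALLOC(pool, chunk, size); /* chunk becomes addressable  */
    VALGRIND_MEMPOOL_FREE(pool, chunk);        /* chunk becomes inaccessible */
    VALGRIND_DESTROY_MEMPOOL(pool);            /* drop all pool bookkeeping  */
}
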
Example #4
/* Case 3 - memory definedness doesn't survive remapping */
static void test3()
{
	char *m = mm(0, pgsz * 5, PROT_READ|PROT_WRITE);

	VALGRIND_MAKE_WRITABLE(&m[pgsz], pgsz);
	mm(&m[pgsz], pgsz, PROT_READ);
	VALGRIND_CHECK_READABLE(&m[pgsz], pgsz); /* OK */
}
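
Both this test and the next rely on an mm() helper and a pgsz value (presumably the system page size) that are not part of this excerpt. A plausible sketch of mm(), assuming it wraps mmap() with an anonymous mapping, using MAP_FIXED when a target address is supplied:

#include <stdlib.h>
#include <sys/mman.h>

/* Assumed helper, not shown in the original excerpt. */
static char *mm(char *addr, size_t size, int prot)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS | (addr ? MAP_FIXED : 0);
    void *p = mmap(addr, size, prot, flags, -1, 0);
    if (p == MAP_FAILED)
        abort();
    return p;
}
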
Example #5
/* Case 5 - mprotect doesn't affect definedness */
static void test5()
{
	char *m = mm(0, pgsz * 5, PROT_READ|PROT_WRITE);
	
	VALGRIND_MAKE_WRITABLE (m, pgsz*5);
	memset(m, 'x', 10);
	VALGRIND_CHECK_READABLE(m, 10);	/* OK */
	VALGRIND_CHECK_READABLE(m+10, 10); /* BAD */

	mprotect(m, pgsz*5, PROT_NONE);
	mprotect(m, pgsz*5, PROT_READ);

	VALGRIND_CHECK_READABLE(m, 10);	/* still OK */
	VALGRIND_CHECK_READABLE(m+20, 10); /* BAD */
}
Example #6
static void delregion(region r)
{
  nochildren(r);
  VALGRIND_DESTROY_MEMPOOL(r);
  VALGRIND_MAKE_WRITABLE(r, sizeof(struct page));
  free_all_pages(r, &r->normal);
  // VALGRIND_DO_QUICK_LEAK_CHECK;
  // VALGRIND_FREELIKE_BLOCK(r, 0);
  // fprintf(stderr, "##delregion: r->normal.page.end = %p\n", r->normal.page.end);
  // fprintf(stderr, "##           r+1 = %p\n", r+1);
  if (r->normal.page.end) {
    // VALGRIND_MAKE_NOACCESS((char*)(r+1), r->normal.page.end - (char*)(r+1));
  }
  // VALGRIND_MAKE_NOACCESS(r, sizeof(*r));
  --num_regions_active;
}
Example #7
MemoryPool::~MemoryPool(void)
{
    pool_destroying = true;

    decrement_usage(used_memory.value());
    decrement_mapping(mapped_memory.value());

#ifdef USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);

    // Do not forget to discard stack traces for delayed free blocks
    for (size_t i = 0; i < delayedFreeCount; i++)
    {
        MemBlock* block = delayedFree[i];
        void* object = &block->body;

        VALGRIND_DISCARD(
            VALGRIND_MAKE_MEM_DEFINED(block, OFFSET(MemBlock*, body)));
        VALGRIND_DISCARD(
            VALGRIND_MAKE_WRITABLE(object, block->length));
    }
#endif

    if (parent)
    {
        MemoryPool::release(freeObjects);
    }
    else
    {
        releaseRaw(pool_destroying, freeObjects, ((threshold + roundingSize) / roundingSize) * sizeof(void*));
    }
    freeObjects = NULL;

    while (MemSmallHunk* hunk = smallHunks)
    {
        smallHunks = hunk->nextHunk;
        releaseRaw(pool_destroying, hunk, minAllocation);
    }

    while (MemBigHunk* hunk = bigHunks)
    {
        bigHunks = hunk->nextHunk;
        releaseRaw(pool_destroying, hunk, hunk->length);
    }
}
Example #8
struct page *region_get_mem(size_t s)
{
  size_t request_bytes;
  void *mem;
  struct page *newp;

  /* Don't get less than K * RPAGESIZE extra memory (K * RPAGESIZE
     is the minimum useful size for something on unused_pages) */
  if (s + K * RPAGESIZE < MINIMUM_MEM_REQUEST)
    request_bytes = MINIMUM_MEM_REQUEST;
  else
    request_bytes = s;

#if 0
  request_bytes = ALIGN(request_bytes, 65536);
#endif

  mem = (struct page *)MMAP(0, request_bytes+RPAGESIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
  if (!mem) { out_of_memory(); abort(); }

  VALGRIND_MALLOCLIKE_BLOCK(mem, request_bytes+RPAGESIZE, 0, 0);
  // VALGRIND_MAKE_NOACCESS(mem, request_bytes+RPAGESIZE);
  newp = PALIGN(mem, RPAGESIZE);

  VALGRIND_MAKE_WRITABLE(newp, sizeof(struct page));
  memset(newp, 0, sizeof(struct page));

  if (mem == newp) /* Maybe we were lucky! */
    request_bytes += RPAGESIZE;

  addbyaddress(newp);

  /* Add the new memory to unused_pages */
#ifndef NMEMDEBUG
  set_region_range(newp, (char *)newp + s, FREEPAGE);
#endif
  total_page_count += request_bytes >> RPAGELOG;
  newp->pagecount = request_bytes >> RPAGELOG;
  assert(newp->pagecount > 0);
  newp->free = 1;
  addfront(&unused_pages, newp);

  return newp;
}
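
region_get_mem() describes each fresh mapping to Memcheck with VALGRIND_MALLOCLIKE_BLOCK; that request must eventually be paired with VALGRIND_FREELIKE_BLOCK, or Memcheck will report the mapping as leaked. A hedged sketch of the matching teardown; region_free_mem() is hypothetical, not part of the original source:

#include <sys/mman.h>
#include <valgrind/memcheck.h>

/* Hypothetical counterpart to region_get_mem(): tell Memcheck that the
 * fake "heap block" is gone before returning the pages to the OS. */
static void region_free_mem(void *mem, size_t bytes)
{
    VALGRIND_FREELIKE_BLOCK(mem, /*rzB=*/0);
    munmap(mem, bytes);
}
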
Example #9
/**
 * In developer builds, clobber a region of memory.
 *
 * If we think a string buffer is longer than it really is, this ought
 * to make the failure obvious, by segfaulting (if in the heap) or by
 * killing the return address (on the stack), or by trapping under a
 * memory debugger.
 *
 * This is meant to catch possible string overflows, even if the
 * actual string copied is not big enough to cause an overflow.
 *
 * In addition, under Valgrind the buffer is marked as uninitialized.
 **/
void clobber_region(const char *fn, unsigned int line, char *dest, size_t len)
{
#ifdef DEVELOPER
	global_clobber_region_function = fn;
	global_clobber_region_line = line;

	/* F1 is odd and 0xf1f1f1f1 shouldn't be a valid pointer */
	memset(dest, 0xF1, len);
#ifdef VALGRIND
	/* Even though we just wrote to this, from the application's
	 * point of view it is not initialized.
	 *
	 * (This is not redundant with the clobbering above.  The
	 * marking might not actually take effect if we're not running
	 * under valgrind.) */
	VALGRIND_MAKE_WRITABLE(dest, len);
#endif /* VALGRIND */
#endif /* DEVELOPER */
}
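
Callers normally reach clobber_region() through a wrapper macro so that the calling function and line are captured automatically. A minimal sketch; the macro name is illustrative:

/* Illustrative wrapper: records the call site in the globals used above. */
#define CLOBBER_REGION(dest, len) \
        clobber_region(__FUNCTION__, __LINE__, (dest), (len))
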
Example #10
struct page *alloc_split(struct page *split, int n, struct page *next)
/* Assumes freepages_lock held */
{
  // fprintf(stderr, "## alloc_split: split=%p, n=%d, next=%p, split->pagecount=%d\n", split, n, next, split->pagecount);
#ifndef NMEMDEBUG
  /* These pages had better be free */
  pageid i, pnb = PAGENB(split);

  assert(n > 0);
  assert(split->pagecount >= n);
  for (i = pnb; i < pnb + split->pagecount; i++)
    assert(page_region(i) == FREEPAGE);
#endif
  if (split->pagecount > n)
    {
      struct page *splitoff;

      /* Keep first part of block */
      split->pagecount -= n;
      assert(split->pagecount > 0);
      /* Return latter part of block */
      splitoff = split;
      split = (struct page *)((char *)split + (split->pagecount << RPAGELOG));
      VALGRIND_MAKE_WRITABLE(split, sizeof(struct page));
      memset(split, 0, sizeof(struct page));

      /* Update the by-address list */
      insertbefore_address(split, splitoff->next_address);
    }
  else
    {
      /* remove split from list */
      unlink_page(&unused_pages, split);
    }
  split->next = next;
  split->pagecount = n;
  assert(split->pagecount > 0);
  split->free = 0;

  return split;
}
Example #11
void allocate_chunk_at(void) {
  
  extern LUSHAPI alloc_root_t at_alloc;
  assert(at_alloc.freelist==NULL);
  chunk_header_t *chkhd = malloc(sizeof(chunk_header_t) + SIZEOF_CHUNK_AT);
  if (chkhd) {
    chkhd->begin = (empty_alloc_t *) ((char*)chkhd + sizeof(chunk_header_t));
    chkhd->end = (empty_alloc_t *) ((char*)chkhd->begin + SIZEOF_CHUNK_AT);
    chkhd->next = at_alloc.chunklist;
    at_alloc.chunklist = chkhd;
    VALGRIND_MAKE_NOACCESS(chkhd->begin, SIZEOF_CHUNK_AT);
    
    char *p = (char*)chkhd->begin + REDZONE_SIZE;
    for (int i = 0; i < CONSCHUNKSIZE; i++, p+=SIZEOF_CELL) {
      VALGRIND_MAKE_WRITABLE(p, sizeof(at));
      empty_alloc_t *ea = (empty_alloc_t *)p;
      ea->next = at_alloc.freelist;
      ea->used = 0;
      at_alloc.freelist = ea;
    }
  } else
    RAISEF("not enough memory", NIL);
}
Example #12
enum vocab_ret vocab_decode(struct vocab_vector *vocab, struct vec *v) {
    unsigned long int tmp;
    unsigned int bytes = 0,
                 ret;
    unsigned char byte;

    VALGRIND_CHECK_WRITABLE(vocab, sizeof(*vocab));

    /* if debugging, clear the vocab vector first */
    assert((memset(vocab, 0, sizeof(*vocab)), 1));

    /* check that memory can be accessed, then mark vocab entry as 
     * uninitialised */
    VALGRIND_MAKE_WRITABLE(vocab, sizeof(*vocab));
    VALGRIND_CHECK_READABLE(v->pos, VEC_LEN(v));

    /* first, get first byte which contains attribute, location and type
     * indications */
    if (v->pos < v->end) {
        vec_byte_read(v, (char *) &byte, 1);
        bytes++;
        vocab->attr = byte & BIT_LMASK(2);
        byte >>= 2;
        vocab->location = byte & BIT_LMASK(2);
        byte >>= 2;
        vocab->type = byte;

        if (vocab->attr & VOCAB_ATTRIBUTES_PERLIST) {
            if ((ret = vec_vbyte_read(v, &tmp))) {
                vocab->attribute = (unsigned int) tmp;
                bytes += ret;
            } else {
                if (((unsigned int) VEC_LEN(v)) <= vec_vbyte_len(UINT_MAX)) {
                    v->pos -= bytes;
                    return VOCAB_ENOSPC;
                } else {
                    v->pos -= bytes;
                    return VOCAB_EOVERFLOW;
                }
            }
        }

        /* get common header entries */
        if ((ret = vec_vbyte_read(v, &vocab->size))
          && (bytes += ret)
          && (ret = vec_vbyte_read(v, &vocab->header.doc.docs))
          && (bytes += ret)
          && (ret = vec_vbyte_read(v, &vocab->header.doc.occurs))
          && (bytes += ret)
          && (ret = vec_vbyte_read(v, &vocab->header.doc.last))
          && (bytes += ret)) {
            /* succeeded, do nothing */
        } else {
            if (((unsigned int) VEC_LEN(v)) <= vec_vbyte_len(UINT_MAX)) {
                v->pos -= bytes;
                return VOCAB_ENOSPC;
            } else {
                v->pos -= bytes;
                return VOCAB_EOVERFLOW;
            }
        }

        /* get specific header entries */
        switch (vocab->type) {
        case VOCAB_VTYPE_DOC:
        case VOCAB_VTYPE_DOCWP:
            /* ok, so i cheated a little and just read the common, uh, not
             * common ones above (they're not common because future vector 
             * types might not have them)... */
            break;

        case VOCAB_VTYPE_IMPACT:
            break;

        default: 
            v->pos -= bytes; 
            return VOCAB_EINVAL;
        }

        /* get location */
        switch (vocab->location) {
        case VOCAB_LOCATION_VOCAB:
            if (((unsigned int) VEC_LEN(v)) >= vocab->size) {
                /* note that we increment vector over in-vocab vector so that
                 * successive _decode calls will work as planned */
                vocab->loc.vocab.vec = v->pos;
                v->pos += vocab->size;
                bytes += vocab->size;
            } else {
                v->pos -= bytes; 
                return VOCAB_ENOSPC;
            }
            break;
       
        case VOCAB_LOCATION_FILE:
            if ((ret = vec_vbyte_read(v, &tmp))
              && ((vocab->loc.file.fileno = tmp), (bytes += ret))
              && (ret = vec_vbyte_read(v, &vocab->loc.file.offset))
              && (bytes += ret)
              && (ret = vec_vbyte_read(v, &tmp))
              && ((vocab->loc.file.capacity = tmp), (bytes += ret))) {
                /* succeeded, do nothing */
            } else {
                if (((unsigned int) VEC_LEN(v)) <= vec_vbyte_len(UINT_MAX)) {
                    v->pos -= bytes;
                    return VOCAB_ENOSPC;
                } else {
                    v->pos -= bytes;
                    return VOCAB_EOVERFLOW;
                }
            }
            break;

        default: 
            v->pos -= bytes;
            return VOCAB_EINVAL;
        }

        return VOCAB_OK;
    } else {