/* Ity_I1 values cannot be stored or loaded. So vex_inject_ir will load/store
   such a value from/to a 4-byte container. It uses 32to1 and 1Uto32,
   respectively. */
static void
valgrind_set_vbits(opnd_t *opnd)
{
   unsigned rc, num_bytes;
   
   /* 1-bit wide values cannot be written directly. So we write the full
      4-byte container here. */
   num_bytes = opnd->type == Ity_I1 ? 4 : sizeof_irtype(opnd->type);
   rc = VALGRIND_SET_VBITS(&opnd->value, &opnd->vbits.bits, num_bytes);
   assert(rc == 1);

   // Make sure the v-bits were set correctly
   vbits_t actual = { .num_bits = opnd->vbits.num_bits };
   rc = VALGRIND_GET_VBITS(&opnd->value, &actual.bits, num_bytes);
   assert(rc == 1);

   assert(equal_vbits(opnd->vbits, actual));
}


static void
valgrind_get_vbits(opnd_t *opnd)
{
   unsigned rc, num_bytes;

   /* 1-bit wide values cannot be read directly. So we read the full
      4-byte container here. */
   num_bytes = opnd->type == Ity_I1 ? 4 : sizeof_irtype(opnd->type);
   opnd->vbits.num_bits = bitsof_irtype(opnd->type);
   rc = VALGRIND_GET_VBITS(&opnd->value, &opnd->vbits.bits, num_bytes);
   assert(rc == 1);
}
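
Both helpers treat a return code of 1 as "running under Valgrind and the shadow
bits were transferred". A minimal standalone sketch of the same round trip,
outside the vbit-test harness (copy_definedness is a hypothetical name, not
part of the test suite):

#include <assert.h>
#include <valgrind/memcheck.h>

/* Copy the definedness of *src onto *dst. Outside Valgrind,
   VALGRIND_GET_VBITS returns 0 and the function is a no-op. */
static void
copy_definedness(int *dst, const int *src)
{
   unsigned char vbits[sizeof(int)];

   if (VALGRIND_GET_VBITS(src, vbits, sizeof(int)) == 1) {
      unsigned rc = VALGRIND_SET_VBITS(dst, vbits, sizeof(int));
      assert(rc == 1);
   }
}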
Example #2
static void
memcpy_with_vbits (void *dest,
                   void *src,
                   size_t length)
{
#ifdef WITH_VALGRIND
	int vbits_setup = 0;
	void *vbits = NULL;

	if (RUNNING_ON_VALGRIND) {
		vbits = malloc (length);
		if (vbits != NULL)
			vbits_setup = VALGRIND_GET_VBITS (src, vbits, length);
		VALGRIND_MAKE_MEM_DEFINED (src, length);
	}
#endif

	memcpy (dest, src, length);

#ifdef WITH_VALGRIND
	if (vbits_setup == 1) {
		VALGRIND_SET_VBITS (dest, vbits, length);
		VALGRIND_SET_VBITS (src, vbits, length);
	}
	free (vbits);
#endif
}
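
A hypothetical caller (struct key and duplicate_key are invented names, not
part of the library): the helper copies the bytes and then mirrors the
source's defined/undefined byte map onto the destination, so Memcheck
diagnoses later reads of the copy exactly as it would reads of the original:

#include <stdint.h>

struct key {
	uint32_t id;     /* always initialized */
	char pad[4];     /* may never be written */
};

static void
duplicate_key (struct key *out, struct key *in)
{
	memcpy_with_vbits (out, in, sizeof (*in));
	/* out->pad is now marked undefined wherever in->pad was. */
}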
Example #3
TEST(MemcpyAsync, Validity) {
    cudaError_t ret;
    cudaStream_t stream;

    ret = cudaStreamCreate(&stream);
    ASSERT_EQ(cudaSuccess, ret);

    int *device_ptr;
    int src = 0, vsrc, dst, vdst;

    ret = cudaMalloc((void **) &device_ptr, sizeof(*device_ptr));
    ASSERT_EQ(cudaSuccess, ret);

    /* Only src is valid; *device_ptr and dst are invalid. */

    /* Do transfer */
    ret = cudaMemcpyAsync(device_ptr, &src, sizeof(src),
        cudaMemcpyHostToDevice, stream);
    ASSERT_EQ(cudaSuccess, ret);

    /* Both src and *device_ptr are valid; dst is invalid */
    ret = cudaMemcpyAsync(&dst, device_ptr, sizeof(dst),
        cudaMemcpyDeviceToHost, stream);
    ASSERT_EQ(cudaSuccess, ret);

    /* The copies are asynchronous; synchronize before inspecting dst. */
    ret = cudaStreamSynchronize(stream);
    ASSERT_EQ(cudaSuccess, ret);

    EXPECT_EQ(src, dst);

    int valgrind = VALGRIND_GET_VBITS(&src, &vsrc, sizeof(src));
    assert(valgrind == 0 || valgrind == 1);

    if (valgrind == 1) {
        valgrind = VALGRIND_GET_VBITS(&dst, &vdst, sizeof(dst));
        assert(valgrind == 1);

        EXPECT_EQ(vsrc, vdst);
    }

    ret = cudaFree(device_ptr);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaStreamDestroy(stream);
    ASSERT_EQ(cudaSuccess, ret);
}
Example #4
TEST_P(MemsetValidity, Aligned) {
    const size_t param = GetParam();
    const size_t alloc = sizeof(void *) << param;

    cudaError_t ret;
    uint8_t * ptr;
    ret = cudaMalloc((void **) &ptr, alloc);
    ASSERT_EQ(cudaSuccess, ret);

    uint8_t *  data  = new uint8_t[alloc];
    uint8_t * vdata  = new uint8_t[alloc];
    uint8_t * expect = new uint8_t[alloc];
    memset(expect, 0xFF, alloc);

    // Write a pattern to the actual data and an expected validity pattern
    for (size_t i = 0; i < param; i += 2) {
        // Write
        const size_t range  = sizeof(void *) *  (1 << i);
        assert(range * 2 <= alloc);

        /* The byte value written is always zero; only the validity
           pattern matters for this test. */
        ret = cudaMemset(ptr    + range, 0x0, range);
                  memset(expect + range, 0x0, range);
        ASSERT_EQ(cudaSuccess, ret);
    }

    // Download data
    ret = cudaMemcpy(data, ptr, alloc, cudaMemcpyDeviceToHost);
    ASSERT_EQ(cudaSuccess, ret);

    // Copy out validity bits
    int valgrind = VALGRIND_GET_VBITS(data, vdata, alloc);
    assert(valgrind == 0 || valgrind == 1);

    // Check if Valgrind is running
    if (valgrind == 1) {
        const int iret = memcmp(vdata, expect, alloc);
        EXPECT_EQ(0, iret);
    }

    delete[] expect;
    delete[] vdata;
    delete[]  data;

    ret = cudaFree(ptr);
    ASSERT_EQ(cudaSuccess, ret);
}
Example #5
TEST(Memset, MallocAfterMemset) {
    cudaError_t ret;
    void *ptr1, *ptr2;
    const size_t block = 1 << 10;

    ret = cudaMalloc(&ptr1, block);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaMemset(ptr1, 0, block);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaMalloc(&ptr2, block);
    ASSERT_EQ(cudaSuccess, ret);

    // Download data
    void *hptr1;
    ret = cudaMallocHost(&hptr1, block);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaMemcpy(hptr1, ptr1, block, cudaMemcpyDeviceToHost);
    ASSERT_EQ(cudaSuccess, ret);

    // Copy out validity bits
    uint8_t * vptr1 = new uint8_t[block];
    int valgrind = VALGRIND_GET_VBITS(hptr1, vptr1, block);
    assert(valgrind == 0 || valgrind == 1);

    // Check if Valgrind is running
    if (valgrind == 1) {
        uint8_t * eptr1 = new uint8_t[block];
        memset(eptr1, 0x0, block);

        EXPECT_EQ(0, memcmp(vptr1, eptr1, block));
        delete[] eptr1;
    }

    delete[] vptr1;

    ret = cudaFree(ptr2);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaFree(ptr1);
    ASSERT_EQ(cudaSuccess, ret);

    ret = cudaFreeHost(hptr1);
    ASSERT_EQ(cudaSuccess, ret);
}
Example #6
#include <stdio.h>
#include <stdlib.h>
#include <valgrind/memcheck.h>

int main ( void )
{
  int* a = malloc(10 * sizeof(int));
  int* b = malloc(10 * sizeof(int));
  int* v = malloc(10 * sizeof(int));
  int i, sum, res;

  for (i = 0; i < 10; i++) {
     if (i != 5) 
        a[i] = i;
  }

  /* a[0 .. 4] and [6 .. 9] are defined, [5] is undefined. */
  for (i = 0; i < 10; i++)
     b[i] = 0;

  /* b[0 .. 9] is defined. */

  /* Get metadata for a and put it in v. */
  res = VALGRIND_GET_VBITS(a, v, 10*sizeof(int) );
  printf("result of GET is %d (1 for success)\n", res);

  for (i = 0; i < 10; i++)
     printf("%d 0x%08x\n", i, v[i]);

  /* and copy to b. */
  res = VALGRIND_SET_VBITS(b, v, 10*sizeof(int) );
  printf("result of SET is %d (1 for success)\n", res);
  
  /* Now we should have that b[5] is undefined since a[5] is
     undefined. */
  sum = 100;
  for (i = 0; i < 10; i++)
     sum += b[i];

  /* V should yelp at this point, that sum is undefined. */
  if (sum == 0) 
    printf("sum == 0\n"); 
  else
    printf("sum != 0\n");

  return 0;
}
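
Run natively, both GET and SET print 0 and no metadata is copied. Under
Memcheck, both calls should print 1 and the final comparison should trigger a
"Conditional jump or move depends on uninitialised value(s)" report, because
b[5] inherited the undefined bits of a[5].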
Example #7
/**
 * When running under Valgrind, check whether all bytes in the range [packet,
 * packet+length) are defined. Let Valgrind print a backtrace if one or more
 * bytes with uninitialized values have been found. This function can help to
 * find the cause of undefined value errors if --track-origins=yes is not
 * sufficient. Does nothing when not running under Valgrind.
 *
 * Note: this requires a fairly recent valgrind.
 */
void
netsnmp_check_definedness(const void *packet, size_t length)
{
#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
    && (__VALGRIND_MAJOR__ > 3                                   \
        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))

    if (RUNNING_ON_VALGRIND) {
        size_t i;
        char vbits;

        for (i = 0; i < length; ++i) {
            if (VALGRIND_GET_VBITS((const char *)packet + i, &vbits, 1) == 1
                && vbits)
                VALGRIND_PRINTF_BACKTRACE("Undefined: byte %d/%d", (int)i,
                                          (int)length);
        }
    }

#endif
}
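
A hypothetical call site (send_pdu_checked is an invented name): checking the
encoded packet right before it is handed to the transport pins down which
bytes are still undefined at that point, with a backtrace of the hand-off:

#include <sys/socket.h>

static void
send_pdu_checked(int fd, const void *packet, size_t length)
{
    /* Reports each undefined byte with a backtrace before the kernel sees it. */
    netsnmp_check_definedness(packet, length);
    send(fd, packet, length, 0);
}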
Example #8
/**
 * Helper for reading the magic number and traversal check flag fields of a
 * pool-first chunk while suppressing valgrind's warnings about undefined values.
 *
 * A pool-first chunk can be either allocated or free.
 *
 * Because chunks are marked as undefined upon allocation, some chunks may still
 * be fully or partially marked as undefined.
 *
 * Nevertheless, the fields are read, and their values are used to determine
 * whether the chunk is actually a free pool-first chunk.
 *
 * See also:
 *          Description of the collection algorithm in mem_pools_collect_empty
 */
static void __attr_always_inline___
mem_pools_collect_read_magic_num_and_flag (mem_pool_chunk_t *pool_first_chunk_p, /**< a pool-first chunk */
                                           uint16_t *out_magic_num_field_value_p, /**< out: value of magic num field,
                                                                                   *        read from the chunk */
                                           bool *out_traversal_check_flag_p) /**< out: value of traversal check flag
                                                                              *        field, read from the chunk */
{
    JERRY_ASSERT (pool_first_chunk_p != NULL);
    JERRY_ASSERT (out_magic_num_field_value_p != NULL);
    JERRY_ASSERT (out_traversal_check_flag_p != NULL);

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes at the hint_magic_num
     * and traversal_check_flag fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about using
     * uninitialized data in a conditional expression. To suppress the false-positive
     * warning, the chunk is temporarily marked as defined, and after the hint magic
     * number and flag are read, the valgrind state of the chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_first_chunk_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_first_chunk_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    uint16_t magic_num_field = pool_first_chunk_p->u.pool_gc.hint_magic_num;
    bool traversal_check_flag = pool_first_chunk_p->u.pool_gc.traversal_check_flag;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_first_chunk_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    *out_magic_num_field_value_p = magic_num_field;
    *out_traversal_check_flag_p = traversal_check_flag;
} /* mem_pools_collect_read_magic_num_and_flag */
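
The save/define/read/restore pattern in the helper above generalizes beyond the
pool code. A minimal sketch under assumed names (header_t and its magic field
are hypothetical), using the stock Memcheck macro VALGRIND_MAKE_MEM_DEFINED
rather than the project-local VALGRIND_DEFINED_SPACE:

#include <assert.h>
#include <stdint.h>
#include <valgrind/memcheck.h>

typedef struct { uint16_t magic; } header_t;

static uint16_t
peek_magic (header_t *h)
{
  uint8_t vbits[sizeof (header_t)];
  unsigned status;

  /* Save the current definedness of the header bytes. */
  status = VALGRIND_GET_VBITS (h, vbits, sizeof (header_t));
  assert (status == 0 || status == 1);

  /* Temporarily mark everything defined so the read below is not reported. */
  VALGRIND_MAKE_MEM_DEFINED (h, sizeof (header_t));

  uint16_t magic = h->magic;

  /* Restore the saved definedness. */
  status = VALGRIND_SET_VBITS (h, vbits, sizeof (header_t));
  assert (status == 0 || status == 1);

  return magic;
}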
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number stored in the header of pools whose first chunk is free
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * On the first pass, collect pointers to the free chunks that are first in their
   * pools into separate lists (collection-time pool lists), and turn those chunks
   * into headers of the corresponding pools.
   */

  /*
   * Number of collection-time pool lists
   */
  constexpr uint32_t pool_lists_number = 8;

  /*
   * Collection-time pool lists
   */
  mem_pool_chunk_t *pool_lists_p[pool_lists_number];
  for (uint32_t i = 0; i < pool_lists_number; i++)
  {
    pool_lists_p[i] = NULL;
  }

  /*
   * Number of pools included in the lists
   */
  uint32_t pools_in_lists_number = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    if (pool_start_p == free_chunk_iter_p)
    {
      /*
       * The chunk is the first in its pool.
       *
       * Remove it from the common list of free chunks.
       */
      if (prev_free_chunk_p == NULL)
      {
        JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

        mem_free_chunk_p = next_free_chunk_p;
      }
      else
      {
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
      }

      pools_in_lists_number++;

      uint8_t list_id = pools_in_lists_number % pool_lists_number;

      /*
       * Initialize the pool header and insert the pool into one of the lists
       */
      free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
      free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
      free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
      free_chunk_iter_p->u.pool_gc.list_id = list_id;

      MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, pool_lists_p[list_id]);
      pool_lists_p[list_id] = free_chunk_iter_p;
    }
    else
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  if (pools_in_lists_number == 0)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * On the second pass we check, for each remaining free chunk, whether it belongs
   * to a pool that was included in the collection-time pool lists.
   *
   * For each such chunk, try to find the corresponding pool by iterating over the list.
   *
   * If the pool is found in a list (i.e. the first chunk of the pool is free), increment
   * the counter of free chunks in that pool, and move the chunk from the global free
   * chunks list to the collection-time local list of the pool's free chunks.
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    bool is_chunk_moved_to_local_list = false;

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes at the hint_magic_num
     * and list_id fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about using
     * uninitialized data in a conditional expression. To suppress the false-positive
     * warning, the chunk is temporarily marked as defined, and after the hint magic
     * number and list identifier are read, the valgrind state of the chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_start_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    /*
     * The magic number doesn't guarantee that the chunk is actually a pool header,
     * so it is only an optimization that reduces the number of unnecessary iterations
     * over the pool lists.
     */
    uint16_t magic_num_field = pool_start_p->u.pool_gc.hint_magic_num;
    uint8_t id_to_search_in = pool_start_p->u.pool_gc.list_id;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    if (magic_num_field == hint_magic_num_value)
    {
      /*
       * The first chunk may be free.
       *
       * If so, it is included in the list of pool-first free chunks.
       */

      if (id_to_search_in < pool_lists_number)
      {
        for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[id_to_search_in];
             pool_list_iter_p != NULL;
             pool_list_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                    pool_list_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_list_iter_p == pool_start_p)
          {
            /*
             * The first chunk is actually free,
             * so increment the free chunks counter in it.
             */
            pool_start_p->u.pool_gc.free_chunks_num++;

            /*
             * It is possible that the corresponding pool is empty.
             *
             * Move the current chunk from the common list of free chunks to the
             * temporary list local to the pool.
             */
            if (prev_free_chunk_p == NULL)
            {
              JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

              mem_free_chunk_p = next_free_chunk_p;
            }
            else
            {
              prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
            }

            free_chunk_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                   pool_start_p->u.pool_gc.free_list_cp);
            MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, free_chunk_iter_p);

            is_chunk_moved_to_local_list = true;

            break;
          }
        }
      }
    }

    if (!is_chunk_moved_to_local_list)
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  /*
   * On the third pass we check each pool in the collection-time pool lists against
   * the counted number of free chunks in that pool.
   *
   * If the number equals the number of chunks in the pool, the pool is empty and is
   * freed; otherwise, the free chunks of the pool are returned to the common list
   * of free chunks.
   */
  for (uint8_t list_id = 0; list_id < pool_lists_number; list_id++)
  {
    for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[list_id], *next_p;
         pool_list_iter_p != NULL;
         pool_list_iter_p = next_p)
    {
      next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                   pool_list_iter_p->u.pool_gc.next_first_cp);

      if (pool_list_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
      {
#ifndef JERRY_NDEBUG
        mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
        mem_heap_free_block (pool_list_iter_p);

        MEM_POOLS_STAT_FREE_POOL ();
      }
      else
      {
        mem_pool_chunk_t *first_chunk_p = pool_list_iter_p;

        /*
         * Convert the layout of the first chunk from a collection-time pool header
         * back to a common free chunk
         */
        first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                           pool_list_iter_p->u.pool_gc.free_list_cp);

        /*
         * Link the pool-local list of free chunks into the global list of free chunks
         */
        for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
             ;
             pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
        {
          JERRY_ASSERT (pool_chunks_iter_p != NULL);

          if (pool_chunks_iter_p->u.free.next_p == NULL)
          {
            pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

            break;
          }
        }

        mem_free_chunk_p = first_chunk_p;
      }
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */