Example #1
static void
test_heap_give_some_memory_back (mem_try_give_memory_back_severity_t severity)
{
  int p;

  if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
  {
    p = 8;
  }
  else
  {
    JERRY_ASSERT (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);

    p = 1;
  }

  for (int i = 0; i < test_sub_iters; i++)
  {
    if (rand () % p == 0)
    {
      if (ptrs[i] != NULL)
      {
        for (size_t k = 0; k < sizes[i]; k++)
        {
          JERRY_ASSERT (ptrs[i][k] == 0);
        }

        mem_heap_free_block (ptrs[i]);
        ptrs[i] = NULL;
      }
    }
  }
} /* test_heap_give_some_memory_back */
Example #2
/**
 * Insert a new bytecode to the bytecode container
 */
static void
re_bytecode_list_insert (re_bytecode_ctx_t *bc_ctx_p, /**< RegExp bytecode context */
                         size_t offset, /**< distance from the start of the container */
                         re_bytecode_t *bytecode_p, /**< input bytecode */
                         size_t length) /**< length of input */
{
  JERRY_ASSERT (length <= REGEXP_BYTECODE_BLOCK_SIZE);

  re_bytecode_t *current_p = bc_ctx_p->current_p;
  if (current_p + length > bc_ctx_p->block_end_p)
  {
    re_realloc_regexp_bytecode_block (bc_ctx_p);
  }

  re_bytecode_t *src_p = bc_ctx_p->block_start_p + offset;
  if ((re_get_bytecode_length (bc_ctx_p) - offset) > 0)
  {
    re_bytecode_t *dest_p = src_p + length;
    re_bytecode_t *tmp_block_start_p;
    tmp_block_start_p = (re_bytecode_t *) mem_heap_alloc_block ((re_get_bytecode_length (bc_ctx_p) - offset),
                                                                 MEM_HEAP_ALLOC_SHORT_TERM);
    memcpy (tmp_block_start_p, src_p, (size_t) (re_get_bytecode_length (bc_ctx_p) - offset));
    memcpy (dest_p, tmp_block_start_p, (size_t) (re_get_bytecode_length (bc_ctx_p) - offset));
    mem_heap_free_block (tmp_block_start_p);
  }
  memcpy (src_p, bytecode_p, length);

  bc_ctx_p->current_p += length;
} /* re_bytecode_list_insert */
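The temporary block dance above exists because memcpy does not allow overlapping source and destination. A minimal sketch of the same insertion using memmove, which permits overlap and avoids the temporary allocation (plain C illustration with hypothetical names, not the engine's API):

#include <stdint.h>
#include <string.h>

/* Sketch: insert `length` bytes at `offset` into a buffer that already holds
 * `used` bytes, assuming the caller has ensured capacity >= used + length. */
static void
buffer_insert (uint8_t *buf_p, size_t used, size_t offset,
               const uint8_t *src_p, size_t length)
{
  /* memmove tolerates the overlapping shift of the tail [offset, used). */
  memmove (buf_p + offset + length, buf_p + offset, used - offset);
  memcpy (buf_p + offset, src_p, length);
}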
Example #3
/**
 * Deletes bytecode and associated hash table
 */
void
serializer_remove_bytecode_data (const bytecode_data_header_t *bytecode_data_p) /**< pointer to bytecode data which
                                                                                 * should be deleted */
{
  bytecode_data_header_t *prev_header = NULL;
  bytecode_data_header_t *cur_header_p = first_bytecode_header_p;

  while (cur_header_p != NULL)
  {
    if (cur_header_p == bytecode_data_p)
    {
      if (prev_header)
      {
        prev_header->next_header_cp = cur_header_p->next_header_cp;
      }
      else
      {
        first_bytecode_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, cur_header_p->next_header_cp);
      }
      mem_heap_free_block (cur_header_p);
      break;
    }

    prev_header = cur_header_p;
    cur_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, cur_header_p->next_header_cp);
  }
} /* serializer_remove_bytecode_data */
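The same unlink pattern on a plain singly linked list, as a self-contained sketch (hypothetical node_t and free_fn, not JerryScript types): walk with a trailing prev pointer, splice the victim out of the chain, then release it.

#include <stddef.h>

typedef struct node_t
{
  struct node_t *next_p;
  /* payload ... */
} node_t;

static void
list_remove (node_t **head_pp, /**< in-out: list head */
             node_t *victim_p, /**< node to remove */
             void (*free_fn) (node_t *)) /**< deallocation callback */
{
  node_t *prev_p = NULL;

  for (node_t *iter_p = *head_pp; iter_p != NULL; iter_p = iter_p->next_p)
  {
    if (iter_p == victim_p)
    {
      if (prev_p != NULL)
      {
        prev_p->next_p = iter_p->next_p; /* unlink from the middle or the end */
      }
      else
      {
        *head_pp = iter_p->next_p; /* the victim was the head */
      }

      free_fn (iter_p);
      break;
    }

    prev_p = iter_p;
  }
}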
Example #4
/**
 * Free memory occupied by bytecode data
 */
static void
bc_free_bytecode_data (bytecode_data_header_t *bytecode_data_p) /**< byte-code scope data header */
{
  bytecode_data_header_t *next_to_handle_list_p = bytecode_data_p;

  while (next_to_handle_list_p != NULL)
  {
    bytecode_data_header_t *bc_header_list_iter_p = next_to_handle_list_p;
    next_to_handle_list_p = NULL;

    while (bc_header_list_iter_p != NULL)
    {
      bytecode_data_header_t *header_p = bc_header_list_iter_p;

      bc_header_list_iter_p = MEM_CP_GET_POINTER (bytecode_data_header_t, header_p->next_header_cp);

      mem_cpointer_t *declarations_p = MEM_CP_GET_POINTER (mem_cpointer_t, header_p->declarations_cp);

      for (uint32_t index = 0; index < header_p->func_scopes_count; index++)
      {
        bytecode_data_header_t *child_scope_header_p = MEM_CP_GET_NON_NULL_POINTER (bytecode_data_header_t,
                                                                                    declarations_p[index]);
        JERRY_ASSERT (child_scope_header_p->next_header_cp == MEM_CP_NULL);

        MEM_CP_SET_POINTER (child_scope_header_p->next_header_cp, next_to_handle_list_p);

        next_to_handle_list_p = child_scope_header_p;
      }

      mem_heap_free_block (header_p);
    }

    JERRY_ASSERT (bc_header_list_iter_p == NULL);
  }
} /* bc_free_bytecode_data */
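The function avoids recursion by threading child scopes onto a pending list through their (otherwise unused) next_header_cp links. A minimal sketch of the same idea with ordinary pointers (hypothetical scope_t; assumes the children array lives inside the node's own allocation, like the declarations area above):

#include <stdlib.h>

typedef struct scope_t
{
  struct scope_t *next_p; /* intrusive link, reused as the pending-list link */
  struct scope_t **children_pp; /* assumed to point into this node's allocation */
  size_t children_count;
} scope_t;

static void
scope_free_all (scope_t *root_p)
{
  scope_t *pending_p = root_p;

  if (pending_p != NULL)
  {
    pending_p->next_p = NULL; /* the root starts a one-element pending list */
  }

  while (pending_p != NULL)
  {
    scope_t *node_p = pending_p;
    pending_p = node_p->next_p;

    /* Push the children onto the pending list before freeing the parent. */
    for (size_t i = 0; i < node_p->children_count; i++)
    {
      node_p->children_pp[i]->next_p = pending_p;
      pending_p = node_p->children_pp[i];
    }

    free (node_p);
  }
}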
Example #5
void
serializer_free (void)
{
  if (bytecode_data.strings_buffer)
  {
    mem_heap_free_block ((uint8_t *) bytecode_data.strings_buffer);
  }
  if (bytecode_data.lit_id_hash != null_hash)
  {
    lit_id_hash_table_free (bytecode_data.lit_id_hash);
  }

  mem_heap_free_block ((uint8_t *) bytecode_data.opcodes);

  lit_finalize ();
}
/**
 * Free stored literal
 *
 * @return pointer to the next literal in the list
 */
lit_record_t *
lit_free_literal (lit_record_t *lit_p) /**< literal record */
{
  lit_record_t *const ret_p = lit_cpointer_decompress (lit_p->next);
  mem_heap_free_block (lit_p, lit_get_literal_size (lit_p));
  return ret_p;
} /* lit_free_literal */
Example #7
/**
 * Realloc the bytecode container
 *
 * @return current position in RegExp bytecode
 */
static re_bytecode_t*
re_realloc_regexp_bytecode_block (re_bytecode_ctx_t *bc_ctx_p) /**< RegExp bytecode context */
{
  JERRY_ASSERT (bc_ctx_p->block_end_p - bc_ctx_p->block_start_p >= 0);
  size_t old_size = static_cast<size_t> (bc_ctx_p->block_end_p - bc_ctx_p->block_start_p);

  /* If one of the members of the RegExp bytecode context is NULL, then all members should be NULL
   * (it means this is the first allocation); otherwise all of the members should be non-NULL pointers. */
  JERRY_ASSERT ((!bc_ctx_p->current_p && !bc_ctx_p->block_end_p && !bc_ctx_p->block_start_p)
                || (bc_ctx_p->current_p && bc_ctx_p->block_end_p && bc_ctx_p->block_start_p));

  size_t new_block_size = old_size + REGEXP_BYTECODE_BLOCK_SIZE;
  JERRY_ASSERT (bc_ctx_p->current_p - bc_ctx_p->block_start_p >= 0);
  size_t current_ptr_offset = static_cast<size_t> (bc_ctx_p->current_p - bc_ctx_p->block_start_p);

  re_bytecode_t *new_block_start_p = (re_bytecode_t *) mem_heap_alloc_block (new_block_size,
                                                                             MEM_HEAP_ALLOC_SHORT_TERM);
  if (bc_ctx_p->current_p)
  {
    memcpy (new_block_start_p, bc_ctx_p->block_start_p, static_cast<size_t> (current_ptr_offset));
    mem_heap_free_block (bc_ctx_p->block_start_p);
  }
  bc_ctx_p->block_start_p = new_block_start_p;
  bc_ctx_p->block_end_p = new_block_start_p + new_block_size;
  bc_ctx_p->current_p = new_block_start_p + current_ptr_offset;

  return bc_ctx_p->current_p;
} /* re_realloc_regexp_bytecode_block */
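A sketch of the same grow-by-fixed-block strategy with malloc/free (GROW_BLOCK_SIZE and the names are illustrative): allocate old_size plus one block, copy the used prefix, free the old block, then re-derive every cached pointer from the new base, exactly as block_start_p/block_end_p/current_p are rebased above.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

enum { GROW_BLOCK_SIZE = 256 };

static uint8_t *
grow_block (uint8_t *old_p, /**< current block (may be NULL) */
            size_t old_size, /**< current capacity */
            size_t used, /**< bytes in use */
            size_t *new_size_p) /**< out: new capacity */
{
  size_t new_size = old_size + GROW_BLOCK_SIZE;
  uint8_t *new_p = (uint8_t *) malloc (new_size);

  if (new_p == NULL)
  {
    return NULL; /* the caller decides how to fail */
  }

  if (old_p != NULL)
  {
    memcpy (new_p, old_p, used);
    free (old_p);
  }

  *new_size_p = new_size;
  return new_p;
}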
Example #8
void
serializer_free (void)
{
  if (bytecode_data.strings_buffer)
  {
    mem_heap_free_block ((uint8_t *) bytecode_data.strings_buffer);
  }

  lit_finalize ();

  while (bytecode_data.instrs_p != NULL)
  {
    insts_data_header_t *header_p = GET_BYTECODE_HEADER (bytecode_data.instrs_p);
    bytecode_data.instrs_p = MEM_CP_GET_POINTER (vm_instr_t, header_p->next_instrs_cp);

    mem_heap_free_block (header_p);
  }
}
/**
 * The String.prototype object's 'trim' routine
 *
 * See also:
 *          ECMA-262 v5, 15.5.4.20
 *
 * @return completion value
 *         Returned value must be freed with ecma_free_completion_value.
 */
static ecma_completion_value_t
ecma_builtin_string_prototype_object_trim (ecma_value_t this_arg) /**< this argument */
{
  ecma_completion_value_t ret_value = ecma_make_empty_completion_value ();

  /* 1 */
  ECMA_TRY_CATCH (check_coercible_val,
                  ecma_op_check_object_coercible (this_arg),
                  ret_value);

  /* 2 */
  ECMA_TRY_CATCH (to_string_val,
                  ecma_op_to_string (this_arg),
                  ret_value);

  ecma_string_t *original_string_p = ecma_get_string_from_value (to_string_val);

  /* 3 */
  const lit_utf8_size_t size = ecma_string_get_size (original_string_p);
  const ecma_length_t length = ecma_string_get_length (original_string_p);

  /* Workaround: avoid repeated calls of ecma_string_get_char_at_pos () because of its overhead */
  lit_utf8_byte_t *original_utf8_str_p = (lit_utf8_byte_t *) mem_heap_alloc_block (size + 1,
                                                                                   MEM_HEAP_ALLOC_SHORT_TERM);
  ecma_string_to_utf8_string (original_string_p, original_utf8_str_p, (ssize_t) size);

  uint32_t prefix = 0, postfix = 0;
  uint32_t new_len = 0;

  while (prefix < length && isspace (lit_utf8_string_code_unit_at (original_utf8_str_p, size, prefix)))
  {
    prefix++;
  }

  while (postfix < length - prefix && isspace (lit_utf8_string_code_unit_at (original_utf8_str_p,
                                                                             size,
                                                                             length - postfix - 1)))
  {
    postfix++;
  }

  new_len = prefix < size ? size - prefix - postfix : 0;

  ecma_string_t *new_str_p = ecma_string_substr (original_string_p, prefix, prefix + new_len);

  /* 4 */
  ret_value = ecma_make_normal_completion_value (ecma_make_string_value (new_str_p));

  mem_heap_free_block (original_utf8_str_p);

  ECMA_FINALIZE (to_string_val);
  ECMA_FINALIZE (check_coercible_val);

  return ret_value;
} /* ecma_builtin_string_prototype_object_trim */
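The routine scans whitespace from both ends and substrings once. A minimal sketch of the same two-cursor scan on a plain ASCII buffer (the builtin works on code units, so this is a simplification):

#include <ctype.h>
#include <stddef.h>

/* Sketch: compute the half-open trimmed range [*start_p, *end_p) of str_p. */
static void
trim_bounds (const char *str_p, size_t len, size_t *start_p, size_t *end_p)
{
  size_t prefix = 0;
  size_t postfix = 0;

  while (prefix < len && isspace ((unsigned char) str_p[prefix]))
  {
    prefix++;
  }

  while (postfix < len - prefix && isspace ((unsigned char) str_p[len - postfix - 1]))
  {
    postfix++;
  }

  *start_p = prefix;
  *end_p = len - postfix;
}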
Example #10
void
linked_list_free (linked_list list)
{
  ASSERT_LIST (list);
  linked_list_header *header = (linked_list_header *) list;
  if (header->next)
  {
    linked_list_free ((linked_list) header->next);
  }
  mem_heap_free_block (list);
}
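The recursive free needs one stack frame per node, so a long list can exhaust the stack. A sketch of an iterative variant under the same assumed types (treating header->next as an ordinary pointer, as the cast above suggests):

void
linked_list_free_iterative (linked_list list)
{
  linked_list_header *header = (linked_list_header *) list;

  while (header != NULL)
  {
    /* Save the successor before the node is freed. */
    linked_list_header *next = (linked_list_header *) header->next;
    mem_heap_free_block ((linked_list) header);
    header = next;
  }
}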
Example #11
void
serializer_free (void)
{
  lit_finalize ();

  while (first_bytecode_header_p != NULL)
  {
    bytecode_data_header_t *header_p = first_bytecode_header_p;
    first_bytecode_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, header_p->next_header_cp);

    mem_heap_free_block (header_p);
  }
}
Example #12
/**
 * Free the chunk
 */
void
mem_pools_free (uint8_t *chunk_p) /**< pointer to the chunk */
{
  mem_pool_state_t *pool_state = mem_pools, *prev_pool_state_p = NULL;

  /**
   * Search for the pool containing the specified chunk.
   */
  while (!mem_pool_is_chunk_inside (pool_state, chunk_p))
  {
    prev_pool_state_p = pool_state;
    pool_state = MEM_CP_GET_NON_NULL_POINTER (mem_pool_state_t, pool_state->next_pool_cp);
  }

  /**
   * Free the chunk
   */
  mem_pool_free_chunk (pool_state, chunk_p);
  mem_free_chunks_number++;

  MEM_POOLS_STAT_FREE_CHUNK ();

  /**
   * If all chunks of the pool are free, free the pool itself.
   */
  if (pool_state->free_chunks_number == MEM_POOL_CHUNKS_NUMBER)
  {
    if (prev_pool_state_p != NULL)
    {
      prev_pool_state_p->next_pool_cp = pool_state->next_pool_cp;
    }
    else
    {
      mem_pools = MEM_CP_GET_POINTER (mem_pool_state_t, pool_state->next_pool_cp);
    }

    mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;

    mem_heap_free_block ((uint8_t*) pool_state);

    MEM_POOLS_STAT_FREE_POOL ();
  }
  else if (mem_pools != pool_state)
  {
    JERRY_ASSERT (prev_pool_state_p != NULL);

    prev_pool_state_p->next_pool_cp = pool_state->next_pool_cp;
    MEM_CP_SET_NON_NULL_POINTER (pool_state->next_pool_cp, mem_pools);
    mem_pools = pool_state;
  }
} /* mem_pools_free */
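The final else-if branch is a move-to-front heuristic: a pool that just serviced a free is moved to the head of the pool list, so the next operation touching the same pool finds it on the first iteration. The same step in isolation, on a hypothetical singly linked list:

#include <stddef.h>

typedef struct mtf_node_t
{
  struct mtf_node_t *next_p;
} mtf_node_t;

/* After a search found node_p with trailing pointer prev_p,
 * unlink the node and push it to the head of the list. */
static void
list_move_to_front (mtf_node_t **head_pp, mtf_node_t *prev_p, mtf_node_t *node_p)
{
  if (prev_p == NULL)
  {
    return; /* already at the head */
  }

  prev_p->next_p = node_p->next_p;
  node_p->next_p = *head_pp;
  *head_pp = node_p;
}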
/**
 * The String object's 'fromCharCode' routine
 *
 * See also:
 *          ECMA-262 v5, 15.5.3.2
 *
 * @return completion value
 *         Returned value must be freed with ecma_free_completion_value.
 */
static ecma_completion_value_t
ecma_builtin_string_object_from_char_code (ecma_value_t this_arg __attr_unused___, /**< 'this' argument */
                                           const ecma_value_t args[], /**< arguments list */
                                           ecma_length_t args_number) /**< number of arguments */
{
  ecma_completion_value_t ret_value = ecma_make_empty_completion_value ();

  if (args_number == 0)
  {
    ecma_string_t *ret_str_p = ecma_new_ecma_string_from_utf8 (NULL, 0);
    return ecma_make_normal_completion_value (ecma_make_string_value (ret_str_p));
  }

  lit_utf8_size_t utf8_buf_size = args_number * LIT_UTF8_MAX_BYTES_IN_CODE_UNIT;
  ecma_string_t *ret_str_p;
  MEM_DEFINE_LOCAL_ARRAY (utf8_buf_p, utf8_buf_size, lit_utf8_byte_t);

  lit_utf8_size_t utf8_buf_used = 0;

  FIXME ("Support surrogate pairs");
  for (ecma_length_t arg_index = 0;
       arg_index < args_number;
       arg_index++)
  {
    ECMA_OP_TO_NUMBER_TRY_CATCH (arg_num, args[arg_index], ret_value);

    uint32_t uint32_char_code = ecma_number_to_uint32 (arg_num);
    ecma_char_t code_unit = (uint16_t) uint32_char_code;

    JERRY_ASSERT (utf8_buf_used <= utf8_buf_size - LIT_UTF8_MAX_BYTES_IN_CODE_UNIT);
    utf8_buf_used += lit_code_unit_to_utf8 (code_unit, utf8_buf_p + utf8_buf_used);
    JERRY_ASSERT (utf8_buf_used <= utf8_buf_size);

    ECMA_OP_TO_NUMBER_FINALIZE (arg_num);

    if (ecma_is_completion_value_throw (ret_value))
    {
      mem_heap_free_block (utf8_buf_p);

      return ret_value;
    }

    JERRY_ASSERT (ecma_is_completion_value_empty (ret_value));
  }

  ret_str_p = ecma_new_ecma_string_from_utf8 (utf8_buf_p, utf8_buf_used);

  MEM_FINALIZE_LOCAL_ARRAY (utf8_buf_p);

  return ecma_make_normal_completion_value (ecma_make_string_value (ret_str_p));
} /* ecma_builtin_string_object_from_char_code */
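For BMP code units, the per-unit step performed by lit_code_unit_to_utf8 amounts to standard 1-to-3-byte UTF-8 encoding; a sketch under that assumption (surrogate pairs excluded, per the FIXME above):

#include <stddef.h>
#include <stdint.h>

/* Sketch: encode a 16-bit code unit into out_p, returning the byte count. */
static size_t
code_unit_to_utf8 (uint16_t cu, uint8_t *out_p)
{
  if (cu < 0x80)
  {
    out_p[0] = (uint8_t) cu;
    return 1;
  }

  if (cu < 0x800)
  {
    out_p[0] = (uint8_t) (0xC0 | (cu >> 6));
    out_p[1] = (uint8_t) (0x80 | (cu & 0x3F));
    return 2;
  }

  out_p[0] = (uint8_t) (0xE0 | (cu >> 12));
  out_p[1] = (uint8_t) (0x80 | ((cu >> 6) & 0x3F));
  out_p[2] = (uint8_t) (0x80 | (cu & 0x3F));
  return 3;
}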
Example #14
array_list
array_list_append (array_list al, void *element)
{
  array_list_header *h = extract_header (al);
  if ((h->len + 1) * h->element_size + sizeof (array_list_header) > h->size)
  {
    size_t size = mem_heap_recommend_allocation_size (h->size + h->element_size);
    JERRY_ASSERT (size > h->size);

    uint8_t* new_block_p = (uint8_t*) mem_heap_alloc_block (size, MEM_HEAP_ALLOC_SHORT_TERM);
    memcpy (new_block_p, h, h->size);
    memset (new_block_p + h->size, 0, size - h->size);

    mem_heap_free_block ((uint8_t *) h);

    h = (array_list_header *) new_block_p;
    h->size = size;
    al = (array_list) h;
  }
  memcpy (data (al) + (h->len * h->element_size), element, h->element_size);
  h->len++;
  return al;
}
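Note that the append grows the block by a single element's worth per reallocation (plus whatever mem_heap_recommend_allocation_size rounds up to), so a long run of appends may copy the buffer many times. A sketch of the usual doubling alternative, which makes n appends cost O(n) copied bytes in total (hypothetical vec_t, standard realloc; overflow of cap * elem_size is assumed not to occur):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
  uint8_t *data_p; /* element storage */
  size_t len; /* elements in use */
  size_t cap; /* elements allocated */
  size_t elem_size; /* size of one element */
} vec_t;

static int
vec_append (vec_t *v_p, const void *elem_p)
{
  if (v_p->len == v_p->cap)
  {
    size_t new_cap = (v_p->cap != 0) ? v_p->cap * 2 : 4;
    uint8_t *new_p = (uint8_t *) realloc (v_p->data_p, new_cap * v_p->elem_size);

    if (new_p == NULL)
    {
      return 0; /* allocation failure, vector left unchanged */
    }

    v_p->data_p = new_p;
    v_p->cap = new_cap;
  }

  memcpy (v_p->data_p + v_p->len * v_p->elem_size, elem_p, v_p->elem_size);
  v_p->len++;
  return 1;
}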
Example #15
int
main (int __attr_unused___ argc,
      char __attr_unused___ **argv)
{
  TEST_INIT ();

  mem_heap_init ();

  mem_register_a_try_give_memory_back_callback (test_heap_give_some_memory_back);

  mem_heap_print (true, false, true);

  for (uint32_t i = 0; i < test_iters; i++)
  {
    for (uint32_t j = 0; j < test_sub_iters; j++)
    {
      if (rand () % 2)
      {
        size_t size = (size_t) rand () % test_threshold_block_size;
        ptrs[j] = (uint8_t*) mem_heap_alloc_block (size,
                                                   (rand () % 2) ?
                                                   MEM_HEAP_ALLOC_LONG_TERM : MEM_HEAP_ALLOC_SHORT_TERM);
        sizes[j] = size;
        is_one_chunked[j] = false;
      }
      else
      {
        ptrs[j] = (uint8_t*) mem_heap_alloc_chunked_block ((rand () % 2) ?
                                                           MEM_HEAP_ALLOC_LONG_TERM : MEM_HEAP_ALLOC_SHORT_TERM);
        sizes[j] = mem_heap_get_chunked_block_data_size ();
        is_one_chunked[j] = true;
      }

      JERRY_ASSERT (sizes[j] == 0 || ptrs[j] != NULL);
      memset (ptrs[j], 0, sizes[j]);

      if (is_one_chunked[j])
      {
        JERRY_ASSERT (ptrs[j] != NULL
                      && mem_heap_get_chunked_block_start (ptrs[j] + (size_t) rand () % sizes[j]) == ptrs[j]);
      }
    }

    // mem_heap_print (true);

    for (uint32_t j = 0; j < test_sub_iters; j++)
    {
      if (ptrs[j] != NULL)
      {
        for (size_t k = 0; k < sizes[j]; k++)
        {
          JERRY_ASSERT (ptrs[j][k] == 0);
        }

        if (is_one_chunked[j])
        {
          JERRY_ASSERT (sizes[j] == 0
                        || mem_heap_get_chunked_block_start (ptrs[j] + (size_t) rand () % sizes[j]) == ptrs[j]);
        }

        mem_heap_free_block (ptrs[j]);

        ptrs[j] = NULL;
      }
    }
  }

  mem_heap_print (true, false, true);

  return 0;
} /* main */
Example #16
/**
 * Decrease the reference counter of Compact Byte Code or regexp byte code.
 */
void
ecma_bytecode_deref (ecma_compiled_code_t *bytecode_p) /**< byte code pointer */
{
  JERRY_ASSERT (bytecode_p->refs > 0);

  bytecode_p->refs--;

  if (bytecode_p->refs > 0)
  {
    /* Non-zero reference counter. */
    return;
  }

  if (bytecode_p->status_flags & CBC_CODE_FLAGS_FUNCTION)
  {
    lit_cpointer_t *literal_start_p = NULL;
    uint32_t literal_end;
    uint32_t const_literal_end;

    if (bytecode_p->status_flags & CBC_CODE_FLAGS_UINT16_ARGUMENTS)
    {
      uint8_t *byte_p = (uint8_t *) bytecode_p;
      literal_start_p = (lit_cpointer_t *) (byte_p + sizeof (cbc_uint16_arguments_t));

      cbc_uint16_arguments_t *args_p = (cbc_uint16_arguments_t *) bytecode_p;
      literal_end = args_p->literal_end;
      const_literal_end = args_p->const_literal_end;
    }
    else
    {
      uint8_t *byte_p = (uint8_t *) bytecode_p;
      literal_start_p = (lit_cpointer_t *) (byte_p + sizeof (cbc_uint8_arguments_t));

      cbc_uint8_arguments_t *args_p = (cbc_uint8_arguments_t *) bytecode_p;
      literal_end = args_p->literal_end;
      const_literal_end = args_p->const_literal_end;
    }

    for (uint32_t i = const_literal_end; i < literal_end; i++)
    {
      mem_cpointer_t bytecode_cpointer = literal_start_p[i];
      ecma_compiled_code_t *bytecode_literal_p = ECMA_GET_NON_NULL_POINTER (ecma_compiled_code_t,
                                                                            bytecode_cpointer);

      /* Self references are ignored. */
      if (bytecode_literal_p != bytecode_p)
      {
        ecma_bytecode_deref (bytecode_literal_p);
      }
    }
  }
  else
  {
#ifndef CONFIG_ECMA_COMPACT_PROFILE_DISABLE_REGEXP_BUILTIN
    re_compiled_code_t *re_bytecode_p = (re_compiled_code_t *) bytecode_p;

    ecma_deref_ecma_string (ECMA_GET_NON_NULL_POINTER (ecma_string_t, re_bytecode_p->pattern_cp));
#endif /* !CONFIG_ECMA_COMPACT_PROFILE_DISABLE_REGEXP_BUILTIN */
  }

  mem_heap_free_block (bytecode_p,
                       ((size_t) bytecode_p->size) << MEM_ALIGNMENT_LOG);
} /* ecma_bytecode_deref */
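Between const_literal_end and literal_end the literal table holds compressed pointers to the byte code of nested functions, so dropping the last reference cascades down the scope tree; self references are skipped to stop the recursion. The same cascading-release shape on a generic refcounted node (hypothetical rc_node_t with a separately allocated child array):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct rc_node_t
{
  uint32_t refs;
  struct rc_node_t **children_pp; /* assumed malloc'd separately */
  size_t children_count;
} rc_node_t;

static void
rc_deref (rc_node_t *node_p)
{
  /* Caller guarantees refs > 0, like the JERRY_ASSERT above. */
  if (--node_p->refs > 0)
  {
    return; /* still referenced elsewhere */
  }

  for (size_t i = 0; i < node_p->children_count; i++)
  {
    if (node_p->children_pp[i] != node_p) /* skip self references */
    {
      rc_deref (node_p->children_pp[i]);
    }
  }

  free (node_p->children_pp);
  free (node_p);
}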
Example #17
/**
 * Register bytecode and idx map from snapshot
 *
 * NOTE:
 *      If the is_copy flag is set, the bytecode is copied from the snapshot; otherwise the bytecode
 *      is referenced directly from the snapshot
 *
 * @return pointer to byte-code header, upon success,
 *         NULL - upon failure (i.e., in case snapshot format is not valid)
 */
const bytecode_data_header_t *
serializer_load_bytecode_with_idx_map (const uint8_t *bytecode_and_idx_map_p, /**< buffer with instructions array
                                                                               *   and idx to literals map from
                                                                               *   snapshot */
                                       uint32_t bytecode_size, /**< size of instructions array */
                                       uint32_t idx_to_lit_map_size, /**< size of the idx to literals map */
                                       const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot
                                                                                             *   literal offsets
                                                                                             *   to literal identifiers,
                                                                                             *   created in literal
                                                                                             *   storage */
                                       uint32_t literals_num, /**< number of literals */
                                       bool is_copy) /**< flag, indicating whether the passed in-snapshot data
                                                      *  should be copied to engine's memory (true),
                                                      *  or it can be referenced until engine is stopped
                                                      *  (i.e. until call to jerry_cleanup) */
{
  const uint8_t *idx_to_lit_map_p = bytecode_and_idx_map_p + bytecode_size;

  size_t instructions_number = bytecode_size / sizeof (vm_instr_t);
  size_t blocks_count = JERRY_ALIGNUP (instructions_number, BLOCK_SIZE) / BLOCK_SIZE;

  uint32_t idx_num_total;
  size_t idx_to_lit_map_offset = 0;
  if (!jrt_read_from_buffer_by_offset (idx_to_lit_map_p,
                                       idx_to_lit_map_size,
                                       &idx_to_lit_map_offset,
                                       &idx_num_total))
  {
    return NULL;
  }

  const size_t bytecode_alloc_size = JERRY_ALIGNUP (bytecode_size, MEM_ALIGNMENT);
  const size_t hash_table_size = lit_id_hash_table_get_size_for_table (idx_num_total, blocks_count);
  const size_t header_and_hash_table_size = JERRY_ALIGNUP (sizeof (bytecode_data_header_t) + hash_table_size,
                                                           MEM_ALIGNMENT);
  const size_t alloc_size = header_and_hash_table_size + (is_copy ? bytecode_alloc_size : 0);

  uint8_t *buffer_p = (uint8_t*) mem_heap_alloc_block (alloc_size, MEM_HEAP_ALLOC_LONG_TERM);
  bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p;

  vm_instr_t *instrs_p;
  vm_instr_t *snapshot_instrs_p = (vm_instr_t *) bytecode_and_idx_map_p;
  if (is_copy)
  {
    instrs_p = (vm_instr_t *) (buffer_p + header_and_hash_table_size);
    memcpy (instrs_p, snapshot_instrs_p, bytecode_size);
  }
  else
  {
    instrs_p = snapshot_instrs_p;
  }

  uint8_t *lit_id_hash_table_buffer_p = buffer_p + sizeof (bytecode_data_header_t);
  if (lit_id_hash_table_load_from_snapshot (blocks_count,
                                            idx_num_total,
                                            idx_to_lit_map_p + idx_to_lit_map_offset,
                                            idx_to_lit_map_size - idx_to_lit_map_offset,
                                            lit_map_p,
                                            literals_num,
                                            lit_id_hash_table_buffer_p,
                                            hash_table_size)
      && (vm_instr_counter_t) instructions_number == instructions_number)
  {
    MEM_CP_SET_NON_NULL_POINTER (header_p->lit_id_hash_cp, lit_id_hash_table_buffer_p);
    header_p->instrs_p = instrs_p;
    header_p->instrs_count = (vm_instr_counter_t) instructions_number;
    MEM_CP_SET_POINTER (header_p->next_header_cp, first_bytecode_header_p);

    first_bytecode_header_p = header_p;

    return header_p;
  }
  else
  {
    mem_heap_free_block (buffer_p);
    return NULL;
  }
} /* serializer_load_bytecode_with_idx_map */
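jrt_read_from_buffer_by_offset is what makes the loader robust against truncated snapshots: every read is bounds-checked and advances a shared offset, so consecutive reads stay in sync. A sketch of such a helper under assumed semantics (the real signature may differ):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool
read_from_buffer (const uint8_t *buffer_p, /**< buffer to read from */
                  size_t buffer_size, /**< total size of the buffer */
                  size_t *offset_p, /**< in-out: current read offset */
                  void *out_p, /**< out: destination */
                  size_t out_size) /**< number of bytes to read */
{
  /* Assumes *offset_p <= buffer_size, so the subtraction cannot wrap. */
  if (out_size > buffer_size - *offset_p)
  {
    return false; /* truncated or corrupt input; the caller returns NULL */
  }

  memcpy (out_p, buffer_p + *offset_p, out_size);
  *offset_p += out_size;
  return true;
}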
Example #18
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * At the first pass, collect pointers to those free chunks that are first in their pools into
   * separate lists (collection-time pool lists), and convert those chunks into headers of the corresponding pools
   */

  /*
   * Number of collection-time pool lists
   */
  constexpr uint32_t pool_lists_number = 8;

  /*
   * Collection-time pool lists
   */
  mem_pool_chunk_t *pool_lists_p[pool_lists_number];
  for (uint32_t i = 0; i < pool_lists_number; i++)
  {
    pool_lists_p[i] = NULL;
  }

  /*
   * Number of the pools, included into the lists
   */
  uint32_t pools_in_lists_number = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    if (pool_start_p == free_chunk_iter_p)
    {
      /*
       * The chunk is first at its pool
       *
       * Remove the chunk from common list of free chunks
       */
      if (prev_free_chunk_p == NULL)
      {
        JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

        mem_free_chunk_p = next_free_chunk_p;
      }
      else
      {
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
      }

      pools_in_lists_number++;

      uint8_t list_id = pools_in_lists_number % pool_lists_number;

      /*
       * Initialize the pool header and insert the pool into one of the lists
       */
      free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
      free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
      free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
      free_chunk_iter_p->u.pool_gc.list_id = list_id;

      MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, pool_lists_p[list_id]);
      pool_lists_p[list_id] = free_chunk_iter_p;
    }
    else
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  if (pools_in_lists_number == 0)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * At the second pass we check, for all remaining free chunks, whether they are in pools that were
   * included into the collection-time pool lists.
   *
   * For each such chunk, try to find the corresponding pool by iterating the list.
   *
   * If the pool is found in a list (i.e. the first chunk of the pool is free), increment the counter
   * of free chunks in the pool, and move the chunk from the global free chunks list to the collection-time
   * local list of the corresponding pool's free chunks.
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    bool is_chunk_moved_to_local_list = false;

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes in the hint_magic_num and list_id fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about using uninitialized data
     * in a conditional expression. To suppress the false-positive warning, the chunk is temporarily marked
     * as defined, and after reading the hint magic number and list identifier, the valgrind state of the
     * chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_start_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    /*
     * The magic number doesn't guarantee that the chunk is actually a pool header,
     * so it is only an optimization to reduce the number of unnecessary iterations over
     * the pool lists.
     */
    uint16_t magic_num_field = pool_start_p->u.pool_gc.hint_magic_num;
    uint8_t id_to_search_in = pool_start_p->u.pool_gc.list_id;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    if (magic_num_field == hint_magic_num_value)
    {
      /*
       * Maybe, the first chunk is free.
       *
       * If it is so, it is included in the list of pool's first free chunks.
       */

      if (id_to_search_in < pool_lists_number)
      {
        for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[id_to_search_in];
             pool_list_iter_p != NULL;
             pool_list_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                    pool_list_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_list_iter_p == pool_start_p)
          {
            /*
             * The first chunk is actually free.
             *
             * So, increment the free chunks counter in it.
             */
            pool_start_p->u.pool_gc.free_chunks_num++;

            /*
             * It is possible that the corresponding pool is empty.
             *
             * Move the current chunk from the common list of free chunks to the temporary list local to the pool.
             */
            if (prev_free_chunk_p == NULL)
            {
              JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

              mem_free_chunk_p = next_free_chunk_p;
            }
            else
            {
              prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
            }

            free_chunk_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                   pool_start_p->u.pool_gc.free_list_cp);
            MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, free_chunk_iter_p);

            is_chunk_moved_to_local_list = true;

            break;
          }
        }
      }
    }

    if (!is_chunk_moved_to_local_list)
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  /*
   * At the third pass we check each pool in the collection-time pool lists against the counted
   * number of free chunks in the pool.
   *
   * If the number is equal to the number of chunks in the pool, then the pool is empty and is freed;
   * otherwise the free chunks of the pool are returned to the common list of free chunks.
   */
  for (uint8_t list_id = 0; list_id < pool_lists_number; list_id++)
  {
    for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[list_id], *next_p;
         pool_list_iter_p != NULL;
         pool_list_iter_p = next_p)
    {
      next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                   pool_list_iter_p->u.pool_gc.next_first_cp);

      if (pool_list_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
      {
#ifndef JERRY_NDEBUG
        mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
        mem_heap_free_block (pool_list_iter_p);

        MEM_POOLS_STAT_FREE_POOL ();
      }
      else
      {
        mem_pool_chunk_t *first_chunk_p = pool_list_iter_p;

        /*
         * Convert layout of first chunk from collection-time pool header to common free chunk
         */
        first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                           pool_list_iter_p->u.pool_gc.free_list_cp);

        /*
         * Link local pool's list of free chunks into global list of free chunks
         */
        for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
             ;
             pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
        {
          JERRY_ASSERT (pool_chunks_iter_p != NULL);

          if (pool_chunks_iter_p->u.free.next_p == NULL)
          {
            pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

            break;
          }
        }

        mem_free_chunk_p = first_chunk_p;
      }
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
Example #19
void
array_list_free (array_list al)
{
  array_list_header *h = extract_header (al);
  mem_heap_free_block ((uint8_t *) h);
}
/**
 * 'Native call' opcode handler.
 */
ecma_completion_value_t
opfunc_native_call (opcode_t opdata, /**< operation data */
                    int_data_t *int_data) /**< interpreter context */
{
  const idx_t dst_var_idx = opdata.data.native_call.lhs;
  const idx_t native_call_id_idx = opdata.data.native_call.name;
  const idx_t args_number = opdata.data.native_call.arg_list;
  const opcode_counter_t lit_oc = int_data->pos;

  JERRY_ASSERT (native_call_id_idx < OPCODE_NATIVE_CALL__COUNT);

  int_data->pos++;

  JERRY_STATIC_ASSERT (OPCODE_NATIVE_CALL__COUNT < (1u << (sizeof (native_call_id_idx) * JERRY_BITSINBYTE)));

  ecma_completion_value_t ret_value = ecma_make_empty_completion_value ();

  MEM_DEFINE_LOCAL_ARRAY (arg_values, args_number, ecma_value_t);

  ecma_length_t args_read;
  ecma_completion_value_t get_arg_completion = fill_varg_list (int_data,
                                                               args_number,
                                                               arg_values,
                                                               &args_read);

  if (ecma_is_completion_value_empty (get_arg_completion))
  {
    JERRY_ASSERT (args_read == args_number);

    switch ((opcode_native_call_t)native_call_id_idx)
    {
      case OPCODE_NATIVE_CALL_LED_TOGGLE:
      case OPCODE_NATIVE_CALL_LED_ON:
      case OPCODE_NATIVE_CALL_LED_OFF:
      case OPCODE_NATIVE_CALL_LED_ONCE:
      case OPCODE_NATIVE_CALL_WAIT:
      {
        JERRY_UNIMPLEMENTED ("Device operations are not implemented.");
      }

      case OPCODE_NATIVE_CALL_PRINT:
      {
        for (ecma_length_t arg_index = 0;
             ecma_is_completion_value_empty (ret_value) && arg_index < args_read;
             arg_index++)
        {
          ECMA_TRY_CATCH (str_value,
                          ecma_op_to_string (arg_values[arg_index]),
                          ret_value);

          ecma_string_t *str_p = ecma_get_string_from_value (str_value);

          lit_utf8_size_t bytes = ecma_string_get_size (str_p);

          ssize_t utf8_str_size = (ssize_t) (bytes + 1);
          lit_utf8_byte_t *utf8_str_p = (lit_utf8_byte_t *) mem_heap_alloc_block ((size_t) utf8_str_size,
                                                                                  MEM_HEAP_ALLOC_SHORT_TERM);
          if (utf8_str_p == NULL)
          {
            jerry_fatal (ERR_OUT_OF_MEMORY);
          }

          ecma_string_to_utf8_string (str_p, utf8_str_p, utf8_str_size);
          utf8_str_p[utf8_str_size - 1] = 0;

          FIXME ("Support unicode in printf.");
          if (arg_index < args_read - 1)
          {
            printf ("%s ", (char*) utf8_str_p);
          }
          else
          {
            printf ("%s", (char*) utf8_str_p);
          }

          mem_heap_free_block (utf8_str_p);

          ret_value = set_variable_value (int_data, lit_oc, dst_var_idx,
                                          ecma_make_simple_value (ECMA_SIMPLE_VALUE_UNDEFINED));

          ECMA_FINALIZE (str_value);
        }
        printf ("\n");
        break;
      }

      case OPCODE_NATIVE_CALL__COUNT:
      {
        JERRY_UNREACHABLE ();
      }
    }
  }
  else
  {
    JERRY_ASSERT (!ecma_is_completion_value_normal (get_arg_completion));

    ret_value = get_arg_completion;
  }

  for (ecma_length_t arg_index = 0;
       arg_index < args_read;
       arg_index++)
  {
    ecma_free_value (arg_values[arg_index], true);
  }

  MEM_FINALIZE_LOCAL_ARRAY (arg_values);

  return ret_value;
} /* opfunc_native_call */
Example #21
/**
 * Register bytecode and supplementary data of a single scope from snapshot
 *
 * NOTE:
 *      If the is_copy flag is set, the bytecode is copied from the snapshot; otherwise the bytecode
 *      is referenced directly from the snapshot
 *
 * @return pointer to byte-code header, upon success,
 *         NULL - upon failure (i.e., in case snapshot format is not valid)
 */
static bytecode_data_header_t *
bc_load_bytecode_with_idx_map (const uint8_t *snapshot_data_p, /**< buffer with instructions array
                                                                *   and idx to literals map from
                                                                *   snapshot */
                               size_t snapshot_size, /**< remaining size of snapshot */
                               const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot
                                                                                     *   literal offsets
                                                                                     *   to literal identifiers,
                                                                                     *   created in literal
                                                                                     *   storage */
                               uint32_t literals_num, /**< number of literals */
                               bool is_copy, /**< flag, indicating whether the passed in-snapshot data
                                              *  should be copied to engine's memory (true),
                                              *  or it can be referenced until engine is stopped
                                              *  (i.e. until call to jerry_cleanup) */
                               uint32_t *out_bytecode_data_size) /**< out: size occupied by bytecode data
                                                                  *   in snapshot */
{
  size_t buffer_offset = 0;
  jerry_snapshot_bytecode_header_t bytecode_header;
  if (!jrt_read_from_buffer_by_offset (snapshot_data_p,
                                       snapshot_size,
                                       &buffer_offset,
                                       &bytecode_header,
                                       sizeof (bytecode_header)))
  {
    return NULL;
  }

  *out_bytecode_data_size = bytecode_header.size;

  buffer_offset += (JERRY_ALIGNUP (sizeof (jerry_snapshot_bytecode_header_t), MEM_ALIGNMENT)
                    - sizeof (jerry_snapshot_bytecode_header_t));

  JERRY_ASSERT (bytecode_header.size <= snapshot_size);

  /* Read uid->lit_cp hash table size */
  const uint8_t *idx_to_lit_map_p = (snapshot_data_p
                                     + buffer_offset
                                     + bytecode_header.instrs_size
                                     + bytecode_header.var_decls_count * sizeof (uint32_t));

  size_t instructions_number = bytecode_header.instrs_size / sizeof (vm_instr_t);
  size_t blocks_count = JERRY_ALIGNUP (instructions_number, BLOCK_SIZE) / BLOCK_SIZE;

  uint32_t idx_num_total;
  size_t idx_to_lit_map_offset = 0;
  if (!jrt_read_from_buffer_by_offset (idx_to_lit_map_p,
                                       bytecode_header.idx_to_lit_map_size,
                                       &idx_to_lit_map_offset,
                                       &idx_num_total,
                                       sizeof (idx_num_total)))
  {
    return NULL;
  }

  /* Alloc bytecode_header for runtime */
  const size_t bytecode_alloc_size = JERRY_ALIGNUP (bytecode_header.instrs_size, MEM_ALIGNMENT);
  const size_t hash_table_size = lit_id_hash_table_get_size_for_table (idx_num_total, blocks_count);
  const size_t declarations_area_size = JERRY_ALIGNUP (bytecode_header.func_scopes_count * sizeof (mem_cpointer_t)
                                                       + bytecode_header.var_decls_count * sizeof (lit_cpointer_t),
                                                       MEM_ALIGNMENT);
  const size_t header_and_tables_size = JERRY_ALIGNUP ((sizeof (bytecode_data_header_t)
                                                        + hash_table_size
                                                        + declarations_area_size),
                                                       MEM_ALIGNMENT);
  const size_t alloc_size = header_and_tables_size + (is_copy ? bytecode_alloc_size : 0);

  uint8_t *buffer_p = (uint8_t*) mem_heap_alloc_block (alloc_size, MEM_HEAP_ALLOC_LONG_TERM);
  bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p;

  vm_instr_t *instrs_p;
  vm_instr_t *snapshot_instrs_p = (vm_instr_t *) (snapshot_data_p + buffer_offset);
  if (is_copy)
  {
    instrs_p = (vm_instr_t *) (buffer_p + header_and_tables_size);
    memcpy (instrs_p, snapshot_instrs_p, bytecode_header.instrs_size);
  }
  else
  {
    instrs_p = snapshot_instrs_p;
  }

  buffer_offset += bytecode_header.instrs_size; /* buffer_offset is now offset of variable declarations */

  /* Read uid->lit_cp hash table */
  uint8_t *lit_id_hash_table_buffer_p = buffer_p + sizeof (bytecode_data_header_t);
  if (!(lit_id_hash_table_load_from_snapshot (blocks_count,
                                               idx_num_total,
                                               idx_to_lit_map_p + idx_to_lit_map_offset,
                                               bytecode_header.idx_to_lit_map_size - idx_to_lit_map_offset,
                                               lit_map_p,
                                               literals_num,
                                               lit_id_hash_table_buffer_p,
                                               hash_table_size)
        && (vm_instr_counter_t) instructions_number == instructions_number))
  {
    mem_heap_free_block (buffer_p);
    return NULL;
  }

  /* Fill with NULLs child scopes declarations for this scope */
  mem_cpointer_t *declarations_p = (mem_cpointer_t *) (buffer_p + sizeof (bytecode_data_header_t) + hash_table_size);
  memset (declarations_p, 0, bytecode_header.func_scopes_count * sizeof (mem_cpointer_t));

  /* Read variable declarations for this scope */
  lit_cpointer_t *var_decls_p = (lit_cpointer_t *) (declarations_p + bytecode_header.func_scopes_count);
  for (uint32_t i = 0; i < bytecode_header.var_decls_count; i++)
  {
    uint32_t lit_offset_from_snapshot;
    if (!jrt_read_from_buffer_by_offset (snapshot_data_p,
                                         buffer_offset + bytecode_header.var_decls_count * sizeof (uint32_t),
                                         &buffer_offset,
                                         &lit_offset_from_snapshot,
                                         sizeof (lit_offset_from_snapshot)))
    {
      mem_heap_free_block (buffer_p);
      return NULL;
    }
    /**
     * TODO: implement binary search here
     */
    lit_cpointer_t lit_cp = NOT_A_LITERAL;
    uint32_t j;
    for (j = 0; j < literals_num; j++)
    {
      if (lit_map_p[j].literal_offset == lit_offset_from_snapshot)
      {
        lit_cp.packed_value = lit_map_p[j].literal_id.packed_value;
        break;
      }
    }

    if (j == literals_num)
    {
      mem_heap_free_block (buffer_p);
      return NULL;
    }

    var_decls_p[i] = lit_cp;
  }

  /* Fill bytecode_data_header */
  bc_fill_bytecode_data_header (header_p,
                                (lit_id_hash_table *) lit_id_hash_table_buffer_p,
                                instrs_p,
                                declarations_p,
                                (uint16_t) bytecode_header.func_scopes_count,
                                (uint16_t) bytecode_header.var_decls_count,
                                bytecode_header.is_strict,
                                bytecode_header.is_ref_arguments_identifier,
                                bytecode_header.is_ref_eval_identifier,
                                bytecode_header.is_args_moved_to_regs,
                                bytecode_header.is_args_moved_to_regs,
                                bytecode_header.is_no_lex_env);

  return header_p;
} /* bc_load_bytecode_with_idx_map */
Example #22
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
    /*
     * Hint magic number in header of pools with free pool-first chunks
     */
    const uint16_t hint_magic_num_value = 0x7e89;

    /*
     * Collection-time chunk lists
     */
    mem_pool_chunk_t *first_chunks_list_p = NULL;
    mem_pool_chunk_t *non_first_chunks_list_p = NULL;

    /*
     * At the first stage, collect free pool-first chunks into separate collection-time lists
     * and change their layout from mem_pool_chunk_t::u::free to mem_pool_chunk_t::u::pool_gc
     */
    {
        mem_pool_chunk_t tmp_header;
        tmp_header.u.free.next_p = mem_free_chunk_p;

        for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                *prev_free_chunk_p = &tmp_header,
                *next_free_chunk_p;
                free_chunk_iter_p != NULL;
                free_chunk_iter_p = next_free_chunk_p)
        {
            mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

            VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

            next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

            if (pool_start_p == free_chunk_iter_p)
            {
                /*
                 * The chunk is first at its pool
                 *
                 * Remove the chunk from common list of free chunks
                 */
                prev_free_chunk_p->u.free.next_p = next_free_chunk_p;

                /*
                 * Initialize the pool-first chunk as a pool header and insert it into the list of free pool-first chunks
                 */
                free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
                free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
                free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
                free_chunk_iter_p->u.pool_gc.traversal_check_flag = false;

                MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, first_chunks_list_p);
                first_chunks_list_p = free_chunk_iter_p;
            }
            else
            {
                prev_free_chunk_p = free_chunk_iter_p;
            }
        }

        mem_free_chunk_p = tmp_header.u.free.next_p;
    }

    if (first_chunks_list_p == NULL)
    {
        /* there are no empty pools */

        return;
    }

    /*
     * At the second stage we collect all free non-pool-first chunks whose corresponding pool-first chunks are free,
     * and link them into the corresponding mem_pool_chunk_t::u::pool_gc::free_list_cp list, while also maintaining
     * the corresponding mem_pool_chunk_t::u::pool_gc::free_chunks_num:
     *  - at first, for each non-pool-first free chunk we check whether the traversal check flag is cleared in the
     *    corresponding first chunk of the same pool, and move those chunks, for which the condition holds,
     *    to a separate temporary list.
     *
     *  - then, we flip the traversal check flags in each of the free pool-first chunks.
     *
     *  - at last, we perform almost the same as at the first step, but check only the non-pool-first chunks from the
     *    temporary list, and send the chunks, for which the corresponding traversal check flag is cleared, back to
     *    the common list of free chunks; the remaining chunks from the temporary list are linked to the corresponding
     *    pool-first chunks. Also, a counter of the linked free chunks is maintained in every free pool-first chunk.
     */
    {
        {
            mem_pool_chunk_t tmp_header;
            tmp_header.u.free.next_p = mem_free_chunk_p;

            for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                    *prev_free_chunk_p = &tmp_header,
                    *next_free_chunk_p;
                    free_chunk_iter_p != NULL;
                    free_chunk_iter_p = next_free_chunk_p)
            {
                mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

                next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

                /*
                 * The magic number doesn't guarantee that the chunk is actually a free pool-first chunk,
                 * so we test the traversal check flag after flipping values of the flags in every
                 * free pool-first chunk.
                 */
                uint16_t magic_num_field;
                bool traversal_check_flag;

                mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

                /*
                 * During this traversal the flag in the free header chunks is in cleared state
                 */
                if (!traversal_check_flag
                        && magic_num_field == hint_magic_num_value)
                {
                    free_chunk_iter_p->u.free.next_p = non_first_chunks_list_p;
                    non_first_chunks_list_p = free_chunk_iter_p;

                    prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
                }
                else
                {
                    prev_free_chunk_p = free_chunk_iter_p;
                }
            }

            mem_free_chunk_p = tmp_header.u.free.next_p;
        }

        {
            /*
             * Now, flip the traversal check flag in free pool-first chunks
             */
            for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
                    first_chunks_iter_p != NULL;
                    first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                          first_chunks_iter_p->u.pool_gc.next_first_cp))
            {
                JERRY_ASSERT (!first_chunks_iter_p->u.pool_gc.traversal_check_flag);

                first_chunks_iter_p->u.pool_gc.traversal_check_flag = true;
            }
        }

        {
            for (mem_pool_chunk_t *non_first_chunks_iter_p = non_first_chunks_list_p, *next_p;
                    non_first_chunks_iter_p != NULL;
                    non_first_chunks_iter_p = next_p)
            {
                next_p = non_first_chunks_iter_p->u.free.next_p;

                mem_pool_chunk_t *pool_start_p;
                pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (non_first_chunks_iter_p);

                uint16_t magic_num_field;
                bool traversal_check_flag;

                mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

                JERRY_ASSERT (magic_num_field == hint_magic_num_value);

#ifndef JERRY_DISABLE_HEAVY_DEBUG
                bool is_occurred = false;

                for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
                        first_chunks_iter_p != NULL;
                        first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                              first_chunks_iter_p->u.pool_gc.next_first_cp))
                {
                    if (pool_start_p == first_chunks_iter_p)
                    {
                        is_occurred = true;
                        break;
                    }
                }

                JERRY_ASSERT (is_occurred == traversal_check_flag);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */

                /*
                 * During this traversal the flag in the free header chunks is in set state
                 *
                 * If the flag is set, it is guaranteed that the pool-first chunk,
                 * from the same pool, as the current non-pool-first chunk, is free
                 * and is placed in the corresponding list of free pool-first chunks.
                 */
                if (traversal_check_flag)
                {
                    pool_start_p->u.pool_gc.free_chunks_num++;

                    non_first_chunks_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                            pool_start_p->u.pool_gc.free_list_cp);
                    MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, non_first_chunks_iter_p);
                }
                else
                {
                    non_first_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
                    mem_free_chunk_p = non_first_chunks_iter_p;
                }
            }
        }

        non_first_chunks_list_p = NULL;
    }

    /*
     * At the third stage we check each free pool-first chunk in the collection-time list against the counted
     * number of free chunks in the pool containing the chunk.
     *
     * If the number is equal to the number of chunks in the pool, then the pool is empty and is freed;
     * otherwise the free chunks of the pool are returned to the common list of free chunks.
     */
    for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p, *next_p;
            first_chunks_iter_p != NULL;
            first_chunks_iter_p = next_p)
    {
        next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                     first_chunks_iter_p->u.pool_gc.next_first_cp);

        JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.hint_magic_num == hint_magic_num_value);
        JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.traversal_check_flag);
        JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.free_chunks_num <= MEM_POOL_CHUNKS_NUMBER);

        if (first_chunks_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
        {
#ifndef JERRY_NDEBUG
            mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

            MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
            mem_heap_free_block (first_chunks_iter_p);

            MEM_POOLS_STAT_FREE_POOL ();
        }
        else
        {
            mem_pool_chunk_t *first_chunk_p = first_chunks_iter_p;

            /*
             * Convert the first chunk from the collection-time pool-first layout to the common free chunk layout
             */
            first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                           first_chunks_iter_p->u.pool_gc.free_list_cp);

            /*
             * Link local pool's list of free chunks into the common list of free chunks
             */
            for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
                    ;
                    pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
            {
                JERRY_ASSERT (pool_chunks_iter_p != NULL);

                if (pool_chunks_iter_p->u.free.next_p == NULL)
                {
                    pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

                    break;
                }
            }

            mem_free_chunk_p = first_chunk_p;
        }
    }

#ifdef JERRY_VALGRIND
    /*
     * Valgrind-mode specific pass that marks all free chunks inaccessible
     */
    for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
            free_chunk_iter_p != NULL;
            free_chunk_iter_p = next_free_chunk_p)
    {
        next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

        VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
    }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
Example #23
/**
 * Long path for mem_pools_alloc
 */
static void __attr_noinline___
mem_pools_alloc_longpath (void)
{
    mem_check_pools ();

    JERRY_ASSERT (mem_free_chunk_p == NULL);

    JERRY_ASSERT (MEM_POOL_SIZE <= mem_heap_get_chunked_block_data_size ());
    JERRY_ASSERT (MEM_POOL_CHUNKS_NUMBER >= 1);

    MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t*) mem_heap_alloc_chunked_block (MEM_HEAP_ALLOC_LONG_TERM);

    if (mem_free_chunk_p != NULL)
    {
        /* some chunks were freed due to GC invoked by heap allocator */
        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
        mem_heap_free_block (pool_start_p);

        return;
    }

#ifndef JERRY_NDEBUG
    mem_free_chunks_number += MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

    JERRY_STATIC_ASSERT (MEM_POOL_CHUNK_SIZE % MEM_ALIGNMENT == 0);
    JERRY_STATIC_ASSERT (sizeof (mem_pool_chunk_t) == MEM_POOL_CHUNK_SIZE);
    JERRY_STATIC_ASSERT (sizeof (mem_pool_chunk_index_t) <= MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT ((mem_pool_chunk_index_t) MEM_POOL_CHUNKS_NUMBER == MEM_POOL_CHUNKS_NUMBER);
    JERRY_ASSERT (MEM_POOL_SIZE == MEM_POOL_CHUNKS_NUMBER * MEM_POOL_CHUNK_SIZE);

    JERRY_ASSERT (((uintptr_t) pool_start_p) % MEM_ALIGNMENT == 0);

    mem_pool_chunk_t *prev_free_chunk_p = NULL;

    for (mem_pool_chunk_index_t chunk_index = 0;
            chunk_index < MEM_POOL_CHUNKS_NUMBER;
            chunk_index++)
    {
        mem_pool_chunk_t *chunk_p = pool_start_p + chunk_index;

        if (prev_free_chunk_p != NULL)
        {
            prev_free_chunk_p->u.free.next_p = chunk_p;
        }

        prev_free_chunk_p = chunk_p;
    }

    prev_free_chunk_p->u.free.next_p = NULL;

#ifdef JERRY_VALGRIND
    for (mem_pool_chunk_index_t chunk_index = 0;
            chunk_index < MEM_POOL_CHUNKS_NUMBER;
            chunk_index++)
    {
        mem_pool_chunk_t *chunk_p = pool_start_p + chunk_index;

        VALGRIND_NOACCESS_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);
    }
#endif /* JERRY_VALGRIND */

    mem_free_chunk_p = pool_start_p;

    MEM_POOLS_STAT_ALLOC_POOL ();

    mem_check_pools ();
} /* mem_pools_alloc_longpath */
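The chunk-threading loop above builds the pool's free list in place, using the first word of each chunk as the link. The same construction on a plain malloc'd region (hypothetical names; assumes chunk_size >= sizeof (chunk_t) and chunks_number >= 1):

#include <stddef.h>
#include <stdint.h>

typedef struct chunk_t
{
  struct chunk_t *next_p;
} chunk_t;

static chunk_t *
pool_build_free_list (void *pool_p, size_t chunk_size, size_t chunks_number)
{
  uint8_t *base_p = (uint8_t *) pool_p;

  /* Link each chunk's first word to the next chunk. */
  for (size_t i = 0; i + 1 < chunks_number; i++)
  {
    ((chunk_t *) (base_p + i * chunk_size))->next_p = (chunk_t *) (base_p + (i + 1) * chunk_size);
  }

  /* Terminate the list at the last chunk. */
  ((chunk_t *) (base_p + (chunks_number - 1) * chunk_size))->next_p = NULL;

  return (chunk_t *) base_p;
}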
Example #24
/**
 * Compilation of RegExp bytecode
 *
 * @return completion value
 *         Returned value must be freed with ecma_free_completion_value
 */
ecma_completion_value_t
re_compile_bytecode (re_bytecode_t **out_bytecode_p, /**< out: pointer to bytecode */
                     ecma_string_t *pattern_str_p, /**< pattern */
                     uint8_t flags) /**< flags */
{
  ecma_completion_value_t ret_value = ecma_make_empty_completion_value ();
  re_compiler_ctx_t re_ctx;
  re_ctx.flags = flags;
  re_ctx.highest_backref = 0;
  re_ctx.num_of_non_captures = 0;

  re_bytecode_ctx_t bc_ctx;
  bc_ctx.block_start_p = NULL;
  bc_ctx.block_end_p = NULL;
  bc_ctx.current_p = NULL;

  re_ctx.bytecode_ctx_p = &bc_ctx;

  lit_utf8_size_t pattern_str_size = ecma_string_get_size (pattern_str_p);
  MEM_DEFINE_LOCAL_ARRAY (pattern_start_p, pattern_str_size, lit_utf8_byte_t);

  ecma_string_to_utf8_string (pattern_str_p, pattern_start_p, (ssize_t) pattern_str_size);
  lit_utf8_iterator_t iter = lit_utf8_iterator_create (pattern_start_p, pattern_str_size);

  re_parser_ctx_t parser_ctx;
  parser_ctx.iter = iter;
  parser_ctx.num_of_groups = -1;
  re_ctx.parser_ctx_p = &parser_ctx;

  /* 1. Parse RegExp pattern */
  re_ctx.num_of_captures = 1;
  re_append_opcode (&bc_ctx, RE_OP_SAVE_AT_START);

  ECMA_TRY_CATCH (empty, re_parse_alternative (&re_ctx, true), ret_value);

  /* 2. Check for invalid backreference */
  if (re_ctx.highest_backref >= re_ctx.num_of_captures)
  {
    ret_value = ecma_raise_syntax_error ("Invalid backreference.\n");
  }
  else
  {
    re_append_opcode (&bc_ctx, RE_OP_SAVE_AND_MATCH);
    re_append_opcode (&bc_ctx, RE_OP_EOF);

    /* 3. Insert extra information for the bytecode header */
    re_insert_u32 (&bc_ctx, 0, (uint32_t) re_ctx.num_of_non_captures);
    re_insert_u32 (&bc_ctx, 0, (uint32_t) re_ctx.num_of_captures * 2);
    re_insert_u32 (&bc_ctx, 0, (uint32_t) re_ctx.flags);
  }
  ECMA_FINALIZE (empty);

  MEM_FINALIZE_LOCAL_ARRAY (pattern_start_p);

  if (!ecma_is_completion_value_empty (ret_value))
  {
    /* Compilation failed, free bytecode. */
    mem_heap_free_block (bc_ctx.block_start_p);
    *out_bytecode_p = NULL;
  }
  else
  {
    /* The RegExp bytecode contains at least a RE_OP_SAVE_AT_START opcode, so it cannot be NULL. */
    JERRY_ASSERT (bc_ctx.block_start_p != NULL);
    *out_bytecode_p = bc_ctx.block_start_p;
  }

#ifdef JERRY_ENABLE_LOG
  re_dump_bytecode (&bc_ctx);
#endif /* JERRY_ENABLE_LOG */

  return ret_value;
} /* re_compile_bytecode */