Example #1
/**
 * Try to free some memory (depending on severity).
 */
void
ecma_try_to_give_back_some_memory (mem_try_give_memory_back_severity_t severity) /**< severity of
                                                                                  *   the request */
{
  if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
  {
    /*
     * If enough new objects have been allocated since the last GC, it is probably worthwhile to start GC now.
     * Otherwise, the probability of freeing sufficient space is considered low.
     */
    if (ecma_gc_new_objects_since_last_gc * CONFIG_ECMA_GC_NEW_OBJECTS_SHARE_TO_START_GC > ecma_gc_objects_number)
    {
      ecma_gc_run ();
    }
  }
  else if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_MEDIUM
           || severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH)
  {
    ecma_gc_run ();
  }
  else
  {
    JERRY_ASSERT (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_CRITICAL);

    /* Freeing as much memory as we currently can */
    ecma_lcache_invalidate_all ();

    ecma_gc_run ();
  }
} /* ecma_try_to_give_back_some_memory */
/**
 * Finalize ECMA components
 */
void
ecma_finalize (void)
{
  mem_unregister_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);

  ecma_finalize_environment ();
  ecma_lcache_invalidate_all ();
  ecma_finalize_builtins ();
  ecma_gc_run ();
} /* ecma_finalize */
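
The LOW-severity branch above only triggers a collection once enough objects have been allocated since the last GC. Below is a minimal, self-contained sketch of that threshold check; the share value of 16 is a hypothetical stand-in for CONFIG_ECMA_GC_NEW_OBJECTS_SHARE_TO_START_GC, whose real value may differ.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical share value, for illustration only. */
#define NEW_OBJECTS_SHARE_TO_START_GC 16

/* Same shape as the LOW-severity check above: start GC once the objects
 * allocated since the last collection exceed 1/16 of all tracked objects. */
static bool
should_start_gc (size_t new_objects_since_last_gc, size_t objects_number)
{
  return new_objects_since_last_gc * NEW_OBJECTS_SHARE_TO_START_GC > objects_number;
}

int
main (void)
{
  printf ("%d\n", should_start_gc (10, 200)); /* 10 * 16 = 160 <= 200 -> 0, skip GC */
  printf ("%d\n", should_start_gc (20, 200)); /* 20 * 16 = 320 >  200 -> 1, run GC  */
  return 0;
}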
Example #3
/**
 * Try to free some memory (depending on severity).
 */
void
ecma_free_unused_memory (jmem_free_unused_memory_severity_t severity) /**< severity of the request */
{
  if (severity == JMEM_FREE_UNUSED_MEMORY_SEVERITY_LOW)
  {
    /*
     * If enough new objects have been allocated since the last GC, it is probably worthwhile to start GC now.
     * Otherwise, the probability of freeing sufficient space is considered low.
     */
    if (ecma_gc_new_objects_since_last_gc * CONFIG_ECMA_GC_NEW_OBJECTS_SHARE_TO_START_GC > ecma_gc_objects_number)
    {
      ecma_gc_run ();
    }
  }
  else
  {
    JERRY_ASSERT (severity == JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH);

    /* Freeing as much memory as we currently can */
    ecma_gc_run ();
  }
} /* ecma_free_unused_memory */
Example #4
/**
 * Try to free some memory (depending on severity).
 */
void
ecma_try_to_give_back_some_memory (mem_try_give_memory_back_severity_t severity) /**< severity of
                                                                                  *   the request */
{
  if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
  {
    ecma_gc_run ();
  }
  else if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_MEDIUM
           || severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH)
  {
    /* We have already run a simple GC, as requests come in ascending severity order */
  }
  else
  {
    JERRY_ASSERT (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_CRITICAL);

    /* Freeing as much memory as we currently can */
    ecma_lcache_invalidate_all ();

    ecma_gc_run ();
  }
} /* ecma_try_to_give_back_some_memory */
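
Example #4 relies on the allocator invoking the callback with ascending severities, so the MEDIUM/HIGH branch can be a no-op. The following self-contained sketch illustrates that protocol with hypothetical names (register_low_memory_callback, alloc_with_pressure); it is not JerryScript's actual allocator code.

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical types and names, for illustration only. */
typedef enum { SEVERITY_LOW, SEVERITY_HIGH, SEVERITY_CRITICAL } severity_t;
typedef void (*low_memory_callback_t) (severity_t severity);

static low_memory_callback_t low_memory_cb = NULL;

static void
register_low_memory_callback (low_memory_callback_t cb)
{
  low_memory_cb = cb;
}

/* On allocation failure, ask the registered callback to release memory,
 * escalating the severity between retries, so the callback always sees
 * requests in ascending severity order. */
static void *
alloc_with_pressure (size_t size)
{
  static const severity_t order[] = { SEVERITY_LOW, SEVERITY_HIGH, SEVERITY_CRITICAL };
  void *p = malloc (size);

  for (size_t i = 0; p == NULL && low_memory_cb != NULL && i < sizeof (order) / sizeof (order[0]); i++)
  {
    low_memory_cb (order[i]);
    p = malloc (size);
  }

  return p;
}

static void
dummy_callback (severity_t severity)
{
  (void) severity; /* a real callback would invalidate caches / run GC here */
}

int
main (void)
{
  register_low_memory_callback (dummy_callback);

  void *p = alloc_with_pressure (64);
  free (p);
  return 0;
}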
Example #5
/**
 * Run the interpreter loop using the specified context
 *
 * Note:
 *      The interpreter loop stops upon receiving a completion value that is not a normal completion value
 *      (a jump completion whose target lies within the current run scope resumes the loop).
 *
 * @return If the received completion value is not a meta completion value (ECMA_COMPLETION_TYPE_META),
 *          the completion value is returned as is;
 *         Otherwise, the completion value is discarded and a normal empty completion value is returned.
 */
ecma_completion_value_t
vm_loop (vm_frame_ctx_t *frame_ctx_p, /**< interpreter context */
         vm_run_scope_t *run_scope_p) /**< current run scope,
                                       *   or NULL - if there is no active run scope */
{
  ecma_completion_value_t completion;

#ifdef MEM_STATS
  mem_heap_stats_t heap_stats_before;
  mem_pools_stats_t pools_stats_before;

  memset (&heap_stats_before, 0, sizeof (heap_stats_before));
  memset (&pools_stats_before, 0, sizeof (pools_stats_before));
#endif /* MEM_STATS */

  while (true)
  {
    do
    {
      JERRY_ASSERT (run_scope_p == NULL
                    || (run_scope_p->start_oc <= frame_ctx_p->pos
                        && frame_ctx_p->pos <= run_scope_p->end_oc));

      const vm_instr_t *curr = &frame_ctx_p->instrs_p[frame_ctx_p->pos];

#ifdef MEM_STATS
      const vm_instr_counter_t instr_pos = frame_ctx_p->pos;

      interp_mem_stats_opcode_enter (frame_ctx_p->instrs_p,
                                     instr_pos,
                                     &heap_stats_before,
                                     &pools_stats_before);
#endif /* MEM_STATS */

      completion = __opfuncs[curr->op_idx] (*curr, frame_ctx_p);

#ifdef CONFIG_VM_RUN_GC_AFTER_EACH_OPCODE
      ecma_gc_run ();
#endif /* CONFIG_VM_RUN_GC_AFTER_EACH_OPCODE */

#ifdef MEM_STATS
      interp_mem_stats_opcode_exit (frame_ctx_p,
                                    instr_pos,
                                    &heap_stats_before,
                                    &pools_stats_before);
#endif /* MEM_STATS */

      JERRY_ASSERT (!ecma_is_completion_value_normal (completion)
                    || ecma_is_completion_value_empty (completion));
    }
    while (ecma_is_completion_value_normal (completion));

    if (ecma_is_completion_value_jump (completion))
    {
      vm_instr_counter_t target = ecma_get_jump_target_from_completion_value (completion);

      /*
       * TODO:
       *      Implement instantiation of run scopes for global scope, functions and eval scope.
       *      Currently, correctness of jumps without run scope set is guaranteed through byte-code semantics.
       */
      if (run_scope_p == NULL /* if no run scope set */
          || (target >= run_scope_p->start_oc /* or target is within the current run scope */
              && target <= run_scope_p->end_oc))
      {
        frame_ctx_p->pos = target;

        continue;
      }
    }

    if (ecma_is_completion_value_meta (completion))
    {
      completion = ecma_make_empty_completion_value ();
    }

    return completion;
  }
} /* vm_loop */
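
vm_loop dispatches through a handler table (__opfuncs), keeps executing instructions while they complete normally, and re-enters the loop for jump completions whose target is inside the run scope. The toy program below mirrors only that control-flow skeleton with made-up types (instr_t, completion_t); it omits run scopes, memory statistics, and everything else the real loop does.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, heavily simplified stand-ins for vm_instr_t and
 * ecma_completion_value_t. */
typedef enum { COMPLETION_NORMAL, COMPLETION_JUMP, COMPLETION_META } completion_type_t;

typedef struct
{
  completion_type_t type;
  size_t jump_target; /* meaningful only for COMPLETION_JUMP */
} completion_t;

typedef struct
{
  size_t op_idx;  /* index into the handler table */
  size_t operand; /* jump target for the toy jump opcode */
} instr_t;

typedef completion_t (*opfunc_t) (instr_t instr);

static completion_t
op_nop (instr_t instr)
{
  (void) instr;
  printf ("nop\n");
  return (completion_t) { COMPLETION_NORMAL, 0 };
}

static completion_t
op_jump (instr_t instr)
{
  return (completion_t) { COMPLETION_JUMP, instr.operand };
}

static completion_t
op_end (instr_t instr)
{
  (void) instr;
  return (completion_t) { COMPLETION_META, 0 };
}

/* Handler table, analogous to __opfuncs above. */
static const opfunc_t opfuncs[] = { op_nop, op_jump, op_end };

static void
toy_loop (const instr_t *instrs, size_t pos)
{
  while (true)
  {
    completion_t completion;

    /* Inner loop: keep dispatching while handlers report normal completion. */
    do
    {
      completion = opfuncs[instrs[pos].op_idx] (instrs[pos]);
      pos++;
    }
    while (completion.type == COMPLETION_NORMAL);

    /* A jump completion updates the position and resumes dispatching. */
    if (completion.type == COMPLETION_JUMP)
    {
      pos = completion.jump_target;
      continue;
    }

    /* Everything else (a meta completion here) ends the loop. */
    return;
  }
}

int
main (void)
{
  /* 0: nop, 1: jump to 3, 2: nop (skipped), 3: end */
  const instr_t program[] = { { 0, 0 }, { 1, 3 }, { 0, 0 }, { 2, 0 } };

  toy_loop (program, 0);
  return 0;
}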