Example #1
void *m_malloc_maybe(size_t num_bytes) {
    void *ptr = malloc(num_bytes);
#if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
#endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
Example #2
void *m_malloc_maybe(size_t num_bytes) {
    void *ptr = malloc(num_bytes);
    if (ptr == NULL) {
        return NULL;
    }
#if MICROPY_MEM_STATS
    total_bytes_allocated += num_bytes;
    current_bytes_allocated += num_bytes;
    UPDATE_PEAK();
#endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
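Unlike Example #3 below, which calls m_malloc_fail() when the allocation fails, the _maybe variants simply return NULL and leave recovery to the caller. A minimal caller sketch, assuming only the snippet above (grow_buffer is a hypothetical helper, not from the source):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller: the "_maybe" variant reports failure by returning NULL
// instead of aborting, so the caller decides how to recover.
static bool grow_buffer(uint8_t **buf, size_t len) {
    uint8_t *p = m_malloc_maybe(len); // may return NULL
    if (p == NULL) {
        return false; // e.g. retry with a smaller size, or report the error
    }
    *buf = p;
    return true;
}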
Example #3
void *m_malloc_with_finaliser(size_t num_bytes) {
    void *ptr = malloc_with_finaliser(num_bytes);
    if (ptr == NULL && num_bytes != 0) {
        return m_malloc_fail(num_bytes);
    }
#if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
#endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
Example #4
void *m_malloc0(int num_bytes) {
    if (num_bytes == 0) {
        return NULL;
    }
    void *ptr = calloc(1, num_bytes);
    if (ptr == NULL) {
        printf("could not allocate memory, allocating %d bytes\n", num_bytes);
        return NULL;
    }
#if MICROPY_MEM_STATS
    total_bytes_allocated += num_bytes;
    current_bytes_allocated += num_bytes;
    UPDATE_PEAK();
#endif
    DEBUG_printf("malloc0 %d : %p\n", num_bytes, ptr);
    return ptr;
}
Example #5
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
    void *new_ptr = realloc(ptr, new_num_bytes);
    if (new_ptr == NULL) {
        return NULL;
    }
#if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    size_t diff = new_num_bytes - old_num_bytes;
    total_bytes_allocated += diff;
    current_bytes_allocated += diff;
    UPDATE_PEAK();
#endif
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    return new_ptr;
}
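A short, self-contained trace of the arithmetic described in the comment above: a 2K block shrunk to 1K and grown back to 2K leaves both counters at 2K because the (unsigned) diff is applied for shrinks as well as growths. The sizes below are made up for illustration; only the counter logic mirrors the snippet.

#include <stdio.h>
#include <stddef.h>

int main(void) {
    size_t total = 0, current = 0;
    // {old_num_bytes, new_num_bytes} for three hypothetical reallocs
    size_t steps[][2] = { {0, 2048}, {2048, 1024}, {1024, 2048} };
    for (size_t i = 0; i < 3; i++) {
        // Same unsigned arithmetic as the snippet: a shrink wraps around,
        // but adding the wrapped diff still subtracts the right amount.
        size_t diff = steps[i][1] - steps[i][0];
        total += diff;
        current += diff;
        printf("old=%zu new=%zu -> total=%zu current=%zu\n",
               steps[i][0], steps[i][1], total, current);
    }
    // Final line prints total=2048 current=2048, not the 3072 that counting
    // only positive increments would give.
    return 0;
}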
Example #6
#if MICROPY_MEM_STATS // guard assumed: old_num_bytes is only needed by the stats block below
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move) {
#else
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
#endif
    void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
#if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    // Also, don't count failed reallocs.
    if (!(new_ptr == NULL && new_num_bytes != 0)) {
        size_t diff = new_num_bytes - old_num_bytes;
        MP_STATE_MEM(total_bytes_allocated) += diff;
        MP_STATE_MEM(current_bytes_allocated) += diff;
        UPDATE_PEAK();
    }
#endif
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    return new_ptr;
}
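All of the MicroPython examples above call an UPDATE_PEAK() helper that is not shown. A minimal sketch of what it could look like for the MP_STATE_MEM variants, assuming a peak_bytes_allocated field alongside the two counters shown above (the field name is an assumption, not taken from these snippets); the variants that use bare globals would compare those globals directly instead:

#if MICROPY_MEM_STATS
// Sketch only: keep the high-water mark of current_bytes_allocated.
#define UPDATE_PEAK() \
    do { \
        if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) { \
            MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); \
        } \
    } while (0)
#endif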
void SharedUserData::task() {
#ifdef AZ_PROXIED
  // Static variables store peak values seen during the life of the run.
  static volatile sud_jvm_heap_rev1_t peak_jvm_heap;
  static sud_io_rev1_t io_stats;
  static volatile bool initialized = false;
  if (!initialized) {
    memset ((void*)(&peak_jvm_heap), 0, sizeof(peak_jvm_heap));
    initialized = true;
  }

  if (SafepointSynchronize::is_at_safepoint()) return;

  CollectedHeap *heap = Universe::heap();
  if (!heap) return;

  size_t l = heap->last_gc_live_bytes();
size_t u=heap->used();
  size_t c = heap->capacity();
  size_t m = heap->max_capacity();
  size_t pu = heap->permanent_used();
  size_t pc = heap->permanent_capacity();

  // Make sure that the numbers make sense when graphing.
  c = (u > c) ? u : c;
  m = (c > m) ? c : m;
  pc = (pu > pc) ? pu : pc;

  sud_jvm_heap_rev1_t jvm_heap;
  memset(&jvm_heap, 0, sizeof(jvm_heap));
  jvm_heap.revision = SUD_JVM_HEAP_REVISION;
  switch (heap->kind()) {
  case CollectedHeap::GenCollectedHeap: strcpy(jvm_heap.name, "GenCollectedHeap"); break;
  case CollectedHeap::ParallelScavengeHeap: strcpy(jvm_heap.name, "ParallelScavengeHeap"); break;
  case CollectedHeap::PauselessHeap: strcpy(jvm_heap.name, "PauselessHeap"); break;
  default: strcpy(jvm_heap.name, "");
  }
  if (heap->supports_tlab_allocation()) jvm_heap.flags |= SUD_JVM_HEAP_FLAG_TLAB_ALLOCATION;
  if (heap->supports_inline_contig_alloc()) jvm_heap.flags |= SUD_JVM_HEAP_FLAG_INLINE_CONTIG_ALLOC;

  uint64_t now = (uint64_t) os::javaTimeMillis();
  jvm_heap.timestamp_ms = now;
  jvm_heap.live_bytes = l;
  jvm_heap.used_bytes = u;
  jvm_heap.capacity_bytes = c;
  jvm_heap.max_capacity_bytes = m;
  jvm_heap.permanent_used_bytes = pu;
  jvm_heap.permanent_capacity_bytes = pc;
  jvm_heap.total_collections = heap->total_collections();

  libos::AccountInfo ai;
  az_allocid_t allocid = process_get_allocationid();
  sys_return_t ret = ai.inspectProcess (allocid);
  if (ret == SYSERR_NONE) {
    // Copy memory_accounting information into the sud structure.
    // Take care not to overflow the accounts past the maximum storable.
    const account_info_t *account_info = ai.getAccountInfo();
    uint64_t count =
      (account_info->ac_count < SUD_MAX_ACCOUNTS) ?
      account_info->ac_count :
      SUD_MAX_ACCOUNTS;
    jvm_heap.account_info.ac_count = count;
    for (uint64_t i = 0; i < count; i++) {
      jvm_heap.account_info.ac_array[i] = account_info->ac_array[i];
    }
  }
  else {
warning("Failed to inspect memory accounting info (%d)",ret);
  }

#define UPDATE_PEAK(struct_member,value) \
  if (peak_jvm_heap.peak_ ## struct_member ## _bytes < value) { \
    peak_jvm_heap.peak_ ## struct_member ## _bytes = value; \
    peak_jvm_heap.peak_ ## struct_member ## _timestamp_ms = now; \
  } \
  jvm_heap.peak_ ## struct_member ## _bytes = peak_jvm_heap.peak_ ## struct_member ## _bytes; \
  jvm_heap.peak_ ## struct_member ## _timestamp_ms = peak_jvm_heap.peak_ ## struct_member ## _timestamp_ms;

  UPDATE_PEAK (live,l);
  UPDATE_PEAK (used,u);
  UPDATE_PEAK (capacity,c);
  UPDATE_PEAK (max_capacity,m);
  UPDATE_PEAK (permanent_used,pu);
  UPDATE_PEAK (permanent_capacity,pc);

  UPDATE_PEAK (allocated,ai.getAllocatedBytes());
  UPDATE_PEAK (funded,ai.getFundedBytes());
  UPDATE_PEAK (overdraft,ai.getOverdraftBytes());
  UPDATE_PEAK (footprint,ai.getFootprintBytes());

  UPDATE_PEAK (committed,ai.getCommittedBytes());
  UPDATE_PEAK (grant,ai.getGrantBytes());
  UPDATE_PEAK (allocated_from_committed,ai.getAllocatedFromCommittedBytes());

  UPDATE_PEAK (default_allocated,ai.getDefaultAllocatedBytes());
  UPDATE_PEAK (default_committed,ai.getDefaultCommittedBytes());
  UPDATE_PEAK (default_footprint,ai.getDefaultFootprintBytes());
  UPDATE_PEAK (default_grant,ai.getDefaultGrantBytes());

  UPDATE_PEAK (heap_allocated,ai.getHeapAllocatedBytes());
  UPDATE_PEAK (heap_committed,ai.getHeapCommittedBytes());
  UPDATE_PEAK (heap_footprint,ai.getHeapFootprintBytes());
  UPDATE_PEAK (heap_grant,ai.getHeapGrantBytes());

  ret = shared_user_data_set_jvm_heap_rev1 (allocid, &jvm_heap);
  if (ret != SYSERR_NONE) warning("Failed to set jvm_heap shared user data (%d)", ret);

  memset ((void*)(&io_stats), 0, sizeof(io_stats));
  io_stats.revision = SUD_IO_REVISION;
  atcpn_stats_get_io_rev1(&io_stats);
  ret = shared_user_data_set_io_rev1 (allocid, &io_stats);
  if (ret != SYSERR_NONE) warning("Failed to set io_stats shared user data (%d)", ret);
#endif // AZ_PROXIED
}
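For reference, after token pasting a single invocation such as UPDATE_PEAK(live, l) in the function above expands to roughly:

if (peak_jvm_heap.peak_live_bytes < l) {
    peak_jvm_heap.peak_live_bytes = l;
    peak_jvm_heap.peak_live_timestamp_ms = now;
}
jvm_heap.peak_live_bytes = peak_jvm_heap.peak_live_bytes;
jvm_heap.peak_live_timestamp_ms = peak_jvm_heap.peak_live_timestamp_ms;

So each call both updates the per-run peak held in the static peak_jvm_heap and copies that peak (with its timestamp) into the jvm_heap record being published.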