void crt::check_default_heap()
{
	typedef memory::heap::DefaultStat heap_type;

	const auto stat = heap_type::get_stat();
	console::printf("Heap '%s' statistics:\n", heap_type::get_name());
	console::printf("  allocated: %I64u, %I64u\n", stat.get_allocations(), stat.get_allocations_size());
	console::printf("  freed    : %I64u, %I64u\n", stat.get_frees(), stat.get_frees_size());
	console::printf("  diff     : %I64d, %I64d\n",
	                stat.get_allocations() - stat.get_frees(),
	                stat.get_allocations_size() - stat.get_frees_size());
}
#ifdef DEBUG_IP_BUFS
/* Debug builds compile a variant that also records the calling function and
 * line used in the traces below.
 */
void ip_buf_unref_debug(struct net_buf *buf, const char *caller, int line)
#else
void ip_buf_unref(struct net_buf *buf)
#endif
{
	if (!buf) {
#ifdef DEBUG_IP_BUFS
		NET_DBG("*** ERROR *** buf %p (%s():%d)\n", buf, caller, line);
#else
		NET_DBG("*** ERROR *** buf %p\n", buf);
#endif
		return;
	}

	if (!buf->ref) {
#ifdef DEBUG_IP_BUFS
		NET_DBG("*** ERROR *** buf %p is freed already (%s():%d)\n",
			buf, caller, line);
#else
		NET_DBG("*** ERROR *** buf %p is freed already\n", buf);
#endif
		return;
	}

#ifdef DEBUG_IP_BUFS
	NET_DBG("%s [%d] buf %p ref %d (%s():%d)\n",
		type2str(ip_buf_type(buf)), get_frees(ip_buf_type(buf)),
		buf, buf->ref - 1, caller, line);
#else
	NET_DBG("%s buf %p ref %d\n",
		type2str(ip_buf_type(buf)), buf, buf->ref - 1);
#endif

	net_buf_unref(buf);
}
virtual void endFrameEx(const TypedValue *retval, const char *given_symbol) override {
  char symbol[512];
  HierarchicalProfilerFrame *frame =
    dynamic_cast<HierarchicalProfilerFrame *>(m_stack);
  frame->getStack(2, symbol, sizeof(symbol));

  CountMap &counts = m_stats[symbol];
  counts.count++;
  counts.wall_time += cpuCycles() - frame->m_tsc_start;
  if (m_flags & TrackCPU) {
    counts.cpu += cpuTime(m_MHz) - frame->m_vtsc_start;
  }
  if (m_flags & TrackMemory) {
    auto const& stats = MM().getStats();
    int64_t mu_end = stats.usage;
    int64_t pmu_end = stats.peakUsage;
    counts.memory += mu_end - frame->m_mu_start;
    counts.peak_memory += pmu_end - frame->m_pmu_start;
  } else if (m_flags & TrackMalloc) {
    // In TrackMalloc mode m_mu_start/m_pmu_start were seeded with
    // get_allocs()/get_frees() in beginFrameEx, so these deltas reflect
    // allocator activity rather than MemoryManager usage.
    counts.memory += get_allocs() - frame->m_mu_start;
    counts.peak_memory += get_frees() - frame->m_pmu_start;
  }
}
#ifdef DEBUG_IP_BUFS
/* Debug builds compile a variant that also records the calling function and
 * line used in the traces below.
 */
static struct net_buf *ip_buf_get_reserve_debug(enum ip_buf_type type,
						uint16_t reserve_head,
						const char *caller, int line)
#else
static struct net_buf *ip_buf_get_reserve(enum ip_buf_type type,
					   uint16_t reserve_head)
#endif
{
	struct net_buf *buf = NULL;

	/* Note that we do not reserve any space in front of the
	 * buffer so buf->data points to first byte of the IP header.
	 * This is done like this so that IP stack works the same
	 * way as BT and 802.15.4 stacks.
	 *
	 * The reserve_head variable in the function will tell
	 * the size of the IP + other headers if there are any.
	 * That variable is only used to calculate the pointer
	 * where the application data starts.
	 */
	switch (type) {
	case IP_BUF_RX:
		buf = net_buf_get(&free_rx_bufs, 0);
		dec_free_rx_bufs(buf);
		break;
	case IP_BUF_TX:
		buf = net_buf_get(&free_tx_bufs, 0);
		dec_free_tx_bufs(buf);
		break;
	}

	if (!buf) {
#ifdef DEBUG_IP_BUFS
		NET_ERR("Failed to get free %s buffer (%s():%d)\n",
			type2str(type), caller, line);
#else
		NET_ERR("Failed to get free %s buffer\n", type2str(type));
#endif
		return NULL;
	}

	ip_buf_type(buf) = type;
	ip_buf_appdata(buf) = buf->data + reserve_head;
	ip_buf_appdatalen(buf) = 0;
	ip_buf_reserve(buf) = reserve_head;
	net_buf_add(buf, reserve_head);

	NET_BUF_CHECK_IF_NOT_IN_USE(buf);

#ifdef DEBUG_IP_BUFS
	NET_DBG("%s [%d] buf %p reserve %u ref %d (%s():%d)\n",
		type2str(type), get_frees(type), buf, reserve_head, buf->ref,
		caller, line);
#else
	NET_DBG("%s buf %p reserve %u ref %d\n",
		type2str(type), buf, reserve_head, buf->ref);
#endif
	return buf;
}
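/* Sketch only, not part of the file above: one way a caller inside this file
 * (non-debug build) could use the reserve/appdata layout described in the
 * comment of ip_buf_get_reserve(). The 40-byte reserve standing in for an IP
 * header and the payload bytes are made-up values for illustration.
 */
static void ip_buf_reserve_sketch(void)
{
	static const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
	struct net_buf *buf = ip_buf_get_reserve(IP_BUF_TX, 40);

	if (!buf) {
		return;
	}

	/* buf->data points at the (future) IP header; application data
	 * starts 40 bytes later, where ip_buf_appdata(buf) points.
	 */
	memcpy(net_buf_add(buf, sizeof(payload)), payload, sizeof(payload));
	ip_buf_appdatalen(buf) = sizeof(payload);

	/* ... hand the buffer to the stack, or drop the reference ... */
	ip_buf_unref(buf);
}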
virtual void beginFrameEx(const char *symbol) override {
  m_stack->m_tsc_start = cpuCycles();

  if (m_flags & TrackCPU) {
    m_stack->m_vtsc_start = cpuTime(m_MHz);
  }

  if (m_flags & TrackMemory) {
    auto const& stats = MM().getStats();
    m_stack->m_mu_start = stats.usage;
    m_stack->m_pmu_start = stats.peakUsage;
  } else if (m_flags & TrackMalloc) {
    m_stack->m_mu_start = get_allocs();
    m_stack->m_pmu_start = get_frees();
  }
}
virtual void beginFrameEx(const char *symbol) override {
  HierarchicalProfilerFrame *frame =
    dynamic_cast<HierarchicalProfilerFrame *>(m_stack);
  frame->m_tsc_start = cpuCycles();

  if (m_flags & TrackCPU) {
    frame->m_vtsc_start = cpuTime(m_MHz);
  }

  if (m_flags & TrackMemory) {
    auto const& stats = MM().getStats();
    frame->m_mu_start = stats.usage;
    frame->m_pmu_start = stats.peakUsage;
  } else if (m_flags & TrackMalloc) {
    frame->m_mu_start = get_allocs();
    frame->m_pmu_start = get_frees();
  }
}
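// Free-standing sketch (not the profiler's real API): the accounting pattern
// used by the beginFrameEx/endFrameEx pair above -- snapshot the counters when
// a frame starts and charge the end-minus-start deltas to the frame's symbol.
// All names below are made up for illustration.
#include <cstdint>
#include <map>
#include <string>

struct FrameCounts {
  int64_t count = 0;      // completed frames for this symbol
  int64_t wall_time = 0;  // accumulated cycle delta
  int64_t memory = 0;     // accumulated usage (or allocator) delta
};

class MiniProfiler {
 public:
  void begin_frame(int64_t cycles_now, int64_t mem_now) {
    m_tsc_start = cycles_now;
    m_mu_start = mem_now;
  }
  void end_frame(const std::string& symbol, int64_t cycles_now, int64_t mem_now) {
    FrameCounts& c = m_stats[symbol];
    c.count++;
    c.wall_time += cycles_now - m_tsc_start;  // wall-clock charged per symbol
    c.memory += mem_now - m_mu_start;         // memory delta charged per symbol
  }

 private:
  std::map<std::string, FrameCounts> m_stats;
  int64_t m_tsc_start = 0;
  int64_t m_mu_start = 0;
};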
#ifdef DEBUG_IP_BUFS
/* Debug builds compile a variant that also records the calling function and
 * line used in the traces below.
 */
struct net_buf *ip_buf_ref_debug(struct net_buf *buf, const char *caller, int line)
#else
struct net_buf *ip_buf_ref(struct net_buf *buf)
#endif
{
	if (!buf) {
#ifdef DEBUG_IP_BUFS
		NET_DBG("*** ERROR *** buf %p (%s():%d)\n", buf, caller, line);
#else
		NET_DBG("*** ERROR *** buf %p\n", buf);
#endif
		return NULL;
	}

#ifdef DEBUG_IP_BUFS
	NET_DBG("%s [%d] buf %p ref %d (%s():%d)\n",
		type2str(ip_buf_type(buf)), get_frees(ip_buf_type(buf)),
		buf, buf->ref + 1, caller, line);
#else
	NET_DBG("%s buf %p ref %d\n",
		type2str(ip_buf_type(buf)), buf, buf->ref + 1);
#endif

	return net_buf_ref(buf);
}
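/* Sketch only: how the reference counting above keeps a buffer alive while
 * two owners hold it. "buf" is assumed to come from one of the allocation
 * helpers in this file and to start with ref == 1.
 */
static void ip_buf_ref_sketch(struct net_buf *buf)
{
	struct net_buf *extra = ip_buf_ref(buf);  /* ref: 1 -> 2 */

	ip_buf_unref(buf);    /* ref: 2 -> 1, buffer stays allocated */
	ip_buf_unref(extra);  /* ref: 1 -> 0, buffer is released     */
}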
void test_memory()
{
	LogWarn("");

	auto heap = ::GetProcessHeap();
	PROCESS_HEAP_ENTRY entry;
	memory::zero(entry);

	LogReport("Walking heap %p...", heap);
	while (::HeapWalk(heap, &entry) != FALSE) {
		if ((entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
			LogReport("Allocated block");
			if ((entry.wFlags & PROCESS_HEAP_ENTRY_MOVEABLE) != 0) {
				LogReport(", movable with HANDLE %p", entry.Block.hMem);
			}
			if ((entry.wFlags & PROCESS_HEAP_ENTRY_DDESHARE) != 0) {
				LogReport(", DDESHARE");
			}
		} else if ((entry.wFlags & PROCESS_HEAP_REGION) != 0) {
			LogReport("Region\n %d bytes committed"
				  " %d bytes uncommitted\n First block address: %p"
				  " Last block address: %p",
				  entry.Region.dwCommittedSize,
				  entry.Region.dwUnCommittedSize,
				  entry.Region.lpFirstBlock,
				  entry.Region.lpLastBlock);
		} else if ((entry.wFlags & PROCESS_HEAP_UNCOMMITTED_RANGE) != 0) {
			LogReport("Uncommitted range");
		} else {
			LogReport("Block");
		}
		LogReport(" Data portion begins at: %p\n Size: %d bytes"
			  " Overhead: %d bytes\n Region index: %d",
			  entry.lpData, entry.cbData, entry.cbOverhead, entry.iRegionIndex);
	}
	// LastError = GetLastError();
	// if (LastError != ERROR_NO_MORE_ITEMS) {
	// 	_tprintf(TEXT("HeapWalk failed with LastError %d."), LastError);
	// }

	return; // NOTE: everything below is unreachable until this early return is removed.

	struct TypeTag {};
	typedef memory::heap::DecoratorStat<memory::heap::Default, memory::heap::StatLog, TypeTag> heap_type;
	struct TypeTag1 {};
	typedef memory::heap::DecoratorStat<memory::heap::Default, memory::heap::StatLog, TypeTag1> heap_type1;
	struct TypeTag2 {};
	typedef memory::heap::DecoratorStat<memory::heap::Default, memory::heap::StatLog, TypeTag2> heap_type2;

	auto ptr = HostAlloc(heap_type, 47);
	HostFree(heap_type, ptr);
	HostAlloc(heap_type, 42);

	{
		const auto stat = heap_type::get_stat();
		LogReport("stat alloc: %I64u, %I64u", stat.get_allocations(), stat.get_allocations_size());
		LogReport("stat free : %I64u, %I64u", stat.get_frees(), stat.get_frees_size());
		LogReport("stat diff : %I64d", stat.get_allocations_size() - stat.get_frees_size());
	}
	{
		const auto stat = heap_type1::get_stat();
		LogReport("stat1 alloc: %I64u, %I64u", stat.get_allocations(), stat.get_allocations_size());
		LogReport("stat1 free : %I64u, %I64u", stat.get_frees(), stat.get_frees_size());
		LogReport("stat1 diff : %I64d", stat.get_allocations_size() - stat.get_frees_size());
	}

	HostAlloc(heap_type1, 17);
	HostAlloc(heap_type1, 12);
	HostAlloc(heap_type2, 71);
	HostAlloc(heap_type2, 22);

	{
		const auto stat = heap_type1::get_stat();
		LogReport("stat1 alloc: %I64u, %I64u", stat.get_allocations(), stat.get_allocations_size());
		LogReport("stat1 free : %I64u, %I64u", stat.get_frees(), stat.get_frees_size());
		LogReport("stat1 diff : %I64d", stat.get_allocations_size() - stat.get_frees_size());
	}
	{
		const auto stat = heap_type2::get_stat();
		LogReport("stat2 alloc: %I64u, %I64u", stat.get_allocations(), stat.get_allocations_size());
		LogReport("stat2 free : %I64u, %I64u", stat.get_frees(), stat.get_frees_size());
		LogReport("stat2 diff : %I64d", stat.get_allocations_size() - stat.get_frees_size());
	}
}