static struct memory_t* alloc_chunk(struct allocator_t* self, size_t index) {
    size_t alloc_size = (PREFIX_SIZE + (index + 1) * ALIGN);
    size_t chunk_size = alloc_size * MAX_NUMBER;

    if (NULL == self->free_list[index]) {
        size_t i;
        struct memory_t* node;

        /* Grab one big chunk and carve it into MAX_NUMBER equally sized
           blocks, threading them into a singly linked free list. */
        self->free_list[index] = (struct memory_t*)malloc(chunk_size);
        assert(NULL != self->free_list[index]);
        insert_chunk(self, self->free_list[index]);

        node = self->free_list[index];
        for (i = 0; i < chunk_size - alloc_size; i += alloc_size) {
            node->index = index;
            node = node->next = (struct memory_t*)((byte_t*)node + alloc_size);
        }
        /* The last block terminates the list. */
        node->index = index;
        node->next = NULL;
    }

    return self->free_list[index];
}
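For context, here is a minimal sketch of how a caller might sit on top of alloc_chunk(). The pool_alloc/pool_free names, the {index, next} layout of memory_t, and the idea that the user payload starts PREFIX_SIZE bytes into each block are assumptions for illustration, not taken from the listing above.

/* Hedged sketch: a possible alloc/free pair over alloc_chunk().
 * Assumed (not from the source): memory_t holds {index, next} in its
 * PREFIX_SIZE-byte header, and payload sizes are multiples of ALIGN. */
static void* pool_alloc(struct allocator_t* self, size_t size) {
    size_t index = (size + ALIGN - 1) / ALIGN - 1;    /* size class, size >= 1 */
    struct memory_t* node = alloc_chunk(self, index); /* refills when empty */
    self->free_list[index] = node->next;              /* pop the list head */
    return (byte_t*)node + PREFIX_SIZE;               /* skip the header */
}

static void pool_free(struct allocator_t* self, void* ptr) {
    struct memory_t* node = (struct memory_t*)((byte_t*)ptr - PREFIX_SIZE);
    node->next = self->free_list[node->index];        /* push back on its list */
    self->free_list[node->index] = node;
}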
void hoardUnsbrk(void *ptr, long size) {
    CTRACE(("unsbrk: %p, %ld!\n", ptr, size));

    hoardLock(sHeapLock);

    // TODO: hoard always allocates and frees in typical sizes, so we could
    //  save a lot of effort if we just had a similar mechanism

    // We add this chunk to our free list - first, try to find an adjacent
    // chunk, so that we can merge them together
    free_chunk *chunk = (free_chunk *)sFreeChunks, *last = NULL, *smaller = NULL;
    for (; chunk != NULL; chunk = chunk->next) {
        if ((addr_t)chunk + chunk->size == (addr_t)ptr
            || (addr_t)ptr + size == (addr_t)chunk) {
            // chunks are adjacent - merge them
            CTRACE(("  found adjacent chunks: %p, %ld\n", chunk, chunk->size));
            if (last)
                last->next = chunk->next;
            else
                sFreeChunks = chunk->next;

            if ((addr_t)chunk < (addr_t)ptr)
                chunk->size += size;
            else {
                free_chunk *newChunk = (free_chunk *)ptr;
                newChunk->next = chunk->next;
                newChunk->size = size + chunk->size;
                chunk = newChunk;
            }

            insert_chunk(chunk);
            hoardUnlock(sHeapLock);
            return;
        }

        last = chunk;
        if (chunk->size < (size_t)size)
            smaller = chunk;
    }

    // we didn't find an adjacent chunk, so insert the new chunk into the list
    free_chunk *newChunk = (free_chunk *)ptr;
    newChunk->size = size;
    if (smaller) {
        newChunk->next = smaller->next;
        smaller->next = newChunk;
    } else {
        newChunk->next = sFreeChunks;
        sFreeChunks = newChunk;
    }

    hoardUnlock(sHeapLock);
}
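The adjacency test above reads directly off the address arithmetic: a free chunk at address a with size s covers the byte range [a, a + s), so a freed block [p, p + n) can be merged exactly when a + s == p or p + n == a. A self-contained check of that predicate, using made-up addresses:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t addr_t;

/* Two byte ranges are mergeable iff one ends exactly where the other begins. */
static int adjacent(addr_t a, size_t s, addr_t p, size_t n) {
    return a + s == p || p + n == a;
}

int main(void) {
    assert(adjacent(0x1000, 0x100, 0x1100, 0x80));  /* chunk sits just below ptr */
    assert(adjacent(0x1180, 0x80, 0x1100, 0x80));   /* chunk sits just above ptr */
    assert(!adjacent(0x1000, 0x100, 0x1200, 0x80)); /* gap in between: no merge */
    return 0;
}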
static void mem_exit_bad_place(struct ls_state *ls, bool in_kernel,
                               unsigned int base) {
    struct mem_state *m = in_kernel ? &ls->kern_mem : &ls->user_mem;

    assert(m->in_alloc && "attempt to exit malloc without being in!");
    assert(!m->in_free && "attempt to exit malloc while in free!");
    assert(!m->in_mm_init && "attempt to exit malloc while in init!");

    if (in_kernel != testing_userspace()) {
        lsprintf(DEV, "Malloc [0x%x | %d]\n", base, m->alloc_request_size);
    }

    if (in_kernel) {
        assert(KERNEL_MEMORY(base));
    } else {
        assert(base == 0 || USER_MEMORY(base));
    }

    if (base == 0) {
        lsprintf(INFO, "%s seems to be out of memory.\n", K_STR(in_kernel));
    } else {
        /* Record the new allocation so later frees and accesses can be
           checked against it. */
        struct chunk *chunk = MM_XMALLOC(1, struct chunk);
        chunk->base = base;
        chunk->len = m->alloc_request_size;
        chunk->id = m->heap_next_id;
        chunk->malloc_trace = stack_trace(ls);
        chunk->free_trace = NULL;

        m->heap_size += m->alloc_request_size;
        assert(m->heap_next_id != INT_MAX && "need a wider type");
        m->heap_next_id++;
        insert_chunk(&m->heap, chunk, false);
    }

    m->in_alloc = false;
}
void * hoardSbrk(long size) {
    assert(size > 0);
    CTRACE(("sbrk: size = %ld\n", size));

    // align size request
    size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);

    // choose correct protection flags
    uint32 protection = B_READ_AREA | B_WRITE_AREA;
    if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
        protection |= B_EXECUTE_AREA;

    hoardLock(sHeapLock);

    // find chunk in free list
    free_chunk *chunk = sFreeChunks, *last = NULL;
    for (; chunk != NULL; chunk = chunk->next) {
        CTRACE(("  chunk %p (%ld)\n", chunk, chunk->size));
        if (chunk->size < (size_t)size) {
            last = chunk;
            continue;
        }

        // this chunk is large enough to satisfy the request
        SERIAL_PRINT(("HEAP-%ld: found free chunk to hold %ld bytes\n",
            find_thread(NULL), size));

        void *address = (void *)chunk;
        if (chunk->size > (size_t)size + sizeof(free_chunk)) {
            // divide this chunk into smaller bits
            size_t newSize = chunk->size - size;
            free_chunk *next = chunk->next;

            chunk = (free_chunk *)((addr_t)chunk + size);
            chunk->next = next;
            chunk->size = newSize;

            if (last != NULL) {
                last->next = next;
                insert_chunk(chunk);
            } else
                sFreeChunks = chunk;
        } else {
            chunk = chunk->next;
            if (last != NULL)
                last->next = chunk;
            else
                sFreeChunks = chunk;
        }

        hoardUnlock(sHeapLock);
        return address;
    }

    // There was no chunk, let's see if the area is large enough
    size_t oldHeapSize = sFreeHeapSize;
    sFreeHeapSize += size;

    // round to next heap increment aligned size
    size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1)
        & ~(kHeapIncrement - 1);

    if (incrementAlignedSize <= sHeapAreaSize) {
        SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n",
            find_thread(NULL), size));
        // the area is large enough already
        hoardUnlock(sHeapLock);
        return (void *)(sFreeHeapBase + oldHeapSize);
    }

    // We need to grow the area
    SERIAL_PRINT(("HEAP-%ld: need to resize heap area to %ld (%ld requested)\n",
        find_thread(NULL), incrementAlignedSize, size));

    status_t status = resize_area(sHeapArea, incrementAlignedSize);
    if (status != B_OK) {
        // Either the system is out of memory or another area is in the way and
        // prevents ours from being resized. As a special case of the latter
        // the user might have mmap()ed something over malloc()ed memory. This
        // splits the heap area in two, the first one retaining the original
        // area ID. In either case, if there's still memory, it is a good idea
        // to try and allocate a new area.
        sFreeHeapSize = oldHeapSize;

        if (status == B_NO_MEMORY) {
            hoardUnlock(sHeapLock);
            return NULL;
        }

        size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement
            * kHeapIncrement;

        // First try at the location directly after the current heap area, if
        // that is still in the reserved memory region.
        void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
        area_id area = -1;
        if (sHeapBase != NULL
            && base >= sHeapBase
            && (addr_t)base + newHeapSize
                <= (addr_t)sHeapBase + kHeapReservationSize) {
            area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize,
                B_NO_LOCK, protection);

            if (area == B_NO_MEMORY) {
                hoardUnlock(sHeapLock);
                return NULL;
            }
        }

        // If we don't have an area yet, try again with a free location
        // allocation.
        if (area < 0) {
            base = (void*)(sFreeHeapBase + sHeapAreaSize);
            area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
                newHeapSize, B_NO_LOCK, protection);
        }

        if (area < 0) {
            hoardUnlock(sHeapLock);
            return NULL;
        }

        // We have a new area, so make it the new heap area.
        sHeapArea = area;
        sFreeHeapBase = (addr_t)base;
        sHeapAreaSize = newHeapSize;
        sFreeHeapSize = size;
        oldHeapSize = 0;
    } else
        sHeapAreaSize = incrementAlignedSize;

    hoardUnlock(sHeapLock);
    return (void *)(sFreeHeapBase + oldHeapSize);
}
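Both round-up computations in hoardSbrk() are worth spelling out: (x + a - 1) & ~(a - 1) rounds x up to the next multiple of a when a is a power of two, and (x + a - 1) / a * a does the same for arbitrary a. A minimal sketch with made-up values (the real ALIGNMENT and kHeapIncrement are defined elsewhere in the allocator):

#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of a; valid for any a > 0. */
static size_t round_up(size_t x, size_t a) {
    return (x + a - 1) / a * a;
}

/* Power-of-two fast path, as used for ALIGNMENT and kHeapIncrement. */
static size_t round_up_pow2(size_t x, size_t a) {
    return (x + a - 1) & ~(a - 1);
}

int main(void) {
    assert(round_up_pow2(1, 16) == 16);       /* small request pads to alignment */
    assert(round_up_pow2(16, 16) == 16);      /* exact multiple is unchanged */
    assert(round_up(70000, 65536) == 131072); /* heap grows in whole increments */
    return 0;
}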
bool on_chunk(audio_chunk * chunk, abort_callback & p_abort) {
    bool first_init = false;
    bool re_init = false;

    // This block can be used to determine when a new track is started
    //metadb_handle::ptr curTrack;
    //if (get_cur_file(curTrack) && curTrack != m_lastTrack)
    //{
    //    m_lastTrack = curTrack;
    //}

    if (chunk->get_channels() != m_channels) {
        if (m_channels == 0) {
            first_init = true;
        } else {
            re_init = true;
        }
        m_channels = chunk->get_channels();
    }

    if (chunk->get_srate() != m_srate) {
        if (m_srate == 0) {
            first_init = true;
        } else {
            re_init = true;
        }
        m_srate = chunk->get_srate();
    }

    if (first_init || re_init) {
        // Free memory before re-initializing for settings change
        if (re_init) {
            console::print("Reinitializing filter.");
            delete m_filter;
            delete m_equalizer;
            // Null the pointers so a failed re-initialization below cannot
            // leave them dangling (they are tested against NULL later).
            m_filter = NULL;
            m_equalizer = NULL;
        }

        // Instantiate equalizer
        m_equalizer = new equalizer(FILTER_LEN, EQ_FILTER_BLOCKS, REALSIZE,
            m_channels, m_srate);

        std::vector<struct impulse_info> impulse_info;
        struct impulse_info info;

        // Load equalizer
        if (cfg_eq_enable.get_value() != 0) {
            prefs_eq::get_mag(m_mag);
            info.filename = m_equalizer->generate(ISO_BANDS_SIZE,
                (double *) iso_bands, m_mag, m_phase);
            info.scale = prefs_eq::get_scale();
            impulse_info.push_back(info);
        }

        std::wstring filename;

        // Load DRC impulse response files
        filename = util::str2wstr(cfg_file1_filename.get_ptr());
        if ((cfg_file1_enable.get_value() != 0) && !filename.empty()) {
            if (!buffer::check_snd_file(filename.c_str(), m_channels, m_srate)) {
                if (cfg_file1_resample.get_value() != 0) {
                    filename = buffer::resample_snd_file(filename.c_str(),
                        m_channels, m_srate);
                } else {
                    filename.clear();
                }
            }
            if (!filename.empty()) {
                info.filename = filename;
                info.scale = prefs_file::get_file1_scale();
                impulse_info.push_back(info);
            }
        }

        filename = util::str2wstr(cfg_file2_filename.get_ptr());
        if ((cfg_file2_enable.get_value() != 0) && !filename.empty()) {
            if (!buffer::check_snd_file(filename.c_str(), m_channels, m_srate)) {
                if (cfg_file2_resample.get_value() != 0) {
                    filename = buffer::resample_snd_file(filename.c_str(),
                        m_channels, m_srate);
                } else {
                    filename.clear();
                }
            }
            if (!filename.empty()) {
                info.filename = filename;
                info.scale = prefs_file::get_file2_scale();
                impulse_info.push_back(info);
            }
        }

        filename = util::str2wstr(cfg_file3_filename.get_ptr());
        if ((cfg_file3_enable.get_value() != 0) && !filename.empty()) {
            if (!buffer::check_snd_file(filename.c_str(), m_channels, m_srate)) {
                if (cfg_file3_resample.get_value() != 0) {
                    filename = buffer::resample_snd_file(filename.c_str(),
                        m_channels, m_srate);
                } else {
                    filename.clear();
                }
            }
            if (!filename.empty()) {
                info.filename = filename;
                info.scale = prefs_file::get_file3_scale();
                impulse_info.push_back(info);
            }
        }

        filename.clear();
        double scale = 1.0; // defensive default; only read when filename is set

        if (impulse_info.size() == 1) {
            filename = impulse_info.front().filename;
            scale = impulse_info.front().scale;
        } else if (impulse_info.size() > 1) {
            // Preconvolve impulse files into a single file
            filename = preprocessor::convolve_impulses(impulse_info,
                FILTER_LEN, REALSIZE);
            scale = 1.0;
        }

        if (!filename.empty()) {
            int n_channels, n_frames, sampling_rate;

            // Get impulse file parameters
            if (buffer::get_snd_file_params(filename.c_str(), &n_channels,
                    &n_frames, &sampling_rate)) {
                // calculate filter blocks
                int length = util::get_next_multiple(n_frames, FILTER_LEN);
                int filter_blocks = length / FILTER_LEN;

                // Instantiate filter
                m_filter = new brutefir(FILTER_LEN, filter_blocks, REALSIZE,
                    m_channels, BF_SAMPLE_FORMAT_FLOAT_LE,
                    BF_SAMPLE_FORMAT_FLOAT_LE, m_srate, false);

                // Assign filter coefficients
                m_filter->set_coeff(filename.c_str(), filter_blocks, scale);

                // Reallocate input and output buffers
                m_bufsize = FILTER_LEN * m_channels * sizeof(audio_sample);
                m_inbuf = (audio_sample *) _aligned_realloc(m_inbuf,
                    m_bufsize, ALIGNMENT);
                m_outbuf = (audio_sample *) _aligned_realloc(m_outbuf,
                    m_bufsize, ALIGNMENT);

                console::printf("Filter length: %u samples, %u blocks.",
                    FILTER_LEN, filter_blocks);
                console::printf("Format: %u channels, %u Hz.",
                    m_channels, m_srate);
            }
        }
    }

    // Check if initialization completed successfully
    if (m_filter != NULL) {
        if (m_filter->is_initialized()) {
            t_size sample_count = chunk->get_sample_count();
            const audio_sample *src = chunk->get_data();
            audio_sample *dst;

            while (sample_count) {
                unsigned int todo = FILTER_LEN - m_buffer_count;
                if (todo > sample_count) {
                    todo = sample_count;
                }

                dst = m_inbuf + m_buffer_count * m_channels;
                for (unsigned int i = 0, j = todo * m_channels; i < j; i++) {
                    *dst++ = *src++;
                }

                sample_count -= todo;
                m_buffer_count += todo;

                if (m_buffer_count == FILTER_LEN) {
                    if (m_filter->run(m_inbuf, m_outbuf) == 0) {
                        audio_chunk *chk =
                            insert_chunk(m_buffer_count * m_channels);
                        chk->set_data_32((float *)m_outbuf, m_buffer_count,
                            m_channels, m_srate);

                        if (cfg_overflow_enable.get_value() != 0) {
                            m_filter->check_overflows();
                        }
                    } else {
                        console::print("Filter processing error.");
                    }

                    m_buffer_count = 0;
                }
            }
        }
    } else {
        // If initialization error or no coefficients enabled, just
        // pass audio chunk through
        return true;
    }

    // Return false since we replace the original audio chunk
    // with the processed ones
    return false;
}
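The inner while loop is a standard fixed-block rebuffering pattern: stage incoming frames until exactly FILTER_LEN of them have accumulated, run the convolution on the full block, emit the result, and reset the counter; a trailing partial block simply waits for the next call. A stripped-down sketch of the same pattern, independent of the foobar2000 SDK (all names here are illustrative, not from the plugin):

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative rebuffering skeleton: accepts arbitrary-sized input and
// invokes process() once per full block of kBlock frames.
template <typename ProcessFn>
void rebuffer(const float* src, std::size_t frames, std::size_t channels,
              std::vector<float>& staging, std::size_t& count,
              std::size_t kBlock, ProcessFn process) {
    while (frames > 0) {
        std::size_t todo = kBlock - count;  // room left in the current block
        if (todo > frames)
            todo = frames;                  // partial fill; wait for more input

        std::copy(src, src + todo * channels,
                  staging.begin() + count * channels);
        src += todo * channels;
        frames -= todo;
        count += todo;

        if (count == kBlock) {              // a full block is staged
            process(staging.data());        // e.g. m_filter->run(...) above
            count = 0;
        }
    }
}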