void AreaLink::Attach(const void *data, size_t size) { if (!data || size == 0) return; Lock(); // Will the attachment fit? if (fHeader->GetAttachmentSize() + size > fHeader->GetInfo().size) { // Being it won't fit, resize the area to fit the thing int32 pageNum = int32(fHeader->GetInfo().size / B_PAGE_SIZE); resize_area(fTarget, pageNum + 1); } // Our attachment will fit, so copy the data into the current location // and increment the appropriate fHeader values int8 *currentpointer = (int8*)BaseAddress(); currentpointer += fHeader->GetAttachmentSize(); memcpy(currentpointer, data, size); fAttachList->AddItem(currentpointer); fHeader->AddAttachment(size); Unlock(); }
/**
 * Grows the block-cache heap by 32 pages worth of blocks and threads the
 * freshly mapped blocks onto the heap's free list.
 * \param psHeap the heap to expand; g_sBlockCache.bc_hLock must be held
 *	  on entry (it is temporarily dropped around the resize).
 * \return 0 on success, -ENOMEM if the heap hit its 128 MB hard limit or
 *	  the underlying area could not be resized.
 */
static int expand_heap( CacheHeap_s *psHeap )
{
	const uint32 nBlockSize = sizeof( CacheBlock_s ) + psHeap->ch_nBlockSize;
	uint32 nNewSize;
	uint32 nOldCount;
	uint32 nNewBlocks;
	CacheBlock_s *psBlock;
	int i;
	int nError;

	// Hard cap: never let the block cache heap grow past 128 MB.
	if ( psHeap->ch_nSize + PAGE_SIZE * 32 > 1024 * 1024 * 128 )
	{
		return( -ENOMEM );
	}

	// Another thread is already resizing this heap; sleep-poll until it is
	// done, dropping the cache lock while we wait.
	// NOTE(review): the size check above is not repeated after the wait,
	// so two concurrent expanders could both proceed — confirm intended.
	while ( psHeap->ch_bBusy )
	{
		UNLOCK( g_sBlockCache.bc_hLock );
		snooze( 10000 );
		LOCK( g_sBlockCache.bc_hLock );
	}

	nNewSize = psHeap->ch_nSize + PAGE_SIZE * 32;
	nOldCount = psHeap->ch_nSize / nBlockSize;
	nNewBlocks = nNewSize / nBlockSize - nOldCount;
	// First block of the newly added region, right after the old blocks.
	psBlock = ( CacheBlock_s * )( ( ( uint8 * )psHeap->ch_pAddress ) + nBlockSize * nOldCount );

	// Mark the heap busy and drop the cache lock while resizing the area,
	// since resize_area() may block.
	psHeap->ch_bBusy = true;
	UNLOCK( g_sBlockCache.bc_hLock );
	nError = resize_area( psHeap->ch_hAreaID, nNewSize, false );
	LOCK( g_sBlockCache.bc_hLock );
	psHeap->ch_bBusy = false;

	if ( nError < 0 )
	{
		// BUG FIX: the format string used to read "%u.4 bytes" — the stray
		// ".4" was garbage in the log message and has been removed.
		printk( "expand_heap() failed to resize area from %u to %u bytes\n", psHeap->ch_nSize, nNewSize );
		return ( -ENOMEM );
	}
	atomic_add( &g_sSysBase.ex_nBlockCacheSize, nNewSize - psHeap->ch_nSize );
	psHeap->ch_nSize = nNewSize;

	// Push each new block onto the front of the heap's free list.
	for ( i = 0; i < nNewBlocks; ++i )
	{
		psBlock->cb_psNext = psHeap->ch_psFirstFreeBlock;
		psHeap->ch_psFirstFreeBlock = psBlock;
		psBlock->cb_nFlags = psHeap->ch_nBlockSize;
		psBlock = ( CacheBlock_s * )( ( ( uint8 * )psBlock ) + nBlockSize );
	}
	return ( 0 );
}
/// Publishes a raw byte buffer into the shared-memory area and wakes any
/// reader blocked on the notification semaphore.
/// @param data the frame bytes to copy into the shared area
void SHMSink::render(const std::vector<unsigned char> &data)
{
    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + data.size())) {
        // BUG FIX: the early return previously left the shm lock held,
        // deadlocking every later render/read.
        // NOTE(review): assumes resize_area() keeps the lock held on
        // failure — confirm against its implementation.
        shm_unlock();
        return;
    }

    memcpy(shm_area_->data, data.data(), data.size());
    shm_area_->buffer_size = data.size();
    // Bumping the generation tells readers a fresh frame is available.
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
/*
 * Adjust the heap break by `diff` bytes.
 * Returns the previous break on success (which is also the current break
 * when diff is zero), or 0 when the heap area cannot be resized.
 */
void *sbrk(int diff)
{
	char *oldBreak = heap_base + heap_size;

	// A zero adjustment is just a query for the current break.
	if (diff == 0)
		return oldBreak;

	// Grow (or shrink) the backing area first; bail out on failure.
	if (resize_area(heap_area, heap_size + diff) < 0)
		return 0;

	heap_size += diff;
	return oldBreak;
}
/// Lets the video provider fill the shared-memory buffer directly, then
/// publishes the new frame to readers.
/// @param provider source that writes the frame into our buffer
/// @param bytes    size of the frame payload in bytes
void SHMSink::render_callback(VideoProvider &provider, size_t bytes)
{
    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + bytes)) {
        ERROR("Could not resize area");
        // BUG FIX: the early return previously left the shm lock held,
        // deadlocking every later render/read.
        // NOTE(review): assumes resize_area() keeps the lock held on
        // failure — confirm against its implementation.
        shm_unlock();
        return;
    }

    provider.fillBuffer(static_cast<void*>(shm_area_->data));
    shm_area_->buffer_size = bytes;
    // Bumping the generation tells readers a fresh frame is available.
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
static int vm_bang(void *str) { for (int i = 0; i < 10; i++) { uint *addr; int area = create_area("original area", (void**) &addr, 0, 0x2000, AREA_NOT_WIRED, USER_READ | USER_WRITE); if (area < 0) { _serial_print("error creating original area\n"); return 0; } unsigned var = rand(); *addr = var; uint *clone_addr; int clone = clone_area("clone area", (void**) &clone_addr, 0, USER_WRITE | USER_READ, area); if (clone < 0) { _serial_print("error creating clone area\n"); return 0; } if (*clone_addr != var) { _serial_print("clone failed to copy pages\n"); return 0; } addr += 1024; clone_addr += 1024; *clone_addr = var; if (*addr != var) { _serial_print("page failed failed to be propigated\n"); return 0; } for (int i = 0; i < 10; i++) resize_area(area, (i % 4) * PAGE_SIZE + PAGE_SIZE); delete_area(area); delete_area(clone); } printf("%s", (char*) str); atomic_add(&thread_count, -1); return 0; }
/// Blocks until the producer publishes a new buffer generation, then copies
/// `len` bytes out of the shared-memory buffer into `dest`.
/// @param dest destination buffer, at least `len` bytes
/// @param len  number of bytes to copy from the shared buffer
void SHMSrc::render(char *dest, size_t len)
{
    shm_lock();

    // Wait until the producer bumps buffer_gen; the lock is dropped while
    // sleeping on the semaphore so the producer can write.
    while (buffer_gen_ == shm_area_->buffer_gen) {
        shm_unlock();
        std::cerr << "Waiting for next buffer" << std::endl;
        sem_wait(&shm_area_->notification);
        shm_lock();
    }

    if (!resize_area()) {
        // BUG FIX: the early return previously left the shm lock held,
        // deadlocking both producer and consumer.
        // NOTE(review): assumes resize_area() keeps the lock held on
        // failure — confirm against its implementation.
        shm_unlock();
        return;
    }

    std::cerr << "Reading from buffer!" << std::endl;
    memcpy(dest, shm_area_->data, len);
    buffer_gen_ = shm_area_->buffer_gen;
    shm_unlock();
}
/// Scales a video frame to BGRA into the shared-memory buffer and publishes
/// it to readers via the generation counter and notification semaphore.
/// @param src the source frame to scale and publish
void SHMSink::render_frame(VideoFrame& src)
{
    VideoFrame dst;
    VideoScaler scaler;

    const int width = src.getWidth();
    const int height = src.getHeight();
    const int format = VIDEO_PIXFMT_BGRA;
    size_t bytes = dst.getSize(width, height, format);

    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + bytes)) {
        ERROR("Could not resize area");
        // BUG FIX: the early return previously left the shm lock held,
        // deadlocking every later render/read.
        // NOTE(review): assumes resize_area() keeps the lock held on
        // failure — confirm against its implementation.
        shm_unlock();
        return;
    }

    // Scale directly into the shared buffer.
    dst.setDestination(shm_area_->data, width, height, format);
    scaler.scale(src, dst);

#ifdef DEBUG_FPS
    const std::chrono::time_point<std::chrono::system_clock> currentTime = std::chrono::system_clock::now();
    const std::chrono::duration<double> seconds = currentTime - lastFrameDebug_;
    frameCount_++;
    if (seconds.count() > 1) {
        DEBUG("%s: FPS %f", shm_name_.c_str(), frameCount_ / seconds.count());
        frameCount_ = 0;
        lastFrameDebug_ = currentTime;
    }
#endif

    shm_area_->buffer_size = bytes;
    // Bumping the generation tells readers a fresh frame is available.
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
/*!	Heap backend for the hoard allocator: returns `size` bytes of heap
	memory, serving the request from the free-chunk list when possible,
	otherwise from the tail of the heap area (growing or replacing the
	area as needed). Returns NULL when no memory can be obtained.
	NOTE(review): assumes sHeapLock protects all the sFree*/sHeap* globals
	touched here — confirm against the rest of the file.
*/
void * hoardSbrk(long size)
{
	assert(size > 0);
	CTRACE(("sbrk: size = %ld\n", size));

	// align size request
	size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);

	// choose correct protection flags: B2 ABI apps may execute from heap
	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;

	hoardLock(sHeapLock);

	// find chunk in free list; `last` trails one node behind `chunk`
	free_chunk *chunk = sFreeChunks, *last = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		CTRACE((" chunk %p (%ld)\n", chunk, chunk->size));
		if (chunk->size < (size_t)size) {
			last = chunk;
			continue;
		}

		// this chunk is large enough to satisfy the request
		SERIAL_PRINT(("HEAP-%ld: found free chunk to hold %ld bytes\n", find_thread(NULL), size));
		void *address = (void *)chunk;
		if (chunk->size > (size_t)size + sizeof(free_chunk)) {
			// divide this chunk into smaller bits: the leading `size`
			// bytes are handed out, the remainder becomes a new chunk
			size_t newSize = chunk->size - size;
			free_chunk *next = chunk->next;
			chunk = (free_chunk *)((addr_t)chunk + size);
			chunk->next = next;
			chunk->size = newSize;
			if (last != NULL) {
				// unlink the old chunk, then re-insert the remainder
				// (presumably insert_chunk() keeps the list ordered)
				last->next = next;
				insert_chunk(chunk);
			} else
				sFreeChunks = chunk;
		} else {
			// consume the whole chunk: unlink it from the list
			chunk = chunk->next;
			if (last != NULL)
				last->next = chunk;
			else
				sFreeChunks = chunk;
		}

		hoardUnlock(sHeapLock);
		return address;
	}

	// There was no chunk, let's see if the area is large enough
	size_t oldHeapSize = sFreeHeapSize;
	sFreeHeapSize += size;

	// round to next heap increment aligned size
	size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1) & ~(kHeapIncrement - 1);
	if (incrementAlignedSize <= sHeapAreaSize) {
		SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n", find_thread(NULL), size));
		// the area is large enough already
		hoardUnlock(sHeapLock);
		return (void *)(sFreeHeapBase + oldHeapSize);
	}

	// We need to grow the area
	SERIAL_PRINT(("HEAP-%ld: need to resize heap area to %ld (%ld requested)\n", find_thread(NULL), incrementAlignedSize, size));

	status_t status = resize_area(sHeapArea, incrementAlignedSize);
	if (status != B_OK) {
		// Either the system is out of memory or another area is in the way and
		// prevents ours from being resized. As a special case of the latter
		// the user might have mmap()ed something over malloc()ed memory. This
		// splits the heap area in two, the first one retaining the original
		// area ID. In either case, if there's still memory, it is a good idea
		// to try and allocate a new area.
		sFreeHeapSize = oldHeapSize;
		if (status == B_NO_MEMORY) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement * kHeapIncrement;

		// First try at the location directly after the current heap area, if
		// that is still in the reserved memory region.
		void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
		area_id area = -1;
		if (sHeapBase != NULL && base >= sHeapBase && (addr_t)base + newHeapSize <= (addr_t)sHeapBase + kHeapReservationSize) {
			area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize, B_NO_LOCK, protection);
			if (area == B_NO_MEMORY) {
				hoardUnlock(sHeapLock);
				return NULL;
			}
		}

		// If we don't have an area yet, try again with a free location
		// allocation.
		if (area < 0) {
			base = (void*)(sFreeHeapBase + sHeapAreaSize);
			area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS, newHeapSize, B_NO_LOCK, protection);
		}

		if (area < 0) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		// We have a new area, so make it the new heap area.
		sHeapArea = area;
		sFreeHeapBase = (addr_t)base;
		sHeapAreaSize = newHeapSize;
		sFreeHeapSize = size;
		oldHeapSize = 0;
	} else
		sHeapAreaSize = incrementAlignedSize;

	hoardUnlock(sHeapLock);
	return (void *)(sFreeHeapBase + oldHeapSize);
}
/*!	Allocates a free block of at least \a size bytes (rounded up to whole
	pages), either by growing one of the existing chunk areas in place or,
	when no area can be resized, by creating a brand new area.
	\param size    requested size in bytes; rounded up to a page multiple
	\param newArea set to true when a new area was created, false when an
	               existing one was grown
	\return the new free block (already added to fFreeBlocks), or NULL on
	        allocation failure
*/
struct block* ClientMemoryAllocator::_AllocateChunk(size_t size, bool& newArea)
{
	// round up to multiple of page size
	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// At first, try to resize our existing areas; `chunk` stays non-NULL
	// only if a resize succeeded (loop exits via break)
	chunk_iterator iterator = fChunks.GetIterator();
	struct chunk* chunk;
	while ((chunk = iterator.Next()) != NULL) {
		status_t status = resize_area(chunk->area, chunk->size + size);
		if (status == B_OK) {
			newArea = false;
			break;
		}
	}

	// TODO: resize and relocate while holding the write lock

	struct block* block;
	uint8* address;

	if (chunk == NULL) {
		// No existing area could be grown — create a fresh one.
		// TODO: temporary measurement as long as resizing areas doesn't
		// work the way we need (with relocating the area, if needed):
		// allocate at least 32 pages up front
		if (size < B_PAGE_SIZE * 32)
			size = B_PAGE_SIZE * 32;

		// create new area for this allocation
		chunk = (struct chunk*)malloc(sizeof(struct chunk));
		if (chunk == NULL)
			return NULL;

		block = (struct block*)malloc(sizeof(struct block));
		if (block == NULL) {
			free(chunk);
			return NULL;
		}

		char name[B_OS_NAME_LENGTH];
#ifdef HAIKU_TARGET_PLATFORM_LIBBE_TEST
		strcpy(name, "client heap");
#else
		// Name the area after the client team for easier debugging.
		snprintf(name, sizeof(name), "heap:%ld:%s", fApplication->ClientTeam(), fApplication->SignatureLeaf());
#endif
		area_id area = create_area(name, (void**)&address, B_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
		if (area < B_OK) {
			free(block);
			free(chunk);
			return NULL;
		}

		// add chunk to list
		chunk->area = area;
		chunk->base = address;
		chunk->size = size;
		fChunks.Add(chunk);
		newArea = true;
	} else {
		// An existing area was grown: create a new free block covering the
		// newly added tail of the chunk.
		block = (struct block *)malloc(sizeof(struct block));
		if (block == NULL)
			return NULL;
		// NOTE(review): if this malloc fails, the area has already been
		// resized but chunk->size is not updated — the extra space is
		// orphaned until the next successful call. Confirm acceptable.

		address = chunk->base + chunk->size;
		chunk->size += size;
	}

	// add block to free list
	block->chunk = chunk;
	block->base = address;
	block->size = size;
	fFreeBlocks.Add(block);

	return block;
}
/*
 * Finds (from a previous run) or allocates a locked, physically contiguous
 * buffer in low physical memory (below 16 MB, per the 0xff000000 masks)
 * for the InterWave device, then trims it so the final low_size-byte
 * window does not cross a 64K physical boundary.
 * Returns B_OK on success, or a negative area/creation error.
 * NOTE(review): presumably the 64K constraint comes from the card's DMA
 * addressing — confirm against the hardware docs.
 */
status_t iw_find_low_memory(interwave_dev * iw)
{
	size_t low_size = (MIN_MEMORY_SIZE+(B_PAGE_SIZE-1))&~(B_PAGE_SIZE-1);
	size_t allocate_size;
	physical_entry where;
	uint32 boundary;
	size_t trysize;
	area_id curarea;
	void * addr;
	char name[DEVNAME];

	if (low_size < MIN_MEMORY_SIZE) {
		low_size = MIN_MEMORY_SIZE;
	}
	if (low_size > 65536) {
		iwprintf("too much low memory requested !");
		low_size = 65536;
	}
	// Allocate twice what we need so a non-boundary-crossing window of
	// low_size bytes is guaranteed to exist somewhere inside.
	allocate_size = 2*low_size;

	sprintf(name, "%s_low", iw->name);
	curarea = find_area(name);
	if (curarea >= 0) { /* area there from previous run */
		area_info ainfo;
		iwprintf("testing likely candidate...");
		// get_area_info() returns non-zero (an error) on failure
		if (get_area_info(curarea, &ainfo)) {
			iwprintf("no info");
			goto allocate;
		}
		/* test area we found */
		trysize = ainfo.size;
		addr = ainfo.address;
		if (trysize < allocate_size) {
			iwprintf("too small (%x)", trysize);
			goto allocate;
		}
		if (get_memory_map(addr, trysize, &where, 1) < B_OK) {
			iwprintf("no memory map");
			goto allocate;
		}
		// must live entirely below 16 MB physical
		if ((uint32)where.address & 0xff000000) {
			iwprintf("bad physical address");
			goto allocate;
		}
		// must be locked and physically contiguous for the full size
		if (ainfo.lock < B_FULL_LOCK || where.size < allocate_size) {
			iwprintf("lock not contiguous");
			goto allocate;
		}
		goto a_o_k;
	}

allocate:
	if (curarea >= 0) {
		delete_area(curarea); /* area didn't work */
		curarea = -1;
	}
	iwprintf("allocating new low area");

	trysize = allocate_size;
	curarea = create_area(name, &addr, B_ANY_KERNEL_ADDRESS, trysize, B_LOMEM, B_READ_AREA | B_WRITE_AREA);
	iwprintf("create_area(%d) returned area %x at logical 0x%08x", trysize, curarea, addr);
	if (curarea < 0) {
		goto oops;
	}
	if (get_memory_map(addr, allocate_size, &where, 1) < 0) {
		delete_area(curarea);
		curarea = B_ERROR;
		goto oops;
	}
	if ((uint32)where.address & 0xff000000) {
		// does not start in low memory
		delete_area(curarea);
		curarea = B_ERROR;
		goto oops;
	}
	if (((uint32)where.address+allocate_size) & 0xff000000) {
		// does not end in low memory
		delete_area(curarea);
		curarea = B_ERROR;
		goto oops;
	}

	// NOTE: on success the allocate path deliberately falls through oops:
	// (curarea >= 0 skips the error return) and continues at a_o_k:.
oops:
	if (curarea < 0) {
		dprintf("interwave: failed to create low_mem area\n");
		return curarea;
	}

a_o_k:
	iwprintf("successfully found or created low area!");
	iwprintf("physical 0x%08x-0x%08x logical 0x%08x size %d", where.address, where.address+trysize-1, addr, trysize);
	iw->low_size = low_size;
	iw->low_area = curarea;

	// The resulting double-sized area probably crosses a 64K boundary.
	// Let's change the start address so that the final, normal-sized one does not.

	// The first boundary possibly crossed
	boundary = ((uint32)where.address & 0xffff0000) + 0x00010000;

	// The good chunk (low_size bytes not crossing a 64K boundary) may be
	// either below or above the first boundary.
	if((boundary-(uint32)where.address) >= low_size) {
		// it's below, nothing to change
		iw->low_mem = (uchar *)addr;
		iw->low_phys = (vuchar *)where.address;
		iwprintf("current size is %d bytes",trysize);
		iwprintf("keeping %d bytes",low_size);
		// give back the unused tail of the double-sized area
		if(trysize>low_size)
			resize_area(curarea,low_size);
	} else {
		// it's above - bump up start address
		uint32 delta = boundary - (uint32)where.address;
		iw->low_mem = (uchar *)addr + delta;
		iw->low_phys = (vuchar *)boundary;
		// Unfortunately, what's below the boundary (delta bytes) is wasted.
		// We can't truncate an area's bottom.
		iwprintf("current size is %d bytes",trysize);
		iwprintf("keeping %d bytes, waste=%d",low_size+delta,delta);
		if(trysize>low_size+delta)
			resize_area(curarea,low_size+delta);
	}

	iwprintf("using physical 0x%08x-0x%08x logical 0x%08x size %d", iw->low_phys, iw->low_phys+iw->low_size-1, iw->low_mem, iw->low_size);

	return B_OK;
}