/*	Write *numBytes bytes from the user buffer to physical memory at
	"position". Restricted to the superuser; maps the target range into a
	temporary area for the duration of the copy. */
status_t
mem_write(void* cookie, off_t position, const void* buffer, size_t* numBytes)
{
	/* check permissions */
	if (getuid() != 0 && geteuid() != 0) {
		*numBytes = 0;
		return EPERM;
	}

	void* virtualAddress;
	area_id area = mem_map_target(position, *numBytes, B_WRITE_AREA,
		&virtualAddress);
	if (area < 0) {
		*numBytes = 0;
		return area;
	}

	status_t status = B_OK;
	if (user_memcpy(virtualAddress, buffer, *numBytes) != B_OK)
		status = B_BAD_ADDRESS;

	delete_area(area);
	return status;
}
/*!	Tear down the memory mappings set up for a card: stop the device from
	decoding memory/IO accesses, then release the kernel areas that exposed
	its registers and frame buffer. */
static void
unmap_device(device_info *di)
{
	shared_info *si = di->si;
	uint32 tmpUlong;
	pci_info *pcii = &(di->pcii);

	/* disable memory mapped IO */
	// Clear the low two PCI command bits (IO-space and memory-space
	// enable) so the card stops responding before the mappings go away.
	tmpUlong = get_pci(PCI_command, 4);
	tmpUlong &= 0xfffffffc;
	set_pci(PCI_command, 4, tmpUlong);

	/* delete the areas */
	if (si->regs_area >= 0)
		delete_area(si->regs_area);
	if (si->fb_area >= 0)
		delete_area(si->fb_area);
	si->regs_area = si->fb_area = -1;

	// Clear the cached pointers so any stale access faults instead of
	// touching unmapped memory.
	si->framebuffer = NULL;
	di->regs = NULL;
}
/*	Release the resources owned by the time source: the list of slave
	nodes and, if one was ever set up, the shared buffer area. */
BTimeSource::~BTimeSource()
{
	CALLED();

	delete fSlaveNodes;

	if (fArea > 0)
		delete_area(fArea);
}
/*	Free the backing storage of a DMA buffer: the payload area and the
	scatter/gather list area, resetting the handles so a later free is a
	no-op. */
static void
scsi_free_dma_buffer(dma_buffer *buffer)
{
	// Payload area, if one was ever allocated.
	if (buffer->area > 0) {
		SHOW_FLOW0(1, "Destroying buffer");

		delete_area(buffer->area);
		buffer->area = 0;
		buffer->size = 0;
	}

	// Scatter/gather list area.
	if (buffer->sg_list_area > 0) {
		delete_area(buffer->sg_list_area);
		buffer->sg_list_area = 0;
	}
}
/*	Force playback/recording to stop and release the streams' buffer areas
	and semaphores.

	Fix: the original NULL-checked "device" in the first statement but then
	dereferenced it unconditionally for delete_area()/delete_sem(), crashing
	on a NULL device. Bail out early instead (a no-op stop succeeds).
*/
static status_t
buffer_force_stop(device_t* device)
{
	dprintf("null_audio: %s\n", __func__);

	if (device == NULL)
		return B_OK;

	if (device->running)
		null_stop_hardware(device);

	delete_area(device->playback_stream.buffer_area);
	delete_area(device->record_stream.buffer_area);

	delete_sem(device->playback_stream.buffer_ready_sem);
	delete_sem(device->record_stream.buffer_ready_sem);

	return B_OK;
}
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	addr_t metaDataAddress = kMetaDataBaseAddress;
	// Probe each candidate physical address in turn. CREATE_AREA_DONT_CLEAR
	// preserves whatever a previous session left there, so the magic check
	// below can recognize surviving metadata.
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			(void**)&metaData, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE,
			B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			metaDataAddress, CREATE_AREA_DONT_CLEAR);
		if (area < 0)
			continue;

		// Not looking for a previous session: the first address we can
		// map at all is good enough.
		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		// All three magic values must match for this memory to count as
		// metadata surviving from a previous session.
		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		// No match at this address -- unmap and try the next candidate.
		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}
/*!	Syscall: hand the recorded profiling buffer over to the calling userland
	team and copy the recording parameters out to it. On success ownership
	of the buffer area moves to the caller; the kernel-side parameter record
	is released either way. */
status_t
_user_system_profiler_recorded(
	struct system_profiler_parameters* userParameters)
{
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

	// Transfer the area to the userland process
	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area,
		&address, B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	// The caller must not be able to scribble over the recorded data.
	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		// Publish the caller-side area ID via the copied-out parameters.
		sRecordedParameters->buffer_area = newArea;
		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	// On failure the caller never learns the area's ID, so it has to be
	// destroyed here (it was already transferred out of the kernel team).
	if (status != B_OK)
		delete_area(newArea);

	// The kernel no longer owns the recording in either case.
	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
}
/*!	Tear down the direct-window connection state. Blocks until the client
	has acknowledged the stop notification, shuts down the daemon thread,
	and releases the semaphores and cloned clipping area that were created
	during initialization (guarded by the fInitStatus progress flags). */
void
BDirectWindow::_DisposeData()
{
	// wait until the connection terminates: we can't destroy
	// the object until the client receives the B_DIRECT_STOP
	// notification, or bad things will happen
	while (fConnectionEnable)
		snooze(50000);

	_LockDirect();

	if (fInitStatus & DW_STATUS_THREAD_STARTED) {
		fDaemonKiller = true;
		// delete this sem, otherwise the Direct daemon thread
		// will wait forever on it
		delete_sem(fDisableSem);
		status_t retVal;
		// Join the daemon before freeing anything it might still touch.
		wait_for_thread(fDirectDaemonId, &retVal);
	}

#if DW_NEEDS_LOCKING
	if (fInitStatus & DW_STATUS_SEM_CREATED)
		delete_sem(fDirectSem);
#endif

	// Drop our clone of the clipping area, but only if it was mapped.
	if (fInitStatus & DW_STATUS_AREA_CLONED)
		delete_area(fClonedClippingArea);
}
/*	Benchmark page-fault cost: touch FAULT_PAGES pages once (every access
	faults a page in) and then a second time (all pages already mapped),
	and print both timings.

	Fix: the original never checked the create_area() result; on failure
	"addr" was left uninitialized and written through (undefined behavior).
*/
void
time_faults()
{
	const int FAULT_PAGES = 100;
	char *addr;
	int area = create_area("fault area", (void**) &addr, 0,
		FAULT_PAGES * PAGE_SIZE, AREA_NOT_WIRED, USER_READ | USER_WRITE);
	if (area < 0) {
		printf("time_faults: create_area failed (%d)\n", area);
		return;
	}

	// First pass: each page access triggers a fault.
	bigtime_t start = system_time();
	char *c = addr;
	for (int i = 0; i < FAULT_PAGES; i++) {
		*c = 0;
		c += PAGE_SIZE;
	}
	bigtime_t time1 = system_time() - start;

	// Second pass: same accesses, no faults.
	start = system_time();
	c = addr;
	for (int i = 0; i < FAULT_PAGES; i++) {
		*c = 0;
		c += PAGE_SIZE;
	}
	bigtime_t time2 = system_time() - start;

	printf("\n%d pages. fault time %Ldus (%Ldus per fault) no fault time %Ldus (%Ldus per fault)\n",
		FAULT_PAGES, time1, time1 / FAULT_PAGES, time2,
		time2 / FAULT_PAGES);

	delete_area(area);
}
static status_t InitCommon(int fileDesc) { // Initialization function used by primary and cloned accelerants. gInfo.deviceFileDesc = fileDesc; // Get area ID of shared data from driver. area_id sharedArea; status_t result = ioctl(gInfo.deviceFileDesc, TDFX_GET_SHARED_DATA, &sharedArea, sizeof(sharedArea)); if (result != B_OK) return result; gInfo.sharedInfoArea = clone_area("3DFX shared info", (void**)&(gInfo.sharedInfo), B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, sharedArea); if (gInfo.sharedInfoArea < 0) return gInfo.sharedInfoArea; // sharedInfoArea has error code gInfo.regsArea = clone_area("3DFX regs area", (void**)&(gInfo.regs), B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, gInfo.sharedInfo->regsArea); if (gInfo.regsArea < 0) { delete_area(gInfo.sharedInfoArea); return gInfo.regsArea; // regsArea has error code } return B_OK; }
/*!	Undo VBoxGuestHaikuAttach: release the IRQ, the MMIO area mapping and
	the driver/runtime state. Refused with EBUSY while any handle to the
	device is still open. */
static status_t
VBoxGuestHaikuDetach(void)
{
	struct VBoxGuestDeviceState *pState = &sState;
	if (cUsers > 0)
		return EBUSY;

	/*
	 * Reverse what we did in VBoxGuestHaikuAttach.
	 */
	VBoxGuestHaikuRemoveIRQ(pState);

	// NOTE(review): this treats a zero area ID as "never mapped"; Haiku
	// code usually tests area IDs with "< 0". Confirm the attach path can
	// never store a valid ID of 0 here.
	if (pState->iVMMDevMemAreaId)
		delete_area(pState->iVMMDevMemAreaId);

	VBoxGuestDeleteDevExt(&g_DevExt);

#ifdef DO_LOG
	RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
	RTLogSetDefaultInstance(NULL);
//	RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif

	RTSpinlockDestroy(g_Spinlock);
	g_Spinlock = NIL_RTSPINLOCK;

	// Tear down the IPRT runtime last.
	RTR0Term();
	return B_OK;
}
/*!	Accelerant shutdown hook, used by both the primary accelerant and its
	clones. Only the primary deletes the shared benaphores; only the clone
	closes its private file descriptor. Both release the mode list area and
	the cloned shared data. */
void
UNINIT_ACCELERANT(void)
{
	if (accelerantIsClone) {
		LOG(4,("UNINIT_ACCELERANT: shutting down clone accelerant.\n"));
	} else {
		LOG(4,("UNINIT_ACCELERANT: shutting down primary accelerant.\n"));

		/* delete benaphores ONLY if we are the primary accelerant */
		DELETE_BEN(si->engine.lock);
		DELETE_BEN(si->overlay.lock);

		/* ensure that INIT_ACCELERANT can be executed again */
		si->accelerant_in_use = false;
	}

	/* free our mode list area */
	delete_area(my_mode_list_area);
	/* paranoia */
	my_mode_list = 0;
	/* release our cloned data */
	uninit_common();
	/* close the file handle ONLY if we're the clone */
	if (accelerantIsClone)
		close(fd);
}
/*	Allocate "size" bytes of physically contiguous memory (32-bit
	addressable) and report both the virtual and physical address of the
	block. Returns the area ID on success, B_ERROR otherwise. */
area_id
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
	const char *name)
{
	// TODO: phy should be phys_addr_t*!
	TRACE("allocating %ld bytes for %s\n", size, name);

	size = ROUNDUP(size, B_PAGE_SIZE);

	void *virtadr;
	// TODO: The rest of the code doesn't deal correctly with physical
	// addresses > 4 GB, so we have to force 32 bit addresses here.
	area_id areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
		B_32_BIT_CONTIGUOUS, protection);
	if (areaid < B_OK) {
		TRACE("couldn't allocate area %s\n", name);
		return B_ERROR;
	}

	physical_entry pe;
	status_t rv = get_memory_map(virtadr, size, &pe, 1);
	if (rv < B_OK) {
		delete_area(areaid);
		TRACE("couldn't map %s\n", name);
		return B_ERROR;
	}

	if (virt != NULL)
		*virt = virtadr;
	if (phy != NULL)
		*phy = (void*)(addr_t)pe.address;

	TRACE("area = %ld, size = %ld, virt = %p, phy = %p\n", areaid, size,
		virtadr, pe.address);
	return areaid;
}
// Roll back on destruction: if the request did not complete successfully,
// delete every area that was created so far.
~RequestRelocator()
{
	if (fSuccess)
		return;

	for (int32 i = 0; i < *fAreaCount; i++)
		delete_area(fAreas[i]);
}
area_id alloc_mem(void **phy, void **log, size_t size, const char *name) { physical_entry pe; void * logadr; area_id areaid; status_t rv; LOG(("allocating %d bytes for %s\n",size,name)); size = round_to_pagesize(size); areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA); if (areaid < B_OK) { PRINT(("couldn't allocate area %s\n",name)); return B_ERROR; } rv = get_memory_map(logadr, size, &pe, 1); if (rv < B_OK) { delete_area(areaid); PRINT(("couldn't map %s\n", name)); return B_ERROR; } memset(logadr, 0, size); if (log) *log = logadr; if (phy) *phy = pe.address; LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n", areaid, size, logadr, pe.address)); return areaid; }
/* Destroy a shared memory segment: drop the backing BeOS area and scrub
 * the handle so stale use of it is obvious.
 */
APR_DECLARE(apr_status_t) apr_shm_destroy(apr_shm_t *m)
{
    delete_area(m->aid);
    m->memblock = NULL;
    m->avail = 0;
    return APR_SUCCESS;
}
/*! Clean up data common to both primary and cloned accelerant */
static void
uninit_common(void)
{
	intel_free_memory(gInfo->context_base);

	// Unmap the register and shared-info areas and mark them invalid.
	delete_area(gInfo->regs_area);
	delete_area(gInfo->shared_info_area);
	gInfo->regs_area = -1;
	gInfo->shared_info_area = -1;

	// close the file handle ONLY if we're the clone
	if (gInfo->is_clone)
		close(gInfo->device);

	free(gInfo);
}
/*	Refresh the cached list of media file types from the media server and
	reset the type iterator. */
status_t
BMediaFiles::RewindTypes()
{
	CALLED();

	_ClearTypes();

	server_get_media_types_request request;
	request.team = BPrivate::current_team();

	server_get_media_types_reply reply;
	status_t status = QueryServer(SERVER_GET_MEDIA_FILE_TYPES, &request,
		sizeof(request), &reply, sizeof(reply));
	if (status != B_OK) {
		ERROR("BMediaFiles::RewindTypes: failed to rewind types: %s\n",
			strerror(status));
		return status;
	}

	// The reply area is packed with fixed-size name slots; copy each one
	// into our own list, then release the area.
	const char* names = (const char*)reply.address;
	for (int32 index = 0; index < reply.count; index++)
		fTypes.AddItem(new BString(names + index * B_MEDIA_NAME_LENGTH));

	delete_area(reply.area);

	fTypeIndex = 0;
	return B_OK;
}
// Drop the mapping of the driver's shared area and reset the cached
// handles so later use is detectable.
static void
FreeShared()
{
	delete_area(gPd->sharedArea);
	gPd->si = NULL;
	gPd->sharedArea = -1;
}
/*	Refresh the cached list of media file items for the given type from
	the media server and reset the item iterator. */
status_t
BMediaFiles::RewindRefs(const char* type)
{
	CALLED();

	_ClearItems();

	TRACE("BMediaFiles::RewindRefs: sending SERVER_GET_MEDIA_FILE_ITEMS for "
		"type %s\n", type);

	server_get_media_items_request request;
	request.team = BPrivate::current_team();
	strncpy(request.type, type, B_MEDIA_NAME_LENGTH);

	server_get_media_items_reply reply;
	status_t status = QueryServer(SERVER_GET_MEDIA_FILE_ITEMS, &request,
		sizeof(request), &reply, sizeof(reply));
	if (status != B_OK) {
		ERROR("BMediaFiles::RewindRefs: failed to rewind refs: %s\n",
			strerror(status));
		return status;
	}

	// The reply area is packed with fixed-size item slots; copy each one
	// into our own list, then release the area.
	const char* names = (const char*)reply.address;
	for (int32 index = 0; index < reply.count; index++) {
		fItems.AddItem(new BString(names + index * B_MEDIA_NAME_LENGTH,
			B_MEDIA_NAME_LENGTH));
	}

	delete_area(reply.area);

	fCurrentType = type;
	fItemIndex = 0;
	return B_OK;
}
/*	Release the mapping of the card's BIOS ROM and disable the ROM BAR.
	No-op if the ROM was never mapped. */
void ATIRadeon::UnmapROM( int nFd, PCI_Info_s *dev )
{
	if (!rinfo.bios_seg)
		return;

	rinfo.bios_seg = NULL;
	m_pROMBase = NULL;

	if(m_hROMArea >= 0) {
		/* This is the workaround for the old crash issue - just
		   remapping it to the end of VRAM works -MK */
		// NOTE(review): the area is deliberately re-pointed at spare VRAM
		// before deletion; deleting it while still covering the ROM
		// apparently crashed. Keep this ordering.
		if( remap_area (m_hROMArea,
				(void *)((rinfo.fb_base_phys + rinfo.video_ram))) < 0 ) {
			dbprintf("Radeon :: failed to unmap ROM area (%d)\n",
				m_hROMArea);
			return;
		}
		delete_area(m_hROMArea);
	}

	/* This will disable and set address to unassigned */
	pci_gfx_write_config( nFd, dev->nBus, dev->nDevice, dev->nFunction,
		PCI_ROM_BASE, 4, 0 );
}
/* Pool cleanup for an mmap'ed region: remove this handle from the ring of
 * references and unmap the storage only when it was the last reference.
 */
static apr_status_t mmap_cleanup(void *themmap)
{
    apr_mmap_t *mm = themmap;
    apr_mmap_t *next = APR_RING_NEXT(mm,link);
    int rv = 0;

    /* we no longer refer to the mmaped region */
    APR_RING_REMOVE(mm,link);
    APR_RING_NEXT(mm,link) = NULL;
    APR_RING_PREV(mm,link) = NULL;

    if (next != mm) {
        /* more references exist, so we're done */
        return APR_SUCCESS;
    }

#ifdef BEOS
    /* On BeOS the mapping is backed by a kernel area, not mmap(). */
    rv = delete_area(mm->area);
#else
    rv = munmap(mm->mm, mm->size);
#endif
    /* Poison the pointer so use-after-cleanup is obvious. */
    mm->mm = (void *)-1;

    if (rv == 0) {
        return APR_SUCCESS;
    }
    /* munmap()/delete_area() failed; report the system error. */
    return errno;
}
/*	Allocate "size" bytes of zeroed, physically contiguous memory and
	report both the virtual and physical address of the block. Returns the
	area ID on success, B_ERROR otherwise. */
area_id
alloc_contiguous(void **virt, void **phy, size_t size, uint32 protection,
	const char *name)
{
	TRACE("allocating %ld bytes for %s\n", size, name);

	size = round_to_pagesize(size);

	void *virtadr;
	area_id areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
		B_CONTIGUOUS, protection);
	if (areaid < B_OK) {
		ERROR("couldn't allocate area %s\n", name);
		return B_ERROR;
	}

	physical_entry pe;
	status_t rv = get_memory_map(virtadr, size, &pe, 1);
	if (rv < B_OK) {
		delete_area(areaid);
		ERROR("couldn't get mapping for %s\n", name);
		return B_ERROR;
	}

	// Hand the caller a zeroed buffer.
	memset(virtadr, 0, size);

	if (virt != NULL)
		*virt = virtadr;
	if (phy != NULL)
		*phy = pe.address;

	TRACE("area = %ld, size = %ld, virt = %p, phy = %p\n", areaid, size,
		virtadr, pe.address);
	return areaid;
}
/*	Free a block obtained from the page-aligned allocator. In the debug
	build the block is surrounded by guard pages and preceded by a header;
	otherwise dispatch to the platform's aligned-free primitive. */
void page_aligned_allocator::free(char* const block)
{
#ifdef TORRENT_DEBUG_BUFFERS
	int page = page_size();

	// restore read/write access to the two guard pages surrounding the
	// buffer so the header in the leading page can be inspected
	mprotect(block - page, page, PROT_READ | PROT_WRITE);
	alloc_header* h = (alloc_header*)(block - page);
	// total pages: payload rounded up, plus the two guard pages
	int num_pages = (h->size + (page-1)) / page + 2;
	TORRENT_ASSERT(h->magic == 0x1337);
	mprotect(block + (num_pages-2) * page, page, PROT_READ | PROT_WRITE);
//	fprintf(stderr, "free: %p head: %p tail: %p size: %d\n", block, block - page, block + h->size, int(h->size));
	// clear the magic so a double free trips the assert above
	h->magic = 0;

#if defined __linux__ || (defined __APPLE__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050)
	print_backtrace(h->stack, sizeof(h->stack));
#endif
	// the allocation actually starts at the leading guard page
	::free(block - page);
	return;
#endif

#ifdef TORRENT_WINDOWS
	_aligned_free(block);
#elif defined TORRENT_BEOS
	// On BeOS/Haiku the allocation is a kernel area; find it by address.
	area_id id = area_for(block);
	if (id < B_OK)
		return;
	delete_area(id);
#else
	::free(block);
#endif
}
/*	Allocate a zeroed, physically contiguous area of at least "size" bytes
	and report its logical and physical base address. Returns the area ID
	on success, B_ERROR otherwise. */
area_id
Stack::AllocateArea(void **logicalAddress, void **physicalAddress,
	size_t size, const char *name)
{
	TRACE("allocating %ld bytes for %s\n", size, name);

	// Round the request up to whole pages.
	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	void *logAddress;
	area_id area = create_area(name, &logAddress, B_ANY_KERNEL_ADDRESS,
		size, B_CONTIGUOUS, 0);
	if (area < B_OK) {
		TRACE_ERROR("couldn't allocate area %s\n", name);
		return B_ERROR;
	}

	physical_entry physicalEntry;
	status_t result = get_memory_map(logAddress, size, &physicalEntry, 1);
	if (result < B_OK) {
		delete_area(area);
		TRACE_ERROR("couldn't map area %s\n", name);
		return B_ERROR;
	}

	memset(logAddress, 0, size);

	if (logicalAddress != NULL)
		*logicalAddress = logAddress;
	if (physicalAddress != NULL)
		*physicalAddress = physicalEntry.address;

	TRACE("area = %ld, size = %ld, log = %p, phy = %p\n", area, size,
		logAddress, physicalEntry.address);
	return area;
}
// On destruction, unless ownership was detached, dispose of the recording
// area and the global parameter record.
~ParameterDeleter()
{
	if (fDetached)
		return;

	delete_area(fArea);
	delete sRecordedParameters;
	sRecordedParameters = NULL;
}
static int vm_bang(void *str) { for (int i = 0; i < 10; i++) { uint *addr; int area = create_area("original area", (void**) &addr, 0, 0x2000, AREA_NOT_WIRED, USER_READ | USER_WRITE); if (area < 0) { _serial_print("error creating original area\n"); return 0; } unsigned var = rand(); *addr = var; uint *clone_addr; int clone = clone_area("clone area", (void**) &clone_addr, 0, USER_WRITE | USER_READ, area); if (clone < 0) { _serial_print("error creating clone area\n"); return 0; } if (*clone_addr != var) { _serial_print("clone failed to copy pages\n"); return 0; } addr += 1024; clone_addr += 1024; *clone_addr = var; if (*addr != var) { _serial_print("page failed failed to be propigated\n"); return 0; } for (int i = 0; i < 10; i++) resize_area(area, (i % 4) * PAGE_SIZE + PAGE_SIZE); delete_area(area); delete_area(clone); } printf("%s", (char*) str); atomic_add(&thread_count, -1); return 0; }
// Device teardown hook: release the flash mapping area, tolerating a
// NULL cookie.
static void
nor_uninit_device(void *_cookie)
{
	TRACE("uninit_device\n");

	nor_driver_info *info = (nor_driver_info*)_cookie;
	if (info == NULL)
		return;

	delete_area(info->id);
}
// Release the queue: unmap the ring area, then destroy each descriptor
// wrapper and the array that held them.
VirtioQueue::~VirtioQueue()
{
	delete_area(fArea);

	for (uint16 index = 0; index < fRingSize; index++)
		delete fDescriptors[index];
	delete[] fDescriptors;
}
/*!	Allocates a physically contiguous bounce buffer honoring this
	resource's DMA restrictions (address window, alignment, boundary).
	On success *_buffer receives a new DMABounceBuffer owning the area. */
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	// Any kernel address will do virtually, but the physical placement
	// must satisfy the controller's DMA restrictions.
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	// B_CONTIGUOUS guarantees one physical run, so one entry suffices.
	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;
	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}