bool AppleLVMGroup::resizeSet(UInt32 newMemberCount)
{
    UInt32 oldMemberCount = arMemberCount;

    UInt64 * oldBlockCounts = arMemberBlockCounts;
    arMemberBlockCounts = IONew(UInt64, newMemberCount);
    bzero(arMemberBlockCounts, sizeof(UInt64) * newMemberCount);
    if (oldBlockCounts) {
        bcopy(oldBlockCounts, arMemberBlockCounts, sizeof(UInt64) * oldMemberCount);
        IODelete(oldBlockCounts, UInt64, oldMemberCount);   // IODelete takes the element type, matching the IONew above
    }

    UInt64 * oldStartingOffset = arMemberStartingOffset;
    arMemberStartingOffset = IONew(UInt64, newMemberCount);
    bzero(arMemberStartingOffset, sizeof(UInt64) * newMemberCount);
    if (oldStartingOffset) {
        bcopy(oldStartingOffset, arMemberStartingOffset, sizeof(UInt64) * oldMemberCount);
        IODelete(oldStartingOffset, UInt64, oldMemberCount);
    }

    AppleLVMVolume ** oldMetaDataVolumes = arMetaDataVolumes;
    arMetaDataVolumes = IONew(AppleLVMVolume *, newMemberCount);
    bzero(arMetaDataVolumes, sizeof(AppleLVMVolume *) * newMemberCount);
    if (oldMetaDataVolumes) {
        bcopy(oldMetaDataVolumes, arMetaDataVolumes, sizeof(AppleLVMVolume *) * oldMemberCount);
        IODelete(oldMetaDataVolumes, AppleLVMVolume *, oldMemberCount);
    }

    if (super::resizeSet(newMemberCount) == false) return false;

    if (oldMemberCount && arMemberCount > oldMemberCount) {
        arExpectingLiveAdd += arMemberCount - oldMemberCount;
    }

    return true;
}
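/*
 * A minimal sketch of the grow-and-copy pattern used by the resizeSet()
 * implementations in this listing: allocate the new array with IONew, zero
 * it, copy over the old contents, then retire the old allocation with
 * IODelete using the same element type and count that were given to IONew.
 * The helper name growUInt64Array is hypothetical and not part of any of the
 * classes shown; it only illustrates the pairing of the calls.
 */
static bool growUInt64Array(UInt64 ** array, UInt32 oldCount, UInt32 newCount)
{
    UInt64 * newArray = IONew(UInt64, newCount);
    if (!newArray) return false;                    // propagate allocation failure

    bzero(newArray, sizeof(UInt64) * newCount);     // new slots start zeroed
    if (*array) {
        bcopy(*array, newArray, sizeof(UInt64) * oldCount);
        IODelete(*array, UInt64, oldCount);         // free with the original type and count
    }
    *array = newArray;
    return true;
}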
void AppleLVMGroup::free(void)
{
    if (arMemberBlockCounts) IODelete(arMemberBlockCounts, UInt64, arMemberCount);
    if (arMemberStartingOffset) IODelete(arMemberStartingOffset, UInt64, arMemberCount);
    if (arPrimaryBuffer) arPrimaryBuffer->release();

    UInt32 i;
    if (arLogicalVolumes) {
        for (i = 0; i < arLogicalVolumeCount; i++) {
            if (arLogicalVolumes[i]) {
                arController->removeLogicalVolume(arLogicalVolumes[i]);
                arLogicalVolumes[i]->release();
                arLogicalVolumes[i] = NULL;
            }
        }
        IODelete(arLogicalVolumes, AppleLVMVolume *, 1024);   // XXXTOC
    }

    if (arMetaDataVolumes) {
        for (i = 0; i < arMemberCount; i++) {
            if (arMetaDataVolumes[i]) arMetaDataVolumes[i]->release();
        }
        IODelete(arMetaDataVolumes, AppleLVMVolume *, arMemberCount);
    }

    super::free();
}
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size) size = round_page(size);

    if (reserved) {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map) map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer) {
        if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags) {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags) {
            iopa_free((uintptr_t) buffer, size);
        }
        else if (alignment > 1) {
            IOFreeAligned(buffer, size);
        }
        else {
            IOFree(buffer, size);
        }
    }

    if (range && (kIOMemoryAsReference & flags)) IODelete(range, IOAddressRange, 1);
}
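/*
 * A short usage sketch, assuming the public IOBufferMemoryDescriptor
 * interface (inTaskWithOptions / getBytesNoCopy / release): the descriptor is
 * created with a capacity and alignment, and releasing the last reference
 * runs the free() path above, which picks the deallocator that matches how
 * the backing storage was obtained. The size and option values here are
 * illustrative only, not taken from the source.
 */
#include <IOKit/IOBufferMemoryDescriptor.h>

static IOReturn bufferExample( void )
{
    IOBufferMemoryDescriptor * buf =
        IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
                                                    kIODirectionInOut,
                                                    4096,      // capacity
                                                    4096);     // alignment
    if (!buf)
        return kIOReturnNoMemory;

    bzero(buf->getBytesNoCopy(), 4096);   // stage data in the kernel mapping

    buf->release();                       // last reference: invokes free() above
    return kIOReturnSuccess;
}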
void AppleRAIDMirrorSet::free(void)
{
    if (arRebuildThreadCall) thread_call_free(arRebuildThreadCall);
    arRebuildThreadCall = 0;
    if (arSetCompleteThreadCall) thread_call_free(arSetCompleteThreadCall);
    arSetCompleteThreadCall = 0;

    if (arLastSeek) IODelete(arLastSeek, UInt64, arLastAllocCount);
    if (arSkippedIOCount) IODelete(arSkippedIOCount, UInt64, arLastAllocCount);

    assert(queue_empty(&arFailedRequestQueue));

    super::free();
}
void IORangeAllocator::free()
{
    if( elements)
        IODelete( elements, IORangeAllocatorElement, capacity );

    super::free();
}
void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    mutex_free( lock->mutex );
    IODelete( lock, _IORecursiveLock, 1);
}
void IOAccelerationUserClient::stop( IOService * provider )
{
    IOAccelIDRecord * record;

    IOLockLock(gLock);
    while (!queue_empty( &fTaskList ))
    {
        queue_remove_first( &fTaskList,
                            record,
                            IOAccelIDRecord *,
                            task_link );
        if (--record->retain)
            record->task_link.next = 0;
        else
        {
            queue_remove(&gGlobalList,
                         record,
                         IOAccelIDRecord *,
                         glob_link);
            gTotalCount--;
            IODelete(record, IOAccelIDRecord, 1);
        }
    }
    IOLockUnlock(gLock);

    super::stop( provider );
}
void net_habitue_device_SC101::handleAsyncIOTimeout(outstanding *out, void *ctx)
{
    outstanding_io *io = (outstanding_io *)ctx;
    IOStorageCompletion completion = io->completion;
    bool isWrite = (io->buffer->getDirection() == kIODirectionOut);

    io->attempt++;
    io->timeout_ms = getNextTimeoutMS(io->attempt, isWrite);

    if (io->timeout_ms) {
        if (io->attempt > 3)
            KINFO("retry IO (%p, %d, %d)", io, io->attempt, io->timeout_ms);
        else
            KDEBUG("retry IO (%p, %d, %d)", io, io->attempt, io->timeout_ms);

        // IOBlockStorageDriver::incrementRetries(isWrite)
        doSubmitIO(io);
        return;
    }

    KINFO("abort IO %p", io);
    // IOBlockStorageDriver::incrementErrors(isWrite)

    completeIO(io);
    io->addr->release();
    IODelete(io, outstanding_io, 1);

    IOStorage::complete(completion, kIOReturnNotResponding, 0);
}
IOReturn IOPolledFileClose(IOPolledFileIOVars ** pVars,
                           off_t write_offset, void * addr, size_t write_length,
                           off_t discard_offset, off_t discard_end)
{
    IOPolledFileIOVars * vars;

    vars = *pVars;
    if (!vars) return (kIOReturnSuccess);

    if (vars->fileRef) {
        kern_close_file_for_direct_io(vars->fileRef, write_offset, addr, write_length,
                                      discard_offset, discard_end);
        vars->fileRef = NULL;
    }
    if (vars->fileExtents) {
        vars->fileExtents->release();
        vars->fileExtents = 0;
    }
    if (vars->pollers) {
        vars->pollers->release();
        vars->pollers = 0;
    }

    if (vars->allocated)
        IODelete(vars, IOPolledFileIOVars, 1);
    else
        bzero(vars, sizeof(IOPolledFileIOVars));

    *pVars = NULL;

    return (kIOReturnSuccess);
}
void net_habitue_device_SC101::handleResolveTimeout(outstanding *out, void *ctx)
{
    KINFO("resolve timed out, no such ID '%s'?", getID()->getCStringNoCopy());

    IODelete(out, outstanding, 1);

    // TODO(iwade) detach if never successfully resolved.
}
// Free is called twice:
// First when the atomic retainCount transitions from 1 -> 0.
// Second when the work loop itself is committing hara-kiri.
// Hence each leg of the free must be single threaded.
void IOWorkLoop::free()
{
    if (workThread) {
        IOInterruptState is;

        // If we are here then we must be trying to shut down this work loop.
        // In this case disable all of the event sources, mark the loop as
        // terminating, wake up the work thread itself, and return.
        // Note: we hold the gate across the entire operation mainly for the
        // benefit of our event sources, so we can disable them cleanly.
        closeGate();

        disableAllEventSources();

        is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        SETP(&fFlags, kLoopTerminate);
        thread_wakeup_one((void *) &workToDo);
        IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);

        openGate();
    }
    else /* !workThread */ {
        IOEventSource *event, *next;

        for (event = eventChain; event; event = next) {
            next = event->getNext();
            event->setWorkLoop(0);
            event->setNext(0);
            event->release();
        }
        eventChain = 0;

        // Either we have a partial initialization to clean up,
        // or the work thread itself has performed hara-kiri.
        // Either way clean up all of our resources and return.
        if (controlG) {
            controlG->release();
            controlG = 0;
        }
        if (workToDoLock) {
            IOSimpleLockFree(workToDoLock);
            workToDoLock = 0;
        }
        if (gateLock) {
            IORecursiveLockFree(gateLock);
            gateLock = 0;
        }
        if (reserved) {
            IODelete(reserved, ExpansionData, 1);
            reserved = 0;
        }

        super::free();
    }
}
void IOEventSource::free( void )
{
    IOStatisticsUnregisterCounter();

    if (reserved)
        IODelete(reserved, ExpansionData, 1);

    super::free();
}
bool AppleRAIDMirrorSet::resizeSet(UInt32 newMemberCount)
{
    UInt32 oldMemberCount = arMemberCount;

    // if downsizing, just hold on to the extra space
    if (arLastAllocCount < newMemberCount) {
        if (arLastSeek) IODelete(arLastSeek, UInt64, arLastAllocCount);
        arLastSeek = IONew(UInt64, newMemberCount);
        if (!arLastSeek) return false;

        if (arSkippedIOCount) IODelete(arSkippedIOCount, UInt64, arLastAllocCount);
        arSkippedIOCount = IONew(UInt64, newMemberCount);
        if (!arSkippedIOCount) return false;
    }
    bzero(arLastSeek, sizeof(UInt64) * newMemberCount);
    bzero(arSkippedIOCount, sizeof(UInt64) * newMemberCount);

    if (super::resizeSet(newMemberCount) == false) return false;

    if (oldMemberCount && arMemberCount > oldMemberCount) {
        arExpectingLiveAdd += arMemberCount - oldMemberCount;
    }

    return true;
}
void IOHIDInterface::free()
{
    OSSafeReleaseNULL(_transportString);
    OSSafeReleaseNULL(_elementArray);
    OSSafeReleaseNULL(_manufacturerString);
    OSSafeReleaseNULL(_productString);
    OSSafeReleaseNULL(_serialNumberString);

    if ( _reserved ) {
        IODelete( _reserved, ExpansionData, 1 );
    }

    super::free();
}
void deblockCompletion(void *target, void *parameter, IOReturn status, UInt64 actualByteCount)
{
    deblock_state *state = (deblock_state *)parameter;
    deblock_master_state *master = state->master;

    state->buffer->release();
    IODelete(state, deblock_state, 1);

    master->pending--;
    master->actualByteCount += actualByteCount;
    if (status != kIOReturnSuccess)
        master->status = status;

    if (!master->pending) {
        if (master->status != kIOReturnSuccess)
            KINFO("deblock FAILED");

        IOStorage::complete(master->completion, master->status, master->actualByteCount);
        master->addr->release();
        IODelete(master, deblock_master_state, 1);
    }
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void IOHIKeyboardMapper::free()
{
    if (!_parsedMapping.mapping || !_parsedMapping.mappingLen)
        return;

    if (_reserved) {
        IODelete(_reserved, ExpansionData, 1);
    }

    if (_mappingShouldBeFreed)
        IOFree((void *)_parsedMapping.mapping, _parsedMapping.mappingLen);

    super::free();
}
void IOEthernetInterface::free()
{
    if ( _requiredFilters ) {
        _requiredFilters->release();
        _requiredFilters = 0;
    }

    if ( _activeFilters ) {
        _activeFilters->release();
        _activeFilters = 0;
    }

    if ( _supportedFilters ) {
        _supportedFilters->release();
        _supportedFilters = 0;
    }

    if ( _inputEventThreadCall ) {
        thread_call_free( _inputEventThreadCall );
        _inputEventThreadCall = 0;
    }

    if ( _reserved ) {
        if (kIOPMUndefinedDriverAssertionID != _wompEnabledAssertionID) {
            getPMRootDomain()->releasePMAssertion(_wompEnabledAssertionID);
            _wompEnabledAssertionID = kIOPMUndefinedDriverAssertionID;
        }
        if (_disabledWakeFilters) {
            _disabledWakeFilters->release();
            _disabledWakeFilters = 0;
        }
        IODelete( _reserved, ExpansionData, 1 );
        _reserved = 0;
    }

    super::free();
}
IORecursiveLock * IORecursiveLockAlloc( void )
{
    _IORecursiveLock * lock;

    lock = IONew( _IORecursiveLock, 1);
    if( !lock)
        return( 0 );

    lock->mutex = mutex_alloc(ETAP_IO_AHA);
    if( lock->mutex) {
        lock->thread = 0;
        lock->count  = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1);
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}
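/*
 * A minimal usage sketch for the allocation/free pair above, assuming the
 * public IOLib interface (IORecursiveLockAlloc / IORecursiveLockLock /
 * IORecursiveLockUnlock / IORecursiveLockFree). The ExampleDriver class and
 * its members are hypothetical and only illustrate the lifecycle: the lock
 * returned by IORecursiveLockAlloc must eventually be handed back to
 * IORecursiveLockFree, which performs the IODelete of the _IORecursiveLock.
 */
#include <IOKit/IOLib.h>

class ExampleDriver
{
    IORecursiveLock * fLock;

public:
    bool initLocking( void )
    {
        fLock = IORecursiveLockAlloc();
        return (fLock != 0);
    }

    void doWork( void )
    {
        IORecursiveLockLock(fLock);     // may be re-taken by the same thread
        // ... critical section ...
        IORecursiveLockUnlock(fLock);
    }

    void freeLocking( void )
    {
        if (fLock) {
            IORecursiveLockFree(fLock); // frees the mutex and IODeletes the wrapper
            fLock = 0;
        }
    }
};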
void net_habitue_device_SC101::handleResolvePacket(sockaddr_in *addr, mbuf_t m, size_t len, outstanding *out, void *ctx)
{
    clock_get_uptime(&_lastReply);

    if (mbuf_len(m) < out->len && mbuf_pullup(&m, out->len) != 0) {
        KINFO("pullup failed");
        return;
    }

    KDEBUG("resolve succeeded!");

    psan_resolve_response_t *res = (psan_resolve_response_t *)mbuf_data(m);

    sockaddr_in part;
    bzero(&part, sizeof(part));
    part.sin_len = sizeof(part);
    part.sin_family = AF_INET;
    part.sin_port = htons(PSAN_PORT);
    part.sin_addr = res->ip4;

    OSData *partData = OSData::withBytes(&part, sizeof(part));
    if (partData) {
        setProperty(gSC101DevicePartitionAddressKey, partData);
        partData->release();
    }

    OSData *rootData = OSData::withBytes(addr, sizeof(*addr));
    if (rootData) {
        setProperty(gSC101DeviceRootAddressKey, rootData);
        rootData->release();
    }

    IODelete(out, outstanding, 1);
    mbuf_freem(m);

    if (!getProperty(gSC101DeviceSizeKey))
        disk();
}
void RadeonController::free( void )
{
    ScrnInfoPtr pScrn = xf86Screens[0];
    if (pScrn) {
        IODelete(pScrn, ScrnInfoRec, 1);
        xf86Screens[0] = NULL;
    }

#ifdef DEBUG
    DumpMsg.client--;
    if (DumpMsg.client == 0) {
        DumpMsg.mMsgBufferEnabled = false;
        if (DumpMsg.mMsgBuffer) {
            IOFree(DumpMsg.mMsgBuffer, DumpMsg.mMsgBufferSize);
            DumpMsg.mMsgBuffer = NULL;
        }
        if (DumpMsg.mMessageLock) {
            IOLockLock(DumpMsg.mMessageLock);
            IOLockFree(DumpMsg.mMessageLock);
            DumpMsg.mMessageLock = NULL;
        }
        getRegistryRoot()->removeProperty("RadeonDumpReady");
    }
#endif

    int i;
    for (i = 0; i < 2; i++) {
        if (options.EDID_Block[i]) {
            IOFree(options.EDID_Block[i], options.EDID_Length[i]);
            options.EDID_Block[i] = NULL;
        }
    }

    if (memoryMap.BIOSCopy) {
        IOFree(memoryMap.BIOSCopy, memoryMap.BIOSLength);
        memoryMap.BIOSCopy = NULL;
    }

    if (IOMap) IOMap->release();
    if (FBMap) FBMap->release();

    super::free();
}
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved) {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer) {
        if (options & kIOMemoryPageable) {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
void net_habitue_device_SC101::handleAsyncIOPacket(sockaddr_in *addr, mbuf_t m, size_t len, outstanding *out, void *ctx)
{
    clock_get_uptime(&_lastReply);

    outstanding_io *io = (outstanding_io *)ctx;
    bool isWrite = (io->buffer->getDirection() == kIODirectionOut);
    UInt32 ioLen = (io->nblks * SECTOR_SIZE);
    IOStorageCompletion completion = io->completion;
    IOReturn status = kIOReturnError;
    IOByteCount wrote = ioLen;

    if (isWrite) {
        //KDEBUG("%p write %d %d", io, io->block, io->nblks);
        status = kIOReturnSuccess;
    } else {
        //KDEBUG("%p read %d %d", io, io->block, io->nblks);
        if (mbuf_buffer(io->buffer, 0, m, sizeof(psan_get_response_t), ioLen))
            status = kIOReturnSuccess;
        else
            KINFO("mbuf_buffer failed");
    }

    if (status != kIOReturnSuccess)
        KINFO("%p FAILED", io);

    completeIO(io);
    io->addr->release();
    IODelete(io, outstanding_io, 1);
    mbuf_freem(m);

    IOStorage::complete(completion, status, wrote);
}
// allocate element at index
bool IORangeAllocator::allocElement( UInt32 index )
{
    UInt32                    newCapacity;
    IORangeAllocatorElement * newElements;

    if( ((numElements == capacity) && capacityIncrement)
     || (!elements)) {

        newCapacity = capacity + capacityIncrement;
        newElements = IONew( IORangeAllocatorElement, newCapacity );
        if( !newElements)
            return( false );

        if( elements) {
            bcopy( elements,
                   newElements,
                   index * sizeof( IORangeAllocatorElement));
            bcopy( elements + index,
                   newElements + index + 1,
                   (numElements - index) * sizeof( IORangeAllocatorElement));

            IODelete( elements, IORangeAllocatorElement, capacity );
        }

        elements = newElements;
        capacity = newCapacity;

    } else {

        bcopy( elements + index,
               elements + index + 1,
               (numElements - index) * sizeof( IORangeAllocatorElement));
    }
    numElements++;

    return( true );
}
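/*
 * A brief usage sketch for the two IORangeAllocator methods above, assuming
 * the public IORangeAllocator interface (withRange / allocate / deallocate /
 * release). allocElement() is the internal step that grows the element table
 * with IONew and retires the old table with IODelete; free() (shown earlier)
 * releases whatever table remains. The range and size values below are
 * arbitrary illustration, not taken from the source.
 */
#include <IOKit/IORangeAllocator.h>

static void rangeAllocatorExample( void )
{
    // Manage offsets up to roughly 1 MB; 0xFFFFF is the last offset handed
    // to withRange (an assumption about the initial-fragment convention).
    IORangeAllocator * ranges = IORangeAllocator::withRange( 0xFFFFF, 0 );
    if (!ranges)
        return;

    IORangeScalar start;
    if (ranges->allocate( 0x1000, &start )) {     // carve out 4 KB
        // ... use [start, start + 0x1000) ...
        ranges->deallocate( start, 0x1000 );      // return it to the pool
    }

    ranges->release();                            // last reference runs free()
}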