void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}
void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	lock->holder = -1;
	lock->recursion = 0;
	mutex_init_etc(&lock->lock, name, flags);
}
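/*
 * Illustrative sketch only, not taken from the sources above: how a caller
 * might use a lock set up by recursive_lock_init_etc(). It assumes the usual
 * Haiku kernel counterparts recursive_lock_lock(), recursive_lock_unlock()
 * and recursive_lock_destroy(); the lock and function names are invented for
 * the example.
 */
static recursive_lock sExampleLock;

static void
example_nested_use(void)
{
	recursive_lock_init_etc(&sExampleLock, "example lock", 0);

	recursive_lock_lock(&sExampleLock);
	// the holding thread may re-enter without deadlocking
	recursive_lock_lock(&sExampleLock);

	recursive_lock_unlock(&sExampleLock);
	recursive_lock_unlock(&sExampleLock);

	recursive_lock_destroy(&sExampleLock);
}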
void
mtx_init(struct mtx *mutex, const char *name, const char *type, int options)
{
	if ((options & MTX_RECURSE) != 0) {
		recursive_lock_init_etc(&mutex->u.recursive, name,
			MUTEX_FLAG_CLONE_NAME);
	} else {
		mutex_init_etc(&mutex->u.mutex.lock, name, MUTEX_FLAG_CLONE_NAME);
		mutex->u.mutex.owner = -1;
	}

	mutex->type = options;
}
void
mtx_init(struct mtx *mutex, const char *name, const char *type, int options)
{
	if (options == MTX_DEF) {
		mutex_init_etc(&mutex->u.mutex, name, MUTEX_FLAG_CLONE_NAME);
	} else if (options == MTX_RECURSE) {
		recursive_lock_init_etc(&mutex->u.recursive, name,
			MUTEX_FLAG_CLONE_NAME);
	} else
		panic("Uh-oh, someone is pressing the wrong buttons");

	mutex->type = options;
}
void
mtx_init(struct mtx *mutex, const char *name, const char *type, int options)
{
	if ((options & MTX_RECURSE) != 0) {
		recursive_lock_init_etc(&mutex->u.recursive, name,
			MUTEX_FLAG_CLONE_NAME);
		mutex->type = MTX_RECURSE;
	} else if ((options & MTX_SPIN) != 0) {
		B_INITIALIZE_SPINLOCK(&mutex->u.spinlock.lock);
		mutex->type = MTX_SPIN;
	} else {
		mutex_init_etc(&mutex->u.mutex.lock, name, MUTEX_FLAG_CLONE_NAME);
		mutex->u.mutex.owner = -1;
		mutex->type = MTX_DEF;
	}
}
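/*
 * Illustrative sketch only: how driver code built against a FreeBSD
 * compatibility layer like the one above would typically initialize and use
 * a default (MTX_DEF) mutex. The structure and names (example_softc, sc_mtx)
 * are invented for the example; mtx_lock(), mtx_unlock() and mtx_destroy()
 * are the standard FreeBSD counterparts of mtx_init(), assumed to be
 * provided by the same layer.
 */
struct example_softc {
	struct mtx sc_mtx;
	int sc_state;
};

static void
example_attach(struct example_softc *sc)
{
	mtx_init(&sc->sc_mtx, "example driver lock", NULL, MTX_DEF);

	mtx_lock(&sc->sc_mtx);
	sc->sc_state = 1;
	mtx_unlock(&sc->sc_mtx);
}

static void
example_detach(struct example_softc *sc)
{
	mtx_destroy(&sc->sc_mtx);
}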
PhysicalMemoryAllocator::PhysicalMemoryAllocator(const char *name,
	size_t minSize, size_t maxSize, uint32 minCountPerBlock)
	:	fOverhead(0),
		fStatus(B_NO_INIT)
{
	fName = strdup(name);
	mutex_init_etc(&fLock, fName, MUTEX_FLAG_CLONE_NAME);

	// one bookkeeping array per power-of-two block size from minSize up to
	// maxSize
	fArrayCount = 1;
	size_t biggestSize = minSize;
	while (biggestSize < maxSize) {
		fArrayCount++;
		biggestSize *= 2;
	}

	size_t size = fArrayCount * sizeof(uint8 *);
	fArray = (uint8 **)malloc(size);
	fOverhead += size;

	size = fArrayCount * sizeof(size_t);
	fBlockSize = (size_t *)malloc(size);
	fArrayLength = (size_t *)malloc(size);
	fArrayOffset = (size_t *)malloc(size);
	fOverhead += size * 3;

	size_t arraySlots = biggestSize / minSize;
	for (int32 i = 0; i < fArrayCount; i++) {
		size = arraySlots * minCountPerBlock * sizeof(uint8);
		fArrayLength[i] = arraySlots * minCountPerBlock;
		fBlockSize[i] = biggestSize / arraySlots;
		fArrayOffset[i] = fArrayLength[i] - 1;

		fArray[i] = (uint8 *)malloc(size);
		memset(fArray[i], 0, fArrayLength[i]);

		fOverhead += size;
		arraySlots /= 2;
	}

	fManagedMemory = fBlockSize[0] * fArrayLength[0];

	size_t roundedSize = biggestSize * minCountPerBlock;
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
	fDebugBase = roundedSize;
	fDebugChunkSize = 64;
	fDebugUseMap = 0;
	roundedSize += sizeof(fDebugUseMap) * 8 * fDebugChunkSize;
#endif
	roundedSize = (roundedSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	fArea = create_area(fName, &fLogicalBase, B_ANY_KERNEL_ADDRESS,
		roundedSize, B_32_BIT_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
		// TODO: Use B_CONTIGUOUS when the TODOs regarding 64 bit physical
		// addresses are fixed (if possible).
	if (fArea < B_OK) {
		TRACE_ERROR(("PMA: failed to create memory area\n"));
		return;
	}

	physical_entry physicalEntry;
	if (get_memory_map(fLogicalBase, roundedSize, &physicalEntry, 1) < B_OK) {
		TRACE_ERROR(("PMA: failed to get memory map\n"));
		return;
	}

	fPhysicalBase = physicalEntry.address;
	fStatus = B_OK;
}
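/*
 * Illustrative sketch only: constructing the allocator and verifying that
 * the constructor reached fStatus = B_OK. It assumes an InitCheck()-style
 * accessor exposing fStatus; the name "example allocator" and the parameter
 * values (8-byte minimum blocks, eight-page maximum blocks, 4 blocks of the
 * largest size) are just plausible examples.
 */
static status_t
example_create_allocator(PhysicalMemoryAllocator **_allocator)
{
	PhysicalMemoryAllocator *allocator = new(std::nothrow)
		PhysicalMemoryAllocator("example allocator", 8, B_PAGE_SIZE * 8, 4);
	if (allocator == NULL)
		return B_NO_MEMORY;

	if (allocator->InitCheck() < B_OK) {
		// creating the backing area or resolving its physical address failed
		delete allocator;
		return B_NO_INIT;
	}

	*_allocator = allocator;
	return B_OK;
}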
void
hoardLockInit(hoardLockType &lock, const char *name)
{
	mutex_init_etc(&lock, name, MUTEX_FLAG_ADAPTIVE);
}
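/*
 * Illustrative sketch only: the lock/unlock wrappers one would expect next
 * to hoardLockInit(), assuming they simply forward to mutex_lock() and
 * mutex_unlock() on the same mutex. The names and bodies are an assumption
 * for the example, not taken from the sources above.
 */
void
hoardLock(hoardLockType &lock)
{
	mutex_lock(&lock);
}

void
hoardUnlock(hoardLockType &lock)
{
	mutex_unlock(&lock);
}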