/*!	Main loop of the kernel's low-resource manager thread.
	Periodically (or when kicked via \c sLowResourceWaitSem) recomputes the
	resource state, invokes the registered low-resource handlers when any
	resource is at least at "note" level, and wakes threads blocked in
	\c sLowResourceWaiterCondition.
	Never returns under normal operation; the trailing \c return 0 only
	satisfies the thread-entry signature.
*/
static status_t
low_resource_manager(void*)
{
	bigtime_t timeout = kLowResourceInterval;
	while (true) {
		int32 state = low_resource_state_no_update(B_ALL_KERNEL_RESOURCES);
		if (state != B_LOW_RESOURCE_CRITICAL) {
			// Not critical: sleep until the interval expires or someone
			// releases the semaphore to request an immediate re-check.
			// In the critical state we skip the wait and poll continuously.
			acquire_sem_etc(sLowResourceWaitSem, 1, B_RELATIVE_TIMEOUT,
				timeout);
		}

		// Hold the lock while recomputing state and calling handlers.
		RecursiveLocker _(&sLowResourceLock);

		compute_state();
		state = low_resource_state_no_update(B_ALL_KERNEL_RESOURCES);

		TRACE(("low_resource_manager: state = %ld, %ld free pages, %lld free "
			"memory, %lu free semaphores\n", state, vm_page_num_free_pages(),
			vm_available_not_needed_memory(),
			sem_max_sems() - sem_used_sems()));

		// Nothing is low: go back to sleep without bothering the handlers.
		if (state < B_LOW_RESOURCE_NOTE)
			continue;

		call_handlers(sLowResources);

		// Poll more frequently while we are in the warning state.
		if (state == B_LOW_RESOURCE_WARNING)
			timeout = kWarnResourceInterval;
		else
			timeout = kLowResourceInterval;

		// Wake anyone waiting for a low-resource pass to complete.
		sLowResourceWaiterCondition.NotifyAll();
	}
	return 0;
}
static int dump_info(int argc, char **argv) { kprintf("kernel build: %s %s (gcc%d %s)\n", __DATE__, __TIME__, __GNUC__, __VERSION__); kprintf("revision: %s\n\n", get_haiku_revision()); kprintf("cpu count: %" B_PRId32 "\n", smp_get_num_cpus()); for (int32 i = 0; i < smp_get_num_cpus(); i++) kprintf(" [%" B_PRId32 "] active time: %10" B_PRId64 ", interrupt" " time: %10" B_PRId64 ", irq time: %10" B_PRId64 "\n", i + 1, gCPU[i].active_time, gCPU[i].interrupt_time, gCPU[i].irq_time); // ToDo: Add page_faults kprintf("pages:\t\t%" B_PRIuPHYSADDR " (%" B_PRIuPHYSADDR " max)\n", vm_page_num_pages() - vm_page_num_free_pages(), vm_page_num_pages()); kprintf("sems:\t\t%" B_PRId32 " (%" B_PRId32 " max)\n", sem_used_sems(), sem_max_sems()); kprintf("ports:\t\t%" B_PRId32 " (%" B_PRId32 " max)\n", port_used_ports(), port_max_ports()); kprintf("threads:\t%" B_PRId32 " (%" B_PRId32 " max)\n", thread_used_threads(), thread_max_threads()); kprintf("teams:\t\t%" B_PRId32 " (%" B_PRId32 " max)\n", team_used_teams(), team_max_teams()); return 0; }
/*!	Updates the driver's view of the medium: block size and capacity.
	On the first valid medium (block size transitions from 0) this also
	creates the DMA resource and the I/O scheduler; a later block-size
	change is not yet implemented and panics.
	\param info			per-device driver state (owns dma_resource and
						io_scheduler)
	\param capacity		capacity in blocks as reported by the device;
						0 means no medium present
	\param blockSize	block size in bytes reported by the device
*/
static void
cd_set_capacity(cd_driver_info* info, uint64 capacity, uint32 blockSize)
{
	TRACE("cd_set_capacity(info = %p, capacity = %Ld, blockSize = %ld)\n",
		info, capacity, blockSize);

	// get log2, if possible
	// NOTE(review): blockShift is computed but not used anywhere in this
	// function — presumably consumed elsewhere or leftover; confirm.
	uint32 blockShift = log2(blockSize);
	if ((1UL << blockShift) != blockSize)
		blockShift = 0;

	if (info->block_size != blockSize) {
		if (capacity == 0) {
			// there is obviously no medium in the drive, don't try to update
			// the DMA resource
			return;
		}

		if (info->block_size != 0) {
			// Block size changed after initial setup — unsupported for now.
			dprintf("old %ld, new %ld\n", info->block_size, blockSize);
			panic("updating DMAResource not yet implemented...");
		}

		// TODO: we need to replace the DMAResource in our IOScheduler
		status_t status = info->dma_resource->Init(info->node, blockSize,
			1024, 32);
		if (status != B_OK)
			panic("initializing DMAResource failed: %s", strerror(status));

		// Allocate the I/O scheduler. If there seems to be sufficient memory
		// we use an IOCache, since that adds caching at the lowest I/O layer
		// and thus dramatically reduces I/O operations and seeks. The
		// disadvantage is that it increases free memory (physical pages)
		// fragmentation, which makes large contiguous allocations more likely
		// to fail.
		size_t freeMemory = vm_page_num_free_pages();
		if (freeMemory > 180 * 1024 * 1024 / B_PAGE_SIZE) {
			info->io_scheduler = new(std::nothrow) IOCache(info->dma_resource,
				1024 * 1024);
		} else {
			dprintf("scsi_cd: Using IOSchedulerSimple instead of IOCache to "
				"avoid memory allocation issues.\n");
			info->io_scheduler = new(std::nothrow) IOSchedulerSimple(
				info->dma_resource);
		}

		if (info->io_scheduler == NULL)
			panic("allocating IOScheduler failed.");

		// TODO: use whole device name here
		status = info->io_scheduler->Init("scsi");
		if (status != B_OK)
			panic("initializing IOScheduler failed: %s", strerror(status));

		info->io_scheduler->SetCallback(do_io, info);
		info->block_size = blockSize;
	}

	if (info->original_capacity != capacity && info->io_scheduler != NULL) {
		info->original_capacity = capacity;

		// For CDs, it's obviously relatively normal that they report a larger
		// capacity than it can actually address. Therefore we'll manually
		// correct the value here.
		test_capacity(info);

		info->io_scheduler->SetDeviceCapacity(info->capacity * blockSize);
	}
}
static void compute_state(void) { sLastMeasurement = system_time(); sLowResources = B_ALL_KERNEL_RESOURCES; // free pages state uint32 freePages = vm_page_num_free_pages(); if (freePages < kCriticalPagesLimit) { sLowPagesState = B_LOW_RESOURCE_CRITICAL; } else if (freePages < kWarnPagesLimit) { sLowPagesState = B_LOW_RESOURCE_WARNING; } else if (freePages < kNotePagesLimit) { sLowPagesState = B_LOW_RESOURCE_NOTE; } else { sLowPagesState = B_NO_LOW_RESOURCE; sLowResources &= ~B_KERNEL_RESOURCE_PAGES; } // free memory state off_t freeMemory = vm_available_not_needed_memory(); if (freeMemory < sCriticalMemoryLimit) { sLowMemoryState = B_LOW_RESOURCE_CRITICAL; } else if (freeMemory < sWarnMemoryLimit) { sLowMemoryState = B_LOW_RESOURCE_WARNING; } else if (freeMemory < sNoteMemoryLimit) { sLowMemoryState = B_LOW_RESOURCE_NOTE; } else { sLowMemoryState = B_NO_LOW_RESOURCE; sLowResources &= ~B_KERNEL_RESOURCE_MEMORY; } // free semaphores state uint32 maxSems = sem_max_sems(); uint32 freeSems = maxSems - sem_used_sems(); if (freeSems < maxSems >> 16) { sLowSemaphoresState = B_LOW_RESOURCE_CRITICAL; } else if (freeSems < maxSems >> 8) { sLowSemaphoresState = B_LOW_RESOURCE_WARNING; } else if (freeSems < maxSems >> 4) { sLowSemaphoresState = B_LOW_RESOURCE_NOTE; } else { sLowSemaphoresState = B_NO_LOW_RESOURCE; sLowResources &= ~B_KERNEL_RESOURCE_SEMAPHORES; } // free kernel address space state // TODO: this should take fragmentation into account size_t maxSpace = KERNEL_SIZE; size_t freeSpace = vm_kernel_address_space_left(); if (freeSpace < maxSpace >> 16) sLowSpaceState = B_LOW_RESOURCE_CRITICAL; if (freeSpace < maxSpace >> 8) sLowSpaceState = B_LOW_RESOURCE_WARNING; if (freeSpace < maxSpace >> 4) sLowSpaceState = B_LOW_RESOURCE_NOTE; else { sLowSpaceState = B_NO_LOW_RESOURCE; sLowResources &= ~B_KERNEL_RESOURCE_ADDRESS_SPACE; } }