Example #1
status_t
int_init_post_vm(kernel_args *args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	return arch_int_init_post_vm(args);
}
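A hedged usage sketch (not taken from the Haiku tree): once the vector list is initialized as above, code touching a vector's fields takes its vector_lock first. acquire_spinlock() must run with interrupts disabled, hence the disable_interrupts()/restore_interrupts() pair; the function name is hypothetical, the fields are the ones set up in the loop.

static void
enable_vector_example(int vector)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	// e.g. bookkeeping done when a handler enables the interrupt
	sVectors[vector].enable_count++;

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);
}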
Example #2
status_t
object_depot_init(object_depot* depot, size_t capacity, size_t maxCount,
	uint32 flags, void* cookie, void (*return_object)(object_depot* depot,
		void* cookie, void* object, uint32 flags))
{
	depot->full = NULL;
	depot->empty = NULL;
	depot->full_count = depot->empty_count = 0;
	depot->max_count = maxCount;
	depot->magazine_capacity = capacity;

	rw_lock_init(&depot->outer_lock, "object depot");
	B_INITIALIZE_SPINLOCK(&depot->inner_lock);

	int cpuCount = smp_get_num_cpus();
	depot->stores = (depot_cpu_store*)slab_internal_alloc(
		sizeof(depot_cpu_store) * cpuCount, flags);
	if (depot->stores == NULL) {
		rw_lock_destroy(&depot->outer_lock);
		return B_NO_MEMORY;
	}

	for (int i = 0; i < cpuCount; i++) {
		depot->stores[i].loaded = NULL;
		depot->stores[i].previous = NULL;
	}

	depot->cookie = cookie;
	depot->return_object = return_object;

	return B_OK;
}
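For context, a hedged sketch of a caller-supplied return_object callback matching the function-pointer signature accepted above; my_cache and my_cache_free() are hypothetical names, not Haiku APIs, and the capacity/maxCount values are arbitrary.

/* Hypothetical callback; only its signature is dictated by
   object_depot_init() above. */
static void
my_return_object(object_depot* depot, void* cookie, void* object, uint32 flags)
{
	struct my_cache* cache = (struct my_cache*)cookie;
	// hand the surplus object back to its backing cache
	my_cache_free(cache, object, flags);
}

/* registration: */
object_depot_init(&cache->depot, 8, 0, flags, cache, &my_return_object);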
Example #3
	ReadRequest(file_cookie* cookie)
		:
		fThread(thread_get_current_thread()),
		fCookie(cookie),
		fNotified(true)
	{
		B_INITIALIZE_SPINLOCK(&fLock);
	}
Example #4
DPCQueue::DPCQueue()
	:
	fThreadID(-1),
	fCallbackInProgress(NULL),
	fCallbackDoneCondition(NULL)
{
	B_INITIALIZE_SPINLOCK(&fLock);

	fPendingCallbacksCondition.Init(this, "dpc queue");
}
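In C++ kernel code like this, a spinlock initialized in the constructor is typically taken later through the RAII helpers from <util/AutoLock.h>. A minimal sketch, assuming a hypothetical method on the same class:

void
DPCQueue::ExampleMethod()
{
	// disables interrupts and acquires fLock
	InterruptsSpinLocker locker(fLock);

	// ... inspect or modify the queue's shared state ...
}	// fLock released and interrupts restored when locker goes out of scope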
Example #5
status_t
int_init_post_vm(kernel_args* args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
		sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;

		B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
		sVectors[i].last_measure_time = 0;
		sVectors[i].last_measure_active = 0;
		sVectors[i].load = 0;

#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;

		sVectorCPUAssignments[i].irq = i;
		sVectorCPUAssignments[i].count = 1;
		sVectorCPUAssignments[i].handlers_count = 0;
		sVectorCPUAssignments[i].load = 0;
		sVectorCPUAssignments[i].cpu = -1;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	add_debugger_command("int_load", &dump_int_load,
		"list interrupt usage statistics");

	return arch_int_init_post_vm(args);
}
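A hedged sketch of how the load-tracking fields initialized above might be updated later. The function name is hypothetical; it assumes interrupts are already disabled (as in interrupt context, so a bare acquire_spinlock() is safe) and uses system_time(), Haiku's microsecond system clock.

static void
update_vector_load_example(int vector, bool active)
{
	acquire_spinlock(&sVectors[vector].load_lock);

	bigtime_t now = system_time();
	if (active) {
		// account the time since the last sample as active time
		sVectors[vector].last_measure_active
			+= now - sVectors[vector].last_measure_time;
	}
	sVectors[vector].last_measure_time = now;

	release_spinlock(&sVectors[vector].load_lock);
}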
Example #6
SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}
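To make the sizing formula concrete with assumed numbers (not values from the source): with sizeof(WaitObject) = 32 and 8-byte pointers, the divisor is 32 + 8 * 3 / 2 = 44 bytes per cached wait object, so a locking_lookup_size of 64 KiB yields 65536 / 44 = 1489 objects before clamping to [MIN_WAIT_OBJECT_COUNT, MAX_WAIT_OBJECT_COUNT]. The extra sizeof(void*) * 3 / 2 per object presumably budgets for the pointer slots of fWaitObjectTable.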
Example #7
void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}
Example #8
void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}
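Once initialized, the lock is used through Haiku's rw_lock API; a minimal sketch (the guarded data is left out):

rw_lock sLock;
rw_lock_init(&sLock, "my data");

rw_lock_read_lock(&sLock);
// ... read shared state; multiple readers may hold the lock at once ...
rw_lock_read_unlock(&sLock);

rw_lock_write_lock(&sLock);
// ... mutate shared state; the writer holds the lock exclusively ...
rw_lock_write_unlock(&sLock);

rw_lock_destroy(&sLock);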
Example #9
void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}
Example #10
void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}
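The matching usage, as a minimal sketch (the protected state is left out):

mutex sMutex;
mutex_init(&sMutex, "my state");

mutex_lock(&sMutex);
// ... exclusive access to the protected state ...
mutex_unlock(&sMutex);

mutex_destroy(&sMutex);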
Example #11
void
mtx_init(struct mtx *mutex, const char *name, const char *type,
	int options)
{
	if ((options & MTX_RECURSE) != 0) {
		recursive_lock_init_etc(&mutex->u.recursive, name,
			MUTEX_FLAG_CLONE_NAME);
		mutex->type = MTX_RECURSE;
	} else if ((options & MTX_SPIN) != 0) {
		B_INITIALIZE_SPINLOCK(&mutex->u.spinlock.lock);
		mutex->type = MTX_SPIN;
	} else {
		mutex_init_etc(&mutex->u.mutex.lock, name, MUTEX_FLAG_CLONE_NAME);
		mutex->u.mutex.owner = -1;
		mutex->type = MTX_DEF;
	}
}
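This is the FreeBSD compatibility shim: whichever union member mtx_init() set up here decides what later mtx_lock()/mtx_unlock() calls dispatch to. A hedged usage sketch in FreeBSD style, using the default (MTX_DEF) path:

struct mtx sDriverMutex;
mtx_init(&sDriverMutex, "driver lock", NULL, MTX_DEF);

mtx_lock(&sDriverMutex);
// ... driver state protected by the wrapped Haiku mutex ...
mtx_unlock(&sDriverMutex);

mtx_destroy(&sDriverMutex);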
Example #12
status_t
initialize_timer(void)
{
	sTimerCount = 0;
	sTimerNextId = 1;
	B_INITIALIZE_SPINLOCK(&sTimerSpinlock);
	
	sTimerThread = spawn_kernel_thread(timer_thread, "firewire timer", 80, 0);
	sTimerSem = create_sem(0, "firewire timer");
	set_sem_owner(sTimerSem, B_SYSTEM_TEAM);
	
	if (sTimerSem < 0 || sTimerThread < 0) {
		delete_sem(sTimerSem);
		kill_thread(sTimerThread);
		return B_ERROR;
	}
	
	resume_thread(sTimerThread);
	return B_OK;
}
Example #13
AHCIPort::AHCIPort(AHCIController *controller, int index)
	: fController(controller),
	fIndex(index),
	fRegs(&controller->fRegs->port[index]),
	fArea(-1),
	fCommandsActive(0),
	fRequestSem(-1),
	fResponseSem(-1),
	fDevicePresent(false),
	fUse48BitCommands(false),
	fSectorSize(0),
	fSectorCount(0),
	fIsATAPI(false),
	fTestUnitReadyActive(false),
	fResetPort(false),
	fError(false)
{
	B_INITIALIZE_SPINLOCK(&fSpinlock);
	fRequestSem = create_sem(1, "ahci request");
	fResponseSem = create_sem(0, "ahci response");
}
Example #14
VirtioRNGDevice::VirtioRNGDevice(device_node *node)
	:
	fNode(node),
	fVirtio(NULL),
	fVirtioDevice(NULL),
	fStatus(B_NO_INIT),
	fOffset(BUFFER_SIZE)
{
	CALLED();

	B_INITIALIZE_SPINLOCK(&fInterruptLock);
	fInterruptCondition.Init(this, "virtio rng transfer");

	get_memory_map(fBuffer, BUFFER_SIZE, &fEntry, 1);

	// get the Virtio device from our parent's parent
	device_node *parent = gDeviceManager->get_parent_node(node);
	device_node *virtioParent = gDeviceManager->get_parent_node(parent);
	gDeviceManager->put_node(parent);

	gDeviceManager->get_driver(virtioParent, (driver_module_info **)&fVirtio,
		(void **)&fVirtioDevice);
	gDeviceManager->put_node(virtioParent);

	fVirtio->negociate_features(fVirtioDevice,
		0, &fFeatures, &get_feature_name);

	fStatus = fVirtio->alloc_queues(fVirtioDevice, 1, &fVirtioQueue);
	if (fStatus != B_OK) {
		ERROR("queue allocation failed (%s)\n", strerror(fStatus));
		return;
	}

	fStatus = fVirtio->setup_interrupt(fVirtioDevice, NULL, this);
	if (fStatus != B_OK) {
		ERROR("interrupt setup failed (%s)\n", strerror(fStatus));
		return;
	}
}
Example #15
bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity -- e.g.
	// the pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %" B_PRIu32 " (should be %" B_PRIu32 ")\n", entry,
					entry->previous_size, previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32 "\n",
				entry, entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %" B_PRIu32 "\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %" B_PRIu32 " entries + %" B_PRIu32
		" buffer entries from previous session. Expected %" B_PRIu32
		" entries.\n", nonBufferEntryCount, entryCount - nonBufferEntryCount,
		fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}
Example #16
/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no previous tracing data found -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}
Example #17
/*static*/ void
X86PagingStructures32Bit::StaticInit()
{
	B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
	new (&sPagingStructuresList) PagingStructuresList;
}
Example #18
/*static*/ void
M68KPagingStructures040::StaticInit()
{
	B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
	new (&sPagingStructuresList) PagingStructuresList;
}
Example #19
bool
TracingMetaData::_InitPreviousTracingData()
{
	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	void* buffer = fTraceOutputBuffer;
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		&buffer, B_EXACT_ADDRESS, kTraceOutputBufferSize + MAX_TRACE_SIZE,
		B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		fPhysicalAddress, CREATE_AREA_DONT_CLEAR);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		buffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
				entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %lu\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
		entryCount - nonBufferEntryCount, fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}