Example no. 1
0
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			(void**)&metaData, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE,
			B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			metaDataAddress, CREATE_AREA_DONT_CLEAR);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}
Example no. 2
0
// TODO: Name clash with POSIX sem_init()... (we could just use C++)
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB and more -> 65536, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}
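The sizing comment in haiku_sem_init() can be checked with a small standalone sketch (not kernel code): it reproduces the doubling loop above, assuming 4 KB pages, a hypothetical initial table size of 1024 slots, and the 65536 cap and 28-byte sem_entry implied by the comment's figures.

// Standalone sketch reproducing the semaphore-table sizing above. The
// starting value (1024) is hypothetical; the 65536 cap and the 28-byte
// sem_entry size are derived from the comment. In the kernel the page
// count comes from vm_page_num_pages().
#include <cstdio>

int main()
{
	const long long kPageSize = 4096;
	const int kMaxSemaphores = 65536;
	const long long memoryMB[] = { 128, 256, 512 };

	for (long long mb : memoryMB) {
		long long pages = mb * 1024 * 1024 / kPageSize;
		long long target = pages / 2;        // vm_page_num_pages() / 2
		int maxSems = 1024;                  // hypothetical initial value
		while (maxSems < target && maxSems < kMaxSemaphores)
			maxSems <<= 1;
		printf("%lld MB -> %d semaphores, %lld kB table\n",
			mb, maxSems, maxSems * 28LL / 1024);
	}
	return 0;
}

With these assumptions the sketch prints 16384/448 kB, 32768/896 kB and 65536/1792 kB for 128, 256 and 512 MB respectively, matching the figures in the comment.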
Example no. 3
0
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, 0, &virtualRestrictions,
			&physicalRestrictions, (void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	if (findPrevious)
		return B_ENTRY_NOT_FOUND;

	// We couldn't allocate any of the standard locations. Instead of failing
	// entirely, we use the static meta data. The tracing buffer won't be
	// reattachable in the next session, but at least we can use it in this
	// session.
	_metaData = &sFallbackTracingMetaData;
	return B_OK;
}
Example no. 4
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
		&physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;

	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}
Example no. 5
void
debug_heap_init()
{
	// create the heap area
	void* base;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", KDEBUG_HEAP,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		CREATE_AREA_DONT_WAIT, &virtualRestrictions, &physicalRestrictions,
		(void**)&base);
	if (area < 0)
		return;

	// switch from the small static buffer to the area
	InterruptsLocker locker;
	sHeapBase = base;
	sHeapSize = KDEBUG_HEAP;
}
Example no. 6
0
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, 0, &virtualRestrictions,
			&physicalRestrictions, (void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}
Example no. 7
0
bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity -- e.g.
	// the pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %" B_PRIu32 " (should be %" B_PRIu32 ")\n", entry,
					entry->previous_size, previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32 "\n",
				entry, entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %" B_PRIu32 "\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %" B_PRIu32 " entries + %" B_PRIu32
		" buffer entries from previous session. Expected %" B_PRIu32
		" entries.\n", nonBufferEntryCount, entryCount - nonBufferEntryCount,
		fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}
Example no. 8
0
/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no previous tracing data found -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}
Example no. 9
0
static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
	// free old buffer first
	scsi_free_dma_buffer(buffer);

	// alignment is stored as a mask (2^n - 1), so this rounds size up;
	// just in case alignment is ridiculously huge
	size = (size + dma_params->alignment) & ~dma_params->alignment;

	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// calculate worst case number of S/G entries, i.e. if they are non-contiguous;
	// there is a controller limit and a limit by our own S/G manager to check
	if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
		|| size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) {
		uint32 boundary = dma_params->dma_boundary;

		// alright - a contiguous buffer is required to keep S/G table short
		SHOW_INFO(1, "need to setup contiguous DMA buffer of size %" B_PRIu32,
			size);

		// verify that we don't get problems with dma boundary
		if (boundary != ~(uint32)0) {
			if (size > boundary + 1) {
				SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%"
					 B_PRId32 "/%" B_PRId32 " bytes)", size, boundary + 1);
				return false;
			}
		}

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		if (dma_params->alignment != ~(uint32)0)
			physicalRestrictions.alignment = dma_params->alignment + 1;
		if (boundary != ~(uint32)0)
			physicalRestrictions.boundary = boundary + 1;
#if B_HAIKU_PHYSICAL_BITS > 32
		physicalRestrictions.high_address = 0x100000000ULL;
			// TODO: Use 64 bit addresses, if possible!
#endif
		buffer->area = create_area_etc(B_SYSTEM_TEAM, "DMA buffer", size,
			B_CONTIGUOUS, 0, 0, 0, &virtualRestrictions, &physicalRestrictions,
			(void**)&buffer->address);

		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create contignous DMA buffer of %" B_PRIu32
				" bytes", size);
			return false;
		}

		buffer->size = size;
	} else {
		// we can live with a fragmented buffer - very nice
		buffer->area = create_area("DMA buffer",
			(void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size,
			B_32_BIT_FULL_LOCK, 0);
				// TODO: Use B_FULL_LOCK, if possible!
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create DMA buffer of %" B_PRIu32 " bytes",
				size);
			return false;
		}

		buffer->size = size;
	}

	// create S/G list
	// worst case is one entry per page, and size is page-aligned
	size_t sg_list_size = buffer->size / B_PAGE_SIZE * sizeof( physical_entry );
	// create_area has page-granularity
	sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	buffer->sg_list_area = create_area("DMA buffer S/G table",
		(void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size,
		B_32_BIT_FULL_LOCK, 0);
			// TODO: Use B_FULL_LOCK, if possible!
	if (buffer->sg_list_area < 0) {
		SHOW_ERROR( 2, "Cannot create DMA buffer S/G list of %" B_PRIuSIZE
			" bytes", sg_list_size );

		delete_area(buffer->area);
		buffer->area = 0;
		return false;
	}

	size_t sg_list_entries = sg_list_size / sizeof(physical_entry);

	{
		size_t mapped_len;
		status_t res;
		iovec vec = {
			buffer->address,
			buffer->size
		};

		res = get_iovec_memory_map(
			&vec, 1, 0, buffer->size,
			buffer->sg_list, sg_list_entries, &buffer->sg_count,
			&mapped_len );

		if( res != B_OK || mapped_len != buffer->size ) {
			SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted "
				"%" B_PRIuSIZE ", got %" B_PRIuSIZE " bytes)", strerror(res),
				mapped_len, buffer->size);
		}
	}

	return true;
}
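For scale, the worst-case S/G table sizing in scsi_alloc_dma_buffer() can be illustrated with a standalone sketch, assuming a 4 KB B_PAGE_SIZE and a 16-byte physical_entry (both assumed here; the kernel headers define the real values): a 64 KB bounce buffer needs at most 16 entries, i.e. 256 bytes, which the page-granularity rounding turns into one full 4 KB area.

// Standalone sketch (not driver code) of the worst-case S/G table sizing
// above: one entry per page of a page-aligned buffer, rounded up to whole
// pages because create_area() works in page granularity. Page size and
// sizeof(physical_entry) are assumed values.
#include <cstdio>

int main()
{
	const unsigned kPageSize = 4096;     // assumed B_PAGE_SIZE
	const unsigned kEntrySize = 16;      // assumed sizeof(physical_entry)
	unsigned bufferSize = 64 * 1024;     // example DMA buffer size

	unsigned sgEntries = bufferSize / kPageSize;
	unsigned sgListSize = sgEntries * kEntrySize;
	sgListSize = (sgListSize + kPageSize - 1) & ~(kPageSize - 1);

	printf("buffer %u kB -> at most %u S/G entries, %u kB table area\n",
		bufferSize / 1024, sgEntries, sgListSize / 1024);
	return 0;
}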
Example no. 10
0
status_t
port_init(kernel_args *args)
{
	size_t size = sizeof(struct port_entry) * sMaxPorts;

	// create and initialize ports table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", size, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions, (void**)&sPorts);
	if (sPortArea < 0) {
		panic("unable to allocate kernel port table!\n");
		return sPortArea;
	}

	memset(sPorts, 0, size);
	for (int32 i = 0; i < sMaxPorts; i++) {
		mutex_init(&sPorts[i].lock, NULL);
		sPorts[i].id = -1;
		sPorts[i].read_condition.Init(&sPorts[i], "port read");
		sPorts[i].write_condition.Init(&sPorts[i], "port write");
	}

	addr_t base;
	if (create_area("port heap", (void**)&base, B_ANY_KERNEL_ADDRESS,
			kInitialPortBufferSize, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) {
			// TODO: Since port_init() is invoked before the boot partition is
			// mounted, the underlying VMAnonymousCache cannot commit swap space
			// upon creation and thus the pages aren't swappable after all. This
			// makes the area essentially B_LAZY_LOCK with additional overhead.
		panic("unable to allocate port area!\n");
		return B_ERROR;
	}

	static const heap_class kBufferHeapClass = {"default", 100,
		PORT_MAX_MESSAGE_SIZE + sizeof(port_message), 2 * 1024,
		sizeof(port_message), 8, 4, 64};
	sPortAllocator = heap_create_allocator("port buffer", base,
		kInitialPortBufferSize, &kBufferHeapClass, true);
	if (sPortAllocator == NULL) {
		panic("unable to create port heap");
		return B_NO_MEMORY;
	}

	sNoSpaceCondition.Init(sPorts, "port space");

	// add debugger commands
	add_debugger_command_etc("ports", &dump_port_list,
		"Dump a list of all active ports (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]\n"
		"Prints a list of all active ports meeting the given\n"
		"requirement. If no argument is given, all ports are listed.\n"
		"  <team>             - The team owning the ports.\n"
		"  <name>             - Part of the name of the ports.\n", 0);
	add_debugger_command_etc("port", &dump_port_info,
		"Dump info about a particular port",
		"(<id> | [ \"address\" ] <address>) | ([ \"name\" ] <name>) "
			"| (\"condition\" <address>)\n"
		"Prints info about the specified port.\n"
		"  <address>   - Pointer to the port structure.\n"
		"  <name>      - Name of the port.\n"
		"  <condition> - address of the port's read or write condition.\n", 0);

	new(&sNotificationService) PortNotificationService();
	sPortsActive = true;
	return B_OK;
}
Example no. 11
0
bool
TracingMetaData::_InitPreviousTracingData()
{
	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	void* buffer = fTraceOutputBuffer;
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		&buffer, B_EXACT_ADDRESS, kTraceOutputBufferSize + MAX_TRACE_SIZE,
		B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		fPhysicalAddress, CREATE_AREA_DONT_CLEAR);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		buffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
				entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %lu\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
		entryCount - nonBufferEntryCount, fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}