Exemple #1
0
/*!	Allocates and maps the (physically contiguous) PCI GART buffer.
	The buffer is page-rounded, and a size-aligned physical sub-range is
	mapped write-combined (falling back to uncached if WC fails).
	Returns B_OK on success or a negative error code; on failure no
	resources are left allocated.
*/
static status_t createGARTBuffer( GART_info *gart, size_t size )
{
	physical_entry map[1];
	void *unaligned_addr, *aligned_phys;
	status_t res;

	SHOW_FLOW0( 3, "" );

	// round the buffer size up to a whole number of pages
	gart->buffer.size = size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// we allocate a contiguous area having twice the size
	// to be able to find an aligned, contiguous range within it;
	// the graphics card doesn't care, but the CPU cannot
	// make an arbitrary area WC'ed, at least elder ones
	// question: is this necessary for a PCI GART because of bus snooping?
	gart->buffer.unaligned_area = create_area( "Radeon PCI GART buffer",
		&unaligned_addr, B_ANY_KERNEL_ADDRESS,
		2 * size, B_CONTIGUOUS/*B_FULL_LOCK*/, B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA );
		// TODO: Physical aligning can be done without waste using the
		// private create_area_etc().
	if (gart->buffer.unaligned_area < 0) {
		SHOW_ERROR( 1, "cannot create PCI GART buffer (%s)",
			strerror( gart->buffer.unaligned_area ));
		return gart->buffer.unaligned_area;
	}

	// fix: the result of get_memory_map() was silently ignored before;
	// on failure map[0].address would have been used uninitialized
	res = get_memory_map( unaligned_addr, B_PAGE_SIZE, map, 1 );
	if (res != B_OK) {
		SHOW_ERROR( 1, "cannot get memory map of PCI GART buffer (%s)",
			strerror( res ));
		delete_area( gart->buffer.unaligned_area );
		gart->buffer.unaligned_area = -1;
		return res;
	}

	// pick a size-aligned physical start within the doubled allocation
	// NOTE(review): the "& ~(size - 1)" trick assumes size is a power of
	// two — TODO confirm callers only request power-of-two GART sizes
	aligned_phys =
		(void *)((map[0].address + size - 1) & ~(size - 1));

	SHOW_FLOW( 3, "aligned_phys=%p", aligned_phys );

	// prefer a write-combined mapping for performance
	gart->buffer.area = map_physical_memory( "Radeon aligned PCI GART buffer",
		(addr_t)aligned_phys,
		size, B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr );

	if( gart->buffer.area < 0 ) {
		SHOW_ERROR0( 3, "cannot map buffer with WC" );
		// fall back to a default (uncached) mapping
		gart->buffer.area = map_physical_memory( "Radeon aligned PCI GART buffer",
			(addr_t)aligned_phys,
			size, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr );
	}

	if( gart->buffer.area < 0 ) {
		SHOW_ERROR0( 1, "cannot map GART buffer" );
		delete_area( gart->buffer.unaligned_area );
		gart->buffer.unaligned_area = -1;
		return gart->buffer.area;
	}

	memset( gart->buffer.ptr, 0, size );

	return B_OK;
}
Exemple #2
0
/*!	This is a reimplementation: Haiku itself uses a BIOS call to obtain the
	most up-to-date panel info, whereas here we dig through VBIOS memory and
	parse the VBT tables to get the native panel timings. This allows reading
	the original, PROM-programmed timing info even when compensation mode is
	off on the machine.

	Maps the legacy VBIOS region and validates the VBT header. On success the
	mapping (vbios.area) is intentionally left in place for later parsing;
	on failure the area is deleted and false is returned.
*/
static bool
get_bios(void)
{
	static const uint64_t kVBIOSAddress = 0xc0000;
	static const int kVBIOSSize = 64 * 1024;
		// FIXME: is this the same for all cards?

	/* !!!DANGER!!!: mapping of BIOS using legacy location for now,
	hence, if panel mode will be set using info from VBT, it will
	be taken from primary card's VBIOS */
	vbios.area = map_physical_memory("VBIOS mapping", kVBIOSAddress,
		kVBIOSSize, B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA, (void**)&vbios.memory);

	if (vbios.area < 0)
		return false;

	TRACE((DEVICE_NAME ": mapping VBIOS: 0x%" B_PRIx64 " -> %p\n",
		kVBIOSAddress, vbios.memory));

	// ensure the complete VBT header lies within the mapped region
	int vbtOffset = vbios.ReadWord(kVbtPointer);
	if ((vbtOffset + (int)sizeof(vbt_header)) >= kVBIOSSize) {
		TRACE((DEVICE_NAME": bad VBT offset : 0x%x\n", vbtOffset));
		delete_area(vbios.area);
		return false;
	}

	struct vbt_header* vbt = (struct vbt_header*)(vbios.memory + vbtOffset);
	if (memcmp(vbt->signature, "$VBT", 4) != 0) {
		// fix: the signature is 4 bytes and not NUL-terminated; "%20s"
		// could read past the field. Print at most 4 characters instead.
		TRACE((DEVICE_NAME": bad VBT signature: %.4s\n", vbt->signature));
		delete_area(vbios.area);
		return false;
	}
	return true;
}
Exemple #3
0
/*	Maps the physical range starting at (not necessarily page-aligned)
 *	address phy and stores a virtual pointer to phy itself in *virt.
 *	Returns the created area id, or a negative error code on failure.
 */
area_id
map_mem(void **virt, void *phy, size_t size, uint32 protection,
	const char *name)
{
	uint32 pageOffset;
	void *physBase;
	void *virtBase;
	area_id id;

	TRACE("mapping physical address %p with %ld bytes for %s\n", phy, size, name);

	/* split the address into a page-aligned base plus in-page offset,
	 * map whole pages, then hand back the offset virtual address */
	pageOffset = (uint32)phy & (B_PAGE_SIZE - 1);
	physBase = (char *)phy - pageOffset;
	size = round_to_pagesize(size + pageOffset);
	id = map_physical_memory(name, physBase, size, B_ANY_KERNEL_ADDRESS,
		protection, &virtBase);
	if (id < B_OK) {
		ERROR("mapping '%s' failed, error 0x%lx (%s)\n", name, id, strerror(id));
		return id;
	}

	*virt = (char *)virtBase + pageOffset;

	TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = "
		"%p, size = %ld, area = 0x%08lx\n", phy, *virt, pageOffset, physBase,
		virtBase, size, id);

	return id;
}
/* Watchdog service thread: arms the SoC hardware watchdog, then kicks it
 * once per second while counting down a shared timeout counter. The thread
 * never returns: it either exits the process cleanly (counter reached
 * MAGIC_NUMBER) or deliberately hangs so the hardware watchdog fires.
 * NOTE(review): 'r' is not declared in this function — presumably a
 * file-scope pointer to the timer register block; confirm in the full file. */
static void *watchdog_thread_function(void *ctx)
{
    r  = (volatile struct sunxi_timer_reg *) map_physical_memory(TIMER_BASE, 4096);

    /* Enable the hardware watchdog (with sun5i workaround) */
    /* The mode register is written both before and after the control
     * register on purpose (sun5i workaround) — do not reorder or merge
     * these three writes. */
    r->wdog_mode_reg = (5 << 3) | 3;
    r->wdog_ctrl_reg = (0x0a57 << 1) | 1;
    r->wdog_mode_reg = (5 << 3) | 3;

    while (1)
    {
        pthread_mutex_lock(&watchdog_mutex);
        if (watchdog_timeout_counter == MAGIC_NUMBER)
        {
            /* Disable the hardware watchdog and exit */
            r->wdog_mode_reg = 0;
            exit(0);
        }
        if (watchdog_timeout_counter == 0)
        {
            /* The timer has elapsed, we are dead */
            /* deadloop() never returns; the hardware watchdog will reset
             * the machine since it is no longer being kicked. */
            printf("Boom!\n");
            deadloop();
        }
        watchdog_timeout_counter--;
        pthread_mutex_unlock(&watchdog_mutex);

        /* NOTE(review): the counter is read here without the mutex — a
         * benign race if the value is only used to vary the test pattern;
         * confirm simple_memtester() tolerates a stale value. */
        simple_memtester(watchdog_timeout_counter);

        sleep(1);
        /* Kick the watchdog (same sun5i double-write sequence as above). */
        r->wdog_ctrl_reg = (0x0a57 << 1) | 1;
        r->wdog_mode_reg = (5 << 3) | 3;
    }
}
Exemple #5
0
/*!	Maps the physical memory at \a position (which need not be page-aligned)
	and returns, via \a virtualAddress, a pointer to \a position itself
	within the mapping. Returns the created area id, or a negative error
	code on failure.
*/
area_id
mem_map_target(off_t position, size_t length, uint32 protection,
	void **virtualAddress)
{
	area_id area;
	phys_addr_t physicalAddress;
	size_t offset;
	size_t size;

	/* SIZE_MAX actually but 2G should be enough anyway */
	if (length > SSIZE_MAX - B_PAGE_SIZE)
		return EINVAL;

	/* the first page address */
	physicalAddress = (phys_addr_t)position & ~((off_t)B_PAGE_SIZE - 1);

	/* offset of target into it */
	offset = position - (off_t)physicalAddress;

	/* size of the whole mapping (page rounded) */
	size = (offset + length + B_PAGE_SIZE - 1) & ~((size_t)B_PAGE_SIZE - 1);
	area = map_physical_memory("mem_driver_temp", physicalAddress, size,
		B_ANY_KERNEL_ADDRESS, protection, virtualAddress);
	if (area < 0)
		return area;

	/* fix: arithmetic on a void* is a GCC extension, not standard C;
	 * advance the pointer via char* instead */
	*virtualAddress = (char *)*virtualAddress + offset;
	return area;
}
/*!	Remaps the frame buffer if necessary; if we've already mapped the complete
	frame buffer, there is no need to map it again.
	On success the shared info (frame buffer pointer, physical address,
	bytes per row) is updated for the new mode; returns a negative error
	code only if a required mapping fails.
*/
static status_t
remap_frame_buffer(vesa_info& info, addr_t physicalBase, uint32 width,
	uint32 height, int8 depth, uint32 bytesPerRow, bool initializing)
{
	vesa_shared_info& sharedInfo = *info.shared_info;
	addr_t frameBuffer = info.frame_buffer;

	if (!info.complete_frame_buffer_mapped) {
		addr_t base = physicalBase;
		size_t size = bytesPerRow * height;
		bool remap = !initializing;
			// while initializing, the current mode's mapping may be reused

		if (info.physical_frame_buffer_size != 0) {
			// we can map the complete frame buffer
			base = info.physical_frame_buffer;
			size = info.physical_frame_buffer_size;
			remap = true;
		}

		if (remap) {
			area_id area = map_physical_memory("vesa frame buffer", base,
				size, B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
				(void**)&frameBuffer);
			if (area < 0)
				return area;

			if (initializing) {
				// We need to manually update the kernel's frame buffer address,
				// since this frame buffer remapping has not been issued by the
				// app_server (which would otherwise take care of this)
				frame_buffer_update(frameBuffer, width, height, depth,
					bytesPerRow);
			}

			// the old mapping is only released after the new one succeeded
			delete_area(info.shared_info->frame_buffer_area);

			info.frame_buffer = frameBuffer;
			sharedInfo.frame_buffer_area = area;

			// Turn on write combining for the area
			vm_set_area_memory_type(area, base, B_MTR_WC);

			if (info.physical_frame_buffer_size != 0)
				info.complete_frame_buffer_mapped = true;
		}
	}

	if (info.complete_frame_buffer_mapped)
		frameBuffer += physicalBase - info.physical_frame_buffer;
			// point at the current mode's start within the full mapping

	// Update shared frame buffer information
	sharedInfo.frame_buffer = (uint8*)frameBuffer;
	sharedInfo.physical_frame_buffer = (uint8*)physicalBase;
	sharedInfo.bytes_per_row = bytesPerRow;

	return B_OK;
}
Exemple #7
0
static status_t
MapDevice()
{
	SharedInfo *si = gPd->si;
	int writeCombined = 1;

	/* Map the frame buffer */
	si->fbArea = map_physical_memory("VMware frame buffer",
		(addr_t)si->fbDma, si->fbSize, B_ANY_KERNEL_BLOCK_ADDRESS|B_MTR_WC,
		B_READ_AREA|B_WRITE_AREA, (void **)&si->fb);
	if (si->fbArea < 0) {
		/* Try again without write combining */
		writeCombined = 0;
		si->fbArea = map_physical_memory("VMware frame buffer",
			(addr_t)si->fbDma, si->fbSize, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA|B_WRITE_AREA, (void **)&si->fb);
	}
	if (si->fbArea < 0) {
		TRACE("failed to map frame buffer\n");
		return si->fbArea;
	}
	TRACE("frame buffer mapped: %p->%p, area %ld, size %ld, write "
		"combined: %d\n", si->fbDma, si->fb, si->fbArea,
		si->fbSize, writeCombined);

	/* Map the fifo */
	si->fifoArea = map_physical_memory("VMware fifo",
		(addr_t)si->fifoDma, si->fifoSize, B_ANY_KERNEL_BLOCK_ADDRESS,
		B_READ_AREA|B_WRITE_AREA, (void **)&si->fifo);
	if (si->fifoArea < 0) {
		TRACE("failed to map fifo\n");
		delete_area(si->fbArea);
		return si->fifoArea;
	}
	TRACE("fifo mapped: %p->%p, area %ld, size %ld\n", si->fifoDma,
		si->fifo, si->fifoArea, si->fifoSize);

	return B_OK;
}
Exemple #8
0
/*	Maps the device's first PCI memory BAR (rounded up to whole pages)
 *	into kernel space and stores the virtual base in pMappedMemBase.
 */
LM_STATUS
MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
{
	struct be_b57_dev *pUmDevice = (struct be_b57_dev *)(pDevice);
	size_t size = pUmDevice->pci_data.u.h0.base_register_sizes[0];

	size = ROUND_UP_TO_PAGE(size);
	pUmDevice->mem_base = map_physical_memory("broadcom_regs",
		pUmDevice->pci_data.u.h0.base_registers[0], size,
		B_ANY_KERNEL_BLOCK_ADDRESS, 0,
		(void **)(&pDevice->pMappedMemBase));
	/* fix: the mapping result was previously ignored and success was
	 * always reported, leaving pMappedMemBase invalid on failure */
	if (pUmDevice->mem_base < 0)
		return LM_STATUS_FAILURE;

	return LM_STATUS_SUCCESS;
}
Exemple #9
0
/*	Enables PCI access to the ET6000, maps the whole card memory (RAM +
 *	memory-mapped registers) uncached, and fills in the shared-info
 *	pointers. Returns the (positive) area id on success or a negative
 *	error code if the mapping failed.
 */
static status_t et6000MapDevice(ET6000DeviceInfo *di) {
char buffer[B_OS_NAME_LENGTH];
ET6000SharedInfo *si = di->si;
uint32  tmpUlong;
pci_info *pcii = &(di->pcii);

    /* Enable memory space access and I/O space access */
    tmpUlong = get_pci(PCI_command, 4);
    tmpUlong |= 0x00000003;
    set_pci(PCI_command, 4, tmpUlong);

    /* Enable ROM decoding */
    tmpUlong = get_pci(PCI_rom_base, 4);
    tmpUlong |= 0x00000001;
    set_pci(PCI_rom_base, 4, tmpUlong);

    /* PCI header base address in I/O space */
    si->pciConfigSpace = (uint16)di->pcii.u.h0.base_registers[1];

    sprintf(buffer, "%04X_%04X_%02X%02X%02X videomemory",
        di->pcii.vendor_id, di->pcii.device_id,
        di->pcii.bus, di->pcii.device, di->pcii.function);

   /*
    * We map the whole graphics card memory area (which consists of RAM
    * memory and memory mapped registers) at once. Memory mapped registers
    * must not be cacheable, so the whole area is mapped with B_MTR_UC
    * (uncached). We certainly could map separately the RAM memory with
    * write combining (B_MTR_WC) and the memory mapped registers with
    * B_MTR_UC.
    */
    si->memoryArea = map_physical_memory(buffer,
        di->pcii.u.h0.base_registers[0],
        di->pcii.u.h0.base_register_sizes[0],
        B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_UC,
        B_READ_AREA | B_WRITE_AREA,
        &(si->memory));

    /* fix: bail out before dereferencing si->memory if the mapping
     * failed; the code below would otherwise compute register pointers
     * from an invalid base and probe the card through them. */
    if (si->memoryArea < 0)
        return si->memoryArea;

    si->framebuffer = si->memory;
    si->mmRegs = (void *)((uint32)si->memory + 0x003fff00);
    si->emRegs = (void *)((uint32)si->memory + 0x003fe000);

    /* remember the physical addresses */
    si->physMemory = si->physFramebuffer =
        (void *) di->pcii.u.h0.base_registers_pci[0];

    si->memSize = et6000GetOnboardMemorySize(si->pciConfigSpace, si->memory);

    /* in any case, return the result */
    return si->memoryArea;
}
Exemple #10
0
/*	Post-VM interrupt initialization: sets up the exception vector page
 *	(kernel copy plus a read-only user clone at the high-vector address),
 *	switches the CPU to high vectors, and maps + masks the PXA interrupt
 *	controller registers.
 */
status_t
arch_int_init_post_vm(kernel_args *args)
{
	// create a read/write kernel area
	sVectorPageArea = create_area("vectorpage", (void **)&sVectorPageAddress,
		B_ANY_ADDRESS, VECTORPAGE_SIZE, B_FULL_LOCK,
		B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA);

	if (sVectorPageArea < 0)
		panic("vector page could not be created!");

	// clone it at a fixed address with user read/only permissions
	sUserVectorPageAddress = (addr_t*)USER_VECTOR_ADDR_HIGH;
	sUserVectorPageArea = clone_area("user_vectorpage",
		(void **)&sUserVectorPageAddress, B_EXACT_ADDRESS,
		B_READ_AREA | B_EXECUTE_AREA, sVectorPageArea);

	if (sUserVectorPageArea < 0)
		panic("user vector page @ %p could not be created (%lx)!", sVectorPageAddress, sUserVectorPageArea);

	// copy vectors into the newly created area
	memcpy(sVectorPageAddress, &_vectors_start, VECTORPAGE_SIZE);

	arm_vector_init();

	// see if high vectors are enabled
	// (bit 13 of the control register selects the high-vector base,
	// as the dprintf messages below indicate)
	if ((mmu_read_c1() & (1 << 13)) != 0)
		dprintf("High vectors already enabled\n");
	else {
		mmu_write_c1(mmu_read_c1() | (1 << 13));

		// read back to verify the bit actually stuck
		if ((mmu_read_c1() & (1 << 13)) == 0)
			dprintf("Unable to enable high vectors!\n");
		else
			dprintf("Enabled high vectors\n");
	}

	// NOTE(review): address spec 0 is passed here rather than a named
	// B_ANY_* constant — confirm this is intentional
	sPxaInterruptArea = map_physical_memory("pxa_intc", PXA_INTERRUPT_PHYS_BASE,
		PXA_INTERRUPT_SIZE, 0, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, (void**)&sPxaInterruptBase);

	if (sPxaInterruptArea < 0)
		return sPxaInterruptArea;

	// mask all interrupts until handlers are installed
	sPxaInterruptBase[PXA_ICMR] = 0;
	sPxaInterruptBase[PXA_ICMR2] = 0;

	return B_OK;
}
/*	Prepares a NOR flash device for use: fills in the fixed geometry and
 *	maps the flash window read-only into the kernel address space.
 *	Returns B_OK with *_cookie set, or the mapping error.
 */
static status_t
nor_init_device(void *_info, void **_cookie)
{
	nor_driver_info *info = (nor_driver_info*)_info;

	TRACE("init_device\n");

	/* fixed geometry: 256 blocks of 128KB each */
	info->blocksize = 128 * 1024;
	info->totalsize = info->blocksize * 256;
	info->mapped = NULL;

	info->id = map_physical_memory("NORFlash", NORFLASH_ADDR,
		info->totalsize, B_ANY_KERNEL_ADDRESS, B_READ_AREA,
		&info->mapped);
	if (info->id < 0)
		return info->id;

	*_cookie = info;
	return B_OK;
}
Exemple #12
0
	/*	Maps the DP83815 register window. The PCI base address is rounded
		down to a page boundary for the mapping, and the in-page offset is
		added back onto reg_base afterwards. */
	static void dp83815_init_registers( rtl8139_properties_t *data )
	{
		int32 base, size, offset;
		base = data->pcii->u.h0.base_registers[0];
		size = data->pcii->u.h0.base_register_sizes[0];

		/* Round down to nearest page boundary */
		base = base & ~(B_PAGE_SIZE-1);

		/* Adjust the size */
		offset = data->pcii->u.h0.base_registers[0] - base;
		size += offset;
		size = (size +(B_PAGE_SIZE-1)) & ~(B_PAGE_SIZE-1);

		TRACE(( kDevName " _open_hook(): PCI base=%lx size=%lx offset=%lx\n", base, size, offset));

		data->ioarea = map_physical_memory(kDevName " Regs", (void *)base, size, B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, (void **)&data->reg_base);

		/* fix: previously the offset was applied unconditionally, even
		 * when the mapping failed and reg_base was never written */
		if (data->ioarea < 0)
			return;

		data->reg_base = data->reg_base + offset;
	}
Exemple #13
0
/* This is not the most advanced method to map physical memory for io access.
 * Perhaps using B_ANY_KERNEL_ADDRESS instead of B_ANY_KERNEL_BLOCK_ADDRESS
 * makes the whole offset calculation and relocation obsolete. But the code
 * below does work, and I can't test if using B_ANY_KERNEL_ADDRESS also works.
 */
/*	Maps the physical range at phy and stores a virtual pointer to phy
 *	itself in *log. Returns the area id, or a negative error code. */
area_id
map_mem(void **log, void *phy, size_t size, const char *name)
{
	uint32 offset;
	void *phyadr;
	void *mapadr;
	area_id area;

	LOG(("mapping physical address %p with %#x bytes for %s\n",phy,size,name));

	offset = (uint32)phy & (B_PAGE_SIZE - 1);
	/* use char* arithmetic — void* arithmetic is a GCC extension */
	phyadr = (char *)phy - offset;
	size = round_to_pagesize(size + offset);
	area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS,
		B_READ_AREA | B_WRITE_AREA, &mapadr);
	/* fix: on failure mapadr is indeterminate; previously *log was
	 * computed from it anyway, handing the caller a garbage pointer */
	if (area < B_OK)
		return area;

	*log = (char *)mapadr + offset;

	LOG(("physical = %p, logical = %p, offset = %#x, phyadr = %p, mapadr = %p, size = %#x, area = %#x\n",
		phy, *log, offset, phyadr, mapadr, size, area));

	return area;
}
Exemple #14
0
/*	Driver entry point: acquires the ACPI module, looks up the HPET table
	and maps the HPET register block, then initializes the timer. On any
	failure all previously acquired resources are released and the error
	is returned.
*/
status_t
init_driver(void)
{
	acpi_hpet *hpetTable;
	status_t status;

	sOpenCount = 0;

	status = get_module(B_ACPI_MODULE_NAME, (module_info**)&sAcpi);
	if (status < B_OK)
		return status;

	status = sAcpi->get_table(ACPI_HPET_SIGNATURE, 0, (void**)&hpetTable);
	if (status != B_OK)
		goto err_module;

	sHPETArea = map_physical_memory("HPET registries",
		hpetTable->hpet_address.address, B_PAGE_SIZE, B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, (void**)&sHPETRegs);
	if (sHPETArea < 0) {
		status = sHPETArea;
		goto err_module;
	}

	status = hpet_init();
	if (status != B_OK) {
		delete_area(sHPETArea);
		goto err_module;
	}

	return B_OK;

err_module:
	put_module(B_ACPI_MODULE_NAME);
	return status;
}
Exemple #15
0
/*	Scans the platform-specific candidate memory ranges for the ACPI RSDP
	(Root System Description Pointer) and validates the RSDT it points to.
	The search stops at the first scan spot that yields a valid RSDP.
*/
void
acpi_init()
{
	// Try to find the ACPI RSDP.
	for (int32 i = 0; acpi_scan_spots[i].length > 0; i++) {
		acpi_rsdp* rsdp = NULL;

		TRACE(("acpi_init: entry base 0x%lx, limit 0x%lx\n",
			acpi_scan_spots[i].start, acpi_scan_spots[i].stop));

		char* start = NULL;
		area_id rsdpArea = map_physical_memory("acpi rsdp",
			acpi_scan_spots[i].start, acpi_scan_spots[i].length,
			B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA, (void **)&start);
		if (rsdpArea < B_OK) {
			TRACE(("acpi_init: couldn't map %s\n", strerror(rsdpArea)));
			// fix: a mapping failure for one scan spot previously aborted
			// the whole search (break); try the remaining spots instead
			continue;
		}
		// the signature is on a 16-byte boundary, hence the stride
		for (char *pointer = start;
			(addr_t)pointer < (addr_t)start + acpi_scan_spots[i].length;
			pointer += 16) {
			if (strncmp(pointer, ACPI_RSDP_SIGNATURE, 8) == 0) {
				TRACE(("acpi_init: found ACPI RSDP signature at %p\n",
					pointer));
				rsdp = (acpi_rsdp*)pointer;
			}
		}

		// only stop once a signature match also passes the RSDT check;
		// the area must stay mapped while acpi_check_rsdt() reads it
		if (rsdp != NULL && acpi_check_rsdt(rsdp) == B_OK) {
			delete_area(rsdpArea);
			break;
		}
		delete_area(rsdpArea);
	}

}
Exemple #16
0
/*	Reads PLL parameters (and, for Mobility chips, LCD panel data) from the
	video BIOS ROM via the ISA legacy memory window. Defaults are set first
	so callers get usable values even when the ROM cannot be read.
	NOTE(review): BIOS8/BIOS16/BIOS32 presumably read 8/16/32-bit values at
	an offset from romAddr — confirm the macro definitions elsewhere.
*/
static status_t
Rage128_GetBiosParameters(DeviceInfo& di)
{
	// Get the PLL parameters from the video BIOS, and if Mobility chips, also
	// get the LCD panel width & height and a few other related parameters.

	// In case mapping the ROM area fails or other error occurs, set default
	// values for the parameters which will be obtained from the BIOS ROM.
	// The default PLL parameters values probably will not work for all chips.
	// For example, reference freq can be 29.50MHz, 28.63MHz, or 14.32MHz.

	SharedInfo& si = *(di.sharedInfo);
	R128_PLLParams& pll = si.r128PLLParams;
	pll.reference_freq = 2950;
	pll.reference_div = 65;
	pll.min_pll_freq = 12500;
	pll.max_pll_freq = 25000;
	pll.xclk = 10300;

	si.panelX = 0;
	si.panelY = 0;
	si.panelPowerDelay = 1;

	// Map the ROM area.  The Rage128 chips do not assign a ROM address in the
	// PCI info;  thus, access the ROM via the ISA legacy memory map.

	uint8* romAddr;
	area_id romArea = map_physical_memory("ATI Rage128 ROM",
		0x000c0000,
		R128_BIOS_SIZE,
		B_ANY_KERNEL_ADDRESS,
		B_READ_AREA,
		(void**)&(romAddr));

	if (romArea < 0) {
		TRACE("Rage128_GetBiosParameters(), ROM mapping error: %ld\n", romArea);
		return romArea;		// ROM mapping failed; return error code
	}

	// Check if we got the BIOS signature (might fail on laptops..).
	// 0x55 0xaa is the standard PC expansion-ROM signature.

	if (BIOS8(0) != 0x55 || BIOS8(1) != 0xaa) {
		TRACE("Rage128_GetBiosParameters(), ROM does not contain BIOS signature\n");
		delete_area(romArea);
		return B_ERROR;
	}

	// Get the PLL values from the mapped ROM area.
	// The offsets below are fixed locations in the ATI BIOS layout.

	uint16 biosHeader = BIOS16(0x48);
	uint16 pllInfoBlock = BIOS16(biosHeader + 0x30);

	pll.reference_freq = BIOS16(pllInfoBlock + 0x0e);
	pll.reference_div = BIOS16(pllInfoBlock + 0x10);
	pll.min_pll_freq = BIOS32(pllInfoBlock + 0x12);
	pll.max_pll_freq = BIOS32(pllInfoBlock + 0x16);
	pll.xclk = BIOS16(pllInfoBlock + 0x08);

	TRACE("PLL parameters: rf=%d rd=%d min=%ld max=%ld; xclk=%d\n",
		pll.reference_freq, pll.reference_div, pll.min_pll_freq,
		pll.max_pll_freq, pll.xclk);

	// If Mobility chip, get the LCD panel width & height and a few other
	// related parameters.

	if (si.chipType == RAGE128_MOBILITY) {
		// There should be direct access to the start of the FP info table, but
		// until we find out where that offset is stored, we must search for
		// the ATI signature string: "M3      ".

		int i;
		for (i = 4; i < R128_BIOS_SIZE - 8; i++) {
			if (BIOS8(i) == 'M' &&
					BIOS8(i + 1) == '3' &&
					BIOS8(i + 2) == ' ' &&
					BIOS8(i + 3) == ' ' &&
					BIOS8(i + 4) == ' ' &&
					BIOS8(i + 5) == ' ' &&
					BIOS8(i + 6) == ' ' &&
					BIOS8(i + 7) == ' ') {
				int fpHeader = i - 2;

				// Assume that only one panel is attached and supported.
				// Scan the table for the first non-zero panel record.

				for (i = fpHeader + 20; i < fpHeader + 84; i += 2) {
					if (BIOS16(i) != 0) {
						int fpStart = BIOS16(i);
						si.panelX = BIOS16(fpStart + 25);
						si.panelY = BIOS16(fpStart + 27);
						si.panelPowerDelay = BIOS8(fpStart + 56);
						TRACE("LCD Panel size: %dx%d  Panel type: 0x%x   power delay: %d\n",
							si.panelX, si.panelY, BIOS16(fpStart + 29),
							si.panelPowerDelay);
						break;
					}
				}

				break;
			}
		}
	}

	delete_area(romArea);

	return B_OK;
}
Exemple #17
0
/*	Maps the card's video memory (write-combined if possible) and its MMIO
	register block. For Mach64 chips whose PCI info lacks a register BAR,
	the registers are probed for at a fixed offset at the end of video
	memory. Returns the register area id (>= 0) on success; on failure all
	areas created here are deleted and an error is returned.
*/
static status_t
MapDevice(DeviceInfo& di)
{
	SharedInfo& si = *(di.sharedInfo);
	pci_info& pciInfo = di.pciInfo;

	// Enable memory mapped IO and bus master.

	SetPCI(pciInfo, PCI_command, 2, GetPCI(pciInfo, PCI_command, 2)
		| PCI_command_io | PCI_command_memory | PCI_command_master);

	// Map the video memory.

	phys_addr_t videoRamAddr = pciInfo.u.h0.base_registers[0];
	uint32 videoRamSize = pciInfo.u.h0.base_register_sizes[0];
	si.videoMemPCI = videoRamAddr;
	char frameBufferAreaName[] = "ATI frame buffer";

	si.videoMemArea = map_physical_memory(
		frameBufferAreaName,
		videoRamAddr,
		videoRamSize,
		B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA + B_WRITE_AREA,
		(void**)&(si.videoMemAddr));

	if (si.videoMemArea < 0) {
		// Try to map this time without write combining.
		si.videoMemArea = map_physical_memory(
			frameBufferAreaName,
			videoRamAddr,
			videoRamSize,
			B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA + B_WRITE_AREA,
			(void**)&(si.videoMemAddr));
	}

	if (si.videoMemArea < 0)
		return si.videoMemArea;

	// Map the MMIO register area.

	phys_addr_t regsBase = pciInfo.u.h0.base_registers[2];
	uint32 regAreaSize = pciInfo.u.h0.base_register_sizes[2];

	// If the register area address or size is not in the PCI info, it should
	// be at the end of the video memory.  Check if it is there.

	if (MACH64_FAMILY(si.chipType) && (regsBase == 0 || regAreaSize == 0)) {
		uint32 regsOffset = 0x7ff000;	// offset to regs area in video memory
		// NOTE(review): pointer is truncated through uint32 here — fine on
		// 32-bit kernels this driver targets, but not 64-bit safe
		uint32 regs = uint32(si.videoMemAddr) + regsOffset;
		// verify the registers are really there by reading the chip id
		uint32 chipInfo = *((vuint32*)(regs + M64_CONFIG_CHIP_ID));

		if (si.deviceID != (chipInfo & M64_CFG_CHIP_TYPE)) {
			// Register area not found;  delete any other areas that were
			// created.
			delete_area(si.videoMemArea);
			si.videoMemArea = -1;
			TRACE("Mach64 register area not found\n");
			return B_ERROR;
		}

		// Adjust params for creating register area below.

		regsBase = videoRamAddr + regsOffset;
		regAreaSize = 0x1000;
		TRACE("Register address is at end of frame buffer memory at 0x%lx\n",
			uint32(regsBase));
	}

	si.regsArea = map_physical_memory("ATI mmio registers",
		regsBase,
		regAreaSize,
		B_ANY_KERNEL_ADDRESS,
		0,		// neither read nor write, to hide it from user space apps
		(void**)&di.regs);

	// If there was an error, delete other areas.
	if (si.regsArea < 0) {
		delete_area(si.videoMemArea);
		si.videoMemArea = -1;
	}

	return si.regsArea;
}
Exemple #18
0
/*	Constructs the OHCI host controller driver: maps the operational
	registers, allocates the HCCA and the static endpoint tree, takes
	ownership of the controller from SMM/BIOS if necessary, resets and
	starts the controller, and finally installs the interrupt handler.
	On any failure the constructor returns early with fInitOK left false;
	partially acquired resources are presumably released by the destructor
	(confirm in the full file).
*/
OHCI::OHCI(pci_info *info, Stack *stack)
	:	BusManager(stack),
		fPCIInfo(info),
		fStack(stack),
		fOperationalRegisters(NULL),
		fRegisterArea(-1),
		fHccaArea(-1),
		fHcca(NULL),
		fInterruptEndpoints(NULL),
		fDummyControl(NULL),
		fDummyBulk(NULL),
		fDummyIsochronous(NULL),
		fFirstTransfer(NULL),
		fLastTransfer(NULL),
		fFinishTransfersSem(-1),
		fFinishThread(-1),
		fStopFinishThread(false),
		fProcessingPipe(NULL),
		fRootHub(NULL),
		fRootHubAddress(0),
		fPortCount(0)
{
	// NOTE(review): fInitOK is presumably set by the BusManager base class
	// constructor — this bails out early if the base failed to initialize
	if (!fInitOK) {
		TRACE_ERROR("bus manager failed to init\n");
		return;
	}

	TRACE("constructing new OHCI host controller driver\n");
	fInitOK = false;

	mutex_init(&fEndpointLock, "ohci endpoint lock");

	// enable busmaster and memory mapped access
	uint16 command = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, PCI_command, 2);
	command &= ~PCI_command_io;
	command |= PCI_command_master | PCI_command_memory;

	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, PCI_command, 2, command);

	// map the registers
	uint32 offset = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, PCI_base_registers, 4);
	offset &= PCI_address_memory_32_mask;
	TRACE_ALWAYS("iospace offset: 0x%lx\n", offset);
	fRegisterArea = map_physical_memory("OHCI memory mapped registers",
		(void *)offset,	B_PAGE_SIZE, B_ANY_KERNEL_BLOCK_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | B_WRITE_AREA,
		(void **)&fOperationalRegisters);
	if (fRegisterArea < B_OK) {
		TRACE_ERROR("failed to map register memory\n");
		return;
	}

	TRACE("mapped operational registers: %p\n", fOperationalRegisters);

	// Check the revision of the controller, which should be 10h
	uint32 revision = _ReadReg(OHCI_REVISION) & 0xff;
	TRACE("version %ld.%ld%s\n", OHCI_REVISION_HIGH(revision),
		OHCI_REVISION_LOW(revision), OHCI_REVISION_LEGACY(revision)
		? ", legacy support" : "");
	if (OHCI_REVISION_HIGH(revision) != 1 || OHCI_REVISION_LOW(revision) != 0) {
		TRACE_ERROR("unsupported OHCI revision\n");
		return;
	}

	// the HCCA must be physically contiguous; the controller is given its
	// physical address below
	void *hccaPhysicalAddress;
	fHccaArea = fStack->AllocateArea((void **)&fHcca, &hccaPhysicalAddress,
		sizeof(ohci_hcca), "USB OHCI Host Controller Communication Area");

	if (fHccaArea < B_OK) {
		TRACE_ERROR("unable to create the HCCA block area\n");
		return;
	}

	memset(fHcca, 0, sizeof(ohci_hcca));

	// Set Up Host controller
	// Dummy endpoints
	fDummyControl = _AllocateEndpoint();
	if (!fDummyControl)
		return;

	fDummyBulk = _AllocateEndpoint();
	if (!fDummyBulk) {
		_FreeEndpoint(fDummyControl);
		return;
	}

	fDummyIsochronous = _AllocateEndpoint();
	if (!fDummyIsochronous) {
		_FreeEndpoint(fDummyControl);
		_FreeEndpoint(fDummyBulk);
		return;
	}

	// Static endpoints that get linked in the HCCA
	fInterruptEndpoints = new(std::nothrow)
		ohci_endpoint_descriptor *[OHCI_STATIC_ENDPOINT_COUNT];
	if (!fInterruptEndpoints) {
		TRACE_ERROR("failed to allocate memory for interrupt endpoints\n");
		_FreeEndpoint(fDummyControl);
		_FreeEndpoint(fDummyBulk);
		_FreeEndpoint(fDummyIsochronous);
		return;
	}

	for (int32 i = 0; i < OHCI_STATIC_ENDPOINT_COUNT; i++) {
		fInterruptEndpoints[i] = _AllocateEndpoint();
		if (!fInterruptEndpoints[i]) {
			TRACE_ERROR("failed to allocate interrupt endpoint %ld", i);
			// free the endpoints allocated so far, then the dummies
			while (--i >= 0)
				_FreeEndpoint(fInterruptEndpoints[i]);
			_FreeEndpoint(fDummyBulk);
			_FreeEndpoint(fDummyControl);
			_FreeEndpoint(fDummyIsochronous);
			return;
		}
	}

	// build flat tree so that at each of the static interrupt endpoints
	// fInterruptEndpoints[i] == interrupt endpoint for interval 2^i
	uint32 interval = OHCI_BIGGEST_INTERVAL;
	uint32 intervalIndex = OHCI_STATIC_ENDPOINT_COUNT - 1;
	while (interval > 1) {
		uint32 insertIndex = interval / 2;
		while (insertIndex < OHCI_BIGGEST_INTERVAL) {
			fHcca->interrupt_table[insertIndex]
				= fInterruptEndpoints[intervalIndex]->physical_address;
			insertIndex += interval;
		}

		intervalIndex--;
		interval /= 2;
	}

	// setup the empty slot in the list and linking of all -> first
	fHcca->interrupt_table[0] = fInterruptEndpoints[0]->physical_address;
	for (int32 i = 1; i < OHCI_STATIC_ENDPOINT_COUNT; i++) {
		fInterruptEndpoints[i]->next_physical_endpoint
			= fInterruptEndpoints[0]->physical_address;
		fInterruptEndpoints[i]->next_logical_endpoint
			= fInterruptEndpoints[0];
	}

	// Now link the first endpoint to the isochronous endpoint
	fInterruptEndpoints[0]->next_physical_endpoint
		= fDummyIsochronous->physical_address;

	// Determine in what context we are running (Kindly copied from FreeBSD)
	uint32 control = _ReadReg(OHCI_CONTROL);
	if (control & OHCI_INTERRUPT_ROUTING) {
		// SMM/BIOS owns the controller; request an ownership change and
		// poll until it releases it (or give up and hard-reset)
		TRACE_ALWAYS("smm is in control of the host controller\n");
		uint32 status = _ReadReg(OHCI_COMMAND_STATUS);
		_WriteReg(OHCI_COMMAND_STATUS, status | OHCI_OWNERSHIP_CHANGE_REQUEST);
		for (uint32 i = 0; i < 100 && (control & OHCI_INTERRUPT_ROUTING); i++) {
			snooze(1000);
			control = _ReadReg(OHCI_CONTROL);
		}

		if ((control & OHCI_INTERRUPT_ROUTING) != 0) {
			TRACE_ERROR("smm does not respond. resetting...\n");
			_WriteReg(OHCI_CONTROL, OHCI_HC_FUNCTIONAL_STATE_RESET);
			snooze(USB_DELAY_BUS_RESET);
		} else
			TRACE_ALWAYS("ownership change successful\n");
	} else {
		TRACE("cold started\n");
		snooze(USB_DELAY_BUS_RESET);
	}

	// This reset should not be necessary according to the OHCI spec, but
	// without it some controllers do not start.
	_WriteReg(OHCI_CONTROL, OHCI_HC_FUNCTIONAL_STATE_RESET);
	snooze(USB_DELAY_BUS_RESET);

	// We now own the host controller and the bus has been reset
	// (the frame interval must be saved before the reset below clears it)
	uint32 frameInterval = _ReadReg(OHCI_FRAME_INTERVAL);
	uint32 intervalValue = OHCI_GET_INTERVAL_VALUE(frameInterval);

	// Disable interrupts right before we reset
	_WriteReg(OHCI_COMMAND_STATUS, OHCI_HOST_CONTROLLER_RESET);
	// Nominal time for a reset is 10 us
	uint32 reset = 0;
	for (uint32 i = 0; i < 10; i++) {
		spin(10);
		reset = _ReadReg(OHCI_COMMAND_STATUS) & OHCI_HOST_CONTROLLER_RESET;
		if (reset == 0)
			break;
	}

	if (reset) {
		TRACE_ERROR("error resetting the host controller (timeout)\n");
		return;
	}

	// The controller is now in SUSPEND state, we have 2ms to go OPERATIONAL.
	// Interrupts are disabled.

	// Set up host controller register
	_WriteReg(OHCI_HCCA, (uint32)hccaPhysicalAddress);
	_WriteReg(OHCI_CONTROL_HEAD_ED, (uint32)fDummyControl->physical_address);
	_WriteReg(OHCI_BULK_HEAD_ED, (uint32)fDummyBulk->physical_address);
	// Disable all interrupts
	_WriteReg(OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTERRUPTS);
	// Switch on desired functional features
	control = _ReadReg(OHCI_CONTROL);
	control &= ~(OHCI_CONTROL_BULK_SERVICE_RATIO_MASK | OHCI_ENABLE_LIST
		| OHCI_HC_FUNCTIONAL_STATE_MASK | OHCI_INTERRUPT_ROUTING);
	control |= OHCI_ENABLE_LIST | OHCI_CONTROL_BULK_RATIO_1_4
		| OHCI_HC_FUNCTIONAL_STATE_OPERATIONAL;
	// And finally start the controller
	_WriteReg(OHCI_CONTROL, control);

	// The controller is now OPERATIONAL.
	// restore the saved frame interval, flipping the toggle bit so the
	// controller accepts the write
	frameInterval = (_ReadReg(OHCI_FRAME_INTERVAL) & OHCI_FRAME_INTERVAL_TOGGLE)
		^ OHCI_FRAME_INTERVAL_TOGGLE;
	frameInterval |= OHCI_FSMPS(intervalValue) | intervalValue;
	_WriteReg(OHCI_FRAME_INTERVAL, frameInterval);
	// 90% periodic
	uint32 periodic = OHCI_PERIODIC(intervalValue);
	_WriteReg(OHCI_PERIODIC_START, periodic);

	// Fiddle the No Over Current Protection bit to avoid chip bug
	uint32 desca = _ReadReg(OHCI_RH_DESCRIPTOR_A);
	_WriteReg(OHCI_RH_DESCRIPTOR_A, desca | OHCI_RH_NO_OVER_CURRENT_PROTECTION);
	_WriteReg(OHCI_RH_STATUS, OHCI_RH_LOCAL_POWER_STATUS_CHANGE);
	snooze(OHCI_ENABLE_POWER_DELAY);
	_WriteReg(OHCI_RH_DESCRIPTOR_A, desca);

	// The AMD756 requires a delay before re-reading the register,
	// otherwise it will occasionally report 0 ports.
	uint32 numberOfPorts = 0;
	for (uint32 i = 0; i < 10 && numberOfPorts == 0; i++) {
		snooze(OHCI_READ_DESC_DELAY);
		uint32 descriptor = _ReadReg(OHCI_RH_DESCRIPTOR_A);
		numberOfPorts = OHCI_RH_GET_PORT_COUNT(descriptor);
	}
	if (numberOfPorts > OHCI_MAX_PORT_COUNT)
		numberOfPorts = OHCI_MAX_PORT_COUNT;
	fPortCount = numberOfPorts;
	TRACE("port count is %d\n", fPortCount);

	// Create semaphore the finisher thread will wait for
	fFinishTransfersSem = create_sem(0, "OHCI Finish Transfers");
	if (fFinishTransfersSem < B_OK) {
		TRACE_ERROR("failed to create semaphore\n");
		return;
	}

	// Create the finisher service thread
	fFinishThread = spawn_kernel_thread(_FinishThread, "ohci finish thread",
		B_URGENT_DISPLAY_PRIORITY, (void *)this);
	resume_thread(fFinishThread);

	// Install the interrupt handler
	TRACE("installing interrupt handler\n");
	install_io_interrupt_handler(fPCIInfo->u.h0.interrupt_line,
		_InterruptHandler, (void *)this, 0);

	// Enable interesting interrupts now that the handler is in place
	_WriteReg(OHCI_INTERRUPT_ENABLE, OHCI_NORMAL_INTERRUPTS
		| OHCI_MASTER_INTERRUPT_ENABLE);

	TRACE("OHCI host controller driver constructed\n");
	fInitOK = true;
}
Exemple #19
0
static status_t
Mach64_GetBiosParameters(DeviceInfo& di, uint8& clockType)
{
	// Get some clock parameters from the video BIOS, and if Mobility chip,
	// also get the LCD panel width & height.
	//
	// Parameters:
	//   di        - per-device state; di.sharedInfo must already be set.
	//   clockType - out: clock generator type read from the BIOS clock table.
	// Returns: the negative error code from map_physical_memory() if the
	//   ROM cannot be mapped, B_ERROR if the BIOS signature is missing,
	//   otherwise B_OK.
	//
	// NOTE(review): the BIOS8/BIOS16/BIOS32 macros presumably read from the
	// local `romAddr` mapping — confirm against their definitions; the
	// table offsets (0x48, 0x78, +16, +4, ...) follow the ATI BIOS layout.

	// In case mapping the ROM area fails or other error occurs, set default
	// values for the parameters which will be obtained from the BIOS ROM.

	clockType = M64_CLOCK_INTERNAL;

	SharedInfo& si = *(di.sharedInfo);
	M64_Params& params = si.m64Params;
	params.clockNumberToProgram = 3;

	si.panelX = 0;
	si.panelY = 0;

	// Map the ROM area.  The Mach64 chips do not assign a ROM address in the
	// PCI info;  thus, access the ROM via the ISA legacy memory map.

	uint8* romAddr;
	area_id romArea = map_physical_memory("ATI Mach64 ROM",
		0x000c0000,
		M64_BIOS_SIZE,
		B_ANY_KERNEL_ADDRESS,
		B_READ_AREA,
		(void**)&(romAddr));

	if (romArea < 0) {
		TRACE("Mach64_GetBiosParameters(), ROM mapping error: %ld\n", romArea);
		return romArea;		// ROM mapping failed; return error code
	}

	// Check if we have the BIOS signature (might fail on laptops..).

	if (BIOS8(0) != 0x55 || BIOS8(1) != 0xaa) {
		TRACE("Mach64_GetBiosParameters(), ROM does not contain BIOS signature\n");
		delete_area(romArea);
		return B_ERROR;
	}

	// Get clock info from BIOS.
	// 0x48 holds the pointer to the main ROM table; entry +16 of that table
	// points at the clock table.

	uint32 romTable = BIOS16(0x48);
	uint32 clockTable = BIOS16(romTable + 16);
	clockType = BIOS8(clockTable);
	params.clockNumberToProgram = BIOS8(clockTable + 6);
	params.maxPixelClock = BIOS16(clockTable + 4) * 10;	// stored in 10 kHz units
	params.refFreq = BIOS16(clockTable + 8);
	params.refDivider = BIOS16(clockTable + 10);

	// If Mobility chip, get the LCD panel width & height.

	if (si.chipType == MACH64_MOBILITY) {
		uint32 lcdTable = BIOS16(0x78);
		if (BIOS32(lcdTable) == 0x544d5224) {	// is LCD table signature correct?
			uint32 lcdPanelInfo = BIOS16(lcdTable + 10);
			si.panelX = BIOS16(lcdPanelInfo + 25);
			si.panelY = BIOS16(lcdPanelInfo + 27);
			TRACE("Mobility LCD Panel size: %dx%d\n", si.panelX, si.panelY);
		} else {
			TRACE("Mobility LCD table signature 0x%x in BIOS is incorrect\n",
				 BIOS32(lcdTable));
		}
	}

	// The ROM mapping is only needed while reading the tables; release it.
	delete_area(romArea);

	return B_OK;
}
Exemple #20
0
/* Create a new thread for `process` (or a kernel thread when process == 0),
 * starting at `entry_point` with `params` passed in RDI.
 * Returns the new Thread, or 0 on out-of-memory.
 * Runs with interrupts locked; every exit path must unlock them. */
struct  Thread *create_thread(struct Process *process, size_t entry_point, size_t params) {
	lock_interrupts();

	struct Thread *thread = malloc(sizeof(struct Thread));
	if(thread == 0) {
		/* BUG FIX: the original returned here without unlock_interrupts(),
		   leaving interrupts disabled forever on allocation failure. */
		unlock_interrupts();
		return 0; /* out of memory */
	}

	/* set up the stack - grab a virtual page */	
	thread->pml4 = process ? process->pml4 : kernel_pml4;
	size_t virt_page = find_free_page_range(thread->pml4, 1);
	if(virt_page == 0) {
		free(thread); /* out of memory */
		unlock_interrupts();
		return 0;
	}

	/* grab a physical page */
	size_t phys = get_physical_page();
	if(phys == 0) {
		/* NOTE(review): virt_page found above is not released here — confirm
		   find_free_page_range() only scans and does not reserve. */
		free(thread); /* out of memory */
		unlock_interrupts();
		return 0;
	}


	/* map the new stack */
	map_physical_page(thread->pml4, virt_page, phys);

	/* now map this page for us to play with */
	size_t temp_addr = (size_t)map_physical_memory(phys, 0);

	/* set up our initial registers at the top of the new stack; written via
	   the temporary mapping, but they live at virt_page in the thread's
	   address space */
	struct isr_regs *regs = (struct isr_regs *)(temp_addr + page_size - sizeof(struct isr_regs));
	regs->r15 = 0; regs->r14 = 0; regs->r13 = 0; regs->r12 = 0; regs->r11 = 0; regs->r10 = 0; regs->r9 = 0; regs->r8 = 0;
	regs->rbp = virt_page + page_size; regs->rdi = params; regs->rsi = 0; regs->rdx = 0; regs->rcx = 0; regs->rbx = 0; regs->rax = 0;
	regs->int_no = 0; regs->err_code = 0;
	regs->rip = entry_point; regs->cs = 0x08;
	regs->eflags = 
		((!process) ? ((1 << 12) | (1 << 13)) : 0) | /* set iopl bits for kernel threads */
		(1 << 9) | /* interrupts enabled */
		(1 << 21) /* can use CPUID */; 
	regs->usersp = virt_page + page_size; regs->ss = 0x10;

	/* set up the thread object */
	thread->process = process;
	thread->stack = virt_page;
	thread->registers = (struct isr_regs *)(virt_page + page_size - sizeof(struct isr_regs));
	thread->id = next_thread_id;
	next_thread_id++;
	thread->awake = false;
	thread->awake_in_process = false;
	thread->time_slices = 0;

	/* add it to the linked list of threads (kernel list or per-process list) */
	thread->previous = 0;
	if(!process) {
		if(kernel_threads)
			kernel_threads->previous = thread;
		thread->next = kernel_threads;
		kernel_threads = thread;
	} else {
		if(process->threads)
			process->threads->previous = thread;
		thread->next = process->threads;
		process->threads = thread;
		process->threads_count++;
	}

	/* populate the fpu registers with something */
	memset(thread->fpu_registers, 0, 512);

	/* initially asleep; not on any scheduler wake list yet */
	thread->next_awake = 0;
	/* (removed a redundant second `thread->previous = 0;` — it was already
	   cleared before the list insertion above) */

	unlock_interrupts();

	return thread;
}
/* Driver entry point: scan PCI for the VirtualBox video device, create the
 * shared-info area, read the current video mode, enable PCI access, and map
 * the framebuffer write-combined.
 * Returns B_OK (also when no device is found — gCanHasDevice stays false),
 * or a negative error code if area creation / mapping fails. */
status_t init_driver()
{
    LogFlowFunc(("init_driver\n"));

    gLock.Init("VBoxVideo driver lock");

    uint32 pciIndex = 0;

    while (gPCI->get_nth_pci_info(pciIndex, &gDeviceInfo.pciInfo) == B_OK)
    {
        if (gDeviceInfo.pciInfo.vendor_id == VENDOR_ID && gDeviceInfo.pciInfo.device_id == DEVICE_ID)
        {
            sprintf(gDeviceInfo.name, "graphics/" DEVICE_FORMAT,
                    gDeviceInfo.pciInfo.vendor_id, gDeviceInfo.pciInfo.device_id,
                    gDeviceInfo.pciInfo.bus, gDeviceInfo.pciInfo.device, gDeviceInfo.pciInfo.function);
            TRACE("found device %s\n", gDeviceInfo.name);

            gCanHasDevice = true;
            gDeviceInfo.openCount = 0;

            /* Keep SharedInfo 8-byte aligned. */
            size_t sharedSize = (sizeof(SharedInfo) + 7) & ~7;
            gDeviceInfo.sharedArea = create_area("vboxvideo shared info",
                                                 (void **)&gDeviceInfo.sharedInfo, B_ANY_KERNEL_ADDRESS,
                                                 ROUND_TO_PAGE_SIZE(sharedSize), B_FULL_LOCK,
                                                 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_USER_CLONEABLE_AREA);
            /* BUG FIX: the area id was never checked; on failure sharedInfo
             * would have been dereferenced below with an undefined value. */
            if (gDeviceInfo.sharedArea < 0)
            {
                TRACE("failed to create shared info area\n");
                gCanHasDevice = false;
                return gDeviceInfo.sharedArea;
            }

            uint16_t width, height, vwidth, bpp, flags;
            VBoxVideoGetModeRegisters(&width, &height, &vwidth, &bpp, &flags);

            gDeviceInfo.sharedInfo->currentMode.space = get_color_space_for_depth(bpp);
            gDeviceInfo.sharedInfo->currentMode.virtual_width = width;
            gDeviceInfo.sharedInfo->currentMode.virtual_height = height;
            gDeviceInfo.sharedInfo->currentMode.h_display_start = 0;
            gDeviceInfo.sharedInfo->currentMode.v_display_start = 0;
            gDeviceInfo.sharedInfo->currentMode.flags = 0;
            gDeviceInfo.sharedInfo->currentMode.timing.h_display = width;
            gDeviceInfo.sharedInfo->currentMode.timing.v_display = height;
            /* Not used, but this makes a reasonable-sounding refresh rate show in screen prefs: */
            gDeviceInfo.sharedInfo->currentMode.timing.h_total = 1000;
            gDeviceInfo.sharedInfo->currentMode.timing.v_total = 1;
            gDeviceInfo.sharedInfo->currentMode.timing.pixel_clock = 850;

            /* Map the PCI memory space: enable I/O, memory decoding and bus
             * mastering before touching the BARs. */
            uint32 command_reg = gPCI->read_pci_config(gDeviceInfo.pciInfo.bus,
                                                       gDeviceInfo.pciInfo.device, gDeviceInfo.pciInfo.function,  PCI_command, 2);
            command_reg |= PCI_command_io | PCI_command_memory | PCI_command_master;
            gPCI->write_pci_config(gDeviceInfo.pciInfo.bus, gDeviceInfo.pciInfo.device,
                                   gDeviceInfo.pciInfo.function, PCI_command, 2, command_reg);

            area_id fbArea = map_physical_memory("vboxvideo framebuffer",
                                                 (phys_addr_t)gDeviceInfo.pciInfo.u.h0.base_registers[0],
                                                 gDeviceInfo.pciInfo.u.h0.base_register_sizes[0], B_ANY_KERNEL_BLOCK_ADDRESS,
                                                 B_READ_AREA | B_WRITE_AREA, &(gDeviceInfo.sharedInfo->framebuffer));
            gDeviceInfo.sharedInfo->framebufferArea = fbArea;
            /* BUG FIX: a failed framebuffer mapping was silently ignored. */
            if (fbArea < 0)
            {
                TRACE("failed to map framebuffer\n");
                delete_area(gDeviceInfo.sharedArea);
                gDeviceInfo.sharedArea = -1;
                gCanHasDevice = false;
                return fbArea;
            }
            /* Write-combining is a best-effort optimization; ignore failure. */
            vm_set_area_memory_type(gDeviceInfo.sharedInfo->framebufferArea,
                                    (phys_addr_t)gDeviceInfo.pciInfo.u.h0.base_registers[0], B_MTR_WC);
            break;
        }

        pciIndex++;
    }

    return B_OK;
}
Exemple #22
0
/* Initialize the OpenPIC driver instance for `node`: locate the PCI device,
 * validate and map its register space, and initialize the controller.
 * On success stores the openpic_info in *cookie and returns B_OK.
 * Currently short-circuited with B_ERROR (module marked broken). */
static status_t
openpic_init_driver(device_node *node, void **cookie)
{
	// OK, this module is broken for now. But it compiles.
	// Everything below is intentionally unreachable until the module is fixed.
	return B_ERROR;
	openpic_info *info = new(nothrow) openpic_info;
	if (!info)
		return B_NO_MEMORY;
	ObjectDeleter<openpic_info> infoDeleter(info);
		// deletes info automatically on any early return below

	info->node = node;

	// get interface to PCI device
	void *aCookie;
	void *anotherCookie; // possibly the same cookie.
	driver_module_info *driver;
	status_t status = sDeviceManager->get_driver(sDeviceManager->get_parent_node(node),
												 &driver, &aCookie);
	if (status != B_OK)
		return status;

	driver->init_driver(node, &anotherCookie);

	/* status = sDeviceManager->init_driver(
		sDeviceManager->get_parent(node), NULL,
		(driver_module_info**)&info->pci, (void**)&info->device);
		if (status != B_OK)
		return status; */

	// get the pci info for the device
	pci_info pciInfo;
	info->pci->get_pci_info(info->device, &pciInfo);

	// find supported device info
	info->supported_device = openpic_check_supported_device(pciInfo.vendor_id,
		pciInfo.device_id);
	if (!info->supported_device) {
		dprintf("openpic: device (0x%04hx:0x%04hx) not supported\n",
			pciInfo.vendor_id, pciInfo.device_id);
		return B_ERROR;
	}
	dprintf("openpic: found supported device: %s (0x%04hx:0x%04hx)\n",
		info->supported_device->name, pciInfo.vendor_id, pciInfo.device_id);

	// get register space
	addr_t physicalRegisterBase = pciInfo.u.h0.base_registers[0];
	uint32 registerSpaceSize = pciInfo.u.h0.base_register_sizes[0];
	if (registerSpaceSize < info->supported_device->register_offset
		|| registerSpaceSize - info->supported_device->register_offset
			< OPENPIC_MIN_REGISTER_SPACE_SIZE) {
		dprintf("openpic: register space too small\n");
		// BUG FIX: previously fell through and mapped a too-small space.
		return B_ERROR;
	}
	physicalRegisterBase += info->supported_device->register_offset;
	registerSpaceSize -= info->supported_device->register_offset;
	if (registerSpaceSize > OPENPIC_MAX_REGISTER_SPACE_SIZE)
		registerSpaceSize = OPENPIC_MAX_REGISTER_SPACE_SIZE;
	
	// map register space
	void *virtualRegisterBase = NULL;
	area_id registerArea = map_physical_memory("openpic registers",
		(void*)physicalRegisterBase, registerSpaceSize, B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &virtualRegisterBase);
	if (registerArea < 0) {
		// BUG FIX: the original returned info->register_area, which is not
		// yet initialized at this point; return the actual error code.
		return registerArea;
	}

	info->physical_registers = physicalRegisterBase;
	info->register_space_size = registerSpaceSize;
	info->register_area = registerArea;
	info->virtual_registers = (addr_t)virtualRegisterBase;

	// init the controller
	status = openpic_init(info);
	if (status != B_OK)
		return status;

	// keep the info; ownership passes to the caller via *cookie
	infoDeleter.Detach();
	*cookie = info;

	dprintf("openpic_init_driver(): Successfully initialized!\n");

	return B_OK;
}
Exemple #23
0
static status_t VBoxGuestHaikuAttach(const pci_info *pDevice)
{
    status_t status;
    int rc = VINF_SUCCESS;
    int iResId = 0;
    struct VBoxGuestDeviceState *pState = &sState;
    static const char *const     s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER                    pRelLogger;

    AssertReturn(pDevice, B_BAD_VALUE);

    cUsers = 0;

    /*
     * Initialize IPRT R0 driver, which internally calls OS-specific r0 init.
     */
    rc = RTR0Init(0);
    if (RT_FAILURE(rc))
    {
        /** @todo r=ramshankar: use dprintf here. */
        LogFunc(("RTR0Init failed.\n"));
        return ENXIO;
    }

    rc = RTSpinlockCreate(&g_Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestHaiku");
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestHaikuAttach: RTSpinlock create failed. rc=%Rrc\n", rc));
        return ENXIO;
    }

#ifdef DO_LOG
    /*
     * Create the release log.
     * (We do that here instead of common code because we want to log
     * early failures using the LogRel macro.)
     */
    rc = RTLogCreate(&pRelLogger, 0 | RTLOGFLAGS_PREFIX_THREAD /* fFlags */, "all",
                     "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
                     RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER | RTLOGDEST_USER, NULL);
    dprintf(MODULE_NAME ": RTLogCreate: %d\n", rc);
    if (RT_SUCCESS(rc))
    {
        //RTLogGroupSettings(pRelLogger, g_szLogGrp);
        //RTLogFlags(pRelLogger, g_szLogFlags);
        //RTLogDestinations(pRelLogger, "/var/log/vboxguest.log");
        RTLogRelSetDefaultInstance(pRelLogger);
        RTLogSetDefaultInstance(pRelLogger); //XXX
    }
#endif

    /*
     * Allocate I/O port resource.
     */
    pState->uIOPortBase = pDevice->u.h0.base_registers[0];
    /* @todo check flags for IO? */
    if (pState->uIOPortBase)
    {
        /*
         * Map the MMIO region.
         */
        uint32 phys = pDevice->u.h0.base_registers[1];
        /* @todo Check flags for mem? */
        pState->VMMDevMemSize    = pDevice->u.h0.base_register_sizes[1];
        pState->iVMMDevMemAreaId = map_physical_memory("VirtualBox Guest MMIO", phys, pState->VMMDevMemSize,
                                                       B_ANY_KERNEL_BLOCK_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
                                                       &pState->pMMIOBase);
        if (pState->iVMMDevMemAreaId > 0 && pState->pMMIOBase)
        {
            /*
             * Call the common device extension initializer.
             */
            rc = VBoxGuestInitDevExt(&g_DevExt, pState->uIOPortBase, pState->pMMIOBase, pState->VMMDevMemSize,
#if ARCH_BITS == 64
                                     VBOXOSTYPE_Haiku_x64,
#else
                                     VBOXOSTYPE_Haiku,
#endif
                                     VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Add IRQ of VMMDev.
                 */
                pState->iIrqResId = pDevice->u.h0.interrupt_line;
                rc = VBoxGuestHaikuAddIRQ(pState);
                if (RT_SUCCESS(rc))
                {
                    LogRel((MODULE_NAME ": loaded successfully\n"));
                    return B_OK;
                }

                LogRel((MODULE_NAME ":VBoxGuestInitDevExt failed.\n"));
                VBoxGuestDeleteDevExt(&g_DevExt);
            }
            else
                LogRel((MODULE_NAME ":VBoxGuestHaikuAddIRQ failed.\n"));
        }
        else
            LogRel((MODULE_NAME ":MMIO region setup failed.\n"));
    }
    else
        LogRel((MODULE_NAME ":IOport setup failed.\n"));

    RTR0Term();
    return ENXIO;
}
Exemple #24
0
/*
** Allocate the actual memory for the cardinfo object
*/
static BusLogic *create_cardinfo(int num, int iobase, int irq)
{
    uchar *a;
    area_id aid;
    int i;
    physical_entry entries[5];
    char name[9] = { 'b', 'l', '_', 'c', 'c', 'b', '0', '0', 0 };

    BusLogic *bl = (BusLogic *) malloc(sizeof(BusLogic));

#ifndef __INTEL__
    i = map_physical_memory("bl_regs",(void*) iobase,  4096,
                            B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, &a);
    iobase = (uint32) a;
    if(i < 0) {
        dprintf("buslogic: can't map registers...\n");
    }
#endif

    bl->id = num;
    bl->iobase = iobase;
    bl->irq = irq;
    bl->out_nextbox = bl->in_nextbox = 0;

    /* create our 20k workspace.  First 4k goes to 510 mailboxes.
    ** remaining 16k is used for up to 256 CCB32's
    */
    a = NULL;

#if BOOT
    /* life in the bootstrap is a bit different... */
    /* can't be sure of getting contig pages -- scale
       stuff down so we can live in just one page */
    bl->box_count = 4;
    if(!(a = malloc(4096*2))) {
        free(bl);
        return NULL;
    }
    a = (uchar *) ((((uint32) a) & 0xFFFFF000) + 0x1000);
    get_memory_map(a, 4096, entries, 2);
#else
    bl->box_count = MAX_CCB_COUNT;
    aid = create_area("bl_workspace", (void **)&a, B_ANY_KERNEL_ADDRESS, 4096*5,
                      B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
    if(aid == B_ERROR || aid == B_BAD_VALUE || aid == B_NO_MEMORY) {
        free(bl);
        return NULL;
    }
    get_memory_map(a, 4096*5, entries, 2);
#endif

    /* figure virtual <-> physical translations */
    bl->phys_to_virt = ((uint) a) - ((uint) entries[0].address);
    bl->virt_to_phys = (((uint) entries[0].address - (uint) a));
    bl->phys_mailboxes = (uint) entries[0].address;

    /* initialize all mailboxes to empty */
    bl->out_boxes = (BL_Out_Mailbox32 *) a;
    bl->in_boxes = (BL_In_Mailbox32 *) (a + (8 * bl->box_count));
    for(i=0; i<bl->box_count; i++) {
        bl->out_boxes[i].action_code = BL_ActionCode_NotInUse;
        bl->in_boxes[i].completion_code = BL_CompletionCode_NotInUse;
    }

    /* setup the CCB32 cache */
#if BOOT
    bl->ccb = (BL_CCB32 *) (((uchar *)a) + 1024);
#else
    bl->ccb = (BL_CCB32 *) (((uchar *)a) + 4096);
#endif
    bl->first_ccb = NULL;
    for(i=0; i<bl->box_count; i++) {
        name[6] = hextab[(i & 0xF0) >> 4];
        name[7] = hextab[i & 0x0F];
        bl->ccb[i].done = create_sem(0, name);
        bl->ccb[i].next = bl->first_ccb;
        bl->first_ccb = &(bl->ccb[i]);
    }

    bl->hw_lock = create_sem(1, "bl_hw_lock");
    bl->ccb_lock = create_sem(1, "bl_ccb_lock");
    bl->ccb_count = create_sem(MAX_CCB_COUNT, "bl_ccb_count");
    bl->reqid = 0;

    return bl;
}
/* Map the S3 device's MMIO register area and video memory (write-combined if
 * possible) into kernel space, filling in di.regs and the SharedInfo fields.
 * Returns the video-memory area id on success or a negative error code;
 * on failure all areas created here are deleted again. */
static status_t
MapDevice(DeviceInfo& di)
{
	char areaName[B_OS_NAME_LENGTH];
	SharedInfo& si = *(di.sharedInfo);
	pci_info& pciInfo = di.pciInfo;

	TRACE("enter MapDevice()\n");

	// Enable memory mapped IO and bus master.

	SetPCI(pciInfo, PCI_command, 2, GetPCI(pciInfo, PCI_command, 2)
		| PCI_command_io | PCI_command_memory | PCI_command_master);

	const uint32 SavageMmioRegBaseOld	= 0x1000000;	// 16 MB
	const uint32 SavageMmioRegBaseNew	= 0x0000000;
	const uint32 SavageMmioRegSize		= 0x0080000;	// 512 KB reg area size

	const uint32 VirgeMmioRegBase		= 0x1000000;	// 16 MB
	const uint32 VirgeMmioRegSize		= 0x10000;		// 64 KB reg area size

	uint32 videoRamAddr = 0;
	uint32 videoRamSize = 0;
	uint32 regsBase = 0;
	uint32 regAreaSize = 0;

	// Since we do not know at this point the actual size of the video
	// memory, set it to the largest value that the respective chipset
	// family can have.

	if (S3_SAVAGE_FAMILY(di.pChipInfo->chipType)) {
		if (S3_SAVAGE_3D_SERIES(di.pChipInfo->chipType)) {
			// Savage 3D & Savage MX chips: registers live 16 MB into BAR 0,
			// frame buffer at the start of the same BAR.

			regsBase = pciInfo.u.h0.base_registers[0] + SavageMmioRegBaseOld;
			regAreaSize = SavageMmioRegSize;

	 		videoRamAddr = pciInfo.u.h0.base_registers[0];
			videoRamSize = 16 * 1024 * 1024;	// 16 MB is max for 3D series
			si.videoMemPCI = (void *)(pciInfo.u.h0.base_registers_pci[0]);
		} else {
			// All other Savage chips: registers in BAR 0, frame buffer in
			// BAR 1 (with its real size reported by the PCI config).

			regsBase = pciInfo.u.h0.base_registers[0] + SavageMmioRegBaseNew;
			regAreaSize = SavageMmioRegSize;

			videoRamAddr = pciInfo.u.h0.base_registers[1];
			videoRamSize = pciInfo.u.h0.base_register_sizes[1];
			si.videoMemPCI = (void *)(pciInfo.u.h0.base_registers_pci[1]);
		}
	} else {
		// Trio/Virge chips.

		regsBase = pciInfo.u.h0.base_registers[0] + VirgeMmioRegBase;
		regAreaSize = VirgeMmioRegSize;

 		videoRamAddr = pciInfo.u.h0.base_registers[0];
		videoRamSize = 8 * 1024 * 1024;	// 8 MB is max for Trio/Virge chips
		si.videoMemPCI = (void *)(pciInfo.u.h0.base_registers_pci[0]);
	}

	// Map the MMIO register area.

	sprintf(areaName, DEVICE_FORMAT " regs",
		pciInfo.vendor_id, pciInfo.device_id,
		pciInfo.bus, pciInfo.device, pciInfo.function);

	si.regsArea = map_physical_memory(areaName, regsBase, regAreaSize,
		B_ANY_KERNEL_ADDRESS,
		0,		// neither read nor write, to hide it from user space apps
		(void**)(&(di.regs)));

	if (si.regsArea < 0)
		return si.regsArea;	// return error code

	// Map the video memory.

	sprintf(areaName, DEVICE_FORMAT " framebuffer",
		pciInfo.vendor_id, pciInfo.device_id,
		pciInfo.bus, pciInfo.device, pciInfo.function);

	// IDIOM FIX: combine protection flags with bitwise OR, not addition
	// (equivalent for disjoint bits, but OR states the intent).
	si.videoMemArea = map_physical_memory(
		areaName,
		videoRamAddr,
		videoRamSize,
		B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA,
		&(si.videoMemAddr));

	if (si.videoMemArea < 0) {
		// Try to map this time without write combining.
		si.videoMemArea = map_physical_memory(
			areaName,
			videoRamAddr,
			videoRamSize,
			B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA,
			&(si.videoMemAddr));
	}

	TRACE("Video memory, area: %ld,  addr: 0x%lX\n", si.videoMemArea, (uint32)(si.videoMemAddr));

	// If there was an error, delete other areas.
	if (si.videoMemArea < 0) {
		delete_area(si.regsArea);
		si.regsArea = -1;
	}

	TRACE("leave MapDevice(); result: %ld\n", si.videoMemArea);
	return si.videoMemArea;
}
Exemple #26
0
/* Search the (R/X)SDT `acpiSdt` for the ACPI table whose 4-byte signature
 * matches `signature`. On success returns the fully-mapped table (the caller
 * owns the created area); returns NULL if the table is absent or any mapping
 * fails. PointerType selects 32-bit (RSDT) vs 64-bit (XSDT) entries. */
acpi_descriptor_header*
acpi_find_table_generic(const char* signature, acpi_descriptor_header* acpiSdt)
{
	if (acpiSdt == NULL)
		return NULL;

	if (sNumEntries == -1) {
		// if using the xsdt, our entries are 64 bits wide.
		sNumEntries = (acpiSdt->length
			- sizeof(acpi_descriptor_header))
				/ sizeof(PointerType);
	}

	if (sNumEntries <= 0) {
		TRACE(("acpi: root system description table is empty\n"));
		return NULL;
	}

	TRACE(("acpi: searching %ld entries for table '%.4s'\n", sNumEntries,
		signature));

	PointerType* pointer = (PointerType*)((uint8*)acpiSdt
		+ sizeof(acpi_descriptor_header));

	acpi_descriptor_header* header = NULL;
	area_id headerArea = -1;
	for (int32 j = 0; j < sNumEntries; j++, pointer++) {
		header = NULL;
		headerArea = map_physical_memory("acpi header", (uint32)*pointer,
				sizeof(acpi_descriptor_header), B_ANY_KERNEL_ADDRESS, 
			B_KERNEL_READ_AREA, (void **)&header);

		// BUG FIX: check the area id as well, so a failed mapping is never
		// delete_area()'d and a stale `header` is never dereferenced.
		if (headerArea < 0 || header == NULL
			|| strncmp(header->signature, signature, 4) != 0) {
			// not interesting for us
			TRACE(("acpi: Looking for '%.4s'. Skipping '%.4s'\n",
				signature, header != NULL ? header->signature : "null"));

			if (headerArea >= 0)
				delete_area(headerArea);
			header = NULL;
			headerArea = -1;

			continue;
		}

		TRACE(("acpi: Found '%.4s' @ %p\n", signature, pointer));
		break;
	}


	if (header == NULL)
		return NULL;

	// Map the whole table, not just the header
	uint32 length = header->length;
	delete_area(headerArea);

	header = NULL;
	headerArea = map_physical_memory("acpi table",
		(uint32)*pointer, length, B_ANY_KERNEL_ADDRESS, 
			B_KERNEL_READ_AREA, (void **)&header);
	// BUG FIX: the original could return a dangling pointer into the just
	// deleted header area if this second mapping failed.
	if (headerArea < 0)
		return NULL;
	return header;
}
Exemple #27
0
/* Map the Nvidia device: enable PCI access, map the MMIO registers, copy the
 * BIOS ROM (PCI-assigned address or ISA legacy fallback) into si->rom_mirror,
 * and map the framebuffer write-combined if possible.
 * Returns si->fb_area (>= 0) on success or a negative error code; on failure
 * any areas created here are deleted again. */
static status_t map_device(device_info *di)
{
	char buffer[B_OS_NAME_LENGTH]; /*memory for device name*/
	shared_info *si = di->si;
	uint32	tmpUlong;
	pci_info *pcii = &(di->pcii);
	system_info sysinfo;

	/*storage for the physical to virtual table (used for dma buffer)*/
//	physical_entry physical_memory[2];
//	#define G400_DMA_BUFFER_SIZE 1024*1024

	/* variables for making copy of ROM */
	uint8* rom_temp;
	area_id rom_area;

	/* Nvidia cards have registers in [0] and framebuffer in [1] */
	int registers = 1;
	int frame_buffer = 0;
//	int pseudo_dma = 2;

	/* enable memory mapped IO, disable VGA I/O - this is defined in the PCI standard */
	tmpUlong = get_pci(PCI_command, 2);
	/* enable PCI access */
	tmpUlong |= PCI_command_memory;
	/* enable busmastering */
	tmpUlong |= PCI_command_master;
	/* disable ISA I/O access */
	tmpUlong &= ~PCI_command_io;
	set_pci(PCI_command, 2, tmpUlong);

 	/*work out which version of BeOS is running*/
 	get_system_info(&sysinfo);
 	if (0)//sysinfo.kernel_build_date[0]=='J')/*FIXME - better ID version*/
 	{
 		si->use_clone_bugfix = 1;
 	}
 	else
 	{
 		si->use_clone_bugfix = 0;
 	}

	/* work out a name for the register mapping */
	sprintf(buffer, DEVICE_FORMAT " regs",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);

	/* get a virtual memory address for the registers*/
	si->regs_area = map_physical_memory(
		buffer,
		/* WARNING: Nvidia needs to map regs as viewed from PCI space! */
		di->pcii.u.h0.base_registers_pci[registers],
		di->pcii.u.h0.base_register_sizes[registers],
		B_ANY_KERNEL_ADDRESS,
 		(si->use_clone_bugfix ? B_READ_AREA|B_WRITE_AREA : 0),
		(void **)&(di->regs));
 	si->clone_bugfix_regs = (uint32 *) di->regs;

	/* if mapping registers to vmem failed then pass on error */
	if (si->regs_area < 0) return si->regs_area;

	/* work out a name for the ROM mapping*/
	sprintf(buffer, DEVICE_FORMAT " rom",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);

	/* disable ROM shadowing, we want the guaranteed exact contents of the chip */
	/* warning:
	 * don't touch: (confirmed) NV04, NV05, NV05-M64, NV11 all shutoff otherwise.
	 * NV18, NV28 and NV34 keep working.
	 * confirmed NV28 and NV34 to use upper part of shadowed ROM for scratch purposes,
	 * however the actual ROM content (so the used part) is intact (confirmed). */
	//set_pci(ENCFG_ROMSHADOW, 4, 0);

	/* get ROM memory mapped base adress - this is defined in the PCI standard */
	tmpUlong = get_pci(PCI_rom_base, 4);
	if (tmpUlong)
	{
		/* ROM was assigned an adress, so enable ROM decoding - see PCI standard */
		tmpUlong |= 0x00000001;
		set_pci(PCI_rom_base, 4, tmpUlong);

		rom_area = map_physical_memory(
			buffer,
			(void *)di->pcii.u.h0.rom_base_pci,
			di->pcii.u.h0.rom_size,
			B_ANY_KERNEL_ADDRESS,
			B_READ_AREA,
			(void **)&(rom_temp)
		);

		/* BUG FIX: check the mapping result BEFORE dereferencing rom_temp;
		 * the original read rom_temp[0] even when the mapping had failed. */
		if (rom_area < 0)
		{
			/* mapping failed; force using ISA legacy map as fall-back */
			tmpUlong = 0x00000000;
		}
		/* check if we got the BIOS signature (might fail on laptops..) */
		else if (rom_temp[0]!=0x55 || rom_temp[1]!=0xaa)
		{
			/* apparantly no ROM is mapped here */
			delete_area(rom_area);
			rom_area = -1;
			/* force using ISA legacy map as fall-back */
			tmpUlong = 0x00000000;
		}
	}

	if (!tmpUlong)
	{
		/* ROM was not assigned an adress, fetch it from ISA legacy memory map! */
		rom_area = map_physical_memory(
			buffer,
			(void *)0x000c0000,
			65536,
			B_ANY_KERNEL_ADDRESS,
			B_READ_AREA,
			(void **)&(rom_temp)
		);
	}

	/* if mapping ROM to vmem failed then clean up and pass on error */
	if (rom_area < 0) {
		delete_area(si->regs_area);
		si->regs_area = -1;
		return rom_area;
	}

	/* dump ROM to file if selected in skel.settings
	 * (ROM always fits in 64Kb: checked TNT1 - FX5950) */
	if (current_settings.dumprom) dumprom (rom_temp, 65536);
	/* make a copy of ROM for future reference */
	memcpy (si->rom_mirror, rom_temp, 65536);

	/* disable ROM decoding - this is defined in the PCI standard, and delete the area */
	tmpUlong = get_pci(PCI_rom_base, 4);
	tmpUlong &= 0xfffffffe;
	set_pci(PCI_rom_base, 4, tmpUlong);
	delete_area(rom_area);

	/* work out a name for the framebuffer mapping*/
	sprintf(buffer, DEVICE_FORMAT " framebuffer",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);

	/* map the framebuffer into vmem, using Write Combining*/
	si->fb_area = map_physical_memory(
		buffer,
		/* WARNING: Nvidia needs to map framebuffer as viewed from PCI space! */
		(void *) di->pcii.u.h0.base_registers_pci[frame_buffer],
		di->pcii.u.h0.base_register_sizes[frame_buffer],
		B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA + B_WRITE_AREA,
		&(si->framebuffer));

	/*if failed with write combining try again without*/
	if (si->fb_area < 0) {
		si->fb_area = map_physical_memory(
			buffer,
			/* WARNING: Nvidia needs to map framebuffer as viewed from PCI space! */
			(void *) di->pcii.u.h0.base_registers_pci[frame_buffer],
			di->pcii.u.h0.base_register_sizes[frame_buffer],
			B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA + B_WRITE_AREA,
			&(si->framebuffer));
	}

	/* if there was an error, delete our other areas and pass on error*/
	if (si->fb_area < 0)
	{
		delete_area(si->regs_area);
		si->regs_area = -1;
		return si->fb_area;
	}
//fixme: retest for card coldstart and PCI/virt_mem mapping!!
	/* remember the DMA address of the frame buffer for BDirectWindow?? purposes */
	si->framebuffer_pci = (void *) di->pcii.u.h0.base_registers_pci[frame_buffer];

	// remember settings for use here and in accelerant
	si->settings = current_settings;

	/* in any case, return the result */
	return si->fb_area;
}
Exemple #28
0
/* Device open hook for the Nvidia kernel driver.
 *
 * On first open: creates the shared_info area, allocates a contiguous DMA
 * command buffer (over-allocated 2x so a size-aligned sub-range can be mapped
 * write-combined), maps the device (registers/ROM/framebuffer), detects
 * UMA memory size for integrated GPUs, and installs the vblank interrupt
 * handler when a usable IRQ line is assigned. Subsequent opens just bump the
 * open count. Cleanup on failure unwinds via the goto chain at the bottom —
 * the labels must stay in this exact order (alldma -> uadma -> shared).
 *
 * Returns B_OK (with *cookie = device_info) or a negative error code. */
static status_t
open_hook(const char* name, uint32 flags, void** cookie)
{
	int32 index = 0;
	device_info *di;
	shared_info *si;
	thread_id	thid;
	thread_info	thinfo;
	status_t	result = B_OK;
	char shared_name[B_OS_NAME_LENGTH];
	physical_entry map[1];
	size_t net_buf_size;
	void *unaligned_dma_buffer;

	/* find the device name in the list of devices */
	/* we're never passed a name we didn't publish */
	while (pd->device_names[index]
		&& (strcmp(name, pd->device_names[index]) != 0))
		index++;

	/* for convienience */
	di = &(pd->di[index]);

	/* make sure no one else has write access to the common data */
	AQUIRE_BEN(pd->kernel);

	/* if it's already open for writing */
	if (di->is_open) {
		/* mark it open another time */
		goto mark_as_open;
	}
	/* create the shared_info area */
	sprintf(shared_name, DEVICE_FORMAT " shared",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);
	/* create this area with NO user-space read or write permissions, to prevent accidental damage */
	di->shared_area = create_area(shared_name, (void **)&(di->si), B_ANY_KERNEL_ADDRESS,
		((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)), B_FULL_LOCK,
		B_USER_CLONEABLE_AREA);
	if (di->shared_area < 0) {
		/* return the error */
		result = di->shared_area;
		goto done;
	}

	/* save a few dereferences */
	si = di->si;

	/* create the DMA command buffer area */
	//fixme? for R4.5 a workaround for cloning would be needed!
	/* we want to setup a 1Mb buffer (size must be multiple of B_PAGE_SIZE) */
	net_buf_size = ((1 * 1024 * 1024) + (B_PAGE_SIZE-1)) & ~(B_PAGE_SIZE-1);
	/* create the area that will hold the DMA command buffer */
	si->unaligned_dma_area =
		create_area("NV DMA cmd buffer",
			(void **)&unaligned_dma_buffer,
			B_ANY_KERNEL_ADDRESS,
			2 * net_buf_size, /* take twice the net size so we can have MTRR-WC even on old systems */
			B_32_BIT_CONTIGUOUS, /* GPU always needs access */
			B_USER_CLONEABLE_AREA | B_READ_AREA | B_WRITE_AREA);
			// TODO: Physical aligning can be done without waste using the
			// private create_area_etc().
	/* on error, abort */
	if (si->unaligned_dma_area < 0)
	{
		/* free the already created shared_info area, and return the error */
		result = si->unaligned_dma_area;
		goto free_shared;
	}
	/* we (also) need the physical adress our DMA buffer is at, as this needs to be
	 * fed into the GPU's engine later on. Get an aligned adress so we can use MTRR-WC
	 * even on older CPU's. */
	/* NOTE(review): get_memory_map()'s return value is not checked here —
	 * map[0].address would be undefined on failure; confirm it cannot fail
	 * for a just-created locked area. */
	get_memory_map(unaligned_dma_buffer, B_PAGE_SIZE, map, 1);
	/* round the physical address up to the next net_buf_size boundary; the
	 * 2x over-allocation above guarantees this aligned range fits */
	si->dma_buffer_pci = (void*)
		((map[0].address + net_buf_size - 1) & ~(net_buf_size - 1));

	/* map the net DMA command buffer into vmem, using Write Combining */
	si->dma_area = map_physical_memory(
		"NV aligned DMA cmd buffer", (addr_t)si->dma_buffer_pci, net_buf_size,
		B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA, &(si->dma_buffer));
	/* if failed with write combining try again without */
	if (si->dma_area < 0) {
		si->dma_area = map_physical_memory(
			"NV aligned DMA cmd buffer", (addr_t)si->dma_buffer_pci,
			net_buf_size, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &(si->dma_buffer));
	}
	/* if there was an error, delete our other areas and pass on error*/
	if (si->dma_area < 0)
	{
		/* free the already created areas, and return the error */
		result = si->dma_area;
		goto free_shared_and_uadma;
	}

	/* save the vendor and device IDs */
	si->vendor_id = di->pcii.vendor_id;
	si->device_id = di->pcii.device_id;
	si->revision = di->pcii.revision;
	si->bus = di->pcii.bus;
	si->device = di->pcii.device;
	si->function = di->pcii.function;

	/* ensure that the accelerant's INIT_ACCELERANT function can be executed */
	si->accelerant_in_use = false;
	/* preset singlehead card to prevent early INT routine calls (once installed) to
	 * wrongly identify the INT request coming from us! */
	si->ps.secondary_head = false;

	/* note the amount of system RAM the system BIOS assigned to the card if applicable:
	 * unified memory architecture (UMA) */
	switch ((((uint32)(si->device_id)) << 16) | si->vendor_id)
	{
	case 0x01a010de: /* Nvidia GeForce2 Integrated GPU */
		/* device at bus #0, device #0, function #1 holds value at byte-index 0x7C */
		si->ps.memory_size = 1024 * 1024 *
			(((((*pci_bus->read_pci_config)(0, 0, 1, 0x7c, 4)) & 0x000007c0) >> 6) + 1);
		/* last 64kB RAM is used for the BIOS (or something else?) */
		si->ps.memory_size -= (64 * 1024);
		break;
	case 0x01f010de: /* Nvidia GeForce4 MX Integrated GPU */
		/* device at bus #0, device #0, function #1 holds value at byte-index 0x84 */
		si->ps.memory_size = 1024 * 1024 *
			(((((*pci_bus->read_pci_config)(0, 0, 1, 0x84, 4)) & 0x000007f0) >> 4) + 1);
		/* last 64kB RAM is used for the BIOS (or something else?) */
		si->ps.memory_size -= (64 * 1024);
		break;
	default:
		/* all other cards have own RAM: the amount of which is determined in the
		 * accelerant. */
		break;
	}

	/* map the device */
	result = map_device(di);
	if (result < 0) goto free_shared_and_alldma;

	/* we will be returning OK status for sure now */
	result = B_OK;

	/* disable and clear any pending interrupts */
	//fixme:
	//distinquish between crtc1/crtc2 once all heads get seperate driver instances!
	disable_vbi_all(di->regs);

	/* preset we can't use INT related functions */
	si->ps.int_assigned = false;

	/* create a semaphore for vertical blank management */
	si->vblank = create_sem(0, di->name);
	/* a missing vblank semaphore is not fatal: the device still opens, just
	 * without INT-based vblank support */
	if (si->vblank < 0) goto mark_as_open;

	/* change the owner of the semaphores to the opener's team */
	/* this is required because apps can't aquire kernel semaphores */
	thid = find_thread(NULL);
	get_thread_info(thid, &thinfo);
	set_sem_owner(si->vblank, thinfo.team);

	/* If there is a valid interrupt line assigned then set up interrupts */
	if ((di->pcii.u.h0.interrupt_pin == 0x00) ||
	    (di->pcii.u.h0.interrupt_line == 0xff) || /* no IRQ assigned */
	    (di->pcii.u.h0.interrupt_line <= 0x02))   /* system IRQ assigned */
	{
		/* delete the semaphore as it won't be used */
		delete_sem(si->vblank);
		si->vblank = -1;
	}
	else
	{
		/* otherwise install our interrupt handler */
		result = install_io_interrupt_handler(di->pcii.u.h0.interrupt_line, nv_interrupt, (void *)di, 0);
		/* bail if we couldn't install the handler */
		if (result != B_OK)
		{
			/* delete the semaphore as it won't be used */
			delete_sem(si->vblank);
			si->vblank = -1;
		}
		else
		{
			/* inform accelerant(s) we can use INT related functions */
			si->ps.int_assigned = true;
		}
	}

mark_as_open:
	/* mark the device open */
	di->is_open++;

	/* send the cookie to the opener */
	*cookie = di;

	goto done;


free_shared_and_alldma:
	/* clean up our aligned DMA area */
	delete_area(si->dma_area);
	si->dma_area = -1;
	si->dma_buffer = NULL;

free_shared_and_uadma:
	/* clean up our unaligned DMA area */
	delete_area(si->unaligned_dma_area);
	si->unaligned_dma_area = -1;
	si->dma_buffer_pci = NULL;

free_shared:
	/* clean up our shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;

done:
	/* end of critical section */
	RELEASE_BEN(pd->kernel);

	/* all done, return the status */
	return result;
}
Exemple #29
0
/*
** Allocate and initialize the per-card Symbios object: set up scripts RAM
** (either on-board SRAM mapped in, or an off-board contiguous buffer),
** soft-reset the chip, program the clock (doubler/quadrupler if present),
** derive the sync-transfer parameter table from the measured SCLK, and
** initialize per-target state. Returns NULL on any failure (nothing leaked).
*/
static Symbios *create_cardinfo(int num, pci_info *pi, int flags)
{
	char name[32];
	Symbios *s;
	int i,scf;
	area_id aid;
	uint32 stest2,stest4;

	/* reject cards with no sensible IRQ routing */
	if((pi->u.h0.interrupt_line == 0) || (pi->u.h0.interrupt_line > 128)) {
		return NULL; /* invalid IRQ */
	}

	if(!(s = (Symbios *) malloc(sizeof(Symbios)))) return NULL;

	s->num = num;
	s->iobase = pi->u.h0.base_registers[0];
	s->irq = pi->u.h0.interrupt_line;
	s->hwlock = 0;
	s->startqueue = NULL;
	s->startqueuetail = NULL;
	s->active = NULL;

	sprintf(name,"sym%d:sram",num);
	if(flags & symf_sram){
		/* chip has on-board scripts RAM: map it into the kernel */
		unsigned char *c;
		s->sram_phys = pi->u.h0.base_registers[2];
		if((aid=map_physical_memory(name, s->sram_phys, 4096,
									 B_ANY_KERNEL_ADDRESS, B_READ_AREA + B_WRITE_AREA,
									 (void **) &(s->script))) < 0){
			free(s);
			return NULL;
		}
		/* memory io test: write a pattern and read it back to verify the
		   SRAM is actually there and responding */
		c = (unsigned char *) s->script;
		for(i=0;i<4096;i++) c[i] = (255 - (i & 0xff));
		for(i=0;i<4096;i++) {
			if(c[i] != (255 - (i & 0xff))) {
				d_printf("symbios%d: scripts ram io error @ %d\n",num,i);
				goto err;
			}
		}
	} else {
		/* no on-board SRAM: allocate physically contiguous host memory
		   the chip can DMA scripts from */
		uchar *a;
		physical_entry entries[2];
		aid = create_area(name, (void **)&a, B_ANY_KERNEL_ADDRESS, 4096*5,
			B_32_BIT_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
		/* create_area() returns a negative error code on failure; testing
		   only selected codes (B_ERROR/B_BAD_VALUE/B_NO_MEMORY) would let
		   other failures through with an invalid area id */
		if(aid < 0){
			free(s);
			return NULL;
		}
		/* NOTE(review): get_memory_map() result is not checked here --
		   presumably it cannot fail for a freshly created area; verify */
		get_memory_map(a, 4096, entries, 2);
		s->sram_phys = (uint32) entries[0].address;
		s->script = (uint32 *) a;
	}

	d_printf("symbios%d: scripts ram @ 0x%08lx, mapped to 0x%08lx (%s)\n",
			 num, s->sram_phys, (uint32) s->script,
			 flags & symf_sram ? "onboard" : "offboard" );

	/* what are we set at now? */
	s->host_targ_id = inb(sym_scid) & 0x07;
	dprintf("symbios%ld: host id %ld\n",s->num,s->host_targ_id);

	s->host_targ_id = 7;  /* XXX figure this out somehow... */
	s->max_targ_id = (flags & symf_wide) ? 15 : 7;

	/* save current chip state before the soft reset wipes it */
	stest2 = inb(sym_stest2);
	stest4 = inb(sym_stest4);

	/* software reset */
	outb(sym_istat, sym_istat_srst);
	spin(10000);
	outb(sym_istat, 0);
	spin(10000);

	/* initiator mode, full arbitration */
	outb(sym_scntl0, sym_scntl0_arb0 | sym_scntl0_arb1);

	outb(sym_scntl1, 0);
	outb(sym_scntl2, 0);

	/* initiator id=7, respond to reselection */
	/* respond to reselect of id 7 */
	outb(sym_respid, id_bits[s->host_targ_id]);
	outb(sym_scid, sym_scid_rre | s->host_targ_id);

	outb(sym_dmode, 0);

	dprintf("symbios%ld: stest2 = 0x%02lx, stest4 = 0x%02lx\n",s->num,stest2,stest4);

	/* no differential, no loopback, no hiz, no always-wide, no filter, no lowlevel */
	outb(sym_stest2, 0); // save diff bit
	outb(sym_stest3, 0);

//	if(flags & symf_quadrupler){
//		outb(sym_stest4, sym_stest4_lvd);
//	}

	outb(sym_stest1, 0);    /* make sure clock doubler is OFF  */

	/* measure the SCSI clock so we can program dividers correctly */
	s->sclk = sym_readclock(s);
	dprintf("symbios%ld: clock is %ldKHz\n",s->num,s->sclk);

	if(flags & symf_doubler){
		/* if we have a doubler and we don't already have an 80MHz clock */
		if((s->sclk > 35000) && (s->sclk < 45000)){
			dprintf("symbios%ld: enabling clock doubler...\n",s->num);
			outb(sym_stest1, 0x08);  /* enable doubler */
			spin(200);                /* wait 20us      */
			outb(sym_stest3, 0xa0);  /* halt sclk, enable TolerANT*/
			outb(sym_scntl3, 0x05);  /* SCLK/4         */
			outb(sym_stest1, 0x0c);  /* engage doubler */
			outb(sym_stest3, 0x80);  /* reenable sclk, leave TolerANT on  */

			spin(3000);

			s->sclk = sym_readclock(s);
			dprintf("symbios%ld: clock is %ldKHz\n",s->num,s->sclk);
		}
	}
	if(flags & symf_quadrupler){
		/* quadrupler-capable chips: same engage sequence, but the
		   resulting clock is assumed to be 160MHz regardless of what
		   sym_readclock() reports afterwards */
		if((s->sclk > 35000) && (s->sclk < 45000)){
			dprintf("symbios%ld: enabling clock quadrupler...\n",s->num);
			outb(sym_stest1, 0x08);  /* enable doubler */
			spin(200);                /* wait 20us      */
			outb(sym_stest3, 0xa0);  /* halt sclk, enable TolerANT*/
			outb(sym_scntl3, 0x05);  /* SCLK/4         */
			outb(sym_stest1, 0x0c);  /* engage doubler */
			outb(sym_stest3, 0x80);  /* reenable sclk, leave TolerANT on  */

			spin(3000);

			s->sclk = sym_readclock(s);
			dprintf("symbios%ld: clock is %ldKHz\n",s->num,s->sclk);
			s->sclk = 160000;
		}
	}
	outb(sym_stest3, 0x80);  /* leave TolerANT on  */

	scf = 0;
	/* set CCF / SCF according to specs */
	if(s->sclk < 25010) {
		dprintf("symbios%ld: unsupported clock frequency\n",s->num);
		goto err;  //		s->scntl3 = 0x01;
	} else if(s->sclk < 37510){
		dprintf("symbios%ld: unsupported clock frequency\n",s->num);
		goto err;  //		s->scntl3 = 0x02;
	} else if(s->sclk < 50010){
		/* 40MHz - divide by 1, 2 */
		scf = 0x10;
		s->scntl3 = 0x03;
	} else if(s->sclk < 75010){
		dprintf("symbios%ld: unsupported clock frequency\n",s->num);
		goto err; //		s->scntl3 = 0x04;
	} else if(s->sclk < 85000){
		/* 80 MHz - divide by 2, 4*/
		scf = 0x30;
		s->scntl3 = 0x05;
	} else {
		/* 160 MHz - divide by 4, 8 */
		scf = 0x50;
		s->scntl3 = 0x07;
	}


	s->maxoffset = (flags & symf_short) ? 8 : 15 ;
	s->syncsize = 0;

	/* build the negotiable sync-period table, fastest entries first */
	if(scf == 0x50){
		/* calculate values for 160MHz clock */
		for(i=0;i<4;i++){
			s->syncinfo[s->syncsize].sxfer = i << 5;
			s->syncinfo[s->syncsize].scntl3 = s->scntl3 | 0x90; /* /2, Ultra2 */
			s->syncinfo[s->syncsize].period_ns = (625 * (i+4)) / 100;
			s->syncinfo[s->syncsize].period = 4 * (s->syncinfo[s->syncsize].period_ns / 4);
			s->syncsize++;
		}
	}

	if(scf >= 0x30){
		/* calculate values for 80MHz clock */
		for(i=0;i<4;i++){
			s->syncinfo[s->syncsize].sxfer = i << 5;
			if(scf == 0x30){
				s->syncinfo[s->syncsize].scntl3 = s->scntl3 | 0x90; /* /2, Ultra */
			} else {
				s->syncinfo[s->syncsize].scntl3 = s->scntl3 | 0xb0; /* /4, Ultra2 */
			}

			s->syncinfo[s->syncsize].period_ns = (125 * (i+4)) / 10;
			s->syncinfo[s->syncsize].period = 4 * (s->syncinfo[s->syncsize].period_ns / 4);
			s->syncsize++;
		}
	}

	/* calculate values for 40MHz clock */
	for(i=0;i<8;i++){
		s->syncinfo[s->syncsize].sxfer = i << 5;
		s->syncinfo[s->syncsize].scntl3 = s->scntl3 | scf;
		s->syncinfo[s->syncsize].period_ns = 25 * (i+4);
		s->syncinfo[s->syncsize].period = 4 * (s->syncinfo[s->syncsize].period_ns / 4);
		s->syncsize++;
	}

	for(i=0;i<s->syncsize;i++){
		dprintf("symbios%ld: syncinfo[%d] = { %02x, %02x, %d ns, %d ns }\n",
				s->num, i,
				s->syncinfo[i].sxfer, s->syncinfo[i].scntl3,
				s->syncinfo[i].period_ns, s->syncinfo[i].period);
	}

	/* per-target state: the host's own id and ids beyond the bus width
	   are ignored; everyone else starts with sync (and, if wide, wide)
	   negotiation pending */
	for(i=0;i<16;i++){
		s->targ[i].id = i;
		s->targ[i].adapter = s;
		s->targ[i].wide = 0;
		s->targ[i].offset = 0;
		s->targ[i].status = status_inactive;

		if((i == s->host_targ_id) || (i > s->max_targ_id)){
			s->targ[i].flags = tf_ignore;
		} else {
			s->targ[i].flags = tf_ask_sync;
			if(flags & symf_wide) s->targ[i].flags |= tf_ask_wide;
//			s->targ[i].flags = 0;

			setparams(s->targ + i, 0, 0, 0);

			sprintf(name,"sym%ld:%02d:lock",s->num,i);
			s->targ[i].sem_targ = create_sem(1,name);

			sprintf(name,"sym%ld:%02d:done",s->num,i);
			s->targ[i].sem_done = create_sem(0,name);
		}
	}

	if(flags & symf_wide){
		s->idmask = 15;
		s->op_in = OP_WDATA_IN;
		s->op_out = OP_WDATA_OUT;
	} else {
		s->idmask = 7;
		s->op_in = OP_NDATA_IN;
		s->op_out = OP_NDATA_OUT;
	}

	reloc_script(s);
	return s;

err:
	/* aid is always valid here: every goto err occurs after the scripts
	   RAM area was successfully created/mapped */
	free(s);
	delete_area(aid);
	return NULL;
}
Exemple #30
0
/*!	Validates the RSDP, then locates and maps the root system description
	table: the XSDT when the RSDP is ACPI 2.0+ (revision > 0), falling back
	to the 32-bit RSDT when no XSDT is available or it fails validation.
	On success the mapped table is stored in sAcpiXsdt or sAcpiRsdt.
	Returns B_OK, B_BAD_DATA for an invalid RSDP, or B_ERROR on failure.
*/
static status_t
acpi_check_rsdt(acpi_rsdp* rsdp)
{
	if (acpi_validate_rsdp(rsdp) != B_OK)
		return B_BAD_DATA;

	bool usingXsdt = false;

	TRACE(("acpi: found rsdp at %p oem id: %.6s, rev %d\n",
		rsdp, rsdp->oem_id, rsdp->revision));
	TRACE(("acpi: rsdp points to rsdt at 0x%lx\n", rsdp->rsdt_address));

	uint32 length = 0;
	acpi_descriptor_header* rsdt = NULL;
	area_id rsdtArea = -1;
	if (rsdp->revision > 0) {
		length = rsdp->xsdt_length;
		rsdtArea = map_physical_memory("rsdt acpi",
			(uint32)rsdp->xsdt_address, rsdp->xsdt_length, B_ANY_KERNEL_ADDRESS, 
			B_KERNEL_READ_AREA, (void **)&rsdt);
		if (rsdt != NULL
			&& strncmp(rsdt->signature, ACPI_XSDT_SIGNATURE, 4) != 0) {
			delete_area(rsdtArea);
			rsdt = NULL;
			TRACE(("acpi: invalid extended system description table\n"));
		} else if (rsdt != NULL) {
			// only mark the XSDT as in use when it actually mapped and
			// carries the right signature; previously usingXsdt was also
			// set when the mapping failed (rsdt == NULL), which made the
			// RSDT fallback below store its table in sAcpiXsdt
			usingXsdt = true;
		}
	}

	// if we're ACPI v1 or we fail to map the XSDT for some reason,
	// attempt to use the RSDT instead.
	if (rsdt == NULL) {
		// map and validate the root system description table
		rsdtArea = map_physical_memory("rsdt acpi",
			rsdp->rsdt_address, sizeof(acpi_descriptor_header),
			B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA, (void **)&rsdt);
		if (rsdt != NULL
			&& strncmp(rsdt->signature, ACPI_RSDT_SIGNATURE, 4) != 0) {
			delete_area(rsdtArea);
			rsdt = NULL;
			TRACE(("acpi: invalid root system description table\n"));
			return B_ERROR;
		}

		length = rsdt->length;
		// Map the whole table, not just the header
		TRACE(("acpi: rsdt length: %lu\n", length));
		delete_area(rsdtArea);
		rsdtArea = map_physical_memory("rsdt acpi",
			rsdp->rsdt_address, length, B_ANY_KERNEL_ADDRESS, 
			B_KERNEL_READ_AREA, (void **)&rsdt);
	}

	if (rsdt != NULL) {
		if (acpi_validate_rsdt(rsdt) != B_OK) {
			TRACE(("acpi: rsdt failed checksum validation\n"));
			delete_area(rsdtArea);
			return B_ERROR;
		} else {
			if (usingXsdt)
				sAcpiXsdt = rsdt;
			else
				sAcpiRsdt = rsdt;
			TRACE(("acpi: found valid %s at %p\n",
				usingXsdt ? ACPI_XSDT_SIGNATURE : ACPI_RSDT_SIGNATURE,
				rsdt));
		}
	} else
		return B_ERROR;

	return B_OK;
}