Пример #1
0
/*
 * Set up busmaster DMA for an IDE controller on the PCI bus.
 *
 * Probes the busmaster status register, allocates a wired, physically
 * contiguous page for the PRD (physical region descriptor) table and
 * maps a fixed physical window as the raw transfer buffer.
 *
 * Returns 0 on success, -1 if the controller does not report DMA
 * capability or the PRD page cannot be allocated.
 */
static	int	init_dma(pci_bus_cookie *cookie)
{
	uint8	val = in8(cookie->io_port + BM_STATUS_REG);

	TRACE(("entering init_dma status = %d\n",val));
	/* both drive bits clear -> controller did not report DMA capability */
	if (!( val &( BM_SR_MASK_DRV1 | BM_SR_MASK_DRV0 )))
	{
		TRACE(("BM_STATUS is wrong %d\n",val));
		return -1;
	}

	/* one wired, physically contiguous page holds the PRD table */
	cookie->prd_region_id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "ide_prd_buf",
		(void **)&cookie->prd_buf_address, REGION_ADDR_ANY_ADDRESS, 4096,
		REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	if (cookie->prd_region_id < 0) {
		TRACE(("init_dma: unable to create PRD region\n"));
		return -1;
	}

	/* latch the status register, keeping only the top (capability) bits;
	 * the original read had a stray '+ +' which was a harmless unary plus */
	cookie->stat_reg = in8(cookie->io_port + BM_STATUS_REG) & 0xe0;

	memset(cookie->prd_buf_address, 0, 4096);

	/* resolve the physical address of the PRD table for the controller */
	vm_get_page_mapping(vm_get_kernel_aspace_id(), (addr_t)cookie->prd_buf_address, &cookie->prd_phy_address);

	/* map a 64KB window at physical 0x80000 as the raw transfer buffer.
	 * NOTE(review): hard-coded physical address -- confirm this is intended
	 * and not leftover bring-up code. */
	cookie->raw_buffer = (uint8*)0x80000;
	vm_map_physical_memory(vm_get_kernel_aspace_id(), "test", (void *)&cookie->mapped_address,
		REGION_ADDR_ANY_ADDRESS, 0x10000, LOCK_RW|LOCK_KERNEL, (addr_t)cookie->raw_buffer);
	TRACE(("mapped address is %X\n",cookie->mapped_address));
	return 0;
}
Пример #2
0
/* Return the physical address backing the given kernel virtual address
 * (0 when the lookup does not fill in a mapping). */
static addr_t vtophys(void *virt)
{
	addr_t result = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), (addr_t)virt, &result);
	return result;
}
Пример #3
0
/*
 * Allocate and initialize the kernel port table, register the port
 * debugger commands and mark the ports subsystem as active.
 * Returns 0; panics if the table region cannot be created.
 */
int port_init(kernel_args *ka)
{
	const int table_size = sizeof(struct port_entry) * MAX_PORTS;
	int slot;

	// create the wired region backing the port table
	port_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "port_table", (void **)&ports,
		REGION_ADDR_ANY_ADDRESS, table_size, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
	if(port_region < 0)
		panic("unable to allocate kernel port table!\n");

	// every slot starts out free (id == -1)
	memset(ports, 0, table_size);
	for(slot = 0; slot < MAX_PORTS; slot++)
		ports[slot].id = -1;

	// add debugger commands
	dbg_add_command(&dump_port_list, "ports", "Dump a list of all active ports");
	dbg_add_command(&dump_port_info, "port", "Dump info about a particular port");

	ports_active = true;

	return 0;
}
Пример #4
0
/*
 * Second-stage init of the translation map module: reserve the iospace
 * virtual range with a null region so nothing else can be mapped there.
 * Returns 0; panics if the region cannot be created (the original code
 * silently ignored the result of vm_create_null_region).
 */
int vm_translation_map_module_init2(kernel_args *ka)
{
	void *temp = (void *)iospace_addr;
	region_id rid;

	dprintf("vm_translation_map_module_init2: creating iospace region\n");
	rid = vm_create_null_region(vm_get_kernel_aspace_id(), "iospace", &temp,
		REGION_ADDR_EXACT_ADDRESS, iospace_len);
	if(rid < 0)
		panic("vm_translation_map_module_init2: unable to create iospace region\n");

	return 0;
}
Пример #5
0
/*
 * Initialize kernel ELF support: create the image-list locks and build an
 * elf_image_info describing the already-loaded kernel itself so later
 * loads can be linked against kernel symbols.
 * Returns 0; panics if the kernel text segment region cannot be found.
 */
int elf_init(kernel_args *ka)
{
	vm_region_info rinfo;

	mutex_init(&image_lock, "kimages_lock");
	mutex_init(&image_load_lock, "kimages_load_lock");

	// build a image structure for the kernel, which has already been loaded
	// NOTE(review): create_image_struct() result is not NULL-checked here
	kernel_image = create_image_struct();

	// text segment
	kernel_image->regions[0].id = vm_find_region_by_name(vm_get_kernel_aspace_id(), "kernel_seg0");
	if(kernel_image->regions[0].id < 0)
		panic("elf_init: could not look up kernel text segment region\n");
	vm_get_region_info(kernel_image->regions[0].id, &rinfo);
	kernel_image->regions[0].start = rinfo.base;
	kernel_image->regions[0].size = rinfo.size;

	// data segment (optional -- the kernel may have only one segment)
	kernel_image->regions[1].id = vm_find_region_by_name(vm_get_kernel_aspace_id(), "kernel_seg1");
	if(kernel_image->regions[1].id >= 0) {
		vm_get_region_info(kernel_image->regions[1].id, &rinfo);
		kernel_image->regions[1].start = rinfo.base;
		kernel_image->regions[1].size = rinfo.size;
	} else {
		// there is no region 1 in kernel
		kernel_image->regions[1].start = 0;
		kernel_image->regions[1].size = 0;
	}

	// we know where the dynamic section is
	kernel_image->dynamic_ptr = (addr_t)ka->kernel_dynamic_section_addr.start;

	// parse the dynamic section
	if(elf_parse_dynamic_section(kernel_image) < 0)
		dprintf("elf_init: WARNING elf_parse_dynamic_section couldn't find dynamic section.\n");

	// insert it first in the list of kernel images loaded
	kernel_images = NULL;
	insert_image_in_list(kernel_image);

	return 0;
}
Пример #6
0
/*
 * Final teardown of a kernel ELF image: delete both segment regions,
 * release the vnode reference (if any), unlink the image from the
 * global list and free its storage.
 */
static void elf_unload_image_final( struct elf_image_info *image )
{
	int region;

	for( region = 0; region < 2; region++ )
		vm_delete_region( vm_get_kernel_aspace_id(), image->regions[region].id );

	if( image->vnode )
		vfs_put_vnode_ptr( image->vnode );

	remove_image_from_list(image);
	kfree( image->eheader );
	kfree( image );
}
Пример #7
0
/*
 * Arch-specific SMP init: on a multiprocessor machine, copy the per-CPU
 * APIC data collected by the boot loader into kernel globals, create
 * regions covering the local APIC and IO APIC pages, init the boot CPU's
 * local APIC, and install the local-APIC interrupt handlers.
 * On a uniprocessor machine only num_cpus is set. Always returns 0.
 */
int arch_smp_init(kernel_args *ka)
{
    dprintf("arch_smp_init: entry\n");

    if(ka->num_cpus > 1) {
        // setup some globals
        num_cpus = ka->num_cpus;
        apic = ka->arch_args.apic;
        ioapic = ka->arch_args.ioapic;
        memcpy(cpu_apic_id, ka->arch_args.cpu_apic_id, sizeof(ka->arch_args.cpu_apic_id));
        memcpy(cpu_os_id, ka->arch_args.cpu_os_id, sizeof(ka->arch_args.cpu_os_id));
        memcpy(cpu_apic_version, ka->arch_args.cpu_apic_version, sizeof(ka->arch_args.cpu_apic_version));
        apic_timer_tics_per_sec = ka->arch_args.apic_time_cv_factor;

        // setup regions that represent the apic & ioapic
        // NOTE(review): REGION_WIRING_WIRED_ALREADY at the exact addresses
        // from the kernel_args -- presumably the pages were mapped by the
        // boot loader and these regions just reserve them; confirm.
        vm_create_anonymous_region(vm_get_kernel_aspace_id(), "local_apic", (void *)&apic,
                                   REGION_ADDR_EXACT_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);
        vm_create_anonymous_region(vm_get_kernel_aspace_id(), "ioapic", (void *)&ioapic,
                                   REGION_ADDR_EXACT_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

        // set up the local apic on the boot cpu
        arch_smp_init_percpu(ka, 1);

        // set up the interrupt handlers for local apic interrupts.
        // they are mapped to absolute interrupts 0xfb-0xff, but the io handlers are all
        // base 0x20, so subtract 0x20.
        int_set_io_interrupt_handler(0xfb - 0x20, &x86_64_timer_interrupt, NULL, "lapic timer");
        int_set_io_interrupt_handler(0xfd - 0x20, &x86_64_ici_interrupt, NULL, "ici");
        int_set_io_interrupt_handler(0xfe - 0x20, &x86_64_smp_error_interrupt, NULL, "lapic smp error");
        int_set_io_interrupt_handler(0xff - 0x20, &x86_64_spurious_interrupt, NULL, "lapic spurious");
    } else {
        num_cpus = 1;
    }

    return 0;
}
Пример #8
0
/*
 * Late arch VM init: map the low 640KB of physical memory (0x0 - 0xa0000)
 * into kernel space as the "dma_region" so legacy DMA/BIOS memory can be
 * reached. Returns 0 on success or ERR_NO_MEMORY if the mapping fails.
 */
int arch_vm_init_endvm(kernel_args *ka)
{
	void *mapped;
	region_id region;

	dprintf("arch_vm_init_endvm: entry\n");

	// map 0 - 0xa0000 directly
	region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "dma_region", &mapped,
		REGION_ADDR_ANY_ADDRESS, 0xa0000, LOCK_RW|LOCK_KERNEL, 0x0);
	if(region < 0) {
		panic("arch_vm_init_endvm: unable to map dma region\n");
		return ERR_NO_MEMORY;
	}

	return 0;
}
Пример #9
0
/*
 * Create the (single) kernel process.
 * Validates the name, builds the common process structure, fills in the
 * kernel-specific fields, initializes the arch-dependent part, publishes
 * the process globally and adds it to the processes list.
 * Returns the new process id or INVALID_PROCESSID on failure; panics if
 * a kernel process already exists.
 */
proc_id proc_create_kernel_process(const char* name)
{
    process_t *kproc;
    proc_id kproc_id;

    ASSERT_MSG(name != NULL && strlen(name) <= SYS_MAX_OS_NAME_LEN,
        "proc_create_kernel_process: kernel process name is invalid!\n");

    /* only one kernel process may ever exist */
    if(kernel_process)
        panic("proc_create_kernel_process: kernel process already exists!\n");

    /* allocate and pre-initialize the common process structure */
    kproc = create_process_common(name, NULL);
    if(!kproc)
        return INVALID_PROCESSID;

    /* fill in the kernel-specific fields */
    kproc->state = PROCESS_STATE_NORMAL;
    kproc_id = kproc->id;
    kproc->aid = vm_get_kernel_aspace_id();
    kproc->aspace = vm_get_kernel_aspace();
    kproc->process_role = PROCESS_ROLE_KERNEL;
    kproc->def_prio = process_roles_props[kproc->process_role].def_prio;
    kproc->def_sched_policy.raw =
        process_roles_props[kproc->process_role].def_sched_policy.raw;

    /* init arch-dependent part; undo everything on failure */
    if(arch_init_process_struct(kproc) != NO_ERROR) {
        vm_put_aspace(kproc->aspace);
        destroy_process_common(kproc);
        return INVALID_PROCESSID;
    }

    /* publish as the global kernel process */
    kernel_process = kproc;

    /* make it visible in the processes list */
    put_process_to_list(kproc);

    return kproc_id;
}
Пример #10
0
/*
 * Initialize a VIA Rhine NIC: map its register window, allocate the rx
 * descriptor ring, reset the chip, read the MAC address, program the
 * rx/tx engines and unmask interrupts.
 * Returns 0 on success, a negative value on error.
 *
 * NOTE(review): the large #if 0 section below is leftover RTL8139-style
 * init code kept for reference; `temp' and the err1: label are referenced
 * only from inside it, so the compiler may warn about them in this build.
 */
int rhine_init(rhine *r)
{
	bigtime_t time;
	int err = -1;
	addr_t temp;	/* only used by the disabled code in the #if 0 block */
	int i;

	dprintf("rhine_init: r %p\n", r);

	/* map the device register window into kernel space */
	r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rhine_region", (void **)&r->virt_base,
		REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
	if(r->region < 0) {
		dprintf("rhine_init: error creating memory mapped region\n");
		err = -1;
		goto err;
	}
	dprintf("rhine mapped at address 0x%lx\n", r->virt_base);

	/* create regions for tx and rx descriptors */
	/* NOTE(review): the results of these two region creations are not
	 * checked -- confirm failure here cannot happen or add handling */
	r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_rxdesc", (void **)&r->rxdesc,
		REGION_ADDR_ANY_ADDRESS, RXDESC_COUNT * sizeof(struct rhine_rx_desc), REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->rxdesc_phys = vtophys(r->rxdesc);
	dprintf("rhine: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
	r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_txdesc", (void **)&r->txdesc,
		REGION_ADDR_ANY_ADDRESS, TXDESC_COUNT * sizeof(struct rhine_tx_desc), REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->txdesc_phys = vtophys(r->txdesc);
	dprintf("rhine: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
	r->reg_spinlock = 0;

	/* stick all rx and tx buffers in a circular buffer */
	/* each descriptor's `next' holds the physical address of the following
	 * descriptor; the last one links back to the first */
	for (i=0; i < RXDESC_COUNT; i++) {
		RXDESC(r, i).status = 0;
		RXDESC(r, i).framelen = 0;
		RXDESC(r, i).buflen = 0;
		RXDESC(r, i).ptr = 0;
		if (i == RXDESC_COUNT-1)
			RXDESC(r, i).next = RXDESC_PHYS(r, 0);
		else
			RXDESC(r, i).next = RXDESC_PHYS(r, i + 1);
	}
	// XXX do same for tx


	r->rx_head = r->rx_tail = 0;

	/* reset the chip (poll for up to ~1s for the reset bit to clear) */
	time = system_time();
	RHINE_WRITE_16(r, RHINE_CR0, 0x8000); // reset the chip
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			break;
		}
	} while(RHINE_READ_16(r, RHINE_CR0) & 0x8000);

	if (RHINE_READ_16(r, RHINE_CR0) & 0x8000) {
		dprintf("chip didn't reset, trying alternate method\n");
		RHINE_SETBITS_8(r, RHINE_MISC_CR1, 0x40);
		thread_snooze(10000);
	}

	/* read in the mac address */
	RHINE_WRITE_8(r, RHINE_EECSR, RHINE_READ_8(r, RHINE_EECSR) | (1<<5));
	r->mac_addr[0] = RHINE_READ_8(r, RHINE_PAR0); 
	r->mac_addr[1] = RHINE_READ_8(r, RHINE_PAR1);
   	r->mac_addr[2] = RHINE_READ_8(r, RHINE_PAR2);
	r->mac_addr[3] = RHINE_READ_8(r, RHINE_PAR3);
   	r->mac_addr[4] = RHINE_READ_8(r, RHINE_PAR4);
   	r->mac_addr[5] = RHINE_READ_8(r, RHINE_PAR5);
  	dprintf("rhine: mac addr %x:%x:%x:%x:%x:%x\n",
  		r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
  		r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

	/* set up the rx state */
	/* 64 byte fifo threshold, all physical/broadcast/multicast/small/error packets accepted */
	RHINE_WRITE_8(r, RHINE_RCR, (0<<5) | (1<<4) | (1<<3) | (1<<2) | (1<<1) | (1<<0));
	RHINE_WRITE_32(r, RHINE_RDA0, RXDESC_PHYS(r, r->rx_head));

	/* set up tx state */
	/* 64 byte fifo, default backup, default loopback mode */
	RHINE_WRITE_8(r, RHINE_TCR, 0);

	/* mask all interrupts */
	RHINE_WRITE_16(r, RHINE_IMR0, 0);

	/* clear all pending interrupts */
	RHINE_WRITE_16(r, RHINE_ISR0, 0xffff);
	
	/* set up the interrupt handler */
	int_set_io_interrupt_handler(r->irq, &rhine_int, r, "rhine");

	{
		/* NOTE(review): a single static 2KB buffer backs only the first
		 * rx descriptor -- looks like bring-up/test code, confirm */
		static uint8 buf[2048];
		RXDESC(r, r->rx_tail).ptr = vtophys(buf);
		RXDESC(r, r->rx_tail).buflen = sizeof(buf);
		RXDESC(r, r->rx_tail).status = 0;
		RXDESC(r, r->rx_tail).framelen = RHINE_RX_OWNER;
		r->rx_tail++;

		RHINE_WRITE_16(r, RHINE_CR0, (1<<1) | (1<<3) | (1<<6));
	}

	/* unmask all interrupts */
	RHINE_WRITE_16(r, RHINE_IMR0, 0xffff);	

/* disabled: leftover RTL8139-style init kept for reference */
#if 0
	// try to reset the device
 	time = system_time();
	RTL_WRITE_8(r, RT_CHIPCMD, RT_CMD_RESET);
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			err = -1;
			goto err1;
		}
	} while((RTL_READ_8(r, RT_CHIPCMD) & RT_CMD_RESET));

	// create a rx and tx buf
	r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_rxbuf", (void **)&r->rxbuf,
		REGION_ADDR_ANY_ADDRESS, 64*1024 + 16, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rhine_txbuf", (void **)&r->txbuf,
		REGION_ADDR_ANY_ADDRESS, 8*1024, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);

	// set up the transmission buf and sem
	r->tx_sem = sem_create(4, "rhine_txsem");
	mutex_init(&r->lock, "rhine");
	r->txbn = 0;
	r->last_txbn = 0;
	r->rx_sem = sem_create(0, "rhine_rxsem");
	r->reg_spinlock = 0;

	// set up the interrupt handler
	int_set_io_interrupt_handler(r->irq, &rhine_int, r, "rhine");

	// read the mac address
	r->mac_addr[0] = RTL_READ_8(r, RT_IDR0);
	r->mac_addr[1] = RTL_READ_8(r, RT_IDR0 + 1);
	r->mac_addr[2] = RTL_READ_8(r, RT_IDR0 + 2);
	r->mac_addr[3] = RTL_READ_8(r, RT_IDR0 + 3);
  	r->mac_addr[4] = RTL_READ_8(r, RT_IDR0 + 4);
  	r->mac_addr[5] = RTL_READ_8(r, RT_IDR0 + 5);

  	dprintf("rhine: mac addr %x:%x:%x:%x:%x:%x\n",
  		r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
  		r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

	// enable writing to the config registers
	RTL_WRITE_8(r, RT_CFG9346, 0xc0);

	// reset config 1
	RTL_WRITE_8(r, RT_CONFIG1, 0);

	// Enable receive and transmit functions
	RTL_WRITE_8(r, RT_CHIPCMD, RT_CMD_RX_ENABLE | RT_CMD_TX_ENABLE);

	// Set Rx FIFO threashold to 256, Rx size to 64k+16, 256 byte DMA burst
	RTL_WRITE_32(r, RT_RXCONFIG, 0x00009c00);

	// Set Tx 256 byte DMA burst
	RTL_WRITE_32(r, RT_TXCONFIG, 0x03000400);

	// Turn off lan-wake and set the driver-loaded bit
	RTL_WRITE_8(r, RT_CONFIG1, (RTL_READ_8(r, RT_CONFIG1) & ~0x30) | 0x20);

	// Enable FIFO auto-clear
	RTL_WRITE_8(r, RT_CONFIG4, RTL_READ_8(r, RT_CONFIG4) | 0x80);

	// go back to normal mode
	RTL_WRITE_8(r, RT_CFG9346, 0);

	// Setup RX buffers
	*(int *)r->rxbuf = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), r->rxbuf, &temp);
	dprintf("rx buffer will be at 0x%lx\n", temp);
	RTL_WRITE_32(r, RT_RXBUF, temp);

	// Setup TX buffers
	dprintf("tx buffer (virtual) is at 0x%lx\n", r->txbuf);
	*(int *)r->txbuf = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), r->txbuf, &temp);
	RTL_WRITE_32(r, RT_TXADDR0, temp);
	RTL_WRITE_32(r, RT_TXADDR1, temp + 2*1024);
	dprintf("first half of txbuf at 0x%lx\n", temp);
	*(int *)(r->txbuf + 4*1024) = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), r->txbuf + 4*1024, &temp);
	RTL_WRITE_32(r, RT_TXADDR2, temp);
	RTL_WRITE_32(r, RT_TXADDR3, temp + 2*1024);
	dprintf("second half of txbuf at 0x%lx\n", temp);

/*
	RTL_WRITE_32(r, RT_TXSTATUS0, RTL_READ_32(r, RT_TXSTATUS0) | 0xfffff000);
	RTL_WRITE_32(r, RT_TXSTATUS1, RTL_READ_32(r, RT_TXSTATUS1) | 0xfffff000);
	RTL_WRITE_32(r, RT_TXSTATUS2, RTL_READ_32(r, RT_TXSTATUS2) | 0xfffff000);
	RTL_WRITE_32(r, RT_TXSTATUS3, RTL_READ_32(r, RT_TXSTATUS3) | 0xfffff000);
*/
	// Reset RXMISSED counter
	RTL_WRITE_32(r, RT_RXMISSED, 0);

	// Enable receiving broadcast and physical match packets
//	RTL_WRITE_32(r, RT_RXCONFIG, RTL_READ_32(r, RT_RXCONFIG) | 0x0000000a);
	RTL_WRITE_32(r, RT_RXCONFIG, RTL_READ_32(r, RT_RXCONFIG) | 0x0000000f);

	// Filter out all multicast packets
	RTL_WRITE_32(r, RT_MAR0, 0);
	RTL_WRITE_32(r, RT_MAR0 + 4, 0);

	// Disable all multi-interrupts
	RTL_WRITE_16(r, RT_MULTIINTR, 0);

	RTL_WRITE_16(r, RT_INTRMASK, MYRT_INTS);
//	RTL_WRITE_16(r, RT_INTRMASK, 0x807f);

	// Enable RX/TX once more
	RTL_WRITE_8(r, RT_CHIPCMD, RT_CMD_RX_ENABLE | RT_CMD_TX_ENABLE);

	RTL_WRITE_8(r, RT_CFG9346, 0);
#endif

	return 0;

/* only reached from the disabled #if 0 code above */
err1:
	vm_delete_region(vm_get_kernel_aspace_id(), r->region);
err:
	return err;
}
Пример #11
0
image_id elf_load_kspace(const char *path, const char *sym_prepend)
{
	struct Elf32_Ehdr *eheader;
	struct Elf32_Phdr *pheaders;
	struct elf_image_info *image;
	void *vnode = NULL;
	int fd;
	int err;
	int i;
	ssize_t len;
	addr_t lowest_address = 0;
	addr_t highest_address = 0;

	dprintf("elf_load_kspace: entry path '%s'\n", path);

	fd = sys_open(path, 0);
	if(fd < 0)
		return fd;

	err = vfs_get_vnode_from_fd(fd, true, &vnode);
	if(err < 0)
		goto error0;

	// XXX awful hack to keep someone else from trying to load this image
	// probably not a bad thing, shouldn't be too many races
	mutex_lock(&image_load_lock);

	// make sure it's not loaded already. Search by vnode
	image = find_image_by_vnode(vnode);
	if( image ) {
		atomic_add( &image->ref_count, 1 );
		//err = ERR_NOT_ALLOWED;
		goto done;
	}

	eheader = (struct Elf32_Ehdr *)kmalloc( sizeof( *eheader ));
	if( !eheader ) {
		err = ERR_NO_MEMORY;
		goto error;
	}

	len = sys_read(fd, eheader, 0, sizeof(*eheader));
	if(len < 0) {
		err = len;
		goto error1;
	}
	if(len != sizeof(*eheader)) {
		// short read
		err = ERR_INVALID_BINARY;
		goto error1;
	}
	err = verify_eheader(eheader);
	if(err < 0)
		goto error1;

	image = create_image_struct();
	if(!image) {
		err = ERR_NO_MEMORY;
		goto error1;
	}
	image->vnode = vnode;
	image->eheader = eheader;

	pheaders = kmalloc(eheader->e_phnum * eheader->e_phentsize);
	if(pheaders == NULL) {
		dprintf("error allocating space for program headers\n");
		err = ERR_NO_MEMORY;
		goto error2;
	}

//	dprintf("reading in program headers at 0x%x, len 0x%x\n", eheader.e_phoff, eheader.e_phnum * eheader.e_phentsize);
	len = sys_read(fd, pheaders, eheader->e_phoff, eheader->e_phnum * eheader->e_phentsize);
	if(len < 0) {
		err = len;
		dprintf("error reading in program headers\n");
		goto error3;
	}
	if(len != eheader->e_phnum * eheader->e_phentsize) {
		dprintf("short read while reading in program headers\n");
		err = -1;
		goto error3;
	}

	for(i=0; i < eheader->e_phnum; i++) {
		char region_name[64];
		bool ro_segment_handled = false;
		bool rw_segment_handled = false;
		int image_region;
		int lock;

//		dprintf("looking at program header %d\n", i);

		switch(pheaders[i].p_type) {
			case PT_LOAD:
				break;
			case PT_DYNAMIC:
				image->dynamic_ptr = pheaders[i].p_vaddr;
				continue;
			default:
				dprintf("unhandled pheader type 0x%x\n", pheaders[i].p_type);
				continue;
		}

		// we're here, so it must be a PT_LOAD segment
		if((pheaders[i].p_flags & (PF_R | PF_W | PF_X)) == (PF_R | PF_W)) {
			// this is the writable segment
			if(rw_segment_handled) {
				// we've already created this segment
				continue;
			}
			rw_segment_handled = true;
			image_region = 1;
			lock = LOCK_RW|LOCK_KERNEL;
			sprintf(region_name, "%s_seg1", path);
		} else if((pheaders[i].p_flags & (PF_R | PF_X)) == (PF_R | PF_X)) {
			// this is the non-writable segment
			if(ro_segment_handled) {
				// we've already created this segment
				continue;
			}
			ro_segment_handled = true;
			image_region = 0;
//			lock = LOCK_RO|LOCK_KERNEL;
			lock = LOCK_RW|LOCK_KERNEL;
			sprintf(region_name, "%s_seg0", path);
		} else {
			dprintf("weird program header flags 0x%x\n", pheaders[i].p_flags);
			continue;
		}
		image->regions[image_region].size = ROUNDUP(pheaders[i].p_memsz + (pheaders[i].p_vaddr % PAGE_SIZE), PAGE_SIZE);
		if(i == 0) {
			// put region zero anywhere
			image->regions[image_region].id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), region_name,
				(void **)&image->regions[image_region].start, REGION_ADDR_ANY_ADDRESS,
				image->regions[image_region].size, REGION_WIRING_WIRED, lock);
		} else {
			// try to line the other regions up so that their relative distances are according to the ELF header
			image->regions[image_region].start = ROUNDOWN(pheaders[i].p_vaddr + image->regions[0].delta, PAGE_SIZE);
//			dprintf("elf: region 0.delta 0x%x region %d.pvaddr 0x%x region %d.start 0x%x\n", 
//				image->regions[0].delta, i, pheaders[i].p_vaddr, i, image->regions[image_region].start);
			image->regions[image_region].id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), region_name,
				(void **)&image->regions[image_region].start, REGION_ADDR_EXACT_ADDRESS,
				image->regions[image_region].size, REGION_WIRING_WIRED, lock);			
		}
		if(image->regions[image_region].id < 0) {
			dprintf("error allocating region!\n");
			err = ERR_INVALID_BINARY;
			goto error4;
		}
		image->regions[image_region].delta = image->regions[image_region].start - ROUNDOWN(pheaders[i].p_vaddr, PAGE_SIZE);

//		dprintf("elf_load_kspace: created a region at 0x%x\n", image->regions[image_region].start);

		len = sys_read(fd, (void *)(image->regions[image_region].start + (pheaders[i].p_vaddr % PAGE_SIZE)),
			pheaders[i].p_offset, pheaders[i].p_filesz);
		if(len < 0) {
			err = len;
			dprintf("error reading in seg %d\n", i);
			goto error4;
		}

		if(lowest_address == 0 || image->regions[image_region].start < lowest_address)
			lowest_address = image->regions[image_region].start;
		if(highest_address == 0 || (image->regions[image_region].start + image->regions[image_region].size) > highest_address)
			highest_address = image->regions[image_region].start + image->regions[image_region].size;
	}

	if(image->regions[1].start != 0) {
		if(image->regions[0].delta != image->regions[1].delta) {
			dprintf("could not load binary, fix the region problem!\n");
			dump_image_info(image);
			err = ERR_NO_MEMORY;
			goto error4;
		}
	}

	// modify the dynamic ptr by the delta of the regions
	image->dynamic_ptr += image->regions[0].delta;

	err = elf_parse_dynamic_section(image);
	if(err < 0)
		goto error4;

	err = elf_relocate(image, sym_prepend);
	if(err < 0)
		goto error4;

	err = 0;

	kfree(pheaders);
	sys_close(fd);

	insert_image_in_list(image);

done:
	mutex_unlock(&image_load_lock);

	dprintf("elf_load_kspace: syncing icache from 0x%lx to 0x%lx\n", lowest_address, highest_address);
	arch_cpu_sync_icache((void *)lowest_address, highest_address - lowest_address);

	dprintf("elf_load_kspace: done!\n");

	return image->id;

error4:
	if(image->regions[1].id >= 0)
		vm_delete_region(vm_get_kernel_aspace_id(), image->regions[1].id);
	if(image->regions[0].id >= 0)
		vm_delete_region(vm_get_kernel_aspace_id(), image->regions[0].id);
error3:
	kfree(image);
error2:
	kfree(pheaders);
error1:
	kfree(eheader);
error:
	mutex_unlock(&image_load_lock);
error0:
	if(vnode)
		vfs_put_vnode_ptr(vnode);
	sys_close(fd);

	return err;
}
/*
 * Initialize an rtl8169 NIC (HAL-based variant): map the register window
 * with hal_alloc_vaddress/hal_pages_control_etc, allocate descriptor
 * rings and frame buffers with hal_pv_alloc, reset the chip, read the
 * MAC address, program the rx/tx engines and hook the shareable IRQ.
 * Returns 0 on success, -1 if the IRQ cannot be allocated.
 */
static int rtl8169_init(rtl8169 *r)
{
    //bigtime_t time;
    int err = -1;
    //addr_t temp;
    //int i;

    hal_mutex_init(&r->lock,DEBUG_MSG_PREFIX);


    SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

    /*
     r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region", (void **)&r->virt_base, REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
    if(r->region < 0) {
        SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
        err = -1;
        goto err;
    }*/

    size_t n_pages = BYTES_TO_PAGES(r->phys_size);

    hal_alloc_vaddress( (void **)&r->virt_base, n_pages); // alloc address of a page, but not memory
    hal_pages_control_etc( r->phys_base, (void *)r->virt_base, n_pages, page_map_io, page_rw, 0 );

    SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

#if 0
    /* create regions for tx and rx descriptors */
    r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc", (void **)&r->rxdesc,
                                                  REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->rxdesc_phys = vtophys(r->rxdesc);
    SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc", (void **)&r->txdesc,
                                                  REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->txdesc_phys = vtophys(r->txdesc);
    SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
    r->reg_spinlock = 0;

    /* create a large tx and rx buffer for the descriptors to point to */
    r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf", (void **)&r->rxbuf,
                                                 REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
    r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf", (void **)&r->txbuf,
                                                 REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
#endif

    /* contiguous physical+virtual allocations for the descriptor rings */
    hal_pv_alloc( &r->rxdesc_phys, (void**)&r->rxdesc, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN );
    hal_pv_alloc( &r->txdesc_phys, (void**)&r->txdesc, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN );

    SHOW_INFO(2, "rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    SHOW_INFO(2, "tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    hal_pv_alloc( &r->rxbuf_phys, (void**)&r->rxbuf, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME );
    hal_pv_alloc( &r->txbuf_phys, (void**)&r->txbuf, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME );

    /* create a receive sem */
    hal_sem_init( &r->rx_sem, "rtl8169 rx_sem");

    /* transmit sem */
    hal_sem_init(  &r->tx_sem, "rtl8169 tx_sem");

    /* reset the chip (poll up to ~1s for the reset bit to clear) */
    int repeats = 100;
    RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
    do {
        hal_sleep_msec(10); // 10ms
        if(repeats -- <= 0 )
            break;
    } while(RTL_READ_8(r, REG_CR) & (1<<4));

    /* read in the mac address */
    r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
    r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
    r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
    r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
    r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
    r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);
    SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
              r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
              r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

    /* some voodoo from BSD driver */
    RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
    RTL_SETBITS_16(r, REG_CCR, 0x3);

    /* mask all interrupts */
    RTL_WRITE_16(r, REG_IMR, 0);

    /* set up the tx/rx descriptors */
    rtl8169_setup_descriptors(r);

    /* enable tx/rx */
    RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

    /* set up the rx state */
    /* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
    RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
    RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
    RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

    /* set up the tx state */
    RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
    RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

    /* set up the interrupt handler */
    //int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");
    if(hal_irq_alloc( r->irq, &rtl8169_int, r, HAL_IRQ_SHAREABLE ))
    {
        SHOW_ERROR( 0, "unable to allocate irq %d", r->irq );
        goto err1;
    }

    /* clear all pending interrupts */
    RTL_WRITE_16(r, REG_ISR, 0xffff);

    /* unmask interesting interrupts */
    RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

    return 0;

err1:
    // TODO free what?
    // NOTE(review): the hal_pv_alloc'ed rings/buffers and the mapped
    // register window are not released on this path -- confirm policy
    //vm_delete_region(vm_get_kernel_aspace_id(), r->region);
//err:
    return err;
}
Пример #13
0
/*
 * Bring up an rtl8169 NIC: map its register window, allocate rx/tx
 * descriptor rings and frame buffers, reset the chip, read the MAC
 * address and program the rx/tx engines and interrupt mask.
 *
 * Returns 0 on success or a negative error, unwinding any regions that
 * were already created. (Fixed: the region-creation results were not
 * checked, and the old err1: label was unreachable dead code.)
 */
int rtl8169_init(rtl8169 *r)
{
	bigtime_t time;
	int err = -1;

	SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

	/* map the device register window into kernel space */
	r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region", (void **)&r->virt_base,
		REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
	if(r->region < 0) {
		SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
		err = -1;
		goto err;
	}
	SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

	/* create regions for tx and rx descriptors */
	r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc", (void **)&r->rxdesc,
		REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	if(r->rxdesc_region < 0) {
		SHOW_ERROR0(1, "rtl8169_init: error creating rx descriptor region\n");
		goto err1;
	}
	r->rxdesc_phys = vtophys(r->rxdesc);
	SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
	r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc", (void **)&r->txdesc,
		REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	if(r->txdesc_region < 0) {
		SHOW_ERROR0(1, "rtl8169_init: error creating tx descriptor region\n");
		goto err2;
	}
	r->txdesc_phys = vtophys(r->txdesc);
	SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
	r->reg_spinlock = 0;

	/* create a large tx and rx buffer for the descriptors to point to */
	r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf", (void **)&r->rxbuf,
			REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
	if(r->rxbuf_region < 0) {
		SHOW_ERROR0(1, "rtl8169_init: error creating rx buffer region\n");
		goto err3;
	}
	r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf", (void **)&r->txbuf,
			REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
	if(r->txbuf_region < 0) {
		SHOW_ERROR0(1, "rtl8169_init: error creating tx buffer region\n");
		goto err4;
	}

	/* create a receive sem */
	r->rx_sem = sem_create(0, "rtl8169 rx_sem");

	/* transmit sem */
	r->tx_sem = sem_create(1, "rtl8169 tx_sem");

	/* reset the chip (poll up to ~1s for the reset bit to clear) */
	time = system_time();
	RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			break;
		}
	} while(RTL_READ_8(r, REG_CR) & (1<<4));

	/* read in the mac address */
	r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
	r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
	r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
	r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
	r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
	r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);
	SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
		r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
		r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

	/* some voodoo from BSD driver */
	RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
	RTL_SETBITS_16(r, REG_CCR, 0x3);

	/* mask all interrupts */
	RTL_WRITE_16(r, REG_IMR, 0);

	/* set up the tx/rx descriptors */
	rtl8169_setup_descriptors(r);

	/* enable tx/rx */
	RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

	/* set up the rx state */
	/* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
	RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
	RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
	RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

	/* set up the tx state */
	RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
	RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

	/* set up the interrupt handler */
	int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");

	/* clear all pending interrupts */
	RTL_WRITE_16(r, REG_ISR, 0xffff);

	/* unmask interesting interrupts */
	RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

	return 0;

	/* unwind in reverse order of creation */
err4:
	vm_delete_region(vm_get_kernel_aspace_id(), r->rxbuf_region);
err3:
	vm_delete_region(vm_get_kernel_aspace_id(), r->txdesc_region);
err2:
	vm_delete_region(vm_get_kernel_aspace_id(), r->rxdesc_region);
err1:
	vm_delete_region(vm_get_kernel_aspace_id(), r->region);
err:
	return err;
}
Пример #14
0
/*
 * vm_init - bring the whole virtual memory subsystem up from boot-loader
 * kernel args.
 *
 * The stages below run in a strict order: translation map / arch /
 * platform pre-init, page-module pre-init, kernel heap bring-up, page
 * module init, the "prefinal" stages, address-space and object modules,
 * creation of the kernel address space, the "final" stages, and finally
 * the creation of memory objects describing the kernel image, per-CPU
 * kernel stacks, kernel heap and bootfs image. Every failure is fatal
 * (panic) because nothing can run without a working VM.
 *
 * Returns NO_ERROR on success (any failure panics instead of returning).
 */
status_t vm_init(kernel_args_t *kargs)
{
    addr_t heap_base;  /* Kernel's heap base */
    size_t heap_size;  /* Kernel's heap size */
    status_t err;

    /* clear VM statistics */
    memset(&VM_State, 0, sizeof(vm_stat_t));

    /* translation map module init */
    err = vm_translation_map_init(kargs);
    if(err)
       panic("vm_init: translation map init failed!\n");

    /* execute architecture-specific init */
    err = arch_vm_init(kargs);
    if(err)
       panic("arch_vm_init: failed!\n");

    /* start platform-specific init */
    err = platform_vm_init(kargs);
    if(err)
       panic("platform_vm_init: failed!\n");

    /* init memory size (must precede heap sizing: vm_phys_mem_size below) */
    err = vm_page_preinit(kargs);
    if(err)
       panic("vm_page_preinit: failed!\n");

    /* init kernel's heap */
    {
        /* compute heap size: a fixed fraction of physical memory,
         * rounded up to a whole megabyte and clamped to the config max */
        heap_size = ROUNDUP(
                      vm_phys_mem_size() / SYSCFG_KERNEL_HEAP_FRAC,
                      1*1024*1024 /* Mbyte */
                    );
        if(heap_size > SYSCFG_KERNEL_HEAP_MAX)
              heap_size = SYSCFG_KERNEL_HEAP_MAX;

        /* allocate heap area */
        /* NOTE(review): the result is not checked here; a failed boot-time
         * allocation would be passed straight to heap_init — confirm
         * vm_alloc_from_kargs panics internally on failure. */
        heap_base = vm_alloc_from_kargs(kargs, heap_size, VM_PROT_KERNEL_DEFAULT);

        /* init heap */
        heap_init(heap_base, heap_size);
        /* From here on kmalloc and kfree are available. */
    }

    /* init vm page module */
    err = vm_page_init(kargs);
    if(err)
       panic("vm_page_init: failed!\n");

/*** Important note: After this point vm_alloc_from_kargs must not be used
 *** because physical pages bookkeping is turned on.
 ***/
 
    /* prefinal stage of translation map module init */
    err = vm_translation_map_init_prefinal(kargs);
    if(err)
       panic("vm_init: prefinal stage of translation map init failed!\n");

    /* start prefinal init stage of architecture-specific parts */
    err = arch_vm_init_prefinal(kargs);
    if(err)
       panic("arch_vm_init_prefinal: stage failed!\n");

    /* start prefinal stages of platform-specific init */
    err = platform_vm_init_prefinal(kargs);
    if(err)
       panic("platform_vm_init_prefinal: stage failed!\n");

    /* init address spaces module */
    err = vm_address_spaces_init(kargs);
    if(err)
       panic("vm_address_spaces_init: failed!\n");

    /* init vm objects module */
    err = vm_objects_init(kargs);
    if(err)
       panic("vm_objects_init: failed!\n");

    /* create initial kernel space */
    if(vm_create_kernel_aspace("kernel_space", KERNEL_BASE, KERNEL_SIZE) == VM_INVALID_ASPACEID)
       panic("vm_init: failed to create initial kernel space!\n");

    /*** Final stages of VM init ***/

    /* final stage of translation map module init */
    err = vm_translation_map_init_final(kargs);
    if(err)
       panic("vm_init: final stage of translation map init failed!\n");

    /* start final init stage of architecture-specific parts */
    err = arch_vm_init_final(kargs);
    if(err)
       panic("arch_vm_init_final: final stage failed!\n");

    /* final stages of platform-specific init */
    err = platform_vm_init_final(kargs);
    if(err)
       panic("platform_vm_init_final: final stage failed!\n");

    /* final stages of vm page module init */
    err = vm_page_init_final(kargs);
    if(err)
       panic("vm_page_init_final: failed!\n");

    /* init memory structures: wrap the already-in-use boot mappings
     * (kernel image, CPU stacks, heap, bootfs) in proper VM objects so
     * they are accounted for like any other mapping */
    {
        aspace_id kid = vm_get_kernel_aspace_id();
        object_id id;
        char name[SYS_MAX_OS_NAME_LEN];
        int i;

        /* create kernel_image memory object and its mapping */
        id = vm_create_physmem_object(VM_NAME_KERNEL_IMAGE, kargs->phys_kernel_addr.start,
                                      kargs->phys_kernel_addr.size, VM_OBJECT_PROTECT_ALL);
        if(id == VM_INVALID_OBJECTID)
            panic("vm_init: failed to create kernel_image object!\n");

        /* map at the exact address the kernel already runs at */
        err = vm_map_object_exactly(kid, id, VM_PROT_KERNEL_ALL, kargs->virt_kernel_addr.start);
        if(err != NO_ERROR)
            panic("vm_init: failed to create mapping of kernel_image object!\n");

        /* init physical page counters */
        err = vm_page_init_wire_counters(kargs->virt_kernel_addr.start,
                                         kargs->virt_kernel_addr.size);
        if(err != NO_ERROR)
            panic("vm_init: failed to init counters for kernel_image object!\n");

        /* create kernel stacks memory objects and mappings, one per CPU */
        for(i = 0; i < SYSCFG_MAX_CPUS; i++) {
            snprintf(name, SYS_MAX_OS_NAME_LEN, VM_NAME_KERNEL_CPU_STACK_FMT, i);
            id = vm_create_physmem_object(name, kargs->phys_cpu_kstack[i].start,
                                          kargs->phys_cpu_kstack[i].size, VM_OBJECT_PROTECT_ALL);
            if(id == VM_INVALID_OBJECTID)
                panic("vm_init: failed to create %s object!\n", name);

            err = vm_map_object_exactly(kid, id, VM_PROT_KERNEL_ALL, kargs->virt_cpu_kstack[i].start);
            if(err != NO_ERROR)
                panic("vm_init: failed to create mapping of %s object!\n", name);

            /* init counters */
            err = vm_page_init_wire_counters(kargs->virt_cpu_kstack[i].start,
                                             kargs->virt_cpu_kstack[i].size);
            if(err != NO_ERROR)
                panic("vm_init: failed to init counters for %s object!\n", name);
        }

        /* create kernel_heap memory object and its mapping
         * (virtmem object: the heap was allocated virtually above) */
        id = vm_create_virtmem_object(VM_NAME_KERNEL_HEAP, kid, heap_base, heap_size,
                                      VM_OBJECT_PROTECT_ALL);
        if(id == VM_INVALID_OBJECTID)
            panic("vm_init: failed to create kernel_heap object!\n");

        err = vm_map_object_exactly(kid, id, VM_PROT_KERNEL_ALL, heap_base);
        if(err != NO_ERROR)
            panic("vm_init: failed to create mapping of kernel_heap object!\n");

        /* init counters for heap pages */
        err = vm_page_init_wire_counters(heap_base, heap_size);
        if(err != NO_ERROR)
            panic("vm_init: failed to init counters for kernel_heap object!\n");

        /* create bootfs_image memory object (not mapped here) */
        id = vm_create_physmem_object(VM_NAME_BOOTFS_IMAGE, kargs->btfs_image_addr.start,
                                      kargs->btfs_image_addr.size, VM_OBJECT_PROTECT_ALL);
        if(id == VM_INVALID_OBJECTID)
            panic("vm_init: failed to create bootfs_image object!\n");
    }
    /* end of init memory structures */

    return NO_ERROR;
}
Пример #15
0
/*
 * find_and_map - locate the VMware SVGA/SVGA2 display device on the PCI
 * bus and map its frame buffer and command FIFO into kernel space.
 *
 * On success vcons is populated (index/value ports, physical ranges,
 * mapped regions, bits-per-pixel) and NO_ERROR is returned. On failure
 * any region mapped so far is deleted and a negative error is returned.
 *
 * Fix vs. previous revision: the success path used to fall through a
 * block of unreachable cleanup code before the error labels; it now
 * returns directly, and the dead vm_delete_region call (no path could
 * ever reach it) is removed.
 */
static int find_and_map(void)
{
	int err;
	pci_module_hooks *pci;
	pci_info pinfo;
	aspace_id kai = vm_get_kernel_aspace_id();
	int i;
	bool foundit;

	if(module_get(PCI_BUS_MODULE_NAME, 0, (void **)&pci) < 0) {
		dprintf("vmware: no pci bus found..\n");
		err = ERR_NOT_FOUND;
		goto error0;
	}

	/* scan the bus for either of the two known VMware display devices */
	foundit = false;
	for(i = 0; pci->get_nth_pci_info(i, &pinfo) >= NO_ERROR; i++) {
		dprintf("vmware: looking at 0x%x:0x%x\n", pinfo.vendor_id, pinfo.device_id);
		if(pinfo.vendor_id == PCI_VENDOR_ID_VMWARE &&
			(pinfo.device_id == PCI_DEVICE_ID_VMWARE_SVGA2 || pinfo.device_id == PCI_DEVICE_ID_VMWARE_SVGA)) {
			foundit = true;
			break;
		}
	}
	if(!foundit) {
		dprintf("vmware: didn't find device on pci bus\n");
		err = ERR_NOT_FOUND;
		goto error0;
	}

	switch(pinfo.device_id) {
		case PCI_DEVICE_ID_VMWARE_SVGA:
			dprintf("vmware SVGA device detected at pci %d:%d:%d\n", pinfo.bus, pinfo.device, pinfo.function);
			/* legacy device: fixed base port, registers 4 bytes apart */
			vcons.index_port = SVGA_LEGACY_BASE_PORT + SVGA_INDEX_PORT * 4;
			vcons.value_port = SVGA_LEGACY_BASE_PORT + SVGA_VALUE_PORT * 4;
			break;
		case PCI_DEVICE_ID_VMWARE_SVGA2:
			dprintf("vmware SVGA2 device detected at pci %d:%d:%d\n", pinfo.bus, pinfo.device, pinfo.function);
			/* SVGA2: ports are relative to PCI BAR 0 */
			vcons.index_port = pinfo.u.h0.base_registers[0] + SVGA_INDEX_PORT;
			vcons.value_port = pinfo.u.h0.base_registers[0] + SVGA_VALUE_PORT;
			break;
		default:
			/* unreachable: the scan above only accepts the two ids handled here */
			break;
	}
	/* BAR 1 = frame buffer, BAR 2 = command FIFO; clamp both to driver limits */
	vcons.fb_phys_base = pinfo.u.h0.base_registers[1];
	vcons.fb_size = MIN(pinfo.u.h0.base_register_sizes[1], SVGA_FB_MAX_SIZE);
	vcons.fifo_phys_base = pinfo.u.h0.base_registers[2];
	vcons.fifo_size = MIN(pinfo.u.h0.base_register_sizes[2], SVGA_MEM_SIZE);

	dprintf("vmware: index port 0x%x, value port 0x%x\n", vcons.index_port, vcons.value_port);
	dprintf("vmware: phys base 0x%x, size 0x%X\n", vcons.fb_phys_base, vcons.fb_size);
	dprintf("vmware: fifo phys base 0x%x, fifo size 0x%X\n", vcons.fifo_phys_base, vcons.fifo_size);

	vcons.fb_region = vm_map_physical_memory(kai, "vmw:fb", (void **)&vcons.fb_base, REGION_ADDR_ANY_ADDRESS, vcons.fb_size, LOCK_KERNEL | LOCK_RW, vcons.fb_phys_base);
	if (vcons.fb_region < 0)
	{
		err = vcons.fb_region;
		dprintf("Error mapping frame buffer: %x\n", err);
		goto error0;
	}
	vcons.fifo_region = vm_map_physical_memory(kai, "vmw:fifo", (void **)&vcons.fifo_base, REGION_ADDR_ANY_ADDRESS, vcons.fifo_size, LOCK_KERNEL | LOCK_RW, vcons.fifo_phys_base);
	if (vcons.fifo_region < 0)
	{
		err = vcons.fifo_region;
		dprintf("Error mapping vmw::fifo: %x\n", err);
		goto error1;
	}

	// XXX this makes the emulation unhappy (crashes vmware)
//	out_reg(SVGA_REG_ID, SVGA_ID_2);
//	vcons.svga_id = in_reg(SVGA_REG_ID);
//	dprintf("vmware: svga version %d\n", vcons.svga_id);

	vcons.bits_per_pixel = in_reg(SVGA_REG_BITS_PER_PIXEL);
	dprintf("vmware: bpp %d\n", vcons.bits_per_pixel);

	return NO_ERROR;

error1:
	/* unmap vcons.fb_region */
	vm_delete_region(kai, vcons.fb_region);
error0:
	return err;
}
Пример #16
0
/*
 * rtl8139_init - map the RTL8139's register window, reset the chip,
 * allocate the rx/tx DMA buffers and program the chip into a running
 * receive/transmit state.
 *
 * Returns 0 on success or a negative error code. On failure every
 * resource acquired so far (register mapping, rx/tx buffer regions) is
 * released via the goto cleanup ladder.
 *
 * Fix vs. previous revision: the two vm_create_anonymous_region calls
 * were unchecked — a failed allocation would have had DMA addresses
 * taken from an unmapped buffer.
 */
int rtl8139_init(rtl8139 *rtl)
{
	bigtime_t time;
	int err = -1;
	addr_t temp;

	dprintf("rtl8139_init: rtl %p\n", rtl);

	/* map the chip's memory-mapped registers into kernel space */
	rtl->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8139_region", (void **)&rtl->virt_base,
		REGION_ADDR_ANY_ADDRESS, rtl->phys_size, LOCK_KERNEL|LOCK_RW, rtl->phys_base);
	if(rtl->region < 0) {
		dprintf("rtl8139_init: error creating memory mapped region\n");
		err = -1;
		goto err;
	}
	dprintf("rtl8139 mapped at address 0x%lx\n", rtl->virt_base);

	// try to reset the device, giving it at most one second
	time = system_time();
	RTL_WRITE_8(rtl, RT_CHIPCMD, RT_CMD_RESET);
	do {
		thread_snooze(10000); // 10ms
		if(system_time() - time > 1000000) {
			err = -1;
			goto err1;
		}
	} while((RTL_READ_8(rtl, RT_CHIPCMD) & RT_CMD_RESET));

	// create a rx and tx buf; the rx ring must be physically contiguous
	// because the chip DMAs into it as one block
	rtl->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8139_rxbuf", (void **)&rtl->rxbuf,
		REGION_ADDR_ANY_ADDRESS, 64*1024 + 16, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
	if(rtl->rxbuf_region < 0) {
		dprintf("rtl8139_init: error creating rx buffer region\n");
		err = rtl->rxbuf_region;
		goto err1;
	}
	rtl->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8139_txbuf", (void **)&rtl->txbuf,
		REGION_ADDR_ANY_ADDRESS, 8*1024, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
	if(rtl->txbuf_region < 0) {
		dprintf("rtl8139_init: error creating tx buffer region\n");
		err = rtl->txbuf_region;
		goto err2;
	}

	// set up the transmission buf and sem: 4 descriptors, 2k each
	// NOTE(review): sem_create results are still unchecked — confirm
	// whether sem creation can fail this early in boot
	rtl->tx_sem = sem_create(4, "rtl8139_txsem");
	mutex_init(&rtl->lock, "rtl8139");
	rtl->txbn = 0;
	rtl->last_txbn = 0;
	rtl->rx_sem = sem_create(0, "rtl8139_rxsem");
	rtl->reg_spinlock = 0;

	// set up the interrupt handler
	int_set_io_interrupt_handler(rtl->irq, &rtl8139_int, rtl, "rtl8139");

	// read the mac address
	rtl->mac_addr[0] = RTL_READ_8(rtl, RT_IDR0);
	rtl->mac_addr[1] = RTL_READ_8(rtl, RT_IDR0 + 1);
	rtl->mac_addr[2] = RTL_READ_8(rtl, RT_IDR0 + 2);
	rtl->mac_addr[3] = RTL_READ_8(rtl, RT_IDR0 + 3);
	rtl->mac_addr[4] = RTL_READ_8(rtl, RT_IDR0 + 4);
	rtl->mac_addr[5] = RTL_READ_8(rtl, RT_IDR0 + 5);

	dprintf("rtl8139: mac addr %x:%x:%x:%x:%x:%x\n",
		rtl->mac_addr[0], rtl->mac_addr[1], rtl->mac_addr[2],
		rtl->mac_addr[3], rtl->mac_addr[4], rtl->mac_addr[5]);

	// enable writing to the config registers
	RTL_WRITE_8(rtl, RT_CFG9346, 0xc0);

	// reset config 1
	RTL_WRITE_8(rtl, RT_CONFIG1, 0);

	// Enable receive and transmit functions
	RTL_WRITE_8(rtl, RT_CHIPCMD, RT_CMD_RX_ENABLE | RT_CMD_TX_ENABLE);

	// Set Rx FIFO threashold to 256, Rx size to 64k+16, 256 byte DMA burst
	RTL_WRITE_32(rtl, RT_RXCONFIG, 0x00009c00);

	// Set Tx 256 byte DMA burst
	RTL_WRITE_32(rtl, RT_TXCONFIG, 0x03000400);

	// Turn off lan-wake and set the driver-loaded bit
	RTL_WRITE_8(rtl, RT_CONFIG1, (RTL_READ_8(rtl, RT_CONFIG1) & ~0x30) | 0x20);

	// Enable FIFO auto-clear
	RTL_WRITE_8(rtl, RT_CONFIG4, RTL_READ_8(rtl, RT_CONFIG4) | 0x80);

	// go back to normal mode
	RTL_WRITE_8(rtl, RT_CFG9346, 0);

	// Setup RX buffers: hand the chip the physical address of the ring
	*(int *)rtl->rxbuf = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), rtl->rxbuf, &temp);
	dprintf("rx buffer will be at 0x%lx\n", temp);
	RTL_WRITE_32(rtl, RT_RXBUF, temp);

	// Setup TX buffers: four 2k descriptors, two per 4k page, so each
	// page's physical address covers two descriptor slots
	dprintf("tx buffer (virtual) is at 0x%lx\n", rtl->txbuf);
	*(int *)rtl->txbuf = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), rtl->txbuf, &temp);
	RTL_WRITE_32(rtl, RT_TXADDR0, temp);
	RTL_WRITE_32(rtl, RT_TXADDR1, temp + 2*1024);
	dprintf("first half of txbuf at 0x%lx\n", temp);
	*(int *)(rtl->txbuf + 4*1024) = 0;
	vm_get_page_mapping(vm_get_kernel_aspace_id(), rtl->txbuf + 4*1024, &temp);
	RTL_WRITE_32(rtl, RT_TXADDR2, temp);
	RTL_WRITE_32(rtl, RT_TXADDR3, temp + 2*1024);
	dprintf("second half of txbuf at 0x%lx\n", temp);

/*
	RTL_WRITE_32(rtl, RT_TXSTATUS0, RTL_READ_32(rtl, RT_TXSTATUS0) | 0xfffff000);
	RTL_WRITE_32(rtl, RT_TXSTATUS1, RTL_READ_32(rtl, RT_TXSTATUS1) | 0xfffff000);
	RTL_WRITE_32(rtl, RT_TXSTATUS2, RTL_READ_32(rtl, RT_TXSTATUS2) | 0xfffff000);
	RTL_WRITE_32(rtl, RT_TXSTATUS3, RTL_READ_32(rtl, RT_TXSTATUS3) | 0xfffff000);
*/
	// Reset RXMISSED counter
	RTL_WRITE_32(rtl, RT_RXMISSED, 0);

	// Enable receiving broadcast and physical match packets
//	RTL_WRITE_32(rtl, RT_RXCONFIG, RTL_READ_32(rtl, RT_RXCONFIG) | 0x0000000a);
	RTL_WRITE_32(rtl, RT_RXCONFIG, RTL_READ_32(rtl, RT_RXCONFIG) | 0x0000000f);

	// Filter out all multicast packets
	RTL_WRITE_32(rtl, RT_MAR0, 0);
	RTL_WRITE_32(rtl, RT_MAR0 + 4, 0);

	// Disable all multi-interrupts
	RTL_WRITE_16(rtl, RT_MULTIINTR, 0);

	RTL_WRITE_16(rtl, RT_INTRMASK, MYRT_INTS);
//	RTL_WRITE_16(rtl, RT_INTRMASK, 0x807f);

	// Enable RX/TX once more
	RTL_WRITE_8(rtl, RT_CHIPCMD, RT_CMD_RX_ENABLE | RT_CMD_TX_ENABLE);

	RTL_WRITE_8(rtl, RT_CFG9346, 0);

	return 0;

err2:
	vm_delete_region(vm_get_kernel_aspace_id(), rtl->rxbuf_region);
err1:
	vm_delete_region(vm_get_kernel_aspace_id(), rtl->region);
err:
	return err;
}