Exemplo n.º 1
0
/* Read from or write to a physical memory region by temporarily mapping
 * the containing frame into the virtual address space.
 *
 * phy     - physical address of the region
 * buffer  - caller buffer to copy from/to
 * size    - number of bytes; the region must not cross a frame boundary
 * isWrite - non-zero to write buffer into memory, zero to read into buffer
 *
 * Returns HVM_STATUS_SUCCESS, or HVM_STATUS_UNSUCCESSFUL if the region
 * spans multiple frames or the mapping fails.
 */
hvm_status MmuReadWritePhysicalRegion(hvm_phy_address phy, void* buffer, Bit32u size, hvm_bool isWrite)
{
  hvm_status status;
  hvm_address va;
  PTE savedPte;

  /* Refuse requests that span more than one physical frame */
  if (PHY_TO_FRAME(phy) != PHY_TO_FRAME(phy+size-1)) {
      MmuPrint("[MMU] Error: physical region %.8x-%.8x crosses multiple frames\n", phy, phy+size-1);
      return HVM_STATUS_UNSUCCESSFUL;
  }

  status = MmuMapPhysicalPage(phy, &va, &savedPte);
  if (status != HVM_STATUS_SUCCESS)
    return HVM_STATUS_UNSUCCESSFUL;

  /* Point at the requested offset within the mapped page */
  va += MMU_PAGE_OFFSET(phy);

  if (isWrite) {
    /* Write to memory page */
    vmm_memcpy((Bit8u*) va, buffer, size);
  } else {
    /* Read memory page */
    MmuPrint("[MMU] MmuReadWritePhysicalRegion() Going to read %d from va: %.8x\n", size, va);
    vmm_memcpy(buffer, (Bit8u*) va, size);
  }

  MmuPrint("[MMU] MmuReadWritePhysicalRegion() All done!\n");

  MmuUnmapPhysicalPage(va, savedPte);

  return HVM_STATUS_SUCCESS;
}
Exemplo n.º 2
0
/*-----------------------------------------------------------------------------------*/
void
uip_arp_arpin(void)
{
	/* Process an incoming ARP packet held in uip_buf (accessed via the
	   BUF macro).  On exit, uip_len is 0 if the packet was consumed,
	   or sizeof(struct arp_hdr) if an ARP reply was built in-place and
	   should be transmitted by the caller. */

	if(uip_len < sizeof(struct arp_hdr)) {
		/* Frame too short to hold a full ARP header: drop it. */
		uip_len = 0;
		return;
	}
	uip_len = 0;

	switch(BUF->opcode) {
	case HTONS(ARP_REQUEST):
		/* ARP request. If it asked for our address, we send out a
		   reply. */
		if(uip_ipaddr_cmp(BUF->dipaddr, uip_hostaddr)) {
			/* First, we register the one who made the request in our ARP
			   table, since it is likely that we will do more communication
			   with this host in the future. */
			uip_arp_update(BUF->sipaddr, &BUF->shwaddr);

			/* The reply opcode is 2. */
			BUF->opcode = HTONS(2);

			/* Turn the request into a reply in-place: requester's MAC
			   becomes the destination, ours becomes the source. */
			vmm_memcpy(BUF->dhwaddr.addr, BUF->shwaddr.addr, 6);
			vmm_memcpy(BUF->shwaddr.addr, uip_ethaddr.addr, 6);
			vmm_memcpy(BUF->ethhdr.src.addr, uip_ethaddr.addr, 6);
			vmm_memcpy(BUF->ethhdr.dest.addr, BUF->dhwaddr.addr, 6);

			/* Swap IP addresses (stored as two 16-bit halves). */
			BUF->dipaddr[0] = BUF->sipaddr[0];
			BUF->dipaddr[1] = BUF->sipaddr[1];
			BUF->sipaddr[0] = uip_hostaddr[0];
			BUF->sipaddr[1] = uip_hostaddr[1];

			BUF->ethhdr.type = HTONS(UIP_ETHTYPE_ARP);
			uip_len = sizeof(struct arp_hdr);
		}
		break;
	case HTONS(ARP_REPLY):
		/* ARP reply. We insert or update the ARP table if it was meant
		   for us. */
		if(uip_ipaddr_cmp(BUF->dipaddr, uip_hostaddr)) {
			uip_arp_update(BUF->sipaddr, &BUF->shwaddr);
		}
		break;
	}

	return;
}
Exemplo n.º 3
0
/* Insert one key into the ring buffer.
 *
 * rb        - ring buffer (keys of fixed size rb->key_size, capacity
 *             rb->key_count slots)
 * srckey    - key to copy in (rb->key_size bytes are read from it)
 * overwrite - if TRUE and the buffer is full, the oldest entry is
 *             discarded to make room; if FALSE a full buffer rejects
 *             the insert
 *
 * Returns TRUE if the key was stored, FALSE on bad arguments or when
 * the buffer is full and overwrite is not requested.
 */
bool vmm_ringbuf_enqueue(struct vmm_ringbuf *rb, void *srckey, bool overwrite)
{
	u32 read_pos_mod, write_pos_mod;
	bool isfull, update;

	if (!rb || !srckey) {
		return FALSE;
	}

	vmm_spin_lock(&rb->lock);

	/* Advance read/write positions by one, wrapping modulo key_count */
	read_pos_mod = (rb->read_pos + 1);
	if (rb->key_count <= read_pos_mod) {
		read_pos_mod -= rb->key_count;
	}
	write_pos_mod = (rb->write_pos + 1);
	if (rb->key_count <= write_pos_mod) {
		write_pos_mod -= rb->key_count;
	}
	/* Full when the next write slot would collide with the read slot
	   (one slot is always sacrificed to distinguish full from empty) */
	isfull = (rb->read_pos == write_pos_mod);
	update = FALSE;
	if (overwrite) {
		if (isfull) {
			/* Drop the oldest key to make room */
			rb->read_pos = read_pos_mod;
			rb->avail_count--;
		}
		update = TRUE;
	} else {
		if (!isfull) {
			update = TRUE;
		}
	}
	if(update) {
		/* Fast paths for common key sizes; memcpy otherwise */
		switch(rb->key_size) {
		case 1:
			*((u8 *)(rb->keys + (rb->write_pos * rb->key_size)))
			= *((u8 *)srckey);
			break;
		case 2:
			*((u16 *)(rb->keys + (rb->write_pos * rb->key_size)))
			= *((u16 *)srckey);
			break;
		case 4:
			*((u32 *)(rb->keys + (rb->write_pos * rb->key_size)))
			= *((u32 *)srckey);
			break;
		default:
			vmm_memcpy(rb->keys + (rb->write_pos * rb->key_size), 
				   srckey, 
				   rb->key_size);
			break;
		};
		rb->write_pos = write_pos_mod;
		rb->avail_count++;
	}

	vmm_spin_unlock(&rb->lock);

	return update;
}
Exemplo n.º 4
0
/* Recursively parse one FDT node (and all of its children) from the
 * flattened device tree structure block.
 *
 * node     - pre-allocated node to fill in; its list heads must already
 *            be initialised by the caller
 * data_ptr - in/out cursor into the FDT structure block; on entry it
 *            points at an FDT_BEGIN_NODE token, on exit just past the
 *            node's FDT_END_NODE token
 * str_buf  - FDT strings block; attribute names are offsets into it
 *
 * NOTE(review): the vmm_malloc() results below are used unchecked — on
 * allocation failure this dereferences NULL. Confirm whether vmm_malloc
 * can fail in this early-boot context.
 */
void libfdt_node_parse_recursive(vmm_devtree_node_t * node,
				 char **data_ptr, char *str_buf)
{
	vmm_devtree_node_t *child;
	vmm_devtree_attr_t *attr;

	if (LIBFDT_DATA32(*data_ptr) != FDT_BEGIN_NODE)
		return;

	/* Skip the FDT_BEGIN_NODE token */
	*data_ptr += sizeof(u32);

	/* Node name follows the token as a NUL-terminated string */
	node->name = vmm_malloc(vmm_strlen(*data_ptr) + 1);
	vmm_strcpy(node->name, *data_ptr);
	node->type = VMM_DEVTREE_NODETYPE_UNKNOWN;
	node->priv = NULL;
	*data_ptr += vmm_strlen(*data_ptr) + 1;
	/* Re-align the cursor to a 32-bit boundary */
	while ((u32) (*data_ptr) % sizeof(u32) != 0)
		(*data_ptr)++;

	while (LIBFDT_DATA32(*data_ptr) != FDT_END_NODE) {
		switch (LIBFDT_DATA32(*data_ptr)) {
		case FDT_PROP:
			/* Property layout: token, length, name offset, value */
			*data_ptr += sizeof(u32);
			attr = vmm_malloc(sizeof(vmm_devtree_attr_t));
			INIT_LIST_HEAD(&attr->head);
			attr->len = LIBFDT_DATA32(*data_ptr);
			*data_ptr += sizeof(u32);
			attr->name = &str_buf[LIBFDT_DATA32(*data_ptr)];
			*data_ptr += sizeof(u32);
			attr->value = vmm_malloc(attr->len);
			vmm_memcpy(attr->value, *data_ptr, attr->len);
			*data_ptr += attr->len;
			/* Values are padded to a 32-bit boundary */
			while ((u32) (*data_ptr) % sizeof(u32) != 0)
				(*data_ptr)++;
			list_add_tail(&node->attr_list, &attr->head);
			break;
		case FDT_NOP:
			*data_ptr += sizeof(u32);
			break;
		case FDT_BEGIN_NODE:
			/* Nested node: recurse with a fresh child */
			child = vmm_malloc(sizeof(vmm_devtree_node_t));
			INIT_LIST_HEAD(&child->head);
			INIT_LIST_HEAD(&child->attr_list);
			INIT_LIST_HEAD(&child->child_list);
			child->parent = node;
			libfdt_node_parse_recursive(child, data_ptr, str_buf);
			list_add_tail(&node->child_list, &child->head);
			break;
		default:
			/* Unknown token: abort parsing of this subtree */
			return;
			break;
		};
	}

	/* Skip the FDT_END_NODE token */
	*data_ptr += sizeof(u32);

	return;
}
Exemplo n.º 5
0
/* Switch the architecture register context from one VCPU to another.
 *
 * tvcpu - outgoing VCPU whose register file is saved from 'regs'
 *         (may be NULL on first switch)
 * vcpu  - incoming VCPU whose register file is loaded into 'regs'
 *         (may be NULL)
 * regs  - live register frame being saved/restored
 */
void arch_vcpu_regs_switch(struct vmm_vcpu *tvcpu, 
			  struct vmm_vcpu *vcpu,
			  arch_regs_t *regs)
{
	unsigned long um_bit = (0x01UL << CP0_STATUS_UM_SHIFT);

	/* Preserve the current frame for the VCPU being switched out */
	if (tvcpu != NULL) {
		vmm_memcpy(mips_uregs(tvcpu), regs, sizeof(arch_regs_t));
	}

	if (vcpu == NULL) {
		return;
	}

	/* Normal (guest) VCPUs run in user mode; others in kernel mode */
	if (vcpu->is_normal) {
		mips_uregs(vcpu)->cp0_status = read_c0_status() | um_bit;
	} else {
		mips_uregs(vcpu)->cp0_status = read_c0_status() & ~um_bit;
	}

	vmm_memcpy(regs, mips_uregs(vcpu), sizeof(arch_regs_t));
}
Exemplo n.º 6
0
/* Map the ACPI table at the given physical address and copy it out.
 *
 * addr - physical address of the table
 * tb   - destination buffer, or NULL to only query the table length
 * size - capacity of 'tb' in bytes
 * name - expected 4-character table signature
 *
 * Returns the table length on success, VMM_EFAIL on mapping, signature,
 * size or checksum errors.
 */
static int acpi_read_sdt_at(physical_addr_t addr,
                            struct acpi_sdt_hdr * tb,
                            size_t size,
                            const char * name)
{
    struct acpi_sdt_hdr hdr;
    void *sdt_va = NULL;
    int ret;

    sdt_va = (void *)vmm_host_iomap(addr, PAGE_SIZE);
    if (unlikely(!sdt_va)) {
        /* FIX: format string lacked the %s specifier for __func__,
         * which was instead consumed by %x and 'addr' dropped */
        vmm_printf("ACPI ERROR: %s: Failed to map physical address 0x%x.\n",
                   __func__, addr);
        return VMM_EFAIL;
    }

    /* if NULL is supplied, we only return the size of the table */
    if (tb == NULL) {
        vmm_memcpy(&hdr, sdt_va, sizeof(struct acpi_sdt_hdr));
        ret = hdr.len;
        goto done;
    }

    /* Copy just the header first so we can validate it */
    vmm_memcpy(tb, sdt_va, sizeof(struct acpi_sdt_hdr));

    if (acpi_check_signature((const char *)tb->signature,
                             (const char *)name)) {
        vmm_printf("ACPI ERROR: acpi %s signature does not match\n", name);
        ret = VMM_EFAIL;
        goto done;
    }

    if (size < tb->len) {
        vmm_printf("ACPI ERROR: acpi buffer too small for %s\n", name);
        ret = VMM_EFAIL;
        goto done;
    }

    /* Header checks out: copy the full table */
    vmm_memcpy(tb, sdt_va, size);

    if (acpi_check_csum(tb, tb->len)) {
        vmm_printf("ACPI ERROR: acpi %s checksum does not match\n", name);
        ret = VMM_EFAIL;
        goto done;
    }

    ret = tb->len;

done:
    /* FIX: the mapping was never released on any path, leaking one
     * page of virtual address space per call */
    vmm_host_iounmap((virtual_addr_t)sdt_va, PAGE_SIZE);
    return ret;
}
Exemplo n.º 7
0
/* Parse a flattened device tree image into an in-memory device tree.
 *
 * fdt_addr           - virtual address where the FDT image is loaded
 * root               - out: newly allocated root node of the parsed tree
 * string_buffer      - out: newly allocated copy of the FDT strings block
 * string_buffer_size - out: size of that copy in bytes
 *
 * Returns VMM_OK on success, VMM_EFAIL on bad arguments, bad magic, or
 * allocation failure.
 */
int vmm_libfdt_parse(virtual_addr_t fdt_addr,
		     vmm_devtree_node_t ** root,
		     char **string_buffer, size_t * string_buffer_size)
{
	virtual_addr_t data_addr, str_addr;
	size_t data_size, str_size;
	char *data_ptr;
	struct vmm_fdt_header *header;

	/* FIX: guard output pointers before use */
	if (!root || !string_buffer || !string_buffer_size) {
		return VMM_EFAIL;
	}

	header = (struct vmm_fdt_header *)fdt_addr;

	/* Check magic number of header for sanity */
	if (header->magic != FDT_MAGIC) {
		return VMM_EFAIL;
	}

	/* Compute data location & size */
	data_addr = fdt_addr;
	data_addr += sizeof(vmm_fdt_header_t);
	data_addr += sizeof(vmm_fdt_reserve_entry_t);
	data_size = header->size_dt_struct;

	/* Compute strings location & size */
	str_addr = data_addr + data_size;
	str_size = header->size_dt_strings;

	/* Allocate string buffer */
	*string_buffer = vmm_malloc(str_size);
	if (!*string_buffer) {
		/* FIX: allocation result was used unchecked */
		return VMM_EFAIL;
	}
	vmm_memcpy(*string_buffer, (char *)str_addr, str_size);
	*string_buffer_size = str_size;

	/* Setup root node */
	*root = vmm_malloc(sizeof(vmm_devtree_node_t));
	if (!*root) {
		/* FIX: release the string buffer on failure */
		vmm_free(*string_buffer);
		*string_buffer = NULL;
		return VMM_EFAIL;
	}
	INIT_LIST_HEAD(&(*root)->head);
	INIT_LIST_HEAD(&(*root)->attr_list);
	INIT_LIST_HEAD(&(*root)->child_list);
	(*root)->name = NULL;
	(*root)->type = VMM_DEVTREE_NODETYPE_UNKNOWN;
	(*root)->priv = NULL;
	(*root)->parent = NULL;

	/* Parse FDT recursively */
	data_ptr = (char *)data_addr;
	libfdt_node_parse_recursive(*root, &data_ptr, *string_buffer);

	return VMM_OK;
}
Exemplo n.º 8
0
/* Append a copy of 'data' (alist->element_size bytes) to the array list
 * by moving a slot from the free list to the tail of the used list.
 *
 * Returns TRUE on success, FALSE on NULL arguments or when no free
 * slots remain.
 */
BOOLEAN array_list_add(ARRAY_LIST_HANDLE alist, void* data)
{
    LIST_ELEMENT *free_element = NULL;
    ARRAY_LIST_ELEMENT *free_list_entry = NULL;

    /* FIX: the NULL checks must come first — the original evaluated
     * list_is_empty(&alist->free_list) before testing alist == NULL,
     * dereferencing a possibly-NULL handle */
    if(alist == NULL || data == NULL || list_is_empty(&alist->free_list)) {
        return FALSE;
    }
    /* Move the first free slot onto the tail of the used list */
    free_element = alist->free_list.next;
    list_remove(free_element);
    list_add(alist->used_list.prev, free_element);
    alist->num_of_used_elements++;

    free_list_entry = LIST_ENTRY(free_element, ARRAY_LIST_ELEMENT, list);
    vmm_memcpy(free_list_entry->data, data, alist->element_size);
    return TRUE;
}
Exemplo n.º 9
0
/* Remove the oldest key from the ring buffer and copy it into dstkey
 * (rb->key_size bytes). Returns TRUE if a key was dequeued, FALSE on
 * bad arguments or when the buffer is empty.
 */
bool vmm_ringbuf_dequeue(struct vmm_ringbuf *rb, void *dstkey)
{
	u32 next_read;
	bool have_data;
	void *slot;

	if (!rb || !dstkey) {
		return FALSE;
	}

	vmm_spin_lock(&rb->lock);

	/* Empty when read and write cursors coincide */
	have_data = (rb->read_pos != rb->write_pos);

	if (have_data) {
		slot = rb->keys + (rb->read_pos * rb->key_size);

		/* Fast paths for common key sizes; memcpy otherwise */
		switch (rb->key_size) {
		case 1:
			*((u8 *)dstkey) = *((u8 *)slot);
			break;
		case 2:
			*((u16 *)dstkey) = *((u16 *)slot);
			break;
		case 4:
			*((u32 *)dstkey) = *((u32 *)slot);
			break;
		default:
			vmm_memcpy(dstkey, slot, rb->key_size);
			break;
		};

		/* Advance the read cursor, wrapping modulo key_count */
		next_read = rb->read_pos + 1;
		if (next_read >= rb->key_count) {
			next_read -= rb->key_count;
		}
		rb->read_pos = next_read;
		rb->avail_count--;
	}

	vmm_spin_unlock(&rb->lock);

	return have_data;
}
Exemplo n.º 10
0
/* Peek at the key 'index' positions ahead of the read cursor without
 * removing it. 'index' must be less than rb->key_count.
 *
 * Returns TRUE and fills dstkey (rb->key_size bytes) on success,
 * FALSE on bad arguments or out-of-range index.
 *
 * NOTE(review): index is only bounded by key_count, not by the number
 * of currently-stored keys — confirm callers never peek past
 * avail_count.
 */
bool vmm_ringbuf_getkey(struct vmm_ringbuf *rb, u32 index, void *dstkey)
{
	if (!rb || !dstkey) {
		return FALSE;
	}

	if (rb->key_count <= index) {
		return FALSE;
	}
	
	vmm_spin_lock(&rb->lock);

	/* Translate to an absolute slot, wrapping modulo key_count */
	index = (rb->read_pos + index);
	if (rb->key_count <= index) {
		index -= rb->key_count;
	}
	/* Fast paths for common key sizes; memcpy otherwise */
	switch(rb->key_size) {
	case 1:
		*((u8 *)dstkey) =
		*((u8 *)(rb->keys + (index * rb->key_size)));
		break;
	case 2:
		*((u16 *)dstkey) =
		*((u16 *)(rb->keys + (index * rb->key_size)));
		break;
	case 4:
		*((u32 *)dstkey) = 
		*((u32 *)(rb->keys + (index * rb->key_size)));
		break;
	default:
		vmm_memcpy(dstkey, 
			   rb->keys + (index * rb->key_size),
			   rb->key_size);
		break;
	};

	vmm_spin_unlock(&rb->lock);

	return TRUE;
}
Exemplo n.º 11
0
/* Initialise an NE2000 NIC instance: allocate the receive ring buffer,
 * read the station address from the card's PROM, program the buffer
 * page layout and start the chip.
 *
 * Returns VMM_OK on success, VMM_EFAIL on any failure.
 *
 * NOTE(review): on failure after the ring buffer is allocated, rx_rb
 * is intentionally kept (it is reused on a retry since the !rx_rb
 * check guards allocation) — confirm this is the intended ownership.
 */
int ne2k_init(struct nic_priv_data *nic_data)
{
	int r;
	u8 eth_addr[6];

	if (!nic_data) {
		return VMM_EFAIL;
	}

	/* Allocate the byte-granular receive ring buffer once */
	if (!nic_data->rx_rb) {
		nic_data->rx_rb = vmm_ringbuf_alloc(1, 2000);
		if (!nic_data->rx_rb) {
			vmm_printf("Cannot allocate receive buffer\n");
			return VMM_EFAIL;
		}
	}

	/* ISA-mapped base of the NE2000 register window */
	nic_data->base = (u8 *) (isa_vbase + CONFIG_DRIVER_NE2000_BASE);

	/* Read the MAC address from the card PROM */
	r = get_prom(nic_data, eth_addr);
	if (r != VMM_OK)
		return VMM_EFAIL;

	/* Program data port and on-card buffer page layout */
	nic_data->data = nic_data->base + DP_DATA;
	nic_data->tx_buf1 = START_PG;
	nic_data->tx_buf2 = START_PG2;
	nic_data->rx_buf_start = RX_START;
	nic_data->rx_buf_end = RX_END;
	vmm_memcpy(nic_data->esa, eth_addr, sizeof(nic_data->esa));

	if (dp83902a_init(nic_data) != true) {
		return VMM_EFAIL;
	}

	dp83902a_start(nic_data, eth_addr);
	nic_data->initialized = 1;

	return VMM_OK;
}
Exemplo n.º 12
0
/* Copy 'bcnt' bytes between two physical address ranges by mapping one
 * destination page and one source page at a time, remapping whenever
 * the running copy crosses a page boundary.
 *
 * cdev  - character device for progress/error messages
 * daddr - destination physical address
 * saddr - source physical address
 * bcnt  - number of bytes to copy
 *
 * Returns VMM_OK on success or the unmap error code on failure.
 */
int cmd_memory_copy(struct vmm_chardev *cdev, physical_addr_t daddr, 
		    physical_addr_t saddr, u32 bcnt)
{
	int rc = VMM_OK;
	int rc2;
	u32 b = 0, b2copy;
	bool dpage_mapped, spage_mapped;
	virtual_addr_t dva, dpage_va, sva, spage_va;
	physical_addr_t dpage_pa, spage_pa;

	/* Map the initial destination and source pages */
	dpage_pa = daddr - (daddr & VMM_PAGE_MASK);
	dpage_va = vmm_host_iomap(dpage_pa, VMM_PAGE_SIZE);
	dpage_mapped = TRUE;
	spage_pa = saddr - (saddr & VMM_PAGE_MASK);
	spage_va = vmm_host_iomap(spage_pa, VMM_PAGE_SIZE);
	spage_mapped = TRUE;

	while (b < bcnt) {
		/* Remap the destination if we crossed into a new page */
		if (dpage_pa != (daddr - (daddr & VMM_PAGE_MASK))) {
			if (dpage_mapped) {
				rc = vmm_host_iounmap(dpage_va, VMM_PAGE_SIZE);
				dpage_mapped = FALSE;
				if (rc) {
					vmm_cprintf(cdev, 
					"Error: Failed to unmap memory.\n");
					/* FIX: fall through to cleanup so the
					 * other page mapping is not leaked */
					goto cleanup;
				}
			}
			dpage_pa = daddr - (daddr & VMM_PAGE_MASK);
			dpage_va = vmm_host_iomap(dpage_pa, VMM_PAGE_SIZE);
			dpage_mapped = TRUE;
		}
		dva = dpage_va + (virtual_addr_t)(daddr & VMM_PAGE_MASK);
		/* Remap the source if we crossed into a new page */
		if (spage_pa != (saddr - (saddr & VMM_PAGE_MASK))) {
			if (spage_mapped) {
				rc = vmm_host_iounmap(spage_va, VMM_PAGE_SIZE);
				spage_mapped = FALSE;
				if (rc) {
					vmm_cprintf(cdev, 
					"Error: Failed to unmap memory.\n");
					goto cleanup;
				}
			}
			spage_pa = saddr - (saddr & VMM_PAGE_MASK);
			spage_va = vmm_host_iomap(spage_pa, VMM_PAGE_SIZE);
			spage_mapped = TRUE;
		}
		sva = spage_va + (virtual_addr_t)(saddr & VMM_PAGE_MASK);
		/* Copy only up to the nearer of the two page ends */
		if ((daddr & VMM_PAGE_MASK) < (saddr & VMM_PAGE_MASK)) {
			b2copy = VMM_PAGE_SIZE - (u32)(saddr & VMM_PAGE_MASK);
		} else {
			b2copy = VMM_PAGE_SIZE - (u32)(daddr & VMM_PAGE_MASK);
		}
		b2copy = ((bcnt - b) < b2copy) ? (bcnt - b) : b2copy;
		vmm_memcpy((void *)dva, (void *)sva, b2copy);
		b += b2copy;
		daddr += b2copy;
		saddr += b2copy;
	}

	vmm_cprintf(cdev, "Copied %d (0x%x) bytes.\n", b, b);

cleanup:
	/* Release whichever pages are still mapped; preserve the first
	 * error code seen */
	if (dpage_mapped) {
		rc2 = vmm_host_iounmap(dpage_va, VMM_PAGE_SIZE);
		if (rc2) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			if (!rc) {
				rc = rc2;
			}
		}
		dpage_mapped = FALSE;
	}
	if (spage_mapped) {
		rc2 = vmm_host_iounmap(spage_va, VMM_PAGE_SIZE);
		if (rc2) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			if (!rc) {
				rc = rc2;
			}
		}
		spage_mapped = FALSE;
	}
	return rc;
}
Exemplo n.º 13
0
/*-----------------------------------------------------------------------------------*/
void
uip_arp_out(void)
{
	struct arp_entry *tabptr;

	/* Prepend an Ethernet header to the outgoing IP packet in uip_buf.
	   Uses the file-scope globals i, ipaddr, arp_table, uip_hostaddr,
	   uip_netmask, uip_draddr and uip_ethaddr. */

	/* Find the destination IP address in the ARP table and construct
	   the Ethernet header. If the destination IP addres isn't on the
	   local network, we use the default router's IP address instead.

	   If not ARP table entry is found, we overwrite the original IP
	   packet with an ARP request for the IP address. */

	/* First check if destination is a local broadcast. */
	if(uip_ipaddr_cmp(IPBUF->destipaddr, broadcast_ipaddr)) {
		vmm_memcpy(IPBUF->ethhdr.dest.addr, broadcast_ethaddr.addr, 6);
	} else {
		/* Check if the destination address is on the local network. */
		if(!uip_ipaddr_maskcmp(IPBUF->destipaddr, uip_hostaddr, uip_netmask)) {
			/* Destination address was not on the local network, so we need to
			   use the default router's IP address instead of the destination
			   address when determining the MAC address. */
			uip_ipaddr_copy(ipaddr, uip_draddr);
		} else {
			/* Else, we use the destination IP address. */
			uip_ipaddr_copy(ipaddr, IPBUF->destipaddr);
		}

		/* Look the next-hop IP up in the ARP table. */
		for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {
			tabptr = &arp_table[i];
			if(uip_ipaddr_cmp(ipaddr, tabptr->ipaddr)) {
				break;
			}
		}

		if(i == UIP_ARPTAB_SIZE) {
			/* The destination address was not in our ARP table, so we
			   overwrite the IP packet with an ARP request. */

			vmm_memset(BUF->ethhdr.dest.addr, 0xff, 6);
			vmm_memset(BUF->dhwaddr.addr, 0x00, 6);
			vmm_memcpy(BUF->ethhdr.src.addr, uip_ethaddr.addr, 6);
			vmm_memcpy(BUF->shwaddr.addr, uip_ethaddr.addr, 6);

			uip_ipaddr_copy(BUF->dipaddr, ipaddr);
			uip_ipaddr_copy(BUF->sipaddr, uip_hostaddr);
			BUF->opcode = HTONS(ARP_REQUEST); /* ARP request. */
			BUF->hwtype = HTONS(ARP_HWTYPE_ETH);
			BUF->protocol = HTONS(UIP_ETHTYPE_IP);
			BUF->hwlen = 6;
			BUF->protolen = 4;
			BUF->ethhdr.type = HTONS(UIP_ETHTYPE_ARP);

			uip_appdata = &uip_buf[UIP_TCPIP_HLEN + UIP_LLH_LEN];

			uip_len = sizeof(struct arp_hdr);
			return;
		}

		/* Build an ethernet header. */
		vmm_memcpy(IPBUF->ethhdr.dest.addr, tabptr->ethaddr.addr, 6);
	}
	vmm_memcpy(IPBUF->ethhdr.src.addr, uip_ethaddr.addr, 6);

	IPBUF->ethhdr.type = HTONS(UIP_ETHTYPE_IP);

	/* Account for the Ethernet header we just prepended. */
	uip_len += sizeof(struct uip_eth_hdr);
}
Exemplo n.º 14
0
/*-----------------------------------------------------------------------------------*/
static void
uip_arp_update(u16 *ipaddr, struct uip_eth_addr *ethaddr)
{
	register struct arp_entry *tabptr;
	/* Walk through the ARP mapping table and try to find an entry to
	   update. If none is found, the IP -> MAC address mapping is
	   inserted in the ARP table. */
	for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {

		tabptr = &arp_table[i];
		/* Only check those entries that are actually in use. */
		if(tabptr->ipaddr[0] != 0 &&
			tabptr->ipaddr[1] != 0) {

			/* Check if the source IP address of the incoming packet matches
			   the IP address in this ARP table entry. */
			if(ipaddr[0] == tabptr->ipaddr[0] &&
				ipaddr[1] == tabptr->ipaddr[1]) {

				/* An old entry found, update this and return. */
				vmm_memcpy(tabptr->ethaddr.addr, ethaddr->addr, 6);
				tabptr->time = arptime;

				return;
			}
		}
	}

	/* If we get here, no existing ARP table entry was found, so we
	   create one. */

	/* First, we try to find an unused entry in the ARP table. */
	for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {
		tabptr = &arp_table[i];
		if(tabptr->ipaddr[0] == 0 &&
			tabptr->ipaddr[1] == 0) {
			break;
		}
	}

	/* If no unused entry is found, we try to find the oldest entry and
	   throw it away. */
	if(i == UIP_ARPTAB_SIZE) {
		tmpage = 0;
		c = 0;
		for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {
			tabptr = &arp_table[i];
			if(arptime - tabptr->time > tmpage) {
				tmpage = arptime - tabptr->time;
				c = i;
			}
		}
		i = c;
		tabptr = &arp_table[i];
	}

	/* Now, i is the ARP table entry which we will fill with the new
	   information. */
	vmm_memcpy(tabptr->ipaddr, ipaddr, 4);
	vmm_memcpy(tabptr->ethaddr.addr, ethaddr->addr, 6);
	tabptr->time = arptime;
}
Exemplo n.º 15
0
/* Discover, sort and initialise all built-in modules.
 *
 * Counts contiguous valid entries in the architecture module table,
 * orders them by ascending init priority, then runs each module's
 * init() hook, recording its status.
 *
 * Returns VMM_OK (per-module init errors are logged, not fatal).
 */
int __init vmm_modules_init(void)
{
	int init_ret;
	u32 idx, k;
	struct vmm_module swap_buf;

	/* Start from a clean control structure */
	vmm_memset(&modules_ctrl, 0, sizeof(modules_ctrl));

	modules_ctrl.table = (struct vmm_module *) arch_modtbl_vaddr();
	modules_ctrl.table_size = arch_modtbl_size() / sizeof(struct vmm_module);
	modules_ctrl.mod_count = 0;

	/* Valid entries are contiguous at the start of the table;
	   stop counting at the first bad signature */
	for (idx = 0; idx < modules_ctrl.table_size; idx++) {
		if (modules_ctrl.table[idx].signature != VMM_MODULE_SIGNATURE) {
			break;
		}
		modules_ctrl.mod_count++;
	}

	/* Nothing to do when no valid modules were found */
	if (!modules_ctrl.mod_count) {
		return VMM_OK;
	}

	/* Selection sort by ascending init priority */
	for (idx = 0; idx < (modules_ctrl.mod_count - 1); idx++) {
		for (k = idx + 1; k < modules_ctrl.mod_count; k++) {
			if (modules_ctrl.table[k].ipriority >=
			    modules_ctrl.table[idx].ipriority) {
				continue;
			}
			vmm_memcpy(&swap_buf,
				   &modules_ctrl.table[idx],
				   sizeof(swap_buf));
			vmm_memcpy(&modules_ctrl.table[idx],
				   &modules_ctrl.table[k],
				   sizeof(modules_ctrl.table[idx]));
			vmm_memcpy(&modules_ctrl.table[k],
				   &swap_buf,
				   sizeof(modules_ctrl.table[k]));
		}
	}

	/* Run init hooks in priority order */
	for (idx = 0; idx < modules_ctrl.mod_count; idx++) {
		if (!modules_ctrl.table[idx].init) {
			continue;
		}
#if defined(CONFIG_VERBOSE_MODE)
		vmm_printf("Initialize %s\n",
			   modules_ctrl.table[idx].name);
#endif
		init_ret = modules_ctrl.table[idx].init();
		if (init_ret) {
			vmm_printf("%s: %s init error %d\n", 
			__func__, modules_ctrl.table[idx].name, init_ret);
		}
		modules_ctrl.table[idx].istatus = init_ret;
	}

	return VMM_OK;
}
Exemplo n.º 16
0
/* Dump debug state (file/line, signature, exception info, GPRs, stack,
 * VMCS) into a guest physical buffer at g_debug_gpa when a deadloop or
 * assert fires on 'cpu_id'.  Per-CPU file/line info is always written;
 * the heavier one-time dump is serialised via 'dump_started'.
 *
 * NOTE(review): the err_msg sprintf uses a hard-coded 128 rather than
 * BUFFER_SIZE — confirm BUFFER_SIZE >= 128 or make them consistent.
 */
void vmm_deadloop_internal(UINT32 file_code, UINT32 line_num, GUEST_CPU_HANDLE gcpu)
{
    static UINT32 dump_started = 0;
    char        buffer[BUFFER_SIZE], err_msg[BUFFER_SIZE];
    UINT64      rsp, stack_base;
    UINT32      size;
    CPU_ID      cpu_id;
    EXCEPT_INFO header;

    // skip dumping debug info if deadloop/assert happened before launch
    if (g_debug_gpa == 0)
        return;

    cpu_id = hw_cpu_id();
    if (cpu_id >= MAX_CPUS)
        return;

    // pre-build the error message reused by every copy failure below
    vmm_sprintf_s(err_msg, 128, "CPU%d: %s: Error: Could not copy deadloop message back to guest\n",
            cpu_id, __FUNCTION__);

    // send cpu id, file code, line number to serial port
    vmm_printf("%02d%04d%04d\n", cpu_id, file_code, line_num);

    // must match format defined in FILE_LINE_INFO
    size = vmm_sprintf_s(buffer, BUFFER_SIZE, "%04d%04d", file_code, line_num);

    // copy file code/line number to guest buffer at offset defined in DEADLOOP_DUMP
    // strlen(signature) + sizeof(cpu_id) + file_line[cpu]
    if (!vmm_copy_to_guest_phy_addr(gcpu,
                                   (void*)(g_debug_gpa+8+8+(cpu_id*size)),
                                   size,
                                   (void*)buffer)) {
        VMM_LOG(mask_uvmm, level_error, err_msg);
        }

    // only copy signature, VERSION, cpu_id, exception info, vmcs to guest
    // buffer once
    if (hw_interlocked_compare_exchange((INT32*)&dump_started,0,1) == 0) {
        size = vmm_sprintf_s(buffer, BUFFER_SIZE, "%c%c%c%c%c%c%c%c%s%04d",
            DEADLOOP_SIGNATURE[0], DEADLOOP_SIGNATURE[1],
            DEADLOOP_SIGNATURE[2], DEADLOOP_SIGNATURE[3],
            DEADLOOP_SIGNATURE[4], DEADLOOP_SIGNATURE[5],
            DEADLOOP_SIGNATURE[6], DEADLOOP_SIGNATURE[7], VERSION, cpu_id);

        // copy signature and cpu_id to guest buffer
        if (!vmm_copy_to_guest_phy_addr(gcpu,
                                       (void*)(g_debug_gpa),
                                       size,
                                       (void*)buffer)) {
            VMM_LOG(mask_uvmm, level_error, err_msg);
        }

        // clear buffer erasing the signature or setting no exception flag
        vmm_zeromem(buffer, sizeof(UINT64));

        // copy exception info to guest buffer
        if (g_exception_stack != NULL) {
                vmm_memcpy((void *)&header.exception_stack, g_exception_stack, sizeof(ISR_PARAMETERS_ON_STACK));
                header.base_address = vmm_startup_data.vmm_memory_layout[uvmm_image].base_address;

            // page faults additionally report the faulting address (CR2)
            if (g_exception_stack->a.vector_id == IA32_EXCEPTION_VECTOR_PAGE_FAULT)
                header.cr2 = hw_read_cr2();

            // copy exception info to guest buffer
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                           (void*)(g_debug_gpa+OFFSET_EXCEPTION),
                                           sizeof(EXCEPT_INFO),
                                           (void*)&header)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }

                // copy GPRs to guest buffer
                if (!vmm_copy_to_guest_phy_addr(gcpu,
                                           (void*)(g_debug_gpa+OFFSET_GPR),
                                           sizeof(VMM_GP_REGISTERS),
                                           (void*)&g_exception_gpr)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }

            // copy stack to guest buffer
            // the stack pointer slot differs for vectors pushing an error code
            rsp = isr_error_code_required((VECTOR_ID)g_exception_stack->a.vector_id) ?
                        g_exception_stack->u.errcode_exception.sp :
                        g_exception_stack->u.exception.sp;

            vmm_stack_get_stack_pointer_for_cpu(cpu_id, &stack_base);

            // clamp the trace so we never read past the stack base
            size = sizeof(UINT64)*STACK_TRACE_SIZE;
            if ((rsp+size) > stack_base)
                size = (UINT32)(stack_base-rsp);

            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                           (void*)(g_debug_gpa+OFFSET_STACK),
                                           size,
                                               (void*)rsp)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }
        } else {
            // Clear base image address indicating exception did not happen
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                           (void*)(g_debug_gpa+OFFSET_EXCEPTION),
                                           sizeof(UINT64),
					    (void*)buffer)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
	    }
        }

        // copy vmcs to guest buffer
        vmcs_dump_all(gcpu);
    }
}
Exemplo n.º 17
0
/* One-time ACPI bring-up: locate the RSD Pointer, read the RSDT,
 * record the signature/length of every referenced system description
 * table, and map the MADT ("APIC") header.
 *
 * Idempotent: does nothing if acpi_ctxt already exists.
 * Returns VMM_OK on success, VMM_EFAIL on any failure (context freed).
 *
 * NOTE(review): sdt_trans[i] is written for every RSDT entry without a
 * visible bound check on the sdt_trans array — confirm its capacity
 * covers nr_sys_hdr.  Also, the per-entry 'hdr' mappings are never
 * unmapped (the iounmap is commented out below) — confirm intent.
 */
int acpi_init(void)
{
    int i;

    if (!acpi_ctxt) {
        acpi_ctxt = vmm_malloc(sizeof(struct acpi_context));
        if (!acpi_ctxt) {
            vmm_printf("ACPI ERROR: Failed to allocate memory for"
                       " ACPI context.\n");
            return VMM_EFAIL;
        }

        /* Scan for the RSD Pointer structure */
        acpi_ctxt->root_desc =
            (struct acpi_rsdp *)find_root_system_descriptor();
        acpi_ctxt->rsdt = NULL;

        if (acpi_ctxt->root_desc == NULL) {
            vmm_printf("ACPI ERROR: No root system descriptor"
                       " table found!\n");
            goto rdesc_fail;
        }

        if (acpi_ctxt->root_desc->rsdt_addr == 0) {
            vmm_printf("ACPI ERROR: No root descriptor found"
                       " in RSD Pointer!\n");
            goto rsdt_fail;
        }

        acpi_ctxt->rsdt =
            (struct acpi_rsdt *)vmm_malloc(sizeof(struct acpi_rsdt));

        if (!acpi_ctxt->rsdt)
            goto rsdt_fail;

        /* Read and validate the RSDT itself */
        if (acpi_read_sdt_at(acpi_ctxt->root_desc->rsdt_addr,
                             (struct acpi_sdt_hdr *)acpi_ctxt->rsdt,
                             sizeof(struct acpi_rsdt),
                             RSDT_SIGNATURE) < 0) {
            goto sdt_fail;
        }

        /* Number of 32-bit table pointers following the RSDT header */
        acpi_ctxt->nr_sys_hdr = (acpi_ctxt->rsdt->hdr.len
                                 - sizeof(struct acpi_sdt_hdr))/sizeof(u32);

        /* Record signature and length of each referenced table */
        for (i = 0; i < acpi_ctxt->nr_sys_hdr; i++) {
            struct acpi_sdt_hdr *hdr;

            hdr = (struct acpi_sdt_hdr *)
                  vmm_host_iomap(acpi_ctxt->rsdt->data[i],
                                 PAGE_SIZE);

            if (hdr == NULL) {
                vmm_printf("ACPI ERROR: Cannot read header at 0x%x\n",
                           acpi_ctxt->rsdt->data[i]);
                goto sdt_fail;
            }

            vmm_memcpy(&acpi_ctxt->sdt_trans[i].signature, &hdr->signature, SDT_SIGN_LEN);

            acpi_ctxt->sdt_trans[i].signature[SDT_SIGN_LEN] = '\0';
            acpi_ctxt->sdt_trans[i].length = hdr->len;

            //vmm_host_iounmap((virtual_addr_t)hdr, PAGE_SIZE);
        }

        /* Keep the MADT mapped for later interrupt-controller parsing */
        acpi_ctxt->madt_hdr = (struct acpi_madt_hdr *)
                              vmm_host_iomap(acpi_get_table_base("APIC"),
                                             PAGE_SIZE);
        if (acpi_ctxt->madt_hdr == NULL)
            goto sdt_fail;

    }

    return VMM_OK;

/* Error unwinding: free/unmap in reverse order of acquisition */
sdt_fail:
    vmm_free(acpi_ctxt->rsdt);
rsdt_fail:
    vmm_host_iounmap((virtual_addr_t)acpi_ctxt->root_desc,
                     PAGE_SIZE);
rdesc_fail:
    vmm_free(acpi_ctxt);
    acpi_ctxt = NULL;

    return VMM_EFAIL;
}
Exemplo n.º 18
0
/* lock_gate_alloc
   Allocate a readers-writer gate structure
   => ptr = pointer to word in which to store address of allocated gate
      description = NULL-terminated human-readable label 
   <= 0 for success, or an error code (and ptr will be set to NULL)
*/
/* lock_rw_gate_alloc
   Allocate a readers-writer gate structure from a pool of page-backed
   slots, growing the pool by one page if every existing pool is full.
   => ptr = pointer to word in which to store address of allocated gate
      description = NULL-terminated human-readable label 
   <= 0 for success, or an error code (and ptr will be set to NULL)
*/
kresult lock_rw_gate_alloc(rw_gate **ptr, char *description)
{
   kresult err;
   rw_gate_pool *search = &gate_pool;
   unsigned char slot_search_start, slot_search;
   unsigned char slot_found = 0;
   rw_gate *new_gate;

   /* sanity check */
   if(!ptr || !description) return e_bad_params;
   if(vmm_nullbufferlen(description) >= LOCK_DESCRIPT_LENGTH)
      return e_bad_params;
   
   /* lock the locking code */
   lock_gate(lock_lock, LOCK_WRITE);
   
   /* try to find an existing pool with free slots */
   while(search && search->nr_free == 0)
      search = search->next;
   
   /* if search is still NULL then grab a new page for the pool */
   if(!search)
   {
      void *new_page;
      err = vmm_req_phys_pg(&new_page, 1);
      if(err)
      {
         unlock_gate(lock_lock, LOCK_WRITE);
         return err; /* bail out if we're out of pages! */
      }
      
      /* allocate a new structure to describe this pool or give up */
      err = vmm_malloc((void **)&search, sizeof(rw_gate_pool));
      if(err)
      {
         unlock_gate(lock_lock, LOCK_WRITE);
         return err; /* bail out if we're out of memory! */
      }
      
      /* initialise the pool structure and add to the head of
         the linked list so it can be found quickly */
      vmm_memset(search, 0, sizeof(rw_gate_pool));
      search->nr_free = (unsigned char)LOCK_POOL_BITMAP_LENGTH_BITS;
      search->physical_base = new_page;
      search->virtual_base = KERNEL_PHYS2LOG(new_page);
      
      vmm_memset(KERNEL_PHYS2LOG(new_page), 0, MEM_PGSIZE);
            
      /* add us to the start of the list */
      if(gate_pool.next)
      {
         search->next = gate_pool.next;
         search->next->previous = search;
      }

      gate_pool.next = search;
      search->previous = &gate_pool;
   }
   
   /* search is now valid or we wouldn't be here, so locate a free slot by
      scanning through the bitmap, starting at the last known free slot */
   slot_search = slot_search_start = search->last_free;
   do
   {
      if(lock_bitmap_test(search->bitmap, slot_search) == 0)
      {
         /* FIX: stop here — the original kept incrementing slot_search
            after the free slot was found, so the slot actually claimed
            below was the one AFTER the free slot */
         slot_found = 1;
         break;
      }
      
      slot_search++;
      if(slot_search >= LOCK_POOL_BITMAP_LENGTH_BITS)
         slot_search = 0;
   }
   while(slot_search_start != slot_search);
   
   /* set the bit and grab the address of the slot to use for the lock */
   if(slot_found)
   {
      lock_bitmap_set(search->bitmap, slot_search);
      
      /* initialise the gate structure.
         FIX: add the byte offset BEFORE casting to rw_gate *, otherwise
         pointer arithmetic scales the offset by sizeof(rw_gate) again */
      new_gate = (rw_gate *)((unsigned int)search->virtual_base + (sizeof(rw_gate) * slot_search));
      vmm_memset(new_gate, 0, (sizeof(rw_gate)));
      *ptr = new_gate;
      
#ifdef DEBUG_LOCK_RWGATE_PROFILE
      /* FIX: copy the terminator as 1 byte — sizeof('\0') is sizeof(int)
         in C, which over-copied past the end of the description */
      vmm_memcpy(&(new_gate->description), description, vmm_nullbufferlen(description) + 1);
#endif
                 
      /* update accounting: resume the next search just past the slot we
         used, wrapping at the end of the bitmap.
         FIX: the original reset slot_search instead of last_free on
         wrap, leaving last_free out of range */
      search->nr_free--;
      search->last_free = slot_search + 1;
      if(search->last_free >= LOCK_POOL_BITMAP_LENGTH_BITS)
         search->last_free = 0;
      
      /* FIX: format had four specifiers ([lock:%i] ... '%s' %p %p) but
         only three arguments were supplied */
      LOCK_DEBUG("[lock] created new readers-writer lock '%s' %p (spinlock %p)\n",
                 description, new_gate, new_gate->spinlock);
      
      unlock_gate(lock_lock, LOCK_WRITE);
      return success;
   }
   
   /* something weird has happened if we've fallen this far, so
      fall through to failure */
   unlock_gate(lock_lock, LOCK_WRITE);
   *ptr = NULL;
   return e_failure;
}
Exemplo n.º 19
0
/* Build a shadow TLB entry mapping the guest region of the given type
 * (ROM at guest VA 0x3FC00000, RAM at 0) into the VCPU's shadow TLB at
 * 'tlb_index'.  The region size must equal one of the MIPS32 TLB page
 * sizes.
 *
 * Returns VMM_OK on success, VMM_EFAIL if no matching guest region
 * exists; panics on an unsupported region size.
 */
static int map_guest_region(struct vmm_vcpu *vcpu, int region_type, int tlb_index)
{
	mips32_tlb_entry_t shadow_entry;
	physical_addr_t hphys, paddr;
	virtual_addr_t vaddr2map;
	u32 gphys_size;
	struct vmm_region *region;
	struct vmm_guest *aguest = vcpu->guest;

	vaddr2map = (region_type == VMM_REGION_TYPE_ROM ? 0x3FC00000 : 0x0);
	paddr = (region_type == VMM_REGION_TYPE_ROM ? 0x1FC00000 : 0x0);

	/*
	 * Create the initial TLB entry mapping complete RAM promised
	 * to the guest. The idea is that guest vcpu shouldn't fault
	 * on this address.
	 */
	region = vmm_guest_find_region(aguest, paddr, TRUE);
	if (region == NULL) {
		vmm_printf("Bummer!!! No guest region defined for VCPU RAM.\n");
		return VMM_EFAIL;
	}

	hphys = region->hphys_addr;
	gphys_size = region->phys_size;

	/* The region must exactly match a supported TLB page size; the
	 * page mask covers half the region per EntryLo0/EntryLo1 pair.
	 * (FIX: removed dead self-assignment and unused 'gphys' local.) */
	switch (gphys_size) {
	case TLB_PAGE_SIZE_1K:
	case TLB_PAGE_SIZE_4K:
	case TLB_PAGE_SIZE_16K:
	case TLB_PAGE_SIZE_256K:
	case TLB_PAGE_SIZE_1M:
	case TLB_PAGE_SIZE_4M:
	case TLB_PAGE_SIZE_16M:
	case TLB_PAGE_SIZE_64M:
	case TLB_PAGE_SIZE_256M:
		shadow_entry.page_mask = ((gphys_size / 2) - 1);
		break;
	default:
		vmm_panic("Guest physical memory region should be same as page"
			  " sizes available for MIPS32.\n");
	}

	/* FIXME: Guest physical/virtual should be from DTS */
	shadow_entry.entryhi._s_entryhi.vpn2 = (vaddr2map >> VPN2_SHIFT);
	shadow_entry.entryhi._s_entryhi.asid = (u8)(2 << 6);
	shadow_entry.entryhi._s_entryhi.reserved = 0;
	shadow_entry.entryhi._s_entryhi.vpn2x = 0;

	/* Even page: valid, writable, cacheable, backed by host memory */
	shadow_entry.entrylo0._s_entrylo.global = 0;
	shadow_entry.entrylo0._s_entrylo.valid = 1;
	shadow_entry.entrylo0._s_entrylo.dirty = 1;
	shadow_entry.entrylo0._s_entrylo.cacheable = 1;
	shadow_entry.entrylo0._s_entrylo.pfn = (hphys >> PAGE_SHIFT);

	/* Odd page left invalid */
	shadow_entry.entrylo1._s_entrylo.global = 0;
	shadow_entry.entrylo1._s_entrylo.valid = 0;
	shadow_entry.entrylo1._s_entrylo.dirty = 0;
	shadow_entry.entrylo1._s_entrylo.cacheable = 0;
	shadow_entry.entrylo1._s_entrylo.pfn = 0;

	vmm_memcpy((void *)&mips_sregs(vcpu)->shadow_tlb_entries[tlb_index],
		   (void *)&shadow_entry, sizeof(mips32_tlb_entry_t));

	return VMM_OK;
}