Example No. 1
/* locks_initialise
   Initialise the portable part of the locking system, using the given
   physical page to set up the locks the VMM needs before any of its
   management functions can be called
   => initial_phys_pg = base address of first physical page to use
   <= 0 for success, or an error code
*/
kresult locks_initialise(void *initial_phys_pg)
{
   /* sanity check */
   if(!initial_phys_pg) return e_bad_params;
   
   /* zero out the initial pool structure - this VMM call is safe to use.
      this also forces gate_pool.prev to be NULL, indicating it's
      the first item and therefore shouldn't be free()d */
   vmm_memset(&gate_pool, 0, sizeof(rw_gate_pool));
   
   /* plug in the base address of this initial page */
   gate_pool.physical_base = initial_phys_pg;
   gate_pool.virtual_base = KERNEL_PHYS2LOG(initial_phys_pg);
   
   /* create the first lock by hand - the lock for the locking code */
   lock_lock = (rw_gate *)gate_pool.virtual_base;
   gate_pool.bitmap[0] = 1; /* mark slot 0 as in use by lock_lock */
   gate_pool.last_free = 1;
   gate_pool.nr_free = LOCK_POOL_BITMAP_LENGTH_BITS - 1;
   vmm_memset(lock_lock, 0, sizeof(rw_gate));
   
   LOCK_DEBUG("[lock:%i] locks initialised: first gate pool at %p, first lock at %p\n",
              CPU_ID, &gate_pool, gate_pool.virtual_base);
   
   return success;
}
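
A minimal usage sketch for the call above (the early-boot allocator and panic hook are hypothetical; only locks_initialise, kresult and success come from the source):

   /* sketch: early boot, before the VMM is up */
   void *first_pg = early_page_alloc();          /* assumed pre-VMM page allocator */
   if(locks_initialise(first_pg) != success)
      kernel_panic("cannot initialise locking"); /* assumed fatal-error hook */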
Example No. 2
hvm_status ProcessGetModuleByAddr(hvm_address cr3, hvm_address addr, char *name)
{
  MODULE_DATA prev, next;
  hvm_status r;
#ifdef GUEST_WINDOWS      
  vmm_memset(&next, 0, sizeof(next));
  vmm_memset(&prev, 0, sizeof(prev));

  r = ProcessGetNextModule(cr3, NULL, &next);
  while(TRUE) {
    if(r == HVM_STATUS_END_OF_FILE)
      break;
    
    if(r != HVM_STATUS_UNSUCCESSFUL && (addr < (hvm_address) ((Bit32u) next.baseaddr + next.sizeofimage) && addr >= next.baseaddr)) {
      vmm_strncpy(name, next.name, 32);
      return HVM_STATUS_SUCCESS;
    }
    prev = next;
    vmm_memset(next.name, 0, sizeof(next.name));
    r = ProcessGetNextModule(cr3, &prev, &next);
  }
  return HVM_STATUS_UNSUCCESSFUL;
#elif defined GUEST_LINUX
  /* Module lookup is not implemented for Linux guests */
  r = HVM_STATUS_UNSUCCESSFUL;
  return r;
#endif
}
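
A hedged usage sketch for the lookup above (guest_cr3 and fault_addr are placeholders, not from the source; the 32-byte buffer matches the vmm_strncpy above):

  /* sketch: map a guest address back to its owning module (Windows guest) */
  char mod_name[32];
  if (ProcessGetModuleByAddr(guest_cr3, fault_addr, mod_name) == HVM_STATUS_SUCCESS) {
    /* mod_name now holds the module's name; log or act on it here */
  }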
Example No. 3
hvm_status ProcessGetNameByPid(hvm_address cr3, hvm_address pid, char *name)
{
  PROCESS_DATA prev, next;
  hvm_status r;

  vmm_memset(&next, 0, sizeof(next));

  r = ProcessGetNextProcess(cr3, NULL, &next);
  while(TRUE) {
    if(r == HVM_STATUS_END_OF_FILE)
      break;
    
    if(r != HVM_STATUS_UNSUCCESSFUL && pid == next.pid) {
#ifdef GUEST_WINDOWS      
      vmm_strncpy(name, next.name, 32);
#elif defined GUEST_LINUX
      vmm_strncpy(name, next.name, TASK_COMM_LEN);
#endif
      return HVM_STATUS_SUCCESS;
    }
    prev = next;
    vmm_memset(next.name, 0, sizeof(next.name));
    r = ProcessGetNextProcess(cr3, &prev, &next);
  }
  return HVM_STATUS_UNSUCCESSFUL;
}
Example No. 4
static int ne2k_driver_probe(struct vmm_driver *dev, const struct vmm_devid *devid)
{
	int rc;
	struct vmm_netdev *ndev;
	struct nic_priv_data *priv_data;
	
	ndev = vmm_malloc(sizeof(struct vmm_netdev));
	if(!ndev) {
		rc = VMM_EFAIL;
		goto free_nothing;
	}
	vmm_memset(ndev, 0, sizeof(struct vmm_netdev));

	priv_data = vmm_malloc(sizeof(struct nic_priv_data));
	if(!priv_data) {
		rc = VMM_EFAIL;
		goto free_chardev;
	}
	vmm_memset(priv_data, 0, sizeof(struct nic_priv_data));

	if (ne2k_init(priv_data)) {
		rc = VMM_EFAIL;
		goto free_port;
	}

	priv_data->txrx_thread = vmm_hyperthread_create("ne2k-isa-driver", dp83902a_poll, priv_data);
	if (!priv_data->txrx_thread) {
		rc = VMM_EFAIL;
		goto free_port;
	}

	vmm_hyperthread_run(priv_data->txrx_thread);

	vmm_strcpy(ndev->name, dev->node->name);
	ndev->dev = dev;
	ndev->ioctl = NULL;
	ndev->read = ne2k_read;
	ndev->write = ne2k_write;
	ndev->priv = (void *)priv_data;

	rc = vmm_netdev_register(ndev);
	if(rc) {
		goto free_port;
	}

	dev->priv = (void *)ndev;

	return VMM_OK;

free_port:
	vmm_free(priv_data);
free_chardev:
	vmm_free(ndev);
free_nothing:
	return rc;
}
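
The error paths above follow the usual goto-unwind idiom: each failure jumps to the label that frees everything allocated so far, with the labels falling through in reverse allocation order. A minimal sketch of the idiom (names hypothetical):

	int probe_sketch(void)
	{
		int rc;
		void *a, *b;

		a = vmm_malloc(SIZE_A);
		if (!a) { rc = VMM_EFAIL; goto fail_nothing; }
		b = vmm_malloc(SIZE_B);
		if (!b) { rc = VMM_EFAIL; goto fail_a; }
		return VMM_OK;

	fail_a:
		vmm_free(a);	/* frees unwind in reverse order of allocation */
	fail_nothing:
		return rc;
	}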
Example No. 5
int vmm_chardev_register(struct vmm_chardev * cdev)
{
	int rc;
	struct vmm_classdev *cd;

	if (!(cdev && cdev->read && cdev->write)) {
		return VMM_EFAIL;
	}

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		return VMM_EFAIL;
	}

	vmm_memset(cd, 0, sizeof(struct vmm_classdev));

	INIT_LIST_HEAD(&cd->head);
	vmm_strcpy(cd->name, cdev->name);
	cd->dev = cdev->dev;
	cd->priv = cdev;

	rc = vmm_devdrv_register_classdev(VMM_CHARDEV_CLASS_NAME, cd);
	if (rc != VMM_OK) {
		vmm_free(cd);
	}

	return rc;
}
Example No. 6
/*-----------------------------------------------------------------------------------*/
void
uip_arp_init(void)
{
	/* 'i' is the file-scope loop index shared by the uIP ARP code */
	for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {
		vmm_memset(arp_table[i].ipaddr, 0, 4);
	}
}
Example No. 7
int vmm_scheduler_init(void)
{
	/* Reset the scheduler control structure */
	vmm_memset(&sched, 0, sizeof(sched));

	/* Initialize current VCPU. (Per Host CPU) */
	sched.current_vcpu = NULL;

	/* Create idle orphan vcpu with 100 msec time slice. (Per Host CPU) */
	sched.idle_vcpu = vmm_manager_vcpu_orphan_create("idle/0",
				(virtual_addr_t)&idle_orphan,
				(virtual_addr_t)&sched.idle_vcpu_stack[VMM_IDLE_VCPU_STACK_SZ - 4],
				VMM_IDLE_VCPU_TIMESLICE);
	if (!sched.idle_vcpu) {
		return VMM_EFAIL;
	}

	/* Initialize IRQ state (Per Host CPU) */
	sched.irq_context = FALSE;

	/* Create timer event and start it. (Per Host CPU) */
	sched.ev = vmm_timer_event_create("sched", 
					  &vmm_scheduler_timer_event, 
					  NULL);
	if (!sched.ev) {
		return VMM_EFAIL;
	}
	vmm_timer_event_start(sched.ev, 0);

	return VMM_OK;
}
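
For reference, the idle orphan VCPU registered above presumably runs a trivial loop; a sketch under that assumption (the wait-for-interrupt hook is hypothetical, the body is not from the source):

	static void idle_orphan(void)
	{
		while (1) {
			arch_cpu_wait_for_irq();	/* assumed low-power wait hook */
		}
	}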
Example No. 8
static int mterm_main(void *udata)
{
	size_t cmds_len;
	char cmds[MTERM_CMD_STRING_SIZE];

	/* Print Banner */
	vmm_printf("%s", VMM_BANNER_STRING);

	/* Main loop of VMM */
	while (1) {
		/* Show prompt */
		vmm_printf("XVisor# ");
		vmm_memset(cmds, 0, sizeof(cmds));

		/* Get command string */
		vmm_gets(cmds, MTERM_CMD_STRING_SIZE, '\n');
		cmds_len = vmm_strlen(cmds);
		if (cmds_len > 0) {
			if (cmds[cmds_len - 1] == '\r')
				cmds[cmds_len - 1] = '\0';

			/* Execute command string */
			vmm_cmdmgr_execute_cmdstr(vmm_stdio_device(), cmds);
		}
	}

	return VMM_OK;
}
Example No. 9
// Function : vmdb_fill_vmexit_request
// Purpose  : Configures VMDB-related VTx controls, depending on the enable flag
//          :   DR-access
//          :   Save/Load DR
//          :   Exception on INT1
// Arguments: VMEXIT_CONTROL *vmexit_request
//          : BOOLEAN enable/disable
// Returns  : void
void vmdb_fill_vmexit_request ( OUT VMEXIT_CONTROL *vmexit_request, BOOLEAN enable)
    {
    IA32_VMCS_EXCEPTION_BITMAP            exceptions_mask;
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS exec_controls_mask;
    VM_EXIT_CONTROLS                      vmexit_controls;
    VM_ENTRY_CONTROLS                     vmenter_controls;
    UINT32                                value = enable ? (UINT32)-1 : 0;

    vmm_memset( vmexit_request, 0, sizeof( VMEXIT_CONTROL ));
    exceptions_mask.Uint32  = 0;
    exceptions_mask.Bits.DB = 1;
    vmexit_request->exceptions.bit_mask = exceptions_mask.Uint32;
    vmexit_request->exceptions.bit_request = value;

    exec_controls_mask.Uint32      = 0;
    exec_controls_mask.Bits.MovDr  = 1;
    vmexit_request->proc_ctrls.bit_mask = exec_controls_mask.Uint32;
    vmexit_request->proc_ctrls.bit_request = value;

    vmexit_controls.Uint32                 = 0;
    vmexit_controls.Bits.SaveDebugControls = 1;
    vmexit_request->vm_exit_ctrls.bit_mask = vmexit_controls.Uint32;
    vmexit_request->vm_exit_ctrls.bit_request = value;

    vmenter_controls.Uint32                 = 0;
    vmenter_controls.Bits.LoadDebugControls = 1;

    vmexit_request->vm_enter_ctrls.bit_mask = vmenter_controls.Uint32;
    vmexit_request->vm_enter_ctrls.bit_request = value;

    }
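
A hedged usage sketch for the routine above: build one request to turn the debug-related exits on, and a matching one to turn them off again (how the filled request is applied to the VMCS is assumed to be the caller's job):

    VMEXIT_CONTROL request;

    vmdb_fill_vmexit_request(&request, TRUE);   // intercept #DB, MOV DR, save/load debug regs
    // ... debug session ...
    vmdb_fill_vmexit_request(&request, FALSE);  // release the same controls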
Example No. 10
int arch_vcpu_regs_init(struct vmm_vcpu *vcpu)
{
	vmm_memset(mips_uregs(vcpu), 0, sizeof(arch_regs_t));

	if (!vcpu->is_normal) {
		/* For orphan vcpu */
		mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;
		mips_uregs(vcpu)->regs[SP_IDX] = vcpu->start_sp;
		mips_uregs(vcpu)->regs[S8_IDX] = mips_uregs(vcpu)->regs[SP_IDX];
		mips_uregs(vcpu)->cp0_status = read_c0_status();
		mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
	} else {
		/* For normal vcpu running guests */
		mips_sregs(vcpu)->cp0_regs[CP0_CAUSE_IDX] = 0x400;
		mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] = 0x40004;
		mips_uregs(vcpu)->cp0_status = read_c0_status() | (0x01UL << CP0_STATUS_UM_SHIFT);
		mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
		mips_uregs(vcpu)->cp0_entryhi &= ASID_MASK;
		mips_uregs(vcpu)->cp0_entryhi |= (0x2 << ASID_SHIFT);
		mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;

		/* All guests run from 0 and fault */
		mips_sregs(vcpu)->cp0_regs[CP0_EPC_IDX] = vcpu->start_pc;
		/* Give guest the same CPU cap as we have */
		mips_sregs(vcpu)->cp0_regs[CP0_PRID_IDX] = read_c0_prid();
	}

	return VMM_OK;
}
Example No. 11
int vmm_cpu_irq_setup(void)
{
	int rc;
	extern u32 _start_vect[];
	u32 *vectors, *vectors_data;
	u32 vec;
	cpu_page_t vec_page;

#if defined(CONFIG_ARMV7A_HIGHVEC)
	/* Enable high vectors in SCTLR */
	write_sctlr(read_sctlr() | SCTLR_V_MASK);
	vectors = (u32 *) CPU_IRQ_HIGHVEC_BASE;
#else
	vectors = (u32 *) CPU_IRQ_LOWVEC_BASE;
#endif
	vectors_data = vectors + CPU_IRQ_NR;

	/* If vectors are at correct location then do nothing */
	if ((u32) _start_vect == (u32) vectors) {
		return VMM_OK;
	}

	/* If vectors are not mapped in virtual memory then map them. */
	vmm_memset(&vec_page, 0, sizeof(cpu_page_t));
	rc = cpu_mmu_get_reserved_page((virtual_addr_t)vectors, &vec_page);
	if (rc) {
		rc = vmm_host_ram_alloc(&vec_page.pa, 
					TTBL_L2TBL_SMALL_PAGE_SIZE, 
					TRUE);
		if (rc) {
			return rc;
		}
		vec_page.va = (virtual_addr_t)vectors;
		vec_page.sz = TTBL_L2TBL_SMALL_PAGE_SIZE;
		vec_page.imp = 0;
		vec_page.dom = TTBL_L1TBL_TTE_DOM_RESERVED;
		vec_page.ap = TTBL_AP_SRW_U;
		vec_page.xn = 0;
		vec_page.c = 0;
		vec_page.b = 0;
		if ((rc = cpu_mmu_map_reserved_page(&vec_page))) {
			return rc;
		}
	}

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < CPU_IRQ_NR; vec++) {
		vectors[vec] = _start_vect[vec];
		vectors_data[vec] = _start_vect[vec + CPU_IRQ_NR];
	}

	return VMM_OK;
}
Example No. 12
/* -----------------------------------------------------------------------------
 * clear_insn() - clear instruction pointer 
 * -----------------------------------------------------------------------------
 */
static int clear_insn(register struct ud* u)
{
  u->error     = 0;
  u->pfx_seg   = 0;
  u->pfx_opr   = 0;
  u->pfx_adr   = 0;
  u->pfx_lock  = 0;
  u->pfx_repne = 0;
  u->pfx_rep   = 0;
  u->pfx_repe  = 0;
  u->pfx_rex   = 0;
  u->pfx_insn  = 0;
  u->mnemonic  = UD_Inone;
  u->itab_entry = NULL;

  vmm_memset( &u->operand[ 0 ], 0, sizeof( struct ud_operand ) );
  vmm_memset( &u->operand[ 1 ], 0, sizeof( struct ud_operand ) );
  vmm_memset( &u->operand[ 2 ], 0, sizeof( struct ud_operand ) );
 
  return 0;
}
Example No. 13
/*-----------------------------------------------------------------------------------*/
void
uip_arp_timer(void)
{
	struct arp_entry *tabptr;

	++arptime;
	/* 'i' is the file-scope loop index shared by the uIP ARP code */
	for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {
		tabptr = &arp_table[i];
		if((tabptr->ipaddr[0] | tabptr->ipaddr[1]) != 0 &&
			arptime - tabptr->time >= UIP_ARP_MAXAGE) {
			vmm_memset(tabptr->ipaddr, 0, 4);
		}
	}
}
Example No. 14
int __init vmm_profiler_init(void)
{
	pctrl.stat =
	    vmm_malloc(sizeof(struct vmm_profiler_stat) * kallsyms_num_syms);

	if (pctrl.stat == NULL) {
		return VMM_EFAIL;
	}

	vmm_memset(pctrl.stat, 0, sizeof(struct vmm_profiler_stat) *
		   kallsyms_num_syms);

	INIT_SPIN_LOCK(&pctrl.lock);

	return VMM_OK;
}
Example No. 15
static int __init daemon_mterm_init(void)
{
	u8 mterm_priority;
	u32 mterm_time_slice;
	struct vmm_devtree_node * node;
	const char * attrval;

	/* Reset the control structure */
	vmm_memset(&mtctrl, 0, sizeof(mtctrl));

	/* Retrieve mterm priority and time slice from the device tree */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMINFO_NODE_NAME);
	if (!node) {
		return VMM_EFAIL;
	}
	attrval = vmm_devtree_attrval(node,
				      "mterm_priority");
	if (attrval) {
		mterm_priority = *((u32 *) attrval);
	} else {
		mterm_priority = VMM_THREAD_DEF_PRIORITY;
	}
	attrval = vmm_devtree_attrval(node,
				      "mterm_time_slice");
	if (attrval) {
		mterm_time_slice = *((u32 *) attrval);
	} else {
		mterm_time_slice = VMM_THREAD_DEF_TIME_SLICE;
	}

	/* Create mterm thread */
	mtctrl.thread = vmm_threads_create("mterm", 
					   &mterm_main, 
					   NULL, 
					   mterm_priority,
					   mterm_time_slice);
	if (!mtctrl.thread) {
		vmm_panic("Creation of system critical thread failed.\n");
	}

	/* Start the mterm thread */
	vmm_threads_start(mtctrl.thread);

	return VMM_OK;
}
Example No. 16
int vmm_profiler_start(void)
{
	if (!vmm_profiler_isactive()) {
		irq_flags_t flags = arch_cpu_irq_save();

		vmm_memset(pctrl.stat, 0,
			   sizeof(struct vmm_profiler_stat) *
			   kallsyms_num_syms);
		_vmm_profile_enter = vmm_profile_enter;
		_vmm_profile_exit = vmm_profile_exit;
		pctrl.is_active = 1;

		arch_cpu_irq_restore(flags);
	} else {
		return VMM_EFAIL;
	}

	return VMM_OK;
}
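
A minimal usage sketch (run_workload is hypothetical; vmm_profiler_stop is assumed to be the counterpart of vmm_profiler_start):

	if (vmm_profiler_start() == VMM_OK) {
		run_workload();		/* code region being profiled */
		vmm_profiler_stop();	/* assumed counterpart that clears is_active */
	}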
Example No. 17
int arch_vcpu_regs_deinit(struct vmm_vcpu * vcpu)
{
	int rc;

	/* For both Orphan & Normal VCPUs */
	vmm_memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Cleanup CP15 */
	if ((rc = cpu_vcpu_cp15_deinit(vcpu))) {
		return rc;
	}

	/* Free super regs */
	vmm_free(vcpu->arch_priv);

	return VMM_OK;
}
Example No. 18
int __init vmm_chardev_init(void)
{
	int rc;
	struct vmm_class *c;

	c = vmm_malloc(sizeof(struct vmm_class));
	if (!c) {
		return VMM_EFAIL;
	}

	vmm_memset(c, 0, sizeof(struct vmm_class));

	INIT_LIST_HEAD(&c->head);
	vmm_strcpy(c->name, VMM_CHARDEV_CLASS_NAME);
	INIT_LIST_HEAD(&c->classdev_list);

	rc = vmm_devdrv_register_class(c);
	if (rc != VMM_OK) {
		vmm_free(c);
	}

	return rc;
}
Example No. 19
// Send message to destination processors.
// RETURN VALUE:    number of CPUs on which handler is about to execute
UINT32 ipc_execute_send(IPC_DESTINATION dst, IPC_MESSAGE_TYPE type, 
                        IPC_HANDLER_FN handler,
                        void *arg, BOOLEAN wait_for_handler_finish)
{
    CPU_ID                  i;
    CPU_ID                  sender_cpu_id = IPC_CPU_ID();
    IPC_CPU_CONTEXT         *ipc = NULL;
    volatile UINT32         num_received_acks = 0;
    UINT32                  num_required_acks = 0;
    volatile UINT32         *ack_array = &ipc_ack_array[sender_cpu_id * num_of_host_processors];
    BOOLEAN                 status;
    IPC_DESTINATION         single_dst;
    UINT32                  wait_count = 0;
    UINT64                  nmi_accounted_flag[CPU_BITMAP_MAX] = {0};
    UINT64                  enqueue_flag[CPU_BITMAP_MAX] = {0};
    UINT64                  next_send_tsc;
    (void)status;
    // Initialize the ack array.
    vmm_memset((void *) ack_array, 0, num_of_host_processors * sizeof(UINT32));

    for(i = 0; i < num_of_host_processors; i++) {
        if (i != sender_cpu_id) {                               // Exclude yourself.
            if (ipc_cpu_is_destination(dst, sender_cpu_id, i)) {
                ipc = &ipc_cpu_contexts[i];
                lock_acquire(&ipc->data_lock);
                if (ipc_preprocess_message(ipc, i, type)) {     // Preprocess IPC and check if need to enqueue.
                    BOOLEAN  empty_queue = (array_list_size(ipc->message_queue) == 0);
                    BITMAP_ARRAY64_SET(enqueue_flag, i);  // Mark CPU active.
                    num_required_acks++;
                    if (!wait_for_handler_finish)  // Don't wait for handlers to finish.
                        status = ipc_enqueue_message(ipc, type, handler, arg, &ack_array[i], NULL);
                    else   // Wait for handlers to finish.
                        status = ipc_enqueue_message(ipc, type, handler, arg, NULL, &ack_array[i]);
                    ipc->num_of_sent_ipc_messages++;            // IPC sent message counting.
                    VMM_ASSERT(status);
                    // Check if IPC signal should be sent.
                    if (empty_queue) {
                        // Send IPC signal (NMI or SIPI)
                        single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;
                        single_dst.addr = (UINT8)i;
                        if (cpu_activity_state[i] == IPC_CPU_ACTIVE) {
                            BITMAP_ARRAY64_SET(nmi_accounted_flag, i);
                            ipc->num_of_sent_ipc_nmi_interrupts++;
                            ipc_hw_signal_nmi(single_dst);
                        }
                        else
                            ipc_hw_signal_sipi(single_dst);
                    }
                }
                lock_release(&ipc->data_lock);
            }
        }
    }

    if (num_required_acks > 0) {
        VMM_ASSERT(hw_get_tsc_ticks_per_second() != 0);

        // Calculate next tsc tick to resend NMI.
        next_send_tsc = hw_rdtsc() + hw_get_tsc_ticks_per_second(); 
        // Should be one second.
        // signal and wait for acknowledge
        while (num_received_acks != num_required_acks) {
            // Check wait count and time.
            if (wait_count++ > 1000 && hw_rdtsc() > next_send_tsc) {
                wait_count = 0;
                next_send_tsc = hw_rdtsc() + hw_get_tsc_ticks_per_second();

                for (i = 0, num_received_acks = 0; i < num_of_host_processors; i++) {
                    // Send additional IPC signal to stalled cores.
                    if (BITMAP_ARRAY64_GET(enqueue_flag, i) && !ack_array[i]) {
                         // exclude yourself and non active CPUs.
                        single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;
                        single_dst.addr = (UINT8) i;
                        // Check that CPU is still active.
                        VMM_ASSERT(cpu_activity_state[i] != IPC_CPU_NOT_ACTIVE);
                        if (!debug_not_resend) {
                            ipc = &ipc_cpu_contexts[i];
                            lock_acquire(&ipc->data_lock);
                            if (cpu_activity_state[i] == IPC_CPU_ACTIVE) {
                                if (!BITMAP_ARRAY64_GET(nmi_accounted_flag, i)) {
                                    BITMAP_ARRAY64_SET(nmi_accounted_flag, i);
                                    ipc->num_of_sent_ipc_nmi_interrupts++;
                                }
                                ipc_hw_signal_nmi(single_dst);
                                VMM_LOG(mask_anonymous, level_trace,
                                        "[%d] send additional NMI to %d\n", 
                                         (int) sender_cpu_id, (int) i);
                            }
                            else {
                                ipc_hw_signal_sipi(single_dst);
                                VMM_LOG(mask_anonymous, level_trace,
                                        "[%d] send additional SIPI to %d\n", 
                                        (int) sender_cpu_id, (int) i);
                            }
                            lock_release(&ipc->data_lock);
                        }
                    }
                }
            }
            else {
                // Try to process our own received messages,
                // to prevent a deadlock when two cores send messages simultaneously.
                if (!ipc_process_one_ipc())
                    hw_pause();
                // Count received acks.
                for (i = 0, num_received_acks = 0; i < num_of_host_processors; i++)
                    num_received_acks += ack_array[i];
            }
        }
    }
    return num_required_acks;
}
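
A hedged usage sketch for the sender above (the handler, destination shorthand and message type are assumptions based on the names used in this file):

    /* sketch: run flush_handler on every other CPU and wait for completion */
    IPC_DESTINATION dst;
    dst.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;   /* assumed shorthand value */
    dst.addr = 0;
    ipc_execute_send(dst, IPC_TYPE_NORMAL,             /* assumed message type */
                     flush_handler, NULL,
                     TRUE /* wait for handlers to finish */);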
Example No. 20
void init_scancodes_map(void)
{
    vmm_memset(scancodes_map, 0, sizeof(scancodes_map));

    scancodes_map[SCANCODE_KEYPAD0] = '0';
    scancodes_map[SCANCODE_KEYPAD1] = '1';
    scancodes_map[SCANCODE_KEYPAD2] = '2';
    scancodes_map[SCANCODE_KEYPAD3] = '3';
    scancodes_map[SCANCODE_KEYPAD4] = '4';
    scancodes_map[SCANCODE_KEYPAD5] = '5';
    scancodes_map[SCANCODE_KEYPAD6] = '6';
    scancodes_map[SCANCODE_KEYPAD7] = '7';
    scancodes_map[SCANCODE_KEYPAD8] = '8';
    scancodes_map[SCANCODE_KEYPAD9] = '9';
    scancodes_map[SCANCODE_KEYPADPLUS]  = '+';
    scancodes_map[SCANCODE_KEYPADMINUS] = '-';

    scancodes_map[SCANCODE_0] = '0';
    scancodes_map[SCANCODE_1] = '1';
    scancodes_map[SCANCODE_2] = '2';
    scancodes_map[SCANCODE_3] = '3';
    scancodes_map[SCANCODE_4] = '4';
    scancodes_map[SCANCODE_5] = '5';
    scancodes_map[SCANCODE_6] = '6';
    scancodes_map[SCANCODE_7] = '7';
    scancodes_map[SCANCODE_8] = '8';
    scancodes_map[SCANCODE_9] = '9';

    scancodes_map[SCANCODE_Q] = 'q';
    scancodes_map[SCANCODE_W] = 'w';
    scancodes_map[SCANCODE_E] = 'e';
    scancodes_map[SCANCODE_R] = 'r';
    scancodes_map[SCANCODE_T] = 't';
    scancodes_map[SCANCODE_Y] = 'y';
    scancodes_map[SCANCODE_U] = 'u';
    scancodes_map[SCANCODE_I] = 'i';
    scancodes_map[SCANCODE_O] = 'o';
    scancodes_map[SCANCODE_P] = 'p';
    scancodes_map[SCANCODE_A] = 'a';
    scancodes_map[SCANCODE_S] = 's';
    scancodes_map[SCANCODE_D] = 'd';
    scancodes_map[SCANCODE_F] = 'f';
    scancodes_map[SCANCODE_G] = 'g';
    scancodes_map[SCANCODE_H] = 'h';
    scancodes_map[SCANCODE_J] = 'j';
    scancodes_map[SCANCODE_K] = 'k';
    scancodes_map[SCANCODE_L] = 'l';
    scancodes_map[SCANCODE_Z] = 'z';
    scancodes_map[SCANCODE_X] = 'x';
    scancodes_map[SCANCODE_C] = 'c';
    scancodes_map[SCANCODE_V] = 'v';
    scancodes_map[SCANCODE_B] = 'b';
    scancodes_map[SCANCODE_N] = 'n';
    scancodes_map[SCANCODE_M] = 'm';

    scancodes_map[SCANCODE_SPACE]  = ' ';
    scancodes_map[SCANCODE_TAB]    = '\t';
    scancodes_map[SCANCODE_ENTER]  = '\n';
    scancodes_map[SCANCODE_COMMA]  = ',';
    scancodes_map[SCANCODE_POINT]  = '.';
    scancodes_map[SCANCODE_FSLASH] = '/';
    scancodes_map[SCANCODE_BSLASH] = '\\';
    scancodes_map[SCANCODE_BACKSPACE] = '\b';
    scancodes_map[SCANCODE_EQUAL]  = '=';

    scancodes_map[SCANCODE_CURSORUP] = 0x3;
    scancodes_map[SCANCODE_CURSORDOWN] = 0x4;
}
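
A minimal consumer sketch (the scancode source and console sink are hypothetical; only scancodes_map comes from the source):

    unsigned char sc = keyboard_read_scancode();   /* assumed make-code source */
    if (sc < sizeof(scancodes_map) && scancodes_map[sc])
        console_putc(scancodes_map[sc]);           /* assumed console sink */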
Example No. 21
int __init vmm_modules_init(void)
{
	int mod_ret;
	u32 i, j;
	struct vmm_module tmpmod;

	/* Reset the control structure */
	vmm_memset(&modules_ctrl, 0, sizeof(modules_ctrl));

	/* Initialize the control structure */
	modules_ctrl.table = (struct vmm_module *) arch_modtbl_vaddr();
	modules_ctrl.table_size = arch_modtbl_size() / sizeof(struct vmm_module);
	modules_ctrl.mod_count = 0;

	/* Find and count valid modules */
	for (i = 0; i < modules_ctrl.table_size; i++) {
		/* Check validity of module table entry */
		if (modules_ctrl.table[i].signature == VMM_MODULE_SIGNATURE) {
			/* Increment count in control structure */
			modules_ctrl.mod_count++;
		} else {
			break;
		}
	}

	/* If no modules found then return */
	if (!modules_ctrl.mod_count) {
		return VMM_OK;
	}

	/* Sort modules based on initialization priority (Selection Sort) */
	for (i = 0; i < (modules_ctrl.mod_count - 1); i++) {
		for (j = (i + 1); j < modules_ctrl.mod_count; j++) {
			if (modules_ctrl.table[j].ipriority <
			    modules_ctrl.table[i].ipriority) {
				vmm_memcpy(&tmpmod,
					   &modules_ctrl.table[i],
					   sizeof(tmpmod));
				vmm_memcpy(&modules_ctrl.table[i],
					   &modules_ctrl.table[j],
					   sizeof(modules_ctrl.table[i]));
				vmm_memcpy(&modules_ctrl.table[j],
					   &tmpmod,
					   sizeof(modules_ctrl.table[j]));
			}
		}
	}

	/* Initialize modules in sorted order */
	for (i = 0; i < modules_ctrl.mod_count; i++) {
		/* Initialize module if required */
		if (modules_ctrl.table[i].init) {
#if defined(CONFIG_VERBOSE_MODE)
			vmm_printf("Initialize %s\n",
				   modules_ctrl.table[i].name);
#endif
			mod_ret = modules_ctrl.table[i].init();
			if (mod_ret) {
				vmm_printf("%s: %s init error %d\n", 
				__func__, modules_ctrl.table[i].name, mod_ret);
			}
			modules_ctrl.table[i].istatus = mod_ret;
		}
	}

	return VMM_OK;
}
Example No. 22
/* lock_rw_gate_alloc
   Allocate a readers-writer gate structure
   => ptr = pointer to word in which to store address of allocated gate
      description = NULL-terminated human-readable label 
   <= 0 for success, or an error code (and ptr will be set to NULL)
*/
kresult lock_rw_gate_alloc(rw_gate **ptr, char *description)
{
   kresult err;
   rw_gate_pool *search = &gate_pool;
   unsigned char slot_search_start, slot_search;
   unsigned char slot_found = 0;
   rw_gate *new_gate;

   /* sanity check */
   if(!ptr || !description) return e_bad_params;
   if(vmm_nullbufferlen(description) >= LOCK_DESCRIPT_LENGTH)
      return e_bad_params;
   
   /* lock the locking code */
   lock_gate(lock_lock, LOCK_WRITE);
   
   /* try to find an existing pool with free slots */
   while(search && search->nr_free == 0)
      search = search->next;
   
   /* if search is still NULL then grab a new page for the pool */
   if(!search)
   {
      void *new_page;
      err = vmm_req_phys_pg(&new_page, 1);
      if(err)
      {
         unlock_gate(lock_lock, LOCK_WRITE);
         return err; /* bail out if we're out of pages! */
      }
      
      /* allocate a new structure to describe this pool or give up */
      err = vmm_malloc((void **)&search, sizeof(rw_gate_pool));
      if(err)
      {
         unlock_gate(lock_lock, LOCK_WRITE);
         return err; /* bail out if we're out of memory! */
      }
      
      /* initialise the pool structure and add to the head of
         the linked list so it can be found quickly */
      vmm_memset(search, 0, sizeof(rw_gate_pool));
      search->nr_free = (unsigned char)LOCK_POOL_BITMAP_LENGTH_BITS;
      search->physical_base = new_page;
      search->virtual_base = KERNEL_PHYS2LOG(new_page);
      
      vmm_memset(KERNEL_PHYS2LOG(new_page), 0, MEM_PGSIZE);
            
      /* add us to the start of the list */
      if(gate_pool.next)
      {
         search->next = gate_pool.next;
         search->next->previous = search;
      }

      gate_pool.next = search;
      search->previous = &gate_pool;
   }
   
   /* search is now valid or we wouldn't be here, so locate a free slot by
      scanning through the bitmap, starting at the last known free slot */
   slot_search = slot_search_start = search->last_free;
   do
   {
      if(lock_bitmap_test(search->bitmap, slot_search) == 0)
         slot_found = 1;
      
      slot_search++;
      if(slot_search >= LOCK_POOL_BITMAP_LENGTH_BITS)
         slot_search = 0;
   }
   while(slot_search_start != slot_search && !slot_found);
   
   /* set the bit and grab the address of the slot to use for the lock */
   if(slot_found)
   {
      lock_bitmap_set(search->bitmap, slot_search);
      
      /* initialise the gate structure */
      new_gate = (rw_gate *)((unsigned int)search->virtual_base + (sizeof(rw_gate) * slot_search));
      vmm_memset(new_gate, 0, (sizeof(rw_gate)));
      *ptr = new_gate;
      
#ifdef DEBUG_LOCK_RWGATE_PROFILE
      vmm_memcpy(&(new_gate->description), description, vmm_nullbufferlen(description) + 1 /* include NUL terminator */);
#endif
                 
      /* update accounting, increment the next free slot but beware of
         overflow */
      search->nr_free--;
      search->last_free++;
      if(search->last_free >= LOCK_POOL_BITMAP_LENGTH_BITS)
         search->last_free = 0;
      
      LOCK_DEBUG("[lock:%i] created new readers-writer lock '%s' %p (spinlock %p)\n",
                 CPU_ID, description, new_gate, new_gate->spinlock);
      
      unlock_gate(lock_lock, LOCK_WRITE);
      return success;
   }
   
   /* something weird has happened if we've fallen this far, so
      fall through to failure */
   unlock_gate(lock_lock, LOCK_WRITE);
   *ptr = NULL;
   return e_failure;
}
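
A usage sketch for the allocator above (the gate name is illustrative; lock_gate/unlock_gate and the error convention come from the source):

   rw_gate *pgtable_lock;

   if(lock_rw_gate_alloc(&pgtable_lock, "page table lock") != success)
      return e_failure;

   lock_gate(pgtable_lock, LOCK_WRITE);
   /* ... critical section ... */
   unlock_gate(pgtable_lock, LOCK_WRITE);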
Example No. 23
static int uart_driver_probe(struct vmm_device *dev,const struct vmm_devid *devid)
{
	int rc;
	const char *attr;
	struct vmm_chardev *cd;
	struct uart_port *port;
	
	cd = vmm_malloc(sizeof(struct vmm_chardev));
	if(!cd) {
		rc = VMM_EFAIL;
		goto free_nothing;
	}
	vmm_memset(cd, 0, sizeof(struct vmm_chardev));

	port = vmm_malloc(sizeof(struct uart_port));
	if(!port) {
		rc = VMM_EFAIL;
		goto free_chardev;
	}
	vmm_memset(port, 0, sizeof(struct uart_port));

	vmm_strcpy(cd->name, dev->node->name);
	cd->dev = dev;
	cd->ioctl = NULL;
	cd->read = uart_read;
	cd->write = uart_write;
	cd->priv = port;

	rc = vmm_devdrv_ioremap(dev, &port->base, 0);
	if(rc) {
		goto free_port;
	}

	attr = vmm_devtree_attrval(dev->node, "reg_align");
	if (attr) {
		port->reg_align = *((u32 *)attr);
	} else {
		port->reg_align = 1;
	}

	attr = vmm_devtree_attrval(dev->node, "reg_offset");
	if (attr) {
		port->base += *((u32 *)attr);
	}

	attr = vmm_devtree_attrval(dev->node, "baudrate");
	if(!attr) {
		rc = VMM_EFAIL;
		goto free_port;
	}
	port->baudrate = *((u32 *)attr);
	port->input_clock = vmm_devdrv_clock_rate(dev);

	/* Call low-level init function */
	uart_lowlevel_init(port->base, port->reg_align, 
			port->baudrate, port->input_clock);

	rc = vmm_chardev_register(cd);
	if(rc) {
		goto free_port;
	}

	return VMM_OK;

free_port:
	vmm_free(port);
free_chardev:
	vmm_free(cd);
free_nothing:
	return rc;
}
Example No. 24
int arch_vcpu_regs_init(struct vmm_vcpu * vcpu)
{
	u32 ite, cpuid = ARM_CPUID_CORTEXA8;
	/* Initialize User Mode Registers */
	/* For both Orphan & Normal VCPUs */
	vmm_memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	if (vcpu->is_normal) {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_USER;
	} else {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR;
		arm_regs(vcpu)->sp = vcpu->start_sp;
	}
	/* Initialize Supervisor Mode Registers */
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}
	if (!vcpu->reset_count) {
		vcpu->arch_priv = vmm_malloc(sizeof(arm_priv_t));
		if (!vcpu->arch_priv) {
			return VMM_EFAIL;
		}
		vmm_memset(arm_priv(vcpu), 0, sizeof(arm_priv_t));
		arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED | 
				   CPSR_IRQ_DISABLED |
				   CPSR_FIQ_DISABLED | 
				   CPSR_MODE_SUPERVISOR;
	} else {
		for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) {
			arm_priv(vcpu)->gpr_usr[ite] = 0x0;
			arm_priv(vcpu)->gpr_fiq[ite] = 0x0;
		}
		arm_priv(vcpu)->sp_usr = 0x0;
		arm_priv(vcpu)->lr_usr = 0x0;
		arm_priv(vcpu)->sp_svc = 0x0;
		arm_priv(vcpu)->lr_svc = 0x0;
		arm_priv(vcpu)->spsr_svc = 0x0;
		arm_priv(vcpu)->sp_mon = 0x0;
		arm_priv(vcpu)->lr_mon = 0x0;
		arm_priv(vcpu)->spsr_mon = 0x0;
		arm_priv(vcpu)->sp_abt = 0x0;
		arm_priv(vcpu)->lr_abt = 0x0;
		arm_priv(vcpu)->spsr_abt = 0x0;
		arm_priv(vcpu)->sp_und = 0x0;
		arm_priv(vcpu)->lr_und = 0x0;
		arm_priv(vcpu)->spsr_und = 0x0;
		arm_priv(vcpu)->sp_irq = 0x0;
		arm_priv(vcpu)->lr_irq = 0x0;
		arm_priv(vcpu)->spsr_irq = 0x0;
		arm_priv(vcpu)->sp_fiq = 0x0;
		arm_priv(vcpu)->lr_fiq = 0x0;
		arm_priv(vcpu)->spsr_fiq = 0x0;
		cpu_vcpu_cpsr_update(vcpu, 
				     arm_regs(vcpu), 
				     (CPSR_ZERO_MASK |
					CPSR_ASYNC_ABORT_DISABLED | 
					CPSR_IRQ_DISABLED |
					CPSR_FIQ_DISABLED | 
					CPSR_MODE_SUPERVISOR),
				     CPSR_ALLBITS_MASK);
	}
	if (!vcpu->reset_count) {
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			break;
		default:
			break;
		};
	}
#ifdef CONFIG_ARM32_FUNCSTATS
	for (ite=0; ite < ARM_FUNCSTAT_MAX; ite++) {
		arm_priv(vcpu)->funcstat[ite].function_name = NULL;
		arm_priv(vcpu)->funcstat[ite].entry_count = 0;
		arm_priv(vcpu)->funcstat[ite].exit_count = 0;
		arm_priv(vcpu)->funcstat[ite].time = 0;
	}
#endif
	return cpu_vcpu_cp15_init(vcpu, cpuid);
}
Exemplo n.º 25
0
/*-----------------------------------------------------------------------------------*/
void
uip_arp_out(void)
{
	struct arp_entry *tabptr;

	/* Find the destination IP address in the ARP table and construct
	   the Ethernet header. If the destination IP address isn't on the
	   local network, we use the default router's IP address instead.

	   If no ARP table entry is found, we overwrite the original IP
	   packet with an ARP request for the IP address. */

	/* First check if destination is a local broadcast. */
	if(uip_ipaddr_cmp(IPBUF->destipaddr, broadcast_ipaddr)) {
		vmm_memcpy(IPBUF->ethhdr.dest.addr, broadcast_ethaddr.addr, 6);
	} else {
		/* Check if the destination address is on the local network. */
		if(!uip_ipaddr_maskcmp(IPBUF->destipaddr, uip_hostaddr, uip_netmask)) {
			/* Destination address was not on the local network, so we need to
			   use the default router's IP address instead of the destination
			   address when determining the MAC address. */
			uip_ipaddr_copy(ipaddr, uip_draddr);
		} else {
			/* Else, we use the destination IP address. */
			uip_ipaddr_copy(ipaddr, IPBUF->destipaddr);
		}

		for(i = 0; i < UIP_ARPTAB_SIZE; ++i) {
			tabptr = &arp_table[i];
			if(uip_ipaddr_cmp(ipaddr, tabptr->ipaddr)) {
				break;
			}
		}

		if(i == UIP_ARPTAB_SIZE) {
			/* The destination address was not in our ARP table, so we
			   overwrite the IP packet with an ARP request. */

			vmm_memset(BUF->ethhdr.dest.addr, 0xff, 6);
			vmm_memset(BUF->dhwaddr.addr, 0x00, 6);
			vmm_memcpy(BUF->ethhdr.src.addr, uip_ethaddr.addr, 6);
			vmm_memcpy(BUF->shwaddr.addr, uip_ethaddr.addr, 6);

			uip_ipaddr_copy(BUF->dipaddr, ipaddr);
			uip_ipaddr_copy(BUF->sipaddr, uip_hostaddr);
			BUF->opcode = HTONS(ARP_REQUEST); /* ARP request. */
			BUF->hwtype = HTONS(ARP_HWTYPE_ETH);
			BUF->protocol = HTONS(UIP_ETHTYPE_IP);
			BUF->hwlen = 6;
			BUF->protolen = 4;
			BUF->ethhdr.type = HTONS(UIP_ETHTYPE_ARP);

			uip_appdata = &uip_buf[UIP_TCPIP_HLEN + UIP_LLH_LEN];

			uip_len = sizeof(struct arp_hdr);
			return;
		}

		/* Build an ethernet header. */
		vmm_memcpy(IPBUF->ethhdr.dest.addr, tabptr->ethaddr.addr, 6);
	}
	vmm_memcpy(IPBUF->ethhdr.src.addr, uip_ethaddr.addr, 6);

	IPBUF->ethhdr.type = HTONS(UIP_ETHTYPE_IP);

	uip_len += sizeof(struct uip_eth_hdr);
}
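
For context, a sketch of the usual uIP driver loop that calls uip_arp_out() on the transmit path (netdev_read/netdev_send are hypothetical driver hooks; uip_arp_ipin and uip_input are standard uIP entry points):

	uip_len = netdev_read();	/* assumed driver receive hook */
	if(uip_len > 0 && BUF->ethhdr.type == HTONS(UIP_ETHTYPE_IP)) {
		uip_arp_ipin();		/* refresh ARP table from inbound packet */
		uip_input();		/* hand packet to the uIP stack */
		if(uip_len > 0) {
			uip_arp_out();	/* fill in the Ethernet header */
			netdev_send();	/* assumed driver transmit hook */
		}
	}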