Example #1
void lock_acquire_writelock(VMM_READ_WRITE_LOCK * lock)
{
    lock_acquire(&lock->lock);
    // wait until readers == 0
    while(lock->readers) {
        hw_pause();
    }
}
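The read side of this reader/writer lock is not shown in these examples. For context, here is a minimal sketch of a plausible counterpart, assuming hw_interlocked_increment()/hw_interlocked_decrement() primitives in the spirit of the hw_interlocked_compare_exchange() used in Examples #5 and #6, plus the lock_release() seen in Example #7:

void lock_acquire_readlock(VMM_READ_WRITE_LOCK *lock)
{
    lock_acquire(&lock->lock);                          // serialize against a writer taking the lock
    hw_interlocked_increment((INT32 *)&lock->readers);  // assumed primitive: register this reader
    lock_release(&lock->lock);                          // other readers may now enter
}

void lock_release_readlock(VMM_READ_WRITE_LOCK *lock)
{
    // When the last reader leaves, a writer spinning in
    // lock_acquire_writelock() observes readers == 0 and proceeds.
    hw_interlocked_decrement((INT32 *)&lock->readers);  // assumed primitive
}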
Example #2
void lock_acquire(VMM_LOCK* lock)
{
    CPU_ID this_cpu_id = hw_cpu_id();

    if (!lock) {
        return; // error: NULL lock, nothing to acquire
    }
    while (FALSE == lock_try_acquire(lock)) {
        // Re-acquiring a lock we already own would spin forever; catch it here.
        VMM_ASSERT_NOLOCK(lock->owner_cpu_id != this_cpu_id);
        hw_pause();
    }
    lock->owner_cpu_id = this_cpu_id;
}
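lock_try_acquire() itself is not shown. A plausible sketch on top of the same compare-exchange primitive that raw_lock() uses in Example #6; the uint32_lock field name and the INT32 cast are assumptions, not confirmed by the source:

BOOLEAN lock_try_acquire(VMM_LOCK *lock)
{
    // Atomically flip the lock word from 0 (free) to 1 (held).
    // hw_interlocked_compare_exchange() returns the value observed
    // before the exchange, so 0 means this CPU won the lock.
    return 0 == hw_interlocked_compare_exchange(
        (INT32 *)&lock->uint32_lock, /* hypothetical field name */
        0,  /* Expected: free */
        1); /* New: held */
}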
Example #3
void interruptible_lock_acquire_writelock(VMM_READ_WRITE_LOCK * lock)
{
    BOOLEAN ipc_processed = FALSE;

    interruptible_lock_acquire(&lock->lock);
    // wait until readers == 0
    while(lock->readers) {
        ipc_processed = ipc_process_one_ipc();
        if(FALSE == ipc_processed) {
            hw_pause();
        }
    }
}
Example #4
void interruptible_lock_acquire(VMM_LOCK* lock)
{
    CPU_ID this_cpu_id = hw_cpu_id();
    BOOLEAN ipc_processed = FALSE;

    if (!lock) {
        return; // error: NULL lock, nothing to acquire
    }
    while (FALSE == lock_try_acquire(lock)) {
        ipc_processed = ipc_process_one_ipc();
        if(FALSE == ipc_processed) {
            hw_pause();
        }
    }
    lock->owner_cpu_id = this_cpu_id;
}
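The interruptible variants exist for paths where the spinning CPU may itself be the target of an IPC: instead of pausing blindly, each iteration first drains one pending IPC via ipc_process_one_ipc(), breaking the deadlock in which two CPUs wait on each other simultaneously (the same idiom appears near the end of Examples #7 and #8). A hypothetical caller, assuming an illustrative g_state_lock global and the lock_release() seen in Example #7:

static VMM_LOCK g_state_lock; /* illustrative only; initialization elided */

void update_shared_state(void)
{
    // While waiting for the lock, this CPU keeps draining its own IPC
    // queue, so another CPU spinning on our acknowledgment inside
    // ipc_execute_send() cannot deadlock against us.
    interruptible_lock_acquire(&g_state_lock);
    /* ... modify state shared across CPUs ... */
    lock_release(&g_state_lock);
}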
Example #5
static
void raw_unlock(volatile uint32_t *p_lock_var)
{
	int32_t old_value;

	for (;;) {
		/* Loop until we have successfully decremented the lock
		 * variable */

		old_value = *p_lock_var;
		if (old_value == hw_interlocked_compare_exchange(
			    (int32_t *)p_lock_var,
			    old_value,        /* Expected */
			    old_value - 1)) { /* New */
			break;
		}
		hw_pause();
	}
}
Example #6
/*===================== raw_lock(), raw_unlock() ==========================
 *
 * These functions are used for doing lock/unlock
 * without CPU identification/validation
 * The reason is to have the lock facility at the stage when cpu ID is unknown
 * e.g. for LOGs at the bootstrap time
 *
 *========================================================================= */
static
void raw_lock(volatile uint32_t *p_lock_var)
{
	uint32_t old_value;

	for (;;) {
		/* Loop until we have successfully incremented the lock
		 * variable from 0 to 1 (i.e., we are the only locker) */

		old_value = hw_interlocked_compare_exchange(
			(int32_t *)p_lock_var,
			0, /* Expected */
			1); /* New */
		if (0 == old_value) {
			break;
		}
		hw_pause();
	}
}
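A hypothetical use matching the comment's motivation, taking a log lock before CPU identification is available at bootstrap; the lock variable and the elided emit step are illustrative only:

static volatile uint32_t g_boot_log_lock = 0; /* 0 = free, 1 = held */

static void boot_log_write(const char *msg)
{
	raw_lock(&g_boot_log_lock);   /* no CPU ID needed, unlike lock_acquire() */
	/* ... emit msg into the log buffer ... */
	raw_unlock(&g_boot_log_lock); /* CAS-decrement back to 0 */
	(void)msg;
}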
Example #7
// Send message to destination processors.
// RETURN VALUE:    number of CPUs on which handler is about to execute
UINT32 ipc_execute_send(IPC_DESTINATION dst, IPC_MESSAGE_TYPE type, 
                        IPC_HANDLER_FN handler,
                        void *arg, BOOLEAN wait_for_handler_finish)
{
    CPU_ID                  i;
    CPU_ID                  sender_cpu_id = IPC_CPU_ID();
    IPC_CPU_CONTEXT         *ipc = NULL;
    volatile UINT32         num_received_acks = 0;
    UINT32                  num_required_acks = 0;
    volatile UINT32         *ack_array = &ipc_ack_array[sender_cpu_id * num_of_host_processors];
    BOOLEAN                 status;
    IPC_DESTINATION         single_dst;
    UINT32                  wait_count = 0;
    UINT64                  nmi_accounted_flag[CPU_BITMAP_MAX] = {0};
    UINT64                  enqueue_flag[CPU_BITMAP_MAX] = {0};
    UINT64                  next_send_tsc;
    (void)status; // consumed only by VMM_ASSERT(), which may compile out in release builds
    // Initialize ack array.
    vmm_memset((void *) ack_array, 0, num_of_host_processors * sizeof(UINT32));

    for(i = 0; i < num_of_host_processors; i++) {
        if (i != sender_cpu_id) {                               // Exclude yourself.
            if (ipc_cpu_is_destination(dst, sender_cpu_id, i)) {
                ipc = &ipc_cpu_contexts[i];
                lock_acquire(&ipc->data_lock);
                if (ipc_preprocess_message(ipc, i, type)) {     // Preprocess IPC and check if need to enqueue.
                    BOOLEAN empty_queue = (array_list_size(ipc->message_queue) == 0);
                    BITMAP_ARRAY64_SET(enqueue_flag, i);  // Mark CPU active.
                    num_required_acks++;
                    if (!wait_for_handler_finish)  // Don't wait for handlers to finish.
                        status = ipc_enqueue_message(ipc, type, handler, arg, &ack_array[i], NULL);
                    else   // Wait for handlers to finish.
                        status = ipc_enqueue_message(ipc, type, handler, arg, NULL, &ack_array[i]);
                    ipc->num_of_sent_ipc_messages++;            // IPC sent message counting.
                    VMM_ASSERT(status);
                    // Check if IPC signal should be sent.
                    if (empty_queue) {
                        // Send IPC signal (NMI or SIPI)
                        single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;
                        single_dst.addr = (UINT8)i;
                        if (cpu_activity_state[i] == IPC_CPU_ACTIVE) {
                            BITMAP_ARRAY64_SET(nmi_accounted_flag, i);
                            ipc->num_of_sent_ipc_nmi_interrupts++;
                            ipc_hw_signal_nmi(single_dst);
                        }
                        else
                            ipc_hw_signal_sipi(single_dst);
                    }
                }
                lock_release(&ipc->data_lock);
            }
        }
    }

    if (num_required_acks > 0) {
        VMM_ASSERT(hw_get_tsc_ticks_per_second() != 0);

        // Calculate the next TSC tick (about one second away) at which to resend NMIs.
        next_send_tsc = hw_rdtsc() + hw_get_tsc_ticks_per_second();
        // Signal and wait for acknowledgments.
        while (num_received_acks != num_required_acks) {
            // Check wait count and time.
            if (wait_count++ > 1000 && hw_rdtsc() > next_send_tsc) {
                wait_count = 0;
                next_send_tsc = hw_rdtsc() + hw_get_tsc_ticks_per_second();

                for (i = 0, num_received_acks = 0; i < num_of_host_processors; i++) {
                    // Send additional IPC signal to stalled cores.
                    if (BITMAP_ARRAY64_GET(enqueue_flag, i) && !ack_array[i]) {
                         // Exclude self and non-active CPUs.
                        single_dst.addr_shorthand = IPI_DST_NO_SHORTHAND;
                        single_dst.addr = (UINT8) i;
                        // Check that CPU is still active.
                        VMM_ASSERT(cpu_activity_state[i] != IPC_CPU_NOT_ACTIVE);
                        if (!debug_not_resend) {
                            ipc = &ipc_cpu_contexts[i];
                            lock_acquire(&ipc->data_lock);
                            if (cpu_activity_state[i] == IPC_CPU_ACTIVE) {
                                if (!BITMAP_ARRAY64_GET(nmi_accounted_flag, i)) {
                                    BITMAP_ARRAY64_SET(nmi_accounted_flag, i);
                                    ipc->num_of_sent_ipc_nmi_interrupts++;
                                }
                                ipc_hw_signal_nmi(single_dst);
                                VMM_LOG(mask_anonymous, level_trace,
                                        "[%d] send additional NMI to %d\n", 
                                         (int) sender_cpu_id, (int) i);
                            }
                            else {
                                ipc_hw_signal_sipi(single_dst);
                                VMM_LOG(mask_anonymous, level_trace,
                                        "[%d] send additional SIPI to %d\n", 
                                        (int) sender_cpu_id, (int) i);
                            }
                            lock_release(&ipc->data_lock);
                        }
                    }
                }
            }
            else {
                // Try to process our own received messages, to prevent the
                // deadlock that occurs when two cores send messages simultaneously.
                if (!ipc_process_one_ipc())
                    hw_pause();
                // Count received acks.
                for (i = 0, num_received_acks = 0; i < num_of_host_processors; i++)
                    num_received_acks += ack_array[i];
            }
        }
    }
    return num_required_acks;
}
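A hedged sketch of a caller, to illustrate the contract: the return value is the number of CPUs that will run the handler, and the sender spins (while servicing its own IPCs) until every targeted CPU acknowledges, either at dequeue time or, when wait_for_handler_finish is TRUE, after its handler completes. The handler signature (sender CPU plus argument pointer) and the IPC_TYPE_NORMAL message type are assumptions not shown in these examples:

static void flush_handler(CPU_ID from, void *arg)  /* assumed IPC_HANDLER_FN shape */
{
    (void)from; (void)arg;
    /* ... per-CPU work ... */
}

static void flush_all_other_cpus(void)
{
    IPC_DESTINATION dst;
    UINT32 targets;

    vmm_memset(&dst, 0, sizeof(dst));
    dst.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF; /* shorthand seen in Example #8 */
    targets = ipc_execute_send(dst, IPC_TYPE_NORMAL /* hypothetical type */,
                               flush_handler, NULL,
                               TRUE /* wait for all handlers to finish */);
    VMM_LOG(mask_anonymous, level_trace,
            "handler ran on %d CPUs\n", (int)targets);
}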
Example #8
/* ---------------------------- APIs --------------------------------------- */
void guest_control_setup(guest_handle_t guest, const vmexit_control_t *request)
{
	guest_gcpu_econtext_t ctx;
	guest_cpu_handle_t gcpu;
	mon_state_t mon_state;
	cpu_id_t this_hcpu_id = hw_cpu_id();

	MON_ASSERT(guest);

	/* setup vmexit requests without applying */
	for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu;
	     gcpu = mon_guest_gcpu_next(&ctx))
		gcpu_control_setup_only(gcpu, request);

	/* now apply */
	mon_state = mon_get_state();

	if (MON_STATE_BOOT == mon_state) {
		/* may run on the BSP only */
		MON_ASSERT(0 == this_hcpu_id);

		/* single-threaded mode; the APs are not yet initialized */
		for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu;
		     gcpu = mon_guest_gcpu_next(&ctx))
			gcpu_control_apply_only(gcpu);
	} else if (MON_STATE_RUN == mon_state) {
		ipc_comm_guest_struct_t ipc;
		uint32_t wait_for_ipc_count = 0;
		ipc_destination_t ipc_dst;

		mon_memset(&ipc, 0, sizeof(ipc));
		mon_memset(&ipc_dst, 0, sizeof(ipc_dst));

		/* multi-thread mode with all APs ready and running
		 * or in Wait-For-SIPI state on behalf of guest */

		ipc.guest = guest;

		/* first apply for gcpus allocated for this hw cpu */
		apply_vmexit_config(this_hcpu_id, &ipc);

		/* reset executed counter and flush memory */
		hw_assign_as_barrier(&(ipc.executed), 0);

		/* send for execution */
		ipc_dst.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;
		wait_for_ipc_count =
			ipc_execute_handler(ipc_dst, apply_vmexit_config, &ipc);

		/* wait for execution finish */
		while (wait_for_ipc_count != ipc.executed) {
			/* avoid deadlock - process one IPC if exist */
			ipc_process_one_ipc();
			hw_pause();
		}
	} else {
		/* not supported mode */
		MON_LOG(mask_anonymous, level_trace,
			"Unsupported global mon_state=%d in"
			" guest_control_setup()\n",
			mon_state);
		MON_DEADLOOP();
	}
}