Example #1
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        if (use_icount && qemu_next_icount_deadline() <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #2
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_init_ipi(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
Example #3
/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}
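
The three numbered cases above are easier to follow next to the other side of the protocol. Below is a minimal sketch of the matching start_exclusive(), assuming the same pending_cpus, exclusive_cond and qemu_cpu_list_lock globals; the real function in QEMU's cpus-common.c adds an exclusive_idle() step and differs in detail.

void start_exclusive(void)
{
    CPUState *other_cpu;

    qemu_mutex_lock(&qemu_cpu_list_lock);

    /* Claim the exclusive section; the 1 counts ourselves. */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.
     * Pairs with the smp_mb() in cpu_exec_end() above. */
    smp_mb();
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            atomic_set(&pending_cpus, pending_cpus + 1);
            qemu_cpu_kick(other_cpu);
        }
    }

    /* Wait until every running CPU has gone through cpu_exec_end(). */
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Safe to drop the lock: nobody can start another exclusive
     * section until end_exclusive() resets pending_cpus to 0. */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}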
Example #4
static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                unsigned size)
{
    EduState *edu = opaque;

    if (addr < 0x80 && size != 4) {
        return;
    }

    if (addr >= 0x80 && size != 4 && size != 8) {
        return;
    }

    switch (addr) {
    case 0x04:
        edu->addr4 = ~val;
        break;
    case 0x08:
        if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
            break;
        }
        /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only
         * set in this function and it is under the iothread mutex.
         */
        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = val;
        atomic_or(&edu->status, EDU_STATUS_COMPUTING);
        qemu_cond_signal(&edu->thr_cond);
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        if (val & EDU_STATUS_IRQFACT) {
            atomic_or(&edu->status, EDU_STATUS_IRQFACT);
        } else {
            atomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
        }
        break;
    case 0x60:
        edu_raise_irq(edu, val);
        break;
    case 0x64:
        edu_lower_irq(edu, val);
        break;
    case 0x80:
        dma_rw(edu, true, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, true, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, true, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        if (!(val & EDU_DMA_RUN)) {
            break;
        }
        dma_rw(edu, true, &val, &edu->dma.cmd, true);
        break;
    }
}
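
The 0x08 case hands the factorial request to a worker thread through thr_cond. A minimal sketch of that consumer side, assuming the thr_mutex, thr_cond, fact, status and stopping fields used above and in example #18; the real worker in QEMU's hw/misc/edu.c differs in detail (IRQ raising, sleep emulation).

static void *edu_fact_thread(void *opaque)
{
    EduState *edu = opaque;

    while (1) {
        uint32_t val, ret = 1;

        qemu_mutex_lock(&edu->thr_mutex);
        /* Sleep until edu_mmio_write() sets EDU_STATUS_COMPUTING and
         * signals, or until pci_edu_uninit() asks us to stop. */
        while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 &&
               !edu->stopping) {
            qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
        }
        if (edu->stopping) {
            qemu_mutex_unlock(&edu->thr_mutex);
            break;
        }
        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);

        while (val > 0) {
            ret *= val--;
        }

        /* Publish the result, then clear the busy bit. */
        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = ret;
        qemu_mutex_unlock(&edu->thr_mutex);
        atomic_and(&edu->status, ~EDU_STATUS_COMPUTING);
    }

    return NULL;
}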
Example #5
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
Example #6
static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);

    if (!s->stats_vq_elem && vdev->vm_running &&
        (status & VIRTIO_CONFIG_S_DRIVER_OK) && virtqueue_rewind(s->svq, 1)) {
        /* poll stats queue for the element we have discarded when the VM
         * was stopped */
        virtio_balloon_receive_stats(vdev, s->svq);
    }

    if (virtio_balloon_free_page_support(s)) {
        /*
         * The VM is woken up and the iothread was blocked, so signal it to
         * continue.
         */
        if (vdev->vm_running && s->block_iothread) {
            qemu_mutex_lock(&s->free_page_lock);
            s->block_iothread = false;
            qemu_cond_signal(&s->free_page_cond);
            qemu_mutex_unlock(&s->free_page_lock);
        }

        /* The VM is stopped, block the iothread. */
        if (!vdev->vm_running) {
            qemu_mutex_lock(&s->free_page_lock);
            s->block_iothread = true;
            qemu_mutex_unlock(&s->free_page_lock);
        }
    }
}
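
For reference, a minimal sketch of the iothread-side wait that block_iothread gates, assuming the free_page_lock and free_page_cond fields above; the real code in hw/virtio/virtio-balloon.c may structure this differently.

/* Called from the free-page-hinting iothread; blocks while the VM is
 * stopped, until virtio_balloon_set_status() clears block_iothread. */
static void virtio_balloon_wait_running_sketch(VirtIOBalloon *s)
{
    qemu_mutex_lock(&s->free_page_lock);
    while (s->block_iothread) {
        qemu_cond_wait(&s->free_page_cond, &s->free_page_lock);
    }
    qemu_mutex_unlock(&s->free_page_lock);
}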
Example #7
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (iothread->running) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
        }
    }

    rcu_unregister_thread();
    return NULL;
}
Example #8
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;
    bool blocking;

    rcu_register_thread();

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!iothread->stopping) {
        aio_context_acquire(iothread->ctx);
        blocking = true;
        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
            /* Progress was made, keep going */
            blocking = false;
        }
        aio_context_release(iothread->ctx);
    }

    rcu_unregister_thread();
    return NULL;
}
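
A minimal sketch of the shutdown path that pairs with this loop, assuming the stopping flag and ctx/thread fields above; the real iothread_stop() also deals with the GMainLoop case shown in example #7.

void iothread_stop(IOThread *iothread)
{
    iothread->stopping = true;
    /* Kick aio_poll() out of its blocking wait so the loop re-checks
     * iothread->stopping. */
    aio_notify(iothread->ctx);
    qemu_thread_join(&iothread->thread);
}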
Example #9
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);
    /* TLC profiling */
    tlc_cpu_profile_init(env);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
Example #10
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;

        if (cpu_break_switch == 1) {
            /* Correctness: cpu_break_switch is set (in brtm) before
             * env->stop is set (in pause_all_vcpus).
             * Get current affinity, priority, etc. and print out:
             * print_current_vcpu_info(env->cpu_index);
             */
            if (sched_setaffinity(0, sizeof(cpu_set_t), &adjusted_cpu_set) == -1) {
                printf(" cpu %d: sched_setaffinity() error\n", env->cpu_index);
            }
            /* TLC: show new vcpu info
             * print_current_vcpu_info(env->cpu_index);
             */
        }
        /* Make sure the above slowdown is done (if the break was set)
         * before telling the io-thread to continue. */
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}
Example #11
File: cpus.c Project: tycho/qemu
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}
Example #12
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
Example #13
void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
Example #14
void cpu_stop_current(void)
{
    if (cpu_single_env) {
        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
        cpu_single_cpu->stop = false;
        cpu_single_cpu->stopped = true;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
Example #15
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
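
qemu_pause_cond is waited on by whoever requested the pause. A minimal sketch of that requesting side, assuming the stop/stopped flags and qemu_global_mutex used throughout these examples; the real pause_all_vcpus() in cpus.c is called with the global mutex held and looks roughly like this.

static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }
    return true;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    /* Each vCPU acknowledges in qemu_wait_io_event_common() above. */
    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
    }
}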
Example #16
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}
Example #17
File: cpus.c Project: 0bliv10n/s2e
static void qemu_wait_io_event_common(CPUArchState *env)
{
    if (env->stop) {
#if defined(CONFIG_S2E_DEBUG)
        s2e_debug_print("CPU: qemu_wait_io_event_common stop\n");
#endif
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}
Example #18
static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);

    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
}
Example #19
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!atomic_read(&iothread->stopping)) {
        aio_poll(iothread->ctx, true);
    }

    rcu_unregister_thread();
    return NULL;
}
Example #20
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
Example #21
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}
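
The fairness comment only makes sense together with the acquiring side, which queues on qemu_fair_mutex first. A hedged sketch of what the contemporaneous qemu_mutex_lock_iothread() looked like, under the assumption of the same two mutexes:

void qemu_mutex_lock_iothread(void)
{
    /* Take the fair mutex first: the vCPU loop above releases
     * qemu_global_mutex and then passes through qemu_fair_mutex,
     * guaranteeing us a window in which to grab the global mutex. */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_lock(&qemu_global_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);
}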
Example #22
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #23
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        LOGD_CPUS("%s2: Thread ID = %d\n", __func__, env->thread_id);
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            LOGD_CPUS("%s=>qemu_notify_event()\n", __func__);
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #24
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #25
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    return NULL;
}
Example #26
int
main(
    int argc,
    char *argv[]
) {
    char *qemu_host;
    char *qemu_port;
    VSCMsgHeader mhHeader;
    VSCMsgError *error_msg;

    int rv;
    int dwSendLength;
    int dwRecvLength;
    uint8_t pbRecvBuffer[APDUBufSize];
    uint8_t pbSendBuffer[APDUBufSize];
    VReaderStatus reader_status;
    VReader *reader = NULL;
    VCardEmulOptions *command_line_options = NULL;

    char *cert_names[MAX_CERTS];
    char *emul_args = NULL;
    int cert_count = 0;
    int c;

    while ((c = getopt(argc, argv, "c:e:pd:")) != -1) {
        switch (c) {
        case 'c':
            if (cert_count >= MAX_CERTS) {
                printf("too many certificates (max = %d)\n", MAX_CERTS);
                exit(5);
            }
            cert_names[cert_count++] = optarg;
            break;
        case 'e':
            emul_args = optarg;
            break;
        case 'p':
            print_usage();
            exit(4);
            break;
        case 'd':
            verbose = get_id_from_string(optarg, 1);
            break;
        }
    }

    if (argc - optind != 2) {
        print_usage();
        exit(4);
    }

    if (cert_count > 0) {
        char *new_args;
        int len, i;
        /* If we've been given some -c options, we clearly want to do some
         * software emulation.  Add that emulation now.  This is NSS
         * emulator specific. */
        if (emul_args == NULL) {
            emul_args = (char *)"db=\"/etc/pki/nssdb\"";
        }
#define SOFT_STRING ",soft=(,Virtual Reader,CAC,,"
        /* 2 == close paren & NUL */
        len = strlen(emul_args) + strlen(SOFT_STRING) + 2;
        for (i = 0; i < cert_count; i++) {
            len += strlen(cert_names[i])+1; /* 1 == comma */
        }
        new_args = g_malloc(len);
        strcpy(new_args, emul_args);
        strcat(new_args, SOFT_STRING);
        for (i = 0; i < cert_count; i++) {
            strcat(new_args, cert_names[i]);
            strcat(new_args, ",");
        }
        strcat(new_args, ")");
        emul_args = new_args;
    }
    if (emul_args) {
        command_line_options = vcard_emul_options(emul_args);
    }

    qemu_host = g_strdup(argv[argc - 2]);
    qemu_port = g_strdup(argv[argc - 1]);
    sock = connect_to_qemu(qemu_host, qemu_port);
    if (sock == -1) {
        fprintf(stderr, "error opening socket, exiting.\n");
        exit(5);
    }

    qemu_mutex_init(&write_lock);
    qemu_mutex_init(&pending_reader_lock);
    qemu_cond_init(&pending_reader_condition);

    vcard_emul_init(command_line_options);

    printf("> ");
    fflush(stdout);

    /* Send init message, Host responds (and then we send reader attachments) */
    VSCMsgInit init = {
        .version = htonl(VSCARD_VERSION),
        .magic = VSCARD_MAGIC,
        .capabilities = {0}
    };
    send_msg(VSC_Init, mhHeader.reader_id, &init, sizeof(init));

    do {
        fd_set fds;

        FD_ZERO(&fds);
        FD_SET(1, &fds);
        FD_SET(sock, &fds);

        /* waiting on input from the socket */
        rv = select(sock+1, &fds, NULL, NULL, NULL);
        if (rv < 0) {
            /* handle error */
            perror("select");
            return 7;
        }
        if (FD_ISSET(1, &fds)) {
            do_command();
        }
        if (!FD_ISSET(sock, &fds)) {
            continue;
        }

        rv = read(sock, &mhHeader, sizeof(mhHeader));
        if (rv < sizeof(mhHeader)) {
            /* Error */
            if (rv < 0) {
                perror("header read error\n");
            } else {
                fprintf(stderr, "header short read %d\n", rv);
            }
            return 8;
        }
        mhHeader.type = ntohl(mhHeader.type);
        mhHeader.reader_id = ntohl(mhHeader.reader_id);
        mhHeader.length = ntohl(mhHeader.length);
        if (verbose) {
            printf("Header: type=%d, reader_id=%u length=%d (0x%x)\n",
                    mhHeader.type, mhHeader.reader_id, mhHeader.length,
                                               mhHeader.length);
        }
        switch (mhHeader.type) {
        case VSC_APDU:
        case VSC_Flush:
        case VSC_Error:
        case VSC_Init:
            rv = read(sock, pbSendBuffer, mhHeader.length);
            break;
        default:
            fprintf(stderr, "Unexpected message of type 0x%X\n", mhHeader.type);
            return 0;
        }
        switch (mhHeader.type) {
        case VSC_APDU:
            if (rv < 0) {
                /* Error */
                fprintf(stderr, "read error\n");
                close(sock);
                return 8;
            }
            if (verbose) {
                printf(" recv APDU: ");
                print_byte_array(pbSendBuffer, mhHeader.length);
            }
            /* Transmit received APDU */
            dwSendLength = mhHeader.length;
            dwRecvLength = sizeof(pbRecvBuffer);
            reader = vreader_get_reader_by_id(mhHeader.reader_id);
            reader_status = vreader_xfr_bytes(reader,
                pbSendBuffer, dwSendLength,
                pbRecvBuffer, &dwRecvLength);
            if (reader_status == VREADER_OK) {
                mhHeader.length = dwRecvLength;
                if (verbose) {
                    printf(" send response: ");
                    print_byte_array(pbRecvBuffer, mhHeader.length);
                }
                send_msg(VSC_APDU, mhHeader.reader_id,
                         pbRecvBuffer, dwRecvLength);
            } else {
                rv = reader_status; /* warning: not meaningful */
                send_msg(VSC_Error, mhHeader.reader_id, &rv, sizeof(uint32_t));
            }
            vreader_free(reader);
            reader = NULL; /* we've freed it, don't use it by accident
                              again */
            break;
        case VSC_Flush:
            /* TODO: actually flush */
            send_msg(VSC_FlushComplete, mhHeader.reader_id, NULL, 0);
            break;
        case VSC_Error:
            error_msg = (VSCMsgError *) pbSendBuffer;
            if (error_msg->code == VSC_SUCCESS) {
                qemu_mutex_lock(&pending_reader_lock);
                if (pending_reader) {
                    vreader_set_id(pending_reader, mhHeader.reader_id);
                    vreader_free(pending_reader);
                    pending_reader = NULL;
                    qemu_cond_signal(&pending_reader_condition);
                }
                qemu_mutex_unlock(&pending_reader_lock);
                break;
            }
            printf("warning: qemu refused to add reader\n");
            if (error_msg->code == VSC_CANNOT_ADD_MORE_READERS) {
                /* clear pending reader, qemu can't handle any more */
                qemu_mutex_lock(&pending_reader_lock);
                if (pending_reader) {
                    pending_reader = NULL;
                    /* make sure the event loop doesn't hang */
                    qemu_cond_signal(&pending_reader_condition);
                }
                qemu_mutex_unlock(&pending_reader_lock);
            }
            break;
        case VSC_Init:
            if (on_host_init(&mhHeader, (VSCMsgInit *)pbSendBuffer) < 0) {
                return -1;
            }
            break;
        default:
            printf("Default\n");
            return 0;
        }
    } while (rv >= 0);

    return 0;
}
Example #27
static void xseg_request_handler(void *state)
{
    BDRVArchipelagoState *s = (BDRVArchipelagoState *) state;
    void *psd = xseg_get_signal_desc(s->xseg, s->port);
    qemu_mutex_lock(&s->request_mutex);

    while (!s->stopping) {
        struct xseg_request *req;
        void *data;
        xseg_prepare_wait(s->xseg, s->srcport);
        req = xseg_receive(s->xseg, s->srcport, X_NONBLOCK);
        if (req) {
            AIORequestData *reqdata;
            ArchipelagoSegmentedRequest *segreq;
            xseg_get_req_data(s->xseg, req, (void **)&reqdata);

            switch (reqdata->op) {
            case ARCHIP_OP_READ:
                data = xseg_get_data(s->xseg, req);
                segreq = reqdata->segreq;
                segreq->count += req->serviced;

                qemu_iovec_from_buf(reqdata->aio_cb->qiov, reqdata->bufidx,
                                    data,
                                    req->serviced);

                xseg_put_request(s->xseg, req, s->srcport);

                if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) {
                    if (!segreq->failed) {
                        reqdata->aio_cb->ret = segreq->count;
                        archipelago_finish_aiocb(reqdata);
                        g_free(segreq);
                    } else {
                        g_free(segreq);
                        g_free(reqdata);
                    }
                } else {
                    g_free(reqdata);
                }
                break;
            case ARCHIP_OP_WRITE:
            case ARCHIP_OP_FLUSH:
                segreq = reqdata->segreq;
                segreq->count += req->serviced;
                xseg_put_request(s->xseg, req, s->srcport);

                if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) {
                    if (!segreq->failed) {
                        reqdata->aio_cb->ret = segreq->count;
                        archipelago_finish_aiocb(reqdata);
                        g_free(segreq);
                    } else {
                        g_free(segreq);
                        g_free(reqdata);
                    }
                } else {
                    g_free(reqdata);
                }
                break;
            case ARCHIP_OP_VOLINFO:
                s->is_signaled = true;
                qemu_cond_signal(&s->archip_cond);
                break;
            }
        } else {
            xseg_wait_signal(s->xseg, psd, 100000UL);
        }
        xseg_cancel_wait(s->xseg, s->srcport);
    }

    s->th_is_signaled = true;
    qemu_cond_signal(&s->request_cond);
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_exit(NULL);
}
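
The stopping / th_is_signaled / request_cond triple at the bottom is a join handshake. A minimal sketch of the closing side, assuming the field names above; note the handler holds request_mutex for its whole loop, so stopping must be set outside the lock. The real teardown in block/archipelago.c may differ.

static void archipelago_shutdown_sketch(BDRVArchipelagoState *s)
{
    /* The handler only drops request_mutex once it has seen stopping,
     * set th_is_signaled and signalled request_cond. */
    s->stopping = true;
    qemu_mutex_lock(&s->request_mutex);
    while (!s->th_is_signaled) {
        qemu_cond_wait(&s->request_cond, &s->request_mutex);
    }
    qemu_mutex_unlock(&s->request_mutex);
}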
Example #28
static void pfifo_run_pusher(NV2AState *d)
{
    uint32_t *push0 = &d->pfifo.regs[NV_PFIFO_CACHE1_PUSH0];
    uint32_t *push1 = &d->pfifo.regs[NV_PFIFO_CACHE1_PUSH1];
    uint32_t *dma_subroutine = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_SUBROUTINE];
    uint32_t *dma_state = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_STATE];
    uint32_t *dma_push = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_PUSH];
    uint32_t *dma_get = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_GET];
    uint32_t *dma_put = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_PUT];
    uint32_t *dma_dcount = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_DCOUNT];

    uint32_t *status = &d->pfifo.regs[NV_PFIFO_CACHE1_STATUS];
    uint32_t *get_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_GET];
    uint32_t *put_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_PUT];

    if (!GET_MASK(*push0, NV_PFIFO_CACHE1_PUSH0_ACCESS)) return;
    if (!GET_MASK(*dma_push, NV_PFIFO_CACHE1_DMA_PUSH_ACCESS)) return;

    /* suspended */
    if (GET_MASK(*dma_push, NV_PFIFO_CACHE1_DMA_PUSH_STATUS)) return;

    // TODO: should we become busy here??
    // NV_PFIFO_CACHE1_DMA_PUSH_STATE _BUSY

    unsigned int channel_id = GET_MASK(*push1,
                                       NV_PFIFO_CACHE1_PUSH1_CHID);


    /* Channel running DMA mode */
    uint32_t channel_modes = d->pfifo.regs[NV_PFIFO_MODE];
    assert(channel_modes & (1 << channel_id));

    assert(GET_MASK(*push1, NV_PFIFO_CACHE1_PUSH1_MODE)
            == NV_PFIFO_CACHE1_PUSH1_MODE_DMA);

    /* We're running so there should be no pending errors... */
    assert(GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_ERROR)
            == NV_PFIFO_CACHE1_DMA_STATE_ERROR_NONE);

    hwaddr dma_instance =
        GET_MASK(d->pfifo.regs[NV_PFIFO_CACHE1_DMA_INSTANCE],
                 NV_PFIFO_CACHE1_DMA_INSTANCE_ADDRESS) << 4;

    hwaddr dma_len;
    uint8_t *dma = nv_dma_map(d, dma_instance, &dma_len);

    while (true) {
        uint32_t dma_get_v = *dma_get;
        uint32_t dma_put_v = *dma_put;
        if (dma_get_v == dma_put_v) break;
        if (dma_get_v >= dma_len) {
            assert(false);
            SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_ERROR,
                     NV_PFIFO_CACHE1_DMA_STATE_ERROR_PROTECTION);
            break;
        }

        uint32_t word = ldl_le_p((uint32_t*)(dma + dma_get_v));
        dma_get_v += 4;

        uint32_t method_type =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_TYPE);
        uint32_t method_subchannel =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_SUBCHANNEL);
        uint32_t method =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD) << 2;
        uint32_t method_count =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_COUNT);

        uint32_t subroutine_state =
            GET_MASK(*dma_subroutine, NV_PFIFO_CACHE1_DMA_SUBROUTINE_STATE);

        if (method_count) {
            /* full */
            if (*status & NV_PFIFO_CACHE1_STATUS_HIGH_MARK) return;


            /* data word of methods command */
            d->pfifo.regs[NV_PFIFO_CACHE1_DMA_DATA_SHADOW] = word;

            uint32_t put = *put_reg;
            uint32_t get = *get_reg;

            assert((method & 3) == 0);
            uint32_t method_entry = 0;
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_ADDRESS, method >> 2);
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_TYPE, method_type);
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_SUBCHANNEL, method_subchannel);

            // NV2A_DPRINTF("push %d 0x%x 0x%x - subch %d\n", put/4, method_entry, word, method_subchannel);

            assert(put < 128*4 && (put%4) == 0);
            d->pfifo.regs[NV_PFIFO_CACHE1_METHOD + put*2] = method_entry;
            d->pfifo.regs[NV_PFIFO_CACHE1_DATA + put*2] = word;

            uint32_t new_put = (put+4) & 0x1fc;
            *put_reg = new_put;
            if (new_put == get) {
                // set high mark
                *status |= NV_PFIFO_CACHE1_STATUS_HIGH_MARK;
            }
            if (*status & NV_PFIFO_CACHE1_STATUS_LOW_MARK) {
                // unset low mark
                *status &= ~NV_PFIFO_CACHE1_STATUS_LOW_MARK;
                // signal puller
                qemu_cond_signal(&d->pfifo.puller_cond);
            }

            if (method_type == NV_PFIFO_CACHE1_DMA_STATE_METHOD_TYPE_INC) {
                SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD,
                         (method + 4) >> 2);
            }
            SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_COUNT,
                     method_count - 1);
            (*dma_dcount)++;
        } else {
Example #29
static void pfifo_run_puller(NV2AState *d)
{
    uint32_t *pull0 = &d->pfifo.regs[NV_PFIFO_CACHE1_PULL0];
    uint32_t *pull1 = &d->pfifo.regs[NV_PFIFO_CACHE1_PULL1];
    uint32_t *engine_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_ENGINE];

    uint32_t *status = &d->pfifo.regs[NV_PFIFO_CACHE1_STATUS];
    uint32_t *get_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_GET];
    uint32_t *put_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_PUT];

    // TODO
    // CacheEntry working_cache[NV2A_CACHE1_SIZE];
    // int working_cache_size = 0;
    // pull everything into our own queue

    // TODO think more about locking

    while (true) {
        if (!GET_MASK(*pull0, NV_PFIFO_CACHE1_PULL0_ACCESS)) return;

        /* empty cache1 */
        if (*status & NV_PFIFO_CACHE1_STATUS_LOW_MARK) break;

        uint32_t get = *get_reg;
        uint32_t put = *put_reg;

        assert(get < 128*4 && (get % 4) == 0);
        uint32_t method_entry = d->pfifo.regs[NV_PFIFO_CACHE1_METHOD + get*2];
        uint32_t parameter = d->pfifo.regs[NV_PFIFO_CACHE1_DATA + get*2];

        uint32_t new_get = (get+4) & 0x1fc;
        *get_reg = new_get;

        if (new_get == put) {
            // set low mark
            *status |= NV_PFIFO_CACHE1_STATUS_LOW_MARK;
        }
        if (*status & NV_PFIFO_CACHE1_STATUS_HIGH_MARK) {
            // unset high mark
            *status &= ~NV_PFIFO_CACHE1_STATUS_HIGH_MARK;
            // signal pusher
            qemu_cond_signal(&d->pfifo.pusher_cond);
        }


        uint32_t method = method_entry & 0x1FFC;
        uint32_t subchannel = GET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_SUBCHANNEL);

        // NV2A_DPRINTF("pull %d 0x%x 0x%x - subch %d\n", get/4, method_entry, parameter, subchannel);

        if (method == 0) {
            RAMHTEntry entry = ramht_lookup(d, parameter);
            assert(entry.valid);

            // assert(entry.channel_id == state->channel_id);

            assert(entry.engine == ENGINE_GRAPHICS);


            /* the engine is bound to the subchannel */
            assert(subchannel < 8);
            SET_MASK(*engine_reg, 3 << (4*subchannel), entry.engine);
            SET_MASK(*pull1, NV_PFIFO_CACHE1_PULL1_ENGINE, entry.engine);
            // NV2A_DPRINTF("engine_reg1 %d 0x%x\n", subchannel, *engine_reg);


            // TODO: this is f****d
            qemu_mutex_lock(&d->pgraph.lock);
            //make pgraph busy
            qemu_mutex_unlock(&d->pfifo.lock);

            pgraph_context_switch(d, entry.channel_id);
            pgraph_wait_fifo_access(d);
            pgraph_method(d, subchannel, 0, entry.instance);

            // make pgraph not busy
            qemu_mutex_unlock(&d->pgraph.lock);
            qemu_mutex_lock(&d->pfifo.lock);

        } else if (method >= 0x100) {
            // method passed to engine

            /* methods that take objects.
             * TODO: Check this range is correct for the nv2a */
            if (method >= 0x180 && method < 0x200) {
                //qemu_mutex_lock_iothread();
                RAMHTEntry entry = ramht_lookup(d, parameter);
                assert(entry.valid);
                // assert(entry.channel_id == state->channel_id);
                parameter = entry.instance;
                //qemu_mutex_unlock_iothread();
            }

            enum FIFOEngine engine = GET_MASK(*engine_reg, 3 << (4*subchannel));
            // NV2A_DPRINTF("engine_reg2 %d 0x%x\n", subchannel, *engine_reg);
            assert(engine == ENGINE_GRAPHICS);
            SET_MASK(*pull1, NV_PFIFO_CACHE1_PULL1_ENGINE, engine);

            // TODO: this is f****d
            qemu_mutex_lock(&d->pgraph.lock);
            //make pgraph busy
            qemu_mutex_unlock(&d->pfifo.lock);

            pgraph_wait_fifo_access(d);
            pgraph_method(d, subchannel, method, parameter);

            // make pgraph not busy
            qemu_mutex_unlock(&d->pgraph.lock);
            qemu_mutex_lock(&d->pfifo.lock);
        } else {
            assert(false);
        }

    }
}
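
Both routines hand off through puller_cond / pusher_cond when CACHE1 hits its low or high mark. A minimal sketch of a worker thread that would sit on the puller side, assuming a d->pfifo.lock protecting the register file; the real nv2a code wires this up differently across versions.

static void *pfifo_puller_thread_sketch(void *arg)
{
    NV2AState *d = arg;

    qemu_mutex_lock(&d->pfifo.lock);
    while (true) {
        /* Drain CACHE1 until the low mark is set or pulling is disabled. */
        pfifo_run_puller(d);
        /* Sleep until pfifo_run_pusher() (example #28) queues more
         * methods, clears the low mark and signals puller_cond. */
        qemu_cond_wait(&d->pfifo.puller_cond, &d->pfifo.lock);
    }

    return NULL; /* not reached */
}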