void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%u\n", tbl->name, handle);

    qemu_mutex_lock(&tbl->lock);

    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
    }

    qemu_mutex_unlock(&tbl->lock);
}

static void char_pty_finalize(Object *obj)
{
    Chardev *chr = CHARDEV(obj);
    PtyChardev *s = PTY_CHARDEV(obj);

    qemu_mutex_lock(&chr->chr_write_lock);
    pty_chr_state(chr, 0);
    object_unref(OBJECT(s->ioc));
    pty_chr_timer_cancel(s);
    qemu_mutex_unlock(&chr->chr_write_lock);
    qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
}

static void sifive_plic_set_claimed(SiFivePLICState *plic, int irq, bool claimed)
{
    uint32_t word = irq >> 5;

    qemu_mutex_lock(&plic->lock);
    if (claimed) {
        plic->claimed[word] |= (1u << (irq & 31));
    } else {
        plic->claimed[word] &= ~(1u << (irq & 31));
    }
    qemu_mutex_unlock(&plic->lock);
}

static void sifive_plic_set_pending(SiFivePLICState *plic, int irq, bool pending)
{
    uint32_t word = irq >> 5;

    qemu_mutex_lock(&plic->lock);
    if (pending) {
        plic->pending[word] |= (1u << (irq & 31));
    } else {
        plic->pending[word] &= ~(1u << (irq & 31));
    }
    qemu_mutex_unlock(&plic->lock);
}

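/* Hedged sketch of the matching read side: test a single IRQ's pending bit
 * under the same lock, using the same word/bit split as the setters above
 * (irq >> 5 picks the 32-bit word, irq & 31 the bit within it). This helper
 * is hypothetical, not part of the SiFive PLIC model shown here. */
static bool sifive_plic_test_pending(SiFivePLICState *plic, int irq)
{
    bool ret;

    qemu_mutex_lock(&plic->lock);
    ret = (plic->pending[irq >> 5] >> (irq & 31)) & 1;
    qemu_mutex_unlock(&plic->lock);

    return ret;
}
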
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work()) {
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    finish_safe_work(cpu);
}

/**
 * Install new regions list
 */
static void hostmem_listener_commit(MemoryListener *listener)
{
    HostMem *hostmem = container_of(listener, HostMem, listener);

    qemu_mutex_lock(&hostmem->current_regions_lock);
    g_free(hostmem->current_regions);
    hostmem->current_regions = hostmem->new_regions;
    hostmem->num_current_regions = hostmem->num_new_regions;
    qemu_mutex_unlock(&hostmem->current_regions_lock);

    /* Reset new regions list */
    hostmem->new_regions = NULL;
    hostmem->num_new_regions = 0;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (!any_cpu_has_work()) {
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);
    iothread->once = (GOnce) G_ONCE_INIT;

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

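/* A minimal synchronous-caller sketch tying together queue_work_on_cpu() and
 * process_queued_cpu_work() above: queue a stack-allocated work item and wait
 * for the target vCPU to mark it done via qemu_work_cond. This mirrors what
 * QEMU's run_on_cpu() does, but the exact field set and the assumption that
 * the caller holds qemu_global_mutex around the wait are inferred from the
 * snippets here, not copied verbatim from any one QEMU version. */
static void run_on_cpu_sketch(CPUState *cpu,
                              void (*func)(CPUState *cpu, void *data),
                              void *data)
{
    struct qemu_work_item wi;

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;      /* stack-allocated: the consumer must not g_free() it */
    wi.exclusive = false; /* run under the BQL, not as exclusive work */

    queue_work_on_cpu(cpu, &wi);
    /* Caller is assumed to hold qemu_global_mutex, which the wait drops. */
    while (!atomic_mb_read(&wi.done)) {
        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
    }
}
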
void qemu_mutex_lock_iothread(void)
{
    LOGD_CPUS("%s1\n", __func__);
    if (!tcg_enabled()) {
        LOGD_CPUS("%s2\n", __func__);
        qemu_mutex_lock(&qemu_global_mutex);
        LOGD_CPUS("%s3\n", __func__);
    } else {
        LOGD_CPUS("%s4\n", __func__);
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            LOGD_CPUS("%s5\n", __func__);
            qemu_cpu_kick_thread(first_cpu);
            LOGD_CPUS("%s6\n", __func__);
            qemu_mutex_lock(&qemu_global_mutex);
            LOGD_CPUS("%s7\n", __func__);
        }
        LOGD_CPUS("%s8\n", __func__);
        iothread_requesting_mutex = false;
        LOGD_CPUS("%s9\n", __func__);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
        LOGD_CPUS("%s10\n", __func__);
    }
}

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new0(QEMUBH, 1);
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;

    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);

    return bh;
}

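/* Hedged usage sketch for aio_bh_new(): create a bottom half on a context,
 * arm it with qemu_bh_schedule(), and let aio_bh_poll() (shown further below)
 * run it. qemu_bh_schedule() and qemu_bh_delete() are the standard QEMU BH
 * entry points; the callback and its counter payload are hypothetical. */
static void my_bh_cb(void *opaque)
{
    int *counter = opaque; /* hypothetical payload */

    (*counter)++;
}

static QEMUBH *arm_counter_bh(AioContext *ctx, int *counter)
{
    QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, counter);

    qemu_bh_schedule(bh); /* sets bh->scheduled; aio_bh_poll() invokes the cb */
    return bh;            /* caller eventually calls qemu_bh_delete(bh) */
}
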
static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);

    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
}

void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for other thread to let us start */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true; /* success, we got here */

    return NULL;
}

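/* Hedged sketch of the launcher side implied by test_acquire_thread(): hold
 * start_lock while spawning the worker so it blocks on the lock/unlock pair
 * above, finish setup, then release the lock to let it run. Only start_lock
 * and thread_acquired are visible in the snippet; everything else here is an
 * assumption about the surrounding test harness. */
static void launch_acquire_test(AcquireTestData *data)
{
    QemuThread thread;

    qemu_mutex_init(&data->start_lock);
    data->thread_acquired = false;

    qemu_mutex_lock(&data->start_lock);
    qemu_thread_create(&thread, "acquire", test_acquire_thread,
                       data, QEMU_THREAD_JOINABLE);
    /* ... prepare ctx here while the worker is gated ... */
    qemu_mutex_unlock(&data->start_lock); /* let the worker proceed */

    qemu_thread_join(&thread);
    g_assert(data->thread_acquired);
}
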
int set_sample_tainted( dba_context* ctx )
{
    const char*  img;
    UT_array*    fblocks;
    TSK_DADDR_T  st_haddr, ed_haddr;
    TSK_DADDR_T* haddr_tuple;
    UT_array*    fnames;
    char**       fname;

    img     = get_device_image( "ide0-hd0" );
    fblocks = tsk_find_haddr_by_filename( img, ctx->sample_gpath );

    qemu_mutex_lock( &qemu_global_mutex );
    for( haddr_tuple = (TSK_DADDR_T*)utarray_front(fblocks);
         haddr_tuple != NULL;
         haddr_tuple = (TSK_DADDR_T*)utarray_next(fblocks, haddr_tuple) ) {

        st_haddr = haddr_tuple[0];
        ed_haddr = haddr_tuple[1];

        fnames = tsk_get_filename_by_haddr( img, st_haddr );
        if( fnames == NULL )
            continue;

        for( fname = (char**)utarray_front(fnames);
             fname != NULL;
             fname = (char**)utarray_next(fnames, fname) ) {

            if( strcasecmp(*fname, ctx->sample_gpath) != 0 )
                continue;

            dift_contaminate_disk_or( st_haddr,
                                      ed_haddr - st_haddr + 1,
                                      ctx->taint.tag );
            break;
        }
        utarray_free( fnames );
    }
    utarray_free( fblocks );
    qemu_mutex_unlock( &qemu_global_mutex );

    return 0;
}

static void *rcu_read_stress_test(void *arg)
{
    int i;
    int itercnt = 0;
    struct rcu_stress *p;
    int pc;
    long long n_reads_local = 0;
    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
    volatile int garbage = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        rcu_read_lock();
        p = atomic_rcu_read(&rcu_stress_current);
        if (p->mbtest == 0) {
            n_mberror++;
        }
        rcu_read_lock(); /* deliberately nested read-side critical section */
        for (i = 0; i < 100; i++) {
            garbage++;
        }
        rcu_read_unlock();
        pc = p->pipe_count;
        rcu_read_unlock();
        if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
            pc = RCU_STRESS_PIPE_LEN;
        }
        rcu_stress_local[pc]++;
        n_reads_local++;
        if ((++itercnt % 0x1000) == 0) {
            synchronize_rcu();
        }
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        rcu_stress_count[i] += rcu_stress_local[i];
    }
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

static gboolean pty_chr_timer(gpointer opaque)
{
    struct Chardev *chr = CHARDEV(opaque);
    PtyChardev *s = PTY_CHARDEV(opaque);

    qemu_mutex_lock(&chr->chr_write_lock);
    s->timer_src = NULL;
    g_source_unref(s->open_source);
    s->open_source = NULL;
    if (!s->connected) {
        /* Next poll ... */
        pty_chr_update_read_handler_locked(chr);
    }
    qemu_mutex_unlock(&chr->chr_write_lock);
    return FALSE;
}

static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        qemu_mutex_unlock(&qemu_global_mutex);

        r = sigtimedwait(&waitset, &siginfo, &ts);
        e = errno;

        qemu_mutex_lock(&qemu_global_mutex);

        if (r == -1 && !(e == EAGAIN || e == EINTR)) {
            fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
            exit(1);
        }

        switch (r) {
        case SIGBUS:
#ifdef TARGET_I386
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr))
#endif
                sigbus_reraise();
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            /* report sigpending's own errno, not the stale one from
             * sigtimedwait */
            fprintf(stderr, "sigpending: %s\n", strerror(errno));
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    qemu_mutex_lock(&rdma_dev_res->lock);
    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            if (bctx->backend_qp) {
                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
            } else {
                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
            }

            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
        total_ne += ne;
    } while (ne > 0);
    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    qemu_mutex_unlock(&rdma_dev_res->lock);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            /* Paired with write barrier in bh schedule to ensure reading for
             * idle & callbacks coming after bh's scheduling.
             */
            smp_rmb();
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

/**
 * Install new regions list
 */
static void hostmem_listener_commit(MemoryListener *listener)
{
    HostMem *hostmem = container_of(listener, HostMem, listener);
    int i;

    qemu_mutex_lock(&hostmem->current_regions_lock);
    for (i = 0; i < hostmem->num_current_regions; i++) {
        memory_region_unref(hostmem->current_regions[i].mr);
    }
    g_free(hostmem->current_regions);
    hostmem->current_regions = hostmem->new_regions;
    hostmem->num_current_regions = hostmem->num_new_regions;
    qemu_mutex_unlock(&hostmem->current_regions_lock);

    /* Reset new regions list */
    hostmem->new_regions = NULL;
    hostmem->num_new_regions = 0;
}

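/* Hedged sketch of the build side that hostmem_listener_commit() consumes:
 * a region_add-style listener callback appending to the staging array that
 * commit later installs. Only the .mr field of the region struct is visible
 * above; the HostMemRegion name, the g_renew() growth strategy, and the
 * ref/unref pairing with commit's loop are assumptions. */
static void hostmem_listener_append_region(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    HostMem *hostmem = container_of(listener, HostMem, listener);
    HostMemRegion *region;

    hostmem->new_regions = g_renew(HostMemRegion, hostmem->new_regions,
                                   hostmem->num_new_regions + 1);
    region = &hostmem->new_regions[hostmem->num_new_regions++];
    region->mr = section->mr;
    memory_region_ref(region->mr); /* balanced by the unref loop in commit */
}
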
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}

static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    EduState *edu = opaque;
    uint64_t val = ~0ULL;

    if (size != 4) {
        return val;
    }

    switch (addr) {
    case 0x00:
        val = 0x010000edu; /* identification value 0x010000ed; the trailing
                            * 'u' is just the unsigned-literal suffix */
        break;
    case 0x04:
        val = edu->addr4;
        break;
    case 0x08:
        qemu_mutex_lock(&edu->thr_mutex);
        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        val = atomic_read(&edu->status);
        break;
    case 0x24:
        val = edu->irq_status;
        break;
    case 0x80:
        dma_rw(edu, false, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, false, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, false, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        dma_rw(edu, false, &val, &edu->dma.cmd, false);
        break;
    }

    return val;
}

static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext)
{
    SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
    SimpleSpiceUpdate *update;
    int ret = false;

    dprint(3, "%s:\n", __func__);

    qemu_mutex_lock(&ssd->lock);
    if (ssd->update != NULL) {
        update = ssd->update;
        ssd->update = NULL;
        *ext = update->ext;
        ret = true;
    }
    qemu_mutex_unlock(&ssd->lock);

    return ret;
}

void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
{
    dprint(3, "%s:\n", __func__);
    vga_hw_update();

    qemu_mutex_lock(&ssd->lock);
    if (ssd->update == NULL) {
        ssd->update = qemu_spice_create_update(ssd);
        ssd->notify++;
    }
    qemu_spice_cursor_refresh_unlocked(ssd);
    qemu_mutex_unlock(&ssd->lock);

    if (ssd->notify) {
        ssd->notify = 0;
        qemu_spice_wakeup(ssd);
        dprint(2, "%s: notify\n", __func__);
    }
}

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!atomic_read(&iothread->stopping)) {
        aio_poll(iothread->ctx, true);
    }

    rcu_unregister_thread();
    return NULL;
}

static void *pfifo_puller_thread(void *arg)
{
    NV2AState *d = (NV2AState *)arg;

    glo_set_current(d->pgraph.gl_context);

    qemu_mutex_lock(&d->pfifo.lock);
    while (true) {
        pfifo_run_puller(d);
        qemu_cond_wait(&d->pfifo.puller_cond, &d->pfifo.lock);

        if (d->exiting) {
            break;
        }
    }
    qemu_mutex_unlock(&d->pfifo.lock);

    return NULL;
}

void qemu_spice_display_resize(SimpleSpiceDisplay *ssd)
{
    dprint(1, "%s:\n", __func__);

    memset(&ssd->dirty, 0, sizeof(ssd->dirty));
    qemu_pf_conv_put(ssd->conv);
    ssd->conv = NULL;

    qemu_mutex_lock(&ssd->lock);
    if (ssd->update != NULL) {
        qemu_spice_destroy_update(ssd, ssd->update);
        ssd->update = NULL;
    }
    qemu_mutex_unlock(&ssd->lock);

    qemu_spice_destroy_host_primary(ssd);
    qemu_spice_create_host_primary(ssd);

    memset(&ssd->dirty, 0, sizeof(ssd->dirty));
    ssd->notify++;
}