/*
 * Append an SQL message to the singly linked query queue.
 *
 * qep    - current head of the queue (may be NULL for an empty queue)
 * sqlmsg - message text; a private copy is taken with sstrdup()
 *
 * Returns the (possibly new) head of the queue.  Callers must keep using
 * the returned pointer as the head.
 *
 * Fixes over the original:
 *  - the tail walk now happens *inside* queue_lock(); the original
 *    traversed the list unlocked, racing with the consumer thread
 *  - malloc() is checked; on OOM the message is dropped instead of
 *    dereferencing NULL
 *  - the duplicated empty/non-empty branches are unified
 */
QueueEntry *AddQueueEntry(QueueEntry *qep, char *sqlmsg)
{
    QueueEntry *head = qep;
    QueueEntry *node;

    alog(LOG_DEBUG, "Adding Query %s", sqlmsg);

    queue_lock();

    node = malloc(sizeof *node);
    if (node == NULL) {
        /* Out of memory: drop this message rather than crash. */
        queue_unlock(NULL);
        return head;
    }
    node->link = NULL;
    node->msg = sstrdup(sqlmsg);

    if (head == NULL) {
        /* Queue was empty: the new node becomes the head. */
        head = node;
    } else {
        /* Walk to the tail under the lock and append. */
        QueueEntry *tail = head;
        while (tail->link != NULL) {
            tail = tail->link;
        }
        tail->link = node;
    }

    queue_signal();       /* wake the consumer thread */
    queue_unlock(NULL);
    return head;
}
/*
 * Enqueue one outgoing data block on the server's TX queue.
 *
 * _this      - queue owner; _this->genqueue holds the lock/condition state
 * data       - caller-allocated payload (ownership appears to transfer to
 *              the queue; the pointer is stored, not copied)
 * datasize   - payload size in bytes; must be non-zero
 * senderamnt - sender count folded into the stored datasize field via
 *              add_senderamnt() (packing scheme defined elsewhere)
 *
 * Returns 0 on success, -1 on invalid arguments.  Mutex failures after
 * enqueue are fatal (exit).
 */
int server_queue_add_tx_data(SServerSendQueue *_this, void *data, size_t datasize, unsigned char senderamnt)
{
    int addindex;

    if (0 == datasize || !data) {
        printf("server_queue_add_tx_data() invalid data\n");
        return -1;
    }

    /* if(queue_lock_tx(&(_this->genqueue))) { printf("%s:%d mutex\n",__FILE__,__LINE__); exit(1); } */

    /* Spin until the queue has space and we hold the TX lock; each failed
     * attempt signals the queue — presumably to wake the consumer so it
     * drains an entry (TODO confirm queue_tx_lock_if_space's contract:
     * non-zero seems to mean "no space / not locked"). */
    while (queue_tx_lock_if_space(&(_this->genqueue))) {
        queue_signal(&(_this->genqueue));
    }

    /* Reserve a slot; "unsafe" variant is OK here because we hold the lock. */
    addindex = queue_tx_add_unsafe(&(_this->genqueue), SERVERQUEUELEN);
    _this->data_array[addindex].datasize = datasize;
    /* Pack the sender count into the datasize field in-place. */
    add_senderamnt((char *)&(_this->data_array[addindex].datasize), senderamnt);
    _this->data_array[addindex].data = data;

    if (queue_unlock_tx(&(_this->genqueue))) {
        printf("%s:%d mutex\n", __FILE__, __LINE__);
        exit(1);
    }

    /* NOTE(review): signalling only when the queue is *full* looks
     * intentional (batching wake-ups?) but mirrors L7; verify against the
     * consumer's wait condition. */
    if (queue_txfull(&(_this->genqueue)))
        queue_signal(&(_this->genqueue));

    return 0;
}
/*
 * Put the circular queue into the aborted state and wake any thread
 * currently blocked on its condition variable, so waiters can observe
 * cq->abort and bail out.  The flag is set under the queue lock to pair
 * correctly with the waiters' lock/wait protocol.
 */
void ff_circular_queue_abort(struct ff_circular_queue *cq)
{
    queue_lock(cq);
    cq->abort = true;
    queue_signal(cq);   /* wake a waiter so it re-checks the abort flag */
    queue_unlock(cq);
}
/*
 * Host-side signal handler: translate a host signal into a target signal
 * and queue it for the emulated CPU.
 */
static void host_signal_handler(int host_signum, siginfo_t *info, void *puc)
{
    int sig;
    target_siginfo_t tinfo;

    /* The CPU emulator uses some host signals to detect exceptions, so
       SIGSEGV/SIGBUS are offered to it first; if it consumed the fault
       there is nothing to deliver to the guest. */
    if (host_signum == SIGSEGV || host_signum == SIGBUS) {
        if (cpu_signal_handler(host_signum, (void*)info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > NSIG)
        return;
#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "qemu: got signal %d\n", sig);
#endif
    /* NOTE(review): tinfo is passed uninitialized — presumably
       queue_signal() fills or ignores it here; verify its contract. */
    if (queue_signal(sig, &tinfo) == 1) {
        /* interrupt the virtual CPU as soon as possible */
        cpu_exit(global_env);
    }
}
/* Build a long-format queue entry for the given request/operation and
 * push it onto the global op_queue.
 * Returns 1 on success, 0 if the entry could not be allocated. */
static int put_in_queue_long(const struct req_info *req, uint32_t operation, int sync, const unsigned char *key, size_t ksize, const unsigned char *val, size_t vsize, const unsigned char *newval, size_t nvsize)
{
    struct queue_entry *entry = make_queue_long_entry(req, operation,
            key, ksize, val, vsize, newval, nvsize);

    if (!entry)
        return 0;

    /* Insert under the queue lock. */
    queue_lock(op_queue);
    queue_put(op_queue, entry);
    queue_unlock(op_queue);

    /* Only synchronous operations wake the DB thread immediately;
     * asynchronous ones tolerate the wait, and skipping the signal has a
     * measurable performance benefit (2083847usec vs 2804973usec for sets
     * on "test2d 100000 10 10"). */
    if (sync)
        queue_signal(op_queue);

    return 1;
}
/*
 * Consumer side: advance the read position one slot (wrapping at
 * capacity) and release that slot back to producers.
 */
void ff_circular_queue_advance_read(struct ff_circular_queue *cq)
{
    /* read_index is modified outside the lock — presumably only a single
     * consumer thread ever touches it; confirm before adding readers. */
    cq->read_index = (cq->read_index + 1) % cq->capacity;
    queue_lock(cq);
    --cq->size;
    queue_signal(cq);   /* wake a producer waiting for free space */
    queue_unlock(cq);
}
/*
 * Enqueue one received data block on the client's RX queue.
 *
 * _this    - queue owner; _this->genqueue holds the lock/condition state
 * data     - caller-allocated payload (pointer is stored, not copied)
 * datasize - payload size in bytes; must be non-zero
 *
 * Returns 0 on success, -1 on invalid arguments.  Mutex failures after
 * enqueue are fatal (exit), matching server_queue_add_tx_data().
 *
 * Fix: the original used `addindex` without declaring it anywhere in the
 * function (implicit declaration — a hard error in modern C); the sibling
 * TX function declares it locally, so do the same here.
 */
int rx_client_queue_add_data(SClientSendQueue *_this, void *data, size_t datasize)
{
    int addindex;   /* slot index returned by queue_rx_add_unsafe() */

    if (0 == datasize || !data) {
        printf("rx_client_queue_add_data() invalid data\n");
        return -1;
    }

    /* Spin until the queue has space and we hold the RX lock; each failed
     * attempt signals the queue — presumably to wake the consumer so it
     * drains an entry (TODO confirm queue_rx_lock_if_space's contract). */
    while (queue_rx_lock_if_space(&(_this->genqueue))) {
        queue_signal(&(_this->genqueue));
    }

    /* Reserve a slot; "unsafe" variant is OK here because we hold the lock. */
    addindex = queue_rx_add_unsafe(&(_this->genqueue), SERVER_RX_QUEUELEN);
    _this->rxdata_array[addindex].datasize = datasize;
    _this->rxdata_array[addindex].data = data;

    if (queue_unlock_rx(&(_this->genqueue))) {
        printf("%s:%d mutex\n", __FILE__, __LINE__);
        exit(1);
    }

    /* NOTE(review): signalling only when the queue is *full* mirrors the
     * TX path; verify against the consumer's wait condition. */
    if (queue_rxfull(&(_this->genqueue)))
        queue_signal(&(_this->genqueue));

    return 0;
}
static void read_loop(struct queue *queue, struct debugfs_file *df, int interval) { struct timespec starttime; clock_gettime(CLOCK_MONOTONIC_RAW, &starttime); int64_t target = 0; while (break_loop == 0) { struct timespec currenttime; clock_gettime(CLOCK_MONOTONIC_RAW, ¤ttime); uint64_t host_start = (currenttime.tv_sec - starttime.tv_sec) * 1000000 + (currenttime.tv_nsec - starttime.tv_nsec) / 1000; uint64_t tsf_counter; getTSFRegs(df, &tsf_counter); int slot_count = shmRead16(df, B43_SHM_REGS, COUNT_SLOT); int packet_queued = shmRead16(df, B43_SHM_SHARED, PACKET_TO_TRANSMIT); int transmitted = shmRead16(df, B43_SHM_SHARED, MY_TRANSMISSION); int transmit_success = shmRead16(df, B43_SHM_SHARED, SUCCES_TRANSMISSION); int transmit_other = shmRead16(df, B43_SHM_SHARED, OTHER_TRANSMISSION); clock_gettime(CLOCK_MONOTONIC_RAW, ¤ttime); uint64_t host_finish = (currenttime.tv_sec - starttime.tv_sec) * 1000000 + (currenttime.tv_nsec - starttime.tv_nsec) / 1000; struct slot current_slot = { .host_start = host_start, .host_finish = host_finish, .tsf_counter = tsf_counter, .slot_count = slot_count, .packet_queued = packet_queued, .transmitted = transmitted, .transmit_success = transmit_success, .transmit_other = transmit_other }; queue_push(queue, ¤t_slot); target += interval; int64_t diff = target - host_finish; if (diff > 0) { usleep(diff); } } fprintf(stderr, "Exiting read loop."); usleep(10000); queue_signal(queue); }
/*
 * Main user-mode emulation loop for the Alpha target: run guest code,
 * translate each trap either into a Linux syscall or into a queued
 * target signal, then process pending signals before resuming.
 */
void cpu_loop(CPUAlphaState *env)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    int trapnr;
    target_siginfo_t info;
    abi_long sysret;

    while (1) {
        bool arch_interrupt = true;

        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_RESET:
            fprintf(stderr, "Reset requested. Exit\n");
            exit(EXIT_FAILURE);
            break;
        case EXCP_MCHK:
            fprintf(stderr, "Machine check exception. Exit\n");
            exit(EXIT_FAILURE);
            break;
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            exit(EXIT_FAILURE);
            break;
        case EXCP_MMFAULT:
            /* MMU fault -> SIGSEGV; ACCERR if the page is mapped but
               protected, MAPERR if it is not mapped at all. */
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_UNALIGN:
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_OPCDEC:
        do_sigill:
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_ARITH:
            info.si_signo = TARGET_SIGFPE;
            info.si_errno = 0;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_FEN:
            /* No-op. Linux simply re-enables the FPU. */
            break;
        case EXCP_CALL_PAL:
            /* PALcode calls: breakpoints, syscalls and misc services,
               dispatched on the PAL function code. */
            switch (env->error_code) {
            case 0x80:
                /* BPT */
                info.si_signo = TARGET_SIGTRAP;
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            case 0x81:
                /* BUGCHK */
                info.si_signo = TARGET_SIGTRAP;
                info.si_errno = 0;
                info.si_code = 0;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            case 0x83:
                /* CALLSYS */
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                                    0, 0);
                if (sysret == -TARGET_ERESTARTSYS) {
                    env->pc -= 4;
                    break;
                }
                if (sysret == -TARGET_QEMU_ESIGRETURN) {
                    break;
                }
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                break;
            case 0x86:
                /* IMB */
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                break;
            case 0x9E:
                /* RDUNIQUE */
                /* Handled in the translator for usermode.  */
                abort();
            case 0x9F:
                /* WRUNIQUE */
                /* Handled in the translator for usermode.  */
                abort();
            case 0xAA:
                /* GENTRAP */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                    break;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                    break;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                    break;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                    break;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                    break;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                    break;
                case TARGET_GEN_ROPRAND:
                    info.si_code = 0;
                    break;
                default:
                    info.si_signo = TARGET_SIGTRAP;
                    info.si_code = 0;
                    break;
                }
                info.si_errno = 0;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            default:
                goto do_sigill;
            }
            break;
        case EXCP_DEBUG:
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            } else {
                arch_interrupt = false;
            }
            break;
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            arch_interrupt = false;
            break;
        default:
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(EXIT_FAILURE);
        }
        process_pending_signals (env);

        /* Most of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that RX and LOCK_ADDR should be cleared.  But there are a
           few exceptions for traps internal to QEMU.  */
        if (arch_interrupt) {
            env->flags &= ~ENV_FLAG_RX_FLAG;
            env->lock_addr = -1;
        }
    }
}
/*
 * Wake the sender thread waiting on this client queue's condition.
 *
 * Fix: the original wrote `return queue_signal(...);` — a return
 * statement with an expression in a function whose return type is void
 * is a constraint violation (C11 6.8.6.4); GCC only accepts it as an
 * extension.  Call the function and fall off the end instead.
 */
void signal_sender(SClientSendQueue *_this)
{
    queue_signal(&(_this->genqueue));
}
/*
 * Main user-mode emulation loop for the MicroBlaze target: run guest
 * code, turn BRK traps into Linux syscalls, hardware exceptions into
 * queued target signals, then process pending signals before resuming.
 */
void cpu_loop(CPUMBState *env)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    int trapnr, ret;
    target_siginfo_t info;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case 0xaa:
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_BREAK:
            /* Return address is 4 bytes after the call. */
            env->regs[14] += 4;
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
                             env->regs[12],
                             env->regs[5],
                             env->regs[6],
                             env->regs[7],
                             env->regs[8],
                             env->regs[9],
                             env->regs[10],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[3] = ret;
            }
            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it
             * at any point.)
             */
            env->regs[14] = env->sregs[SR_PC];
            break;
        case EXCP_HW_EXCP:
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                /* Exception hit in a delay slot: flag it in ESR and back
                   up the PC to the branch. */
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            }
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            /* Dispatch on the exception cause field of ESR. */
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            case ESR_EC_FPU:
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                }
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                }
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            default:
                printf ("Unhandled hw-exception: 0x%x\n",
                        env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
                exit(EXIT_FAILURE);
                break;
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(EXIT_FAILURE);
        }
        process_pending_signals (env);
    }
}
/*
 * Main user-mode emulation loop for the Xtensa target: run guest code,
 * handle register-window overflow/underflow traps, dispatch user
 * exceptions (illegal insn, syscall, alloca, div-by-zero, protection
 * faults) and process pending signals before resuming.
 */
void cpu_loop(CPUXtensaState *env)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    target_siginfo_t info;
    abi_ulong ret;
    int trapnr;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        /* Leave exception mode before handling the trap. */
        env->sregs[PS] &= ~PS_EXCM;
        switch (trapnr) {
        case EXCP_INTERRUPT:
            break;

        case EXC_WINDOW_OVERFLOW4:
            xtensa_overflow4(env);
            break;
        case EXC_WINDOW_UNDERFLOW4:
            xtensa_underflow4(env);
            break;
        case EXC_WINDOW_OVERFLOW8:
            xtensa_overflow8(env);
            break;
        case EXC_WINDOW_UNDERFLOW8:
            xtensa_underflow8(env);
            break;
        case EXC_WINDOW_OVERFLOW12:
            xtensa_overflow12(env);
            break;
        case EXC_WINDOW_UNDERFLOW12:
            xtensa_underflow12(env);
            break;

        case EXC_USER:
            switch (env->sregs[EXCCAUSE]) {
            case ILLEGAL_INSTRUCTION_CAUSE:
            case PRIVILEGED_CAUSE:
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                info.si_code =
                    env->sregs[EXCCAUSE] == ILLEGAL_INSTRUCTION_CAUSE ?
                    TARGET_ILL_ILLOPC : TARGET_ILL_PRVOPC;
                info._sifields._sigfault._addr = env->sregs[EPC1];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

            case SYSCALL_CAUSE:
                /* Step past the 3-byte syscall instruction before the
                   call; wind back on ERESTARTSYS. */
                env->pc += 3;
                ret = do_syscall(env, env->regs[2],
                                 env->regs[6], env->regs[3],
                                 env->regs[4], env->regs[5],
                                 env->regs[8], env->regs[9], 0, 0);
                switch (ret) {
                default:
                    env->regs[2] = ret;
                    break;

                case -TARGET_ERESTARTSYS:
                    env->pc -= 3;
                    break;

                case -TARGET_QEMU_ESIGRETURN:
                    break;
                }
                break;

            case ALLOCA_CAUSE:
                /* MOVSP with an invalid window: rotate back and refill
                   the caller's registers based on the call size encoded
                   in the top bits of a0. */
                env->sregs[PS] = deposit32(env->sregs[PS],
                                           PS_OWB_SHIFT, PS_OWB_LEN,
                                           env->sregs[WINDOW_BASE]);

                switch (env->regs[0] & 0xc0000000) {
                case 0x00000000:
                case 0x40000000:
                    xtensa_rotate_window(env, -1);
                    xtensa_underflow4(env);
                    break;

                case 0x80000000:
                    xtensa_rotate_window(env, -2);
                    xtensa_underflow8(env);
                    break;

                case 0xc0000000:
                    xtensa_rotate_window(env, -3);
                    xtensa_underflow12(env);
                    break;
                }
                break;

            case INTEGER_DIVIDE_BY_ZERO_CAUSE:
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_INTDIV;
                info._sifields._sigfault._addr = env->sregs[EPC1];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

            case LOAD_PROHIBITED_CAUSE:
            case STORE_PROHIBITED_CAUSE:
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_ACCERR;
                info._sifields._sigfault._addr = env->sregs[EXCVADDR];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

            default:
                fprintf(stderr, "exccause = %d\n", env->sregs[EXCCAUSE]);
                g_assert_not_reached();
            }
            break;
        case EXCP_DEBUG:
            info.si_signo = TARGET_SIGTRAP;
            info.si_errno = 0;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXC_DEBUG:
        default:
            fprintf(stderr, "trapnr = %d\n", trapnr);
            g_assert_not_reached();
        }
        process_pending_signals(env);
    }
}