int main(int argc, char *argv[])
{
    int ret;

    ret = check_list();
    if (ret) {
        fprintf(stderr, "LIST fails: %d\n", ret);
        return 1;
    }

    ret = check_stack();
    if (ret) {
        fprintf(stderr, "STACK fails: %d\n", ret);
        return 2;
    }

    ret = check_queue();
    if (ret) {
        fprintf(stderr, "QUEUE fails: %d\n", ret);
        return 3;
    }

    return 0;
}
int main()
{
    int i, asize, check;
    unsigned int item;
    unsigned int copy[MAX];

    create();
    asize = 0;

    unsigned int s = nondet_unsigned_int();
    __CPROVER_assume(s <= MAX);

    for (i = 0; i < s; i++) {
        item = nondet_unsigned_int();
        insert_by_priority(item);
        asize++;
        display_pqueue();
        check = check_queue(pri_que, copy, asize);
        assert(check == 0);
    }
    return 0;
}
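The harness above asserts that check_queue(pri_que, copy, asize) returns 0 after every insertion. As an illustration only (the harness's real helper is not shown here), a minimal sketch of such an invariant check, assuming insert_by_priority keeps the global pri_que array in non-increasing priority order and MAX bounds the array:

/* Hypothetical sketch, not the harness's actual check_queue(): verifies that
 * the first 'size' entries are in non-increasing order.
 * Returns 0 when the invariant holds, nonzero otherwise. */
static int check_queue_sketch(const unsigned int *que, unsigned int size)
{
    for (unsigned int i = 1; i < size; i++) {
        if (que[i - 1] < que[i])
            return 1;   /* priority ordering violated */
    }
    return 0;
}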
ssize_t message_receive(char *msg, unsigned int max_len, unsigned int queueid)
{
    check_queue(queueid);
    ssize_t length = mq_receive(queue_map[queueid], msg, max_len, NULL);
    if (length == -1) {
        perror("mq_receive");
        fprintf(stderr, " pid for mq_receive: %d, max_len is %u, queueid is %u\n",
                getpid(), max_len, queueid);
    }
    return length;
}
static int queue_terminate(lua_State *L)
{
    apr_status_t status;
    lua_apr_queue *object;

    object = check_queue(L, 1);
    status = apr_queue_term(object->handle);

    return push_status(L, status);
}
static int queue_interrupt(lua_State *L)
{
    apr_status_t status;
    lua_apr_queue *object;

    object = check_queue(L, 1);
    status = apr_queue_interrupt_all(object->handle);

    return push_status(L, status);
}
static int queue_push_real(lua_State *L, apr_queue_push_func cb)
{
    lua_apr_queue *object;
    apr_status_t status;
    void *data;

    object = check_queue(L, 1);
    lua_apr_serialize(L, 2);             /* serialize the value into a string on the stack */
    data = strdup(lua_tostring(L, -1));  /* copy the string so the queue owns it */
    status = cb(object->handle, data);

    return push_status(L, status);
}
static int queue_pop_real(lua_State *L, apr_queue_pop_func cb)
{
    lua_apr_queue *object;
    apr_status_t status;
    void *data;

    lua_settop(L, 1);
    object = check_queue(L, 1);
    status = cb(object->handle, &data);
    if (status != APR_SUCCESS)
        return push_error_status(L, status);
    lua_pushstring(L, data);   /* push the serialized string... */
    free(data);                /* ...and release the copy made by queue_push_real() */
    lua_apr_unserialize(L);

    return lua_gettop(L) - 1;
}
void RecycleThread::run()
{
    TcpTask_IT it;

    while (!isFinal()) {
        this->setRuning();
        check_queue();
        if (!tasks.empty()) {
            for (it = tasks.begin(); it != tasks.end();) {
                TcpTask *task = *it;
                switch (task->recycleConn()) {
                    case 1:   // connection recycled: remove and destroy the task
                    {
                        it = tasks.erase(it);
                        if (task->isUnique()) {
                            task->uniqueRemove();
                        }
                        task->getNextState();
                        DELETE(task);
                    }
                    break;
                    case 0:   // not finished yet; check again on the next pass
                    {
                        it++;
                    }
                    break;
                }
            }
        }
        Thread::msleep(200);
    }

    // Shutting down: recycle any tasks that remain.
    for (it = tasks.begin(); it != tasks.end();) {
        TcpTask *task = *it;
        it = tasks.erase(it);
        if (task->isUnique()) {
            task->uniqueRemove();
        }
        task->getNextState();
        DELETE(task);
    }
}
void permutation_process(int i, int n)
{
    int k, flag;

    if (i == n) {
        flag = check_queue(n);   /* test the completed permutation */
        if (flag)
            count++;             /* count permutations that pass the check */
    } else {
        for (k = i; k <= n; k++) {
            if (is_swap(i, k)) {
                swap_process(k, i);
                permutation_process(i + 1, n);
                swap_process(k, i);          /* undo the swap (backtrack) */
            }
        }
    }
}
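The recursion above enumerates permutations by swapping elements into position i and counts those accepted by check_queue(n). Purely as an illustration of such an acceptance test (the original check_queue is not shown and may test something different), here is a sketch that assumes a hypothetical global array queue[] holding the permutation, with queue[i] read as the column used by row i, and rejects placements that share a diagonal:

/* Illustrative only; assumes a hypothetical array indexed 1..n.
 * Returns 1 when no two entries lie on a common diagonal, 0 otherwise. */
static int check_queue_sketch(const int *queue, int n)
{
    for (int i = 2; i <= n; i++) {
        for (int j = 1; j < i; j++) {
            if (i - j == queue[i] - queue[j] || i - j == queue[j] - queue[i])
                return 0;   /* same diagonal */
        }
    }
    return 1;
}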
void Conveyor::on_idle(void*)
{
    if (running) {
        check_queue();
    }

    // we can garbage collect the block queue here
    if (queue.tail_i != queue.isr_tail_i) {
        if (queue.is_empty()) {
            __debugbreak();
        } else {
            // Cleanly delete block
            Block* block = queue.tail_ref();
            //block->debug();
            block->clear();
            queue.consume_tail();
        }
    }
}
// Wait for the queue to be empty and for all the jobs to finish in step ticker
void Conveyor::wait_for_idle(bool wait_for_motors)
{
    // wait for the job queue to empty, this means cycling everything on the
    // block queue into the job queue, forcing them to be jobs
    running = false; // stops on_idle calling check_queue
    while (!queue.is_empty()) {
        check_queue(true); // forces queue to be made available to stepticker
        THEKERNEL->call_event(ON_IDLE, this);
    }

    if (wait_for_motors) {
        // now we wait for all motors to stop moving
        while (!is_idle()) {
            THEKERNEL->call_event(ON_IDLE, this);
        }
    }

    running = true;
    // returning now means that everything has totally finished
}
static int queue_gc(lua_State *L)
{
    close_queue_real(check_queue(L, 1));
    return 0;
}
/*
 * Transfer 'trnsfr_lcks' held by this executing thread to other
 * threads waiting for the locks. When a lock has been transferred
 * we also have to try to acquire as many locks as possible for the
 * other thread.
 */
static int transfer_locks(Process *p,
                          ErtsProcLocks trnsfr_lcks,
                          erts_pix_lock_t *pix_lock,
                          int unlock)
{
    int transferred = 0;
    erts_tse_t *wake = NULL;
    erts_tse_t *wtr;
    ErtsProcLocks unset_waiter = 0;
    ErtsProcLocks tlocks = trnsfr_lcks;
    int lock_no;

    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    for (lock_no = 0; tlocks && lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
        ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
        if (tlocks & lock) {
            erts_proc_lock_queues_t *qs = p->lock.queues;
            /* Transfer lock */
#ifdef ERTS_ENABLE_LOCK_CHECK
            tlocks &= ~lock;
#endif
            ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
                           & (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
            transferred++;
            wtr = dequeue_waiter(qs, lock_no);
            ERTS_LC_ASSERT(wtr);
            if (!qs->queue[lock_no])
                unset_waiter |= lock;
            ERTS_LC_ASSERT(wtr->uflgs & lock);
            wtr->uflgs &= ~lock;
            if (wtr->uflgs)
                try_aquire(&p->lock, wtr);
            if (!wtr->uflgs) {
                /*
                 * The other thread got all locks it needs;
                 * need to wake it up.
                 */
                wtr->next = wake;
                wake = wtr;
            }
        }
    }

    if (unset_waiter) {
        unset_waiter <<= ERTS_PROC_LOCK_WAITER_SHIFT;
        (void) ERTS_PROC_LOCK_FLGS_BAND_(&p->lock, ~unset_waiter);
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    ERTS_LC_ASSERT(tlocks == 0); /* We should have transferred all of them */

    if (!wake) {
        if (unlock)
            erts_pix_unlock(pix_lock);
    }
    else {
        erts_pix_unlock(pix_lock);

        do {
            erts_tse_t *tmp = wake;
            wake = wake->next;
            erts_atomic32_set_nob(&tmp->uaflgs, 0);
            erts_tse_set(tmp);
        } while (wake);

        if (!unlock)
            erts_pix_lock(pix_lock);
    }
    return transferred;
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * If successful, this returns with all locks in 'need_locks' held.
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
               ErtsProcLocks locks,
               ErtsProcLocks need_locks,
               ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
    erts_tse_t *wtr;
    erts_proc_lock_queues_t *qs;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

    qs = wtr->udata;
    ASSERT(qs);

    /* Provide the process with waiter queues, if it doesn't have one. */
    if (!p->lock.queues) {
        qs->next = NULL;
        p->lock.queues = qs;
    }
    else {
        qs->next = p->lock.queues->next;
        p->lock.queues->next = qs;
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs) {
        /* We didn't get them all; need to wait... */

        ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

        erts_atomic32_set_nob(&wtr->uaflgs, 1);
        erts_pix_unlock(pix_lock);

        while (1) {
            int res;
            erts_tse_reset(wtr);

            if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
                break;

            /*
             * Wait for needed locks. When we are woken all needed locks
             * have been acquired by other threads and transferred to us.
             * However, we need to be prepared for spurious wakeups.
             */
            do {
                res = erts_tse_wait(wtr); /* might return EINTR */
            } while (res != 0);
        }

        erts_pix_lock(pix_lock);

        ASSERT(wtr->uflgs == 0);
    }

    /* Recover some queues to store in the waiter. */
    ERTS_LC_ASSERT(p->lock.queues);
    if (p->lock.queues->next) {
        qs = p->lock.queues->next;
        p->lock.queues->next = qs->next;
    }
    else {
        qs = p->lock.queues;
        p->lock.queues = NULL;
    }
    wtr->udata = qs;

    erts_pix_unlock(pix_lock);

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr, 0);
}
static int queue_close(lua_State *L)
{
    close_queue_real(check_queue(L, 1));
    lua_pushboolean(L, 1);
    return 1;
}
void message_send(char *msg, unsigned int len, unsigned int queueid, unsigned int priority)
{
    check_queue(queueid);
    /* Report failures the same way message_receive() does instead of ignoring them. */
    if (mq_send(queue_map[queueid], msg, len, priority) == -1)
        perror("mq_send");
}
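For context, a minimal usage sketch of the two POSIX message-queue wrappers above. It assumes queue_map is a global array of mqd_t descriptors indexed by queueid and that check_queue(queueid) only validates the index; the queue name, sizes, and id 0 are illustrative, not taken from the original code.

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

/* Assumption: queue_map is the module's global descriptor table. */
extern mqd_t queue_map[];

static void queue_round_trip_sketch(void)
{
    struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 128 };
    char buf[128];

    /* Register a queue under the (hypothetical) id 0. */
    queue_map[0] = mq_open("/example_queue", O_CREAT | O_RDWR, 0600, &attr);
    if (queue_map[0] == (mqd_t)-1) {
        perror("mq_open");
        return;
    }

    message_send("hello", 5, 0, 1);                   /* send with priority 1 */
    ssize_t n = message_receive(buf, sizeof buf, 0);  /* blocks until a message arrives */
    if (n >= 0)
        printf("received %zd bytes\n", n);

    mq_close(queue_map[0]);
    mq_unlink("/example_queue");
}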
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * If successful, this returns with all locks in 'need_locks' held.
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
               ErtsProcLocks locks,
               ErtsProcLocks need_locks,
               ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
    erts_tse_t *wtr;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs == 0)
        erts_pix_unlock(pix_lock);
    else {
        /* We didn't get them all; need to wait... */

        ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

        erts_atomic32_set_nob(&wtr->uaflgs, 1);
        erts_pix_unlock(pix_lock);

        while (1) {
            int res;
            erts_tse_reset(wtr);

            if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
                break;

            /*
             * Wait for needed locks. When we are woken all needed locks
             * have been acquired by other threads and transferred to us.
             * However, we need to be prepared for spurious wakeups.
             */
            do {
                res = erts_tse_wait(wtr); /* might return EINTR */
            } while (res != 0);
        }

        ASSERT(wtr->uflgs == 0);
    }

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr);
}
static int queue_tostring(lua_State *L)
{
    lua_pushfstring(L, "%s (%p)",
                    lua_apr_queue_type.friendlyname,
                    check_queue(L, 1));
    return 1;
}
void CheckConnectThread::run()
{
    while (!isFinal()) {
        this->setRuning();
        Thread::sleep(1);
        Time ct;
        check_queue();
        for (TcpClientTaskBase_IT it = m_taskContainer.begin(); it != m_taskContainer.end();) {
            TcpClientTaskBase *task = *it;
            switch (task->getState()) {
#if 0
                case TcpClientTaskBase::close:
                {
#if 0
                    if (!task->checkStateTimeout(TcpClientTaskBase::close, ct, 4)) {
                        if (task->connect()) {
                            m_pool->addCheckWait(task);
                        } else if (!task->needReConn()) {
                            TcpClientTaskBase_IT temp = it;
                            ++it;
                            m_taskContainer.erase(temp);
                            task->resetState();
                            SAFE_DELETE(task);
                            continue;
                        }
                    }
                }
#endif
                    break;
                case TcpClientTaskBase::sync:
                    break;
                case TcpClientTaskBase::okay:
                    task->checkConn();
                    break;
                case TcpClientTaskBase::recycle:
#if 0
                    if (task->checkStateTimeout(TcpClientTaskBase::recycle, ct, 4)) {
                        task->getNextState();
                        if (!task->needReConn()) {
                            TcpClientTaskBase_IT temp = it;
                            ++it;
                            m_taskContainer.erase(temp);
                            SAFE_DELETE(task);
                            continue;
                        }
                    }
#endif
                    break;
#endif
            }
            ++it;
        }
    }
}
void step(Queue *channel)
{
    check_result(channel);
    check_timeouts(s3eTimerGetUTC(), channel);
    check_queue(channel);
}
void OkayThread::run()
{
    Time currentTime;
    Time writeTime;
    TcpTask_IT it;
    SDWORD kdpfd;
    EpollfdContainer epollFdContainer;

    kdpfd = epoll_create(256);
    assert(-1 != kdpfd);
    m_epollFdContainer.resize(256);
    DWORD countFd = 0;
    bool check = false;

    while (!isFinal()) {
        this->setRuning();
        currentTime.now();

        if (check) {
            check_queue();
            if (!m_taskContainer.empty()) {
                for (it = m_taskContainer.begin(); it != m_taskContainer.end();) {
                    TcpTask *task = *it;
                    if (task->m_mSocket.checkChangeSocket()) {
                        if (task->isFdsrAdd()) {
                            task->delEpoll(kdpfd, EPOLLIN | EPOLLERR | EPOLLPRI);
                        }
                        task->delEpoll(m_kdpfd, EPOLLIN | EPOLLERR | EPOLLPRI);
                        task->m_mSocket.changeSocket(0);
                        if (task->isFdsrAdd()) {
                            task->addEpoll(kdpfd, EPOLLIN | EPOLLERR | EPOLLPRI, (void*)task);
                        }
                        task->addEpoll(m_kdpfd, EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLPRI, (void*)task);
                    }
                    task->checkSignal(currentTime);
                    if (task->isTerminateWait()) {
                        task->Terminate();
                    }
                    if (task->isTerminate()) {
                        if (task->isFdsrAdd()) {
                            task->delEpoll(kdpfd, EPOLLIN | EPOLLERR | EPOLLPRI);
                            countFd--;
                        }
                        remove(it);
                        task->getNextState();
                        m_pool->addRecycle(task);
                    } else {
                        if (!task->isFdsrAdd()) {
                            task->addEpoll(kdpfd, EPOLLIN | EPOLLERR | EPOLLPRI, (void*)task);
                            task->fdsrAdd();
                            countFd++;
                            if (countFd > epollFdContainer.size()) {
                                epollFdContainer.resize(countFd + 16);
                            }
                        }
                        ++it;
                    }
                }
            }
            check = false;
        }

        Thread::msleep(2);

        if (countFd) {
            SDWORD retcode = epoll_wait(kdpfd, &epollFdContainer[0], countFd, 0);
            if (retcode > 0) {
                for (SDWORD index = 0; index < retcode; ++index) {
                    TcpTask *task = (TcpTask*)epollFdContainer[index].data.ptr;
                    if (epollFdContainer[index].events & (EPOLLERR | EPOLLPRI)) {
                        task->TerminateError();
                        task->Terminate(TcpTask::TM_ACTIVE);
                        check = true;
                    } else {
                        if (epollFdContainer[index].events & EPOLLIN) {
                            if (!task->listeningRecv(true)) {
                                task->Terminate(TcpTask::TM_ACTIVE);
                                check = true;
                            }
                        }
                    }
                    epollFdContainer[index].events = 0;
                }
            }
        }

        if (check) {
            continue;
        }

        if (currentTime.msec() - writeTime.msec() >= (QWORD)(m_pool->s_usleepTime / 1000)) {
            writeTime = currentTime;
            processMsg();
            if (!m_taskContainer.empty()) {
                SDWORD retcode = epoll_wait(m_kdpfd, &m_epollFdContainer[0], m_taskCount, 0);
                if (retcode > 0) {
                    for (SDWORD index = 0; index < retcode; index++) {
                        TcpTask *task = (TcpTask*)m_epollFdContainer[index].data.ptr;
                        if (m_epollFdContainer[index].events & (EPOLLERR | EPOLLPRI)) {
                            task->TerminateError();
                            task->Terminate(TcpTask::TM_ACTIVE);
                        } else {
                            if (m_epollFdContainer[index].events & EPOLLIN) {
                                if (!task->listeningRecv(true)) {
                                    task->Terminate(TcpTask::TM_ACTIVE);
                                }
                            }
                            if (m_epollFdContainer[index].events & EPOLLOUT) {
                                if (!task->listeningSend()) {
                                    task->Terminate(TcpTask::TM_ACTIVE);
                                }
                            }
                        }
                        m_epollFdContainer[index].events = 0;
                    }
                }
            }
            check = true;
        }
    }

    for (it = m_taskContainer.begin(); it != m_taskContainer.end();) {
        TcpTask *task = *it;
        remove(it);
        task->getNextState();
        m_pool->addRecycle(task);
    }

    TEMP_FAILURE_RETRY(::close(m_kdpfd));
}