/*
 * free_rpc_call_ctx(): tear down and free a client call context.
 *
 * If the context is still flagged WAITSYNC (a receiver may still be
 * delivering into it), waits on the per-ctx condvar — but only up to the
 * call's own timeout, never forever — before unlinking the ctx from the
 * duplex record's call tree and releasing its storage.
 *
 * @param ctx   call context to destroy (freed on return)
 * @param flags currently unused by this function
 *
 * Lock order here: ctx->we.mtx is taken first, then rec->mtx; both are
 * held across the rbtree removal so no receiver can find the ctx once
 * it is gone. Do not reorder the unlock sequence.
 */
void free_rpc_call_ctx(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;
    struct timespec ts;

    /* wait for commit of any xfer (ctx specific) */
    mutex_lock(&ctx->we.mtx);
    if (ctx->flags & RPC_CTX_FLAG_WAITSYNC) {
        /* WAITSYNC is already cleared if the call timed out, but it is
         * incorrect to wait forever */
        (void) clock_gettime(CLOCK_MONOTONIC_FAST, &ts);
        timespecadd(&ts, &ctx->ctx_u.clnt.timeout);
        /* result deliberately ignored: whether signalled or timed out,
         * we proceed to unlink and free */
        (void) cond_timedwait(&ctx->we.cv, &ctx->we.mtx, &ts);
    }
    mutex_lock(&rec->mtx);
    opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
    /* interlock */
    mutex_unlock(&ctx->we.mtx);
    mutex_unlock(&rec->mtx);
    if (ctx->msg)
        free_rpc_msg(ctx->msg);
    mem_free(ctx, sizeof(rpc_ctx_t));
}
/*
 * rpc_ctx_wait_reply(): wait (bounded by the call's timeout) for the
 * receiver to mark this call context SYNCDONE, i.e. for a reply to land.
 *
 * Caller must hold the recv channel lock (lk->we.mtx); the wait is on
 * the shared recv-channel condvar, so the loop re-checks SYNCDONE after
 * every wakeup (spurious wakeups and wakeups for other calls are normal).
 *
 * @param ctx   call context; ctx->xid identifies the expected reply
 * @param flags currently unused by this function
 * @return RPC_SUCCESS when a matching REPLY arrived; otherwise the
 *         cond_timedwait() code (e.g. ETIMEDOUT), with
 *         ctx->error.re_status set to RPC_TIMEDOUT if the transport was
 *         destroyed (no more receives can ever complete this call).
 */
int rpc_ctx_wait_reply(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;
    rpc_dplx_lock_t *lk = &rec->recv.lock;
    struct timespec ts;
    int code = 0;

    /* we hold recv channel lock */
    ctx->flags |= RPC_CTX_FLAG_WAITSYNC;
    while (! (ctx->flags & RPC_CTX_FLAG_SYNCDONE)) {
        /* absolute deadline recomputed each pass from the call timeout */
        (void) clock_gettime(CLOCK_MONOTONIC_FAST, &ts);
        timespecadd(&ts, &ctx->ctx_u.clnt.timeout);
        code = cond_timedwait(&lk->we.cv, &lk->we.mtx, &ts);
        /* if we timed out, check for xprt destroyed (no more receives) */
        if (code == ETIMEDOUT) {
            SVCXPRT *xprt = rec->hdl.xprt;
            uint32_t xp_flags;
            /* dequeue the call — a late reply must not find this ctx */
            mutex_lock(&rec->mtx);
            opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
            mutex_unlock(&rec->mtx);
            mutex_lock(&xprt->xp_lock);
            xp_flags = xprt->xp_flags;
            mutex_unlock(&xprt->xp_lock);
            if (xp_flags & SVC_XPRT_FLAG_DESTROYED) {
                /* XXX should also set error.re_why, but the facility is not
                 * well developed. */
                ctx->error.re_status = RPC_TIMEDOUT;
            }
            ctx->flags &= ~RPC_CTX_FLAG_WAITSYNC;
            goto out;
        }
    }
    /* consume the completion so the ctx can be reused */
    ctx->flags &= ~RPC_CTX_FLAG_SYNCDONE;

    /* switch on direction */
    switch (ctx->msg->rm_direction) {
    case REPLY:
        if (ctx->msg->rm_xid == ctx->xid)
            return (RPC_SUCCESS);
        break;
    case CALL:
        /* XXX cond transfer control to svc */
        break;
    default:
        break;
    }
out:
    return (code);
}
/*
 * ice_thread_run(): dedicated timer thread for ICE agents.
 *
 * Loops until g_shutdown: pops the earliest-scheduled agent from the
 * ice_agents_timers tree; if it is due, the agent is removed (stealing
 * the tree's reference), the timer lock is dropped, the agent's checks
 * run under the call's read lock, the reference is released, and the
 * timer lock is retaken. If nothing is due, sleeps on the timer condvar
 * until the next deadline — capped at 100 ms so shutdown and newly
 * scheduled agents are noticed promptly.
 *
 * ice_agents_timers_lock is held at the top of every loop iteration and
 * on exit from the loop; it is released only around the actual checks.
 */
void ice_thread_run(void *p) {
    struct ice_agent *ag;
    struct call *call;
    long long sleeptime;
    struct timeval tv;

    mutex_lock(&ice_agents_timers_lock);
    while (!g_shutdown) {
        gettimeofday(&g_now, NULL);
        /* lock our list and get the first element */
        ag = g_tree_find_first(ice_agents_timers, NULL, NULL);
        /* scheduled to run? if not, we just go to sleep, otherwise we remove it from the tree,
         * steal the reference and run it */
        if (!ag)
            goto sleep;
        if (timeval_cmp(&g_now, &ag->next_check) < 0)
            goto sleep;
        g_tree_remove(ice_agents_timers, ag);
        ZERO(ag->next_check);
        ag->last_run = g_now;
        mutex_unlock(&ice_agents_timers_lock);
        /* this agent is scheduled to run right now */
        /* lock the call */
        call = ag->call;
        log_info_ice_agent(ag);
        rwlock_lock_r(&call->master_lock);
        /* and run our checks */
        __do_ice_checks(ag);
        /* finally, release our reference and start over */
        log_info_clear();
        rwlock_unlock_r(&call->master_lock);
        obj_put(ag);
        mutex_lock(&ice_agents_timers_lock);
        continue;
sleep:
        /* figure out how long we should sleep */
        sleeptime = ag ? timeval_diff(&ag->next_check, &g_now) : 100000;
        sleeptime = MIN(100000, sleeptime); /* 100 ms at the most */
        tv = g_now;
        timeval_add_usec(&tv, sleeptime);
        cond_timedwait(&ice_agents_timers_cond, &ice_agents_timers_lock, &tv);
        continue;
    }
    mutex_unlock(&ice_agents_timers_lock);
}
/*
 * Timed wait on COND with LOCK held, until ABSTIME.
 * Solaris threads report an expired deadline as ETIME; normalize that
 * to the POSIX ETIMEDOUT code so callers see one convention.
 */
int glthread_cond_timedwait_multithreaded (gl_cond_t *cond, gl_lock_t *lock, struct timespec *abstime)
{
  int err = cond_timedwait (cond, lock, abstime);
  return (err == ETIME) ? ETIMEDOUT : err;
}
void Condition::wait(void)
{
    // Block the caller until the condition is signalled, or for at most
    // one second — the periodic timeout lets the caller re-check state.
#if defined(POSIX_THREADS)
    MutexLock lock_scope(_mutex);           // scoped lock: released on return

    timestruc_t deadline;
    deadline.tv_nsec = 0;
    deadline.tv_sec = time(NULL) + 1;       // absolute deadline: now + 1s

    // Result deliberately ignored: both a signal and a timeout simply
    // return control to the caller.
    cond_timedwait((cond_t *)_condition, (mutex_t *)_mutex, &deadline);
#endif
    return;
}
/*
 * Sleep until the (lockstep) absolute time `time_us` by waiting on a
 * private condition variable that is never signalled — the wait can only
 * end via timeout (or an error from cond_timedwait).
 *
 * @param time_us absolute wakeup time in microseconds, passed through to
 *                the cond_timedwait wrapper
 * @return 0 on the expected timeout, otherwise the cond_timedwait error
 *
 * Fix: the automatic-storage mutex and condvar were initialized but never
 * destroyed. With PTHREAD_*_INITIALIZER that is harmless on Linux, but
 * implementations may allocate resources lazily on first use, so pairing
 * with *_destroy() is required for leak-free operation (POSIX).
 */
int LockstepScheduler::usleep_until(uint64_t time_us)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    pthread_mutex_lock(&lock);
    int result = cond_timedwait(&cond, &lock, time_us);
    if (result == ETIMEDOUT) {
        // This is expected because we never notified to the condition.
        result = 0;
    }
    pthread_mutex_unlock(&lock);

    // Release any resources the implementation attached to the local
    // synchronization objects (no-op on most Linux libcs, required in
    // general).
    pthread_cond_destroy(&cond);
    pthread_mutex_destroy(&lock);

    return result;
}
void timerthread_run(void *p) { struct timerthread *tt = p; mutex_lock(&tt->lock); while (!rtpe_shutdown) { gettimeofday(&rtpe_now, NULL); /* lock our list and get the first element */ struct timerthread_obj *tt_obj = g_tree_find_first(tt->tree, NULL, NULL); /* scheduled to run? if not, we just go to sleep, otherwise we remove it from the tree, * steal the reference and run it */ if (!tt_obj) goto sleep; if (timeval_cmp(&rtpe_now, &tt_obj->next_check) < 0) goto sleep; // steal reference g_tree_remove(tt->tree, tt_obj); ZERO(tt_obj->next_check); tt_obj->last_run = rtpe_now; mutex_unlock(&tt->lock); // run and release tt->func(tt_obj); obj_put(tt_obj); mutex_lock(&tt->lock); continue; sleep:; /* figure out how long we should sleep */ long long sleeptime = tt_obj ? timeval_diff(&tt_obj->next_check, &rtpe_now) : 100000; sleeptime = MIN(100000, sleeptime); /* 100 ms at the most */ struct timeval tv = rtpe_now; timeval_add_usec(&tv, sleeptime); cond_timedwait(&tt->cond, &tt->lock, &tv); } mutex_unlock(&tt->lock); }
/*
 * hid_read_timeout(): read one queued input report from `dev` into
 * `data` (at most `length` bytes), waiting up to `milliseconds`.
 *
 * Returns: bytes read (>0), 0 on timeout or an empty non-blocking poll,
 * or -1 on error / device disconnected / device closed.
 *
 * milliseconds == -1 blocks indefinitely; 0 is a pure non-blocking poll;
 * >0 converts the relative timeout to an absolute timespec (normalizing
 * tv_nsec into [0, 1e9)) for the timed condition wait.
 *
 * dev->mutex is held for the entire call; the disconnected/shutdown
 * checks happen before sleeping so we never wait on a dead device.
 * NOTE(review): cond_wait/cond_timedwait here are driver-local wrappers
 * taking `dev` — presumably they re-check the report queue internally.
 * Kept byte-identical: the wait/recheck protocol depends on exact order.
 */
int HID_API_EXPORT hid_read_timeout(hid_device *dev, unsigned char *data, size_t length, int milliseconds) { int bytes_read = -1; /* Lock the access to the report list. */ pthread_mutex_lock(&dev->mutex); /* There's an input report queued up. Return it. */ if (dev->input_reports) { /* Return the first one */ bytes_read = return_data(dev, data, length); goto ret; } /* Return if the device has been disconnected. */ if (dev->disconnected) { bytes_read = -1; goto ret; } if (dev->shutdown_thread) { /* This means the device has been closed (or there has been an error. An error code of -1 should be returned. */ bytes_read = -1; goto ret; } /* There is no data. Go to sleep and wait for data. */ if (milliseconds == -1) { /* Blocking */ int res; res = cond_wait(dev, &dev->condition, &dev->mutex); if (res == 0) bytes_read = return_data(dev, data, length); else { /* There was an error, or a device disconnection. */ bytes_read = -1; } } else if (milliseconds > 0) { /* Non-blocking, but called with timeout. */ int res; struct timespec ts; struct timeval tv; gettimeofday(&tv, NULL); TIMEVAL_TO_TIMESPEC(&tv, &ts); ts.tv_sec += milliseconds / 1000; ts.tv_nsec += (milliseconds % 1000) * 1000000; if (ts.tv_nsec >= 1000000000L) { ts.tv_sec++; ts.tv_nsec -= 1000000000L; } res = cond_timedwait(dev, &dev->condition, &dev->mutex, &ts); if (res == 0) bytes_read = return_data(dev, data, length); else if (res == ETIMEDOUT) bytes_read = 0; else bytes_read = -1; } else { /* Purely non-blocking */ bytes_read = 0; } ret: /* Unlock */ pthread_mutex_unlock(&dev->mutex); return bytes_read; }
/*
 * aio_thread(): worker thread for the POSIX AIO emulation (pread/pwrite
 * variant). Blocks all signals, then loops:
 *   - wait (absolute deadline: now + 10 s) for a queued qemu_paiocb;
 *     if the wait times out with the request list still empty, the
 *     thread retires (decrementing idle/cur counters) and exits;
 *   - otherwise dequeue the request, mark it active, and perform the
 *     transfer under no lock, resuming after EINTR and accumulating
 *     partial transfers in `offset` until aio_nbytes are done;
 *   - publish the result in aiocb->ret and notify the submitter via
 *     kill(pid, ev_signo).
 *
 * NOTE(review): `offset` is size_t yet is assigned "-errno" on error;
 * the negative value survives only via unsigned wraparound and the
 * implementation-defined conversion when stored into aiocb->ret — a
 * signed type would be clearer (compare the sibling aio_thread below).
 * Kept byte-identical; the lock/unlock sequencing is deliberate.
 */
static void *aio_thread(void *unused) { pid_t pid; sigset_t set; pid = getpid(); /* block all signals */ if (sigfillset(&set)) die("sigfillset"); if (sigprocmask(SIG_BLOCK, &set, NULL)) die("sigprocmask"); while (1) { struct qemu_paiocb *aiocb; size_t offset; int ret = 0; qemu_timeval tv; struct timespec ts; qemu_gettimeofday(&tv); ts.tv_sec = tv.tv_sec + 10; ts.tv_nsec = 0; mutex_lock(&lock); while (TAILQ_EMPTY(&request_list) && !(ret == ETIMEDOUT)) { ret = cond_timedwait(&cond, &lock, &ts); } if (TAILQ_EMPTY(&request_list)) break; aiocb = TAILQ_FIRST(&request_list); TAILQ_REMOVE(&request_list, aiocb, node); offset = 0; aiocb->active = 1; idle_threads--; mutex_unlock(&lock); while (offset < aiocb->aio_nbytes) { ssize_t len; if (aiocb->is_write) len = pwrite(aiocb->aio_fildes, (const char *)aiocb->aio_buf + offset, aiocb->aio_nbytes - offset, aiocb->aio_offset + offset); else len = pread(aiocb->aio_fildes, (char *)aiocb->aio_buf + offset, aiocb->aio_nbytes - offset, aiocb->aio_offset + offset); if (len == -1 && errno == EINTR) continue; else if (len == -1) { offset = -errno; break; } else if (len == 0) break; offset += len; } mutex_lock(&lock); aiocb->ret = offset; idle_threads++; mutex_unlock(&lock); if (kill(pid, aiocb->ev_signo)) die("kill failed"); } idle_threads--; cur_threads--; mutex_unlock(&lock); return NULL; }
/*
 * condvarWait / condvarTimedWait / condvarSignal: condition-variable
 * wrappers for the VM's threading layer, with interruption support.
 *
 * Two platform strategies coexist under #ifdef:
 *  - Linux: pthread cancellation. A cleanup handler (intrHandler) is
 *    pushed, a jmp_buf is stored in thread-specific data, and
 *    cancellation is enabled only for the duration of the wait; an
 *    interrupt cancels the thread, the handler longjmps back, and the
 *    setjmp() != 0 branch reports SYS_INTRPT.
 *  - Solaris: SIGUSR1. The signal is unblocked around the wait and its
 *    handler siglongjmps back; EINTR from stray (non-USR1) signals
 *    retries via the `again:` label.
 *
 * condvarWait: untimed wait; returns SYS_OK, SYS_ERR, or SYS_INTRPT.
 * condvarTimedWait: bounded wait; converts `millis` to an absolute
 *   timespec from end_time (recomputed on each Solaris retry), maps
 *   ETIMEDOUT/ETIME to SYS_TIMEOUT, and falls back to condvarWait for
 *   timeouts beyond INT_MAX ms. Note the EINTR case deliberately falls
 *   through to the timeout case once end_time has passed.
 * condvarSignal: wakes one waiter and bumps the wakeup counter.
 *
 * self->state is set to the wait type before sleeping and restored to
 * RUNNABLE afterwards purely so debugger agents can observe the state;
 * the comment in the code explains why this is inherently racy.
 *
 * Left byte-identical: the sigsetjmp/cleanup-handler interplay and the
 * platform #ifdef split make any restructuring hazardous.
 */
int condvarWait(condvar_t *condvar, mutex_t *mutex, thread_state_t wtype) { sigjmp_buf jmpbuf; int err; sys_thread_t *self = sysThreadSelf(); /* * There is no threads interface to get a thread's state. So, instead, * we use this hack so that the debugger agent can get at this thread's * state. Of course, this is not very reliable, but when a thread goes * to sleep, it *will* be reported as sleeping. During the transition * from running to sleep, it may be incorrectly reported, since the * setting of the state here is not atomic with the voluntary sleep. * The better fix is to extend the Solaris threads interface and have * the debugger agent call this interface OR to use libthread_db for * intra-process state reporting. * * Now, condition variables are used either for waiting to enter a * monitor (MONITOR_WAIT) or to execute a "wait()" method when already * holding a monitor (CONDVAR_WAIT). So, when condvarWait() is called * it could be to wait for a monitor or for a condition within a * monitor. This is indicated by the "wtype" argument to condvarWait(). * This type is set in the thread state before going to sleep. */ self->state = wtype; #ifdef __linux__ /* * Register our intrHandler as a cleanup handler. If we get * interrupted (i.e. canceled), we longjmp out of this handler. */ pthread_cleanup_push(intrHandler, NULL); if (setjmp(jmpbuf) == 0) { /* * Set the jmp buf and enable cancellation. */ thr_setspecific(intrJmpbufkey, &jmpbuf); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); /* * Note: pthread_cond_wait is _not_ interruptible on Linux */ #else thr_setspecific(sigusr1Jmpbufkey, &jmpbuf); if (sigsetjmp(jmpbuf, 1) == 0) { sigset_t osigset; thr_sigsetmask(SIG_UNBLOCK, &sigusr1Mask, &osigset); again: #endif err = cond_wait((cond_t *) condvar, (mutex_t *) mutex); switch(err) { case 0: err = SYS_OK; break; #ifndef __linux__ case EINTR: /* Signals other than USR1 were received. 
*/ goto again; #endif default: err = SYS_ERR; } #ifdef __linux__ /* * Disable cancellation and clear the jump buf. */ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); thr_setspecific(intrJmpbufkey, NULL); #else thr_sigsetmask(SIG_SETMASK, &osigset, NULL); #endif } else { /* * we've received a SIGUSR1 to interrupt our wait. We just return * and something above use notices the change. * clear the jump buf just to be paranoid. */ #ifndef __linux__ thr_setspecific(sigusr1Jmpbufkey, NULL); #endif err = SYS_INTRPT; } #ifdef __linux__ pthread_cleanup_pop(0); #endif /* * After having woken up, change the thread state to RUNNABLE, since * it is now runnable. */ self->state = RUNNABLE; return err; } /* * Returns 0 if condition variable became true before timeout expired. * Returns 1 if timeout expired first. * Returns <0 if wait fails for any other reason. */ int condvarTimedWait(condvar_t *condvar, mutex_t *mutex, jlong millis, thread_state_t wtype) { #ifdef __linux__ jmp_buf jmpbuf; #else sigjmp_buf jmpbuf; #endif int err; struct timespec timeout; sys_thread_t *self; jlong end_time; if (millis < 0) return SYS_ERR; if (millis > (jlong)INT_MAX) { return condvarWait(condvar, mutex, wtype); } end_time = sysTimeMillis() + millis; self = sysThreadSelf(); self->state = wtype; #ifdef __linux__ /* * Register our intrHandler as a cleanup handler. If we get * interrupted (i.e. canceled), we longjmp out of this handler. */ pthread_cleanup_push(intrHandler, NULL); if (setjmp(jmpbuf) == 0) { /* * Set the jmp buf and enable cancellation. */ thr_setspecific(intrJmpbufkey, &jmpbuf); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); /* * Calculate an absolute timeout value. 
*/ timeout.tv_sec = end_time / 1000; timeout.tv_nsec = (end_time % 1000) * 1000000; again: #else thr_setspecific(sigusr1Jmpbufkey, &jmpbuf); if (sigsetjmp(jmpbuf, 1) == 0) { sigset_t osigset; thr_sigsetmask(SIG_UNBLOCK, &sigusr1Mask, &osigset); again: timeout.tv_sec = end_time / 1000; timeout.tv_nsec = (end_time % 1000) * 1000000; #endif err = cond_timedwait((cond_t *)condvar, (mutex_t *)mutex, &timeout); switch(err) { case 0: err = SYS_OK; break; case EINTR: /* Signals other than USR1 were received. */ if (sysTimeMillis() < end_time) { goto again; } /*FALLTHRU*/ #ifdef USE_PTHREADS case ETIMEDOUT: #else case ETIME: #endif err = SYS_TIMEOUT; break; default: err = SYS_ERR; } #ifdef __linux__ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); thr_setspecific(intrJmpbufkey, NULL); #else thr_sigsetmask(SIG_SETMASK, &osigset, NULL); #endif } else { /* * we've received a SIGUSR1 to interrupt our wait. We just return * and something above use notices the change. * clear the jump buf just to be paranoid. */ #ifndef __linux__ thr_setspecific(sigusr1Jmpbufkey, NULL); #endif err = SYS_INTRPT; } #ifdef __linux__ /* Remove intrHandler without calling it. */ pthread_cleanup_pop(0); sysAssert(pthread_mutex_trylock(mutex) == EBUSY); /* * After having woken up, change the thread state to RUNNABLE, since * it is now runnable. */ #endif self->state = RUNNABLE; return err; } int condvarSignal(condvar_t *condvar) { int err; err = cond_signal((cond_t *) condvar); condvar->counter++; return (err == 0 ? SYS_OK : SYS_ERR); }
/* Thin shim: expose the internal cond_timedwait under the standard
 * pthread_cond_timedwait name, forwarding all arguments untouched. */
int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
{
    int rc = cond_timedwait(cond, mutex, abstime);
    return rc;
}
/* Untimed wait for the mwait backend: implemented as a timed wait with
 * a null deadline, which the underlying cond_timedwait treats as
 * "no timeout". */
static int do_liblock_cond_wait(mwait)(liblock_cond_t* cond, liblock_lock_t* lock)
{
	int res = cond_timedwait(cond, lock, 0);
	return res;
}
/* Timed wait for the mwait backend: forward directly to the shared
 * cond_timedwait primitive with the caller's absolute deadline. */
static int do_liblock_cond_timedwait(mwait)(liblock_cond_t* cond, liblock_lock_t* lock, const struct timespec* ts)
{
	int res = cond_timedwait(cond, lock, ts);
	return res;
}
static void *aio_thread(void *unused) { pid_t pid; sigset_t set; pid = getpid(); /* block all signals */ if (sigfillset(&set)) die("sigfillset"); if (sigprocmask(SIG_BLOCK, &set, NULL)) die("sigprocmask"); while (1) { struct qemu_paiocb *aiocb; size_t ret = 0; qemu_timeval tv; struct timespec ts; qemu_gettimeofday(&tv); ts.tv_sec = tv.tv_sec + 10; ts.tv_nsec = 0; mutex_lock(&lock); while (TAILQ_EMPTY(&request_list) && !(ret == ETIMEDOUT)) { ret = cond_timedwait(&cond, &lock, &ts); } if (TAILQ_EMPTY(&request_list)) break; aiocb = TAILQ_FIRST(&request_list); TAILQ_REMOVE(&request_list, aiocb, node); aiocb->active = 1; idle_threads--; mutex_unlock(&lock); switch (aiocb->aio_type) { case QEMU_PAIO_READ: case QEMU_PAIO_WRITE: ret = handle_aiocb_rw(aiocb); break; case QEMU_PAIO_IOCTL: ret = handle_aiocb_ioctl(aiocb); break; default: fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type); ret = -EINVAL; break; } mutex_lock(&lock); aiocb->ret = ret; idle_threads++; mutex_unlock(&lock); if (kill(pid, aiocb->ev_signo)) die("kill failed"); } idle_threads--; cur_threads--; mutex_unlock(&lock); return NULL; }
/*
 * cond_timeout_global: use a global condition time wait variable to
 * process the array's data.
 *
 * Spins in a cond_timedwait loop (bumping the global `time_out` deadline
 * by MYTIMEOUT each pass and normalizing tv_nsec) until global_cond_flag
 * becomes TRUE, then clears the flag, runs the scripted work on `array`
 * under global_lock while recording hrtime/hrvtime timestamps, and
 * finally re-raises the flag and signals the next waiter. SOLARIS and
 * POSIX variants differ only in the lock/condvar API used.
 *
 * Fix: the gettimeofday() call read "gettimeofday(¤t_time, NULL)" —
 * the token "&current_time" had been mangled into the HTML entity
 * "&curren;" plus "t_time" by a bad text conversion, which does not
 * compile. Restored the intended "&current_time".
 */
void cond_timeout_global(Workblk *array, struct scripttab *k)
{
	int err;
	struct timeval current_time;

	/* acquire the global condition lock */
#ifdef SOLARIS
	mutex_lock(&global_cond_lock);
#endif
#ifdef POSIX
	pthread_mutex_lock(&global_cond_lock);
#endif

	/* seed the absolute deadline from the current wall-clock time */
	gettimeofday(&current_time, NULL);
	time_out.tv_sec = current_time.tv_sec;
	time_out.tv_nsec = current_time.tv_usec * 1000;

	/* check to see if the condition flag is true, If not then wait
	 * for that condition flag to become true */
	while (global_cond_flag != TRUE) {
		/* add MYTIMEOUT to current time for timeout */
		time_out.tv_nsec += MYTIMEOUT;
		while (time_out.tv_nsec > 1000000000) {
			time_out.tv_nsec -= 1000000000;
			time_out.tv_sec ++;
		}
#ifdef SOLARIS
		err = cond_timedwait( &global_cond, &global_cond_lock, &time_out);
#endif
#ifdef POSIX
		err = pthread_cond_timedwait( &global_cond, &global_cond_lock, &time_out);
#endif
		if (err == 0 ) {
			/* with the condition */
			break;
		}
	}

	/* Now, condition is true, and we have the global_cond_lock */
#ifdef SOLARIS
	mutex_unlock(&global_cond_lock);
	mutex_lock(&global_cond_lock2);
	global_cond_flag = FALSE;
	mutex_unlock(&global_cond_lock2);
	/* acquire the global lock */
	mutex_lock(&global_lock);
#endif
#ifdef POSIX
	pthread_mutex_unlock(&global_cond_lock);
	pthread_mutex_lock(&global_cond_lock2);
	global_cond_flag = FALSE;
	pthread_mutex_unlock(&global_cond_lock2);
	/* acquire the global lock */
	pthread_mutex_lock(&global_lock);
#endif

	/* timestamp the start of the compute phase */
	array->ready = gethrtime();
	array->vready = gethrvtime();
	array->compute_ready = array->ready;
	array->compute_vready = array->vready;

	/* do some work on the current array */
	(k->called_func)(&array->list[0]);

	array->compute_done = gethrtime();
	array->compute_vdone = gethrvtime();

	/* free the global lock */
#ifdef SOLARIS
	mutex_unlock(&global_lock);
	/* now set the condition, and signal any other threads */
	mutex_lock(&global_cond_lock2);
#endif
#ifdef POSIX
	pthread_mutex_unlock(&global_lock);
	/* now set the condition, and signal any other threads */
	pthread_mutex_lock(&global_cond_lock2);
#endif
	global_cond_flag = TRUE;
#ifdef SOLARIS
	cond_signal(&global_cond);
	mutex_unlock(&global_cond_lock2);
#endif
#ifdef POSIX
	pthread_cond_signal(&global_cond);
	pthread_mutex_unlock(&global_cond_lock2);
#endif
	/* make another call to preclude tail-call optimization on the unlock */
	(void) gethrtime();
}
/*
 * manage_list(): main event-dispatch loop for a robot library worker
 * thread.
 *
 * Forever: under list_mutex, decide whether to sleep — it sleeps when
 * the active list is empty, or when nothing new arrived since the last
 * pass (old_count == active_count) and only delayed entries remain.
 * The cond_timedwait deadline is the earliest of: now + un->delay, the
 * next auto_check time, and the shortest pending delayed-entry timeout.
 * A chk_req wakeup with the device ON forces an immediate check_requests
 * pass by zeroing auto_check.
 *
 * After waking it walks the event list: entries whose delay has not yet
 * expired are skipped (tracking the soonest `short_delay` and setting
 * `delayed`); due entries are unlinked (active_count decremented) and
 * dispatched by type — internal audit requests, or message commands
 * (mount, label, unload, export, state, audit, preview, shutdown, ...).
 * Handlers that cannot get resources re-delay the entry for 10 s via
 * add_to_end(); MESS_CMD_SHUTDOWN exits the thread via thr_exit().
 *
 * NOTE(review): list_mutex is re-taken at the top of the inner do-loop
 * and released before each dispatch; `current`/`next` are captured under
 * the lock. The final "break; } while (current != NULL);" terminates the
 * inner walk after one dispatch chain — presumably intentional, verify
 * against sibling robot drivers.
 *
 * Left byte-identical: the delayed-request bookkeeping and lock
 * hand-offs are too order-sensitive to restructure safely here.
 */
void * manage_list(void *vlibrary) { int exit_status = 0, old_count; char *ent_pnt = "manage_list"; ushort_t delayed; time_t now, short_delay, auto_check; robo_event_t *current, *next; library_t *library = (library_t *)vlibrary; mutex_lock(&library->mutex); /* wait for initialization */ mutex_unlock(&library->mutex); short_delay = 0; old_count = 0; delayed = 0; auto_check = (time(&now) + 5); for (;;) { mutex_lock(&library->list_mutex); /* * See if there in anything to do. We will wait if the * active count is 0 or its equal to the same value it had * when we last woke up and there is a delayed request. */ if (library->active_count == 0 || ((old_count == library->active_count) && delayed)) { timestruc_t wait_time; wait_time.tv_sec = time(&now) + library->un->delay; wait_time.tv_nsec = 0; if ((auto_check >= now) && (auto_check < wait_time.tv_sec)) wait_time.tv_sec = auto_check; if (delayed && (short_delay < wait_time.tv_sec)) wait_time.tv_sec = short_delay; if (wait_time.tv_sec > now) { cond_timedwait(&library->list_condit, &library->list_mutex, &wait_time); if (library->chk_req) { library->chk_req = FALSE; if (library->un->state == DEV_ON) /* * Force a check */ auto_check = 0; } } } /* * Get the current time */ time(&now); if (auto_check <= now) { mutex_unlock(&library->list_mutex); (void) check_requests(library); auto_check = now + library->un->delay; continue; } /* * If there is something on the list . . . 
*/ if ((old_count = library->active_count) == 0) { mutex_unlock(&library->list_mutex); continue; } short_delay = 0; delayed = FALSE; current = library->first; mutex_unlock(&library->list_mutex); do { mutex_lock(&library->list_mutex); /* * If delayed and the time has not expired, * go on tothe next */ next = current->next; if ((current->status.b.delayed) && (current->timeout > now)) { if (short_delay == 0) short_delay = current->timeout; else if (current->timeout < short_delay) short_delay = current->timeout; current = next; /* * Need to know there are delayed requests */ delayed = TRUE; mutex_unlock(&library->list_mutex); continue; } if (current == library->first) library->first = unlink_list(current); else (void) unlink_list(current); current->next = NULL; ETRACE((LOG_NOTICE, "LbEv c %#x n %#x (%d)\n", current, library->first, library->active_count)); library->active_count--; library->un->active = library->active_count; mutex_unlock(&library->list_mutex); /* * Entry is off the list and ready to process */ switch (current->type) { case EVENT_TYPE_INTERNAL: switch (current->request.internal.command) { case ROBOT_INTRL_AUDIT_SLOT: if (start_audit(library, current, current->request.internal.slot)) { /* * Unable to find resources, * delay the request and try * later */ current->status.b.delayed = TRUE; current->timeout = now + 10; delayed = TRUE; add_to_end(library, current); } current = next; break; default: sam_syslog(LOG_ERR, "%s:Bad internal event: %s:%d\n", ent_pnt, __FILE__, __LINE__); break; } break; case EVENT_TYPE_MESS: if (current->request.message.magic != MESSAGE_MAGIC) { sam_syslog(LOG_ERR, "%s: Bad magic %#x.", ent_pnt, current->request.message.magic); current->completion = EAGAIN; disp_of_event(library, current, EBADF); current = next; continue; } if (library->un->state >= DEV_OFF && (current->request.message.command > ACCEPT_DOWN)) { current->completion = EAGAIN; disp_of_event(library, current, EAGAIN); current = next; continue; } switch 
(current->request.message.command) { case MESS_CMD_SHUTDOWN: if (DBG_LVL(SAM_DBG_DEBUG)) sam_syslog(LOG_DEBUG, "received" " shutdown:%s:%d.\n", __FILE__, __LINE__); post_shutdown(library); threads[SONY_WORK_THREAD] = (thread_t)-1; thr_exit(&exit_status); break; case MESS_CMD_STATE: /* * state_request will put the event * back on the free list when * the command is done. */ state_request(library, current); current = next; break; case MESS_CMD_TAPEALERT: /* * tapealert_request will put the * event back on the * free list when the command is done. */ tapealert_solicit(library, current); current = next; break; case MESS_CMD_SEF: /* * sef_request will put the event * back on the free list when the * command is done. */ sef_solicit(library, current); current = next; break; case MESS_CMD_LABEL: if (label_request(library, current)) { /* * Unable to find resources, * delay the request, try later. */ current->status.b.delayed = TRUE; current->timeout = now + 10; delayed = TRUE; add_to_end(library, current); } current = next; break; case MESS_CMD_MOUNT: /* * mount_request will take care of * putting the event back on free list */ if (mount_request(library, current)) { /* * Unable to find resources, * delay request and try later. */ current->status.b.delayed = TRUE; current->timeout = now + 10; delayed = TRUE; add_to_end(library, current); } current = next; break; case MESS_CMD_LOAD_UNAVAIL: load_unavail_request(library, current); current = next; break; case MESS_CMD_AUDIT: if (start_audit(library, current, current-> request.message.param.audit_request.slot)) { current->status.b.delayed = TRUE; current->timeout = now + 10; delayed = TRUE; add_to_end(library, current); } current = next; break; case MESS_CMD_PREVIEW: (void) check_requests(library); time(&now); auto_check = now + library->un->delay; disp_of_event(library, current, 0); current = next; break; case MESS_CMD_UNLOAD: /* * unload_request will put the event * back on the free list when * the command is done. 
* unload_request will add the request * to the drive's worklist. */ unload_request(library, current); current = next; break; case MESS_CMD_TODO: todo_request(library, current); current = next; break; case MESS_CMD_ADD: add_to_cat_req(library, current); current = next; break; case MESS_CMD_EXPORT: /* * export_request will add the request * to the * mailbox worklist. */ export_media(library, current); current = next; break; case MESS_CMD_ACK: /* * A no-op. Dispose of event. */ disp_of_event(library, current, 0); current = next; break; default: sam_syslog(LOG_ERR, "%s: Unknown robot command %d.", ent_pnt, current->request.message.command); disp_of_event(library, current, 0); current = next; break; } break; default: sam_syslog(LOG_ERR, "%s: Unknown event type %d.\n", ent_pnt, current->type); disp_of_event(library, current, EBADF); current = next; break; } break; } while (current != NULL); } }