/*
 * dismount - dismount volser
 *
 */
void
dismount(
	library_t *library,
	robo_event_t *event)
{
	IBM_dismount_t dismount_req;
	delay_list_ent_t *dly_ent;
	ibm_req_info_t *ibm_info =
	    (ibm_req_info_t *)event->request.internal.address;

	dly_ent = (delay_list_ent_t *)
	    malloc_wait(sizeof (delay_list_ent_t), 2, 0);
	memset(dly_ent, 0, sizeof (delay_list_ent_t));
	dly_ent->event = event;
	memset(&dismount_req, 0, sizeof (IBM_dismount_t));
	dismount_req.device = ibm_info->drive_id;
	dismount_req.target_cat = ibm_info->targ_cat;
	memcpy(dismount_req.volser, ibm_info->volser, 8);
	mutex_lock(&library->dlist_mutex);

	{
		char *c = &ibm_info->volser[0];

		sprintf(l_mess, "dismount %c%c%c%c%c%c from device %#8.8x",
		    c[0], c[1], c[2], c[3], c[4], c[5], ibm_info->drive_id);
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG,
			    "dismount(%d) %#x, %c%c%c%c%c%c.",
			    LIBEQ, ibm_info->drive_id,
			    c[0], c[1], c[2], c[3], c[4], c[5]);
	}

	if (ioctl_ibmatl(library->open_fd, MTIOCLDM, &dismount_req) == -1) {
		ushort_t cc = dismount_req.mtldret.cc;
		char dmmy[DIS_MES_LEN * 2];

		sprintf(dmmy, "dismount cmd failed(%x): %s", cc,
		    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		memccpy(l_mess, dmmy, '\0', DIS_MES_LEN);
		sam_syslog(LOG_INFO, "dismount(%d): (MTIOCLDM): %m", LIBEQ);
		if (errno != ENOMEM && errno != EFAULT)
			sam_syslog(LOG_INFO,
			    "dismount(%d): (MTIOCLDM): %s(%#x)", LIBEQ,
			    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		free(dly_ent);
		mutex_unlock(&library->dlist_mutex);
		disp_of_event(library, event, MC_REQ_TR);
	} else {
		/* The delay processing thread will dispose of the event */
		dly_ent->req_id = dismount_req.mtldret.req_id;
		if ((dly_ent->next = library->delay_list) != NULL)
			library->delay_list->last = dly_ent;
		library->delay_list = dly_ent;
		mutex_unlock(&library->dlist_mutex);
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG,
			    "dismount(%d) %#x, req_id = %#x.", LIBEQ,
			    ibm_info->drive_id, dismount_req.mtldret.req_id);
	}
}
/*
 * dismount_media - unload a volser
 *
 */
req_comp_t
dismount_media(
	library_t *library,
	drive_state_t *drive)
{
	req_comp_t err;
	ibm_req_info_t *ibm_info;
	xport_state_t *transport;
	robo_event_t *dismount, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->drive_id = drive->drive_id;
	sprintf((void *)&ibm_info->volser, "%-8.8s", drive->bar_code);

	/* Build transport thread request */
	dismount = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(dismount, 0, sizeof (robo_event_t));
	dismount->request.internal.command = ROBOT_INTRL_DISMOUNT_MEDIA;
	dismount->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "dismount_media: from %s.",
		    drive->un->name);

	dismount->type = EVENT_TYPE_INTERNAL;
	dismount->status.bits = REST_SIGNAL;
	dismount->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;
	mutex_lock(&dismount->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = dismount;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, dismount);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the unload */
	while (dismount->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&dismount->condit, &dismount->mutex);
	mutex_unlock(&dismount->mutex);

	/* Check the dismount request/helper status */
	err = (req_comp_t)dismount->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from transport dismount (%#x).",
		    dismount->completion);
	free(ibm_info);
	mutex_destroy(&dismount->mutex);
	free(dismount);
	return (err);
}
/*
 * force_media - unload a drive.  Issued as a delayed request.
 *
 */
req_comp_t
force_media(
	library_t *library,
	drive_state_t *drive)
{
	req_comp_t err;
	ibm_req_info_t *ibm_info;
	robo_event_t *force, *tmp;
	xport_state_t *transport;

	ibm_info = (ibm_req_info_t *)malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->drive_id = drive->drive_id;

	/* Build transport thread request */
	force = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(force, 0, sizeof (robo_event_t));
	force->request.internal.command = ROBOT_INTRL_FORCE_MEDIA;
	force->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "force_media: from %s.",
		    drive->un->name);

	force->type = EVENT_TYPE_INTERNAL;
	force->status.bits = REST_SIGNAL;
	force->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;
	mutex_lock(&force->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = force;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, force);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the unload */
	while (force->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&force->condit, &force->mutex);
	mutex_unlock(&force->mutex);

	err = (req_comp_t)force->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from transport force (%#x).",
		    force->completion);
	free(ibm_info);
	mutex_destroy(&force->mutex);
	free(force);
	return (err);
}
/*
 * query_drv - query a drive
 *
 */
void
query_drv(
	library_t *library,
	robo_event_t *event)
{
	IBM_query_t query_req;
	ibm_req_info_t *ibm_info =
	    (ibm_req_info_t *)event->request.internal.address;

	memset(&query_req, 0, sizeof (IBM_query_t));
	query_req.device = ibm_info->drive_id;
	query_req.sub_cmd = MT_QDD;
	mutex_lock(&library->dlist_mutex);
	sprintf(l_mess, "Issue query drive device %#8.8x",
	    ibm_info->drive_id);
	mutex_unlock(&library->dlist_mutex);
	if (DBG_LVL(SAM_DBG_DEBUG))
		sam_syslog(LOG_DEBUG, "query_drv(%d) %#x.", LIBEQ,
		    ibm_info->drive_id);

	if (ioctl_ibmatl(library->open_fd, MTIOCLQ, &query_req) == -1) {
		ushort_t cc = query_req.mtlqret.cc;
		char dmmy[DIS_MES_LEN * 2];

		sprintf(dmmy, "query drive cmd failed(%x): %s", cc,
		    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		memccpy(l_mess, dmmy, '\0', DIS_MES_LEN);
		sam_syslog(LOG_INFO, "query_drv(%d): (MTIOCLQ): %m", LIBEQ);
		if (errno != ENOMEM && errno != EFAULT)
			sam_syslog(LOG_INFO,
			    "query_drv(%d): (MTIOCLQ): %s(%#x)", LIBEQ,
			    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		disp_of_event(library, event, MC_REQ_FL);
	} else {
		ushort_t cc = query_req.mtlqret.cc;

		if (cc)
			sam_syslog(LOG_INFO,
			    "query_drv(%d): ??: (MTIOCLQ): %s(%#x)", LIBEQ,
			    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		if (query_req.mtlqret.info.info_type != 0) {
			ibm_info->ret_data =
			    malloc_wait(sizeof (IBM_query_info_t), 2, 0);
			memcpy(ibm_info->ret_data, &query_req.mtlqret.info,
			    sizeof (IBM_query_info_t));
		} else
			sam_syslog(LOG_INFO,
			    "query_drv(%d): (MTIOCLQ): No information.",
			    LIBEQ);
		disp_of_event(library, event, MC_REQ_OK);
	}

	if (DBG_LVL(SAM_DBG_DEBUG))
		sam_syslog(LOG_DEBUG, "query_drv(%d) returning.", LIBEQ);
}
/*
 * This function is used to pass an error to the file system for
 * an associated stage request.
 */
int
sam_mig_stage_error(
	tp_stage_t *stage_req,
	int error)
{
	MigFileInfo_t *migfile;
	FileInfo_t *file;

	migfile = (MigFileInfo_t *)stage_req;
	file = migfile->file;

	if (DBG_LVL(SAM_DBG_MIGKIT)) {
		sam_syslog(LOG_DEBUG,
		    "%s [t@%d] Error staging inode: %d error: %d",
		    libname, pthread_self(), migfile->req.inode, error);
	}

	if (error != 0) {
		errorFile(migfile, error);
	}

	file->migfile = 0;
	free(migfile);
	SetStageDone(file);

	return (0);
}
Word readStack(Word *base, int slot)
{
  Word offset = (base - G_shadowStack.origStack) + slot;
  if (getBit(G_shadowStack.bitset, offset)) {
    DBG_LVL(3, "SHADOW_STACK_READ: %d => %" FMT_WordX "\n",
            (int)offset, G_shadowStack.stack[offset]);
    return G_shadowStack.stack[offset];
  } else {
    DBG_LVL(3, "REAL_STACK_READ: %d/%p => %" FMT_WordX " (base=%p,offs=%d)\n",
            (int)offset, &G_shadowStack.origStack[offset],
            G_shadowStack.origStack[offset], base, slot);
    return G_shadowStack.origStack[offset];
  }
}
bool verifyShadowHeap()
{
  u4 mismatches = 0;
  ShadowHeapEntry *p;
  /* Entry count is tail minus start of the shadow heap. */
  DBG_LVL(2, ".. Verifying shadow heap (entries: %d)\n",
          (int)(G_shadowHeapTail - G_shadowHeap));
  for (p = G_shadowHeap; p < G_shadowHeapTail; p++) {
    Word value = *p->address; // TODO: Verify that address points to valid region
    DBG_LVL(3, ".... verifying %p: jit: %9" FMT_WordX " interp: %9" FMT_WordX "\n",
            p->address, p->value, value);
    if (value != p->value) {
      fprintf(stderr, "Heap mismatch at: %p\n"
              " real heap: %" FMT_WordX " shadow heap: %" FMT_WordX "\n",
              p->address, value, p->value);
      mismatches++;
    }
  }
  return mismatches == 0;
}
/*
 * This function is called when the foreign data migration program
 * is ready to start staging the data for a stage request.  Open
 * the disk cache file.
 */
int
sam_mig_stage_file(
	tp_stage_t *stage_req)
{
	MigFileInfo_t *migfile;
	FileInfo_t *file;
	sam_fsstage_arg_t arg;
	int rc;

	migfile = (MigFileInfo_t *)stage_req;
	if (migfile == NULL) {
		return (-1);
	}

	file = migfile->file;
	if (file == NULL) {
		return (-1);
	}

	if (DBG_LVL(SAM_DBG_MIGKIT)) {
		sam_syslog(LOG_DEBUG, "%s [t@%d] Start staging inode: %d",
		    libname, pthread_self(), migfile->req.inode);
	}

	SetStageActive(file);
	LogStageStart(file);

	memset(&arg.handle, 0, sizeof (sam_handle_t));
	arg.handle.id = file->id;
	arg.handle.fseq = migfile->req.fseq;
	arg.handle.stage_off = file->fs.stage_off;
	arg.handle.stage_len = migfile->req.size;
	arg.handle.flags.b.stage_wait = file->fs.wait;
	arg.ret_err = 0;

	rc = 0;
	file->dcache = sam_syscall(SC_fsstage, &arg,
	    sizeof (sam_fsstage_arg_t));
	if (file->dcache < 0) {
		rc = -1;
		if (errno == ECANCELED) {
			sam_syslog(LOG_DEBUG,
			    "%s [t@%d] Stage request canceled by filesystem",
			    libname, pthread_self());
		} else {
			sam_syslog(LOG_INFO,
			    "%s [t@%d] System call (SC_fsstage) failed "
			    "errno: %d", libname, pthread_self(), errno);
		}
	}

	return (rc);
}
void
kill_off_threads(library_t *library)
{
	int i;

	for (i = 0; i < SONY_MAIN_THREADS; i++)
		if (threads[i] > 0)
			if (thr_kill(threads[i], SIGKILL) &&
			    DBG_LVL(SAM_DBG_DEBUG))
				sam_syslog(LOG_INFO,
				    "kill_off_threads: Unable to kill"
				    " thread %d.\n", i);
}
/*
 * Get a free event and check to see if more are needed.
 */
robo_event_t *
get_free_event(
	library_t *library)
{
	robo_event_t *ret;
	char *ent_pnt = "get_free_event";

	mutex_lock(&library->free_mutex);
	if (library->free_count < 20 && !library->inc_free_running) {
		sigset_t signal_set;

		(void) sigemptyset(&signal_set);
		(void) sigaddset(&signal_set, SIGEMT);
		library->inc_free_running++;
		thr_sigsetmask(SIG_BLOCK, &signal_set, NULL);
		thr_create(NULL, MD_THR_STK, &inc_free, (void *)library,
		    (THR_DETACHED | THR_BOUND), NULL);
		thr_sigsetmask(SIG_UNBLOCK, &signal_set, NULL);
		thr_yield();
	}

	while (library->free_count <= 0) {
		mutex_unlock(&library->free_mutex);
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG, "%s: Waiting for free event.",
			    ent_pnt);
		sleep(2);
		mutex_lock(&library->free_mutex);
	}

	ret = library->free;
	ETRACE((LOG_NOTICE, "EV:LfGf: %#x.", ret));
	library->free_count--;
	library->free = ret->next;
	mutex_unlock(&library->free_mutex);
	return (ret);
}
int
build_pids(shm_ptr_tbl_t *ptr_tbl)
{
	int i, j, count, stk_found = 0;
	char *ent_pnt = "build_pids";
	robots_t *robot;
	dev_ent_t *device_entry;
	rob_child_pids_t *pid;

	device_entry = (dev_ent_t *)SHM_REF_ADDR(
	    ((shm_ptr_tbl_t *)master_shm.shared_memory)->first_dev);

	/* Find number of devices that need drivers. */
	for (count = 0; device_entry != NULL;
	    device_entry = (dev_ent_t *)SHM_REF_ADDR(device_entry->next)) {
		if (IS_ROBOT(device_entry) ||
		    device_entry->type == DT_PSEUDO_SC ||
		    device_entry->type == DT_PSEUDO_SS) {
			count++;
			/* stks need the ssi running */
			if (!stk_found && device_entry->type == DT_STKAPI) {
				stk_found = TRUE;
				count++;
			}
		}
	}

	if (count) {
		count++;
		pids = (rob_child_pids_t *)
		    malloc_wait((count * sizeof (rob_child_pids_t)), 2, 0);
		memset(pids, 0, (count * sizeof (rob_child_pids_t)));
		device_entry = (dev_ent_t *)SHM_REF_ADDR(
		    ((shm_ptr_tbl_t *)master_shm.shared_memory)->first_dev);
		pid = pids;

		/* the pseudo device is first pid */
		if (stk_found)		/* need to have an ssi running */
			for (j = 0, robot = robots;
			    j < NUMBER_OF_ROBOT_TYPES; j++, robot++)
				if (robot->type == DT_PSEUDO_SSI) {
					pid->oldstate = DEV_ON;
					pid->who = robot;
					pid->eq = 0;
					pid->device = NULL;
					pid++;
					break;
				}

		for (; device_entry != NULL;
		    device_entry =
		    (dev_ent_t *)SHM_REF_ADDR(device_entry->next))
			if (IS_ROBOT(device_entry) ||
			    device_entry->type == DT_PSEUDO_SC ||
			    device_entry->type == DT_PSEUDO_SS) {
				pid->who = NULL;
				/* Is it driven by generic scsi II driver */
				if ((device_entry->type & DT_SCSI_R) ==
				    DT_SCSI_R) {
					pid->oldstate = device_entry->state;
					pid->who = &generic_robot;
					pid->eq = device_entry->eq;
					pid->device = device_entry;
				} else {
					for (j = 0, robot = robots;
					    j < NUMBER_OF_ROBOT_TYPES;
					    j++, robot++)
						if (robot->type ==
						    device_entry->type) {
							pid->oldstate =
							    device_entry->state;
							pid->who = robot;
							pid->eq =
							    device_entry->eq;
							pid->device =
							    device_entry;
							break;
						}
				}

				if (pid->who == NULL && DBG_LVL(SAM_DBG_DEBUG))
					/*
					 * To get this far with an unknown
					 * robot type means things failed
					 * during shared memory creation.
					 */
					if (device_entry->type !=
					    DT_HISTORIAN) {
						sam_syslog(LOG_DEBUG,
						    "%s:(%d): unknown"
						    " robot type(%d).",
						    ent_pnt, device_entry->eq,
						    device_entry->type);
					}
				pid++;
			}
	} else {
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG, "No robotic devices found.");
		return (0);
	}

	return (count - 1);
}
/*
 * monitor_msg - thread routine to monitor messages.
 */
void *
monitor_msg(
	void *vlibrary)
{
	int exit_status = 0;
	sigset_t signal_set;
	library_t *library = (library_t *)vlibrary;
	robo_event_t *current_event;
	struct sigaction sig_action;
	message_request_t *message, shutdown;
	enum sam_mess_type mtype;

	/* dummy up a shutdown message */
	(void) memset(&shutdown, 0, sizeof (message_request_t));
	(void) memset(&sig_action, 0, sizeof (struct sigaction));
	shutdown.mtype = MESS_MT_SHUTDOWN;
	/* LINTED constant truncated by assignment */
	shutdown.message.magic = MESSAGE_MAGIC;
	shutdown.message.command = MESS_CMD_SHUTDOWN;

	/*
	 * Should have been called with all signals blocked,
	 * now let sigemt be delivered and just exit when it is
	 */
	sig_action.sa_handler = sig_catch;
	sig_action.sa_flags = 0;
	(void) sigemptyset(&signal_set);
	(void) sigaddset(&signal_set, SIGEMT);
	(void) sigaction(SIGEMT, &sig_action, (struct sigaction *)NULL);
	(void) thr_sigsetmask(SIG_UNBLOCK, &signal_set, NULL);

	mutex_lock(&library->mutex);	/* wait for initialize */
	mutex_unlock(&library->mutex);

	message = (message_request_t *)SHM_REF_ADDR(
	    library->un->dt.rb.message);

	if (thr_create(NULL, MD_THR_STK, stk_acs_response, (void *)library,
	    (THR_BOUND | THR_NEW_LWP | THR_DETACHED), NULL)) {
		sam_syslog(LOG_CRIT,
		    "Unable to start stk_acs_response thread: %m.");
		thr_exit(NULL);
	}

	/* Main loop */
	for (;;) {
		current_event = get_free_event(library);

		/*
		 * Zeroing the struct has the effect of initializing
		 * the mutex and the condition to USYNC_THREAD, just
		 * what we want
		 */
		(void) memset(current_event, 0, sizeof (robo_event_t));
		current_event->status.bits = REST_FREEMEM;

		/* Wait for a message */
		mutex_lock(&message->mutex);
		while (message->mtype == MESS_MT_VOID)
			cond_wait(&message->cond_r, &message->mutex);

		/* Copy the request into the event */
		current_event->request.message = message->message;
		mtype = message->mtype;		/* capture message type */
		message->mtype = MESS_MT_VOID;	/* release the message area */
		message->message.exit_id.pid = 0;
		cond_signal(&message->cond_i);	/* and wake up anyone waiting */
		mutex_unlock(&message->mutex);

		if (mtype == MESS_MT_APIHELP) {
			current_event->next = NULL;
			mutex_lock(&stk_acs_mutex);
			/*
			 * If the list is NULL, this will be the only
			 * entry on the list.  Set the head and last to current
			 */
			if (stk_acs_event_head == NULL) {
				stk_acs_event_head = stk_acs_event_last =
				    current_event;
				cond_signal(&stk_acs_cond);
			} else {
				/*
				 * If the head is not null, last points to the
				 * last entry on the list.  Point last
				 * next to the current then set last = current
				 */
				stk_acs_event_last->next = current_event;
				stk_acs_event_last = current_event;
			}
			mutex_unlock(&stk_acs_mutex);
		} else {
			current_event->type = EVENT_TYPE_MESS;

			/*
			 * Put the event on the list and
			 * wake up the event handler
			 */
			add_to_end(library, current_event);
			/* Test the captured type; the shared area was reset */
			if (mtype == MESS_MT_SHUTDOWN) {
				if (DBG_LVL(SAM_DBG_DEBUG))
					sam_syslog(LOG_DEBUG,
					    "shutdown request:%s:%d.",
					    __FILE__, __LINE__);
				threads[STK_MSG_THREAD] = (thread_t)-1;
				thr_exit(&exit_status);
				/* NOTREACHED */
				return (NULL);
			}
		}
	}
}
/*
 * Main thread.  Sits on the message queue and waits for something to do.
 */
void *
transport_thread(
	void *vxport)
{
	int exit_status = 0, err;
	robo_event_t *event;
	xport_state_t *transport = (xport_state_t *)vxport;
	int is_api = IS_GENERIC_API(transport->library->un->type);
	dev_ent_t *un = transport->library->un;

	mutex_lock(&transport->mutex);	/* wait for go */
	mutex_unlock(&transport->mutex);

	for (;;) {
		mutex_lock(&transport->list_mutex);
		if (transport->active_count == 0)
			cond_wait(&transport->list_condit,
			    &transport->list_mutex);

		if (transport->active_count == 0) {	/* check to make sure */
			mutex_unlock(&transport->list_mutex);
			continue;
		}

		event = transport->first;
		transport->first = unlink_list(event);
		transport->active_count--;
		mutex_unlock(&transport->list_mutex);
		ETRACE((LOG_NOTICE, "EvTr %#x(%#x) -", event,
		    (event->type == EVENT_TYPE_MESS) ?
		    event->request.message.command :
		    event->request.internal.command));
		err = 0;

		switch (event->type) {
		case EVENT_TYPE_INTERNAL:
			switch (event->request.internal.command) {
			case ROBOT_INTRL_MOVE_MEDIA:
				if (is_api == TRUE) {
					err = EINVAL;
					break;
				} else {
					if (un->state <= DEV_IDLE) {
						err = move(transport->library,
						    event);
					} else {
						err = EINVAL;
					}
				}
				break;

			case ROBOT_INTRL_EXCH_MEDIA:
				if (is_api == TRUE) {
					err = EINVAL;
					break;
				} else {
					if (un->state <= DEV_IDLE) {
						err = exchange(
						    transport->library, event);
					} else {
						err = EINVAL;
					}
				}
				break;

			case ROBOT_INTRL_INIT:
				init_transport(transport);
				if (is_api == TRUE) {
					disp_of_event(transport->library,
					    event, 0);
				}
				break;

			case ROBOT_INTRL_SHUTDOWN:
				transport->thread = (thread_t)-1;
				thr_exit(&exit_status);
				break;

			case ROBOT_INTRL_LOAD_MEDIA:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_load_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			case ROBOT_INTRL_FORCE_MEDIA:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_force_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			case ROBOT_INTRL_DISMOUNT_MEDIA:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_dismount_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			case ROBOT_INTRL_VIEW_DATABASE:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_view_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			case ROBOT_INTRL_DRIVE_ACCESS:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_drive_access_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			case ROBOT_INTRL_QUERY_DRIVE:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_query_drive_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			case ROBOT_INTRL_GET_SIDE_INFO:
				if (is_api == FALSE) {
					err = EINVAL;
					break;
				}
				event->next = (robo_event_t *)
				    transport->library;
				err = thr_create(NULL, MD_THR_STK,
				    api_getsideinfo_command, (void *)event,
				    THR_DETACHED, NULL);
				if (err)
					DevLog(DL_ERR(6038),
					    event->request.internal.command,
					    err);
				break;

			default:
				err = EINVAL;
				break;
			}
			break;

		case EVENT_TYPE_MESS:
			if (event->request.message.magic != MESSAGE_MAGIC) {
				if (DBG_LVL(SAM_DBG_DEBUG))
					sam_syslog(LOG_DEBUG,
					    "xpt_thr:bad magic: %s:%d.",
					    __FILE__, __LINE__);
				break;
			}
			switch (event->request.message.command) {
			default:
				if (DBG_LVL(SAM_DBG_DEBUG))
					sam_syslog(LOG_DEBUG,
					    "xpt_thr:msq_bad: %s:%d.",
					    __FILE__, __LINE__);
				err = EINVAL;
				break;
			}
			break;

		default:
			if (DBG_LVL(SAM_DBG_DEBUG))
				sam_syslog(LOG_DEBUG,
				    "xpt_thr:event_bad: %s:%d.",
				    __FILE__, __LINE__);
			err = EINVAL;
			break;
		}

		if (is_api == FALSE) {
			disp_of_event(transport->library, event, err);
		} else if (err) {
			/* call disp_of_event only if an error on grau */
			if (err < 0)
				err = errno;
			disp_of_event(transport->library, event, err);
		}
	}
}
int
main(int argc, char **argv)
{
	int what_signal, i;
	char *ent_pnt = "main";
	char logname[20];
	char *l_mess, *lc_mess;
	sigset_t sigwait_set;
	struct sigaction sig_action;
	dev_ptr_tbl_t *dev_ptr_tbl;
	shm_ptr_tbl_t *shm_ptr_tbl;
	sam_defaults_t *defaults;

	if (argc != 4)
		exit(1);

	initialize_fatal_trap_processing(SOLARIS_THREADS, fatal_cleanup);
	CustmsgInit(1, NULL);
	library = (library_t *)malloc_wait(sizeof (library_t), 2, 0);
	(void) memset(library, 0, sizeof (library_t));

	/*
	 * Crack the arguments
	 */
	argv++;
	master_shm.shmid = atoi(*argv);
	argv++;
	preview_shm.shmid = atoi(*argv);
	argv++;
	library->eq = atoi(*argv);
	mypid = getpid();

	if ((master_shm.shared_memory =
	    shmat(master_shm.shmid, NULL, 0774)) == (void *)-1)
		exit(2);

	shm_ptr_tbl = (shm_ptr_tbl_t *)master_shm.shared_memory;
	if ((preview_shm.shared_memory =
	    shmat(preview_shm.shmid, NULL, 0774)) == (void *)-1)
		exit(3);

	fifo_path = strdup(SHM_REF_ADDR(shm_ptr_tbl->fifo_path));
	sprintf(logname, "sony-%d", library->eq);
	defaults = GetDefaults();
	openlog(logname, LOG_PID | LOG_NOWAIT, defaults->log_facility);

	dev_ptr_tbl = (dev_ptr_tbl_t *)SHM_REF_ADDR(
	    ((shm_ptr_tbl_t *)master_shm.shared_memory)->dev_table);
	/* LINTED pointer cast may result in improper alignment */
	library->un = (dev_ent_t *)
	    SHM_REF_ADDR(dev_ptr_tbl->d_ent[library->eq]);
	/* LINTED pointer cast may result in improper alignment */
	library->help_msg = (sony_priv_mess_t *)
	    SHM_REF_ADDR(library->un->dt.rb.private);
	l_mess = library->un->dis_mes[DIS_MES_NORM];
	lc_mess = library->un->dis_mes[DIS_MES_CRIT];

	/* check if we should log sef data */
	(void) sef_status();

	if (DBG_LVL(SAM_DBG_RBDELAY)) {
		int ldk = 60;

		sam_syslog(LOG_DEBUG, "Waiting for 60 seconds.");
		while (ldk > 0 && DBG_LVL(SAM_DBG_RBDELAY)) {
			sprintf(lc_mess, "waiting for %d seconds pid %d",
			    ldk, mypid);
			sleep(10);
			ldk -= 10;
		}
		*lc_mess = '\0';
	}

	mutex_init(&library->mutex, USYNC_THREAD, NULL);

	/*
	 * Hold the lock until initialization is complete
	 */
	mutex_lock(&library->mutex);
	common_init(library->un);
	mutex_init(&library->help_msg->mutex, USYNC_PROCESS, NULL);
	cond_init(&library->help_msg->cond_i, USYNC_PROCESS, NULL);
	cond_init(&library->help_msg->cond_r, USYNC_PROCESS, NULL);
	library->help_msg->mtype = SONY_PRIV_VOID;

	/*
	 * Start the main threads
	 */
	if (thr_create(NULL, DF_THR_STK, monitor_msg, (void *)library,
	    (THR_BOUND | THR_NEW_LWP | THR_DETACHED),
	    &threads[SONY_MSG_THREAD])) {
		sam_syslog(LOG_ERR,
		    "Unable to start thread monitor_msg: %m.\n");
		thr_exit(NULL);
	}
	if (thr_create(NULL, MD_THR_STK, manage_list, (void *)library,
	    (THR_BOUND | THR_NEW_LWP | THR_DETACHED),
	    &threads[SONY_WORK_THREAD])) {
		sam_syslog(LOG_ERR,
		    "Unable to start thread manage_list: %m.\n");
		thr_kill(threads[SONY_MSG_THREAD], SIGINT);
		thr_exit(NULL);
	}

	mutex_lock(&library->un->mutex);
	library->un->dt.rb.process = getpid();
	library->un->status.b.ready = FALSE;
	library->un->status.b.present = FALSE;
	mutex_unlock(&library->un->mutex);

	/*
	 * Initialize the library.  This will release the library mutex.
	 */
	memccpy(l_mess, catgets(catfd, SET, 9065, "initializing"),
	    '\0', DIS_MES_LEN);
	if (initialize(library, dev_ptr_tbl))
		thr_exit(NULL);

	/*
	 * Now let the other threads run
	 */
	thr_yield();
	mutex_lock(&library->mutex);
	i = 30;

	{
		char *MES_9155 = catgets(catfd, SET, 9155,
		    "waiting for %d drives to initialize");
		char *mes = (char *)malloc_wait(strlen(MES_9155) + 15, 5, 0);

		while (library->countdown && i-- > 0) {
			sprintf(mes, MES_9155, library->countdown);
			memccpy(l_mess, mes, '\0', DIS_MES_LEN);
			sam_syslog(LOG_INFO, catgets(catfd, SET, 9156,
			    "%s: Waiting for %d drives to initialize."),
			    ent_pnt, library->countdown);
			mutex_unlock(&library->mutex);
			sleep(10);
			mutex_lock(&library->mutex);
		}
		free(mes);
	}

	if (i <= 0)
		sam_syslog(LOG_INFO, catgets(catfd, SET, 9157,
		    "%s: %d drive(s) did not initialize."),
		    ent_pnt, library->countdown);

	mutex_unlock(&library->mutex);
	memccpy(l_mess, "running", '\0', DIS_MES_LEN);

	mutex_lock(&library->un->mutex);
	library->un->status.b.audit = FALSE;
	library->un->status.b.requested = FALSE;
	library->un->status.b.mounted = TRUE;
	library->un->status.b.ready = TRUE;
	mutex_unlock(&library->un->mutex);

	/*
	 * Now that the daemon is fully initialized, the main thread is
	 * just used to monitor the thread state and an indication of shutdown.
	 * This is accomplished using the signals SIGALRM, SIGINT, and SIGTERM.
	 * This is not done with a signal handler, but using the sigwait() call.
	 */
	sigemptyset(&sigwait_set);
	sigaddset(&sigwait_set, SIGINT);
	sigaddset(&sigwait_set, SIGTERM);
	sigaddset(&sigwait_set, SIGALRM);

	/* want to restart system calls */
	sig_action.sa_handler = SIG_DFL;
	sigemptyset(&sig_action.sa_mask);
	sig_action.sa_flags = SA_RESTART;
	sigaction(SIGINT, &sig_action, NULL);
	sigaction(SIGTERM, &sig_action, NULL);
	sigaction(SIGALRM, &sig_action, NULL);

	for (;;) {
		alarm(20);
		what_signal = sigwait(&sigwait_set);
		switch (what_signal) {
		case SIGALRM:
			if ((threads[SONY_MSG_THREAD] == (thread_t)-1) ||
			    (threads[SONY_WORK_THREAD] == (thread_t)-1)) {
				/*
				 * If any of the processing threads
				 * have disappeared, log the
				 * fact, and take a core dump
				 */
				sam_syslog(LOG_INFO,
				    "%s: SIGALRM: Thread(s) gone.", ent_pnt);
				abort();
			}
			break;

		/*
		 * For a normal shutdown of the robot daemon:
		 *  1) prevent the alarm from going off during
		 *     shutdown and causing a core dump
		 *  2) log the reason we are shutting down
		 *  3) kill the helper pid if there is one
		 *  4) terminate the connection to the catalog
		 *  5) terminate all of the processing threads
		 */
		case SIGINT:
		case SIGTERM:
			sigdelset(&sigwait_set, SIGALRM);
			sam_syslog(LOG_INFO, "%s: Shutdown by signal %d",
			    ent_pnt, what_signal);
			if (library->helper_pid > 0) {
				kill(library->helper_pid, 9);
			}
			kill_off_threads(library);
			exit(0);
			break;

		default:
			break;
		}
	}
}
int
main(
	int argc,
	char **argv)
{
	int what_signal;
	char *ent_pnt = "main";
	char logname[20];
	char *lc_mess;
	sigset_t sigwait_set;
	struct sigaction sig_action;
	dev_ptr_tbl_t *dev_ptr_tbl;
	shm_ptr_tbl_t *shm_ptr_tbl;

	initialize_fatal_trap_processing(SOLARIS_THREADS, fatal_cleanup);
	program_name = "generic";
	library = (library_t *)malloc_wait(sizeof (library_t), 2, 0);
	(void) memset(library, 0, sizeof (library_t));
	(void) memset(&lock_time_mutex, 0, sizeof (mutex_t));

	if (argc != 4)
		exit(1);

	/* Crack arguments (such as they are) */
	argv++;
	master_shm.shmid = atoi(*argv);
	argv++;
	preview_shm.shmid = atoi(*argv);
	argv++;
	library->eq = atoi(*argv);

	sprintf(logname, "genu-%d", library->eq);
	program_name = logname;
	open("/dev/null", O_RDONLY);	/* stdin */
	open("/dev/null", O_RDONLY);	/* stdout */
	open("/dev/null", O_RDONLY);	/* stderr */
	CustmsgInit(1, NULL);
	mypid = getpid();

	if ((master_shm.shared_memory =
	    shmat(master_shm.shmid, NULL, 0774)) == (void *)-1)
		exit(2);

	shm_ptr_tbl = (shm_ptr_tbl_t *)master_shm.shared_memory;
	if ((preview_shm.shared_memory =
	    shmat(preview_shm.shmid, NULL, 0774)) == (void *)-1)
		exit(3);

	fifo_path = strdup(SHM_REF_ADDR(shm_ptr_tbl->fifo_path));
	dev_ptr_tbl = (dev_ptr_tbl_t *)SHM_REF_ADDR(
	    ((shm_ptr_tbl_t *)master_shm.shared_memory)->dev_table);
	/* LINTED pointer cast may result in improper alignment */
	library->un = (dev_ent_t *)SHM_REF_ADDR(
	    dev_ptr_tbl->d_ent[library->eq]);
	library->ele_dest_len = ELEMENT_DESCRIPTOR_LENGTH;
	lc_mess = library->un->dis_mes[DIS_MES_CRIT];

	if (IS_GENERIC_API(library->un->type)) {
		if (sizeof (api_resp_api_t) > sizeof (sam_message_t)) {
			sprintf(lc_mess,
			    "FATAL: API response(%d) larger than message(%d)"
			    " area.",
			    sizeof (api_resp_api_t), sizeof (sam_message_t));
			sam_syslog(LOG_CRIT,
			    "FATAL: API response(%d) larger than message(%d)"
			    " area.",
			    sizeof (api_resp_api_t), sizeof (sam_message_t));
			exit(4);
		}
	}

	/* check if we should log sef data */
	(void) sef_status();

	if (DBG_LVL(SAM_DBG_RBDELAY)) {
		int ldk = 60;

		sam_syslog(LOG_DEBUG, "Waiting for 60 seconds.");
		while (ldk > 0 && DBG_LVL(SAM_DBG_RBDELAY)) {
			sprintf(lc_mess, "waiting for %d seconds pid %d",
			    ldk, mypid);
			sleep(10);
			ldk -= 10;
		}
		*lc_mess = '\0';
	}

	mutex_init(&library->mutex, USYNC_THREAD, 0);

	/* grab the lock and hold it until initialization is complete */
	mutex_lock(&library->mutex);
	common_init(library->un);

	/* allocate the free list */
	library->free = init_list(ROBO_EVENT_CHUNK);
	library->free_count = ROBO_EVENT_CHUNK;
	mutex_init(&library->free_mutex, USYNC_THREAD, NULL);
	mutex_init(&library->list_mutex, USYNC_THREAD, NULL);
	cond_init(&library->list_condit, USYNC_THREAD, NULL);

	if (IS_GENERIC_API(library->un->type)) {
		library->help_msg = (api_priv_mess_t *)
		    SHM_REF_ADDR(library->un->dt.rb.private);
		mutex_init(&library->help_msg->mutex, USYNC_PROCESS, NULL);
		cond_init(&library->help_msg->cond_i, USYNC_PROCESS, NULL);
		cond_init(&library->help_msg->cond_r, USYNC_PROCESS, NULL);
		library->help_msg->mtype = API_PRIV_VOID;
	}
int irEngine(Capability *cap, Fragment *F)
{
  static Inst disp[] = {
#define IRIMPL(name, f, o1, o2)  &&op_##name,
    IRDEF(IRIMPL)
#undef IRIMPL
    &&stop
  };

  IRRef ref;
  Thread *T = cap->T;
  Word nphis = F->nphis;
  Word *base = T->base - 1;
  Word szins = F->nins - F->nk;
  Word vals_[szins + nphis];
  Word *phibuf = &vals_[szins];  /* For parallel copy of PHI nodes */
  Word *vals = vals_ - (int)F->nk;

  IRIns *pc = F->ir + REF_FIRST;
  IRRef pcref = REF_FIRST;
  IRIns *pcmax = F->ir + F->nins;
  IRIns *pcloop = F->nloop ? F->ir + F->nloop + 1 : pc;
  //int count = 100;

  DBG_PR("*** Executing trace.\n"
         "*** base = %p\n"
         "*** pc = %p\n"
         "*** pcmax = %p (%d)\n"
         "*** loop = %p (%d)\n",
         base, pc, pcmax, (int)(pcmax - pc), pcloop, (int)(pcloop - pc));

  /* Materialise the constant references of the fragment. */
  for (ref = F->nk; ref < REF_BIAS; ref++) {
    switch (IR(ref)->o) {
    case IR_KINT:   vals[ref] = (Word)IR(ref)->i; break;
    case IR_KBASEO: vals[ref] = (Word)(T->base + IR(ref)->i); break;
    case IR_KWORD:  vals[ref] = (Word)(F->kwords[IR(ref)->u]); break;
    default:
      LC_ASSERT(0);
      break;
    }
    DBG_LVL(2, "%d, %" FMT_WordX "\n", ref - REF_BIAS, vals[ref]);
  }
  vals[REF_BASE] = (Word)base;

  goto *disp[pc->o];

# define DISPATCH_NEXT \
  if (irt_type(pc->t) != IRT_VOID && pc->o != IR_PHI) { \
    if (irt_type(pc->t) == IRT_I32) \
      DBG_LVL(2, " ===> %" FMT_Int "\n", vals[pcref]); \
    else \
      DBG_LVL(2, " ===> 0x%" FMT_WordX "\n", vals[pcref]); } \
  ++pc; ++pcref; \
  if (LC_UNLIKELY(pc >= pcmax)) { pc = pcloop; pcref = F->nloop + 1; } \
  if (pc->o != IR_NOP) { \
    DBG_LVL(2, "[%d] ", pcref - REF_BIAS); \
    IF_DBG_LVL(2, printIR(F, *pc)); } \
  goto *disp[pc->o]

op_NOP:
op_FRAME:
op_RET:
op_LOOP:
  DISPATCH_NEXT;

op_PHI:
  {
    /* PHI nodes represent parallel assignments, so as soon as we
       discover the first PHI node, we perform all assignments in
       parallel. */
    LC_ASSERT(nphis > 0);
    u2 i;
    DBG_LVL(3, " ( ");
    for (i = 0; i < nphis; i++) {
      DBG_LVL(3, "%d ", irref_int(pc[i].op2));
      phibuf[i] = vals[pc[i].op2];
    }
    DBG_LVL(3, ") --> ( ");
    for (i = 0; i < nphis; i++) {
      DBG_LVL(3, "%d ", irref_int(pc[i].op1));
      vals[pc[i].op1] = phibuf[i];
    }
    DBG_LVL(3, ") [%d phis]\n", (int)nphis);
    pc += nphis - 1;
    //vals[pc->op1] = vals[pc->op2];
    DISPATCH_NEXT;
  }

op_LT:
  recordEvent(EV_CMP, 0);
  if (!((WordInt)vals[pc->op1] < (WordInt)vals[pc->op2]))
    goto guard_failed;
  DISPATCH_NEXT;

op_GE:
  recordEvent(EV_CMP, 0);
  if (!((WordInt)vals[pc->op1] >= (WordInt)vals[pc->op2]))
    goto guard_failed;
  DISPATCH_NEXT;

op_LE:
  recordEvent(EV_CMP, 0);
  if (!((WordInt)vals[pc->op1] <= (WordInt)vals[pc->op2]))
    goto guard_failed;
  DISPATCH_NEXT;

op_GT:
  recordEvent(EV_CMP, 0);
  if (!((WordInt)vals[pc->op1] > (WordInt)vals[pc->op2]))
    goto guard_failed;
  DISPATCH_NEXT;

op_EQ:
  recordEvent(EV_CMP, 0);
  if (!((WordInt)vals[pc->op1] == (WordInt)vals[pc->op2])) {
    goto guard_failed;
  }
  DISPATCH_NEXT;

op_NE:
  recordEvent(EV_CMP, 0);
  if (!((WordInt)vals[pc->op1] != (WordInt)vals[pc->op2]))
    goto guard_failed;
  DISPATCH_NEXT;

op_ADD:
  recordEvent(EV_ALU, 0);
  vals[pcref] = vals[pc->op1] + vals[pc->op2];
  DISPATCH_NEXT;

op_SUB:
  recordEvent(EV_ALU, 0);
  vals[pcref] = vals[pc->op1] - vals[pc->op2];
  DISPATCH_NEXT;

op_MUL:
  recordEvent(EV_MUL, 0);
  vals[pcref] = (WordInt)vals[pc->op1] * (WordInt)vals[pc->op2];
  DISPATCH_NEXT;

op_DIV:
  recordEvent(EV_REMDIV, 0);
  if (LC_LIKELY(vals[pc->op2] != 0))
    vals[pcref] = (WordInt)vals[pc->op1] / (WordInt)vals[pc->op2];
  else
    LC_ASSERT(0);
  DISPATCH_NEXT;

op_REM:
  recordEvent(EV_REMDIV, 0);
  if (LC_LIKELY(vals[pc->op2] != 0))
    vals[pcref] = (WordInt)vals[pc->op1] % (WordInt)vals[pc->op2];
  else
    LC_ASSERT(0);
  DISPATCH_NEXT;

op_FREF:
  vals[pcref] = (Word)(((Closure*)vals[pc->op1])->payload + (pc->op2 - 1));
  DISPATCH_NEXT;

op_FLOAD:
  recordEvent(EV_LOAD, 0);
  vals[pcref] = *((Word*)vals[pc->op1]);
  DISPATCH_NEXT;

op_SLOAD:
  recordEvent(EV_LOAD, 0);
  vals[pcref] = base[pc->op1];
  DISPATCH_NEXT;

op_ILOAD:
  recordEvent(EV_LOAD, 0);
  vals[pcref] = (Word)getInfo(vals[pc->op1]);
  DISPATCH_NEXT;

op_NEW:
  if (!ir_issunken(pc)) {
    // do actual allocation on trace
    HeapInfo *hp = &F->heap[pc->op2];
    int j;
    recordEvent(EV_ALLOC, hp->nfields + 1);
    Closure *cl = allocClosure(wordsof(ClosureHeader) + hp->nfields);
    setInfo(cl, (InfoTable*)vals[pc->op1]);
    for (j = 0; j < hp->nfields; j++) {
      cl->payload[j] = vals[getHeapInfoField(F, hp, j)];
    }
    vals[pcref] = (Word)cl;
  } else {
    vals[pcref] = 0;  // to trigger an error if accessed
  }
  DISPATCH_NEXT;

op_UPDATE:
  {
    recordEvent(EV_UPDATE, 0);
    Closure *oldnode = (Closure *)vals[pc->op1];
    Closure *newnode = (Closure *)base[pc->op2];
    setInfo(oldnode, (InfoTable*)&stg_IND_info);
    oldnode->payload[0] = (Word)newnode;
    DISPATCH_NEXT;
  }

op_RLOAD:
op_FSTORE:
op_RENAME:

op_BNOT:
op_BAND:
op_BOR:
op_BXOR:
op_BSHL:
op_BSHR:
op_BSAR:
op_BROL:
op_BROR:

  // These should never be executed.
op_BASE:
op_KINT:
op_KWORD:
op_KBASEO:
  LC_ASSERT(0);

guard_failed:
  DBG_PR("Exiting at %d\n", pcref - REF_BIAS);

  {
    int i;
    SnapShot *snap = 0;
    SnapEntry *se;
    for (i = 0; i < F->nsnap; i++) {
      if (F->snap[i].ref == pcref) {
        snap = &F->snap[i];
        break;
      }
    }
    LC_ASSERT(snap != 0);
    snap->count++;
    se = F->snapmap + snap->mapofs;
    DBG_PR("Snapshot: %d, Snap entries: %d, slots = %d\n",
           i, snap->nent, snap->nslots);
    recordEvent(EV_EXIT, snap->nent);
    /* Restore the interpreter state from the snapshot. */
    for (i = 0; i < snap->nent; i++, se++) {
      BCReg s = snap_slot(*se);
      IRRef r = snap_ref(*se);
      DBG_PR("base[%d] = ", s - 1);
      base[s] = restoreValue(F, vals, r);
      IF_DBG_LVL(1, printSlot(stderr, base + s); fprintf(stderr, "\n"));
      //DBG_PR("0x%" FMT_WordX "\n", base[s]);
    }
    DBG_PR("Base slot: %d\n", se[1]);
    // se[1] =
    T->pc = (BCIns *)F->startpc + (int)se[0];
    T->base = base + se[1];
    T->top = base + snap->nslots;
    //printFrame(T->base, T->top);
    return 0;
  }

stop:
  return 1;
}
/*
 * load_media - load media into a drive
 *
 */
req_comp_t
load_media(
	library_t *library,
	drive_state_t *drive,
	struct CatalogEntry *ce,
	ushort_t category)
{
	req_comp_t err;
	xport_state_t *transport;
	robo_event_t *load, *tmp;
	ibm_req_info_t *ibm_info;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->drive_id = drive->drive_id;
	ibm_info->src_cat = category;

	if (ce != NULL)
		memcpy((void *)&drive->bar_code, ce->CeBarCode, 8);
	else {
		memset((void *)&drive->bar_code, 0, 8);
		memset((void *)&drive->bar_code, ' ', 6);
	}

	sprintf((void *)&ibm_info->volser, "%-8.8s", drive->bar_code);

	/* Build transport thread request */
	load = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(load, 0, sizeof (robo_event_t));
	load->request.internal.command = ROBOT_INTRL_LOAD_MEDIA;
	load->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "load_media(%d): from %s to %s.",
		    LIBEQ, drive->bar_code, drive->un->name);

	load->type = EVENT_TYPE_INTERNAL;
	load->status.bits = REST_SIGNAL;
	load->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;
	mutex_lock(&load->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = load;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, load);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the unload */
	while (load->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&load->condit, &load->mutex);
	mutex_unlock(&load->mutex);

	err = (req_comp_t)load->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from transport load (%#x).",
		    load->completion);
	free(ibm_info);
	mutex_destroy(&load->mutex);
	free(load);
	return (err);
}
/*
 * view_media - view a database entry
 */
req_comp_t
view_media(
	library_t *library,
	char *vsn,
	void **ret_data)
{
	req_comp_t err;
	ibm_req_info_t *ibm_info;
	xport_state_t *transport;
	robo_event_t *view, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->sub_cmd = MT_QEVD;	/* view a single data base entry */
	sprintf((void *)&ibm_info->volser, "%-8.8s", vsn);

	/* Build transport thread request */
	view = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(view, 0, sizeof (robo_event_t));
	view->request.internal.command = ROBOT_INTRL_VIEW_DATABASE;
	view->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "view_media: %s.", vsn);

	view->type = EVENT_TYPE_INTERNAL;
	view->status.bits = REST_SIGNAL;
	view->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;
	mutex_lock(&view->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = view;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, view);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the request */
	while (view->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&view->condit, &view->mutex);
	mutex_unlock(&view->mutex);

	err = (req_comp_t)view->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from view (%#x).",
		    view->completion);

	if (!err) {
		*ret_data = malloc_wait(sizeof (IBM_query_info_t), 2, 0);
		memcpy(*ret_data, ibm_info->ret_data,
		    sizeof (IBM_query_info_t));
	} else
		*ret_data = NULL;

	free(ibm_info);
	mutex_destroy(&view->mutex);
	free(view);
	return (err);
}
/*
 * query_library - send the supplied query to the library.
 */
req_comp_t
query_library(
	library_t *library,
	int seqno,		/* starting-1 sequence number */
	int sub_cmd,		/* what query */
	void **ret_data,	/* return data */
	ushort_t category)
{
	req_comp_t err;
	ibm_req_info_t *ibm_info;
	xport_state_t *transport;
	robo_event_t *view, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->sub_cmd = sub_cmd;
	ibm_info->src_cat = category;
	ibm_info->seqno = seqno;
	memset(&ibm_info->volser[0], ' ', 8);

	/* Build transport thread request */
	view = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(view, 0, sizeof (robo_event_t));
	view->request.internal.command = ROBOT_INTRL_QUERY_LIBRARY;
	view->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "query_library: %#x.", sub_cmd);

	view->type = EVENT_TYPE_INTERNAL;
	view->status.bits = REST_SIGNAL;
	view->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;
	mutex_lock(&view->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = view;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, view);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the request */
	while (view->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&view->condit, &view->mutex);
	mutex_unlock(&view->mutex);

	err = (req_comp_t)view->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from query_library (%#x).",
		    view->completion);

	if (!err) {
		*ret_data = malloc_wait(sizeof (IBM_query_info_t), 2, 0);
		memcpy(*ret_data, ibm_info->ret_data,
		    sizeof (IBM_query_info_t));
	} else
		*ret_data = NULL;

	free(ibm_info);
	mutex_destroy(&view->mutex);
	free(view);
	return (err);
}
/*
 * set_category - set the category of specified volume to whatever.
 *
 */
void
set_category(
	library_t *library,
	robo_event_t *event)
{
	int delayed_req = FALSE;
	IBM_set_category_t set_req;
	delay_list_ent_t *dly_ent;
	ibm_req_info_t *ibm_info =
	    (ibm_req_info_t *)event->request.internal.address;

	memset(&set_req, 0, sizeof (IBM_set_category_t));
	set_req.target_cat = ibm_info->targ_cat;
	set_req.source_cat = ibm_info->src_cat;

	/* Eject process is done as a delayed request */
	if (ibm_info->targ_cat == EJECT_CATEGORY ||
	    ibm_info->targ_cat == B_EJECT_CATEGORY) {
		dly_ent = (delay_list_ent_t *)
		    malloc_wait(sizeof (delay_list_ent_t), 2, 0);
		memset(dly_ent, 0, sizeof (delay_list_ent_t));
		dly_ent->event = event;
		delayed_req = TRUE;
	} else
		set_req.wait_flg = 1;	/* indicate no delay */

	memcpy(set_req.volser, ibm_info->volser, 8);
	mutex_lock(&library->dlist_mutex);

	{
		char *c = &ibm_info->volser[0];

		sprintf(l_mess, "set category %#x on %c%c%c%c%c%c",
		    ibm_info->targ_cat, c[0], c[1], c[2], c[3], c[4], c[5]);
		if (!delayed_req)
			mutex_unlock(&library->dlist_mutex);
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG,
			    "set_category(%d) %#x, %c%c%c%c%c%c.",
			    LIBEQ, ibm_info->targ_cat,
			    c[0], c[1], c[2], c[3], c[4], c[5]);
	}

	if (ioctl_ibmatl(library->open_fd, MTIOCLSVC, &set_req) == -1) {
		ushort_t cc = set_req.mtlsvcret.cc;
		char dmmy[DIS_MES_LEN * 2];

		sprintf(dmmy, "set category failed(%x): %s", cc,
		    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		memccpy(l_mess, dmmy, '\0', DIS_MES_LEN);
		sam_syslog(LOG_INFO, "set_category(%d): (MTIOCLSVC): %m",
		    LIBEQ);
		if (errno != ENOMEM && errno != EFAULT)
			sam_syslog(LOG_INFO,
			    "set_category(%d): (MTIOCLSVC): %s(%#x).", LIBEQ,
			    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		if (delayed_req) {
			free(dly_ent);
			mutex_unlock(&library->dlist_mutex);
		}
		disp_of_event(library, event, MC_REQ_TR);
	} else if (delayed_req) {
		/* The delay processing thread will dispose of the event */
		dly_ent->req_id = set_req.mtlsvcret.req_id;
		if ((dly_ent->next = library->delay_list) != NULL)
			library->delay_list->last = dly_ent;
		library->delay_list = dly_ent;
		mutex_unlock(&library->dlist_mutex);
		if (DBG_LVL(SAM_DBG_DEBUG)) {
			char *c = &ibm_info->volser[0];

			sam_syslog(LOG_DEBUG,
			    "set_category(%d): %c%c%c%c%c%c, %#x, id = %#x.",
			    LIBEQ, c[0], c[1], c[2], c[3], c[4], c[5],
			    ibm_info->targ_cat, set_req.mtlsvcret.req_id);
		}
	} else
		disp_of_event(library, event, MC_REQ_OK);
}
/*
 * set_media_category - change the category of media.
 */
req_comp_t
set_media_category(
	library_t *library,
	char *volser,
	ushort_t src_cat,	/* source category */
	ushort_t targ_cat)
{
	req_comp_t err;
	ibm_req_info_t *ibm_info;
	xport_state_t *transport;
	robo_event_t *set, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->targ_cat = targ_cat;
	ibm_info->src_cat = src_cat;
	sprintf((void *)&ibm_info->volser, "%-8.8s", volser);

	/* Build transport thread request */
	set = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(set, 0, sizeof (robo_event_t));
	set->request.internal.command = ROBOT_INTRL_SET_CATEGORY;
	set->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "set_media_category: %s %#x->%#x.",
		    volser, src_cat, targ_cat);

	set->type = EVENT_TYPE_INTERNAL;
	set->status.bits = REST_SIGNAL;
	set->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;
	mutex_lock(&set->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = set;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, set);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the request */
	while (set->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&set->condit, &set->mutex);
	mutex_unlock(&set->mutex);

	err = (req_comp_t)set->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from set (%#x).",
		    set->completion);
	free(ibm_info);
	mutex_destroy(&set->mutex);
	free(set);
	return (err);
}
/*
 * view - view library stuff.
 * Use library query to return information about things.
 * ibm_info specifies what information.
 */
void
view(
	library_t *library,
	robo_event_t *event)
{
	IBM_query_t query_req;
	ibm_req_info_t *ibm_info =
	    (ibm_req_info_t *)event->request.internal.address;

	memset(&query_req, 0, sizeof (IBM_query_t));
	memcpy(query_req.volser, ibm_info->volser, 8);
	query_req.device = ibm_info->drive_id;
	query_req.sub_cmd = ibm_info->sub_cmd;
	query_req.cat_seqno = ibm_info->seqno;
	query_req.source_cat = ibm_info->src_cat;

	{
		char *c = &ibm_info->volser[0];

		mutex_lock(&library->dlist_mutex);
		switch (ibm_info->sub_cmd) {
		case MT_QCID:
			sprintf(l_mess, "Issue query category %#x",
			    ibm_info->src_cat);
			break;

		case MT_QLD:
			memccpy(l_mess, "Issue query library", '\0',
			    DIS_MES_LEN);
			break;

		case MT_QEVD:
			sprintf(l_mess, "Issue query volume %c%c%c%c%c%c",
			    c[0], c[1], c[2], c[3], c[4], c[5]);
			break;

		case MT_QDD:
			sprintf(l_mess, "Issue query device %#8.8x",
			    ibm_info->drive_id);
			break;

		default:
			sam_syslog(LOG_DEBUG, "Issue query sub_cmd = %#x ",
			    ibm_info->sub_cmd);
			break;
		}
		mutex_unlock(&library->dlist_mutex);

		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG,
			    "view(%d) %c%c%c%c%c%c, sub_cmd = %#x ", LIBEQ,
			    c[0], c[1], c[2], c[3], c[4], c[5],
			    ibm_info->sub_cmd);
	}

	if (ioctl_ibmatl(library->open_fd, MTIOCLQ, &query_req) == -1) {
		ushort_t cc = query_req.mtlqret.cc;
		char dmmy[DIS_MES_LEN * 2];

		sprintf(dmmy, "query cmd %#x failed(%x): %s",
		    ibm_info->sub_cmd, cc,
		    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		memccpy(l_mess, dmmy, '\0', DIS_MES_LEN);
		sam_syslog(LOG_INFO, "view(%d): (MTIOCLQ): %m", LIBEQ);
		if (errno != ENOMEM && errno != EFAULT)
			sam_syslog(LOG_INFO, "view(%d): (MTIOCLQ): %s(%#x)",
			    LIBEQ,
			    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		disp_of_event(library, event, MC_REQ_FL);
	} else {
		ushort_t cc = query_req.mtlqret.cc;

		if (cc)
			sam_syslog(LOG_INFO,
			    "view(%d): ??: (MTIOCLQ): %s(%#x)", LIBEQ,
			    (cc > HIGH_CC) ? "Undefined" : cc_codes[cc], cc);
		if (query_req.mtlqret.info.info_type != 0) {
			ibm_info->ret_data =
			    malloc_wait(sizeof (IBM_query_info_t), 2, 0);
			memcpy(ibm_info->ret_data, &query_req.mtlqret.info,
			    sizeof (IBM_query_info_t));
		} else
			sam_syslog(LOG_INFO,
			    "view(%d): (MTIOCLQ): No information.", LIBEQ);
		disp_of_event(library, event, MC_REQ_OK);
	}

	if (DBG_LVL(SAM_DBG_DEBUG))
		sam_syslog(LOG_DEBUG, "view(%d): returning.", LIBEQ);
}
/*
 * get_media - get the media mounted.
 *
 * entry -
 *	drive->mutex should be held.  This mutex
 *	can be released during processing, but will be held
 *	on return.
 *
 * returns -
 *	 0 - ok
 *	 1 - some sort of error, dispose of event
 *	     Note: The catalog mutex is held on this condition.
 *	 2 - event was requeued.
 *	-1 - same as 1 cept that the drive should be downed
 *
 *	in all cases, dev_ent activity count will be incremented.
 */
int
get_media(
	library_t *library,
	drive_state_t *drive,
	robo_event_t *event,		/* the event (can be NULL) */
	struct CatalogEntry *ce)	/* catalog entry to be loaded */
{
	char *d_mess = drive->un->dis_mes[DIS_MES_NORM];
	dev_ent_t *un = drive->un;
	int status = 0;

	mutex_lock(&un->mutex);
	INC_ACTIVE(un);
	mutex_unlock(&un->mutex);

	/* is the media already mounted */
	if (drive->status.b.full && (un->slot == ce->CeSlot)) {
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG, "get_media:(%d)%s:%d.",
			    un->eq, __FILE__, __LINE__);
	} else {		/* get the media loaded */
		/*
		 * Make sure the source storage element has media in it.
		 * If the element is empty, external requests
		 * will be put back on the library's work list with the
		 * hope that it will be picked up later.  Internal
		 * requests will return an error to the caller.  If the in_use
		 * flag is not set, then the slot is
		 * "really" empty and the request will be disposed of.
		 */
		if (DBG_LVL(SAM_DBG_TMOVE))
			sam_syslog(LOG_DEBUG, "get_media:(%d)%s:%d.",
			    un->eq, __FILE__, __LINE__);

		if (!(ce->CeStatus & CES_occupied)) {
			/* Should this be put back on the library's list? */
			if ((ce->CeStatus & CES_inuse) && event != NULL &&
			    event->type != EVENT_TYPE_INTERNAL &&
			    !event->status.b.dont_reque) {
				event->next = NULL;
				add_to_end(library, event);
				/* Do not dispose of event */
				return (RET_GET_MEDIA_REQUEUED);
			}
			if (DBG_LVL(SAM_DBG_TMOVE))
				sam_syslog(LOG_DEBUG, "get_media:(%d)%s:%d.",
				    un->eq, __FILE__, __LINE__);
			return (RET_GET_MEDIA_DISPOSE);
		} else {
			status &= ~CES_occupied;
			(void) CatalogSetFieldByLoc(library->un->eq,
			    ce->CeSlot, 0, CEF_Status, status, CES_occupied);
		}

		if (drive->status.b.full) {
			mutex_lock(&un->mutex);
			un->status.bits |= DVST_UNLOAD;
			mutex_unlock(&un->mutex);

			/*
			 * Save off what information we know about this volume
			 * before we spin down the drive and clear out the un.
			 */
			memmove(un->i.ViMtype, sam_mediatoa(un->type),
			    sizeof (un->i.ViMtype));
			memmove(un->i.ViVsn, un->vsn, sizeof (un->i.ViVsn));
			un->i.ViEq = un->fseq;
			un->i.ViSlot = un->slot;
			un->i.ViPart = 0;

			if (un->status.b.ready) {
				(void) spin_drive(drive, SPINDOWN, NOEJECT);
				sprintf(d_mess, "unloading %s", un->vsn);
			}

			mutex_lock(&un->mutex);
			close_unit(un, &drive->open_fd);
			un->status.bits = (DVST_REQUESTED | DVST_PRESENT) |
			    (un->status.bits & DVST_CLEANING);
			clear_un_fields(un);
			mutex_unlock(&un->mutex);

#if defined(USEDISMOUNT)
			sprintf(d_mess, "dismounting %s", un->vsn);
			if (dismount_media(library, drive) != MC_REQ_OK) {
				if (DBG_LVL(SAM_DBG_TMOVE))
					sam_syslog(LOG_DEBUG,
					    "get_media:(%d)%s:%d.",
					    un->eq, __FILE__, __LINE__);
				return (RET_GET_MEDIA_DOWN_DRIVE);
			}
#endif	/* defined(USEDISMOUNT) */

			/* clean up the old entries */
			if (*un->i.ViMtype != '\0')
				un->i.ViFlags |= VI_mtype;
			if (*un->i.ViVsn != '\0')
				un->i.ViFlags |= VI_vsn;
			CatalogVolumeUnloaded(&un->i, "");

			mutex_lock(&un->mutex);
			un->slot = ROBOT_NO_SLOT;
			un->mid = un->flip_mid = ROBOT_NO_SLOT;
			un->label_time = 0;
			mutex_unlock(&un->mutex);

			/* clear the drive information */
			drive->status.b.full = FALSE;
			drive->status.b.bar_code = FALSE;
		}

#if !defined(USEDISMOUNT)
		if (*un->i.ViVsn != '\0')
			sprintf(d_mess, "dismount %s/mount %s",
			    un->i.ViVsn, un->vsn);
		else
			sprintf(d_mess, "mount %s", ce->CeVsn);
#else
		sprintf(d_mess, "mount %s", ce->CeVsn);
#endif

		if (load_media(library, drive, ce, 0) != MC_REQ_OK) {
			/*
			 * Process error and return status to caller.
			 * Requeue the event if not internal otherwise
			 * return status to caller.
			 */
			req_comp_t err;
			int ret = RET_GET_MEDIA_DISPOSE;
			IBM_query_info_t *info;

			if (DBG_LVL(SAM_DBG_DEBUG))
				sam_syslog(LOG_DEBUG, "load of %s failed.",
				    ce->CeVsn);
			sprintf(d_mess, "mount of %s failed.", ce->CeVsn);
			if (DBG_LVL(SAM_DBG_TMOVE))
				sam_syslog(LOG_DEBUG, "get_media:(%d)%s:%d.",
				    un->eq, __FILE__, __LINE__);

			/*
			 * Attempt some sort of recovery:
			 * Check the state of the volume itself
			 */
			err = view_media(library, ce->CeBarCode,
			    (void *)&info);

			if (err != MC_REQ_OK) {
				status |= CES_occupied;
				(void) CatalogSetFieldByLoc(library->un->eq,
				    ce->CeSlot, 0, CEF_Status, status, 0);
				/* If internal event, then dont requeue it */
				if (event == NULL ||
				    event->type == EVENT_TYPE_INTERNAL)
					ret = RET_GET_MEDIA_DISPOSE;
			} else if (info != NULL) {
				ushort_t vol_status, category;

				memcpy(&vol_status,
				    &info->data.expand_vol_data.volume_status[0],
				    sizeof (vol_status));
				memcpy(&category,
				    &info->data.expand_vol_data.cat_assigned[0],
				    sizeof (category));

				if (vol_status & MT_VLI)
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s Present but inaccessible.",
					    LIBEQ, ce->CeVsn);
				if (vol_status & (MT_VM | MT_VQM | MT_VPM |
				    MT_VQD | MT_VPD))
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s, Mounting/Dismounting "
					    " or queued.", LIBEQ, ce->CeVsn);
				if (vol_status & (MT_VQE | MT_VPE))
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s, Ejecting.", LIBEQ,
					    ce->CeVsn);
				if (vol_status & MT_VMIS)
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s, Misplaced.", LIBEQ,
					    ce->CeVsn);
				if (vol_status & MT_VUU)
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s, Unreadable label.", LIBEQ,
					    ce->CeVsn);
				if (vol_status & MT_VMM)
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s, Used during manual mode.",
					    LIBEQ, ce->CeVsn);
				if (vol_status & MT_VME)
					sam_syslog(LOG_INFO, "get_media(%d):"
					    " %s, Manually Ejected.", LIBEQ,
					    ce->CeVsn);

				if (category == MAN_EJECTED_CAT)
					set_media_category(library, ce->CeVsn,
					    MAN_EJECTED_CAT,
					    PURGE_VOL_CATEGORY);

				if (vol_status & (MT_VLI | MT_VQE | MT_VPE |
				    MT_VMIS | MT_VUU | MT_VME)) {
					ret = RET_GET_MEDIA_DISPOSE;
					sam_syslog(LOG_INFO,
					    "rec_cat(%d): %s, ce->CeSlot %d:"
					    " removed from catalog",
					    LIBEQ, ce->CeVsn, ce->CeSlot);
				}
				free(info);
			}
			return (ret);
		}
	}

	mutex_lock(&un->mutex);
	drive->status.b.full = TRUE;
	drive->status.b.valid = TRUE;
	memmove(un->vsn, ce->CeVsn, sizeof (un->vsn));
	un->slot = ce->CeSlot;
	un->mid = ce->CeMid;
	un->flip_mid = ROBOT_NO_SLOT;
	un->status.b.labeled = FALSE;
	un->status.b.ready = FALSE;
	drive->status.b.bar_code = TRUE;
	memcpy(drive->bar_code, ce->CeBarCode, BARCODE_LEN + 1);
	un->space = ce->CeSpace;

	switch (un->type & DT_CLASS_MASK) {
	case DT_OPTICAL:
		un->dt.od.ptoc_fwa = ce->m.CePtocFwa;
		break;

	case DT_TAPE:
		un->dt.tp.position = ce->m.CeLastPos;
		break;
	}
	mutex_unlock(&un->mutex);
	return (RET_GET_MEDIA_SUCCESS);
}
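The sketch below is illustrative only and is not part of the original source: it shows how a hypothetical caller might dispatch on get_media()'s documented return codes. The function name example_handle_get_media() and the specific error handling are assumptions.

/*
 * Illustrative sketch (not original source): dispatch on the
 * RET_GET_MEDIA_* codes documented above for get_media().
 * The drive-downing step is only noted in a comment because the
 * helper used for it is not shown in this excerpt.
 */
static void
example_handle_get_media(
	library_t *library,
	drive_state_t *drive,
	robo_event_t *event,
	struct CatalogEntry *ce)
{
	switch (get_media(library, drive, event, ce)) {
	case RET_GET_MEDIA_SUCCESS:
		/* media is mounted; continue processing the request */
		break;

	case RET_GET_MEDIA_REQUEUED:
		/* event was put back on the library's list; do not dispose */
		break;

	case RET_GET_MEDIA_DOWN_DRIVE:
		/* caller should also down the drive (helper not shown here) */
		/* FALLTHROUGH */
	case RET_GET_MEDIA_DISPOSE:
		/* some sort of error; dispose of the event (code is illustrative) */
		disp_of_event(library, event, EINVAL);
		break;
	}
}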
/*
 * Main thread.  Sits on the message queue and waits for something to do.
 *
 * The transport thread for the ibm will issue a delayed request for
 * requests supporting delayed requests.  Otherwise will issue the request
 * and wait for response.
 */
void *
transport_thread(
	void *vxport)
{
	robo_event_t *event;
	xport_state_t *transport = (xport_state_t *)vxport;
	struct sigaction sig_action;
	sigset_t signal_set, full_block_set;

	sigfillset(&full_block_set);
	sigemptyset(&signal_set);	/* signals to except. */
	sigaddset(&signal_set, SIGCHLD);

	mutex_lock(&transport->mutex);	/* wait for go */
	mutex_unlock(&transport->mutex);

	l_mess = transport->library->un->dis_mes[DIS_MES_NORM];
	lc_mess = transport->library->un->dis_mes[DIS_MES_CRIT];
	thr_sigsetmask(SIG_SETMASK, &full_block_set, NULL);
	memset(&sig_action, 0, sizeof (struct sigaction));
	(void) sigemptyset(&sig_action.sa_mask);
	sig_action.sa_flags = SA_RESTART;
	sig_action.sa_handler = SIG_DFL;
	(void) sigaction(SIGCHLD, &sig_action, NULL);

	for (;;) {
		mutex_lock(&transport->list_mutex);
		if (transport->active_count == 0)
			cond_wait(&transport->list_condit,
			    &transport->list_mutex);

		if (transport->active_count == 0) {	/* check to make sure */
			mutex_unlock(&transport->list_mutex);
			continue;
		}

		event = transport->first;
		transport->first = unlink_list(event);
		transport->active_count--;
		mutex_unlock(&transport->list_mutex);
		ETRACE((LOG_NOTICE, "EvTr %#x(%#x) - \n", event,
		    (event->type == EVENT_TYPE_MESS) ?
		    event->request.message.command :
		    event->request.internal.command));
		event->next = NULL;

		/* Everyone must take care of disposing of the event */
		switch (event->type) {
		case EVENT_TYPE_INTERNAL:
			switch (event->request.internal.command) {
			case ROBOT_INTRL_LOAD_MEDIA:
				if (transport->library->un->state <=
				    DEV_IDLE) {
					load(transport->library, event);
				} else {
					disp_of_event(transport->library,
					    event, EINVAL);
				}
				break;

			case ROBOT_INTRL_FORCE_MEDIA:
				force(transport->library, event);
				break;

			case ROBOT_INTRL_DISMOUNT_MEDIA:
				dismount(transport->library, event);
				break;

			case ROBOT_INTRL_INIT:
				init_transport(transport);
				disp_of_event(transport->library, event, 0);
				break;

			case ROBOT_INTRL_VIEW_DATABASE:
				view(transport->library, event);
				break;

			case ROBOT_INTRL_QUERY_DRIVE:
				query_drv(transport->library, event);
				break;

			case ROBOT_INTRL_QUERY_LIBRARY:
				query_lib(transport->library, event);
				break;

			case ROBOT_INTRL_SET_CATEGORY:
				set_category(transport->library, event);
				break;

			case ROBOT_INTRL_SHUTDOWN:
				transport->thread = (thread_t)-1;
				thr_exit((void *)NULL);
				break;

			default:
				disp_of_event(transport->library, event,
				    EINVAL);
				break;
			}
			break;

		case EVENT_TYPE_MESS:
			if (event->request.message.magic != MESSAGE_MAGIC) {
				if (DBG_LVL(SAM_DBG_DEBUG))
					sam_syslog(LOG_DEBUG,
					    "xpt_thr:bad magic: %s:%d.\n",
					    __FILE__, __LINE__);
				disp_of_event(transport->library, event,
				    EINVAL);
				break;
			}
			switch (event->request.message.command) {
			default:
				if (DBG_LVL(SAM_DBG_DEBUG))
					sam_syslog(LOG_DEBUG,
					    "xpt_thr:msq_bad: %s:%d.\n",
					    __FILE__, __LINE__);
				disp_of_event(transport->library, event,
				    EINVAL);
				break;
			}
			break;

		default:
			if (DBG_LVL(SAM_DBG_DEBUG))
				sam_syslog(LOG_DEBUG,
				    "xpt_thr:event_bad: %s:%d.\n",
				    __FILE__, __LINE__);
			disp_of_event(transport->library, event, EINVAL);
			break;
		}
	}
}
void *
manage_list(void *vlibrary)
{
	int exit_status = 0, old_count;
	char *ent_pnt = "manage_list";
	ushort_t delayed;
	time_t now, short_delay, auto_check;
	robo_event_t *current, *next;
	library_t *library = (library_t *)vlibrary;

	mutex_lock(&library->mutex);	/* wait for initialization */
	mutex_unlock(&library->mutex);

	short_delay = 0;
	old_count = 0;
	delayed = 0;
	auto_check = (time(&now) + 5);

	for (;;) {
		mutex_lock(&library->list_mutex);

		/*
		 * See if there is anything to do.  We will wait if the
		 * active count is 0 or its equal to the same value it had
		 * when we last woke up and there is a delayed request.
		 */
		if (library->active_count == 0 ||
		    ((old_count == library->active_count) && delayed)) {
			timestruc_t wait_time;

			wait_time.tv_sec = time(&now) + library->un->delay;
			wait_time.tv_nsec = 0;
			if ((auto_check >= now) &&
			    (auto_check < wait_time.tv_sec))
				wait_time.tv_sec = auto_check;
			if (delayed && (short_delay < wait_time.tv_sec))
				wait_time.tv_sec = short_delay;
			if (wait_time.tv_sec > now) {
				cond_timedwait(&library->list_condit,
				    &library->list_mutex, &wait_time);
				if (library->chk_req) {
					library->chk_req = FALSE;
					if (library->un->state == DEV_ON)
						/*
						 * Force a check
						 */
						auto_check = 0;
				}
			}
		}

		/*
		 * Get the current time
		 */
		time(&now);
		if (auto_check <= now) {
			mutex_unlock(&library->list_mutex);
			(void) check_requests(library);
			auto_check = now + library->un->delay;
			continue;
		}

		/*
		 * If there is something on the list . . .
		 */
		if ((old_count = library->active_count) == 0) {
			mutex_unlock(&library->list_mutex);
			continue;
		}

		short_delay = 0;
		delayed = FALSE;
		current = library->first;
		mutex_unlock(&library->list_mutex);

		do {
			mutex_lock(&library->list_mutex);

			/*
			 * If delayed and the time has not expired,
			 * go on to the next
			 */
			next = current->next;
			if ((current->status.b.delayed) &&
			    (current->timeout > now)) {
				if (short_delay == 0)
					short_delay = current->timeout;
				else if (current->timeout < short_delay)
					short_delay = current->timeout;
				current = next;
				/*
				 * Need to know there are delayed requests
				 */
				delayed = TRUE;
				mutex_unlock(&library->list_mutex);
				continue;
			}

			if (current == library->first)
				library->first = unlink_list(current);
			else
				(void) unlink_list(current);

			current->next = NULL;
			ETRACE((LOG_NOTICE, "LbEv c %#x n %#x (%d)\n",
			    current, library->first, library->active_count));
			library->active_count--;
			library->un->active = library->active_count;
			mutex_unlock(&library->list_mutex);

			/*
			 * Entry is off the list and ready to process
			 */
			switch (current->type) {
			case EVENT_TYPE_INTERNAL:
				switch (current->request.internal.command) {
				case ROBOT_INTRL_AUDIT_SLOT:
					if (start_audit(library, current,
					    current->request.internal.slot)) {
						/*
						 * Unable to find resources,
						 * delay the request and try
						 * later
						 */
						current->status.b.delayed =
						    TRUE;
						current->timeout = now + 10;
						delayed = TRUE;
						add_to_end(library, current);
					}
					current = next;
					break;

				default:
					sam_syslog(LOG_ERR,
					    "%s:Bad internal event: %s:%d\n",
					    ent_pnt, __FILE__, __LINE__);
					break;
				}
				break;

			case EVENT_TYPE_MESS:
				if (current->request.message.magic !=
				    MESSAGE_MAGIC) {
					sam_syslog(LOG_ERR,
					    "%s: Bad magic %#x.", ent_pnt,
					    current->request.message.magic);
					current->completion = EAGAIN;
					disp_of_event(library, current,
					    EBADF);
					current = next;
					continue;
				}
				if (library->un->state >= DEV_OFF &&
				    (current->request.message.command >
				    ACCEPT_DOWN)) {
					current->completion = EAGAIN;
					disp_of_event(library, current,
					    EAGAIN);
					current = next;
					continue;
				}

				switch (current->request.message.command) {
				case MESS_CMD_SHUTDOWN:
					if (DBG_LVL(SAM_DBG_DEBUG))
						sam_syslog(LOG_DEBUG,
						    "received"
						    " shutdown:%s:%d.\n",
						    __FILE__, __LINE__);
					post_shutdown(library);
					threads[SONY_WORK_THREAD] =
					    (thread_t)-1;
					thr_exit(&exit_status);
					break;

				case MESS_CMD_STATE:
					/*
					 * state_request will put the event
					 * back on the free list when
					 * the command is done.
					 */
					state_request(library, current);
					current = next;
					break;

				case MESS_CMD_TAPEALERT:
					/*
					 * tapealert_request will put the
					 * event back on the
					 * free list when the command is done.
					 */
					tapealert_solicit(library, current);
					current = next;
					break;

				case MESS_CMD_SEF:
					/*
					 * sef_request will put the event
					 * back on the free list when the
					 * command is done.
					 */
					sef_solicit(library, current);
					current = next;
					break;

				case MESS_CMD_LABEL:
					if (label_request(library, current)) {
						/*
						 * Unable to find resources,
						 * delay the request, try
						 * later.
						 */
						current->status.b.delayed =
						    TRUE;
						current->timeout = now + 10;
						delayed = TRUE;
						add_to_end(library, current);
					}
					current = next;
					break;

				case MESS_CMD_MOUNT:
					/*
					 * mount_request will take care of
					 * putting the event back on free list
					 */
					if (mount_request(library, current)) {
						/*
						 * Unable to find resources,
						 * delay request and try later.
						 */
						current->status.b.delayed =
						    TRUE;
						current->timeout = now + 10;
						delayed = TRUE;
						add_to_end(library, current);
					}
					current = next;
					break;

				case MESS_CMD_LOAD_UNAVAIL:
					load_unavail_request(library, current);
					current = next;
					break;

				case MESS_CMD_AUDIT:
					if (start_audit(library, current,
					    current->request.message.param.
					    audit_request.slot)) {
						current->status.b.delayed =
						    TRUE;
						current->timeout = now + 10;
						delayed = TRUE;
						add_to_end(library, current);
					}
					current = next;
					break;

				case MESS_CMD_PREVIEW:
					(void) check_requests(library);
					time(&now);
					auto_check = now + library->un->delay;
					disp_of_event(library, current, 0);
					current = next;
					break;

				case MESS_CMD_UNLOAD:
					/*
					 * unload_request will put the event
					 * back on the free list when
					 * the command is done.
					 * unload_request will add the request
					 * to the drive's worklist.
					 */
					unload_request(library, current);
					current = next;
					break;

				case MESS_CMD_TODO:
					todo_request(library, current);
					current = next;
					break;

				case MESS_CMD_ADD:
					add_to_cat_req(library, current);
					current = next;
					break;

				case MESS_CMD_EXPORT:
					/*
					 * export_request will add the request
					 * to the mailbox worklist.
					 */
					export_media(library, current);
					current = next;
					break;

				case MESS_CMD_ACK:
					/*
					 * A no-op.  Dispose of event.
					 */
					disp_of_event(library, current, 0);
					current = next;
					break;

				default:
					sam_syslog(LOG_ERR,
					    "%s: Unknown robot command %d.",
					    ent_pnt,
					    current->request.message.command);
					disp_of_event(library, current, 0);
					current = next;
					break;
				}
				break;

			default:
				sam_syslog(LOG_ERR,
				    "%s: Unknown event type %d.\n",
				    ent_pnt, current->type);
				disp_of_event(library, current, EBADF);
				current = next;
				break;
			}
			break;
		} while (current != NULL);
	}
}