/* * Get information about each of the queues in the list of lists. If * schd_get_queue_limits() fails, return the error condition. It may * be a transient or a hard failure, which the caller may want to deal * with. If all queues are successful, return '0'. */ int schd_get_queues(void) { int ret; QueueList *qptr; ret = schd_get_queue_limits(schd_SubmitQueue->queue); if (ret) { DBPRT(("schd_get_queues: get_queue_limits for %s failed.\n", schd_SubmitQueue->queue->qname)); return(-1); } for (qptr = schd_BatchQueues; qptr != NULL; qptr = qptr->next) { ret = schd_get_queue_limits(qptr->queue); if (ret) DBPRT(("schd_get_queues: get_queue_limits for %s failed.\n", qptr->queue->qname)); } return (0); }
// // Read data from stream. // static int readstream(obex_t *handle, obex_object_t *object) { int actual; obexsrv_t *srv = OBEX_GetUserData(handle); const uint8_t *buf; int len; if (srv->sfd < 0) { /* create temporary storage for an object */ srv->name = strdup("/tmp/obex_tmp_XXXXXX"); if (!srv->name) return -1; srv->sfd = mkstemp(srv->name); if (srv->sfd < 0) { DBPRT("unable to create tmp file: %s\n", srv->name); free(srv->name); srv->name = NULL; return srv->sfd; } DBPRT("created tmp file: %s\n", srv->name); srv->flags = 0x01; } srv->streamming = TRUE; actual = OBEX_ObjectReadStream(handle, object, &buf); DBPRT("got stream: %d\n", actual); if (actual > 0) { len = write(srv->sfd, buf, actual); } return actual; }
/**
 * @brief
 * Internal session memory usage function.
 *
 * Walks the global proc_array and, for every process injob() assigns to
 * this job, accumulates pr_size converted from pages to bytes.
 *
 * @param[in] job - job pointer
 *
 * @return ulong
 * @retval the total number of bytes of address
 *  space consumed by all current processes within the job.
 *
 */
static unsigned long mem_sum(job *pjob)
{
    int i;
    int inproc = 0;  /* NOTE(review): never set non-zero in this function,
                      * so the "else break" arm below is unreachable —
                      * confirm against other ports of this routine. */
    rlim64_t segadd;
    prpsinfo_t *pi;

    DBPRT(("%s: entered pagesize %d\n", __func__, pagesize))

    segadd = 0;
    /* No global session id recorded: nothing to account for yet. */
    if (pjob->ji_globid == NULL)
        return (segadd);

    for (i=0; i<nproc; i++) {
        pi = &proc_array[i].info;

        /* Skip processes that do not belong to this job. */
        if (!injob(pjob, &proc_array[i].procash))
            if (!inproc)
                continue;
            else
                break;

        DBPRT(("%s: %s(%d:%d) mem %llu\n", __func__, pi->pr_fname,
            pi->pr_sid, pi->pr_pid,
            (rlim64_t)((rlim64_t)pi->pr_size * (rlim64_t)pagesize)))

        /* pr_size is multiplied by pagesize: accumulate in bytes. */
        segadd += (rlim64_t)((rlim64_t)pi->pr_size*(rlim64_t)pagesize);
    }

    /* NOTE(review): segadd is rlim64_t but the return type is unsigned
     * long — may truncate on 32-bit builds; confirm intended. */
    DBPRT(("%s: total mem %llu\n\n", __func__, segadd))

    return (segadd);
}
/*
 * Completion callback for a finished server request: update session
 * state for the command that ended, then release every per-object
 * resource (spool file, its name, the transfer buffer) and turn off
 * streaming mode.
 */
void obexsrv_reqdone(obex_t *handle, obex_object_t *object, int obex_cmd, int obex_rsp)
{
	obexsrv_t *srv = OBEX_GetUserData(handle);

	DBPRT("Server request finished!");

	if (obex_cmd == OBEX_CMD_DISCONNECT) {
		DBPRT("Disconnect done!");
		srv->state = SRVSTATE_CLOSED;
		srv->disconnect(srv);
	} else {
		DBPRT(" Command (%02x) has now finished", obex_cmd);
		srv->serverdone = TRUE;
	}

	/* cleanup resources */
	if (srv->sfd >= 0) {
		close(srv->sfd);
		srv->sfd = -1;
	}
	if (srv->name) {
		/* flag 0x01 means we created the temp file ourselves */
		if (srv->flags & 0x01)
			unlink(srv->name);
		free(srv->name);
		srv->name = NULL;
	}
	free(srv->buf);		/* free(NULL) is a no-op */
	srv->buf = NULL;
	srv->streamming = FALSE; /* disable streaming */
}
/**
 * @brief
 * Internal session resident memory size function.
 *
 * Sums pr_rssize (converted from pages to bytes) over every process
 * that injob() assigns to this job.
 *
 * @param[in] job - job pointer
 *
 * @return (a 64 bit integer) the number of bytes used by session
 *
 */
static rlim64_t resi_sum(job *pjob)
{
    int i;
    int inproc = 0;  /* NOTE(review): never set non-zero here, so the
                      * "else break" arm below is unreachable — confirm
                      * against other ports of this routine. */
    rlim64_t resisize, resisub;  /* NOTE(review): resisub is unused in
                                  * this variant of the function. */
    prpsinfo_t *pi;

    DBPRT(("%s: entered pagesize %d\n", __func__, pagesize))

    resisize = 0;

    for (i=0; i<nproc; i++) {
        pi = &proc_array[i].info;

        /* Skip processes that do not belong to this job. */
        if (!injob(pjob, &proc_array[i].procash))
            if (!inproc)
                continue;
            else
                break;

        DBPRT(("%s: %s(%d:%d) rss %llu (%lu pages)\n", __func__,
            pi->pr_fname, pi->pr_sid, pi->pr_pid,
            (rlim64_t)((rlim64_t)pi->pr_rssize*(rlim64_t)pagesize),
            pi->pr_rssize))

        /* pr_rssize is multiplied by pagesize: accumulate in bytes. */
        resisize += (rlim64_t)((rlim64_t)pagesize * pi->pr_rssize);
    }

    DBPRT(("%s: total rss %llu\n\n", __func__, resisize))

    return (resisize);
}
/*
 * Store the status report held in "temp" on node np, stamping it with
 * the time of this update.  "temp" is consumed (freed) here.  Returns
 * PBSE_NONE on success or the error from node_status_list().
 */
int save_node_status(

  struct pbsnode *np,
  pbs_attribute  *temp)

  {
  char timestamp[MAXLINE];
  int  rc;

  /* it's nice to know when the last update happened */
  snprintf(timestamp, sizeof(timestamp), "rectime=%ld", (long)time(NULL));

  if (decode_arst(temp, NULL, NULL, timestamp, 0))
    {
    DBPRT(("is_stat_get: cannot add date_attrib\n"));
    }

  /* insert the information from "temp" into np */
  rc = node_status_list(temp, np, ATR_ACTION_ALTER);

  if (rc != PBSE_NONE)
    {
    DBPRT(("is_stat_get: cannot set node status list\n"));
    }

  free_arst(temp);

  return(rc);
  }  /* END save_node_status() */
/*
 * Count the jobs in 'queue' that are currently running (state 'R') and
 * owned by 'user'.
 *
 * BUGFIX: the state test was inverted ("!= 'R'"), so the function
 * counted the user's NON-running jobs — contradicting its name, the
 * comment below, and its own debug output.  Also restored the 'id'
 * string, which was commented out while still referenced by DBPRT().
 */
static int user_running(Queue *queue, char *user)
{
    char *id = "user_running";
    Job  *job;
    int   jobs_running = 0;

    DBPRT(("%s(%s, %s)\n", id, user, queue->qname));

    /*
     * Count the jobs in the queue's job list that are running and owned
     * by this user.
     */
    for (job = queue->jobs; job != NULL; job = job->next) {
        if ((job->state == 'R') && (!strcmp(job->owner, user)))
            jobs_running++;
    }

    DBPRT((" user %s has %d job%s running in queue '%s'\n", user,
        jobs_running, jobs_running == 1 ? "" : "s", queue->qname));

    return (jobs_running);
}
/**
 * @brief
 * Internal session resident memory size function. COMPLEX VERSION
 *
 * Estimates resident memory by walking each in-job process's address
 * space map and pro-rating each mapping by its reference count, so
 * shared pages are divided among the processes sharing them.
 *
 * @param[in] job - job pointer
 *
 * @return (a 64 bit integer) the number of bytes used by session
 *
 */
static rlim64_t resi_sum(job *pjob)
{
    rlim64_t resisize, resisub;
    int num, i, j;
    int inproc = 0;  /* NOTE(review): never set non-zero here, so the
                      * "else break" arm below is unreachable — confirm
                      * against other ports of this routine. */
    prpsinfo_t *pi;
    prmap_sgi_t *mp;
    u_long lastseg, nbps;

    DBPRT(("%s: entered pagesize %d\n", __func__, pagesize))

    resisize = 0;
    /* No global session id recorded: nothing to account for yet. */
    if (pjob->ji_globid == NULL)
        return (resisize);

    lastseg = 99999;     /* sentinel: no segment seen yet */
    nbps = (pagesize / sizeof(uint_t)) * pagesize;
    /* sysmacros.h says "4Meg" ...hmmm */

    for (i=0; i<nproc; i++) {
        pi = &proc_array[i].info;

        /* Skip processes that do not belong to this job. */
        if (!injob(pjob, &proc_array[i].procash))
            if (!inproc)
                continue;
            else
                break;

        DBPRT(("%s: %s(%d:%d) rss %llu (%lu pages)\n", __func__,
            pi->pr_fname, pi->pr_sid, pi->pr_pid,
            (rlim64_t)((rlim64_t)pi->pr_rssize*(rlim64_t)pagesize),
            pi->pr_rssize))

        resisub = 0;
        num = proc_array[i].map_num;
        mp = proc_array[i].map;

        /* Walk this process's address-space map entries. */
        for (j=0; j<num; j++, mp++) {
            u_long cnt = mp->pr_mflags >> MA_REFCNT_SHIFT; /* # sharers */
            u_long end = (u_long)mp->pr_vaddr + mp->pr_size - 1;
            u_long seg1 = (u_long)mp->pr_vaddr / nbps;
            u_long seg2 = end / nbps;
            rlim64_t numseg = seg2 - seg1;

            /* Charge a segment only the first time it is seen. */
            if (lastseg != seg2)
                numseg++;
            lastseg = seg2;

            /* Pro-rate by reference count so shared mappings are split
             * among their sharers; add a weighted working-set term. */
            numseg = numseg*pagesize/cnt;
            numseg += mp->pr_wsize*pagesize/MA_WSIZE_FRAC/cnt;

            resisub += numseg;
            DBPRT(("%s: %d\t%lluk\t%lluk\n", __func__, j,
                numseg/1024, resisub/1024))
        }

        resisize += resisub;
        DBPRT(("%s: %s subtotal rss %llu\n", __func__, pi->pr_fname,
            resisub))
    }

    DBPRT(("%s: total rss %llu\n\n", __func__, resisize))

    return (resisize);
}
/*
 * Handle an incoming OBEX CONNECT request: read the fixed 4-byte
 * non-header data (version/flags/MTU), pick up an optional TARGET
 * header, hand the connection to the registered handler, and build the
 * response — on success with a target, a CONNECTION id plus WHO header.
 */
void obexsrv_connect(obex_t *handle, obex_object_t *object)
{
	obex_headerdata_t hv;
	uint8_t hi;
	int hlen;
	uint8_t *nonhdrdata;
	obex_target_t target = {0, NULL};
	obexsrv_t *srv = OBEX_GetUserData(handle);
	int err;

	DBPRT("");
	/* CONNECT carries exactly 4 bytes of non-header data. */
	if(OBEX_ObjectGetNonHdrData(object, &nonhdrdata) == 4) {
#ifdef CONFIG_AFFIX_DEBUG
		obex_connect_hdr_t *hdr = (obex_connect_hdr_t*)nonhdrdata;
		DBPRT("Version: 0x%02x. Flags: 0x%02x OBEX packet length:%d",
			hdr->version, hdr->flags, ntohs(hdr->mtu));
#endif
	} else {
		BTERROR("Invalid packet content.");
	}

	while(OBEX_ObjectGetNextHeader(handle, object, &hi, &hv, &hlen)) {
		switch (hi) {
			case OBEX_HDR_TARGET:
				target.data = (void*)hv.bs;
				target.len = hlen;
				/* A 16-byte target is printed as a UUID.
				 * NOTE(review): the casts below index through a
				 * void* (non-ISO extension) and may perform
				 * misaligned loads — confirm the target
				 * platforms tolerate this. */
				if (hlen == 16)
					DBPRT("got TARGET. uuid_t: %08X-%04X-%04X-%04X-%08X%04X",
						*(uint32_t *)&target.data[0],
						*(uint16_t *)&target.data[4],
						*(uint16_t *)&target.data[6],
						*(uint16_t *)&target.data[8],
						*(uint32_t *)&target.data[10],
						*(uint16_t *)&target.data[14]);
				else
					DBPRT("got TARGET. unknown fmt");
				break;
			default:
				DBPRT(" Skipped header %02x", hi);
				break;
		}
	}

	// call handler; a non-negative return is used as the connection id
	err = srv->connect(srv, &target);
	if (err < 0) {
		/* error */
		OBEX_ObjectSetRsp(object, OBEX_RSP_INTERNAL_SERVER_ERROR,
			OBEX_RSP_INTERNAL_SERVER_ERROR);
	} else {
		OBEX_ObjectSetRsp(object, OBEX_RSP_SUCCESS, OBEX_RSP_SUCCESS);
		if (target.data) {
			hv.bq4 = err;	/* set connection id */
			OBEX_ObjectAddHeader(handle, object, OBEX_HDR_CONNECTION, hv, 4, 0);
			/* echo the target back as the WHO header */
			hv.bs = target.data;
			OBEX_ObjectAddHeader(handle, object, OBEX_HDR_WHO, hv, target.len, 0);
		}
	}
}
/*
 * Dispatch an incoming OBEX request to its command handler; unknown
 * commands are rejected with NOT_IMPLEMENTED.
 */
void obexsrv_req(obex_t *handle, obex_object_t *object, int cmd)
{
	if (cmd == OBEX_CMD_CONNECT) {
		obexsrv_connect(handle, object);
	} else if (cmd == OBEX_CMD_DISCONNECT) {
		DBPRT("We got a disconnect-request");
		OBEX_ObjectSetRsp(object, OBEX_RSP_SUCCESS, OBEX_RSP_SUCCESS);
	} else if (cmd == OBEX_CMD_GET) {
		obexsrv_get(handle, object);
	} else if (cmd == OBEX_CMD_PUT) {
		obexsrv_put(handle, object);
	} else if (cmd == OBEX_CMD_SETPATH) {
		obexsrv_setpath(handle, object);
	} else {
		BTERROR(" Denied %02x request", cmd);
		OBEX_ObjectSetRsp(object, OBEX_RSP_NOT_IMPLEMENTED, OBEX_RSP_NOT_IMPLEMENTED);
	}
}
// // Add more data to stream. // static int writestream(obex_t *handle, obex_object_t *object) { int actual; obexsrv_t *srv = OBEX_GetUserData(handle); obex_headerdata_t hv; actual = read(srv->sfd, srv->buf, OBEX_STREAM_CHUNK); DBPRT("sent %d bytes\n", actual); if(actual > 0) { /* Read was ok! */ hv.bs = srv->buf; OBEX_ObjectAddHeader(handle, object, OBEX_HDR_BODY, hv, actual, OBEX_FL_STREAM_DATA); } else if(actual == 0) { /* EOF */ hv.bs = srv->buf; OBEX_ObjectAddHeader(handle, object, OBEX_HDR_BODY, hv, 0, OBEX_FL_STREAM_DATAEND); } else { /* Error */ hv.bs = NULL; OBEX_ObjectAddHeader(handle, object, OBEX_HDR_BODY, hv, 0, OBEX_FL_STREAM_DATA); } return actual; }
/*
 * Recursively create directory 'new_dir' (like "mkdir -p"), creating
 * each missing intermediate component with the given mode.
 *
 * Returns 0 on success (including when the directory already exists and
 * is accessible), -1 on error.
 *
 * BUGFIX: path prefixes were copied into a PATH_MAX-sized buffer with
 * no length check, overflowing 'tmpdir' for paths longer than PATH_MAX.
 */
int rmkdir(char *new_dir, int mode)
{
    size_t i = 0;

    DBPRT("new_dir: %s\n", new_dir);

    if (new_dir == NULL || new_dir[0] == '\0')
        return -1;

    /* reject paths that would overflow the component buffer below */
    if (strlen(new_dir) > PATH_MAX)
        return -1;

    /* already there and usable? */
    if (access(new_dir, R_OK|X_OK) == 0)
        return 0;

    if (new_dir[0] == '/')
        i++;    /* skip the leading '/' */

    for (; new_dir[i] != '\0'; i++) {
        if (new_dir[i] == '/') {
            char tmpdir[PATH_MAX + 1];

            /* create the intermediate component new_dir[0..i) */
            memcpy(tmpdir, new_dir, i);
            tmpdir[i] = '\0';
            if ((mkdir(tmpdir, mode) == -1) && (errno != EEXIST))
                return -1;
        }
    }

    if (mkdir(new_dir, mode) == -1 && errno != EEXIST)
        return -1;

    return 0;
}
/*
 * Dispatch an HCI event received by the manager to its handler routine.
 * Only connection requests propagate a status; other events return 0.
 */
int event_handler(HCI_Event_Packet_Header *event, int devnum)
{
	int status = 0;

	DBFENTER;
	DBPRT("Manager has an event: 0x%02x\n", event->EventCode);

	if (event->EventCode == HCI_E_CONNECTION_REQUEST) {
		status = connection_request((void *)event, devnum);
	} else if (event->EventCode == HCI_E_PIN_CODE_REQUEST) {
		pin_code_request((void *)event, devnum);
	} else if (event->EventCode == HCI_E_LINK_KEY_REQUEST) {
		link_key_request((void *)event, devnum);
	}
#ifdef CONFIG_AFFIX_UPDATE_CLOCKOFFSET
	else if (event->EventCode == HCI_E_READ_CLOCK_OFFSET_COMPLETE) {
		read_clock_offset_complete((void *)event, devnum);
	}
#endif

	DBFEXIT;
	return status;
}
/** * @brief * Restart the job from the checkpoint file. * * @param[in] ptask - pointer to task * @param[in] file - filename * * @return long * @retval session id Success * @retval -1 Error */ long mach_restart(task *ptask, char *file) { #if MOM_CHECKPOINT == 1 ckpt_id_t rc; ash_t momash; ash_t oldash = 0; char cvtbuf[20]; cpr_flags = CKPT_NQE; /* KLUDGE to work-around SGI problem, for some reason ckpt_restart() */ /* passes open file descriptor to /proc to restarted process */ if (pdir) closedir(pdir); /* To restart the job with its old ASH, Mom must be in that ASH */ /* When she does the restart. However, before changing to that */ /* ASH, Mom must put herself in a new ASH all by herself, otherwise */ /* she will take other system daemons with her into the job's ASH */ momash = getash(); newarraysess(); /* isolate Mom in a ASH by herself */ if (ptask->ti_job->ji_globid != NULL) { /* now get job's old ASH and set it */ sscanf(ptask->ti_job->ji_globid, "%llx", &oldash); if (setash(oldash) == -1) { DBPRT(("setash failed before restart, errno = %d", errno)) } }
/**
 * @brief
 * Periodic processing hook: once every SAMPLE_DELTA seconds, sample the
 * profile counters and fold them into the exponentially smoothed load
 * value.  (The original header said "Don't need any periodic
 * processing", which contradicts the body — presumably copied from
 * another port.)
 */
void end_proc()
{
    DWORD now, delta;
    DWORD nrun;

    now = timeGetTime();
    delta = now - last_time;   /* milliseconds since the previous sample */

    /* Not yet time for a new sample. */
    if (delta <= SAMPLE_DELTA*1000) {
        return;
    }
    wait_time = SAMPLE_DELTA;
    if (!get_profile(&mom_prof)) {
        return;
    }

    /* effective run-queue contribution for this interval */
    nrun = mom_prof.value + num_acpus + nrun_factor;
    /* fixed-point exponential smoothing of the load value */
    load = ((load * CEXP) + (nrun * (FSCALE - CEXP) * FSCALE)) >> PROF_FSHIFT;
    DBPRT(("load = %d, mom_prof=%d num_acpus=%d nrun_factor=%d",
        load, mom_prof.value, num_acpus, nrun_factor))
    last_time = now;
    return;
}
/**
 * @brief
 * State whether MOM main loop has to poll this job to determine if some
 * limits are being exceeded.
 *
 * @param[in] pjob - job pointer
 *
 * @return int
 * @retval TRUE if polling is necessary
 * @retval FALSE otherwise.
 *
 * NOTE: Actual polling is done using the mom_over_limit machine-dependent function.
 *
 * FIX: the original resource-name list tested "mem" twice; the
 * duplicate comparison has been removed (no behavior change).
 */
int mom_do_poll(job *pjob)
{
    char     *pname;
    resource *pres;

    DBPRT(("%s: entered\n", __func__))

    assert(pjob != NULL);
    assert(pjob->ji_wattr[(int)JOB_ATR_resource].at_type == ATR_TYPE_RESC);
    pres = (resource *)
        GET_NEXT(pjob->ji_wattr[(int)JOB_ATR_resource].at_val.at_list);

    /* Poll whenever any pollable resource limit is present. */
    while (pres != NULL) {
        assert(pres->rs_defin != NULL);
        pname = pres->rs_defin->rs_name;
        assert(pname != NULL);
        assert(*pname != '\0');

        if (strcmp(pname, "walltime") == 0 ||
            strcmp(pname, "mem") == 0 ||
            strcmp(pname, "ncpus") == 0 ||
            strcmp(pname, "cput") == 0 ||
            strcmp(pname, "vmem") == 0)
            return (TRUE);

        pres = (resource *)GET_NEXT(pres->rs_link);
    }

    return (FALSE);
}
/*
 * Persist the extra TM socket/task information for a job: stdout and
 * stderr ports, task id, and node id.  Skipped when the stdout value is
 * in the reserved port range, i.e. no real port has been assigned yet.
 * Always returns 0.
 */
int save_tmsock(

  job *pjob)  /* pointer to job structure */

  {
  static int sizeofint = sizeof(int);

  if ((pjob->ji_stdout > 0) && (pjob->ji_stdout < 1024))
    {
    /* We don't have real port numbers (yet), so don't bother */

    return(0);
    }

  DBPRT(("saving extra job info stdout=%d stderr=%d taskid=%u nodeid=%u\n",
    pjob->ji_stdout, pjob->ji_stderr, pjob->ji_taskid, pjob->ji_nodeid));

  /* FIXME: need error checking here */

  save_struct((char *)&pjob->ji_stdout, sizeofint);
  save_struct((char *)&pjob->ji_stderr, sizeofint);
  save_struct((char *)&pjob->ji_taskid, sizeof(tm_task_id));
  save_struct((char *)&pjob->ji_nodeid, sizeof(tm_node_id));

  return(0);
  }  /* END save_tmsock() */
int schd_get_queue_util(void) { int ret; char *id = "get_queue_util"; QueueList *qptr; void schd_calc_suspended_jobs(void); /* first, get status of nodes from the server. */ get_node_status(); /* next, get status of queues for those nodes from the server. */ qptr = schd_SubmitQueue; ret = schd_get_queue_info(qptr->queue); if (ret) DBPRT(("get_queue_util: get_queue_info for %s failed.\n", schd_SubmitQueue->queue->qname)); for (qptr = schd_BatchQueues; qptr != NULL; qptr = qptr->next) { if (qptr->queue->flags & QFLAGS_NODEDOWN) { sprintf(log_buffer, "Skipping UNAVAILABLE node %s", qptr->queue->exechost); DBPRT(("%s: %s\n", id, log_buffer)); log_record(PBSEVENT_ERROR, PBS_EVENTCLASS_SERVER, id, log_buffer); } else { ret = schd_get_queue_info(qptr->queue); if (ret) DBPRT(("get_queue_util: get_queue_info for %s failed.\n", qptr->queue->qname)); } } /* * Update queue resources assigned for suspended jobs. */ schd_calc_suspended_jobs(); return (0); }
/*
 * Emit a textual representation of 'uuid' to the debug log, prefixed
 * with its size class (16/32/128-bit) when known.
 */
void sdp_print_uuid(uuid_t *uuid)
{
	char *text = sdp_uuid2str(uuid);

	if (uuid->type == SDP_DTD_UUID16)
		DBPRT(" uint16_t : 0x%s", text);
	else if (uuid->type == SDP_DTD_UUID32)
		DBPRT(" uint32_t : 0x%s", text);
	else if (uuid->type == SDP_DTD_UUID128)
		DBPRT(" uint128_t : 0x%s", text);
	else
		DBPRT("%s", text);
}
static int schd_alterserver(int sv_conn, char *name, char *value) { char *id = "schd_alterserver"; int err; AttrOpList alist; if (schd_TEST_ONLY) { DBPRT(("%s: Would have altered server %d:\n", id, sv_conn)); DBPRT(("%s: name: %s, value: %s\n", id, name ? name : "(NULL)", value ? value : "(NULL)")); return 0; } /* Fill in the attribute struct with appropriate parameters */ alist.resource = NULL; alist.value = value; alist.name = name; alist.next = NULL; alist.op = SET; err = pbs_manager(sv_conn, MGR_CMD_SET, MGR_OBJ_SERVER, "", &alist, NULL); if (err) { (void)sprintf(log_buffer, "pbs_alterserver(%s, %s) failed: %d", name, value, pbs_errno); log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer); } return (err); }
/*
 * Early hint for an incoming request: enable streaming mode for PUT so
 * the body arrives via readstream(), then accept the request
 * unconditionally.
 */
void obexsrv_reqhint(obex_t *handle, obex_object_t *object, int cmd)
{
	DBPRT("reqhint: %#x\n", cmd);

	if (cmd == OBEX_CMD_PUT)
		OBEX_ObjectReadStream(handle, object, NULL);

	/* accept all */
	OBEX_ObjectSetRsp(object, OBEX_RSP_CONTINUE, OBEX_RSP_SUCCESS);
}
/*
 * Alter a job's actual attributes.  Sets attribute 'name' (optionally
 * qualified by resource 'rsrc') to 'value' on 'job' via pbs_alterjob().
 * In TEST_ONLY mode, only log what would have been done.  Returns 0 on
 * success or the pbs_alterjob() error code.
 */
int schd_alterjob(int sv_conn, Job *job, char *name, char *value, char *rsrc)
{
    char *id = "schd_alterjob";
    AttrList attr;
    int rc;

    if (schd_TEST_ONLY) {
        DBPRT(("%s: Would have altered job %s:\n", id, job->jobid));
        DBPRT(("%s: rsrc: %s, name: %s, value: %s\n", id,
            rsrc ? rsrc : "(NULL)", name ? name : "(NULL)",
            value ? value : "(NULL)"));
        return 0;
    }

    /* Fill the attribute structure with function parameters */
    attr.name = name;
    attr.value = value;
    attr.resource = rsrc;
    attr.next = NULL;

    rc = pbs_alterjob(sv_conn, job->jobid, &attr, NULL);
    if (rc) {
        (void)sprintf(log_buffer, "pbs_alterjob(%s, %s, %s, %s) failed: %d",
            job->jobid, name, value, rsrc, pbs_errno);
        log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer);
    }

    return (rc);
}
/*
 * Populate the queue struct with the information needed for scheduling;
 * querying the resource monitor for queue->exechost's information.
 * Always returns 0; an unreachable resmom marks the queue NODEDOWN.
 */
int schd_get_queue_info(Queue *queue)
{
    char *id = "get_queue_info";

    /* Treat unspecified assigned-resource counts as zero. */
    if (queue->ncpus_assn == UNSPECIFIED)
        queue->ncpus_assn = 0;
    if (queue->mem_assn == UNSPECIFIED)
        queue->mem_assn = 0;
    if (queue->running == UNSPECIFIED)
        queue->running = 0;

    /*
     * Nothing more to do for the submit queue or a stopped queue —
     * their exec hosts are not queried.
     */
    if (strcmp(queue->qname, schd_SubmitQueue->queue->qname) == 0 ||
            (queue->flags & QFLAGS_STOPPED) != 0)
        return (0);

    /*
     * Get the resources for this queue from the resource monitor (if
     * available).  If the resmom is not accessible, disable the queue.
     */
    queue->rsrcs = schd_get_resources(queue->exechost);

    if (queue->rsrcs == NULL) {
        (void)sprintf(log_buffer,
            "Can't get resources for %s@%s - marking unavailable.",
            queue->qname, queue->exechost);
        log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer);
        DBPRT(("%s: %s\n", id, log_buffer));
        queue->flags |= QFLAGS_NODEDOWN;
        return (0);
    }

    /* Account for this queue's resources. */
    queue->rsrcs->ncpus_alloc += queue->ncpus_assn;
    queue->rsrcs->mem_alloc += queue->mem_assn;
    queue->rsrcs->njobs += queue->running;

    /* Clamp the queue's CPU ceiling to what the host actually has. */
    if (queue->ncpus_max > queue->rsrcs->ncpus_total)
        queue->ncpus_max = queue->rsrcs->ncpus_total;

    return (0);
}
/**
 * @brief
 * This is the log handler for tpp implemented in the daemon. The pointer to
 * this function is used by the Libtpp layer when it needs to log something to
 * the daemon logs
 *
 * @param[in] level - Logging level
 * @param[in] objname - Name of the object about which logging is being done
 * @param[in] mess - The log message
 *
 * @return void
 */
static void log_tppmsg(int level, const char *objname, char *mess)
{
    char id[2*PBS_MAXHOSTNAME];
    int thrd_index;
    int etype = log_level_2_etype(level);

    /* Tag the log id with the tpp thread that produced the message
     * (-1 means the main thread). */
    thrd_index = tpp_get_thrd_index();
    if (thrd_index == -1)
        snprintf(id, sizeof(id), "%s(Main Thread)",
            (objname != NULL)? objname : msg_daemonname);
    else
        snprintf(id, sizeof(id), "%s(Thread %d)",
            (objname != NULL)? objname : msg_daemonname, thrd_index);

    log_event(etype, PBS_EVENTCLASS_TPP, level, id, mess);
    DBPRT(("%s\n", mess));
}
/*
 * Recover the extra TM socket/task information for a job from its save
 * file: stdout port, stderr port, task id and node id, in that order
 * (the mirror of save_tmsock()).  Returns 0 on success, 1 if any read
 * fails.
 */
int recov_tmsock(

  int  fds,   /* I - descriptor open on the job save file */
  job *pjob)  /* I - job whose fields are populated */

  {
  char *id = "recov_tmsock";

  static int sizeofint = sizeof(int);

  if (read(fds, (char *)&pjob->ji_stdout, sizeofint) != sizeofint)
    {
    log_err(errno, id, "read");

    return(1);
    }

  if (read(fds, (char *)&pjob->ji_stderr, sizeofint) != sizeofint)
    {
    log_err(errno, id, "read");

    return(1);
    }

  if (read(fds, (char *)&pjob->ji_taskid, sizeof(tm_task_id)) != sizeof(tm_task_id))
    {
    log_err(errno, id, "read");

    return(1);
    }

  if (read(fds, (char *)&pjob->ji_nodeid, sizeof(tm_node_id)) != sizeof(tm_node_id))
    {
    log_err(errno, id, "read");

    return(1);
    }

  DBPRT(("recovered extra job info stdout=%d stderr=%d taskid=%u nodeid=%u\n",
    pjob->ji_stdout, pjob->ji_stderr, pjob->ji_taskid, pjob->ji_nodeid));

  return(0);
  }  /* END recov_tmsock() */
/**
 * @brief
 * Setup for polling.
 *
 * Open kernel device and get namelist info.
 * Also open sgi project files.
 *
 * @return int
 * @retval 0 Success
 * @retval PBSE_SYSTEM error
 * @retval -1 error
 */
int mom_open_poll()
{
    extern int open_sgi_proj();

    DBPRT(("%s: entered\n", __func__))

    pagesize = getpagesize();

    /* Allocate the initial process table; it may grow later. */
    proc_array = (struct proc_info *)calloc(TBL_INC, sizeof(struct proc_info));
    if (proc_array == NULL) {
        log_err(errno, __func__, "malloc");
        return (PBSE_SYSTEM);
    }
    max_proc = TBL_INC;

    return (open_sgi_proj());
}
/* * Get information about each of the queues in the list of lists. If * schd_get_queue_limits() fails, return the error condition. It may * be a transient or a hard failure, which the caller may want to deal * with. If all queues are successful, return '0'. */ static int get_all_queue_info(int numqlists, ...) { va_list ap; int count = 0, ret; QueueList *list; QueueList *qptr; va_start(ap, numqlists); while (count < numqlists) { list = va_arg(ap, QueueList *); for (qptr = list; qptr != NULL; qptr = qptr->next) { /* * Get the limits, current resources, and any jobs for this * queue. */ if ((ret = schd_get_queue_limits(qptr->queue)) != 0) { DBPRT(("get_all_queue_info: get_queue_limits for %s failed.\n", qptr->queue->qname)); va_end(ap); return (ret); } /* * Set the queue flags if limits are exceeded. Don't bother * getting a reason string. */ schd_check_queue_limits(qptr->queue, NULL); } count ++; } va_end(ap); return (0); }
/** * @brief * start and compose command * * @param[in] stream - socket descriptor * @param[in] com - command * * @return int * @retval 0 success * @retval !0 error * */ static int startcom(int stream, int com) { int ret; setup_dis(stream); ret = diswsi(stream, RM_PROTOCOL); if (ret == DIS_SUCCESS) { ret = diswsi(stream, RM_PROTOCOL_VER); if (ret == DIS_SUCCESS) ret = diswsi(stream, com); } if (ret != DIS_SUCCESS) { DBPRT(("startcom: diswsi error %s\n", dis_emsg[ret])) pbs_errno = errno; } return ret; }
/*
 * obex_start_server()
 *
 * runs obex server fds transport
 *
 * Drives the OBEX server over the given read/write file descriptors,
 * handling requests until the session is closed by the peer or input
 * handling fails.  Returns 0 when the session ends, -1 if OBEX_Init()
 * fails.
 */
int obexsrv_run(obexsrv_t *srv, int rfd, int wfd)
{
	int err = 0, to;

	srv->handle = OBEX_Init(OBEX_TRANS_FD, obexsrv_event, 0);
	if (!srv->handle) {
		BTERROR( "OBEX_Init failed:%s", strerror(errno));
		return -1;
	}
	/* init some members */
	srv->sfd = -1;
	srv->name = NULL;
	srv->flags = 0;
	srv->buf = NULL;
	// set private pointer
	OBEX_SetUserData(srv->handle, srv);
	FdOBEX_TransportSetup(srv->handle, rfd, wfd, 0);
	for (;;) {
		/* request processing loop */
		DBPRT("Processing request...\n");
		srv->serverdone = FALSE;
		to = 1000;	/* long timeout while idle, waiting for the next
				 * request (original comment said "unlimmited",
				 * but the value passed is 1000) */
		while (!srv->serverdone) {
			if ((err = OBEX_HandleInput(srv->handle, to)) < 0) {
				BTERROR("Error while doing OBEX_HandleInput()");
				break;
			}
			to = 5;	/* short timeout once a request is in progress */
		}
		if (srv->state == SRVSTATE_CLOSED)
			break;
		if (err < 0)
			break;
	}
	OBEX_Cleanup(srv->handle);
	srv->handle = NULL;
	return 0;
}
/*
 * If the HPM counters on the host associated with this Resources struct
 * are in user mode but not in use by any job, try to return them to the
 * global system mode; if that fails and forcible revocation is enabled,
 * revoke them (which kills processes on the remote host).  Returns 0
 * when there is nothing to do, otherwise the schd_hpm_ctl() result.
 */
static int revoke_unused_hpm(Resources *rsrcs)
{
    char *id = "revoke_unused_hpm";
    int rc;

    /* Counters not in user mode, or still in use: nothing to do. */
    if (!(rsrcs->flags & RSRCS_FLAGS_HPM_USER) ||
            (rsrcs->flags & RSRCS_FLAGS_HPM_IN_USE))
        return (0);

    DBPRT(("revoke_unused_hpm: host %s HPM in user mode, but nobody "
        "using it.\n", rsrcs->exechost));

    rc = schd_hpm_ctl(rsrcs, HPM_SETUP_GLOBALMODE, NULL);

    /* Done unless the switch failed and forcible revocation is on. */
    if (!schd_REVOKE_HPM || !rc)
        return rc;

    /*
     * Somebody is using the hpm counters, but nobody has asked for
     * them.  Attempt a forcible revocation.  This will kill processes
     * on the remote host to free the counters.
     */
    (void)sprintf(log_buffer,
        "Attempting forcible revocation of HPM counters for host %s",
        rsrcs->exechost);
    log_record(PBSEVENT_SYSTEM, PBS_EVENTCLASS_SERVER, id, log_buffer);

    return schd_hpm_ctl(rsrcs, HPM_SETUP_REVOKE, NULL);
}