/* Const-qualified accessor for the dense-tag storage of a sequence.
 * NOTE(review): as written this calls itself with argument types identical
 * to its own signature, which is unconditional infinite recursion.
 * Presumably it was meant to forward to a different get_array overload
 * (e.g. one taking an Error* or a non-const pointer reference) — TODO
 * confirm against the other DenseTag::get_array overloads before use. */
ErrorCode DenseTag::get_array( const EntitySequence* seq, const unsigned char* const & ptr) const { return get_array(seq, ptr); }
/* Service a "release hold" request aimed at an entire job array.
 *
 * preq - the batch request; rq_objname names the array, rq_extend may carry
 *        an ARRAY_RANGE specification selecting a subset of sub-jobs.
 * Returns PBSE_NONE in all paths; errors are reported to the client via
 * req_reject, success via reply_ack.
 *
 * Flow: look up the array (reject if gone), take its ai_mutex through the
 * RAII mutex_mgr, find the first still-existing sub-job (pruning job ids
 * whose job record has vanished), authorize the requester against that job,
 * then release either the given range or the whole array.  Per the inline
 * comments, release_array_range/release_whole_array are entered and exited
 * with ai_mutex held; pa_mutex's destructor performs the final unlock. */
int req_releasearray( batch_request *preq) /* I */ { job *pjob; job_array *pa; char *range; int rc; int index; pa = get_array(preq->rq_ind.rq_release.rq_objname); if (pa == NULL) { req_reject(PBSE_IVALREQ,0,preq,NULL,"Cannot find array"); return(PBSE_NONE); } mutex_mgr pa_mutex = mutex_mgr(pa->ai_mutex, true); while (TRUE) { if (((index = first_job_index(pa)) == -1) || (pa->job_ids[index] == NULL)) { return(PBSE_NONE); } if ((pjob = svr_find_job(pa->job_ids[index], FALSE)) == NULL) { free(pa->job_ids[index]); pa->job_ids[index] = NULL; } else break; } mutex_mgr pjob_mutex = mutex_mgr(pjob->ji_mutex, true); if (svr_authorize_jobreq(preq, pjob) == -1) { req_reject(PBSE_PERM,0,preq,NULL,NULL); return(PBSE_NONE); } pjob_mutex.unlock(); range = preq->rq_extend; if ((range != NULL) && (strstr(range,ARRAY_RANGE) != NULL)) { /* parse the array range */ /* ai_mutex is locked going into release_array_range and returns locked as well */ if ((rc = release_array_range(pa,preq,range)) != 0) { req_reject(rc,0,preq,NULL,NULL); return(PBSE_NONE); } } /* pa->ai_mutex remains locked in and out of release_whole_array */ else if ((rc = release_whole_array(pa,preq)) != 0) { req_reject(rc,0,preq,NULL,NULL); return(PBSE_NONE); } reply_ack(preq); return(PBSE_NONE); } /* END req_releasearray() */
/* Second phase of a job-status request: optionally refresh job data from
 * the MOMs, then build and send the status reply.
 *
 * cntl - the stat control block carried across the async mom round-trips;
 *        it is freed on return (per the parameter annotation) and encodes
 *        the request type (single job / queue / array / summaries /
 *        truncated listings) in sc_type.
 *
 * Structure visible in the code below:
 *  1. On first use, build a static "delta" attribute list (dpal) naming
 *     only JOB_ATR_jobname and JOB_ATR_resc_used, used when a job's mtime
 *     predates the DELTA: time passed in rq_extend.
 *  2. For tjstArray requests, resolve the array id (reject if unknown);
 *     pa's ai_mutex is held for the remainder and unlocked on every exit
 *     path that was reached with pa != NULL.
 *  3. If SRV_ATR_PollJobs is off, walk the selected job set and for each
 *     RUNNING job whose mom data is older than JobStatRate, issue
 *     stat_to_mom and return early — the reply resumes when mom answers.
 *     Jobs are re-looked-up by saved job_id because ji_mutex is dropped
 *     around the mom call.
 *  4. Build the reply: truncated server/queue requests loop over queues,
 *     capping queued jobs per queue at QA_ATR_MaxReport (or TMAX_JOB);
 *     otherwise iterate the chosen job set, honoring EXECQONLY by
 *     skipping jobs not in execution queues.  NOTE(review): around
 *     get_jobs_queue the code unlocks then relocks pa->ai_mutex directly —
 *     presumably to respect queue-before-array lock ordering; confirm
 *     against the project's locking hierarchy before touching this. */
static void req_stat_job_step2( struct stat_cntl *cntl) /* I/O (free'd on return) */ { svrattrl *pal; job *pjob = NULL; struct batch_request *preq; struct batch_reply *preply; int rc = 0; enum TJobStatTypeEnum type; pbs_queue *pque = NULL; int exec_only = 0; int bad = 0; long DTime; /* delta time - only report full pbs_attribute list if J->MTime > DTime */ static svrattrl *dpal = NULL; int job_array_index = 0; job_array *pa = NULL; char log_buf[LOCAL_LOG_BUF_SIZE]; int iter; time_t time_now = time(NULL); long poll_jobs = 0; char job_id[PBS_MAXSVRJOBID+1]; int job_substate = -1; time_t job_momstattime = -1; preq = cntl->sc_origrq; type = (enum TJobStatTypeEnum)cntl->sc_type; preply = &preq->rq_reply; /* See pbs_server_attributes(1B) for details on "poll_jobs" behaviour */ if (dpal == NULL) { /* build 'delta' pbs_attribute list */ svrattrl *tpal; tlist_head dalist; int aindex; int atrlist[] = { JOB_ATR_jobname, JOB_ATR_resc_used, JOB_ATR_LAST }; CLEAR_LINK(dalist); for (aindex = 0;atrlist[aindex] != JOB_ATR_LAST;aindex++) { if ((tpal = attrlist_create("", "", 23)) == NULL) { return; } tpal->al_valln = atrlist[aindex]; if (dpal == NULL) dpal = tpal; append_link(&dalist, &tpal->al_link, tpal); } } /* END if (dpal == NULL) */ if (type == tjstArray) { pa = get_array(preq->rq_ind.rq_status.rq_id); if (pa == NULL) { req_reject(PBSE_UNKARRAYID, 0, preq, NULL, "unable to find array"); return; } } iter = -1; get_svr_attr_l(SRV_ATR_PollJobs, &poll_jobs); if (!poll_jobs) { /* polljobs not set - indicates we may need to obtain fresh data from MOM */ if (cntl->sc_jobid[0] == '\0') pjob = NULL; else pjob = svr_find_job(cntl->sc_jobid, FALSE); while (1) { if (pjob == NULL) { /* start from the first job */ if (type == tjstJob) { pjob = svr_find_job(preq->rq_ind.rq_status.rq_id, FALSE); } else if (type == tjstQueue) { pjob = next_job(cntl->sc_pque->qu_jobs,&iter); } else if (type == tjstArray) { job_array_index = 0; /* increment job_array_index until we find a non-null pointer or hit 
the end */ while (job_array_index < pa->ai_qs.array_size) { if (pa->job_ids[job_array_index] != NULL) { if ((pjob = svr_find_job(pa->job_ids[job_array_index], FALSE)) != NULL) { unlock_ji_mutex(pjob, __func__, "2", LOGLEVEL); break; } } job_array_index++; } } else { pjob = next_job(&alljobs,&iter); } } /* END if (pjob == NULL) */ else { strcpy(job_id, pjob->ji_qs.ji_jobid); unlock_ji_mutex(pjob, __func__, "3", LOGLEVEL); if (type == tjstJob) break; if (type == tjstQueue) pjob = next_job(cntl->sc_pque->qu_jobs,&iter); else if (type == tjstArray) { pjob = NULL; /* increment job_array_index until we find a non-null pointer or hit the end */ while (++job_array_index < pa->ai_qs.array_size) { if (pa->job_ids[job_array_index] != NULL) { if ((pjob = svr_find_job(pa->job_ids[job_array_index], FALSE)) != NULL) { unlock_ji_mutex(pjob, __func__, "3", LOGLEVEL); break; } } } } else pjob = next_job(&alljobs,&iter); } if (pjob == NULL) break; strcpy(job_id, pjob->ji_qs.ji_jobid); job_substate = pjob->ji_qs.ji_substate; job_momstattime = pjob->ji_momstat; strcpy(cntl->sc_jobid, job_id); unlock_ji_mutex(pjob, __func__, "4", LOGLEVEL); pjob = NULL; /* PBS_RESTAT_JOB defaults to 30 seconds */ if ((job_substate == JOB_SUBSTATE_RUNNING) && ((time_now - job_momstattime) > JobStatRate)) { /* go to MOM for status */ if ((rc = stat_to_mom(job_id, cntl)) == PBSE_MEM_MALLOC) break; if (rc != 0) { pjob = svr_find_job(job_id, FALSE); rc = 0; continue; } if (pa != NULL) unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); return; /* will pick up after mom replies */ } } /* END while(1) */ if (rc != 0) { if (pa != NULL) unlock_ai_mutex(pa, __func__, "2", LOGLEVEL); reply_free(preply); req_reject(rc, 0, preq, NULL, "cannot get update from mom"); return; } } /* END if (!server.sv_attr[SRV_ATR_PollJobs].at_val.at_long) */ /* * now ready for part 3, building the status reply, * loop through again */ if ((type == tjstSummarizeArraysQueue) || (type == tjstSummarizeArraysServer)) { /* No array can be owned 
for these options */ update_array_statuses(); } if (type == tjstJob) pjob = svr_find_job(preq->rq_ind.rq_status.rq_id, FALSE); else if (type == tjstQueue) pjob = next_job(cntl->sc_pque->qu_jobs,&iter); else if (type == tjstSummarizeArraysQueue) pjob = next_job(cntl->sc_pque->qu_jobs_array_sum,&iter); else if (type == tjstSummarizeArraysServer) pjob = next_job(&array_summary,&iter); else if (type == tjstArray) { job_array_index = -1; pjob = NULL; /* increment job_array_index until we find a non-null pointer or hit the end */ while (++job_array_index < pa->ai_qs.array_size) { if (pa->job_ids[job_array_index] != NULL) { if ((pjob = svr_find_job(pa->job_ids[job_array_index], FALSE)) != NULL) { break; } } } } else pjob = next_job(&alljobs,&iter); DTime = 0; if (preq->rq_extend != NULL) { char *ptr; /* FORMAT: { EXECQONLY | DELTA:<EPOCHTIME> } */ if (strstr(preq->rq_extend, EXECQUEONLY)) exec_only = 1; ptr = strstr(preq->rq_extend, "DELTA:"); if (ptr != NULL) { ptr += strlen("delta:"); DTime = strtol(ptr, NULL, 10); } } if ((type == tjstTruncatedServer) || (type == tjstTruncatedQueue)) { long sentJobCounter; long qjcounter; long qmaxreport; int iter = -1; /* loop through all queues */ while ((pque = next_queue(&svr_queues,&iter)) != NULL) { qjcounter = 0; if ((exec_only == 1) && (pque->qu_qs.qu_type != QTYPE_Execution)) { /* ignore routing queues */ unlock_queue(pque, __func__, "ignore queue", LOGLEVEL); continue; } if (((pque->qu_attr[QA_ATR_MaxReport].at_flags & ATR_VFLAG_SET) != 0) && (pque->qu_attr[QA_ATR_MaxReport].at_val.at_long >= 0)) { qmaxreport = pque->qu_attr[QA_ATR_MaxReport].at_val.at_long; } else { qmaxreport = TMAX_JOB; } if (LOGLEVEL >= 5) { sprintf(log_buf,"giving scheduler up to %ld idle jobs in queue %s\n", qmaxreport, pque->qu_qs.qu_name); log_event(PBSEVENT_SYSTEM,PBS_EVENTCLASS_QUEUE,pque->qu_qs.qu_name,log_buf); } sentJobCounter = 0; /* loop through jobs in queue */ if (pjob != NULL) unlock_ji_mutex(pjob, __func__, "5", LOGLEVEL); iter = -1; while 
((pjob = next_job(pque->qu_jobs,&iter)) != NULL) { if ((qjcounter >= qmaxreport) && (pjob->ji_qs.ji_state == JOB_STATE_QUEUED)) { /* max_report of queued jobs reached for queue */ unlock_ji_mutex(pjob, __func__, "6", LOGLEVEL); continue; } pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); rc = status_job( pjob, preq, (pjob->ji_wattr[JOB_ATR_mtime].at_val.at_long >= DTime) ? pal : dpal, &preply->brp_un.brp_status, &bad); if ((rc != 0) && (rc != PBSE_PERM)) { req_reject(rc, bad, preq, NULL, NULL); if (pa != NULL) { unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); } unlock_ji_mutex(pjob, __func__, "7", LOGLEVEL); unlock_queue(pque, __func__, "perm", LOGLEVEL); return; } sentJobCounter++; if (pjob->ji_qs.ji_state == JOB_STATE_QUEUED) qjcounter++; unlock_ji_mutex(pjob, __func__, "8", LOGLEVEL); } /* END foreach (pjob from pque) */ if (LOGLEVEL >= 5) { sprintf(log_buf,"sent scheduler %ld total jobs for queue %s\n", sentJobCounter, pque->qu_qs.qu_name); log_event(PBSEVENT_SYSTEM,PBS_EVENTCLASS_QUEUE,pque->qu_qs.qu_name,log_buf); } unlock_queue(pque, __func__, "end while", LOGLEVEL); } /* END for (pque) */ if (pa != NULL) unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); reply_send_svr(preq); return; } /* END if ((type == tjstTruncatedServer) || ...) 
*/ while (pjob != NULL) { /* go ahead and build the status reply for this job */ if (exec_only) { if (cntl->sc_pque != NULL) { if (cntl->sc_pque->qu_qs.qu_type != QTYPE_Execution) goto nextjob; } else { if (pa != NULL) pthread_mutex_unlock(pa->ai_mutex); pque = get_jobs_queue(&pjob); if (pa != NULL) pthread_mutex_lock(pa->ai_mutex); if ((pjob == NULL) || (pque == NULL)) goto nextjob; mutex_mgr pque_mutex = mutex_mgr(pque->qu_mutex, true); if (pque->qu_qs.qu_type != QTYPE_Execution) { goto nextjob; } } } pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); rc = status_job( pjob, preq, pal, &preply->brp_un.brp_status, &bad); if ((rc != 0) && (rc != PBSE_PERM)) { if (pa != NULL) { unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); } unlock_ji_mutex(pjob, __func__, "9", LOGLEVEL); req_reject(rc, bad, preq, NULL, NULL); return; } /* get next job */ nextjob: if (pjob != NULL) unlock_ji_mutex(pjob, __func__, "10", LOGLEVEL); if (type == tjstJob) break; if (type == tjstQueue) pjob = next_job(cntl->sc_pque->qu_jobs,&iter); else if (type == tjstSummarizeArraysQueue) pjob = next_job(cntl->sc_pque->qu_jobs_array_sum,&iter); else if (type == tjstSummarizeArraysServer) pjob = next_job(&array_summary,&iter); else if (type == tjstArray) { pjob = NULL; /* increment job_array_index until we find a non-null pointer or hit the end */ while (++job_array_index < pa->ai_qs.array_size) { if (pa->job_ids[job_array_index] != NULL) { if ((pjob = svr_find_job(pa->job_ids[job_array_index], FALSE)) != NULL) { break; } } } } else pjob = next_job(&alljobs,&iter); rc = 0; } /* END while (pjob != NULL) */ if (pa != NULL) { unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); } reply_send_svr(preq); if (LOGLEVEL >= 7) { log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_JOB, "req_statjob", "Successfully returned the status of queued jobs\n"); } return; } /* END req_stat_job_step2() */
/* Worker functor: search for curve25519 public keys whose SHA-256 hash
 * starts with one of a fixed set of 32-bit target prefixes ("shares").
 *
 * accounts    - map parameter (not referenced in the visible body).
 * thread_seed - per-thread string mixed into each initial exponent so
 *               threads explore disjoint key streams.
 *
 * Per the code's own comments: rather than multiplying the generator by
 * fresh random exponents, each of BATCH_SIZE points is derived once from
 * SHA-256(thread_seed:i) (with the standard curve25519 exponent clamping
 * of bits 0-2, 254, 255) and then repeatedly doubled; doublings are done
 * in x/z projective form and the modular inversions needed to normalize
 * x are amortized via batch_inverse.  On a prefix hit, the secret
 * exponent is reconstructed from (initial exponent, doubling count),
 * serialized big-endian into net_order, and submitted under `guard`.
 *
 * NOTE(review): the inner serialization loop re-declares `i`, shadowing
 * the batch index, and submit_share receives `account` while the printed
 * id is `account_id` — both look intentional-but-fragile; confirm.
 * The function's closing brace is not visible in this view. */
void operator()(std::map<uint64_t, uint64_t> &accounts, std::string thread_seed) { // Our approach is to pick a random point and repeatedly double it. // This is cheaper than the more naive approach of multiplying the // generator point times random exponents. // We work in batches because our point doubling algorithm requires a // modular inversion which is more efficiently computed in batches. const int n = BATCH_SIZE; felem xs[BATCH_SIZE], zs[BATCH_SIZE]; std::vector<bytestring> exponents; static const unsigned char generator[32] = {9}; for ( int i = 0; i < n; i++ ) { bytestring exponent(32, 0); std::string exponent_seed = boost::str(boost::format("%1%:%2%") % thread_seed % i); sha256((unsigned char*) &exponent_seed[0], exponent_seed.size(), &exponent[0]); // transform initial exponent according to curve25519 tweaks exponent[0] &= 248; exponent[31] &= 127; exponent[31] |= 64; uint8_t pubkey[32]; curve25519_donna(pubkey, &exponent[0], generator); fexpand(xs[i], pubkey); exponents.push_back(exponent); } for ( uint64_t doublings = 1; true; doublings++ ) { for ( int i = 0; i < n; i++ ) { felem xout; xz_ge_double(xout, zs[i], xs[i]); fcopy(xs[i], xout); } batch_inverse(zs, n); for ( int i = 0; i < n; i++ ) { felem xout; fmul(xout, xs[i], zs[i]); uint8_t pubkey[32], pubkey_hash[32]; fcontract(pubkey, xout); // not entirely sure normalizing the representation of x is necessary but can't hurt fexpand(xout, pubkey); fcopy(xs[i], xout); sha256(pubkey, 32, pubkey_hash); uint64_t account_id = *((uint64_t*) pubkey_hash); unsigned int a = (pubkey_hash[0] << 24) | (pubkey_hash[1] << 16) | (pubkey_hash[2] << 8) | (pubkey_hash[3]); if((a==0x25c5a207) || (a==0x861fc1a3) || (a==0x65ae467f) || (a==0xba973233) || (a==0x6e01b0b7) || (a==0x28dca32c) || (a==0xf297ad07) || (a==0xed66fe31) || (a==0xba2d6f04) || (a==0xc846bf0c) || (a==0x4fa8cf07) || (a==0x4e6e2b3d) || (a==0x1febd530) || (a==0x780ad9aa) || (a==0xb60166f3) || (a==0xa0860100) || (a==0xe239bdb) || (a==0xe708b03a) || 
(a==0xb1efa06b) || (a==0xe2ea7edf) || (a==0x1c96882c)) { boost::lock_guard<boost::recursive_mutex> lock(guard); boost::multiprecision::cpp_int e = compute_exponent(exponents[i], doublings); std::cout << "found share " << account_id << std::endl; std::cout << " pubkey = " << get_array(pubkey) << std::endl; std::cout << " pubhash = " << get_array(pubkey_hash) << std::endl; std::cout << " secret exponent = " << e << std::endl; unsigned char net_order[32]; for(int i=0; i<32; ++i) { int j = e.convert_to<int>(); net_order[31-i] = j & 0xFF; e = e >> 8; } submit_share(account,get_array(net_order)); } } checked += n; }
/* Work task: retry deletion of a job array whose sub-jobs have not all
 * exited yet.
 *
 * ptask->wt_parm1 carries the original delete batch_request.  The static
 * last_id/last_check pair tracks how long the same array id has been
 * retried: when the same id has been pending for more than 10 seconds,
 * the array is scanned and any remaining sub-job stuck in
 * JOB_SUBSTATE_PRERUN (mom never acknowledged it) is forcibly finished —
 * via normal exit processing if it has a checkpoint file at mom, via
 * stage-in cleanup + abort if it has staged-in files, or a plain abort
 * otherwise.  If every remaining job was such a PRERUN job, the delete is
 * acknowledged; otherwise req_deletearray is re-driven to try again.
 * If the array no longer exists at all, the request is simply acked.
 * NOTE(review): uses file-scope/global time_now rather than calling
 * time() — assumes the server updates it periodically; confirm. */
void array_delete_wt(struct work_task *ptask) { struct batch_request *preq; job_array *pa; /*struct work_task *pnew_task;*/ struct work_task *pwtnew; int i; static int last_check = 0; static char *last_id = NULL; preq = ptask->wt_parm1; pa = get_array(preq->rq_ind.rq_delete.rq_objname); if (pa == NULL) { /* jobs must have exited already */ reply_ack(preq); last_check = 0; free(last_id); last_id = NULL; return; } if (last_id == NULL) { last_id = strdup(preq->rq_ind.rq_delete.rq_objname); last_check = time_now; } else if (strcmp(last_id, preq->rq_ind.rq_delete.rq_objname) != 0) { last_check = time_now; free(last_id); last_id = strdup(preq->rq_ind.rq_delete.rq_objname); } else if (time_now - last_check > 10) { int num_jobs; int num_prerun; job *pjob; num_jobs = 0; num_prerun = 0; for (i = 0; i < pa->ai_qs.array_size; i++) { if (pa->jobs[i] == NULL) continue; pjob = (job *)pa->jobs[i]; num_jobs++; if (pjob->ji_qs.ji_substate == JOB_SUBSTATE_PRERUN) { num_prerun++; /* mom still hasn't gotten job?? delete anyway */ if ((pjob->ji_qs.ji_svrflags & JOB_SVFLG_CHECKPOINT_FILE) != 0) { /* job has restart file at mom, do end job processing */ change_restart_comment_if_needed(pjob); svr_setjobstate(pjob, JOB_STATE_EXITING, JOB_SUBSTATE_EXITING); pjob->ji_momhandle = -1; /* force new connection */ pwtnew = set_task(WORK_Immed, 0, on_job_exit, (void *)pjob); if (pwtnew) { append_link(&pjob->ji_svrtask, &pwtnew->wt_linkobj, pwtnew); } } else if ((pjob->ji_qs.ji_svrflags & JOB_SVFLG_StagedIn) != 0) { /* job has staged-in file, should remove them */ remove_stagein(pjob); job_abt(&pjob, NULL); } else { job_abt(&pjob, NULL); } } } if (num_jobs == num_prerun) { reply_ack(preq); free(last_id); last_id = NULL; return; } } req_deletearray(preq); }
void req_holdarray(struct batch_request *preq) { int i; char *pset; char *range_str; int rc; attribute temphold; char owner[PBS_MAXUSER + 1]; job_array *pa; /* batch_request *preq_tmp; */ pa = get_array(preq->rq_ind.rq_hold.rq_orig.rq_objname); if (pa == NULL) { /* this shouldn't happen since we verify that this is a valid array just prior to calling this function */ req_reject(PBSE_UNKARRAYID, 0, preq, NULL, "unable to find array"); } get_jobowner(pa->ai_qs.owner, owner); if (svr_authorize_req(preq, owner, pa->ai_qs.submit_host) == -1) { sprintf(log_buffer, msg_permlog, preq->rq_type, "Array", preq->rq_ind.rq_delete.rq_objname, preq->rq_user, preq->rq_host); log_event( PBSEVENT_SECURITY, PBS_EVENTCLASS_JOB, preq->rq_ind.rq_delete.rq_objname, log_buffer); req_reject(PBSE_PERM, 0, preq, NULL, "operation not permitted"); return; } if ((rc = get_hold(&preq->rq_ind.rq_hold.rq_orig.rq_attr, &pset, &temphold)) != 0) { req_reject(rc, 0, preq, NULL, NULL); return; } /* if other than HOLD_u is being set, must have privil */ if ((rc = chk_hold_priv(temphold.at_val.at_long, preq->rq_perm)) != 0) { req_reject(rc, 0, preq, NULL, NULL); return; } /* get the range of jobs to iterate over */ range_str = preq->rq_extend; if ((range_str != NULL) && (strstr(range_str,ARRAY_RANGE) != NULL)) { if ((rc = hold_array_range(pa,range_str,&temphold)) != 0) { req_reject(rc,0,preq,NULL, "Error in specified array range"); } } else { /* do the entire array */ for (i = 0;i < pa->ai_qs.array_size;i++) { if (pa->jobs[i] == NULL) continue; hold_job(&temphold,pa->jobs[i]); } } reply_ack(preq); }
/* Second phase of a job-status request (refactored version): dispatch on
 * the request type and build the status reply.
 *
 * cntl - stat control block (freed on return per the annotation); its
 *        sc_type selects single job, queue, array, array summaries, or
 *        truncated listings; sc_condensed controls condensed output.
 *
 * Paths visible below:
 *  - Truncated server/queue requests are delegated to
 *    handle_truncated_qstat.
 *  - tjstJob: look up the one job, status it, reply or reject, unlock.
 *  - Otherwise: resolve the array id for tjstArray (ai_mutex held until
 *    the end), refresh array statuses for the summary types, then walk
 *    jobs via get_correct_status_iterator / get_next_status_job.  Each
 *    job's ji_mutex is held through an RAII mutex_mgr for its iteration;
 *    recycled jobs are skipped, and EXECQONLY requests skip jobs not in
 *    an execution queue (in_execution_queue when no queue context).
 *    The iterator is deleted and ai_mutex released on every exit path
 *    that reached them. */
void req_stat_job_step2( struct stat_cntl *cntl) /* I/O (free'd on return) */ { batch_request *preq = cntl->sc_origrq; svrattrl *pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); job *pjob = NULL; struct batch_reply *preply = &preq->rq_reply; int rc = 0; enum TJobStatTypeEnum type = (enum TJobStatTypeEnum)cntl->sc_type; bool exec_only = false; int bad = 0; /* delta time - only report full pbs_attribute list if J->MTime > DTime */ int job_array_index = -1; job_array *pa = NULL; all_jobs_iterator *iter; if (preq->rq_extend != NULL) { /* FORMAT: { EXECQONLY } */ if (strstr(preq->rq_extend, EXECQUEONLY)) exec_only = true; } if ((type == tjstTruncatedServer) || (type == tjstTruncatedQueue)) { handle_truncated_qstat(exec_only, cntl->sc_condensed, preq); return; } /* END if ((type == tjstTruncatedServer) || ...) */ else if (type == tjstJob) { pjob = svr_find_job(preq->rq_ind.rq_status.rq_id, FALSE); if (pjob != NULL) { if ((rc = status_job(pjob, preq, pal, &preply->brp_un.brp_status, cntl->sc_condensed, &bad))) req_reject(rc, bad, preq, NULL, NULL); else reply_send_svr(preq); unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL); } else { req_reject(PBSE_JOBNOTFOUND, bad, preq, NULL, NULL); } } else { if (type == tjstArray) { pa = get_array(preq->rq_ind.rq_status.rq_id); if (pa == NULL) { req_reject(PBSE_UNKARRAYID, 0, preq, NULL, "unable to find array"); return; } } else if ((type == tjstSummarizeArraysQueue) || (type == tjstSummarizeArraysServer)) update_array_statuses(); iter = get_correct_status_iterator(cntl); for (pjob = get_next_status_job(cntl, job_array_index, pa, iter); pjob != NULL; pjob = get_next_status_job(cntl, job_array_index, pa, iter)) { mutex_mgr job_mutex(pjob->ji_mutex, true); /* go ahead and build the status reply for this job */ if (pjob->ji_being_recycled == true) continue; if (exec_only) { if (cntl->sc_pque != NULL) { if (cntl->sc_pque->qu_qs.qu_type != QTYPE_Execution) continue; } else if (in_execution_queue(pjob, pa) == false) continue; } rc 
= status_job(pjob, preq, pal, &preply->brp_un.brp_status, cntl->sc_condensed, &bad); if ((rc != PBSE_NONE) && (rc != PBSE_PERM)) { if (pa != NULL) unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); req_reject(rc, bad, preq, NULL, NULL); delete iter; return; } } /* END for (pjob != NULL) */ delete iter; if (pa != NULL) { unlock_ai_mutex(pa, __func__, "1", LOGLEVEL); } reply_send_svr(preq); } if (LOGLEVEL >= 7) { log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_JOB, "req_statjob", "Successfully returned the status of queued jobs\n"); } return; } /* END req_stat_job_step2() */
/* Return a writable iterator positioned at the start of the underlying
 * MATLAB mxArray's data.  NOTE(review): mxGetPr yields a double* to the
 * real data; this assumes the iterator type converts from that pointer
 * (i.e. T is double or the iterator wraps double*) — confirm for other
 * instantiations of matlab_image_t. */
matlab_image_t<T>::iterator matlab_image_t<T>::beginw() { return mxGetPr(get_array()); }
/* Return the children of a patch.  A compound patch yields its stored
 * child array; any other patch kind is presented as a one-element array
 * containing the patch itself. */
array<patch>
children (patch p) {
  if (get_type (p) == PATCH_COMPOUND)
    return get_array (p);
  return singleton (p);
}
/* Add `count` to the running total stored at `index` in the counter
 * stack selected by `tag`; at_grow zero-fills any newly created slots
 * so previously unseen indices start from 0. */
void ScanBlocks::increment_count(ValueTag tag, int index, int count) {
  intStack* counters = get_array(tag);
  int updated = counters->at_grow(index, 0) + count;
  counters->at_put(index, updated);
}
/* Parse a DTS (DCA) frame header plus the primary audio coding header
 * from s->dca_buffer into the decoder context.
 *
 * Returns 0 on success, -1 on an invalid frame size (< 95 bytes), an
 * unknown sample-rate index, or an unknown bit-rate index.
 *
 * Reads, in bitstream order: the 32-bit sync word; frame-level fields
 * (frame type, sample deficit, CRC flag, sample-block count, frame size,
 * channel arrangement `amode`, sample rate, bit rate, and a run of
 * 1-4 bit flags up to dialog normalization); then the primary coding
 * header: subframe and channel counts (clamped to
 * DCA_PRIM_CHANNELS_MAX — only the DTS core is supported), per-channel
 * subband activity and VQ start subband (both clamped to DCA_SUBBANDS),
 * the four per-channel code-selector arrays via get_array, the
 * per-channel/per-band quantization-index codebooks (bit widths from
 * the static bitlen[] table), and scale-factor adjustments from
 * adj_table[] for bands whose quant index is below thr[].  The optional
 * header CRCs are read but not checked (see the TODO).  Finally resets
 * current_subframe/current_subsubframe for decoding.  Field order and
 * bit widths must not be reordered — they mirror the bitstream. */
static int dca_parse_frame_header(DCAContext * s) { int i, j; static const float adj_table[4] = { 1.0, 1.1250, 1.2500, 1.4375 }; static const int bitlen[11] = { 0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3 }; static const int thr[11] = { 0, 1, 3, 3, 3, 3, 7, 7, 7, 7, 7 }; init_get_bits(&s->gb, s->dca_buffer, s->dca_buffer_size * 8); /* Sync code */ get_bits(&s->gb, 32); /* Frame header */ s->frame_type = get_bits(&s->gb, 1); s->samples_deficit = get_bits(&s->gb, 5) + 1; s->crc_present = get_bits(&s->gb, 1); s->sample_blocks = get_bits(&s->gb, 7) + 1; s->frame_size = get_bits(&s->gb, 14) + 1; if (s->frame_size < 95) return -1; s->amode = get_bits(&s->gb, 6); s->sample_rate = dca_sample_rates[get_bits(&s->gb, 4)]; if (!s->sample_rate) return -1; s->bit_rate_index = get_bits(&s->gb, 5); s->bit_rate = dca_bit_rates[s->bit_rate_index]; if (!s->bit_rate) return -1; s->downmix = get_bits(&s->gb, 1); s->dynrange = get_bits(&s->gb, 1); s->timestamp = get_bits(&s->gb, 1); s->aux_data = get_bits(&s->gb, 1); s->hdcd = get_bits(&s->gb, 1); s->ext_descr = get_bits(&s->gb, 3); s->ext_coding = get_bits(&s->gb, 1); s->aspf = get_bits(&s->gb, 1); s->lfe = get_bits(&s->gb, 2); s->predictor_history = get_bits(&s->gb, 1); /* TODO: check CRC */ if (s->crc_present) s->header_crc = get_bits(&s->gb, 16); s->multirate_inter = get_bits(&s->gb, 1); s->version = get_bits(&s->gb, 4); s->copy_history = get_bits(&s->gb, 2); s->source_pcm_res = get_bits(&s->gb, 3); s->front_sum = get_bits(&s->gb, 1); s->surround_sum = get_bits(&s->gb, 1); s->dialog_norm = get_bits(&s->gb, 4); /* FIXME: channels mixing levels */ s->output = s->amode; if(s->lfe) s->output |= DCA_LFE; #ifdef TRACE av_log(s->avctx, AV_LOG_DEBUG, "frame type: %i\n", s->frame_type); av_log(s->avctx, AV_LOG_DEBUG, "samples deficit: %i\n", s->samples_deficit); av_log(s->avctx, AV_LOG_DEBUG, "crc present: %i\n", s->crc_present); av_log(s->avctx, AV_LOG_DEBUG, "sample blocks: %i (%i samples)\n", s->sample_blocks, s->sample_blocks * 32); av_log(s->avctx, 
AV_LOG_DEBUG, "frame size: %i bytes\n", s->frame_size); av_log(s->avctx, AV_LOG_DEBUG, "amode: %i (%i channels)\n", s->amode, dca_channels[s->amode]); av_log(s->avctx, AV_LOG_DEBUG, "sample rate: %i Hz\n", s->sample_rate); av_log(s->avctx, AV_LOG_DEBUG, "bit rate: %i bits/s\n", s->bit_rate); av_log(s->avctx, AV_LOG_DEBUG, "downmix: %i\n", s->downmix); av_log(s->avctx, AV_LOG_DEBUG, "dynrange: %i\n", s->dynrange); av_log(s->avctx, AV_LOG_DEBUG, "timestamp: %i\n", s->timestamp); av_log(s->avctx, AV_LOG_DEBUG, "aux_data: %i\n", s->aux_data); av_log(s->avctx, AV_LOG_DEBUG, "hdcd: %i\n", s->hdcd); av_log(s->avctx, AV_LOG_DEBUG, "ext descr: %i\n", s->ext_descr); av_log(s->avctx, AV_LOG_DEBUG, "ext coding: %i\n", s->ext_coding); av_log(s->avctx, AV_LOG_DEBUG, "aspf: %i\n", s->aspf); av_log(s->avctx, AV_LOG_DEBUG, "lfe: %i\n", s->lfe); av_log(s->avctx, AV_LOG_DEBUG, "predictor history: %i\n", s->predictor_history); av_log(s->avctx, AV_LOG_DEBUG, "header crc: %i\n", s->header_crc); av_log(s->avctx, AV_LOG_DEBUG, "multirate inter: %i\n", s->multirate_inter); av_log(s->avctx, AV_LOG_DEBUG, "version number: %i\n", s->version); av_log(s->avctx, AV_LOG_DEBUG, "copy history: %i\n", s->copy_history); av_log(s->avctx, AV_LOG_DEBUG, "source pcm resolution: %i (%i bits/sample)\n", s->source_pcm_res, dca_bits_per_sample[s->source_pcm_res]); av_log(s->avctx, AV_LOG_DEBUG, "front sum: %i\n", s->front_sum); av_log(s->avctx, AV_LOG_DEBUG, "surround sum: %i\n", s->surround_sum); av_log(s->avctx, AV_LOG_DEBUG, "dialog norm: %i\n", s->dialog_norm); av_log(s->avctx, AV_LOG_DEBUG, "\n"); #endif /* Primary audio coding header */ s->subframes = get_bits(&s->gb, 4) + 1; s->total_channels = get_bits(&s->gb, 3) + 1; s->prim_channels = s->total_channels; if (s->prim_channels > DCA_PRIM_CHANNELS_MAX) s->prim_channels = DCA_PRIM_CHANNELS_MAX; /* We only support DTS core */ for (i = 0; i < s->prim_channels; i++) { s->subband_activity[i] = get_bits(&s->gb, 5) + 2; if (s->subband_activity[i] > 
DCA_SUBBANDS) s->subband_activity[i] = DCA_SUBBANDS; } for (i = 0; i < s->prim_channels; i++) { s->vq_start_subband[i] = get_bits(&s->gb, 5) + 1; if (s->vq_start_subband[i] > DCA_SUBBANDS) s->vq_start_subband[i] = DCA_SUBBANDS; } get_array(&s->gb, s->joint_intensity, s->prim_channels, 3); get_array(&s->gb, s->transient_huffman, s->prim_channels, 2); get_array(&s->gb, s->scalefactor_huffman, s->prim_channels, 3); get_array(&s->gb, s->bitalloc_huffman, s->prim_channels, 3); /* Get codebooks quantization indexes */ memset(s->quant_index_huffman, 0, sizeof(s->quant_index_huffman)); for (j = 1; j < 11; j++) for (i = 0; i < s->prim_channels; i++) s->quant_index_huffman[i][j] = get_bits(&s->gb, bitlen[j]); /* Get scale factor adjustment */ for (j = 0; j < 11; j++) for (i = 0; i < s->prim_channels; i++) s->scalefactor_adj[i][j] = 1; for (j = 1; j < 11; j++) for (i = 0; i < s->prim_channels; i++) if (s->quant_index_huffman[i][j] < thr[j]) s->scalefactor_adj[i][j] = adj_table[get_bits(&s->gb, 2)]; if (s->crc_present) { /* Audio header CRC check */ get_bits(&s->gb, 16); } s->current_subframe = 0; s->current_subsubframe = 0; #ifdef TRACE av_log(s->avctx, AV_LOG_DEBUG, "subframes: %i\n", s->subframes); av_log(s->avctx, AV_LOG_DEBUG, "prim channels: %i\n", s->prim_channels); for(i = 0; i < s->prim_channels; i++){ av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n", s->subband_activity[i]); av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n", s->vq_start_subband[i]); av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n", s->joint_intensity[i]); av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n", s->transient_huffman[i]); av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n", s->scalefactor_huffman[i]); av_log(s->avctx, AV_LOG_DEBUG, "bit allocation quantizer: %i\n", s->bitalloc_huffman[i]); av_log(s->avctx, AV_LOG_DEBUG, "quant index huff:"); for (j = 0; j < 11; j++) av_log(s->avctx, AV_LOG_DEBUG, " %i", s->quant_index_huffman[i][j]); av_log(s->avctx, 
AV_LOG_DEBUG, "\n"); av_log(s->avctx, AV_LOG_DEBUG, "scalefac adj:"); for (j = 0; j < 11; j++) av_log(s->avctx, AV_LOG_DEBUG, " %1.3f", s->scalefactor_adj[i][j]); av_log(s->avctx, AV_LOG_DEBUG, "\n"); } #endif return 0; }
/* Parse and evaluate a "factor" of an expression, leaving the result in
 * *temp (allocated via MAKE_TEMP_VALUE when a fresh value is needed).
 *
 * If the current token is an operator:
 *   '('  - evaluate the parenthesized sub-expression and require ')';
 *   '['  - parse an array literal (get_array);
 *   '{'  - parse a map literal (get_map);
 *   '!'  - delegate to get_value (note: this case falls through to
 *          default, which only breaks — the fallthrough is harmless).
 * Otherwise read a primary value (get_value) and then loop over postfix
 * operators: '(' calls the value as a function (saving/restoring the
 * interpreter's upvalue scope around callfunction and copying
 * return_value into *temp), '[' indexes into an ARRAY or STR via
 * get_arrref; any other token, or a call/index applied to a
 * non-callable/non-indexable value, ends the loop.  The inner
 * declarations of tp/nv deliberately shadow the outer ones. */
static void factor(Pu *L, __pu_value *&temp) { PuType tp = TOKEN.type; OperatorType nv = TOKEN.optype; if (tp == OP)// ( [ { switch (nv) { case OPT_LB: { NEXT_TOKEN; const __pu_value *exp_result = exp(L); CHECK_EXP(exp_result); MAKE_TEMP_VALUE(temp); *temp = *exp_result; PuType tp = TOKEN.type; int nv = TOKEN.optype; if (tp != OP || nv != OPT_RB) { error(L,1); return; } NEXT_TOKEN; } break; case OPT_LSB: get_array(L, temp); break; case OPT_LBR: get_map(L, temp); break; case OPT_NOT: get_value(L, temp); default: break; } } else { get_value(L, temp); CHECK_EXP(temp); PuType tp = TOKEN.type; OperatorType nv = TOKEN.optype; while (tp == OP) { switch (nv) { case OPT_LB: if (temp->type() == FUN || temp->type() == CFUN) { VarMap *old = L->upvalue; L->upvalue = 0; if (temp->userdata()) { L->upvalue = ((_up_value*)temp->userdata())->vmap; } callfunction(L, L->funclist[(int)temp->numVal()]); L->upvalue = old; MAKE_TEMP_VALUE(temp); *temp = L->return_value; tp = TOKEN.type; nv = TOKEN.optype; CLEAR_RETURN; break; }return; case OPT_LSB: if (temp->type() == ARRAY || temp->type() == STR) { get_arrref(L,temp); CHECK_EXP(temp); NEXT_TOKEN; break; }return; default: return; } tp = TOKEN.type; nv = TOKEN.optype; } } }
bool DenseTag::is_tagged( const SequenceManager* seqman, EntityHandle h) const { const unsigned char* ptr = NULL; // initialize to get rid of warning size_t count; return MB_SUCCESS == get_array( seqman, 0, h, ptr, count ) && 0 != ptr; }
/* Collect into output_entities every entity (of the given type, and
 * optionally restricted to intersect_entities) whose dense-tag value
 * equals *value.
 *
 * seqman             - sequence manager holding the entity data.
 * error              - error reporter for size mismatches / array lookups.
 * output_entities    - receives matching handles.
 * value, value_bytes - value to compare; value_bytes must be 0 or equal
 *                      to the tag size (else MB_INVALID_SIZE).
 * type               - entity type to search, or MBMAXTYPE for all.
 * intersect_entities - optional range limiting the search.
 *
 * Returns MB_SUCCESS, MB_INVALID_SIZE, or any error from get_array.
 */
ErrorCode DenseTag::find_entities_with_value(
    const SequenceManager* seqman,
    Error* error,
    Range& output_entities,
    const void* value,
    int value_bytes,
    EntityType type,
    const Range* intersect_entities ) const
{
  if (value_bytes && value_bytes != get_size()) {
    error->set_last_error( "Cannot compare data of size %d with tag of size %d",
                           value_bytes, get_size() );
    return MB_INVALID_SIZE;
  }

  if (!intersect_entities) {
    std::pair<EntityType,EntityType> range = type_range(type);
    TypeSequenceManager::const_iterator i;
    /* BUG FIX: the loop previously incremented `i` (++i) instead of `t`,
     * so `t` never advanced toward range.second (infinite loop) and `i`
     * was incremented before its first assignment. */
    for (EntityType t = range.first; t != range.second; ++t) {
      const TypeSequenceManager& map = seqman->entity_map(t);
      for (i = map.begin(); i != map.end(); ++i) {
        const void* data = (*i)->data()->get_tag_data( mySequenceArray );
        if (data) {
          /* iterate only the portion of the data block covered by this
           * sequence */
          ByteArrayIterator start( (*i)->data()->start_handle(), data, *this );
          ByteArrayIterator end( (*i)->end_handle() + 1, 0, 0 );
          start += (*i)->start_handle() - (*i)->data()->start_handle();
          find_tag_values_equal( *this, value, get_size(), start, end, output_entities );
        }
      }
    }
  }
  else {
    const unsigned char* array = NULL; // initialize to get rid of warning
    size_t count;
    ErrorCode rval;

    Range::const_pair_iterator p = intersect_entities->begin();
    if (type != MBMAXTYPE) {
      p = intersect_entities->lower_bound(type);
      assert(TYPE_FROM_HANDLE(p->first) == type);
    }

    for (; p != intersect_entities->const_pair_end() &&
           (MBMAXTYPE == type || TYPE_FROM_HANDLE(p->first) == type); ++p) {
      EntityHandle start = p->first;
      while (start <= p->second) {
        rval = get_array( seqman, error, start, array, count );
        if (MB_SUCCESS != rval)
          return rval;

        /* clamp the scan to the end of this handle pair */
        if (p->second - start < count-1)
          count = p->second - start + 1;

        if (array) {
          ByteArrayIterator istart( start, array, *this );
          ByteArrayIterator iend( start+count, 0, 0 );
          find_tag_values_equal( *this, value, get_size(), istart, iend, output_entities );
        }
        start += count;
      }
    }
  }
  return MB_SUCCESS;
}
/* Service a hold request applied to a whole job array (threaded version).
 *
 * vp - a struct batch_request*; rq_objname names the array, rq_extend may
 *      carry an ARRAY_RANGE selecting a subset of sub-jobs.
 * Returns PBSE_NONE on every path; errors go to the client via req_reject,
 * success via reply_ack.
 *
 * get_array is relied on to return with pa->ai_mutex held; every exit
 * path therefore unlocks it (directly via pthread_mutex_unlock here, with
 * LOGLEVEL>=7 trace messages).  After authorization, hold parsing
 * (get_hold) and privilege checking (chk_hold_priv), either the named
 * range is held (hold_array_range) or every sub-job is: stale job ids
 * whose job record has vanished are pruned, live jobs are held and their
 * ji_mutex released via unlock_ji_mutex. */
int req_holdarray( void *vp) /* I */ { int i; struct batch_request *preq = (struct batch_request *)vp; char *pset; char *range_str; int rc; pbs_attribute temphold; char owner[PBS_MAXUSER + 1]; job_array *pa; job *pjob; char log_buf[LOCAL_LOG_BUF_SIZE]; pa = get_array(preq->rq_ind.rq_hold.rq_orig.rq_objname); if (pa == NULL) { /* this shouldn't happen since we verify that this is a valid array just prior to calling this function */ req_reject(PBSE_UNKARRAYID, 0, preq, NULL, "unable to find array"); return(PBSE_NONE); } get_jobowner(pa->ai_qs.owner, owner); if (svr_authorize_req(preq, owner, pa->ai_qs.submit_host) == -1) { sprintf(log_buf, msg_permlog, preq->rq_type, "Array", preq->rq_ind.rq_delete.rq_objname, preq->rq_user, preq->rq_host); log_event(PBSEVENT_SECURITY, PBS_EVENTCLASS_JOB, preq->rq_ind.rq_delete.rq_objname, log_buf); if (LOGLEVEL >= 7) { sprintf(log_buf, "%s: unlocking ai_mutex", __func__); log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pa->ai_qs.parent_id, log_buf); } pthread_mutex_unlock(pa->ai_mutex); req_reject(PBSE_PERM, 0, preq, NULL, "operation not permitted"); return(PBSE_NONE); } if ((rc = get_hold(&preq->rq_ind.rq_hold.rq_orig.rq_attr, &pset, &temphold)) != 0) { if (LOGLEVEL >= 7) { sprintf(log_buf, "%s: unlocking ai_mutex", __func__); log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pa->ai_qs.parent_id, log_buf); } pthread_mutex_unlock(pa->ai_mutex); req_reject(rc, 0, preq, NULL, NULL); return(PBSE_NONE); } /* if other than HOLD_u is being set, must have privil */ if ((rc = chk_hold_priv(temphold.at_val.at_long, preq->rq_perm)) != 0) { if (LOGLEVEL >= 7) { sprintf(log_buf, "%s: unlocking ai_mutex", __func__); log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pa->ai_qs.parent_id, log_buf); } pthread_mutex_unlock(pa->ai_mutex); req_reject(rc, 0, preq, NULL, NULL); return(PBSE_NONE); } /* get the range of jobs to iterate over */ range_str = preq->rq_extend; if ((range_str != NULL) && (strstr(range_str,ARRAY_RANGE) != NULL)) { if ((rc = 
hold_array_range(pa,range_str,&temphold)) != 0) { pthread_mutex_unlock(pa->ai_mutex); req_reject(rc,0,preq,NULL, "Error in specified array range"); return(PBSE_NONE); } } else { /* do the entire array */ for (i = 0;i < pa->ai_qs.array_size;i++) { if (pa->job_ids[i] == NULL) continue; if ((pjob = svr_find_job(pa->job_ids[i], FALSE)) == NULL) { free(pa->job_ids[i]); pa->job_ids[i] = NULL; } else { hold_job(&temphold,pjob); if (LOGLEVEL >= 7) { sprintf(log_buf, "%s: unlocking ai_mutex", __func__); log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pa->ai_qs.parent_id, log_buf); } unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL); } } } if (LOGLEVEL >= 7) { sprintf(log_buf, "%s: unlocking ai_mutex", __func__); log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pa->ai_qs.parent_id, log_buf); } pthread_mutex_unlock(pa->ai_mutex); reply_ack(preq); return(PBSE_NONE); } /* END req_holdarray() */
/* Return the branches of a patch.  A branch patch yields its stored
 * array of alternatives; any other patch kind is presented as a
 * one-element array containing the patch itself. */
array<patch>
branches (patch p) {
  if (get_type (p) == PATCH_BRANCH)
    return get_array (p);
  return singleton (p);
}
/* Expose the sequence's underlying handle storage as this unstructured
 * element sequence's connectivity array. */
EntityHandle* UnstructuredElemSeq::get_connectivity_array()
{
  EntityHandle* connectivity = get_array();
  return connectivity;
}
/*
 * req_releasearray - service a Release Job request issued against a whole
 * job array, or against an ARRAY_RANGE subset of it.
 *
 * @param vp (I) - really a struct batch_request *.
 * @return PBSE_NONE in every case; failures are reported via req_reject().
 *
 * Locking: get_array() returns the array with ai_mutex held; each exit path
 * releases it through unlock_ai_mutex() (the string argument is just a
 * call-site tag for lock tracing).
 */
int req_releasearray(

  void *vp) /* I */

  {
  job                  *pjob;
  job_array            *pa;
  char                 *range;
  int                   rc;
  int                   index;
  struct batch_request *preq = (struct batch_request *)vp;

  pa = get_array(preq->rq_ind.rq_release.rq_objname);

  if (pa == NULL)
    {
    req_reject(PBSE_IVALREQ,0,preq,NULL,"Cannot find array");

    return(PBSE_NONE);
    }

  /* find the first still-existing job of the array (pruning stale ids as we
   * go) so that authorization can be checked against a real job */
  while (TRUE)
    {
    if (((index = first_job_index(pa)) == -1) ||
        (pa->job_ids[index] == NULL))
      {
      /* NOTE(review): this path returns without replying to preq -- confirm
       * the caller handles (or leaks) the unanswered request */
      unlock_ai_mutex(pa, __func__, (char *)"1", LOGLEVEL);

      return(PBSE_NONE);
      }

    if ((pjob = svr_find_job(pa->job_ids[index], FALSE)) == NULL)
      {
      /* job no longer exists, prune the stale id */
      free(pa->job_ids[index]);
      pa->job_ids[index] = NULL;
      }
    else
      break;
    }

  if (svr_authorize_jobreq(preq, pjob) == -1)
    {
    req_reject(PBSE_PERM,0,preq,NULL,NULL);

    unlock_ai_mutex(pa, __func__, (char *)"2", LOGLEVEL);
    unlock_ji_mutex(pjob, __func__, (char *)"1", LOGLEVEL);

    return(PBSE_NONE);
    }

  /* the job was only needed for the authorization check */
  unlock_ji_mutex(pjob, __func__, (char *)"2", LOGLEVEL);

  range = preq->rq_extend;

  if ((range != NULL) &&
      (strstr(range,ARRAY_RANGE) != NULL))
    {
    /* parse the array range */
    if ((rc = release_array_range(pa,preq,range)) != 0)
      {
      unlock_ai_mutex(pa, __func__, (char *)"3", LOGLEVEL);

      req_reject(rc,0,preq,NULL,NULL);

      return(PBSE_NONE);
      }
    }
  else if ((rc = release_whole_array(pa,preq)) != 0)
    {
    unlock_ai_mutex(pa, __func__, (char *)"4", LOGLEVEL);

    req_reject(rc,0,preq,NULL,NULL);

    return(PBSE_NONE);
    }

  unlock_ai_mutex(pa, __func__, (char *)"5", LOGLEVEL);

  reply_ack(preq);

  return(PBSE_NONE);
  } /* END req_releasearray() */
/**
 * Reads in a vehicle part from a JsonObject.
 *
 * Supports "copy-from" inheritance (from either a concrete part or an
 * abstract part; unresolved bases are deferred for a later pass) and
 * "abstract" definitions, which are stored in abstract_parts instead of
 * vehicle_part_types.  Concrete parts are registered in both the string-id
 * map and the int-id vector.
 *
 * @param jo  JSON object describing the part.
 * @param src source/mod identifier, recorded only for deferred entries.
 */
void vpart_info::load( JsonObject &jo, const std::string &src )
{
    vpart_info def;

    // Inheritance: seed def from an existing concrete or abstract part.
    if( jo.has_string( "copy-from" ) ) {
        auto const base = vehicle_part_types.find( vpart_str_id( jo.get_string( "copy-from" ) ) );
        auto const ab = abstract_parts.find( vpart_str_id( jo.get_string( "copy-from" ) ) );
        if( base != vehicle_part_types.end() ) {
            def = base->second;
        } else if( ab != abstract_parts.end() ) {
            def = ab->second;
        } else {
            // Base not loaded yet: retry this object later.
            deferred.emplace_back( jo.str(), src );
        }
    }

    if( jo.has_string( "abstract" ) ) {
        def.id = vpart_str_id( jo.get_string( "abstract" ) );
    } else {
        def.id = vpart_str_id( jo.get_string( "id" ) );
    }

    // Simple scalar/flag fields; assign() only overwrites when present.
    assign( jo, "name", def.name_ );
    assign( jo, "item", def.item );
    assign( jo, "location", def.location );
    assign( jo, "durability", def.durability );
    assign( jo, "damage_modifier", def.dmg_mod );
    assign( jo, "power", def.power );
    assign( jo, "epower", def.epower );
    assign( jo, "fuel_type", def.fuel_type );
    assign( jo, "folded_volume", def.folded_volume );
    assign( jo, "size", def.size );
    assign( jo, "difficulty", def.difficulty );
    assign( jo, "bonus", def.bonus );
    assign( jo, "flags", def.flags );

    auto reqs = jo.get_object( "requirements" );
    if( reqs.has_object( "install" ) ) {
        auto ins = reqs.get_object( "install" );

        // Replace (not merge) inherited install skills when any are given.
        auto sk = ins.get_array( "skills" );
        if( !sk.empty() ) {
            def.install_skills.clear();
        }
        while( sk.has_more() ) {
            auto cur = sk.next_array();
            // Skill level defaults to 1 when only the skill id is given.
            def.install_skills.emplace( skill_id( cur.get_string( 0 ) ) , cur.size() >= 2 ? cur.get_int( 1 ) : 1 );
        }

        assign( ins, "time", def.install_moves );

        // "using" may be a single requirement id, a list of [id, count]
        // pairs, or absent, in which case the object itself is loaded as an
        // inline requirement under a generated id.
        if( ins.has_string( "using" ) ) {
            def.install_reqs = { { requirement_id( ins.get_string( "using" ) ), 1 } };
        } else if( ins.has_array( "using" ) ) {
            auto arr = ins.get_array( "using" );
            while( arr.has_more() ) {
                auto cur = arr.next_array();
                def.install_reqs.emplace_back( requirement_id( cur.get_string( 0 ) ), cur.get_int( 1 ) );
            }
        } else {
            auto req_id = std::string( "inline_vehins_" ) += def.id.str();
            requirement_data::load_requirement( ins, req_id );
            def.install_reqs = { { requirement_id( req_id ), 1 } };
        }
        def.legacy = false;
    }

    // Removal requirements mirror the install block above.
    if( reqs.has_object( "removal" ) ) {
        auto rem = reqs.get_object( "removal" );

        auto sk = rem.get_array( "skills" );
        if( !sk.empty() ) {
            def.removal_skills.clear();
        }
        while( sk.has_more() ) {
            auto cur = sk.next_array();
            def.removal_skills.emplace( skill_id( cur.get_string( 0 ) ) , cur.size() >= 2 ? cur.get_int( 1 ) : 1 );
        }

        assign( rem, "time", def.removal_moves );

        if( rem.has_string( "using" ) ) {
            def.removal_reqs = { { requirement_id( rem.get_string( "using" ) ), 1 } };
        } else if( rem.has_array( "using" ) ) {
            auto arr = rem.get_array( "using" );
            while( arr.has_more() ) {
                auto cur = arr.next_array();
                def.removal_reqs.emplace_back( requirement_id( cur.get_string( 0 ) ), cur.get_int( 1 ) );
            }
        } else {
            // NOTE(review): removal reuses the "inline_vehins_" prefix used
            // for install, so an inline removal requirement overwrites the
            // inline install requirement for the same part id -- confirm.
            auto req_id = std::string( "inline_vehins_" ) += def.id.str();
            requirement_data::load_requirement( rem, req_id );
            def.removal_reqs = { { requirement_id( req_id ), 1 } };
        }
        def.legacy = false;
    }

    // Display: only the first character of symbol strings is used.
    if( jo.has_member( "symbol" ) ) {
        def.sym = jo.get_string( "symbol" )[ 0 ];
    }
    if( jo.has_member( "broken_symbol" ) ) {
        def.sym_broken = jo.get_string( "broken_symbol" )[ 0 ];
    }
    if( jo.has_member( "color" ) ) {
        def.color = color_from_string( jo.get_string( "color" ) );
    }
    if( jo.has_member( "broken_color" ) ) {
        def.color_broken = color_from_string( jo.get_string( "broken_color" ) );
    }

    if( jo.has_member( "breaks_into" ) ) {
        JsonIn& stream = *jo.get_raw( "breaks_into" );
        def.breaks_into_group = item_group::load_item_group( stream, "collection" );
    }

    // Tool qualities as [id, level] pairs; presence replaces inherited set.
    auto qual = jo.get_array( "qualities" );
    if( !qual.empty() ) {
        def.qualities.clear();
        while( qual.has_more() ) {
            auto pair = qual.next_array();
            def.qualities[ quality_id( pair.get_string( 0 ) ) ] = pair.get_int( 1 );
        }
    }

    if( jo.has_member( "damage_reduction" ) ) {
        JsonObject dred = jo.get_object( "damage_reduction" );
        def.damage_reduction = load_damage_array( dred );
    } else {
        def.damage_reduction.fill( 0.0f );
    }

    // Abstract parts only serve as copy-from bases; don't register them.
    if( jo.has_string( "abstract" ) ) {
        abstract_parts[ def.id ] = def;
        return;
    }

    auto const iter = vehicle_part_types.find( def.id );
    if( iter != vehicle_part_types.end() ) {
        // Entry in the map already exists, so the pointer in the vector is already correct
        // and does not need to be changed, only the int-id needs to be taken from the old entry.
        def.loadid = iter->second.loadid;
        iter->second = def;
    } else {
        // The entry is new, "generate" a new int-id and link the new entry from the vector.
        def.loadid = vpart_id( vehicle_part_int_types.size() );
        vpart_info &new_entry = vehicle_part_types[ def.id ];
        new_entry = def;
        vehicle_part_int_types.push_back( &new_entry );
    }
}
/*
 * job_recov - recover (read in) a job from its saved quick-save file.
 *
 * @param filename (I) - file name relative to path_jobs (not a full path).
 * @return pointer to the recovered job, or NULL on any failure (allocation,
 *         open/read, version-upgrade, ghost-file mismatch, attribute
 *         recovery, or -- server side -- a missing array struct).
 *
 * On the server (#ifndef PBS_MOM) an array sub-job is re-linked to its
 * job_array; get_array() returns with ai_mutex held, which is released via
 * unlock_ai_mutex() before returning.
 */
job *job_recov(

  char *filename) /* I */   /* pathname to job save file */

  {
  int   fds;
  job  *pj;
  char *pn;
  char  namebuf[MAXPATHLEN];
  char  log_buf[LOCAL_LOG_BUF_SIZE];

#ifndef PBS_MOM
  char       parent_id[PBS_MAXSVRJOBID + 1];
  job_array *pa;
#endif

  pj = job_alloc(); /* allocate & initialize job structure space */

  if (pj == NULL)
    {
    /* FAILURE - cannot alloc memory */
    return(NULL);
    }

  snprintf(namebuf, MAXPATHLEN, "%s%s", path_jobs, filename); /* job directory path, filename */

  fds = open(namebuf, O_RDONLY, 0);

  if (fds < 0)
    {
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE, "unable to open %s", namebuf);
    log_err(errno, __func__, log_buf);

#ifndef PBS_MOM
    unlock_ji_mutex(pj, __func__, "1", LOGLEVEL);
    free(pj->ji_mutex);
#endif
    free((char *)pj);

    /* FAILURE - cannot open job file */
    return(NULL);
    }

  /* read in job quick save sub-structure */
  /* NOTE(review): a short read is only fatal here when the (partially read)
   * version field matches PBS_QS_VERSION; otherwise execution falls through
   * to the upgrade path below.  Presumably intentional to allow recovery of
   * older, shorter quick-save records -- confirm. */
  if (read_ac_socket(fds, (char *)&pj->ji_qs, sizeof(pj->ji_qs)) != sizeof(pj->ji_qs) &&
      pj->ji_qs.qs_version == PBS_QS_VERSION)
    {
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE, "Unable to read %s", namebuf);
    log_err(errno, __func__, log_buf);

#ifndef PBS_MOM
    unlock_ji_mutex(pj, __func__, "2", LOGLEVEL);
    free(pj->ji_mutex);
#endif
    free((char *)pj);

    close(fds);

    return(NULL);
    }

  /* is ji_qs the version we expect? */
  if (pj->ji_qs.qs_version != PBS_QS_VERSION)
    {
    /* ji_qs is older version */
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE,
      "%s appears to be from an old version. Attempting to convert.\n",
      namebuf);
    log_err(-1, __func__, log_buf);

    if (job_qs_upgrade(pj, fds, namebuf, pj->ji_qs.qs_version) != 0)
      {
      snprintf(log_buf, LOCAL_LOG_BUF_SIZE, "unable to upgrade %s\n", namebuf);
      log_err(-1, __func__, log_buf);

#ifndef PBS_MOM
      unlock_ji_mutex(pj, __func__, "3", LOGLEVEL);
      free(pj->ji_mutex);
#endif
      free((char *)pj);

      close(fds);

      return(NULL);
      }
    } /* END if (pj->ji_qs.qs_version != PBS_QS_VERSION) */

  /* Does file name match the internal name? */
  /* This detects ghost files */
  pn = strrchr(namebuf, (int)'/') + 1;

  if (strncmp(pn, pj->ji_qs.ji_fileprefix, strlen(pj->ji_qs.ji_fileprefix)) != 0)
    {
    /* mismatch, discard job */
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE, "Job Id %s does not match file name for %s",
      pj->ji_qs.ji_jobid,
      namebuf);
    log_err(-1, __func__, log_buf);

#ifndef PBS_MOM
    unlock_ji_mutex(pj, __func__, "4", LOGLEVEL);
    free(pj->ji_mutex);
#endif
    free((char *)pj);

    close(fds);

    return(NULL);
    }

  /* read in working attributes */
  if (recov_attr(
        fds,
        pj,
        job_attr_def,
        pj->ji_wattr,
        JOB_ATR_LAST,
        JOB_ATR_UNKN,
        TRUE) != 0)
    {
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE, "unable to recover %s (file is likely corrupted)", namebuf);
    log_err(-1, __func__, log_buf);

#ifndef PBS_MOM
    unlock_ji_mutex(pj, __func__, "5", LOGLEVEL);
    job_free(pj, FALSE);
#else
    mom_job_free(pj);
#endif

    close(fds);

    return(NULL);
    }

#ifndef PBS_MOM
  /* Comment out the mother superior tracking. Will be debugged later
  if (pj->ji_wattr[JOB_ATR_exec_host].at_val.at_str != NULL)
    {*/
    /* add job to the mother superior list for it's node */
  /*  char *ms = strdup(pj->ji_wattr[JOB_ATR_exec_host].at_val.at_str);
    char *end = strchr(ms, '/');

    if (end != NULL)
      *end = '\0';

    if ((end = strchr(ms, '+')) != NULL)
      *end = '\0';

    add_to_ms_list(ms, pj);

    free(ms);
    }*/
#endif

#ifdef PBS_MOM
  /* read in tm sockets and ips */
  if (recov_tmsock(fds, pj) != 0)
    {
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE,
      "warning: tmsockets not recovered from %s (written by an older pbs_mom?)",
      namebuf);
    log_err(-1, __func__, log_buf);
    }

#else /* not PBS_MOM */

  if (strchr(pj->ji_qs.ji_jobid, '[') != NULL)
    {
    /* job is part of an array.  We need to put a link back to the server
    job array struct for this array. We also have to link this job into
    the linked list of jobs belonging to the array. */
    array_get_parent_id(pj->ji_qs.ji_jobid, parent_id);
    pa = get_array(parent_id);

    if (pa == NULL)
      {
      /* NOTE(review): job_abt() takes ownership of pj here; nothing further
       * is freed on this path */
      job_abt(&pj, (char *)"Array job missing array struct, aborting job");

      close(fds);

      return NULL;
      }

    strcpy(pj->ji_arraystructid, parent_id);

    /* the "job" whose id equals the array's parent id is the template job */
    if (strcmp(parent_id, pj->ji_qs.ji_jobid) == 0)
      {
      pj->ji_is_array_template = TRUE;
      }
    else
      {
      pa->job_ids[(int)pj->ji_wattr[JOB_ATR_job_array_id].at_val.at_long] = strdup(pj->ji_qs.ji_jobid);
      pa->jobs_recovered++;

      /* This is a bit of a kluge, but for some reason if an array job was
         on hold when the server went down the ji_wattr[JOB_ATR_hold].at_val.at_long
         value is 0 on recovery even though pj->ji_qs.ji_state is JOB_STATE_HELD and
         the substate is JOB_SUBSTATE_HELD */
      if ((pj->ji_qs.ji_state == JOB_STATE_HELD) &&
          (pj->ji_qs.ji_substate == JOB_SUBSTATE_HELD))
        {
        pj->ji_wattr[JOB_ATR_hold].at_val.at_long = HOLD_l;
        pj->ji_wattr[JOB_ATR_hold].at_flags = ATR_VFLAG_SET;
        }
      }

    if (pa != NULL)
      {
      unlock_ai_mutex(pa, __func__, "1", LOGLEVEL);
      }
    }
#endif

  close(fds);

  pj->ji_commit_done = 1;

  /* all done recovering the job */
  job_save(pj, SAVEJOB_FULL, 0);

  return(pj);
  } /* END job_recov() */
// Index into an element that holds a BSON array.  If this element is unset
// or its value is not an array, an invalid (default) array::element is
// returned instead of indexing.
array::element element::operator[](std::uint32_t i) const {
    const bool holds_array = (_raw != nullptr) && (type() == bsoncxx::type::k_array);
    if (!holds_array) {
        return array::element();
    }
    array::view subarray = get_array();
    return subarray[i];
}
/**
 * Load one mutation/trait definition from JSON into this mutation_branch.
 *
 * Scalar fields use the mandatory()/optional() helpers (optional() supplies
 * the default shown as the last argument); list-like fields are read with
 * explicit JsonArray loops.  Also registers this trait id in the global
 * mutations_category index for every category it lists.
 *
 * The second (source) parameter is unused here.
 */
void mutation_branch::load( JsonObject &jo, const std::string & )
{
    mandatory( jo, was_loaded, "id", id );
    mandatory( jo, was_loaded, "name", raw_name, translated_string_reader );
    mandatory( jo, was_loaded, "description", raw_desc, translated_string_reader );
    mandatory( jo, was_loaded, "points", points );

    optional( jo, was_loaded, "visibility", visibility, 0 );
    optional( jo, was_loaded, "ugliness", ugliness, 0 );
    optional( jo, was_loaded, "starting_trait", startingtrait, false );
    optional( jo, was_loaded, "mixed_effect", mixed_effect, false );
    optional( jo, was_loaded, "active", activated, false );
    optional( jo, was_loaded, "starts_active", starts_active, false );
    optional( jo, was_loaded, "destroys_gear", destroys_gear, false );
    optional( jo, was_loaded, "allow_soft_gear", allow_soft_gear, false );
    optional( jo, was_loaded, "cost", cost, 0 );
    optional( jo, was_loaded, "time", cooldown, 0 );
    optional( jo, was_loaded, "hunger", hunger, false );
    optional( jo, was_loaded, "thirst", thirst, false );
    optional( jo, was_loaded, "fatigue", fatigue, false );
    optional( jo, was_loaded, "valid", valid, true );
    optional( jo, was_loaded, "purifiable", purifiable, true );

    // Optional nested objects: item spawned / ranged attack granted by the
    // mutation, each with its own message.
    if( jo.has_object( "spawn_item" ) ) {
        auto si = jo.get_object( "spawn_item" );
        optional( si, was_loaded, "type", spawn_item );
        optional( si, was_loaded, "message", raw_spawn_item_message );
    }
    if( jo.has_object( "ranged_mutation" ) ) {
        auto si = jo.get_object( "ranged_mutation" );
        optional( si, was_loaded, "type", ranged_mutation );
        optional( si, was_loaded, "message", raw_ranged_mutation_message );
    }

    optional( jo, was_loaded, "initial_ma_styles", initial_ma_styles );

    // [min, max] pair of body-temperature adjustments.
    if( jo.has_array( "bodytemp_modifiers" ) ) {
        auto bodytemp_array = jo.get_array( "bodytemp_modifiers" );
        bodytemp_min = bodytemp_array.get_int( 0 );
        bodytemp_max = bodytemp_array.get_int( 1 );
    }

    optional( jo, was_loaded, "bodytemp_sleep", bodytemp_sleep, 0 );
    optional( jo, was_loaded, "threshold", threshold, false );
    optional( jo, was_loaded, "profession", profession, false );
    optional( jo, was_loaded, "debug", debug, false );
    optional( jo, was_loaded, "player_display", player_display, true );

    // "vitamin_rates": list of [vitamin_id, turns] pairs.
    JsonArray vr = jo.get_array( "vitamin_rates" );

    while( vr.has_more() ) {
        auto pair = vr.next_array();
        vitamin_rates.emplace( vitamin_id( pair.get_string( 0 ) ),
                               time_duration::from_turns( pair.get_int( 1 ) ) );
    }

    // "vitamins_absorb_multi": [material_id, [[vitamin_id, multiplier]...]].
    auto vam = jo.get_array( "vitamins_absorb_multi" );
    while( vam.has_more() ) {
        auto pair = vam.next_array();
        std::map<vitamin_id, double> vit;
        auto vit_array = pair.get_array( 1 );
        // fill the inner map with vitamins
        while( vit_array.has_more() ) {
            auto vitamins = vit_array.next_array();
            vit.emplace( vitamin_id( vitamins.get_string( 0 ) ), vitamins.get_float( 1 ) );
        }
        // assign the inner vitamin map to the material_id key
        vitamin_absorb_multi.emplace( material_id( pair.get_string( 0 ) ), vit );
    }

    optional( jo, was_loaded, "healing_awake", healing_awake, 0.0f );
    optional( jo, was_loaded, "healing_resting", healing_resting, 0.0f );
    optional( jo, was_loaded, "hp_modifier", hp_modifier, 0.0f );
    optional( jo, was_loaded, "hp_modifier_secondary", hp_modifier_secondary, 0.0f );
    optional( jo, was_loaded, "hp_adjustment", hp_adjustment, 0.0f );
    optional( jo, was_loaded, "stealth_modifier", stealth_modifier, 0.0f );
    optional( jo, was_loaded, "str_modifier", str_modifier, 0.0f );
    optional( jo, was_loaded, "dodge_modifier", dodge_modifier, 0.0f );
    optional( jo, was_loaded, "speed_modifier", speed_modifier, 1.0f );
    optional( jo, was_loaded, "movecost_modifier", movecost_modifier, 1.0f );
    optional( jo, was_loaded, "movecost_flatground_modifier", movecost_flatground_modifier, 1.0f );
    optional( jo, was_loaded, "movecost_obstacle_modifier", movecost_obstacle_modifier, 1.0f );
    optional( jo, was_loaded, "attackcost_modifier", attackcost_modifier, 1.0f );
    optional( jo, was_loaded, "max_stamina_modifier", max_stamina_modifier, 1.0f );
    optional( jo, was_loaded, "weight_capacity_modifier", weight_capacity_modifier, 1.0f );
    optional( jo, was_loaded, "hearing_modifier", hearing_modifier, 1.0f );
    optional( jo, was_loaded, "noise_modifier", noise_modifier, 1.0f );
    optional( jo, was_loaded, "metabolism_modifier", metabolism_modifier, 0.0f );
    optional( jo, was_loaded, "thirst_modifier", thirst_modifier, 0.0f );
    optional( jo, was_loaded, "fatigue_modifier", fatigue_modifier, 0.0f );
    optional( jo, was_loaded, "fatigue_regen_modifier", fatigue_regen_modifier, 0.0f );
    optional( jo, was_loaded, "stamina_regen_modifier", stamina_regen_modifier, 0.0f );
    optional( jo, was_loaded, "overmap_sight", overmap_sight, 0.0f );
    optional( jo, was_loaded, "overmap_multiplier", overmap_multiplier, 1.0f );

    if( jo.has_object( "social_modifiers" ) ) {
        JsonObject sm = jo.get_object( "social_modifiers" );
        social_mods = load_mutation_social_mods( sm );
    }

    load_mutation_mods( jo, "passive_mods", mods );
    /* Not currently supported due to inability to save active mutation state
    load_mutation_mods(jsobj, "active_mods", new_mut.mods); */

    optional( jo, was_loaded, "prereqs", prereqs );
    optional( jo, was_loaded, "prereqs2", prereqs2 );
    optional( jo, was_loaded, "threshreq", threshreq );
    optional( jo, was_loaded, "cancels", cancels );
    optional( jo, was_loaded, "changes_to", replacements );
    optional( jo, was_loaded, "leads_to", additions );
    optional( jo, was_loaded, "flags", flags );
    optional( jo, was_loaded, "types", types );

    // Register this trait under each listed category (global index).
    auto jsarr = jo.get_array( "category" );
    while( jsarr.has_more() ) {
        std::string s = jsarr.next_string();
        category.push_back( s );
        mutations_category[s].push_back( trait_id( id ) );
    }

    // "wet_protection": per-body-part (ignored, neutral, good) triples.
    // NOTE(review): the inner `jo` declarations below shadow the function
    // parameter; intentional but easy to misread.
    jsarr = jo.get_array( "wet_protection" );
    while( jsarr.has_more() ) {
        JsonObject jo = jsarr.next_object();
        std::string part_id = jo.get_string( "part" );
        int ignored = jo.get_int( "ignored", 0 );
        int neutral = jo.get_int( "neutral", 0 );
        int good = jo.get_int( "good", 0 );
        tripoint protect = tripoint( ignored, neutral, good );
        protection[get_body_part_token( part_id )] = protect;
    }

    // Encumbrance applied unconditionally per body part.
    jsarr = jo.get_array( "encumbrance_always" );
    while( jsarr.has_more() ) {
        JsonArray jo = jsarr.next_array();
        std::string part_id = jo.next_string();
        int enc = jo.next_int();
        encumbrance_always[get_body_part_token( part_id )] = enc;
    }

    // Encumbrance applied only when the body part is covered.
    jsarr = jo.get_array( "encumbrance_covered" );
    while( jsarr.has_more() ) {
        JsonArray jo = jsarr.next_array();
        std::string part_id = jo.next_string();
        int enc = jo.next_int();
        encumbrance_covered[get_body_part_token( part_id )] = enc;
    }

    jsarr = jo.get_array( "restricts_gear" );
    while( jsarr.has_more() ) {
        restricts_gear.insert( get_body_part_token( jsarr.next_string() ) );
    }

    // "armor": each entry lists body parts ("ALL" = every part) plus the
    // resistances granted to them.
    jsarr = jo.get_array( "armor" );
    while( jsarr.has_more() ) {
        JsonObject jo = jsarr.next_object();
        auto parts = jo.get_tags( "parts" );
        std::set<body_part> bps;
        for( const std::string &part_string : parts ) {
            if( part_string == "ALL" ) {
                // Shorthand, since many mutations protect whole body
                bps.insert( all_body_parts.begin(), all_body_parts.end() );
            } else {
                bps.insert( get_body_part_token( part_string ) );
            }
        }
        resistances res = load_resistances_instance( jo );
        for( body_part bp : bps ) {
            armor[ bp ] = res;
        }
    }

    // "attacks" may be an array of attack objects or a single object.
    if( jo.has_array( "attacks" ) ) {
        jsarr = jo.get_array( "attacks" );
        while( jsarr.has_more() ) {
            JsonObject jo = jsarr.next_object();
            attacks_granted.emplace_back( load_mutation_attack( jo ) );
        }
    } else if( jo.has_object( "attacks" ) ) {
        JsonObject attack = jo.get_object( "attacks" );
        attacks_granted.emplace_back( load_mutation_attack( attack ) );
    }
}
/*
 * req_deletearray - service a Delete Job request aimed at a job array (or an
 * ARRAY_RANGE subset of it).
 *
 * @param preq - the batch request; this routine either answers it
 *        (reply_ack / req_reject) or hands it to a delayed work task via
 *        array_delete_wt when some sub-jobs could not be deleted yet.
 */
void req_deletearray(struct batch_request *preq)

  {
  job_array        *pa;
  char             *range;
  struct work_task *ptask;
  int               num_skipped;
  char              owner[PBS_MAXUSER + 1];

  pa = get_array(preq->rq_ind.rq_delete.rq_objname);

  /* array already gone: treat the delete as trivially successful */
  if (pa == NULL)
    {
    reply_ack(preq);
    return;
    }

  /* check authorization */
  get_jobowner(pa->ai_qs.owner, owner);

  if (svr_authorize_req(preq, owner, pa->ai_qs.submit_host) == -1)
    {
    sprintf(log_buffer, msg_permlog,
      preq->rq_type,
      "Array",
      preq->rq_ind.rq_delete.rq_objname,
      preq->rq_user,
      preq->rq_host);

    log_event(
      PBSEVENT_SECURITY,
      PBS_EVENTCLASS_JOB,
      preq->rq_ind.rq_delete.rq_objname,
      log_buffer);

    req_reject(PBSE_PERM, 0, preq, NULL, "operation not permitted");

    return;
    }

  /* get the range of jobs to iterate over */
  range = preq->rq_extend;

  if ((range != NULL) &&
      (strstr(range,ARRAY_RANGE) != NULL))
    {
    if (LOGLEVEL >= 5)
      {
      sprintf(log_buffer, "delete array requested by %s@%s for %s (%s)",
        preq->rq_user,
        preq->rq_host,
        preq->rq_ind.rq_delete.rq_objname,
        range);

      log_record(
        PBSEVENT_JOB,
        PBS_EVENTCLASS_JOB,
        "req_deletearray",
        log_buffer);
      }

    /* parse the array range */
    num_skipped = delete_array_range(pa,range);

    if (num_skipped < 0)
      {
      /* ERROR */
      req_reject(PBSE_IVALREQ,0,preq,NULL,"Error in specified array range");
      return;
      }
    }
  else
    {
    if (LOGLEVEL >= 5)
      {
      sprintf(log_buffer, "delete array requested by %s@%s for %s",
        preq->rq_user,
        preq->rq_host,
        preq->rq_ind.rq_delete.rq_objname);

      log_record(
        PBSEVENT_JOB,
        PBS_EVENTCLASS_JOB,
        "req_deletearray",
        log_buffer);
      }

    num_skipped = delete_whole_array(pa);
    }

  /* check if the array is gone */
  if ((pa = get_array(preq->rq_ind.rq_delete.rq_objname)) != NULL)
    {
    /* some jobs were not deleted.  They must have been running or had
       JOB_SUBSTATE_TRANSIT */
    if (num_skipped != 0)
      {
      /* retry in 2 seconds; the work task now owns preq, so do not reply
       * here.  If set_task fails we fall through and ack instead. */
      ptask = set_task(WORK_Timed, time_now + 2, array_delete_wt, preq);

      if (ptask)
        {
        return;
        }
      }
    }

  /* now that the whole array is deleted, we should mail the user if necessary */

  reply_ack(preq);

  return;
  }
// Index into an element that holds a BSON array.
//
// Guard added for consistency with the sibling overload of this operator in
// this file: when the element is unset (_raw == nullptr) or its value is not
// a BSON array, return an invalid (default-constructed) array::element
// instead of calling get_array() on inappropriate data.
array::element element::operator[](std::uint32_t i) const {
    if (_raw == nullptr || type() != bsoncxx::type::k_array)
        return array::element();
    array::view arr = get_array();
    return arr[i];
}
/*
 * job_recov - recover (read in) a job from its saved quick-save file.
 *
 * @param filename (I) - file name relative to path_jobs (not a full path).
 * @return pointer to the recovered job, or NULL on failure (allocation,
 *         open/read, version-upgrade, ghost-file mismatch, or attribute
 *         recovery).
 *
 * If the quick-save record had to be upgraded from an older version, the
 * job is re-saved in the current format before returning.
 */
job *job_recov(

  char *filename) /* I */   /* pathname to job save file */

  {
  int   fds;
  job  *pj;
  char *pn;
  char  namebuf[MAXPATHLEN];
  int   qs_upgrade; /* TRUE if the on-disk record was converted */

#ifndef PBS_MOM
  char       parent_id[PBS_MAXSVRJOBID + 1];
  job_array *pa;
#endif

  qs_upgrade = FALSE;

  pj = job_alloc(); /* allocate & initialize job structure space */

  if (pj == NULL)
    {
    /* FAILURE - cannot alloc memory */
    return(NULL);
    }

  strcpy(namebuf, path_jobs); /* job directory path */
  strcat(namebuf, filename);

  fds = open(namebuf, O_RDONLY, 0);

  if (fds < 0)
    {
    sprintf(log_buffer, "unable to open %s", namebuf);
    log_err(errno, "job_recov", log_buffer);

    free((char *)pj);

    /* FAILURE - cannot open job file */
    return(NULL);
    }

  /* read in job quick save sub-structure */
  /* NOTE(review): a short read is only fatal when the (partially read)
   * version field matches PBS_QS_VERSION; otherwise we fall through to the
   * upgrade path below -- presumably to allow shorter, older records */
  if (read(fds, (char *)&pj->ji_qs, quicksize) != (ssize_t)quicksize &&
      pj->ji_qs.qs_version == PBS_QS_VERSION)
    {
    sprintf(log_buffer, "Unable to read %s", namebuf);
    log_err(errno, "job_recov", log_buffer);

    free((char *)pj);

    close(fds);

    return(NULL);
    }

  /* is ji_qs the version we expect? */
  if (pj->ji_qs.qs_version != PBS_QS_VERSION)
    {
    /* ji_qs is older version */
    sprintf(log_buffer, "%s appears to be from an old version. Attempting to convert.\n",
      namebuf);
    log_err(-1, "job_recov", log_buffer);

    if (job_qs_upgrade(pj, fds, namebuf, pj->ji_qs.qs_version) != 0)
      {
      sprintf(log_buffer, "unable to upgrade %s\n", namebuf);
      log_err(-1, "job_recov", log_buffer);

      free((char *)pj);

      close(fds);

      return(NULL);
      }

    qs_upgrade = TRUE;
    } /* END if (pj->ji_qs.qs_version != PBS_QS_VERSION) */

  /* Does file name match the internal name? */
  /* This detects ghost files */
  pn = strrchr(namebuf, (int)'/') + 1;

  if (strncmp(pn, pj->ji_qs.ji_fileprefix, strlen(pj->ji_qs.ji_fileprefix)) != 0)
    {
    /* mismatch, discard job */
    sprintf(log_buffer, "Job Id %s does not match file name for %s",
      pj->ji_qs.ji_jobid,
      namebuf);
    log_err(-1, "job_recov", log_buffer);

    free((char *)pj);

    close(fds);

    return(NULL);
    }

  /* read in working attributes */
  if (recov_attr(
        fds,
        pj,
        job_attr_def,
        pj->ji_wattr,
        (int)JOB_ATR_LAST,
        (int)JOB_ATR_UNKN,
        TRUE) != 0)
    {
    sprintf(log_buffer, "unable to recover %s (file is likely corrupted)", namebuf);
    log_err(-1, "job_recov", log_buffer);

    job_free(pj);

    close(fds);

    return(NULL);
    }

#ifdef PBS_MOM
  /* read in tm sockets and ips */
  if (recov_tmsock(fds, pj) != 0)
    {
    sprintf(log_buffer, "warning: tmsockets not recovered from %s (written by an older pbs_mom?)",
      namebuf);
    log_err(-1, "job_recov", log_buffer);
    }

  if (recov_roottask(fds, pj) != 0)
    {
    sprintf(log_buffer, "warning: root task not recovered from %s (written by an older pbs_mom?)",
      namebuf);
    log_err(-1, "job_recov", log_buffer);
    }

  if (recov_jobflags(fds, pj) != 0)
    {
    sprintf(log_buffer, "warning: job flags not recovered from %s (written by an older pbs_mom?)",
      namebuf);
    log_err(-1, "job_recov", log_buffer);
    }

#else /* PBS_MOM */

  if (pj->ji_wattr[(int)JOB_ATR_job_array_request].at_flags & ATR_VFLAG_SET)
    {
    /* job is part of an array.  We need to put a link back to the server
    job array struct for this array. We also have to link this job into
    the linked list of jobs belonging to the array. */
    array_get_parent_id(pj->ji_qs.ji_jobid, parent_id);
    pa = get_array(parent_id);

    /* the "job" whose id equals the array's parent id is the template job */
    if (strcmp(parent_id, pj->ji_qs.ji_jobid) == 0)
      {
      pj->ji_isparent = TRUE;
      }
    else
      {
      if (pa == NULL)
        {
        /* couldn't find array struct, it must not have been recovered,
           treat job as indepentent job? perhaps we should delete the job
           XXX_JOB_ARRAY: should I unset this?*/
        pj->ji_wattr[(int)JOB_ATR_job_array_request].at_flags &= ~ATR_VFLAG_SET;
        }
      else
        {
        CLEAR_LINK(pj->ji_arrayjobs);
        append_link(&pa->array_alljobs, &pj->ji_arrayjobs, (void*)pj);

        pj->ji_arraystruct = pa;

        pa->jobs_recovered++;
        }
      }
    }
#endif

  close(fds);

  /* all done recovering the job */
  /* re-save only if the record was converted from an older format */
  if (qs_upgrade == TRUE)
    {
    job_save(pj, SAVEJOB_FULL);
    }

  return(pj);
  } /* END job_recov() */
// Element access by position.
// Precondition: this variant holds an array (see is_array()); get_array()
// enforces/relies on that.  No bounds checking beyond the underlying
// container's operator[].
const variant& variant::operator[]( size_t pos )const
{
   const auto& elements = get_array();
   return elements[pos];
}
/*
 * modify_array_work - perform a qalter-style modification of a job array,
 * either for an explicit ARRAY_RANGE subset or for the whole array.
 *
 * @param vp - really a batch_request *.  The request is always answered:
 *        req_reject / reply_ack here, or by modify_job() (which also frees
 *        the request) on the whole-array path.
 * @return NULL always.
 *
 * Locking: get_array() returns with ai_mutex held; mutex_mgr releases it on
 * every return path.  pjob's ji_mutex is likewise scoped by mutex_mgr.
 */
void *modify_array_work(

  void *vp)

  {
  batch_request *preq = (batch_request *)vp;
  svrattrl      *plist;
  int            rc = 0;
  char          *pcnt = NULL;
  char          *array_spec = NULL;
  int            checkpoint_req = FALSE;
  job           *pjob = NULL;
  job_array     *pa;

  pa = get_array(preq->rq_ind.rq_modify.rq_objname);

  if (pa == NULL)
    {
    req_reject(PBSE_UNKARRAYID, 0, preq, NULL, "unable to find array");
    return(NULL);
    }

  mutex_mgr array_mutex(pa->ai_mutex, true);

  /* pbs_mom sets the extend string to trigger copying of checkpoint files */
  if (preq->rq_extend != NULL)
    {
    if (strcmp(preq->rq_extend,CHECKPOINTHOLD) == 0)
      {
      checkpoint_req = CHK_HOLD;
      }
    else if (strcmp(preq->rq_extend,CHECKPOINTCONT) == 0)
      {
      checkpoint_req = CHK_CONT;
      }
    }

  /* find if an array range was specified */
  if ((preq->rq_extend != NULL) &&
      ((array_spec = strstr(preq->rq_extend,ARRAY_RANGE)) != NULL))
    {
    /* move array spec past ARRAY_RANGE= */
    char *equals = strchr(array_spec,'=');

    if (equals != NULL)
      {
      array_spec = equals + 1;
      }

    /* a trailing %N sets the array's slot limit */
    if ((pcnt = strchr(array_spec,'%')) != NULL)
      {
      int slot_limit = atoi(pcnt+1);

      pa->ai_qs.slot_limit = slot_limit;
      }
    }

  plist = (svrattrl *)GET_NEXT(preq->rq_ind.rq_modify.rq_attr);

  if ((array_spec != NULL) &&
      (pcnt != array_spec))
    {
    /* temporarily cut the string at '%' so only the range is parsed */
    if (pcnt != NULL)
      *pcnt = '\0';

    /* there is more than just a slot given, modify that range */
    rc = modify_array_range(pa,array_spec,plist,preq,checkpoint_req);

    if (pcnt != NULL)
      *pcnt = '%';

    if ((rc != 0) &&
        (rc != PBSE_RELAYED_TO_MOM))
      {
      req_reject(PBSE_IVALREQ,0,preq,NULL,"Error reading array range");
      return(NULL);
      }
    else
      reply_ack(preq);

    return(NULL);
    }
  else
    {
    rc = modify_whole_array(pa,plist,preq,checkpoint_req);

    if ((rc != 0) &&
        (rc != PBSE_RELAYED_TO_MOM))
      {
      req_reject(PBSE_IVALREQ, 0, preq, NULL,
        "At least one array element did not modify successfully. Use qstat -f to verify changes");
      return(NULL);
      }

    /* we modified the job array. We now need to update the job */
    if ((pjob = chk_job_request(preq->rq_ind.rq_modify.rq_objname, preq)) == NULL)
      return(NULL);

    mutex_mgr job_mutex = mutex_mgr(pjob->ji_mutex, true);

    /* modify_job will reply to preq and free it */
    modify_job((void **)&pjob, plist, preq, checkpoint_req, NO_MOM_RELAY);
    }

  return(NULL);
  } /* END modify_array_work() */
/// @pre is_array() size_t variant::size()const { return get_array().size(); }
// set bit to true inline void set_bit(const size_t i) { BITMAP_TYPE* p(get_array(i)); *p = *p | ((BITMAP_TYPE)0x1 << (BITMAP_TYPE)(i % BITMAP_BITS)); }
/*
 * set_array_job_ids - during recovery, link an array sub-job back to its
 * job_array struct and record its id in the array's job_ids table.
 *
 * @param pjob    (M) - pointer to the job pointer; may be aborted (and thus
 *                      invalidated) when the array struct is missing.
 * @param log_buf     - buffer for an error message on failure.
 * @param buflen      - size of log_buf.
 * @return PBSE_NONE on success (including the not-an-array-subjob and
 *         template-job cases); -1 when the parent array struct is missing
 *         (the job is aborted via job_abt in that case).
 *
 * Entire body is server-side only (#ifndef PBS_MOM); on the MOM this is a
 * no-op returning PBSE_NONE.
 */
int set_array_job_ids(

  job  **pjob,    /* M */
  char  *log_buf, /* error Buffer */
  size_t buflen)  /* error buffer length */

  {
  int rc = PBSE_NONE;
#ifndef PBS_MOM
  job       *pj = *pjob;
  job_array *pa;
  char       parent_id[PBS_MAXSVRJOBID + 1];

  // If this variable isn't set this job isn't actually an array subjob.
  if ((pj->ji_wattr[JOB_ATR_job_array_id].at_flags & ATR_VFLAG_SET) == 0)
    {
    // Check and set if this is the array template job
    char *open_bracket = strchr(pj->ji_qs.ji_jobid, '[');

    if (open_bracket != NULL)
      {
      // "jobid[]" (empty brackets) marks the array template job
      if (*(open_bracket + 1) == ']')
        pj->ji_is_array_template = true;
      }

    return(rc);
    }

  if (strchr(pj->ji_qs.ji_jobid, '[') != NULL)
    {
    /* job is part of an array.  We need to put a link back to the server
    job array struct for this array. We also have to link this job into
    the linked list of jobs belonging to the array. */

    array_get_parent_id(pj->ji_qs.ji_jobid, parent_id);
    // get_array() returns with the array's ai_mutex held on success
    pa = get_array(parent_id);

    if (pa == NULL)
      {
      // job_abt() takes ownership of pj; *pjob is no longer usable
      job_abt(&pj, (char *)"Array job missing array struct, aborting job");
      snprintf(log_buf, buflen, "Array job missing array struct %s", __func__);
      return(-1);
      }

    strcpy(pj->ji_arraystructid, parent_id);

    // A job whose id equals the array's parent id is the template job.
    if (strcmp(parent_id, pj->ji_qs.ji_jobid) == 0)
      {
      pj->ji_is_array_template = true;
      }
    else
      {
      pa->job_ids[(int)pj->ji_wattr[JOB_ATR_job_array_id].at_val.at_long] = strdup(pj->ji_qs.ji_jobid);
      pa->jobs_recovered++;

      /* This is a bit of a kluge, but for some reason if an array job was
         on hold when the server went down the ji_wattr[JOB_ATR_hold].at_val.at_long
         value is 0 on recovery even though pj->ji_qs.ji_state is JOB_STATE_HELD and
         the substate is JOB_SUBSTATE_HELD */
      if ((pj->ji_qs.ji_state == JOB_STATE_HELD) &&
          (pj->ji_qs.ji_substate == JOB_SUBSTATE_HELD))
        {
        pj->ji_wattr[JOB_ATR_hold].at_val.at_long = HOLD_l;
        pj->ji_wattr[JOB_ATR_hold].at_flags = ATR_VFLAG_SET;
        }
      }

    if (pa != NULL)
      {
      unlock_ai_mutex(pa, __func__, "1", LOGLEVEL);
      }
    }
#endif /* !PBS_MOM */

  return(rc);
  }