void worker_thread(void *user)
{
	JOBPOOL *pool = (JOBPOOL *)user;

	while(1)
	{
		JOB *job = 0;

		/* fetch job from queue */
		lock_wait(pool->lock);
		if(pool->first_job)
		{
			job = pool->first_job;
			pool->first_job = pool->first_job->next;
			if(pool->first_job)
				pool->first_job->prev = 0;
			else
				pool->last_job = 0;
		}
		lock_release(pool->lock);

		/* do the job if we have one */
		if(job)
		{
			job->status = JOBSTATUS_RUNNING;
			job->result = job->func(job->func_data);
			job->status = JOBSTATUS_DONE;
		}
		else
			thread_sleep(10);
	}
}
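/*
 * The worker above consumes jobs from a doubly linked list guarded by pool->lock.
 * For context, here is a minimal sketch of the matching producer side, using only
 * the fields and lock primitives that appear above. The function name jobpool_add(),
 * the JOBFUNC callback type, and the JOBSTATUS_PENDING value are assumptions for
 * illustration, not part of the snippet's actual API.
 */
void jobpool_add(JOBPOOL *pool, JOB *job, JOBFUNC func, void *func_data)
{
	job->func = func;
	job->func_data = func_data;
	job->status = JOBSTATUS_PENDING;   /* assumed "not started yet" state */
	job->next = 0;

	lock_wait(pool->lock);
	job->prev = pool->last_job;        /* append at the tail of the list */
	if(pool->last_job)
		pool->last_job->next = job;
	else
		pool->first_job = job;         /* list was empty */
	pool->last_job = job;
	lock_release(pool->lock);
}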
/*
 * Enter with Resources locked
 */
static void show_disabled_jobs(UAContext *ua)
{
   JOB *job;
   bool first = true;
   foreach_res(job, R_JOB) {
      if (!acl_access_ok(ua, Job_ACL, job->name())) {
         continue;
      }
      if (!job->enabled) {
         if (first) {
            first = false;
            ua->send_msg(_("Disabled Jobs:\n"));
         }
         ua->send_msg(" %s\n", job->name());
      }
   }
   if (first) {
      ua->send_msg(_("No disabled Jobs.\n"));
   }
}
int main() // main function
{
    srand(time(NULL)); // seed the random number generator

    double timeSlice;      // declare three variables that we will get user input for
    double simulationTime;
    double newProbability;

    cout << "How long do you want your time slices to be: " << endl; // user input: length of each time slice
    cin >> timeSlice;
    cout << "How long do you want to run this simulation: " << endl; // user input: how long to run the simulation
    cin >> simulationTime;
    cout << "Probability that a new job will enter queue: " << endl; // user input: odds of a new job arriving
    cin >> newProbability;

    queue<JOB> jobs; // make a queue that holds JOB objects

    // create five JOB objects and push them onto the queue
    JOB one;
    jobs.push(one);
    JOB two;
    jobs.push(two);
    JOB three;
    jobs.push(three);
    JOB four;
    jobs.push(four);
    JOB five;
    jobs.push(five);

    JOB tempJob; // temporary job to hold a copy of jobs.front()

    while (!jobs.empty()) // while there are still jobs in the queue, run this loop
    {
        tempJob = jobs.front(); // copy the value of jobs.front() into tempJob
        cout << "\n--------------" << endl;
        cout << "Job # " << tempJob.getId() << " is DeQueued with: "
             << (jobs.front()).getremaining() << " seconds remaining." << endl; // take the first job off the queue
        cout << "\nJob # " << tempJob.getId() << " -- " << tempJob.getType()
             << " -- is now in the CPU" << endl; // state the job's type and put it into the CPU for processing
        cout << "Total " << (jobs.front()).getTotal() << endl; // state the job's initial total

        if ((jobs.front()).getremaining() > timeSlice) // will the job need to be re-added to the queue after this slice?
        {
            tempJob.setRemaining(tempJob.getremaining() - timeSlice); // remaining time exceeds the slice, so reduce it by the slice
            TIMEUSED = TIMEUSED + timeSlice; // add this slice to the total CPU time used
            cout << "\nJob # " << tempJob.getId() << " leaves the CPU with: "
                 << tempJob.getremaining() << " seconds left." << endl; // how much time is left as the job leaves the CPU
            cout << "\nCPU time used so far: " << TIMEUSED << endl;
            cout << "Job is enqueued again." << endl; // tell the user the job is added to the end of the queue again
            //cout << "Total " << tempJob.getTotal() << endl;
            jobs.push(tempJob); // push the updated job onto the end of the FIFO queue
            jobs.pop();         // pop the old copy off the beginning of the queue
        } // end if
        else // the job's remaining time is LESS than the time slice, so there will be idle time
        {
            double idleTime; // how long the CPU idled
            tempJob.setRemaining(timeSlice - tempJob.getremaining()); // temporarily store the idle time in the job's remaining field
            idleTime = tempJob.getremaining(); // idle time = time slice - remaining time
            TIMEUSED = TIMEUSED + timeSlice;   // add the full slice to the total time
            cout << "Job # " << tempJob.getId() << " is DeQueued with less than " << timeSlice
                 << " seconds remaining and: " << tempJob.getremaining() << " seconds idled." << endl;
            cout << "\nJob # " << tempJob.getId() << " -- " << tempJob.getType()
                 << " -- is now in the CPU" << endl; // the job enters the CPU for the last time
            cout << "Job # " << tempJob.getId() << " didn't use all the timeslice. The idle time is: " << idleTime << endl;
            cout << "\n0 time remaining with: " << TIMEUSED << " CPU time used so far." << endl; // the job is done and has used some CPU time
            //cout << "Total " << tempJob.getTotal() << endl;
            jobs.pop(); // ONLY pop the job; it is not re-queued, so it is gone for good
        } // end else
    } // end while

    return 0; // allow int main to close
} // end main
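/*
 * main() above relies on a JOB class and a global TIMEUSED accumulator defined
 * elsewhere in the program (along with <iostream>, <queue>, <cstdlib>, <ctime>
 * and "using namespace std;"). Below is a minimal sketch of the interface main()
 * calls -- getId(), getType(), getremaining(), setRemaining(), getTotal() -- under
 * assumed semantics; the member names, the random service time, and the "batch"
 * type string are illustrative assumptions, not the program's actual definitions.
 */
#include <cstdlib>
#include <string>

double TIMEUSED = 0;   // assumed global accumulator of CPU time consumed

class JOB {
public:
    // assumed: each job gets a sequential id and a random total service time
    JOB() : id(++nextId), total(std::rand() % 20 + 1), remaining(total), type("batch") {}

    int getId() const { return id; }
    std::string getType() const { return type; }
    double getTotal() const { return total; }
    double getremaining() const { return remaining; }   // spelling matches the calls in main()
    void setRemaining(double r) { remaining = r; }

private:
    static int nextId;
    int id;
    double total;       // total service time the job asked for
    double remaining;   // service time still owed
    std::string type;   // e.g. "batch"
};

int JOB::nextId = 0;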
/*
 * Check for duplicate jobs.
 *  Returns: true  if current job should continue
 *           false if current job should terminate
 */
bool allow_duplicate_job(JCR *jcr)
{
   JOB *job = jcr->job;
   JCR *djcr;                /* possible duplicate job */
   bool cancel_dup = false;
   bool cancel_me = false;

   /*
    * See if AllowDuplicateJobs is set or
    * if duplicate checking is disabled for this job.
    */
   if (job->AllowDuplicateJobs || jcr->IgnoreDuplicateJobChecking) {
      return true;
   }

   Dmsg0(800, "Enter allow_duplicate_job\n");

   /*
    * After this point, we do not want to allow any duplicate
    * job to run.
    */
   foreach_jcr(djcr) {
      if (jcr == djcr || djcr->JobId == 0) {
         continue;                    /* do not cancel this job or consoles */
      }

      /*
       * See if this Job has the IgnoreDuplicateJobChecking flag set, ignore it
       * for any checking against other jobs.
       */
      if (djcr->IgnoreDuplicateJobChecking) {
         continue;
      }

      if (strcmp(job->name(), djcr->job->name()) == 0) {
         if (job->DuplicateJobProximity > 0) {
            utime_t now = (utime_t)time(NULL);
            if ((now - djcr->start_time) > job->DuplicateJobProximity) {
               continue;              /* not really a duplicate */
            }
         }
         if (job->CancelLowerLevelDuplicates &&
             djcr->getJobType() == 'B' && jcr->getJobType() == 'B') {
            switch (jcr->getJobLevel()) {
            case L_FULL:
               if (djcr->getJobLevel() == L_DIFFERENTIAL ||
                   djcr->getJobLevel() == L_INCREMENTAL) {
                  cancel_dup = true;
               }
               break;
            case L_DIFFERENTIAL:
               if (djcr->getJobLevel() == L_INCREMENTAL) {
                  cancel_dup = true;
               }
               if (djcr->getJobLevel() == L_FULL) {
                  cancel_me = true;
               }
               break;
            case L_INCREMENTAL:
               if (djcr->getJobLevel() == L_FULL ||
                   djcr->getJobLevel() == L_DIFFERENTIAL) {
                  cancel_me = true;
               }
            }
            /*
             * cancel_dup will be done below
             */
            if (cancel_me) {
               /* Zap current job */
               Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"),
                  djcr->JobId);
               break;                 /* get out of foreach_jcr */
            }
         }

         /*
          * Cancel one of the two jobs (me or dup)
          * If CancelQueuedDuplicates is set do so only if job is queued.
          */
         if (job->CancelQueuedDuplicates) {
            switch (djcr->JobStatus) {
            case JS_Created:
            case JS_WaitJobRes:
            case JS_WaitClientRes:
            case JS_WaitStoreRes:
            case JS_WaitPriority:
            case JS_WaitMaxJobs:
            case JS_WaitStartTime:
               cancel_dup = true;     /* cancel queued duplicate */
               break;
            default:
               break;
            }
         }

         if (cancel_dup || job->CancelRunningDuplicates) {
            /*
             * Zap the duplicated job djcr
             */
            UAContext *ua = new_ua_context(jcr);
            Jmsg(jcr, M_INFO, 0, _("Cancelling duplicate JobId=%d.\n"), djcr->JobId);
            cancel_job(ua, djcr);
            bmicrosleep(0, 500000);
            cancel_job(ua, djcr);
            free_ua_context(ua);
            Dmsg2(800, "Cancel dup %p JobId=%d\n", djcr, djcr->JobId);
         } else {
            /*
             * Zap current job
             */
            Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"),
               djcr->JobId);
            Dmsg2(800, "Cancel me %p JobId=%d\n", jcr, jcr->JobId);
         }
         Dmsg4(800, "curJobId=%d use_cnt=%d dupJobId=%d use_cnt=%d\n",
            jcr->JobId, jcr->use_count(), djcr->JobId, djcr->use_count());
         break;                       /* did our work, get out of foreach loop */
      }
   }
   endeach_jcr(djcr);

   return true;
}
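/*
 * For reference, the CancelLowerLevelDuplicates branch above encodes a simple
 * precedence among backup levels: Full outranks Differential, which outranks
 * Incremental; the lower-level of the two duplicate jobs is the one cancelled,
 * while equal levels fall through to the queued/running checks that follow.
 * A standalone sketch of that ordering; level_rank() and duplicate_outranked()
 * are hypothetical helpers, not functions from the source above.
 */
static int level_rank(int level)
{
   switch (level) {
   case L_FULL:         return 3;
   case L_DIFFERENTIAL: return 2;
   case L_INCREMENTAL:  return 1;
   default:             return 0;
   }
}

/* true if the running duplicate is strictly lower level than the new job,
 * i.e. the duplicate is the one that should be cancelled */
static bool duplicate_outranked(int new_level, int dup_level)
{
   return level_rank(new_level) > level_rank(dup_level);
}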
void send_work_matchmaker() {
    int i, slots_locked=0, slots_nonempty=0;
    JOB_SET jobs;
    int min_slots = config.mm_min_slots;
    if (!min_slots) min_slots = ssp->max_wu_results/2;
    int max_slots = config.mm_max_slots;
    if (!max_slots) max_slots = ssp->max_wu_results;
    int max_locked = 10;

    lock_sema();
    i = rand() % ssp->max_wu_results;

    // scan through the job cache, maintaining a JOB_SET of jobs
    // that we can send to this client, ordered by score.
    //
    for (int slots_scanned=0; slots_scanned<max_slots; slots_scanned++) {
        i = (i+1) % ssp->max_wu_results;
        WU_RESULT& wu_result = ssp->wu_results[i];
        switch (wu_result.state) {
        case WR_STATE_EMPTY:
            continue;
        case WR_STATE_PRESENT:
            slots_nonempty++;
            break;
        default:
            slots_nonempty++;
            if (wu_result.state == g_pid) break;
            slots_locked++;
            continue;
        }

        JOB job;
        job.index = i;

        // get score for this job, and skip it if it fails quick check.
        // NOTE: the EDF check done in get_score()
        // includes only in-progress jobs.
        //
        if (!job.get_score()) {
            continue;
        }
        if (config.debug_send) {
            log_messages.printf(MSG_NORMAL,
                "[send] score for %s: %f\n", wu_result.workunit.name, job.score
            );
        }

        if (job.score > jobs.lowest_score() || !jobs.request_satisfied()) {
            ssp->wu_results[i].state = g_pid;
            unlock_sema();
            if (wu_is_infeasible_slow(wu_result, *g_request, *g_reply)) {
                // if we can't use this job, put it back in pool
                //
                lock_sema();
                ssp->wu_results[i].state = WR_STATE_PRESENT;
                continue;
            }
            lock_sema();
            jobs.add_job(job);
        }

        if (jobs.request_satisfied() && slots_scanned>=min_slots) break;
    }

    if (!slots_nonempty) {
        log_messages.printf(MSG_CRITICAL,
            "Job cache is empty - check feeder\n"
        );
        g_wreq->no_jobs_available = true;
    }

    // TODO: trim jobs from tail of list until we pass the EDF check
    //
    jobs.send();

    unlock_sema();

    if (slots_locked > max_locked) {
        log_messages.printf(MSG_CRITICAL,
            "Found too many locked slots (%d>%d) - increase array size\n",
            slots_locked, max_locked
        );
    }
}
// send work for a particular processor type
//
void send_work_score_type(int rt) {
    vector<JOB> jobs;

    if (config.debug_send) {
        log_messages.printf(MSG_NORMAL,
            "[send] scanning for %s jobs\n", proc_type_name(rt)
        );
    }
    clear_others(rt);
    int nscan = config.mm_max_slots;
    if (!nscan) nscan = ssp->max_wu_results;
    int rnd_off = rand() % ssp->max_wu_results;
    for (int j=0; j<nscan; j++) {
        int i = (j+rnd_off) % ssp->max_wu_results;
        WU_RESULT& wu_result = ssp->wu_results[i];
        if (wu_result.state != WR_STATE_PRESENT) {
            continue;
        }
        WORKUNIT wu = wu_result.workunit;
        JOB job;
        job.app = ssp->lookup_app(wu.appid);
        if (job.app->non_cpu_intensive) continue;
        job.bavp = get_app_version(wu, true, false);
        if (!job.bavp) continue;

        job.index = i;
        job.result_id = wu_result.resultid;
        if (!job.get_score(wu_result)) {
            continue;
        }
        jobs.push_back(job);
    }

    std::sort(jobs.begin(), jobs.end(), job_compare);

    bool sema_locked = false;
    for (unsigned int i=0; i<jobs.size(); i++) {
        if (!work_needed(false)) {
            break;
        }
        if (!g_wreq->need_proc_type(rt)) {
            break;
        }
        JOB& job = jobs[i];
        if (!sema_locked) {
            lock_sema();
            sema_locked = true;
        }

        // make sure the job is still in the cache
        // array is locked at this point.
        //
        WU_RESULT& wu_result = ssp->wu_results[job.index];
        if (wu_result.state != WR_STATE_PRESENT) {
            continue;
        }
        if (wu_result.resultid != job.result_id) {
            continue;
        }
        WORKUNIT wu = wu_result.workunit;
        int retval = wu_is_infeasible_fast(
            wu,
            wu_result.res_server_state, wu_result.res_priority,
            wu_result.res_report_deadline,
            *job.app, *job.bavp
        );
        if (retval) {
            continue;
        }
        wu_result.state = g_pid;

        // It passed fast checks.
        // Release sema and do slow checks
        //
        unlock_sema();
        sema_locked = false;

        switch (slow_check(wu_result, job.app, job.bavp)) {
        case 1:
            wu_result.state = WR_STATE_PRESENT;
            break;
        case 2:
            wu_result.state = WR_STATE_EMPTY;
            break;
        default:
            // slow_check() refreshes fields of wu_result.workunit;
            // update our copy too
            //
            wu.hr_class = wu_result.workunit.hr_class;
            wu.app_version_id = wu_result.workunit.app_version_id;

            // mark slot as empty AFTER we've copied out of it
            // (since otherwise feeder might overwrite it)
            //
            wu_result.state = WR_STATE_EMPTY;

            // reread result from DB, make sure it's still unsent
            // TODO: from here to end of add_result_to_reply()
            // (which updates the DB record) should be a transaction
            //
            SCHED_DB_RESULT result;
            result.id = wu_result.resultid;
            if (result_still_sendable(result, wu)) {
                add_result_to_reply(result, wu, job.bavp, false);

                // add_result_to_reply() fails only in pathological cases -
                // e.g. we couldn't update the DB record or modify XML fields.
                // If this happens, don't replace the record in the array
                // (we can't anyway, since we marked the entry as "empty").
                // The feeder will eventually pick it up again,
                // and hopefully the problem won't happen twice.
            }
            break;
        }
    }
    if (sema_locked) {
        unlock_sema();
    }

    restore_others(rt);
    g_wreq->best_app_versions.clear();
}
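// The std::sort() call above uses a job_compare predicate that is not shown in
// this excerpt. A minimal sketch, assuming the intent is simply "higher score
// sorts first"; the actual definition in the scheduler source may differ.
//
bool job_compare(JOB j1, JOB j2) {
    return j1.score > j2.score;   // better-scoring jobs come first
}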
static bool list_nextvol(UAContext *ua, int ndays)
{
   JOB *job;
   JCR *jcr;
   USTORE store;
   RUN *run;
   utime_t runtime;
   bool found = false;
   MEDIA_DBR mr;
   POOL_DBR pr;

   memset(&mr, 0, sizeof(mr));
   int i = find_arg_with_value(ua, "job");
   if (i <= 0) {
      if ((job = select_job_resource(ua)) == NULL) {
         return false;
      }
   } else {
      job = (JOB *)GetResWithName(R_JOB, ua->argv[i]);
      if (!job) {
         Jmsg(ua->jcr, M_ERROR, 0, _("%s is not a job name.\n"), ua->argv[i]);
         if ((job = select_job_resource(ua)) == NULL) {
            return false;
         }
      }
   }

   jcr = new_jcr(sizeof(JCR), dird_free_jcr);
   for (run=NULL; (run = find_next_run(run, job, runtime, ndays)); ) {
      if (!complete_jcr_for_job(jcr, job, run->pool)) {
         found = false;
         goto get_out;
      }
      if (!jcr->jr.PoolId) {
         ua->error_msg(_("Could not find Pool for Job %s\n"), job->name());
         continue;
      }
      memset(&pr, 0, sizeof(pr));
      pr.PoolId = jcr->jr.PoolId;
      if (!db_get_pool_record(jcr, jcr->db, &pr)) {
         bstrncpy(pr.Name, "*UnknownPool*", sizeof(pr.Name));
      }
      mr.PoolId = jcr->jr.PoolId;
      get_job_storage(&store, job, run);
      mr.StorageId = store.store->StorageId;
      /* no need to set ScratchPoolId, since we use fnv_no_create_vol */
      if (!find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_prune)) {
         ua->error_msg(_("Could not find next Volume for Job %s (Pool=%s, Level=%s).\n"),
            job->name(), pr.Name, level_to_str(run->level));
      } else {
         ua->send_msg(
            _("The next Volume to be used by Job \"%s\" (Pool=%s, Level=%s) will be %s\n"),
            job->name(), pr.Name, level_to_str(run->level), mr.VolumeName);
         found = true;
      }
   }

get_out:
   if (jcr->db) {
      db_close_database(jcr, jcr->db);
      jcr->db = NULL;
   }
   free_jcr(jcr);
   if (!found) {
      ua->error_msg(_("Could not find next Volume for Job %s.\n"), job->hdr.name);
      return false;
   }
   return true;
}
/*
 *   Restore files
 *
 */
int restore_cmd(UAContext *ua, const char *cmd)
{
   RESTORE_CTX rx;                    /* restore context */
   POOL_MEM buf;
   JOB *job;
   int i;
   JCR *jcr = ua->jcr;
   char *escaped_bsr_name = NULL;
   char *escaped_where_name = NULL;
   char *strip_prefix, *add_prefix, *add_suffix, *regexp;
   strip_prefix = add_prefix = add_suffix = regexp = NULL;

   memset(&rx, 0, sizeof(rx));
   rx.path = get_pool_memory(PM_FNAME);
   rx.fname = get_pool_memory(PM_FNAME);
   rx.JobIds = get_pool_memory(PM_FNAME);
   rx.JobIds[0] = 0;
   rx.BaseJobIds = get_pool_memory(PM_FNAME);
   rx.query = get_pool_memory(PM_FNAME);
   rx.bsr = new_bsr();

   i = find_arg_with_value(ua, "comment");
   if (i >= 0) {
      rx.comment = ua->argv[i];
      if (!is_comment_legal(ua, rx.comment)) {
         goto bail_out;
      }
   }

   i = find_arg_with_value(ua, "where");
   if (i >= 0) {
      rx.where = ua->argv[i];
   }

   i = find_arg_with_value(ua, "replace");
   if (i >= 0) {
      rx.replace = ua->argv[i];
   }

   i = find_arg_with_value(ua, "strip_prefix");
   if (i >= 0) {
      strip_prefix = ua->argv[i];
   }

   i = find_arg_with_value(ua, "add_prefix");
   if (i >= 0) {
      add_prefix = ua->argv[i];
   }

   i = find_arg_with_value(ua, "add_suffix");
   if (i >= 0) {
      add_suffix = ua->argv[i];
   }

   i = find_arg_with_value(ua, "regexwhere");
   if (i >= 0) {
      rx.RegexWhere = ua->argv[i];
   }

   if (strip_prefix || add_suffix || add_prefix) {
      int len = bregexp_get_build_where_size(strip_prefix, add_prefix, add_suffix);
      regexp = (char *)bmalloc(len * sizeof(char));

      bregexp_build_where(regexp, len, strip_prefix, add_prefix, add_suffix);
      rx.RegexWhere = regexp;
   }

   /* TODO: add acl for regexwhere ? */

   if (rx.RegexWhere) {
      if (!acl_access_ok(ua, Where_ACL, rx.RegexWhere)) {
         ua->error_msg(_("\"RegexWhere\" specification not authorized.\n"));
         goto bail_out;
      }
   }

   if (rx.where) {
      if (!acl_access_ok(ua, Where_ACL, rx.where)) {
         ua->error_msg(_("\"where\" specification not authorized.\n"));
         goto bail_out;
      }
   }

   if (!open_client_db(ua)) {
      goto bail_out;
   }

   /* Ensure there is at least one Restore Job */
   LockRes();
   foreach_res(job, R_JOB) {
      if (job->JobType == JT_RESTORE) {
         if (!rx.restore_job) {
            rx.restore_job = job;
         }
         rx.restore_jobs++;
      }
   }
   UnlockRes();
   if (!rx.restore_jobs) {
      ua->error_msg(_(
         "No Restore Job Resource found in bacula-dir.conf.\n"
         "You must create at least one before running this command.\n"));
      goto bail_out;
   }

   /*
    * Request user to select JobIds or files by various different methods
    *  last 20 jobs, where File saved, most recent backup, ...
    *  In the end, a list of files are pumped into
    *  add_findex()
    */
   switch (user_select_jobids_or_files(ua, &rx)) {
   case 0:                            /* error */
      goto bail_out;
   case 1:                            /* selected by jobid */
      get_and_display_basejobs(ua, &rx);
      if (!build_directory_tree(ua, &rx)) {
         ua->send_msg(_("Restore not done.\n"));
         goto bail_out;
      }
      break;
   case 2:                            /* selected by filename, no tree needed */
      break;
   }

   if (rx.bsr->JobId) {
      char ed1[50];
      if (!complete_bsr(ua, rx.bsr)) {   /* find Vol, SessId, SessTime from JobIds */
         ua->error_msg(_("Unable to construct a valid BSR. Cannot continue.\n"));
         goto bail_out;
      }
      if (!(rx.selected_files = write_bsr_file(ua, rx))) {
         ua->warning_msg(_("No files selected to be restored.\n"));
         goto bail_out;
      }
      display_bsr_info(ua, rx);          /* display vols needed, etc */
      if (rx.selected_files==1) {
         ua->info_msg(_("\n1 file selected to be restored.\n\n"));
      } else {
         ua->info_msg(_("\n%s files selected to be restored.\n\n"),
            edit_uint64_with_commas(rx.selected_files, ed1));
      }
   } else {
      ua->warning_msg(_("No files selected to be restored.\n"));
      goto bail_out;
   }

   if (rx.restore_jobs == 1) {
      job = rx.restore_job;
   } else {
      job = get_restore_job(ua);
   }
   if (!job) {
      goto bail_out;
   }

   get_client_name(ua, &rx);
   if (!rx.ClientName) {
      ua->error_msg(_("No Client resource found!\n"));
      goto bail_out;
   }
   get_restore_client_name(ua, rx);

   escaped_bsr_name = escape_filename(jcr->RestoreBootstrap);

   Mmsg(ua->cmd,
        "run job=\"%s\" client=\"%s\" restoreclient=\"%s\" storage=\"%s\""
        " bootstrap=\"%s\" files=%u catalog=\"%s\"",
        job->name(), rx.ClientName, rx.RestoreClientName,
        rx.store?rx.store->name():"",
        escaped_bsr_name ? escaped_bsr_name : jcr->RestoreBootstrap,
        rx.selected_files, ua->catalog->name());

   /* Build run command */
   pm_strcpy(buf, "");
   if (rx.RegexWhere) {
      escaped_where_name = escape_filename(rx.RegexWhere);
      Mmsg(buf, " regexwhere=\"%s\"",
           escaped_where_name ? escaped_where_name : rx.RegexWhere);

   } else if (rx.where) {
      escaped_where_name = escape_filename(rx.where);
      Mmsg(buf, " where=\"%s\"",
           escaped_where_name ? escaped_where_name : rx.where);
   }
   pm_strcat(ua->cmd, buf);

   if (rx.replace) {
      Mmsg(buf, " replace=%s", rx.replace);
      pm_strcat(ua->cmd, buf);
   }

   if (rx.comment) {
      Mmsg(buf, " comment=\"%s\"", rx.comment);
      pm_strcat(ua->cmd, buf);
   }

   if (escaped_bsr_name != NULL) {
      bfree(escaped_bsr_name);
   }

   if (escaped_where_name != NULL) {
      bfree(escaped_where_name);
   }

   if (regexp) {
      bfree(regexp);
   }

   if (find_arg(ua, NT_("yes")) > 0) {
      pm_strcat(ua->cmd, " yes");      /* pass it on to the run command */
   }

   Dmsg1(200, "Submitting: %s\n", ua->cmd);

   /* Transfer jobids to jcr for picking up restore objects */
   jcr->JobIds = rx.JobIds;
   rx.JobIds = NULL;
   parse_ua_args(ua);
   run_cmd(ua, ua->cmd);
   free_rx(&rx);
   garbage_collect_memory();           /* release unused memory */
   return 1;

bail_out:
   if (escaped_bsr_name != NULL) {
      bfree(escaped_bsr_name);
   }

   if (escaped_where_name != NULL) {
      bfree(escaped_where_name);
   }

   if (regexp) {
      bfree(regexp);
   }

   free_rx(&rx);
   garbage_collect_memory();           /* release unused memory */
   return 0;
}