/* Finish the current job on \a page: close the job's task group, optionally
   set up an asynchronous completion task (pipelined jobs), otherwise join the
   job's tasks synchronously, then drop the page's job reference.

   \param page             Page whose job is ending; page->job must be a valid
                           corejob_t (checked by VERIFY_OBJECT).
   \param complete         If non-NULL, receives either NULL or a reference to
                           an asynchronous completion task the caller may wait
                           on. Ownership of the returned task passes to the
                           caller.
   \param failure_handled  Passed through to dl_pipeline_flush(); presumably
                           indicates the caller has already reported failure —
                           TODO confirm against dl_pipeline_flush's contract. */
void corejob_end(DL_STATE *page, task_t **complete, Bool failure_handled)
{
  task_group_t *root_group = task_group_root() ;
  corecontext_t *context = get_core_context() ;
  error_context_t error = ERROR_CONTEXT_INIT, *olderror ;
  corejob_t *job = page->job ;

  VERIFY_OBJECT(job, CORE_JOB_NAME) ;

  /* Suppress errors from this function and sub-functions. We're able to
     cope with failure by joining immediately. The saved error context is
     restored before returning. */
  olderror = context->error ;
  context->error = &error ;

  /* The job's task group is now complete. If we pipeline jobs, this means
     that we can run tasks in the job through task_helper_locked(). */
  task_group_close(job->task_group) ;

  if ( complete != NULL ) {
    *complete = NULL ;
#ifdef PIPELINE_JOBS
#ifdef DEBUG_BUILD
    /* In debug builds the pipelining switch is re-read from the debug_dl
       bitmask on every job end. */
    pipeline_jobs = (debug_dl & DEBUG_DL_PIPELINE_JOBS) != 0 ;
#endif
    if ( pipeline_jobs ) {
      /* Take an extra reference on the job's task group; on the success path
         this reference is handed to the completion task via
         task_group_set_joiner() below. */
      task_group_t *job_group = task_group_acquire(job->task_group) ;
      if ( task_group_create(&job->join_group, TASK_GROUP_COMPLETE,
                             root_group, NULL) ) {
        task_group_ready(job->join_group) ;
        if ( task_create(complete,
                         NULL /*specialiser*/, NULL /*spec args*/,
                         NULL /*no worker*/, NULL /*args*/, &corejob_finalise,
                         job->join_group, SW_TRACE_JOB_COMPLETE) ) {
          /* If successful, this next call atomically sets the task args to
             the job object: */
          Bool transferred = task_group_set_joiner(job_group, *complete,
                                                   job_group) ;
          task_ready(*complete) ;
          task_group_close(job->join_group) ;
          if ( transferred )
            /* job_group's reference now belongs to the completion task, so
               the task_group_release(&job_group) below must be skipped. */
            goto done ;
          task_release(complete) ;
        }
        /* Completion task setup failed: tear down the join group and fall
           through to the synchronous join below. */
        task_group_cancel(job->join_group, context->error->new_error) ;
        (void)task_group_join(job->join_group, NULL) ;
        task_group_release(&job->join_group) ;
      }
      task_group_release(&job_group) ;
    }
#endif /* PIPELINE_JOBS */
  }

  /* Ignore the error return value; if the job is failed because of an
     asynchronous flush, the job's failure field is set. */
  (void)dl_pipeline_flush(1, failure_handled) ;

  /* No asynchronous completion task was set up (caller didn't ask for one,
     pipelining is off, or creating it failed above), so join the job's task
     group synchronously here. */
  (void)task_group_join(job->task_group, NULL) ;

#ifdef PIPELINE_JOBS
 done:
#endif
  context->error = olderror ;
  task_group_release(&root_group) ;

  /* Don't increment job number on a font query etc... */
  if ( job->has_output )
    ++page->job_number ;

  corejob_release(&page->job) ;
}
/* Allocate and initialise a corejob_t for \a page, create its task group,
   open the %progress%JobLog feedback file, and push the job-stream timeline.

   \param page      Page to attach the new job to; must not already have a
                    job (asserted). On exit page->job is the new job or NULL,
                    and page->pageno is reset to 0.
   \param previous  Optional task from a prior job; ownership transfers to
                    the new job (released here on any failure path).
   \return          TRUE iff the job was fully created.

   Note: final freeing of a successfully-created job is NOT done by
   corejob_release() directly; the job-stream timeline's ended/aborted
   handler is responsible for it (see comment before timeline_push below). */
Bool corejob_create(DL_STATE *page, task_t *previous)
{
  corejob_t *job ;
  DEVICE_FILEDESCRIPTOR logfile = -1 ;

  HQASSERT(page != NULL && page->job == NULL,
           "Input page already has job object") ;

  if ( (job = mm_alloc(mm_pool_temp, sizeof(corejob_t),
                       MM_ALLOC_CLASS_JOB)) != NULL ) {
    task_group_t *root = task_group_root() ;
    job->refcount = 1 ; /* One for the returned ref. */
    job->state = CORE_JOB_NONE ;
    job->previous = previous ;
    job->task_group = NULL ;
    job->join_group = NULL ;
    job->first_dl = job->last_dl = page ;
    job->has_output = FALSE ;
    job->pages_output = 0 ;
    job->failed = FALSE ;
    /* We always have a valid pointer for the name */
    job->name.string = (uint8 *)"" ;
    job->name.length = 0 ;
    job->interrupt_handler.handler = corejob_interrupt_handler;
    job->interrupt_handler.context = NULL;
    job->interrupt_handler.reserved = 0;
    job->timeout_timer = NULL;
    job->timeline = core_tl_ref ;

    /* Open the %progress%JobLog file for progress reporting [65510] */
    if (progressdev != NULL && isDeviceEnabled(progressdev) &&
        isDeviceRelative(progressdev)) {
      logfile = (*theIOpenFile(progressdev))(progressdev, (uint8*)"JobLog",
                                             SW_WRONLY | SW_CREAT) ;
      HQASSERT(logfile >= 0, "Unable to open JobLog for progress reporting") ;
    }
    job->logfile = logfile ;

    if ( !task_group_create(&job->task_group, TASK_GROUP_JOB,
                            root, NULL /*resources*/) ) {
      /* Task group creation failed: unwind the previous-task reference and
         the allocation. */
      if ( job->previous != NULL )
        task_release(&job->previous) ;
      mm_free(mm_pool_temp, job, sizeof(corejob_t)) ;
      job = NULL ;
    } else {
      /* We want the job object valid before we start the job timeline,
         because event handlers want to know they're dealing with a
         (presumably: valid, named job object — original comment truncated,
         TODO confirm). */
      NAME_OBJECT(job, CORE_JOB_NAME) ;

      /* The job task group can be made provisionable immediately, so we can
         start tasks as they are created. */
      task_group_ready(job->task_group) ;

      /* The timeline object's context is not counted as a reference against
         the job object. Instead, the job stream timeline ended/aborted
         handler is made responsible for final freeing of the job object.
         This is required because the primary context of the timeline cannot
         be changed safely, and we may receive interrupts at any time. The
         job object cannot be allowed to go out of scope whilst any
         interrupt could dereference it, which is for the duration of the
         timeline. */
      if ( (page->timeline = timeline_push(&job->timeline, SWTLT_JOB_STREAM,
                                           0 /*end*/, SW_TL_UNIT_PAGES,
                                           job, NULL, 0)) == SW_TL_REF_INVALID ) {
        /* Timeline push failed: join and drop the task group, then unwind
           as for the task-group failure above. */
        (void)task_group_join(job->task_group, NULL) ;
        task_group_release(&job->task_group) ;
        if ( job->previous != NULL )
          task_release(&job->previous) ;
        UNNAME_OBJECT(job) ;
        mm_free(mm_pool_temp, job, sizeof(corejob_t)) ;
        job = NULL ;
      } else {
#ifdef ASSERT_BUILD
        ++corejob_allocs ;
#endif
      }
    }
    task_group_release(&root) ;
    if (!job) {
      /* Close feedback files if job was not created */
      if (logfile != -1)
        (void)(*theICloseFile(progressdev))(progressdev, logfile) ;
    }
  }
  page->job = job ;
  page->pageno = 0 ;
  if (job && logfile >= 0) {
    /* Register the default handler for the %progress%LogFile channel, just
       above the fallback route to the monitor device */
    prog_handler.context = job ;
    (void)SwRegisterHandler(SWEVT_MONITOR, &prog_handler, SW_EVENT_DEFAULT+1) ;
  }
  return (job != NULL) ;
}
/* Drop one reference to *jobptr, NULLing the caller's pointer. When the last
   reference goes, tear down the job: cancel its timeout, deregister
   interrupt handlers, pop the state-specific timeline, release task groups
   and the job name — but do NOT free the job object itself; that is done by
   the job-stream timeline's ended event handler (see final timeline_pop).

   \param jobptr  Address of the caller's job pointer; must be non-NULL and
                  point at a valid corejob_t. Set to NULL on return. */
void corejob_release(corejob_t **jobptr)
{
  corejob_t *job ;
  hq_atomic_counter_t after ;

  HQASSERT(jobptr, "Nowhere to find job pointer") ;
  job = *jobptr ;
  VERIFY_OBJECT(job, CORE_JOB_NAME) ;
  *jobptr = NULL ;

  /* Atomic decrement; 'after' receives the post-decrement count, so zero
     means this was the final reference. */
  HqAtomicDecrement(&job->refcount, after) ;
  HQASSERT(after >= 0, "Job already released") ;
  if ( after == 0 ) {
    corejob_set_timeout(job, 0) ; /* Remove any existing timer */
    /* De-register handler for user and timeout interrupts */
    if (job->interrupt_handler.context != NULL) {
      (void)SwSafeDeregisterHandler(SWEVT_INTERRUPT_USER,
                                    &job->interrupt_handler);
      (void)SwSafeDeregisterHandler(SWEVT_INTERRUPT_TIMEOUT,
                                    &job->interrupt_handler);
      job->interrupt_handler.context = NULL;
    }
    /* Pop the timeline matching the job's current state. */
    switch ( job->state ) {
    default:
      HQFAIL("Invalid core job state") ;
      /* fallthrough — treat an invalid state like CORE_JOB_NONE */
    case CORE_JOB_NONE:
      /* Nothing to do */
      break ;
    case CORE_JOB_CONFIG:
      mps_telemetry_label(0, configEnd);
      CHECK_TL_VALID(timeline_pop(&job->timeline, SWTLT_JOB_CONFIG,
                                  !job->failed)) ;
      probe_end(SW_TRACE_JOB_CONFIG, (intptr_t)job);
      break ;
    case CORE_JOB_RUNNING:
      mps_telemetry_label(0, jobEnd);
      CHECK_TL_VALID(timeline_pop(&job->timeline, SWTLT_JOB,
                                  !job->failed)) ;
      probe_end(SW_TRACE_JOB, (intptr_t)job);
      break ;
    }
    if ( job->previous != NULL )
      task_release(&job->previous) ;
    if ( job->join_group != NULL ) {
      /* Ensure that asynchronous join task has completed. */
      (void)task_group_join(job->join_group, NULL) ;
      task_group_release(&job->join_group) ;
    }
    task_group_release(&job->task_group) ;
    if ( job->name.length > 0 ) {
      /* Only free a heap-allocated name; the empty-string default is a
         static literal (length 0) and is never freed. */
      mm_free(mm_pool_temp, job->name.string, job->name.length) ;
      job->name.length = 0 ;
      job->name.string = (uint8 *)"" ;
    }
    /* Responsibility for actually freeing the job object is passed to the
       timeline ended event. */
    CHECK_TL_VALID(timeline_pop(&job->timeline, SWTLT_JOB_STREAM,
                                !job->failed)) ;
  }
}
int test_logic_task(const char* param) { timerheap_t* timer = timer_create_heap(); if (!timer) { fprintf(stderr, "timer create fail\n"); return -1; } _curl_pool = curlp_create(); if (!_curl_pool) { fprintf(stderr, "curl create fail\n"); timer_release(timer); return -1; } task_t* t = task_create(on_success, on_fail, NULL); if (!t) { fprintf(stderr, "task create fail\n"); timer_release(timer); curlp_release(_curl_pool); return -1; } struct timeval timeout; struct timeval tv; t1_param_t p; p.loop = (param ? atoi(param) : 3); task_step_t* t1 = task_step_create(t1_run, (void*)&p); task_step_t* t2 = task_step_create(t2_run, NULL); task_step_t* t3 = task_step_create(t3_run, NULL); if (!t1 || !t2 || !t3) { fprintf(stderr, "task step create fail\n"); if (t1) task_step_release(t1); if (t2) task_step_release(t2); if (t3) task_step_release(t3); task_release(t); timer_release(timer); curlp_release(_curl_pool); return -1; } task_push_back_step(t, t1); _task_step_id = task_step_id(t1); task_push_back_step(t, t2); task_push_back_step(t, t3); timeout.tv_sec = 3; timeout.tv_usec = 0; task_run(t, timer, &timeout); while (1) { if (curlp_running_count(_curl_pool) > 0) { curlp_poll(_curl_pool); } gettimeofday(&tv, NULL); timer_poll(timer, &tv); if (task_is_finished(t) == 0) { break; } } task_release(t); timer_release(timer); curlp_release(_curl_pool); return 0; }