Example no. 1
0
static int check_completion(child_process *cp, int flags)
{
	int result, status;
	time_t max_time;

	if (!cp || !cp->pid) {
		return 0;
	}

	max_time = time(NULL) + 1;

	/*
	 * we mustn't let EINTR interrupt us, since it could well
	 * be a SIGCHLD from the properly exiting process doing it
	 */
	do {
		errno = 0;
		result = wait4(cp->pid, &status, flags, &cp->rusage);
	} while (result == -1 && errno == EINTR && time(NULL) < max_time);

	if (result == cp->pid) {
		cp->ret = status;
		finish_job(cp, 0);
		return 0;
	}

	if (errno == ECHILD) {
		cp->ret = status;
		finish_job(cp, errno);
	}

	return -errno;
}
Example no. 2
0
static int check_completion(child_process *cp, int flags)
{
	int result, status;

	if (!cp || !cp->ei->pid) {
		return 0;
	}

	/*
	 * we mustn't let EINTR interrupt us, since it could well
	 * be a SIGCHLD from the properly exiting process doing it
	 */
	do {
		errno = 0;
		result = wait4(cp->ei->pid, &status, flags, &cp->ei->rusage);
	} while (result < 0 && errno == EINTR);

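	/* reaped normally, or the child is already gone (ECHILD): either way the job is done */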
	if (result == cp->ei->pid || (result < 0 && errno == ECHILD)) {
		cp->ret = status;
		finish_job(cp, 0);
		destroy_job(cp);
		return 0;
	}

	if (!result)
		return -1;

	return -errno;
}
Example no. 3
0
int ali_ftl_sync(void)   // for TDS
{
	struct ali_ftl_info *afi = aliFTLDevice;
	struct unfinish_job *ujob = NULL;
	int i, ret, isErr;

	isErr = 0;
	for (i = 0; i < afi->sectionPerPartition; i++) {
		osal_mutex_lock(afi->ali_ftl_mutex, OSAL_WAIT_FOREVER_TIME);   // for TDS

		/* flush every unfinished job still queued on this section */
		while (!list_empty(&afi->secInfo[i].ujob_busy)) {
			ujob = list_entry(afi->secInfo[i].ujob_busy.next, struct unfinish_job, ulist);

			ret = finish_job(afi, ujob);
			if (ret) {
				ALI_FTL_ERR("[WARN] %s: secIdx %d, ujob %p sync fail...\n", __FUNCTION__, i, (void *)ujob);
				isErr = 1;
			}
		}

		osal_mutex_unlock(afi->ali_ftl_mutex);   // for TDS
	}

	return isErr ? -1 : 0;
}
Example no. 4
0
void JobQueue::process_jobs(void)
{
    Uint32 j;
    Job *job;
    Uint32 max_num = active_jobs.size();
    
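    // nothing is active yet: try to pull one job in from the pending queue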
    if(0 == max_num){
        job = get_next_job();
        if(job) {
            active_jobs.push_back(job);
            max_num = 1;
        }
    }
    for(j=0; j<max_num; ++j){
        job = active_jobs[j];
        job->process();
        if (job->is_finished){
            finish_job(job);
            Job *new_job = get_next_job();
            if(new_job){
                active_jobs[j] = new_job;
            }else{
                active_jobs.erase(active_jobs.begin() + j);
                max_num = active_jobs.size();
                --j;
            }
            delete job;
        }
    }
}
Example no. 5
0
static void kill_job(child_process *cp, int reason)
{
	int ret;
	struct rusage ru;

	/* brutal but efficient */
	ret = kill(cp->pid, SIGKILL);
	if (ret < 0) {
		if (errno == ESRCH) {
			finish_job(cp, reason);
			return;
		}
		wlog("kill(%d, SIGKILL) failed: %s\n", cp->pid, strerror(errno));
	}

	ret = wait4(cp->pid, &cp->ret, 0, &ru);
	finish_job(cp, reason);

#ifdef PLAY_NICE_IN_kill_job
	pid_t pid;
	int i, sig = SIGTERM;

	pid = cp->pid;

	for (i = 0; i < 2; i++) {
		/* check one last time if the job is done */
		ret = check_completion(cp, WNOHANG);
		if (!ret || ret == -ECHILD) {
			/* check_completion ran finish_job() */
			return;
		}

		/* not done, so signal it. SIGTERM first and check again */
		errno = 0;
		ret = kill(pid, sig);
		if (ret < 0) {
			finish_job(cp, -errno);
		}
		sig = SIGKILL;
		check_completion(cp, WNOHANG);
		if (ret < 0) {
			finish_job(cp, errno);
		}
		usleep(50000);
	}
#endif /* PLAY_NICE_IN_kill_job */
}
Example no. 6
0
    void start_task_callback(evutil_socket_t /*_descriptor*/, short /*_flags*/, void* _curl_handler_ptr)
    {
        const auto handler = static_cast<curl_handler*>(_curl_handler_ptr);

        typedef std::unique_ptr<curl_handler::connection_context> connection_ptr;
        std::vector<connection_ptr> to_process;

        {
            boost::lock_guard<boost::mutex> lock(handler->jobs_mutex_);

            auto transmissions = handler->connections_.size();

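            // admit pending jobs until the per-priority concurrency limits are reached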
            while (!handler->pending_jobs_.empty())
            {
                const auto& job = handler->pending_jobs_.top();

                if (job.priority_ >= highest_priority && transmissions > MAX_HIGHEST_TRANSMISSIONS)
                    break;

                if (job.priority_ >= high_priority && transmissions > MAX_HIGH_TRANSMISSIONS)
                    break;

                if (job.priority_ >= default_priority && transmissions > MAX_NORMAL_TRANSMISSIONS)
                    break;

                ++transmissions;

                const auto completion_handler = job.completion_;
                const auto timeout = job.timeout_;
                const auto easy_handle = job.handle_;

                auto connection = std::make_unique<curl_handler::connection_context>(timeout, handler, easy_handle, completion_handler);
                to_process.push_back(std::move(connection));

                handler->pending_jobs_.pop();
            }
        }

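        // outside the lock, hand each prepared connection to the curl multi handle; a failed add finishes the job immediately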
        for (auto&& connection : to_process)
        {
            const auto easy_handle = connection->easy_handle_;

            handler->connections_[easy_handle] = std::move(connection);

            const auto result = curl_multi_add_handle(handler->multi_handle_, easy_handle);

            if (result != CURLM_OK)
            {
                finish_job(handler, easy_handle, CURLE_FAILED_INIT);
            }
        }
    }
Example no. 7
0
    void event_timeout_callback(evutil_socket_t /*_descriptor*/, short /*_flags*/, void* _connection_ptr)
    {
        const auto connection = static_cast<curl_handler::connection_context*>(_connection_ptr);

        const auto error = curl_multi_remove_handle(connection->curl_handler_->multi_handle_, connection->easy_handle_);
        assert(!error);
        (void) error; // suppress unused-variable warning when asserts are disabled

        const auto handler = connection->curl_handler_;

        finish_job(handler, connection->easy_handle_, CURLE_OPERATION_TIMEDOUT);

        start_new_job(handler);
    }
Example no. 8
0
static void check_overwrite (void * data)
{
    ImportExportJob * job = data;

    job->filename = gtk_file_chooser_get_uri ((GtkFileChooser *) job->selector);

    if (! job->filename)
        return;

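    /* when saving over an existing file, ask for confirmation first; otherwise finish right away */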
    if (job->save && vfs_file_test (job->filename, G_FILE_TEST_EXISTS))
        confirm_overwrite (job);
    else
        finish_job (data);
}
Example no. 9
0
    void check_multi_info(curl_handler* _curl_handler)
    {
        int messages_left = 0;
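        // drain every completed transfer reported by the multi handle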
        while (auto message = curl_multi_info_read(_curl_handler->multi_handle_, &messages_left))
        {
            if (message->msg == CURLMSG_DONE)
            {
                const auto easy_handle = message->easy_handle;
                const auto result = message->data.result;

                const auto error = curl_multi_remove_handle(_curl_handler->multi_handle_, easy_handle);
                assert(!error);
                (void) error; // suppress unused-variable warning when asserts are disabled

                finish_job(_curl_handler, easy_handle, result);

                start_new_job(_curl_handler);
            }
        }
    }
Example no. 10
0
void tpie_finish(int subsystems) {
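	// tear down only the subsystems the caller requested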
	if (subsystems & STREAMS) {
		finish_compressor();
		finish_stream_buffer_pool();
	}

	if (subsystems & JOB_MANAGER)
		finish_job();

	if (subsystems & PROGRESS) {
		finish_execution_time_db();
		finish_fraction_db();
	}

	if (subsystems & PRIMEDB)
		finish_prime();

	if (subsystems & DEFAULT_LOGGING)
		finish_default_log();

	if (subsystems & MEMORY_MANAGER)
		finish_memory_manager();
}
Example no. 11
0
/*
 * "What can the harvest hope for, if not for the care
 * of the Reaper Man?"
 *   -- Terry Pratchett, Reaper Man
 *
 * We end up here no matter if the job is stale (ie, the child is
 * stuck in uninterruptable sleep) or if it's the first time we try
 * to kill it.
 * A job is considered reaped once we reap our direct child, in
 * which case init will become parent of our grandchildren.
 * It's also considered fully reaped if kill() results in ESRCH or
 * EPERM, or if wait()ing for the process group results in ECHILD.
 */
static void kill_job(child_process *cp, int reason)
{
	int ret, status, reaped = 0;
	int pid = cp ? cp->ei->pid : 0;

	/*
	 * first attempt at reaping, so see if we just failed to
	 * notice that things were going wrong here
	 */
	if (reason == ETIME && !check_completion(cp, WNOHANG)) {
		timeouts++;
		wlog("job %d with pid %d reaped at timeout. timeouts=%u; started=%u", cp->id, pid, timeouts, started);
		return;
	}

	/* brutal but efficient */
	if (kill(-cp->ei->pid, SIGKILL) < 0) {
		if (errno == ESRCH) {
			reaped = 1;
		} else {
			wlog("kill(-%d, SIGKILL) failed: %s\n", cp->ei->pid, strerror(errno));
		}
	}

	/*
	 * we must iterate at least once, in case kill() returns
	 * ESRCH when there's zombies
	 */
	do {
		ret = waitpid(cp->ei->pid, &status, WNOHANG);
		if (ret < 0 && errno == EINTR)
			continue;

		if (ret == cp->ei->pid || (ret < 0 && errno == ECHILD)) {
			reaped = 1;
			break;
		}
		if (!ret) {
			struct timeval tv;

			gettimeofday(&tv, NULL);
			/*
			 * stale process (signal may not have been delivered, or
			 * the child can be stuck in uninterruptible sleep). We
			 * can't hang around forever, so just reschedule a new
			 * reap attempt later.
			 */
			if (reason == ESTALE) {
				tv.tv_sec += 5;
				wlog("Failed to reap child with pid %d. Next attempt @ %lu.%lu", cp->ei->pid, tv.tv_sec, tv.tv_usec);
			} else {
				tv.tv_usec += 250000;
				if (tv.tv_usec > 1000000) {
					tv.tv_usec -= 1000000;
					tv.tv_sec += 1;
				}
				cp->ei->state = ESTALE;
				finish_job(cp, reason);
			}
			squeue_remove(sq, cp->ei->sq_event);
			cp->ei->sq_event = squeue_add_tv(sq, &tv, cp);
			return;
		}
	} while (!reaped);

	if (cp->ei->state != ESTALE)
		finish_job(cp, reason);
	else
		wlog("job %d (pid=%d): Dormant child reaped", cp->id, cp->ei->pid);
	destroy_job(cp);
}
void peer_run(bt_config_t *config) {
    int sock;
    struct sockaddr_in myaddr;
    fd_set readfds;
    struct user_iobuf *userbuf;
    
    if ((userbuf = create_userbuf()) == NULL) {
        perror("peer_run could not allocate userbuf");
        exit(-1);
    }
    
    if ((sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)) == -1) {
        perror("peer_run could not create socket");
        exit(-1);
    }
    
    bzero(&myaddr, sizeof(myaddr));
    myaddr.sin_family = AF_INET;
    //myaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    inet_aton("127.0.0.1", (struct in_addr*)&myaddr.sin_addr.s_addr);
    myaddr.sin_port = htons(config->myport);
    
    if (bind(sock, (struct sockaddr *) &myaddr, sizeof(myaddr)) == -1) {
        perror("peer_run could not bind socket");
        exit(-1);
    }
    global_socket = sock;
    spiffy_init(config->identity, (struct sockaddr *)&myaddr, sizeof(myaddr));
    init_window_log();
    init_hasChunks(config->has_chunk_file);
    init_connections();

    struct timeval last_flood_whohas_time;
    gettimeofday(&last_flood_whohas_time, NULL);
    while (1) {
        int nfds;
        /* select() modifies the fd set, so rebuild it on every iteration */
        FD_ZERO(&readfds);
        FD_SET(STDIN_FILENO, &readfds);
        FD_SET(sock, &readfds);

        struct timeval select_timeout;
        select_timeout.tv_sec = 0;
        select_timeout.tv_usec = 300000;

        nfds = select(sock+1, &readfds, NULL, NULL, &select_timeout);
        
        if (nfds > 0) {
            if (FD_ISSET(sock, &readfds)) {
                process_inbound_udp(sock);
            }

            if (FD_ISSET(STDIN_FILENO, &readfds)) {
                process_user_input(STDIN_FILENO, userbuf, handle_user_input,
                                   "Currently unused");
            }
        }
        providers_timeout();
        receivers_timeout();
        /* this is a regular send task, driven purely by time */
        all_provider_connection_send_data(provider_connection_head);

        /* every few seconds, if receiver connections are available, flood WHOHAS */
        if(get_time_diff(&last_flood_whohas_time) > WHOHAS_FLOOD_INTERVAL_MS){
            flood_whohas();
            gettimeofday(&last_flood_whohas_time, NULL);
        }
        /* if there are no more receiver connections and the job is not NULL,
         * check whether the job has finished
         */
        finish_job();
    }
}