Example #1
void *thread_ping(void *vargs)
{
    int previous_ping = 0;
    const int ping_delay = 1 * 60;
    struct thread *t = (struct thread *) vargs;

    thread_register_sigint_handler();

    do {

        /* No answer for four ping periods: give up on the link. */
        if (session.xmpp.last_query + 4 * ping_delay < time(NULL))
        {
            xprintf("it's over.\n\n");
            break;
        }
        /* Idle for three periods: probe the server with a ping. */
        else if (session.xmpp.last_query + 3 * ping_delay < time(NULL))
        {
            xprintf("Stalling life... ");
            xmpp_iq_ping();
            previous_ping = 1;
        }
        /* The previous ping was answered (last_query moved on). */
        else if (previous_ping)
        {
            xprintf("still there!\n");
            previous_ping = 0;
        }

        sleep(ping_delay);

    } while (session.active);

    return thread_close(t);
}
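
Neither thread_register_sigint_handler nor thread_close is shown in this snippet. A minimal sketch of compatible helpers, assuming a pthread-based struct thread; every name below that does not appear in the example is an assumption:

#include <pthread.h>
#include <signal.h>
#include <stdlib.h>

struct thread {
    pthread_t id;
};

/* Sketch: unblock SIGINT for this thread so a signal can
   interrupt blocking calls such as sleep(). */
void thread_register_sigint_handler(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGINT);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

/* Sketch: free the descriptor and yield the thread's return
   value, so "return thread_close(t);" can end the routine. */
void *thread_close(struct thread *t)
{
    free(t);
    return NULL;
}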
Example #2
void *thread_sendstream(void *vargs)
{
    struct thread *t = (struct thread *) vargs;

    thread_register_sigint_handler();

    do {
        /* Blocks until another thread queues a message. */
        char *msg = thread_sendstream_get_next_msg();

        stream_send_msg(session.wfs, msg);
        stream_flush(session.wfs);

        free(msg);
    } while (session.active);

    /* Destroy remaining messages */
    for (unsigned int i = 0; i < SEND_MSG_MAX; ++i)
    {
        free(send_msgs[i]);
        send_msgs[i] = NULL;
    }

    sem_destroy(&_sem_send_msgs_empty);
    sem_destroy(&_sem_send_msgs_full);

    return thread_close(t);
}
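
The queue behind thread_sendstream_get_next_msg is not shown, but the pairing of _sem_send_msgs_empty and _sem_send_msgs_full with a fixed send_msgs[SEND_MSG_MAX] array suggests a classic bounded buffer. A sketch of one way to fill that in, assuming a mutex-guarded ring buffer initialized with sem_init(&_sem_send_msgs_empty, 0, SEND_MSG_MAX) and sem_init(&_sem_send_msgs_full, 0, 0); all internals here are assumptions:

#include <pthread.h>
#include <semaphore.h>
#include <stddef.h>

#define SEND_MSG_MAX 32 /* assumed capacity */

static char *send_msgs[SEND_MSG_MAX];
static size_t head, tail;
static pthread_mutex_t _lock_send_msgs = PTHREAD_MUTEX_INITIALIZER;
static sem_t _sem_send_msgs_empty; /* counts free slots   */
static sem_t _sem_send_msgs_full;  /* counts queued items */

/* Producer side: block until a slot is free, then queue msg. */
void thread_sendstream_post_new_msg(char *msg)
{
    sem_wait(&_sem_send_msgs_empty);
    pthread_mutex_lock(&_lock_send_msgs);
    send_msgs[tail] = msg;
    tail = (tail + 1) % SEND_MSG_MAX;
    pthread_mutex_unlock(&_lock_send_msgs);
    sem_post(&_sem_send_msgs_full);
}

/* Consumer side: block until a message is queued, then take it. */
char *thread_sendstream_get_next_msg(void)
{
    sem_wait(&_sem_send_msgs_full);
    pthread_mutex_lock(&_lock_send_msgs);
    char *msg = send_msgs[head];
    send_msgs[head] = NULL;
    head = (head + 1) % SEND_MSG_MAX;
    pthread_mutex_unlock(&_lock_send_msgs);
    sem_post(&_sem_send_msgs_empty);
    return msg;
}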
Example #3
Spider_Thread_Pool::~Spider_Thread_Pool(void)
{
	m_exit_flag = true;

	/* Release the semaphore once per worker so every blocked
	 * thread wakes up and can observe m_exit_flag. */
	for (unsigned int i = 0; i < m_thread_list.size(); i++)
	{
		semaphore_release(m_semaphore);
	}

	LLOG(L_DEBUG, "release threadpool.");
	std::list<handle_thread>::iterator iter = m_thread_list.begin();
	for (; iter != m_thread_list.end(); ++iter)
	{
		handle_thread handle = *iter;
		thread_waitforend(handle, INFINITE);
		thread_close(handle);
	}

	LLOG(L_DEBUG, "destroy semaphore and mutex.");
	semaphore_destory(m_semaphore);
	m_semaphore = NULL;

	mutex_destroy(m_mutex);
	m_mutex = NULL;

	LLOG(L_DEBUG, "ThreadPool exit.");
}
Example #4
int Spider_Executor::uninitialize()
{
	/* Signal the worker loop to stop, then reap its thread. */
	m_exit = true;
	thread_waitforend(m_thread_handle, INFINITE);
	thread_close(m_thread_handle);
	recursivemutex_destory(m_queue_mutex);
	return 0;
}
Example #5
int main()
{
    THREAD thread;

    /* Run PrintStuff on a worker thread until a key is pressed. */
    thread_create(&PrintStuff, NULL, &thread);
    thread_start(&thread);

    _getch(); /* wait for a keypress (Windows, <conio.h>) */
    thread_close(&thread);
    return 0;
}
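
PrintStuff and the THREAD wrapper live outside this snippet. A sketch of a worker routine with a signature that would plausibly satisfy thread_create(&PrintStuff, NULL, &thread); the signature itself is an assumption about the wrapper:

#include <stdio.h>

/* Hypothetical worker: prints until the main thread closes it. */
void PrintStuff(void *arg)
{
    (void) arg; /* the NULL passed through thread_create */

    for (;;)
        puts("stuff");
}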
Example #6
int
main(int argc, char *argv[])
{
	int c;
	struct VSM_data *vd;
	const char *address = NULL;

	vd = VSM_New();
	debug = 0;

	VSL_Arg(vd, 'c', NULL);
	while ((c = getopt(argc, argv, "a:Dr:n:")) != -1) {
		switch (c) {
		case 'a':
			address = optarg;
			break;
		case 'D':
			++debug;
			break;
		default:
			if (VSL_Arg(vd, c, optarg) > 0)
				break;
			usage();
		}
	}

	if (address == NULL) {
		usage();
	}

	if (VSM_Open(vd)) {
		fprintf(stderr, "%s\n", VSM_Error(vd));
		exit(1);
	}

	addr_info = init_connection(address);

	signal(SIGPIPE, SIG_IGN);

	pthread_attr_init(&thread_attr);

	/*
	 * XXX: setting the stack size manually reduces the memory usage
	 * XXX: (allowing more threads) and increases speed (?)
	 */
	pthread_attr_setstacksize(&thread_attr, 32768);

	while (VSL_Dispatch(vd, gen_traffic, NULL) == 0)
		/* nothing */ ;
	thread_close(-1);	/* -1: close every thread (see below) */
	exit(0);
}
Example #7
static void
thread_close(int fd)
{

	/* fd == -1 means "close all": walk every slot recursively. */
	if (fd == -1) {
		for (fd = 0; fd < nthreads; ++fd)
			thread_close(fd);
		return;
	}

	assert(fd < nthreads);

	if (threads[fd] == NULL)
		return;
	mailbox_close(&threads[fd]->mbox);
	pthread_join(threads[fd]->thread_id, NULL);
	thread_log(0, 0, "thread %p stopped",
	    (void *)threads[fd]->thread_id);
	thread_clear(threads[fd]);
	mailbox_destroy(&threads[fd]->mbox);
	freez(threads[fd]);
}
Example #8
void main() {
    /* Harness: reset state, then exercise one open/close cycle. */
    open = 0;
    power_on = 0;
    thread_open();
    thread_close();
}
Example #9
void igs::resource::thread_join(const HANDLE thread_id)
{
	/* Wait for the thread to finish, then release its handle. */
	thread_wait(thread_id);
	thread_close(thread_id);
}
Example #10
void *thread_readstream(void *vargs)
{
    struct thread *t = (struct thread *) vargs;

    pthread_cleanup_push(thread_readstream_close, t);

    while (session.state != STATE_DEAD)
    {
        char *msg = NULL;

        /* During connection setup, coordinate with the TLS/init
           logic through the state machine before each blocking read. */
        if (session.state != STATE_RUN)
        {
            pthread_mutex_lock(&_lock_readstream);

            while (session.state == STATE_TLS_INIT)
                pthread_cond_wait(&_cond_readstream, &_lock_readstream);

            if (session.state == STATE_INIT)
            {
                session.state = STATE_POLL;
                pthread_cond_signal(&_cond_readstream);
            }

            pthread_mutex_unlock(&_lock_readstream);

            msg = stream_read(session.wfs);

            pthread_mutex_lock(&_lock_readstream);

            if (session.state == STATE_POLL)
            {
                session.state = STATE_INIT;
                pthread_cond_signal(&_cond_readstream);
            }

            pthread_mutex_unlock(&_lock_readstream);
        }
        else
        {
            msg = stream_read(session.wfs);
        }

        if (msg == NULL || *msg == '\0')
        {
            free(msg); /* free(NULL) is a no-op; avoids leaking empty reads */

            if (session.state != STATE_DEAD)
            {
                status_set(STATUS_OFFLINE);
            }

            break;
        }

        { /* Replace any " with ' */
            for (char *s = msg; *s; ++s)
                if (*s == '"')
                    *s = '\'';
        }

        thread_readstream_post_new_msg(msg);

        session.xmpp.last_query = time(NULL);
    }

    pthread_cleanup_pop(1);
    return thread_close(t);
}
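
thread_readstream_close, installed with pthread_cleanup_push, is not part of the snippet. A minimal sketch of a compatible handler, assuming its job is to wake anything still blocked on the state hand-shake; the body is an assumption:

/* Hypothetical cleanup handler: runs from pthread_cleanup_pop(1)
   or on cancellation; nudge the state machine out of any wait. */
static void thread_readstream_close(void *vargs)
{
    struct thread *t = (struct thread *) vargs;
    (void) t;

    pthread_mutex_lock(&_lock_readstream);
    pthread_cond_broadcast(&_cond_readstream);
    pthread_mutex_unlock(&_lock_readstream);
}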
Example #11
void poirot_main() {
    open = 0;
    power_on = 0;
    __async_call thread_open();
    __async_call thread_close();
}