/**
 * Reads the thread specific userdata to figure out what
 * we need to handle. Things that purely effect the network
 * stack should be handled here, but otherwise we should defer
 * to the connection handlers.
 */
static void invoke_event_handler(worker_ev_userdata* data) {
    // Get the offending handle
    ev_io *watcher = data->watcher;
    int fd = watcher->fd;

    // Check if this is either of the listeners
    if (watcher == &data->netconf->tcp_client) {
        // Accept the new client
        handle_new_client(fd, data);

        // Reschedule the listener
        schedule_async(data->netconf, SCHEDULE_WATCHER, watcher);
        return;

    // If it is write ready, dispatch the write handler
    }  else if (data->ready_events & EV_WRITE) {
        handle_client_writebuf(watcher, data);
        return;
    }

    // Check for UDP inbound
    if (watcher == &data->netconf->udp_client) {
        // Read the message and process
        // NOTE(review): a zero return from handle_udp_message presumably
        // means "data buffered, ready for the protocol layer" — confirm.
        if (!handle_udp_message(watcher, data)) {
            statsite_proxy_conn_handler handle = {data->netconf->config, data->netconf->proxy, watcher->data};
            handle_client_connect(&handle);
        }

        // Reschedule the listener
        schedule_async(data->netconf, SCHEDULE_WATCHER, watcher);
        return;
    }

    /*
     * If it is not a listener, it must be a connected
     * client. We should just read all the available data,
     * append it to the buffers, and then invoke the
     * connection handlers.
     */
    conn_info *conn = watcher->data;
    // Hold a reference for the duration of the handler so the conn
    // cannot be released while we are still using it (paired with the
    // decref below).
    incref_client_connection(conn);

    // Zero return means data was read successfully; hand it to the
    // protocol-level connection handler.
    if (!handle_client_data(watcher, data)) {
        statsite_proxy_conn_handler handle = {data->netconf->config, data->netconf->proxy, watcher->data};
        handle_client_connect(&handle);
    }

    // Reschedule the watcher, unless told otherwise.
    // (handle_client_data / handle_client_connect may clear
    // should_schedule, e.g. when the connection was closed.)
    if (conn->should_schedule) {
        schedule_async(data->netconf, SCHEDULE_WATCHER, watcher);
    }
    decref_client_connection(conn);
}
Example #2
0
/*
 * Recursive (divide-and-conquer) 1D forasync.
 *
 * While the remaining range is larger than the tile size, split it at
 * the midpoint: spawn an async task for the upper half and recurse on
 * the lower half. Ranges at or below the tile size are executed
 * directly by forasync1D_runner.
 *
 * @arg forasync_arg A forasync1D_t* describing the loop domain.
 */
void forasync1D_recursive(void * forasync_arg) {
    forasync1D_t * forasync = (forasync1D_t *) forasync_arg;
    loop_domain_t loop0 = forasync->loop0;
    int high0 = loop0.high;
    int low0 = loop0.low;
    int stride0 = loop0.stride;
    int tile0 = loop0.tile;

    //split the range into two, spawn a new task for the first half and recurse on the rest
    forasync1D_task_t * new_forasync_task = NULL;
    if ((high0 - low0) > tile0) {
        // Overflow-safe midpoint: (high0+low0)/2 is UB when the sum
        // exceeds INT_MAX; this form cannot overflow for valid ranges.
        int mid = low0 + (high0 - low0) / 2;
        // upper-half
        new_forasync_task = allocate_forasync1D_task();
        new_forasync_task->task.forasync_task.def.fct_ptr = forasync1D_recursive;
        new_forasync_task->task.forasync_task.def.arg = &(new_forasync_task->def);
        new_forasync_task->task.forasync_task.def.ddf_list = NULL;
        new_forasync_task->task.forasync_task.def.phased_clause = NULL;
        new_forasync_task->def.base.user = forasync->base.user;
        loop_domain_t new_loop0 = {mid, high0, stride0, tile0};
        new_forasync_task->def.loop0 = new_loop0;
        // update lower-half
        forasync->loop0.high = mid;
        // delegate scheduling to the underlying runtime
        //TODO can we make this a special async to avoid a get_current_finish ?
        schedule_async((async_task_t*)new_forasync_task, get_current_finish(), NO_PROP);
        //continue to work on the half task
        forasync1D_recursive(forasync_arg);
    } else {
        //compute the tile
        forasync1D_runner(forasync_arg);
    }
}
Example #3
0
void forasync2D_flat(void * forasync_arg) {
    forasync2D_t * forasync = (forasync2D_t *) forasync_arg;
    loop_domain_t loop0 = forasync->loop0;
    loop_domain_t loop1 = forasync->loop1;
    finish_t * current_finish = get_current_finish();
    int low0, low1;
    for(low0=loop0.low; low0<loop0.high; low0+=loop0.tile) {
        int high0 = (low0+loop0.tile)>loop0.high?loop0.high:(low0+loop0.tile);
        #if DEBUG_FORASYNC
            printf("Scheduling Task Loop1 %d %d\n",low0,high0);
        #endif
        for(low1=loop1.low; low1<loop1.high; low1+=loop1.tile) {
            int high1 = (low1+loop1.tile)>loop1.high?loop1.high:(low1+loop1.tile);
            #if DEBUG_FORASYNC
                printf("Scheduling Task %d %d\n",low1,high1);
            #endif
            forasync2D_task_t * new_forasync_task = allocate_forasync2D_task();
            new_forasync_task->task.forasync_task.def.fct_ptr = forasync2D_runner;
            new_forasync_task->task.forasync_task.def.arg = &(new_forasync_task->def);
            new_forasync_task->task.forasync_task.def.ddf_list = NULL;
            new_forasync_task->task.forasync_task.def.phased_clause = NULL;
            new_forasync_task->def.base.user = forasync->base.user;
            loop_domain_t new_loop0 = {low0, high0, loop0.stride, loop0.tile};
            new_forasync_task->def.loop0 = new_loop0;
            loop_domain_t new_loop1 = {low1, high1, loop1.stride, loop1.tile};
            new_forasync_task->def.loop1 = new_loop1;
            schedule_async((async_task_t*)new_forasync_task, current_finish, NO_PROP);
        }
    }
}
/**
 * Shuts down all the connections
 * and listeners and prepares to exit.
 * @arg netconf The config for the networking stack.
 */
int shutdown_networking(statsite_proxy_networking *netconf) {
    // Instruct the threads to shutdown
    // NOTE(review): should_run appears to be a plain int checked from
    // another thread — confirm whether it needs to be atomic/volatile.
    netconf->should_run = 0;

    // Break the EV loop
    schedule_async(netconf, EXIT, NULL);

    // Wait for the thread to return
    // NOTE(review): comparing pthread_t against 0 is non-portable;
    // works where pthread_t is integral — confirm target platforms.
    if (netconf->thread) pthread_join(netconf->thread, NULL);

    // Stop listening for new connections.
    // The event thread has already exited above, so it is safe to
    // stop the watchers and close their fds from this thread.
    ev_io_stop(&netconf->tcp_client);
    close(netconf->tcp_client.fd);
    ev_io_stop(&netconf->udp_client);
    close(netconf->udp_client.fd);

    // Stop the other timers
    ev_async_stop(&netconf->loop_async);

    // TODO: Close all the client/proxy connections
    // ??? For now, we just leak the memory
    // since we are shutdown down anyways...

    // Free the event loop
    ev_loop_destroy(EV_DEFAULT);

    // Free the netconf
    free(netconf);
    return 0;
}
/**
 * Invoked when a TCP listening socket fd is ready
 * to accept a new client. Accepts the client, initializes
 * the connection buffers, and prepares to start listening
 * for client data
 * @arg listen_fd The listening socket that is ready to accept.
 * @arg data Per-worker userdata providing access to the netconf.
 */
static void handle_new_client(int listen_fd, worker_ev_userdata* data) {
    // Accept the client connection.
    // accept() requires a socklen_t*, not an int* — using int here is
    // a type mismatch on platforms where the two differ.
    struct sockaddr_in client_addr;
    socklen_t client_addr_len = sizeof(client_addr);
    int client_fd = accept(listen_fd,
                        (struct sockaddr*)&client_addr,
                        &client_addr_len);

    // Check for an error
    if (client_fd == -1) {
        syslog(LOG_ERR, "Failed to accept() connection! %s.", strerror(errno));
        return;
    }

    // Setup the socket; close the fd on failure so it does not leak.
    if (set_client_sockopts(client_fd)) {
        close(client_fd);
        return;
    }

    // Debug info
    syslog(LOG_DEBUG, "Accepted client connection: %s %d [%d]",
            inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port), client_fd);

    // Get the associated conn object
    conn_info *conn = get_conn(data->netconf);

    // Initialize the libev stuff: one read watcher and one write
    // watcher over the same client fd.
    ev_io_init(&conn->client, prepare_event, client_fd, EV_READ);
    ev_io_init(&conn->write_client, prepare_event, client_fd, EV_WRITE);

    // Schedule the new client
    schedule_async(data->netconf, SCHEDULE_WATCHER, &conn->client);
}
Example #6
0
/*
 * Flat (non-recursive) 1D forasync: chop [loop0.low, loop0.high) into
 * tile0-sized chunks, schedule one async task per chunk under the
 * current finish scope, then schedule one extra task for any leftover
 * tail that is smaller than a tile.
 *
 * @arg forasync_arg A forasync1D_t* describing the loop domain.
 */
void forasync1D_flat(void * forasync_arg) {
    forasync1D_t * forasync = (forasync1D_t *) forasync_arg;
    loop_domain_t loop0 = forasync->loop0;
    int high0 = loop0.high;
    int stride0 = loop0.stride;
    int tile0 = loop0.tile;
    // NOTE(review): the chunk count is derived from high0 alone; if
    // loop0.low is nonzero this over-counts chunks — confirm callers
    // always use low == 0.
    int nb_chunks = (int) (high0/tile0);
    int size = tile0*nb_chunks;
    finish_t * current_finish = get_current_finish();
    int low0;
    for(low0 = loop0.low; low0<size; low0+=tile0) {
        // Clamp the chunk's upper bound to the domain edge. Without
        // this, a low0 that is not a multiple of tile0 makes the last
        // in-loop chunk span past high0.
        int chunk_high = (low0+tile0) > high0 ? high0 : (low0+tile0);
        #if DEBUG_FORASYNC
            printf("Scheduling Task %d %d\n",low0,chunk_high);
        #endif
        //TODO block allocation ?
        forasync1D_task_t * new_forasync_task = allocate_forasync1D_task();
        new_forasync_task->task.forasync_task.def.fct_ptr = forasync1D_runner;
        new_forasync_task->task.forasync_task.def.arg = &(new_forasync_task->def);
        new_forasync_task->task.forasync_task.def.ddf_list = NULL;
        new_forasync_task->task.forasync_task.def.phased_clause = NULL;
        new_forasync_task->def.base.user = forasync->base.user;
        loop_domain_t new_loop0 = {low0, chunk_high, stride0, tile0};
        new_forasync_task->def.loop0 = new_loop0;
        schedule_async((async_task_t*)new_forasync_task, current_finish, NO_PROP);
    }
    // handling leftover: one final task for the sub-tile tail
    if (size < high0) {
        #if DEBUG_FORASYNC
            printf("Scheduling Task %d %d\n",low0,high0);
        #endif
        forasync1D_task_t * new_forasync_task = allocate_forasync1D_task();
        new_forasync_task->task.forasync_task.def.fct_ptr = forasync1D_runner;
        new_forasync_task->task.forasync_task.def.arg = &(new_forasync_task->def);
        new_forasync_task->task.forasync_task.def.ddf_list = NULL;
        new_forasync_task->task.forasync_task.def.phased_clause = NULL;
        new_forasync_task->def.base.user = forasync->base.user;
        loop_domain_t new_loop0 = {low0, high0, loop0.stride, loop0.tile};
        new_forasync_task->def.loop0 = new_loop0;
        schedule_async((async_task_t*)new_forasync_task, current_finish, NO_PROP);
    }
}
/**
 * Invoked when a client connection is ready to be written to.
 */
static int handle_client_writebuf(ev_io *watch, worker_ev_userdata* data) {
    // Get the associated connection struct
    conn_info *conn = watch->data;

    // Build the IO vectors to perform the write
    struct iovec vectors[2];
    int num_vectors;
    circbuf_setup_writev_iovec(&conn->output, (struct iovec*)&vectors, &num_vectors);

    // Issue the write
    ssize_t write_bytes = writev(watch->fd, (struct iovec*)&vectors, num_vectors);

    int reschedule = 0;
    if (write_bytes > 0) {
        // Update the cursor
        circbuf_advance_read(&conn->output, write_bytes);

        // Check if we should reset the use_write_buf.
        // This is done when the buffer size is 0.
        if (conn->output.read_cursor == conn->output.write_cursor) {
            conn->use_write_buf = 0;
        } else {
            // Buffer not drained yet — keep the write watcher alive.
            reschedule = 1;
        }
    }

    // Handle any errors
    // NOTE(review): when write_bytes == 0 errno is not set by writev,
    // so this reads a possibly-stale errno — confirm intended.
    if (write_bytes <= 0 && (errno != EAGAIN && errno != EINTR)) {
        syslog(LOG_ERR, "Failed to write() to connection [%d]! %s.",
                conn->client.fd, strerror(errno));
        close_client_connection(conn);
        // Drop the reference taken when the async write was scheduled.
        decref_client_connection(conn);
        return 1;
    }

    // Check if we should reschedule or end.
    // On reschedule the reference is carried over to the next
    // invocation; otherwise release it here.
    if (reschedule) {
        schedule_async(data->netconf, SCHEDULE_WATCHER, &conn->write_client);
    } else {
        decref_client_connection(conn);
    }
    return 0;
}
/**
 * Attempts to write a message directly to a client connection.
 * If the write is short or would block, the unsent tail is copied
 * into the connection's output circular buffer and an async write
 * watcher is scheduled to flush it later.
 * @arg conn The connection to write to.
 * @arg msg_buffer The message bytes to send.
 * @arg buf_size The number of bytes in msg_buffer.
 * @return 0 on success (fully sent or buffered), 1 on a fatal error.
 */
static int send_proxy_msg_direct(conn_info *conn, char *msg_buffer, int buf_size) {
    // Stack allocate the iovector
    struct iovec vector;

    // Setup all the pointers
    vector.iov_base = msg_buffer;
    vector.iov_len = buf_size;

    // Perform the write
    ssize_t sent = writev(conn->client.fd, &vector, 1);
    if (sent == buf_size) return 0;

    // Check for a fatal error
    if (sent == -1) {
        if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) {
            syslog(LOG_ERR, "Failed to send() to connection [%d]! %s.",
                    conn->client.fd, strerror(errno));

            // Probably want to try and reopen connection here???
            close_client_connection(conn);
            return 1;
        }

        // Non-fatal (EAGAIN/EINTR/EWOULDBLOCK): nothing was written,
        // so buffer the entire message. Previously sent == -1 fell
        // through below, indexing msg_buffer at -1 and copying
        // buf_size + 1 bytes — out-of-bounds on both ends.
        sent = 0;
    }

    // Copy unsent data to circ buffer
    if (sent < buf_size) {
        if (circbuf_write(&conn->output, msg_buffer + sent, buf_size - sent)) {
            return 1;
        }
    }

    // Setup the async write; the reference is released by
    // handle_client_writebuf once the buffer drains or errors out.
    conn->use_write_buf = 1;
    incref_client_connection(conn);
    schedule_async(conn->netconf, SCHEDULE_WATCHER, &conn->write_client);

    // Done
    return 0;
}