Example #1
int hammer_launch_cpu_workers()
{
	int efd, i;
	pthread_t tid;
	pthread_attr_t attr;
	hammer_cpu_worker_context_t *context;
	hammer_sched_t *sched_node;

	for (i = 0; i < config->cpu_worker_num; i++) {
		/* Create an epoll file descriptor for this worker */
		efd = hammer_epoll_create(config->epoll_max_events);
		if (efd < 0) {
			return -1;
		}

		/* pass a memory block to each worker */
		context = (hammer_cpu_worker_context_t *)hammer_mem_malloc(sizeof(hammer_cpu_worker_context_t));
		sched_node = &(sched_set[i]);
		hammer_init_sched_node(sched_node, efd, i);
		context->sched = sched_node;
		context->batch = &(batch_set[i]);
		context->core_id = config->core_ids[i];

		pthread_attr_init(&attr);
		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
		if (pthread_create(&tid, &attr, hammer_cpu_worker_loop, (void *)context) != 0) {
			printf("pthread_create error!!\n");
			return -1;
		}

	}

	return 0;
}
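Every example on this page allocates through hammer_mem_malloc() and reads the file-scope globals config, sched_set and batch_set, none of which are shown here. A minimal sketch of what they plausibly look like, inferred only from how the examples use them: no call site checks the return value, so the wrapper presumably aborts on failure. This is an assumption, not the project's confirmed implementation, and the hammer_*_t types come from the project's headers.

#include <stdio.h>
#include <stdlib.h>

/* Assumed file-scope globals, inferred from usage in the examples */
static hammer_config_t *config;     /* allocated in hammer_config_init() */
static hammer_sched_t  *sched_set;  /* allocated in hammer_init_sched_set() */
static hammer_batch_t  *batch_set;  /* allocated in hammer_init_batch_set() */

/* Sketch: a malloc wrapper that never returns NULL */
void *hammer_mem_malloc(size_t size)
{
	void *p = malloc(size);

	if (p == NULL) {
		fprintf(stderr, "hammer_mem_malloc: out of memory (%zu bytes)\n", size);
		exit(1);
	}
	return p;
}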
Example #2
int hammer_launch_gpu_workers()
{
	pthread_t tid;
	pthread_attr_t attr;
	int thread_id, i;
	hammer_gpu_worker_context_t *context;
	hammer_sched_t *sched_node;

	for (i = 0; i < config->gpu_worker_num; i++) {
		/* GPU worker threads are numbered after the CPU workers */
		thread_id = config->cpu_worker_num + i;

		/* pass a memory block to each worker */
		context = (hammer_gpu_worker_context_t *)hammer_mem_malloc(sizeof(hammer_gpu_worker_context_t));
		context->cpu_batch_set = batch_set;
		context->core_id = config->core_ids[thread_id];
		/* index by the global thread id so GPU workers get their own
		   slots after the CPU workers' entries in sched_set */
		sched_node = &(sched_set[thread_id]);
		hammer_init_sched_node(sched_node, 0, thread_id);
		context->sched = sched_node;

		pthread_attr_init(&attr);
		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
		if (pthread_create(&tid, &attr, hammer_gpu_worker_loop, (void *)context) != 0) {
			printf("pthread_create error!!\n");
			return -1;
		}
	}

	return 0;
}
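Both launchers hand a core_id to the worker context but never set thread affinity themselves, so pinning presumably happens inside hammer_cpu_worker_loop / hammer_gpu_worker_loop. A common way to do that on Linux, sketched under that assumption (pin_to_core is a hypothetical helper, not part of the code above):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Sketch: pin the calling worker thread to the given core */
static int pin_to_core(unsigned int core_id)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(core_id, &set);
	return pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}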
Example #3
hammer_job_t *hammer_job_get()
{
#if defined(HAMMER_MALLOC)
	return (hammer_job_t *)hammer_mem_malloc(sizeof(hammer_job_t));
#else
	hammer_sched_t *sched = hammer_sched_get_sched_struct();
	return (hammer_job_t *)libpool_alloc(JOB_SIZE, sched->thread_id);
#endif
}
Example #4
hammer_connection_t *hammer_get_connection()
{
#if defined(HAMMER_MALLOC)
	return hammer_mem_malloc(sizeof(hammer_connection_t));
#else
	hammer_sched_t *sched = hammer_sched_get_sched_struct();
	return (hammer_connection_t *)libpool_alloc(CONN_SIZE, sched->thread_id);
#endif
}
Example #5
int hammer_init_sched_set()
{
	int i;

	sched_set = (hammer_sched_t *)hammer_mem_malloc(config->worker_num * sizeof(hammer_sched_t));
	for (i = 0; i < config->worker_num; i++) {
		hammer_init_sched_node(&sched_set[i], -1, -1);
	}

	return 0;
}
Example #6
// init a connection struct
void hammer_init_connection(hammer_connection_t *c)
{
	c->socket = 0;
	c->ssl = 0; // ssl not enabled by default
	c->body_ptr = hammer_mem_malloc(config->conn_buffer_size);
	c->body_size = config->conn_buffer_size;
	c->body_length = 0;
	c->r_conn = NULL;
	c->job_list = NULL;

	return;
}
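Examples #4 and #6 are naturally paired; a call site might look like the sketch below, where on_accept() and accepted_fd are hypothetical stand-ins for a descriptor obtained elsewhere (e.g. from accept()) and are not part of the code above:

void on_accept(int accepted_fd)
{
	hammer_connection_t *c = hammer_get_connection();

	hammer_init_connection(c);  /* allocates the body buffer, clears state */
	c->socket = accepted_fd;
}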
Example #7
hammer_epoll_handlers_t *hammer_epoll_set_handlers(void (*read) (hammer_connection_t *),
                                         void (*ssl_read) (hammer_connection_t *),
                                         void (*write) (hammer_connection_t *),
                                         void (*ssl_write) (hammer_connection_t *),
                                         void (*error) (hammer_connection_t *),
                                         void (*close) (hammer_connection_t *),
                                         void (*timeout) (hammer_connection_t *))
{
	hammer_epoll_handlers_t *handler;

	handler = hammer_mem_malloc(sizeof(hammer_epoll_handlers_t));
	handler->read = (void *) read;
	handler->ssl_read = (void *) ssl_read;
	handler->write = (void *) write;
	handler->ssl_write = (void *) ssl_write;
	handler->error = (void *) error;
	handler->close = (void *) close;
	handler->timeout = (void *) timeout;

	return handler;
}
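Registration would then look something like the sketch below; the my_* callbacks and setup_handlers() are hypothetical placeholders, not functions from this page:

static void my_read(hammer_connection_t *c)      { /* socket readable */ }
static void my_ssl_read(hammer_connection_t *c)  { /* SSL socket readable */ }
static void my_write(hammer_connection_t *c)     { /* socket writable */ }
static void my_ssl_write(hammer_connection_t *c) { /* SSL socket writable */ }
static void my_error(hammer_connection_t *c)     { /* EPOLLERR/EPOLLHUP */ }
static void my_close(hammer_connection_t *c)     { /* tear the connection down */ }
static void my_timeout(hammer_connection_t *c)   { /* idle timeout */ }

static hammer_epoll_handlers_t *setup_handlers(void)
{
	return hammer_epoll_set_handlers(my_read, my_ssl_read, my_write,
	                                 my_ssl_write, my_error, my_close,
	                                 my_timeout);
}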
Example #8
int hammer_config_init()
{
	int length, i;

	config = hammer_mem_calloc(sizeof(hammer_config_t));

	config->ssl = 0; // whether this is an SSL proxy
	config->gpu = 0; // whether batches are offloaded to the GPU

	config->cpu_worker_num = 1;
	config->gpu_worker_num = 0;
	config->worker_num = config->cpu_worker_num + config->gpu_worker_num;
	config->epoll_max_events = 128;

	length = strlen("219.219.216.11");
	config->server_ip = malloc(length);
	memcpy(config->server_ip, "219.219.216.11", length);
	config->server_port = 80;

	length = strlen("127.0.0.1");
	config->listen_ip = malloc(length);
	memcpy(config->listen_ip, "127.0.0.1", length);
	config->listen_port = 80;

	config->conn_buffer_size = 4096;

	config->core_ids = hammer_mem_malloc(config->worker_num * sizeof(unsigned int));
	for (i = 0; i < config->worker_num; i++) {
		/* for now, worker i is simply pinned to core i */
		config->core_ids[i] = i;
	}

	config->I = 40; // batch interval, in microseconds (see below)
	/* Buffer sizing: an interval of 40 us at 10 Gbps carries
	   40 us * 10 Gbps = 400 * 10^3 bits ~= 50 KB = 40 * 1.25 * 10^3 bytes.
	   With a 64-byte minimum packet size that is at most
	   50000 / 64 < 782 jobs per batch, so we allocate room for
	   1000 jobs at most. */
	config->batch_buf_max_size = config->I * 1.25 * 1000; // bytes
	config->batch_job_max_num = 1000;

	config->aes_key_size = 16; // 128-bit AES key (128/8 bytes)
	config->iv_size = 16; // 128-bit IV (128/8 bytes)
	config->hmac_key_size = 64; // HMAC-SHA1 key, in bytes

	return 0;
}
Example #9
int hammer_init_batch_set()
{
	batch_set = (hammer_batch_t *)hammer_mem_malloc(config->cpu_worker_num * sizeof(hammer_batch_t));
	return 0;
}
void *hammer_epoll_start(int efd, hammer_epoll_handlers_t *handler, int max_events)
{
	int i, ret = -1;
	int num_events;
	struct epoll_event *events;
	hammer_connection_t *c;
	// int fds_timeout;

	//fds_timeout = log_current_utime + config->timeout;
	events = hammer_mem_malloc(max_events * sizeof(struct epoll_event));
	
	while (1) {

		if (config->gpu) {
			/* Each iteration first checks whether the GPU has indicated
			   1) which buffer has been taken, and
			   2) which buffer has been processed */
			if (hammer_batch_if_gpu_processed_new()) {
				hammer_batch_forwarding();
			}
		}

		/* events already points to the event array */
		num_events = hammer_epoll_wait(efd, events, max_events);

		for (i = 0; i < num_events; i++) {
			c = (hammer_connection_t *) events[i].data.ptr;

			if (events[i].events & EPOLLIN) {
				if (c->type == HAMMER_CONN_CLIENT) {
					ret = (*handler->client_read) (c);
				} else {
					if (c->type != HAMMER_CONN_SERVER) {
						hammer_err("this connection is not a server conn?\n");
						exit(0);
					}
					ret = (*handler->server_read) (c);
				}
			}
			else if (events[i].events & EPOLLOUT) {
				if (c->type == HAMMER_CONN_CLIENT) {
					ret = (*handler->client_write) (c);
				} else {
					if (c->type != HAMMER_CONN_SERVER) {
						hammer_err("this connection is not a server conn?\n");
						exit(0);
					}
					ret = (*handler->server_write) (c);
				}
			}
			else if (events[i].events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP)) {
				ret = (*handler->error) (c);
			} else {
				hammer_err("What's up man, error here\n");
				exit(0);
			}

			if (ret < 0) {
				HAMMER_TRACE("[FD %i] Epoll Event FORCE CLOSE | ret = %i", fd, ret);
				(*handler->close) (c);
			}
		}

		// FIXME: enable timeout
		/* Check timeouts and update next one 
		   if (log_current_utime >= fds_timeout) {
		   hammer_sched_check_timeouts(sched);
		   fds_timeout = log_current_utime + config->timeout;
		   }*/
	}

	return NULL;
}
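Putting the examples together, a plausible startup sequence is sketched below. main() is not part of the code shown on this page; the ordering is inferred from the data dependencies (config before the sched and batch sets, and all of them before the workers):

#include <unistd.h>

int main(void)
{
	/* Build the global config first; everything else reads it */
	if (hammer_config_init() != 0)
		return 1;

	/* Allocate per-worker scheduler nodes and CPU batch slots */
	if (hammer_init_sched_set() != 0)
		return 1;
	if (hammer_init_batch_set() != 0)
		return 1;

	/* Launch detached CPU workers, then GPU workers (if any) */
	if (hammer_launch_cpu_workers() != 0)
		return 1;
	if (hammer_launch_gpu_workers() != 0)
		return 1;

	/* Workers are detached; keep the main thread alive */
	for (;;)
		pause();
}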