Esempio n. 1
0
/**
 * Initializes the GPUVM library.
 *
 * @param ndevs number of devices; must be non-zero
 * @param devs  array of ndevs device handles; required (non-NULL) when
 *              GPUVM_OPENCL is set, ignored when GPUVM_CUDA is set
 * @param flags bitwise OR of exactly the flags in GPUVM_API (at least one
 *              API flag is mandatory) plus optionally GPUVM_STAT,
 *              GPUVM_WRITER_SIG_BLOCK and GPUVM_UNLINK_NO_SYNC_BACK
 * @return 0 on success, a negative GPUVM error code otherwise.
 *         On failure the "initialized" marker is rolled back, so the
 *         caller may retry gpuvm_init() without getting GPUVM_ETWICE.
 */
int gpuvm_init(unsigned ndevs, void **devs, int flags) {
	// check arguments
	if(ndevs == 0) {
		fprintf(stderr, "gpuvm_init: zero devices not allowed\n");
		return GPUVM_EARG;
	}
	if(flags & ~(GPUVM_API | GPUVM_STAT | GPUVM_WRITER_SIG_BLOCK | 
							 GPUVM_UNLINK_NO_SYNC_BACK) || !(flags & GPUVM_API)) {
		fprintf(stderr, "gpuvm_init: invalid flags\n");
		return GPUVM_EARG;
	}

	// check state: a non-zero device count doubles as the "initialized" flag
	if(ndevs_g) {
		fprintf(stderr, "gpuvm_init: GPUVM already initialized\n");
		return GPUVM_ETWICE;
	}
	ndevs_g = ndevs;

	// initialize auxiliary structures (separate allocator must come first,
	// as smalloc() below depends on it)
	int err = salloc_init();
	if(err)
		goto fail;

	// initialize devices
	devs_g = (void**)smalloc(ndevs * sizeof(void*));
	if(!devs_g) {
		err = GPUVM_ESALLOC;
		goto fail;
	}

	if(flags & GPUVM_OPENCL) {
		if(!devs) {
			fprintf(stderr, "gpuvm_init: null pointer to devices not allowed\n");
			err = GPUVM_ENULL;
			goto fail;
		}
		memcpy(devs_g, devs, ndevs * sizeof(void*));
	} else if(flags & GPUVM_CUDA) {
		// ignore devs, just zero out devs_g
		memset(devs_g, 0, ndevs * sizeof(void*));
	}

	// continue with initialization, stopping at the first failing stage
	if((err = sync_init()))
		goto fail;
	if((err = devapi_init(flags)))
		goto fail;
	if((err = handler_init()))
		goto fail;
	if((err = stat_init(flags)))
		goto fail;
	if((err = tsem_init()))
		goto fail;
	if((err = wthreads_init()))
		goto fail;

	return 0;

fail:
	// roll back the "initialized" marker: the original code left ndevs_g set
	// on error, so any subsequent retry wrongly failed with GPUVM_ETWICE
	ndevs_g = 0;
	return err;
}  // gpuvm_init
Esempio n. 2
0
/*
 * Entry point: serves TOPS requests on a ZeroMQ REP socket (tcp port 9292)
 * and publishes updates on a PUB socket (tcp port 9293).  Loops forever
 * dispatching requests by header; the cleanup after the loop is only
 * reached if the loop is ever given an exit condition.
 */
int main(int argc, char *argv[])
{
	void *ctx, *socket, *pub;
	handler_t *handler;
	int rc; 
	char header[TOPS_MAX_HEADER_SIZE];
	
	ctx = zmq_init (1);
	if (!ctx)
		return 1;
	// TODO: cmd line options for socket ports
	socket = zmq_socket (ctx, ZMQ_REP);
	if (!socket)
		return 1;
	rc = zmq_bind (socket, "tcp://*:9292");
	if (rc != 0)
		return 1;		// port already in use, bad address, ...
	pub = zmq_socket (ctx, ZMQ_PUB);
	if (!pub)
		return 1;
	rc = zmq_bind  (pub, "tcp://*:9293");
	if (rc != 0)
		return 1;
	handler = handler_init ();
	if (!handler)
		return 1;
	
	while (1) {
		// zero the buffer so a short message is always NUL-terminated
		// for the strcmp() dispatch below
		memset (header, 0, TOPS_MAX_HEADER_SIZE);
		zmq_recv (socket, header, TOPS_MAX_HEADER_SIZE, 0);
		//printf("DEBUG: %s\n", header);
		
		// dispatch on the request header; each handler_* is expected to
		// complete the REP request/reply cycle on `socket`
		if (strcmp (header, TOPS_ADD) == 0) {
			rc = handler_add (handler, socket, pub);
		} else if (strcmp (header, TOPS_REM) == 0) {
			rc = handler_rem (handler, socket);
		} else if (strcmp (header, TOPS_GET) == 0) {
			rc = handler_get (handler, socket);
		} else {
			rc = handler_unknown (handler, socket);
		}
		
		if (rc != 0) {
			// TODO: asplode!
		}
	}
	handler_close (handler);
	zmq_close (pub);
	zmq_close (socket);
	zmq_term (ctx);
	
	return 0;
}
Esempio n. 3
0
/*
 * Dispatch one scheduler IPC message (the global `imsg`) to the matching
 * handler_* backend callback and send the result back on `ibuf`.
 *
 * Request payloads are consumed field-by-field with scheduler_msg_get()
 * and terminated with scheduler_msg_end(); replies are emitted either as
 * a single imsg_compose() or built incrementally with scheduler_msg_add()
 * + scheduler_msg_close().  The pairing and order of these calls is part
 * of the wire protocol — do not reorder.
 */
static void
scheduler_msg_dispatch(void)
{
	size_t			 n, sz, count;
	struct evpstate		 evpstates[MAX_BATCH_SIZE];
	uint64_t		 evpid, evpids[MAX_BATCH_SIZE], u64;
	uint32_t		 msgids[MAX_BATCH_SIZE], version, msgid;
	struct scheduler_info	 info;
	int			 typemask, r, type, types[MAX_BATCH_SIZE];
	int			 delay;

	switch (imsg.hdr.type) {
	case PROC_SCHEDULER_INIT:
		log_debug("scheduler-api:  PROC_SCHEDULER_INIT");
		scheduler_msg_get(&version, sizeof(version));
		scheduler_msg_end();

		// refuse to talk to a peer built against a different API version
		if (version != PROC_SCHEDULER_API_VERSION) {
			log_warnx("warn: scheduler-api: bad API version");
			fatalx("scheduler-api: exiting");
		}

		r = handler_init();

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_INSERT:
		log_debug("scheduler-api:  PROC_SCHEDULER_INSERT");
		scheduler_msg_get(&info, sizeof(info));
		scheduler_msg_end();

		r = handler_insert(&info);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_COMMIT:
		log_debug("scheduler-api:  PROC_SCHEDULER_COMMIT");
		scheduler_msg_get(&msgid, sizeof(msgid));
		scheduler_msg_end();

		n = handler_commit(msgid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &n, sizeof(n));
		break;

	case PROC_SCHEDULER_ROLLBACK:
		log_debug("scheduler-api:  PROC_SCHEDULER_ROLLBACK");
		scheduler_msg_get(&msgid, sizeof(msgid));
		scheduler_msg_end();

		n = handler_rollback(msgid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &n, sizeof(n));
		break;

	case PROC_SCHEDULER_UPDATE:
		log_debug("scheduler-api:  PROC_SCHEDULER_UPDATE");
		scheduler_msg_get(&info, sizeof(info));
		scheduler_msg_end();

		r = handler_update(&info);

		// reply carries the status, plus the (possibly modified) envelope
		// info only on success (r == 1)
		scheduler_msg_add(&r, sizeof(r));
		if (r == 1)
			scheduler_msg_add(&info, sizeof(info));
		scheduler_msg_close();
		break;

	case PROC_SCHEDULER_DELETE:
		log_debug("scheduler-api:  PROC_SCHEDULER_DELETE");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_end();

		r = handler_delete(evpid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_HOLD:
		log_debug("scheduler-api: PROC_SCHEDULER_HOLD");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_get(&u64, sizeof(u64));
		scheduler_msg_end();

		r = handler_hold(evpid, u64);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_RELEASE:
		log_debug("scheduler-api: PROC_SCHEDULER_RELEASE");
		scheduler_msg_get(&type, sizeof(type));
		scheduler_msg_get(&u64, sizeof(u64));
		// `r` is read as the request's count argument here, then reused
		// for the handler's return value below
		scheduler_msg_get(&r, sizeof(r));
		scheduler_msg_end();

		r = handler_release(type, u64, r);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_BATCH:
		log_debug("scheduler-api:  PROC_SCHEDULER_BATCH");
		scheduler_msg_get(&typemask, sizeof(typemask));
		scheduler_msg_get(&count, sizeof(count));
		scheduler_msg_end();

		// clamp the request to the local array capacity
		if (count > MAX_BATCH_SIZE)
			count = MAX_BATCH_SIZE;

		// handler updates count to the number of entries actually filled
		r = handler_batch(typemask, &delay, &count, evpids, types);
		scheduler_msg_add(&r, sizeof(r));
		scheduler_msg_add(&delay, sizeof(delay));
		scheduler_msg_add(&count, sizeof(count));
		if (r > 0) {
			scheduler_msg_add(evpids, sizeof(*evpids) * count);
			scheduler_msg_add(types, sizeof(*types) * count);
		}
		scheduler_msg_close();
		break;

	case PROC_SCHEDULER_MESSAGES:
		log_debug("scheduler-api:  PROC_SCHEDULER_MESSAGES");
		scheduler_msg_get(&msgid, sizeof(msgid));
		scheduler_msg_get(&sz, sizeof(sz));
		scheduler_msg_end();

		if (sz > MAX_BATCH_SIZE)
			sz = MAX_BATCH_SIZE;

		// reply payload length (n entries) tells the peer how many
		// message ids were returned
		n = handler_messages(msgid, msgids, sz);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, msgids,
		    n * sizeof(*msgids));
		break;

	case PROC_SCHEDULER_ENVELOPES:
		log_debug("scheduler-api:  PROC_SCHEDULER_ENVELOPES");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_get(&sz, sizeof(sz));
		scheduler_msg_end();

		if (sz > MAX_BATCH_SIZE)
			sz = MAX_BATCH_SIZE;

		// as above: the number of evpstate entries is implied by the
		// reply payload length
		n = handler_envelopes(evpid, evpstates, sz);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, evpstates,
		    n * sizeof(*evpstates));
		break;

	case PROC_SCHEDULER_SCHEDULE:
		log_debug("scheduler-api:  PROC_SCHEDULER_SCHEDULE");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_end();

		r = handler_schedule(evpid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_REMOVE:
		log_debug("scheduler-api:  PROC_SCHEDULER_REMOVE");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_end();

		r = handler_remove(evpid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_SUSPEND:
		log_debug("scheduler-api:  PROC_SCHEDULER_SUSPEND");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_end();

		r = handler_suspend(evpid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	case PROC_SCHEDULER_RESUME:
		log_debug("scheduler-api:  PROC_SCHEDULER_RESUME");
		scheduler_msg_get(&evpid, sizeof(evpid));
		scheduler_msg_end();

		r = handler_resume(evpid);

		imsg_compose(&ibuf, PROC_SCHEDULER_OK, 0, 0, -1, &r, sizeof(r));
		break;

	default:
		// unknown message type is a protocol violation: abort
		log_warnx("warn: scheduler-api: bad message %d", imsg.hdr.type);
		fatalx("scheduler-api: exiting");
	}
}