Example 1
int web_process_create(const char* appname, const char* arg1, const char* arg2, process_t *pid)
{
	process_create_param_t param;
	memset(&param, 0, sizeof(param));

#if defined(OS_WINDOWS)
	char cmdLine[MAX_PATH] = {0};
	snprintf(cmdLine, sizeof(cmdLine)-1, "%s %s %s", appname, arg1, arg2?arg2:"");
	param.lpCommandLine = cmdLine;

	char workDir[MAX_PATH] = {0};
	path_getcwd(workDir, sizeof(workDir));
	param.lpCurrentDirectory = workDir;

#if defined(DEBUG) || defined(_DEBUG)
	param.dwCreationFlags |= CREATE_NEW_CONSOLE;
#endif
#if defined(_UNICODE) || defined(UNICODE)
	param.dwCreationFlags |= CREATE_UNICODE_ENVIRONMENT;
#endif

#else
	char* argv[] = { (char*)appname, (char*)arg1, (char*)arg2, NULL };
	param.argv = argv;
	param.envp = envp;	/* envp is assumed to be declared elsewhere in the enclosing file; it is not defined in this function */
#endif

	return process_createve(appname, &param, pid);
}
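A minimal call sketch for the function above; process_t comes from the snippet's own headers, the 0-on-success return convention is assumed, and the program name and arguments are placeholders.

	process_t web_pid;
	/* launch "./webserver --port 8080" as a child process (placeholder arguments) */
	if (web_process_create("./webserver", "--port", "8080", &web_pid) != 0) {
		/* creation failed; handle the error as the caller sees fit */
	}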
Example 2
klogger_t* logger_create(const char* path, int level, int mode) {
    char temp[PATH_MAX] = {0};
    klogger_t* logger = create(klogger_t);
    verify(logger);
    if (!logger) {
        return 0;
    }
    memset(logger, 0, sizeof(klogger_t));
    logger->mode  = mode;
    logger->level = level;
    logger->lock  = lock_create();
    verify(logger->lock);
    if (!path) {
        /* create the log file in the current working directory */
        path = path_getcwd(temp, sizeof(temp));
        strcat(temp, "/knet.log");
    }
    if (mode & logger_mode_file) {
        verify(path);
        if (mode & logger_mode_override) {
            /* open and truncate */
            logger->fd = fopen(path, "w+");
        } else {
            /* append to the existing log */
            logger->fd = fopen(path, "a+");
        }
        if (!logger->fd) {
            goto fail_return;
        }
    }
    return logger;
fail_return:
    destroy(logger);    /* note: the lock created above is not released on this path */
    return 0;
}
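A minimal usage sketch based only on identifiers visible in the snippet above; the level argument is a placeholder value since the level constants are not shown here.

    /* open (and truncate) <cwd>/knet.log; 0 stands in for a real log level */
    klogger_t* logger = logger_create(NULL, 0, logger_mode_file | logger_mode_override);
    if (!logger) {
        /* creation failed: out of memory or the log file could not be opened */
    }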
Example 3
static int batch_queue_dryrun_create (struct batch_queue *q)
{
	char *cwd = path_getcwd();

	batch_queue_set_feature(q, "local_job_queue", NULL);
	batch_queue_set_feature(q, "batch_log_name", "%s.sh");
	batch_queue_set_option(q, "cwd", cwd);
	free(cwd);	/* path_getcwd returns a heap-allocated string here; batch_queue_set_option is assumed to keep its own copy */
	return 0;
}
Example 4
int
path_absolute(const char* path, stralloc* sa) {
  int ret = 0;
  stralloc_zero(sa);

  if(!path_isabs(path)) {
    path_getcwd(sa);
    stralloc_catc(sa, PATHSEP_C);
    stralloc_cats(sa, path);
    return 1;
  }

  stralloc_copys(sa, path);
  return 0;
}
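A short usage sketch for path_absolute, assuming a stralloc can be zero-initialized as in the usual djb-style stralloc library; the file name is only an example.

  stralloc sa = {0};
  if (path_absolute("src/main.c", &sa)) {
    /* the path was relative: sa now holds "<cwd>/src/main.c" */
  } else {
    /* the path was already absolute and was copied into sa unchanged */
  }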
Example 5
int path_lookup (char *search_path, const char *exe, char *dest, size_t destlen)
{
	char *s;
	char *e;
	size_t len = strlen(search_path);

	s = e = search_path;

	while(e < search_path+len) {
		DIR *dirp = NULL;
		char tmp[PATH_MAX];	/* declared at loop scope so the buffer stays valid after the if block below */

		while(*e != ':' && *e != '\0') e++;
		*e = '\0';

		if( *s != '/' ){
			char *cwd;
			cwd = path_getcwd();
			snprintf(tmp, PATH_MAX, "%s/%s", cwd, s);
			free(cwd);
			s = tmp;
		}

		if(( dirp = opendir(s) )) {
			struct dirent *dp = NULL;
			while(( dp = readdir(dirp) )) {
				if( strcmp(dp->d_name, exe) == 0 ) {
					struct stat sb;
					char fn[PATH_MAX];
					/* build the candidate path safely and keep it null-terminated */
					snprintf(fn, sizeof(fn), "%s/%s", s, dp->d_name);
					/* accept only regular files that are executable by the owner */
					if( stat(fn, &sb) == 0 && S_ISREG(sb.st_mode) && (sb.st_mode & S_IXUSR) ){
						strncpy(dest, fn, destlen);
						dest[destlen-1] = '\0';
						closedir(dirp);
						return 0;
					}
				}
			}
			closedir(dirp);
		}
		*e = ':';
		e++;
		s = e;
	}

	return 1;
}
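A usage sketch for path_lookup. The function temporarily writes '\0' separators into search_path while scanning, so the caller should pass a writable copy rather than the string returned by getenv; "gcc" is only an example target.

	char found[PATH_MAX];
	char *search = getenv("PATH") ? strdup(getenv("PATH")) : NULL;
	if (search && path_lookup(search, "gcc", found, sizeof(found)) == 0) {
		/* found now holds the full path of the first matching executable */
	}
	free(search);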
Example 6
static batch_job_id_t batch_job_condor_submit (struct batch_queue *q, const char *cmd, const char *extra_input_files, const char *extra_output_files, struct jx *envlist, const struct rmsummary *resources )
{
	FILE *file;
	int njobs;
	int jobid;
	const char *options = hash_table_lookup(q->options, "batch-options");

	if(setup_condor_wrapper("condor.sh") < 0) {
		debug(D_BATCH, "could not create condor.sh: %s", strerror(errno));
		return -1;
	}

	if(!string_istrue(hash_table_lookup(q->options, "skip-afs-check"))) {
		char *cwd = path_getcwd();
		if(!strncmp(cwd, "/afs", 4)) {
			debug(D_NOTICE|D_BATCH, "The working directory is '%s':", cwd);
			debug(D_NOTICE|D_BATCH, "This won't work because Condor is not able to write to files in AFS.");
			debug(D_NOTICE|D_BATCH, "Instead, run makeflow from a local disk like /tmp.");
			debug(D_NOTICE|D_BATCH, "Or, use the Work Queue with -T wq and condor_submit_workers.");
			free(cwd);
			exit(EXIT_FAILURE);
		}
		free(cwd);
	}

	file = fopen("condor.submit", "w");
	if(!file) {
		debug(D_BATCH, "could not create condor.submit: %s", strerror(errno));
		return -1;
	}

	fprintf(file, "universe = vanilla\n");
	fprintf(file, "executable = condor.sh\n");
	char *escaped = string_escape_condor(cmd);
	fprintf(file, "arguments = %s\n", escaped);
	free(escaped);
	if(extra_input_files)
		fprintf(file, "transfer_input_files = %s\n", extra_input_files);
	// Note that we do not use transfer_output_files, because that causes the job
	// to get stuck in a system hold if the files are not created.
	fprintf(file, "should_transfer_files = yes\n");
	fprintf(file, "when_to_transfer_output = on_exit\n");
	fprintf(file, "notification = never\n");
	fprintf(file, "copy_to_spool = true\n");
	fprintf(file, "transfer_executable = true\n");
	fprintf(file, "keep_claim_idle = 30\n");
	fprintf(file, "log = %s\n", q->logfile);

	const char *c_req = batch_queue_get_option(q, "condor-requirements");
	char *bexp = blacklisted_expression(q);

	if(c_req && bexp) {
		fprintf(file, "requirements = %s && %s\n", c_req, bexp);
	} else if(c_req) {
		fprintf(file, "requirements = %s\n", c_req);
	} else if(bexp) {
		fprintf(file, "requirements = %s\n", bexp);
	}

	if(bexp)
		free(bexp);

	/*
	Getting environment variables formatted for a condor submit
	file is very hairy, due to some strange quoting rules.
	To avoid problems, we simply export vars to the environment,
	and then tell condor getenv=true, which pulls in the environment.
	*/

	fprintf(file, "getenv = true\n");

	if(envlist) {
		jx_export(envlist);
	}

	if(options)
		fprintf(file, "%s\n", options);

	/* set same defaults as condor_submit_workers */
	int64_t cores  = 1;
	int64_t memory = 1024;
	int64_t disk   = 1024;

	if(resources) {
		cores  = resources->cores  > -1 ? resources->cores  : cores;
		memory = resources->memory > -1 ? resources->memory : memory;
		disk   = resources->disk   > -1 ? resources->disk   : disk;
	}

	/* convert disk to KB */
	disk *= 1024;

	if(batch_queue_get_option(q, "autosize")) {
		fprintf(file, "request_cpus   = ifThenElse(%" PRId64 " > TotalSlotCpus, %" PRId64 ", TotalSlotCpus)\n", cores, cores);
		fprintf(file, "request_memory = ifThenElse(%" PRId64 " > TotalSlotMemory, %" PRId64 ", TotalSlotMemory)\n", memory, memory);
		fprintf(file, "request_disk   = ifThenElse((%" PRId64 ") > TotalSlotDisk, (%" PRId64 "), TotalSlotDisk)\n", disk, disk);
	}
	else {
		fprintf(file, "request_cpus = %" PRId64 "\n", cores);
		fprintf(file, "request_memory = %" PRId64 "\n", memory);
		fprintf(file, "request_disk = %" PRId64 "\n", disk);
	}

	fprintf(file, "queue\n");
	fclose(file);

	file = popen("condor_submit condor.submit", "r");
	if(!file)
		return -1;

	char line[BATCH_JOB_LINE_MAX];
	while(fgets(line, sizeof(line), file)) {
		if(sscanf(line, "%d job(s) submitted to cluster %d", &njobs, &jobid) == 2) {
			pclose(file);
			debug(D_BATCH, "job %d submitted to condor", jobid);
			struct batch_job_info *info;
			info = malloc(sizeof(*info));
			memset(info, 0, sizeof(*info));
			info->submitted = time(0);
			itable_insert(q->job_table, jobid, info);
			return jobid;
		}
	}

	pclose(file);
	debug(D_BATCH, "failed to submit job to condor!");
	return -1;
}
Example 7
static void start_mesos_scheduler(struct batch_queue *q)
{

	pid_t mesos_pid;
	mesos_pid = fork();			

	if (mesos_pid > 0) {

		debug(D_INFO, "Start makeflow mesos scheduler.");

	} else if (mesos_pid == 0) {

		char *mesos_cwd;
		mesos_cwd = path_getcwd();
	
		char exe_path[MAX_BUF_SIZE];

		ssize_t exe_path_len = readlink("/proc/self/exe", exe_path, MAX_BUF_SIZE - 1);
		if(exe_path_len == -1) {
			fatal("could not read \"/proc/self/exe\": %s\n", strerror(errno));
		}
		exe_path[exe_path_len] = '\0';	/* readlink does not null-terminate the buffer */
	
		char exe_dir_path[MAX_BUF_SIZE];
		path_dirname(exe_path, exe_dir_path);
	
	    char *exe_py_path = string_format("%s/mf_mesos_scheduler", exe_dir_path);
		char *ld_preload_str = NULL;
		char *python_path = NULL;
	
		if(mesos_preload) {
			ld_preload_str = string_format("LD_PRELOAD=%s", mesos_preload);
		}

		if(mesos_py_path) {
			char *mesos_python_path = xxstrdup(mesos_py_path);
			python_path = string_format("PYTHONPATH=%s", mesos_python_path);
		}	
		
		/* build a NULL-terminated environment list for execle */
		char *envs[3] = { NULL, NULL, NULL };
		if(ld_preload_str && python_path) {
			envs[0] = ld_preload_str;
			envs[1] = python_path;
		} else if(python_path) {
			envs[0] = python_path;
		} else if(ld_preload_str) {
			envs[0] = ld_preload_str;
		}

		const char *batch_log_name = q->logfile;  

		int mesos_fd = open(batch_log_name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (mesos_fd == -1) {
			fatal("Failed to open %s \n", batch_log_name);
	    }

	    if (dup2(mesos_fd, 1) == -1) {
			fatal("Failed to duplicate file descriptor: %s\n", strerror(errno));
	   	}

		if (dup2(mesos_fd, 2) == -1) {
			fatal("Failed to duplicate file descriptor: %s\n", strerror(errno));
	   	}

	    close(mesos_fd);
		

		execle("/usr/bin/python", "python", exe_py_path, mesos_cwd, 
			mesos_master, (char *) 0, envs);

		exit(errno);

	} else {

		fatal("mesos batch system couldn't create new process: %s\n", strerror(errno));

	}

}