// Our table may have false positives, but never false negatives.
// I.e. for items which ARE in the table, it is ALWAYS correct (returns true).
// For items which ARE NOT in the table, it is USUALLY correct (returns false, but collisions can return true).
void adler_lookup_table_test(int data_count, int data_offset, int bit_table_width, const char *data, int data_len) {
    debug4("%d %d\n", data_count, bit_table_width);
    struct adler32table testTable;
    struct adler32 adler;
    short *shorts = (short*) data;
    int i;
    assert(data_len >= ((4*data_count)+data_offset)); //4 bytes for a 'fake' adler32 instance
    adler_table_init(&testTable, bit_table_width);
    for (i=data_offset; i < data_count; i++) {
        adler.aVal = shorts[2*i];
        adler.bVal = shorts[2*i + 1];
        adler_table_update(&testTable, &adler);
    }

    for (i=data_offset; i < data_count; i++) {
        adler.aVal = shorts[2*i];
        adler.bVal = shorts[(2*i)+1];
        debug4("Vals: %d %d\n", shorts[2*i], shorts[2*i + 1]);
        assert(adler_table_present(&testTable, &adler));
    }

    adler_table_free(&testTable);
}
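The adler_table_* helpers exercised by this test are not shown on this page. Purely as an illustration, here is a minimal Bloom-filter-style bit table with the same names and the same false-positive-only behaviour; the struct layouts, the hash, and the function bodies are assumptions, not the project's actual code (it also assumes 0 < width < 32).

#include <stdlib.h>

struct adler32 { unsigned short aVal, bVal; };   /* layout assumed */

struct adler32table {
    unsigned char *bits;   /* one bit per bucket, 2^width buckets */
    int width;             /* number of index bits */
};

static unsigned adler_hash(const struct adler32 *a, int width)
{
    unsigned h = ((unsigned)a->aVal << 16) | a->bVal;
    h ^= h >> 13;  h *= 0x9e3779b1u;  h ^= h >> 16;   /* cheap integer mix */
    return h & ((1u << width) - 1);
}

void adler_table_init(struct adler32table *t, int width)
{
    t->width = width;
    t->bits = calloc(((size_t)1 << width) / 8 + 1, 1);
}

void adler_table_update(struct adler32table *t, const struct adler32 *a)
{
    unsigned i = adler_hash(a, t->width);
    t->bits[i / 8] |= 1u << (i % 8);
}

int adler_table_present(const struct adler32table *t, const struct adler32 *a)
{
    unsigned i = adler_hash(a, t->width);
    return (t->bits[i / 8] >> (i % 8)) & 1;   /* may be a false positive */
}

void adler_table_free(struct adler32table *t)
{
    free(t->bits);
    t->bits = NULL;
}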
Example #2
/*
 * init() is called when the plugin is loaded, before any other functions
 * are called.  Put global initialization here.
 */
extern int init ( void )
{
	static int first = 1;

	if(first) {
		/* since this can be loaded from many different places
		   only tell us once. */
		verbose("%s loaded", plugin_name);
		first = 0;
	} else {
		debug4("%s loaded", plugin_name);
	}

	return SLURM_SUCCESS;
}
Example #3
static void
conf_proxy_reset_settings_cb (GSettings *settings,
                              guint cnxn_id,
                              gchar *key,
                              gpointer user_data)
{
	gchar		*proxyname, *proxyusername, *proxypassword;
	gint		proxyport;
	gint		proxydetectmode;
	gboolean	proxyuseauth;

	proxyname = NULL;
	proxyport = 0;
	proxyusername = NULL;
	proxypassword = NULL;

	conf_get_int_value (PROXY_DETECT_MODE, &proxydetectmode);
	switch (proxydetectmode) {
		default:
		case 0:
			debug0 (DEBUG_CONF, "proxy auto detect is configured");
			/* nothing to do, all done by libproxy inside libsoup */
			break;
		case 1:
			debug0 (DEBUG_CONF, "proxy is disabled by user");
			/* nothing to do */
			break;
		case 2:
			debug0 (DEBUG_CONF, "manual proxy is configured");

			conf_get_str_value (PROXY_HOST, &proxyname);
			conf_get_int_value (PROXY_PORT, &proxyport);
			conf_get_bool_value (PROXY_USEAUTH, &proxyuseauth);
			if (proxyuseauth) {
				conf_get_str_value (PROXY_USER, &proxyusername);
				conf_get_str_value (PROXY_PASSWD, &proxypassword);
			}
			break;
	}
	debug4 (DEBUG_CONF, "Manual proxy settings are now %s:%d %s:%s", proxyname != NULL ? proxyname : "NULL", proxyport,
		  proxyusername != NULL ? proxyusername : "NULL",
		  proxypassword != NULL ? proxypassword : "NULL");

	network_set_proxy (proxydetectmode, proxyname, proxyport, proxyusername, proxypassword);
}
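In this project (Liferea) the debugN macros take a category bitmask such as DEBUG_CONF plus exactly N printf arguments. The real definitions live in the project's debug.h; the following is only a plausible minimal sketch of that shape, with an assumed global debug_level and assumed flag values:

#include <stdio.h>

#define DEBUG_CONF  (1 << 0)
#define DEBUG_NET   (1 << 1)

static unsigned long debug_level = DEBUG_CONF | DEBUG_NET;  /* assumed knob */

#define debug_print(flag, ...) \
	do { if (debug_level & (flag)) fprintf(stderr, __VA_ARGS__); } while (0)

#define debug0(flag, fmt)              debug_print(flag, fmt "\n")
#define debug1(flag, fmt, a)           debug_print(flag, fmt "\n", a)
#define debug4(flag, fmt, a, b, c, d)  debug_print(flag, fmt "\n", a, b, c, d)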
Example #4
File: z29.c Project: thektulu/lout
void DeleteEverySym(void)
{ int i, j, load, cost;  OBJECT p, plink, link, x, entry;
  debug0(DST, DD, "DeleteEverySym()");

  /* dispose the bodies of all symbols */
  for( i = 0;  i < MAX_TAB;  i++ )
  { entry = (OBJECT) &symtab[i];
    for( plink = Down(entry);  plink != entry;  plink = NextDown(plink) )
    { Child(p, plink);
      for( link = Down(p);  link != p;  link = NextDown(link) )
      {	Child(x, link);  DeleteSymBody(x);
	/* *** will not work now
	while( base_uses(x) != nilobj )
	{ tmp = base_uses(x);  base_uses(x) = next(tmp);
	  PutMem(tmp, USES_SIZE);
	}
	while( uses(x) != nilobj )
	{ tmp = uses(x);  uses(x) = next(tmp);
	  PutMem(tmp, USES_SIZE);
	}
	*** */
      }
    }
  }

  /* dispose the symbol name strings, gather statistics, and print them */
  load = cost = 0;
  for( i = 0;  i < MAX_TAB;  i++ )
  { j = 1; entry = (OBJECT) &symtab[i];
    while( Down(entry) != entry )
    { load += 1;  cost += j;  j += 1;
      DisposeChild(Down(entry));
    }
  }
  if( load > 0 )
  { debug4(DST, DD, "size = %d, items = %d (%d%%), probes = %.1f",
      MAX_TAB, load, (100*load)/MAX_TAB, (float) cost/load);
  }
  else
  { debug1(DST, DD, "table size = %d, no entries in table", MAX_TAB);
  }
  debug0(DST, DD, "DeleteEverySym returning.");
} /* end DeleteEverySym */
Example #5
void scan_info(const char* filename)
{
  struct stat statbuf;
  char infoname[100];
  time_t expiry;
  int fd;
  unsigned i;
  strcpy(infoname, "info/");
  strcpy(infoname+5, filename);
  if((fd = open(infoname, O_RDONLY)) == -1 ||
     fstat(fd, &statbuf) == -1) {
    /* This could race with qmail-clean, ignore missing files. */
    if (errno == ENOENT)
      return;
    die3sys(111, "Can't open or stat info file '", infoname, "'");
  }
  /* Handle the file only if its expiry time (creation time + opt_age)
     is before now and after the last run */
  for (i = 0; i < opt_age_count; ++i) {
    expiry = statbuf.st_mtime + opt_ages[i];
    debug4(1, "filename=", filename, " expiry=", utoa(expiry));
    if(expiry > now)
      debug1(1, "ignoring, has not yet expired");
    else if(expiry <= lastrun) {
      debug1(1, "ignoring, was previously expired");
      break;
    }
    else {
      /* Load the sender address from the info file */
      char* sender = malloc(statbuf.st_size);
      if (read(fd, sender, statbuf.st_size) != statbuf.st_size)
        die3sys(111, "Reading from info file '", infoname, "' failed");
      if(check_rcpt(sender+1))
	make_bounce(sender+1, filename, opt_ages[i]);
      else
	debug2(1, "ignoring, sender was not in rcpthosts: ", sender+1);
      free(sender);
      break;
    }
  }
  close(fd);
}
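Note that debugN has yet another shape here: a numeric verbosity level followed by N string pieces that are simply concatenated (hence the utoa() call rather than a printf format). A minimal sketch of that flavor, assuming a global verbosity option; the real program's implementation may differ:

#include <stdio.h>

static int opt_verbose = 1;   /* assumed verbosity setting */

static void debug_pieces(int level, const char *s1, const char *s2,
                         const char *s3, const char *s4)
{
	if (opt_verbose < level)
		return;
	fprintf(stderr, "%s%s%s%s\n",
		s1, s2 ? s2 : "", s3 ? s3 : "", s4 ? s4 : "");
}

#define debug1(lvl, a)          debug_pieces(lvl, a, 0, 0, 0)
#define debug2(lvl, a, b)       debug_pieces(lvl, a, b, 0, 0)
#define debug4(lvl, a, b, c, d) debug_pieces(lvl, a, b, c, d)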
Example #6
int main(int argc, char **argv) {
	gf_t *gfield_context = malloc(sizeof(gf_t));
	uint32_t *vmond_row = malloc(NUM_CHECKSUMS*sizeof(uint32_t));
	uint32_t *chksums = calloc(NUM_CHECKSUMS, sizeof(uint32_t));
	uint32_t data = 0x41414141;

	debug4("Initializing gf.\n");
	gf_init_easy(gfield_context, FIELD_SIZE);

	for (int i = 0; i < 32; i++) {
		printarray(chksums, NUM_CHECKSUMS);
		createvmond(gfield_context, vmond_row, NUM_CHECKSUMS, i+1);
		updatechecksum(gfield_context, chksums, vmond_row, NUM_CHECKSUMS, data);
		//stepvmond(gfield_context, vmond_matrix, FIELD_SIZE);
	}
	printarray(chksums, NUM_CHECKSUMS);

	//free(gfield_context);
	argc=0; argv=0;
}
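createvmond(), updatechecksum() and printarray() are helpers defined elsewhere in that project. Purely as an illustration of how such a checksum update could look with GF-Complete's w32 multiply, assuming FIELD_SIZE selects a 32-bit word field; the helper's real behaviour is an assumption:

#include <stdint.h>
#include "gf_complete.h"

/* Fold one 32-bit data word into each running checksum:
 * chksums[k] += vmond_row[k] * data in GF(2^w), where + is XOR. */
static void updatechecksum_sketch(gf_t *gf, uint32_t *chksums,
                                  const uint32_t *vmond_row, int n,
                                  uint32_t data)
{
	int k;
	for (k = 0; k < n; k++)
		chksums[k] ^= gf->multiply.w32(gf, vmond_row[k], data);
}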
Example #7
extern int as_mysql_node_up(mysql_conn_t *mysql_conn,
			    struct node_record *node_ptr,
			    time_t event_time)
{
	char* query;
	int rc = SLURM_SUCCESS;

	if (check_connection(mysql_conn) != SLURM_SUCCESS)
		return ESLURM_DB_CONNECTION;

	query = xstrdup_printf(
		"update \"%s_%s\" set time_end=%ld where "
		"time_end=0 and node_name='%s';",
		mysql_conn->cluster_name, event_table,
		event_time, node_ptr->name);
	debug4("%d(%s:%d) query\n%s",
	       mysql_conn->conn, THIS_FILE, __LINE__, query);
	rc = mysql_db_query(mysql_conn, query);
	xfree(query);
	return rc;
}
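The "%d(%s:%d) query\n%s" trace recurs in several of these database plugins; later examples on this page guard the same output behind debug_flags & DEBUG_FLAG_DB_QUERY with a DB_DEBUG macro. A hypothetical wrapper in the same spirit (not SLURM's actual definition) would keep the call sites short:

/* Hypothetical convenience macro; THIS_FILE and debug4() come from SLURM. */
#define QUERY_DEBUG(conn_idx, query) \
	debug4("%d(%s:%d) query\n%s", conn_idx, THIS_FILE, __LINE__, (query))

/* e.g. instead of the explicit call above:
 *     QUERY_DEBUG(mysql_conn->conn, query);
 */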
Example #8
void
network_set_proxy (ProxyDetectMode mode, gchar *host, guint port, gchar *user, gchar *password)
{
	g_free (proxyname);
	g_free (proxyusername);
	g_free (proxypassword);
	proxymode = mode;
	proxyname = host;
	proxyport = port;
	proxyusername = user;
	proxypassword = password;

	/* session will be NULL if we were called from conf_init() as that's called
	 * before net_init() */
	if (session)
		network_set_soup_session_proxy (session, mode, host, port, user, password);

	debug4 (DEBUG_NET, "proxy set to http://%s:%s@%s:%d", user, password, host, port);
	
	network_monitor_proxy_changed ();
}
Example #9
File: z16.c Project: thektulu/lout
void SetNeighbours(OBJECT link, BOOLEAN ratm, OBJECT *pg, OBJECT *pdef,
OBJECT *sg, OBJECT *sdef, int *side)
{ OBJECT plink, slink;

  /* find preceding definite; if it exists, set *pg */
  *pg = nilobj;
  for( plink = PrevDown(link);  type(plink) == LINK;  plink = PrevDown(plink) )
  { Child(*pdef, plink);
    if( type(*pdef) == SPLIT ? SplitIsDefinite(*pdef) : is_definite(type(*pdef)) )
    { Child(*pg, PrevDown(link));
      while( is_index(type(*pg)) )
      {	link = PrevDown(link);
	Child(*pg, PrevDown(link));
      }
      assert( type(*pg) == GAP_OBJ, "SetNeighbours: type(*pg)!" );
      break;
    }
  }

  /* find succeeding definite; if it exists, set *sg */
  *sg = nilobj;
  for( slink = NextDown(link);  type(slink) == LINK;  slink = NextDown(slink) )
  { Child(*sdef, slink);
    if( type(*sdef) == SPLIT ? SplitIsDefinite(*sdef) : is_definite(type(*sdef)) )
    { Child(*sg, PrevDown(slink));
      while( is_index(type(*sg)) )
      {	slink = PrevDown(slink);
	Child(*sg, PrevDown(slink));
      }
      assert( type(*sg) == GAP_OBJ, "SetNeighbours: type(*sg)!" );
      break;
    }
  }

  *side = ratm ? BACK : *pg == nilobj || mark(gap(*pg)) ? ON : FWD;
  debug4(DSA, DD,
    "SetNeighbours: ratm == %s, pg %s nilobj, sg %s nilobj, side == %s",
    bool(ratm), *pg == nilobj ? "==" : "!=", *sg == nilobj ? "==" : "!=", 
    *side == BACK ? "BACK" : *side == ON ? "ON" : "FWD");
} /* end SetNeighbours */
Example #10
/*
 *  Lua interface to SLURM log facility:
 */
static int _log_lua_msg (lua_State *L)
{
	const char *prefix  = "job_submit.lua";
	int        level    = 0;
	const char *msg;

	/*
	 *  Optional numeric prefix indicating the log level
	 *  of the message.
	 */

	/*
	 *  Pop message off the lua stack
	 */
	msg = lua_tostring(L, -1);
	lua_pop (L, 1);
	/*
	 *  Pop level off stack:
	 */
	level = (int)lua_tonumber (L, -1);
	lua_pop (L, 1);

	/*
	 *  Call appropriate slurm log function based on log-level argument
	 */
	if (level > 4)
		debug4 ("%s: %s", prefix, msg);
	else if (level == 4)
		debug3 ("%s: %s", prefix, msg);
	else if (level == 3)
		debug2 ("%s: %s", prefix, msg);
	else if (level == 2)
		debug ("%s: %s", prefix, msg);
	else if (level == 1)
		verbose ("%s: %s", prefix, msg);
	else if (level == 0)
		info ("%s: %s", prefix, msg);
	return (0);
}
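The handler reads the message from the top of the Lua stack and the numeric level from beneath it, so the Lua side calls it as log_fn(level, message). A hypothetical registration sketch using the standard Lua C API; the table and field names are illustrative, not necessarily what the plugin really registers:

#include <lua.h>

static void register_log_fn(lua_State *L)
{
	lua_newtable(L);                        /* the "slurm" table          */
	lua_pushcfunction(L, _log_lua_msg);     /* C handler defined above    */
	lua_setfield(L, -2, "log_msg");         /* slurm.log_msg = handler    */
	lua_setglobal(L, "slurm");              /* publish the table globally */
}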
Example #11
File: z16.c Project: thektulu/lout
FULL_LENGTH FindShift(OBJECT x, OBJECT y, int dim)
{ FULL_LENGTH len = 0, res = 0;  /* initial values unused */
  debug4(DSF, DD, "FindShift(%s, %s %s, %s)", Image(type(x)),
    Image(type(y)), EchoObject(y), dimen(dim));

  /* first determine the magnitude of the shift */
  switch( units(shift_gap(x)) )
  {
    case FIXED_UNIT:	len = width(shift_gap(x));
			break;

    case NEXT_UNIT:	len = (size(y, dim) * width(shift_gap(x))) / FR;
			break;

    default:		assert(FALSE, "FindShift: units");
			break;
  }

  /* then calculate the shift depending on the shift type */
  switch( shift_type(x) )
  {
    case GAP_ABS:	res = len - back(y, dim);
			break;

    case GAP_INC:	res = len;
			break;

    case GAP_DEC:	res = - len;
			break;

    default:		assert(FALSE, "FindShift: type");
			break;
  }

  debug1(DSF, DD, "FindShift returning %s", EchoLength(res));
  return res;
} /* end FindShift */
Example #12
/* NOTE: Ensure that mysql_conn->lock is set on function entry */
static int _mysql_query_internal(MYSQL *db_conn, char *query)
{
	int rc = SLURM_SUCCESS;

	if (!db_conn)
		fatal("You haven't inited this storage yet.");

	/* clear out the old results so we don't get a 2014 error */
	_clear_results(db_conn);
	if (mysql_query(db_conn, query)) {
		const char *err_str = mysql_error(db_conn);
		errno = mysql_errno(db_conn);
		if (errno == ER_NO_SUCH_TABLE) {
			debug4("This could happen often and is expected.\n"
			       "mysql_query failed: %d %s\n%s",
			       errno, err_str, query);
			errno = 0;
			goto end_it;
		}
		error("mysql_query failed: %d %s\n%s", errno, err_str, query);
		if (errno == ER_LOCK_WAIT_TIMEOUT) {
			fatal("mysql gave ER_LOCK_WAIT_TIMEOUT as an error. "
			      "The only way to fix this is restart the "
			      "calling program");
		}
		/* FIXME: If we get ER_LOCK_WAIT_TIMEOUT here we need
		 * to restart the connections, but it appears restarting
		 * the calling program is the only way to handle this.
		 * If anyone in the future figures out a way to handle
		 * this, super.  Until then we will need to restart the
		 * calling program if you ever get this error.
		 */
		rc = SLURM_ERROR;
	}
end_it:
	return rc;
}
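Per the NOTE above, callers are expected to hold mysql_conn->lock around this helper. A hypothetical caller sketch; the field and wrapper names are illustrative, not SLURM's actual wrappers:

static int query_with_lock(mysql_conn_t *mysql_conn, char *query)
{
	int rc;

	slurm_mutex_lock(&mysql_conn->lock);
	rc = _mysql_query_internal(mysql_conn->db_conn, query);
	slurm_mutex_unlock(&mysql_conn->lock);
	return rc;
}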
Example #13
/* process job usage data */
static int
_process_job_usage(pgsql_conn_t *pg_conn, char *cluster, time_t start,
		   time_t end, List cu_list, List ru_list, List au_list,
		   List wu_list)
{
	DEF_VARS;
	PGresult *result2;
	ListIterator r_itr;
	int seconds = 0, last_id = -1, last_wckeyid = -1;
	local_cluster_usage_t *c_usage = NULL;
	local_resv_usage_t *r_usage = NULL;
	local_id_usage_t *a_usage = NULL, *w_usage = NULL;
	int track_wckey = slurm_get_track_wckey();

	char *gj_fields = "job_db_inx,id_job,id_assoc,id_wckey,time_eligible,"
		"time_start,time_end,time_suspended,cpus_alloc,cpus_req,"
		"id_resv";
	enum {
		F_DB_INX,
		F_JOBID,
		F_ASSOCID,
		F_WCKEYID,
		F_ELG,
		F_START,
		F_END,
		F_SUSPENDED,
		F_ACPU,
		F_RCPU,
		F_RESVID,
		F_COUNT
	};

	query = xstrdup_printf(
		"SELECT %s FROM %s.%s WHERE (time_eligible < %ld AND "
		"(time_end >= %ld OR time_end = 0)) ORDER BY id_assoc, "
		"time_eligible", gj_fields, cluster, job_table,
		(long)end, (long)start);
	result = DEF_QUERY_RET;
	if (!result) {
		error("failed to get jobs");
		return SLURM_ERROR;
	}

	r_itr = list_iterator_create(ru_list);
	FOR_EACH_ROW {
		int job_id = atoi(ROW(F_JOBID));
		int assoc_id = atoi(ROW(F_ASSOCID));
		int wckey_id = atoi(ROW(F_WCKEYID));
		int resv_id = atoi(ROW(F_RESVID));
		int row_eligible = atoi(ROW(F_ELG));
		int row_start = atoi(ROW(F_START));
		int row_end = atoi(ROW(F_END));
		int row_acpu = atoi(ROW(F_ACPU));
		int row_rcpu = atoi(ROW(F_RCPU));
		seconds = 0;

		if (row_start && (row_start < start))
			row_start = start;
		if (!row_start && row_end)
			row_start = row_end;
		if (!row_end || row_end > end)
			row_end = end;
		if (!row_start || ((row_end - row_start) < 1))
			goto calc_cluster;

		seconds = (row_end - row_start);

		if (strcmp(ROW(F_SUSPENDED), "0")) {
                        query = xstrdup_printf(
				"SELECT %s.get_job_suspend_time(%s, %ld, %ld);",
				cluster, ROW(F_DB_INX), start, end);
                        result2 = DEF_QUERY_RET;
			if (!result2) {
				list_iterator_destroy(r_itr);
                                return SLURM_ERROR;
			}
                        seconds -= atoi(PQgetvalue(result2, 0, 0));
                        PQclear(result2);
                }
                if (seconds < 1) {
			debug4("This job (%u) was suspended "
			       "the entire hour", job_id);
			/* TODO: how about resv usage? */
			continue;
		}

		if (last_id != assoc_id) { /* ORDER BY associd */
			a_usage = xmalloc(sizeof(local_id_usage_t));
			a_usage->id = assoc_id;
			list_append(au_list, a_usage);
			last_id = assoc_id;
		}
		a_usage->a_cpu += seconds * row_acpu;

		if (!track_wckey)
			goto calc_cluster;

		/* do the wckey calculation */
		if (last_wckeyid != wckey_id) {
			w_usage = list_find_first(wu_list, _cmp_local_id,
						  &wckey_id);
			if (!w_usage) {
				w_usage = xmalloc(sizeof(local_id_usage_t));
				w_usage->id = wckey_id;
				list_append(wu_list, w_usage);
			}
			last_wckeyid = wckey_id;
		}
		w_usage->a_cpu += seconds * row_acpu;

		/* do the cluster allocated calculation */
	calc_cluster:

		/* first figure out the reservation */
		if (resv_id) {
			if (seconds <= 0)
				continue;
			/* Since we have already added the
			   entire reservation as used time on
			   the cluster, we only need to
			   calculate the used time for the
			   reservation and then divvy up the
			   unused time over the associations
			   able to run in the reservation.
			   Since the job was to run in, or ran
			   in, a reservation, we don't care
			   about eligible time, since that
			   could totally skew the cluster's
			   reserved time, as the job may be
			   able to run outside of the
			   reservation. */
			list_iterator_reset(r_itr);
			while((r_usage = list_next(r_itr))) {
				/* since the reservation could
				   have changed in some way,
				   thus making a new
				   reservation record in the
				   database, we have to make
				   sure all the reservations
				   are checked to see if such
				   a thing has happened */
				if ((r_usage->id == resv_id)) {
					int temp_end = row_end;
					int temp_start = row_start;
					if (r_usage->start > temp_start)
						temp_start = r_usage->start;
					if (r_usage->end < temp_end)
						temp_end = r_usage->end;

					if ((temp_end - temp_start) > 0) {
						r_usage->a_cpu += row_acpu *
							(temp_end - temp_start);
					}
				}
			}
			/* entire resv already added to cluster usage */
			continue;
		}

		c_usage = list_peek(cu_list);
		/* only record time for the clusters that have
		   registered.  This continue should rarely if
		   ever happen.
		*/
		if (!c_usage)
			continue;

		if (row_start && (seconds > 0)) {
/* 					info("%d assoc %d adds " */
/* 					     "(%d)(%d-%d) * %d = %d " */
/* 					     "to %d", */
/* 					     job_id, */
/* 					     a_usage->id, */
/* 					     seconds, */
/* 					     row_end, row_start, */
/* 					     row_acpu, */
/* 					     seconds * row_acpu, */
/* 					     row_acpu); */

			c_usage->a_cpu += seconds * row_acpu;
		}
		/* now reserved time */
		/*
		 * job requesting for rcpu processors has been delayed
		 * by (start_time - eligible_time) seconds
		 * large r_cpu means cluster overload or bad scheduling?
		 */
		if (!row_start || (row_start >= c_usage->start)) {
			row_end = row_start;
			row_start = row_eligible;
			if (c_usage->start > row_start)
				row_start = c_usage->start;
			if (c_usage->end < row_end)
				row_end = c_usage->end;

			if ((row_end - row_start) > 0) {
				seconds = (row_end - row_start)
					* row_rcpu;

/* 					info("%d assoc %d reserved " */
/* 					     "(%d)(%d-%d) * %d = %d " */
/* 					     "to %d", */
/* 					     job_id, */
/* 					     assoc_id, */
/* 					     seconds, */
/* 					     row_end, row_start, */
/* 					     row_rcpu, */
/* 					     seconds * row_rcpu, */
/* 					     row_rcpu); */
				c_usage->r_cpu += seconds;
			}
		}
	} END_EACH_ROW;
	PQclear(result);
	list_iterator_destroy(r_itr);

	return SLURM_SUCCESS;
}
Example #14
extern List as_mysql_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
			     slurmdb_txn_cond_t *txn_cond)
{
	char *query = NULL;
	char *assoc_extra = NULL;
	char *name_extra = NULL;
	char *extra = NULL;
	char *tmp = NULL;
	List txn_list = NULL;
	ListIterator itr = NULL;
	char *object = NULL;
	int set = 0;
	int i=0;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	List use_cluster_list = as_mysql_cluster_list;
	bool locked = 0;

	/* if this changes you will need to edit the corresponding enum */
	char *txn_req_inx[] = {
		"id",
		"timestamp",
		"action",
		"name",
		"actor",
		"info",
		"cluster"
	};
	enum {
		TXN_REQ_ID,
		TXN_REQ_TS,
		TXN_REQ_ACTION,
		TXN_REQ_NAME,
		TXN_REQ_ACTOR,
		TXN_REQ_INFO,
		TXN_REQ_CLUSTER,
		TXN_REQ_COUNT
	};

	if (check_connection(mysql_conn) != SLURM_SUCCESS)
		return NULL;

	if (!txn_cond)
		goto empty;

	/* handle query for associations first */
	if (txn_cond->acct_list && list_count(txn_cond->acct_list)) {
		set = 0;
		if (assoc_extra)
			xstrcat(assoc_extra, " && (");
		else
			xstrcat(assoc_extra, " where (");

		if (name_extra)
			xstrcat(name_extra, " && (");
		else
			xstrcat(name_extra, " (");
		itr = list_iterator_create(txn_cond->acct_list);
		while ((object = list_next(itr))) {
			if (set) {
				xstrcat(assoc_extra, " || ");
				xstrcat(name_extra, " || ");
			}

			xstrfmtcat(assoc_extra, "acct='%s'", object);

			xstrfmtcat(name_extra, "(name like '%%\\'%s\\'%%'"
				   " || name='%s')"
				   " || (info like '%%acct=\\'%s\\'%%')",
				   object, object, object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(assoc_extra, ")");
		xstrcat(name_extra, ")");
	}

	if (txn_cond->cluster_list && list_count(txn_cond->cluster_list)) {
		set = 0;
		if (name_extra)
			xstrcat(name_extra, " && (");
		else
			xstrcat(name_extra, "(");

		itr = list_iterator_create(txn_cond->cluster_list);
		while ((object = list_next(itr))) {
			if (set) {
				xstrcat(name_extra, " || ");
			}
			xstrfmtcat(name_extra, "(cluster='%s' || "
				   "name like '%%\\'%s\\'%%' || name='%s')"
				   " || (info like '%%cluster=\\'%s\\'%%')",
				   object, object, object, object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(name_extra, ")");
		use_cluster_list = txn_cond->cluster_list;
	}

	if (txn_cond->user_list && list_count(txn_cond->user_list)) {
		set = 0;
		if (assoc_extra)
			xstrcat(assoc_extra, " && (");
		else
			xstrcat(assoc_extra, " where (");

		if (name_extra)
			xstrcat(name_extra, " && (");
		else
			xstrcat(name_extra, "(");

		itr = list_iterator_create(txn_cond->user_list);
		while ((object = list_next(itr))) {
			if (set) {
				xstrcat(assoc_extra, " || ");
				xstrcat(name_extra, " || ");
			}
			xstrfmtcat(assoc_extra, "user='%s'", object);

			xstrfmtcat(name_extra, "(name like '%%\\'%s\\'%%'"
				   " || name='%s')"
				   " || (info like '%%user=\\'%s\\'%%')",
				   object, object, object);

			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(assoc_extra, ")");
		xstrcat(name_extra, ")");
	}

	if (assoc_extra) {
		if (!locked && (use_cluster_list == as_mysql_cluster_list)) {
			slurm_mutex_lock(&as_mysql_cluster_list_lock);
			locked = 1;
		}

		itr = list_iterator_create(use_cluster_list);
		while ((object = list_next(itr))) {
			xstrfmtcat(query, "select id_assoc from \"%s_%s\"%s",
				   object, assoc_table, assoc_extra);
			if (debug_flags & DEBUG_FLAG_DB_QUERY)
				DB_DEBUG(mysql_conn->conn, "query\n%s", query);
			if (!(result = mysql_db_query_ret(
				      mysql_conn, query, 0))) {
				xfree(query);
				break;
			}
			xfree(query);

			if (mysql_num_rows(result)) {
				if (extra)
					xstrfmtcat(extra,
						   " || (cluster='%s' && (",
						   object);
				else
					xstrfmtcat(extra,
						   " where (cluster='%s' && (",
						   object);

				set = 0;

				while ((row = mysql_fetch_row(result))) {
					if (set)
						xstrcat(extra, " || ");

					xstrfmtcat(extra,
						   "(name like "
						   "'%%id_assoc=%s %%' "
						   "|| name like "
						   "'%%id_assoc=%s)')",
						   row[0], row[0]);
					set = 1;
				}
				xstrcat(extra, "))");
			}
			mysql_free_result(result);
		}
		list_iterator_destroy(itr);

		xfree(assoc_extra);
	}

	if (name_extra) {
		if (extra)
			xstrfmtcat(extra, " && (%s)", name_extra);
		else
			xstrfmtcat(extra, " where (%s)", name_extra);
		xfree(name_extra);
	}
	/*******************************************/

	if (txn_cond->action_list && list_count(txn_cond->action_list)) {
		set = 0;
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		itr = list_iterator_create(txn_cond->action_list);
		while ((object = list_next(itr))) {
			if (set)
				xstrcat(extra, " || ");
			xstrfmtcat(extra, "action='%s'", object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(extra, ")");
	}

	if (txn_cond->actor_list && list_count(txn_cond->actor_list)) {
		set = 0;
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		itr = list_iterator_create(txn_cond->actor_list);
		while ((object = list_next(itr))) {
			if (set)
				xstrcat(extra, " || ");
			xstrfmtcat(extra, "actor='%s'", object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(extra, ")");
	}

	if (txn_cond->id_list && list_count(txn_cond->id_list)) {
		set = 0;
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		itr = list_iterator_create(txn_cond->id_list);
		while ((object = list_next(itr))) {
			char *ptr = NULL;
			long num = strtol(object, &ptr, 10);
			if ((num == 0) && ptr && ptr[0]) {
				error("Invalid value for txn id (%s)",
				      object);
				xfree(extra);
				list_iterator_destroy(itr);
				goto end_it;
			}

			if (set)
				xstrcat(extra, " || ");
			xstrfmtcat(extra, "id=%s", object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(extra, ")");
	}

	if (txn_cond->info_list && list_count(txn_cond->info_list)) {
		set = 0;
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		itr = list_iterator_create(txn_cond->info_list);
		while ((object = list_next(itr))) {
			if (set)
				xstrcat(extra, " || ");
			xstrfmtcat(extra, "info like '%%%s%%'", object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(extra, ")");
	}

	if (txn_cond->name_list && list_count(txn_cond->name_list)) {
		set = 0;
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		itr = list_iterator_create(txn_cond->name_list);
		while ((object = list_next(itr))) {
			if (set)
				xstrcat(extra, " || ");
			xstrfmtcat(extra, "name like '%%%s%%'", object);
			set = 1;
		}
		list_iterator_destroy(itr);
		xstrcat(extra, ")");
	}

	if (txn_cond->time_start && txn_cond->time_end) {
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		xstrfmtcat(extra, "timestamp < %ld && timestamp >= %ld)",
			   txn_cond->time_end, txn_cond->time_start);
	} else if (txn_cond->time_start) {
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		xstrfmtcat(extra, "timestamp >= %ld)", txn_cond->time_start);

	} else if (txn_cond->time_end) {
		if (extra)
			xstrcat(extra, " && (");
		else
			xstrcat(extra, " where (");
		xstrfmtcat(extra, "timestamp < %ld)", txn_cond->time_end);
	}

	/* make sure we can get the max length out of the database
	 * when grouping the names
	 */
	if (txn_cond->with_assoc_info)
		mysql_db_query(mysql_conn,
			       "set session group_concat_max_len=65536;");

empty:
	if (!locked && (use_cluster_list == as_mysql_cluster_list)) {
		slurm_mutex_lock(&as_mysql_cluster_list_lock);
		locked = 1;
	}

	xfree(tmp);
	xstrfmtcat(tmp, "%s", txn_req_inx[i]);
	for(i=1; i<TXN_REQ_COUNT; i++) {
		xstrfmtcat(tmp, ", %s", txn_req_inx[i]);
	}

	query = xstrdup_printf("select %s from %s", tmp, txn_table);

	if (extra) {
		xstrfmtcat(query, "%s", extra);
		xfree(extra);
	}
	xstrcat(query, " order by timestamp;");

	xfree(tmp);

	if (debug_flags & DEBUG_FLAG_DB_QUERY)
		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
	if (!(result = mysql_db_query_ret(
		      mysql_conn, query, 0))) {
		xfree(query);
		goto end_it;
	}
	xfree(query);

	txn_list = list_create(slurmdb_destroy_txn_rec);

	while ((row = mysql_fetch_row(result))) {
		slurmdb_txn_rec_t *txn = xmalloc(sizeof(slurmdb_txn_rec_t));

		list_append(txn_list, txn);

		txn->action = slurm_atoul(row[TXN_REQ_ACTION]);
		txn->actor_name = xstrdup(row[TXN_REQ_ACTOR]);
		txn->id = slurm_atoul(row[TXN_REQ_ID]);
		txn->set_info = xstrdup(row[TXN_REQ_INFO]);
		txn->timestamp = slurm_atoul(row[TXN_REQ_TS]);
		txn->where_query = xstrdup(row[TXN_REQ_NAME]);
		txn->clusters = xstrdup(row[TXN_REQ_CLUSTER]);

		if (txn_cond && txn_cond->with_assoc_info
		    && (txn->action == DBD_ADD_ASSOCS
			|| txn->action == DBD_MODIFY_ASSOCS
			|| txn->action == DBD_REMOVE_ASSOCS)) {
			MYSQL_RES *result2 = NULL;
			MYSQL_ROW row2;
			if (txn->clusters) {
				query = xstrdup_printf(
					"select "
					"group_concat(distinct user "
					"order by user), "
					"group_concat(distinct acct "
					"order by acct) "
					"from \"%s_%s\" where %s",
					txn->clusters, assoc_table,
					row[TXN_REQ_NAME]);
				debug4("%d(%s:%d) query\n%s", mysql_conn->conn,
				       THIS_FILE, __LINE__, query);
				if (!(result2 = mysql_db_query_ret(
					      mysql_conn, query, 0))) {
					xfree(query);
					continue;
				}
				xfree(query);

				if ((row2 = mysql_fetch_row(result2))) {
					if (row2[0] && row2[0][0])
						txn->users = xstrdup(row2[0]);
					if (row2[1] && row2[1][0])
						txn->accts = xstrdup(row2[1]);
				}
				mysql_free_result(result2);
			} else {
				error("We can't handle associations "
				      "from action %s yet.",
				      slurmdbd_msg_type_2_str(txn->action, 1));
			}
		}
	}
	mysql_free_result(result);

end_it:
	if (locked)
		slurm_mutex_unlock(&as_mysql_cluster_list_lock);

	return txn_list;
}
Example #15
File: rd.c Project: KP1533TM2/elks
static int rd_ioctl(register struct inode *inode, struct file *file,
		    unsigned int cmd, unsigned int arg)
{
    unsigned long size;
    unsigned int i;
    int j, k, target = DEVICE_NR(inode->i_rdev);

    if (!suser())
	return -EPERM;
    debug2("RD: ioctl %d %s\n", target, (cmd ? "kill" : "make"));
    switch (cmd) {
    case RDCREATE:
	if (rd_info[target].flags & RD_BUSY) {
	    return -EBUSY;
	} else {
	    /* allocate memory */

#if 0
	    rd_info[target].size = 0;
#endif

	    k = -1;
	    for (i = 0; i <= (arg - 1) / ((SEG_SIZE / 1024) * P_SIZE); i++) {
		j = find_free_seg();	/* find free place in queue */
		debug1("RD: ioctl find_free_seg() = %d\n", j);
		if (j == -1) {
		    rd_dealloc(target);
		    return -ENOMEM;
		}
		if (i == 0)
		    rd_info[target].index = j;

		if (i == (arg / ((SEG_SIZE / 1024) * P_SIZE)))
		    /* size in 16 byte pages = (arg % 64) * 64 */
		    size =
			(arg % ((SEG_SIZE / 1024) * P_SIZE)) *
			((SEG_SIZE / 1024) * P_SIZE);
		else
		    size = SEG_SIZE;

		rd_segment[j].segment = mm_alloc(size);
		if (rd_segment[j].segment == -1) {
		    rd_dealloc(target);
		    return -ENOMEM;
		}
		debug4("RD: ioctl pass: %d, allocated %d pages, rd_segment[%d].segment = 0x%x\n",
		       i, (int) size, j, rd_segment[j].segment);

		/* recalculate int size to reflect size in sectors, not pages */
		size = size / DIVISOR;

		rd_segment[j].seg_size = size;	/* size in sectors */
		debug2("RD: ioctl rd_segment[%d].seg_size = %d sectors\n",
		       j, rd_segment[j].seg_size);
		rd_info[target].size += rd_segment[j].seg_size;	/* size in 512 B blocks */
		size = (long) rd_segment[j].seg_size * SECTOR_SIZE;
		debug1("RD: ioctl size = %ld\n", size);

		/* this terrible hack makes sure fmemset clears whole segment even if size == 64 KB :) */
		if (size != ((long) SEG_SIZE * (long) P_SIZE)) {
		    debug2("RD: ioctl calling fmemset(0, 0x%x, 0, %d) ..\n",
			   rd_segment[j].segment, (int) size);
		    fmemset(0, rd_segment[j].segment, 0, (int) size);	/* clear seg_size * SECTOR_SIZE bytes */
		} else {
		    debug2("RD: ioctl calling fmemset(0, 0x%x, 0, %d) ..\n",
			   rd_segment[j].segment, (int) (size / 2));
		    fmemset(0, rd_segment[j].segment, 0, (int) (size / 2));	/* we could hardcode 32768 instead of size / 2 here */
		    debug3("rd_ioctl(): calling fmemset(%d, 0x%x, 0, %d) ..\n",
			   (int) (size / 2), rd_segment[j].segment,
			   (int) (size / 2));
		    fmemset((size / 2), rd_segment[j].segment, 0,
			    (int) (size / 2));
		}

		if (k != -1)
		    rd_segment[k].next = j;	/* set link to next index */
		k = j;
	    }
	    rd_info[target].flags = RD_BUSY;
	    debug3("RD: ioctl ramdisk %d created, size = %d blocks, index = %d\n",
		 target, rd_info[target].size, rd_info[target].index);
	}
	debug("RD: ioctl about to return 0\n");
	return 0;
    case RDDESTROY:
	if (rd_info[target].flags & RD_BUSY) {
	    invalidate_inodes(inode->i_rdev);
	    invalidate_buffers(inode->i_rdev);
	    rd_dealloc(target);
	    rd_info[target].flags = 0;
	    return 0;
	} else
	    return -EINVAL;
    }
    return -EINVAL;
}
Example #16
static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
			     slurmdb_user_rec_t *user,
			     slurmdb_job_cond_t *job_cond,
			     char *cluster_name,
			     char *job_fields, char *step_fields,
			     char *sent_extra,
			     bool is_admin, int only_pending, List sent_list)
{
	char *query = NULL;
	char *extra = xstrdup(sent_extra);
	uint16_t private_data = slurm_get_private_data();
	slurmdb_selected_step_t *selected_step = NULL;
	MYSQL_RES *result = NULL, *step_result = NULL;
	MYSQL_ROW row, step_row;
	slurmdb_job_rec_t *job = NULL;
	slurmdb_step_rec_t *step = NULL;
	time_t now = time(NULL);
	List job_list = list_create(slurmdb_destroy_job_rec);
	ListIterator itr = NULL, itr2 = NULL;
	List local_cluster_list = NULL;
	int set = 0;
	char *prefix="t2";
	int rc = SLURM_SUCCESS;
	int last_id = -1, curr_id = -1;
	local_cluster_t *curr_cluster = NULL;

	/* This is here to make sure we are looking at only this user
	 * if this flag is set.  We also include any accounts they may be
	 * coordinator of.
	 */
	if (!is_admin && (private_data & PRIVATE_DATA_JOBS)) {
		query = xstrdup_printf("select lft from \"%s_%s\" "
				       "where user='******'",
				       cluster_name, assoc_table, user->name);
		if (user->coord_accts) {
			slurmdb_coord_rec_t *coord = NULL;
			itr = list_iterator_create(user->coord_accts);
			while ((coord = list_next(itr))) {
				xstrfmtcat(query, " || acct='%s'",
					   coord->name);
			}
			list_iterator_destroy(itr);
		}
		if (debug_flags & DEBUG_FLAG_DB_JOB)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(extra);
			xfree(query);
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);
		set = 0;
		while ((row = mysql_fetch_row(result))) {
			if (set) {
				xstrfmtcat(extra,
					   " || (%s between %s.lft and %s.rgt)",
					   row[0], prefix, prefix);
			} else {
				set = 1;
				if (extra)
					xstrfmtcat(extra,
						   " && ((%s between %s.lft "
						   "and %s.rgt)",
						   row[0], prefix,
						   prefix);
				else
					xstrfmtcat(extra,
						   " where ((%s between %s.lft "
						   "and %s.rgt)",
						   row[0], prefix,
						   prefix);
			}
		}

		mysql_free_result(result);

		if (set)
			xstrcat(extra, ")");
		else {
			xfree(extra);
			debug("User %s has no assocations, and is not admin, "
			      "so not returning any jobs.", user->name);
			/* This user has no valid associations, so
			 * they will not have any jobs. */
			goto end_it;
		}
	}

	setup_job_cluster_cond_limits(mysql_conn, job_cond,
				      cluster_name, &extra);

	query = xstrdup_printf("select %s from \"%s_%s\" as t1 "
			       "left join \"%s_%s\" as t2 "
			       "on t1.id_assoc=t2.id_assoc "
			       "left join \"%s_%s\" as t3 "
			       "on t1.id_resv=t3.id_resv && "
			       "((t1.time_start && "
			       "(t3.time_start < t1.time_start && "
			       "(t3.time_end >= t1.time_start || "
			       "t3.time_end = 0))) || "
			       "((t3.time_start < t1.time_submit && "
			       "(t3.time_end >= t1.time_submit || "
			       "t3.time_end = 0)) || "
			       "(t3.time_start > t1.time_submit)))",
			       job_fields, cluster_name, job_table,
			       cluster_name, assoc_table,
			       cluster_name, resv_table);
	if (extra) {
		xstrcat(query, extra);
		xfree(extra);
	}

	/* Order the rows this way so that it is easy to look for
	   duplicates and also easy to sort the resized jobs.
	*/
	xstrcat(query, " group by id_job, time_submit desc");

	if (debug_flags & DEBUG_FLAG_DB_JOB)
		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		rc = SLURM_ERROR;
		goto end_it;
	}
	xfree(query);


	/* Here we set up environment to check used nodes of jobs.
	   Since we store the bitmap of the entire cluster we can use
	   that to set up a hostlist and set up the bitmap to make
	   things work.  This should go before the setup of conds
	   since we could update the start/end time.
	*/
	if (job_cond && job_cond->used_nodes) {
		local_cluster_list = setup_cluster_list_with_inx(
			mysql_conn, job_cond, (void **)&curr_cluster);
		if (!local_cluster_list) {
			rc = SLURM_ERROR;
			goto end_it;
		}
	}

	while ((row = mysql_fetch_row(result))) {
		char *id = row[JOB_REQ_ID];
		bool job_ended = 0;
		int start = slurm_atoul(row[JOB_REQ_START]);

		curr_id = slurm_atoul(row[JOB_REQ_JOBID]);

		if (job_cond && !job_cond->duplicates
		    && (curr_id == last_id)
		    && (slurm_atoul(row[JOB_REQ_STATE]) != JOB_RESIZING))
			continue;

		/* check the bitmap to see if this is one of the jobs
		   we are looking for */
		/* Use start time instead of submit time because node
		 * indexes are determined at start time and not submit. */
		if (!good_nodes_from_inx(local_cluster_list,
					 (void **)&curr_cluster,
					 row[JOB_REQ_NODE_INX], start)) {
			last_id = curr_id;
			continue;
		}

		job = slurmdb_create_job_rec();
		job->state = slurm_atoul(row[JOB_REQ_STATE]);
		if (curr_id == last_id)
			/* put in reverse so we order by the submit getting
			   larger which it is given to us in reverse
			   order from the database */
			list_prepend(job_list, job);
		else
			list_append(job_list, job);
		last_id = curr_id;

		if (row[JOB_REQ_GRES_ALLOC])
			job->alloc_gres = xstrdup(row[JOB_REQ_GRES_ALLOC]);
		else
			job->alloc_gres = xstrdup("");
		job->alloc_nodes = slurm_atoul(row[JOB_REQ_ALLOC_NODES]);
		job->associd = slurm_atoul(row[JOB_REQ_ASSOCID]);
		job->array_job_id = slurm_atoul(row[JOB_REQ_ARRAYJOBID]);
		job->array_task_id = slurm_atoul(row[JOB_REQ_ARRAYTASKID]);
		job->resvid = slurm_atoul(row[JOB_REQ_RESVID]);

		/* This shouldn't happen with new jobs, but older jobs
		 * could have been added without a start and so the
		 * array_task_id would be 0 instead of its real value */
		if (!job->array_job_id && !job->array_task_id)
			job->array_task_id = NO_VAL;

		if (row[JOB_REQ_RESV_NAME] && row[JOB_REQ_RESV_NAME][0])
			job->resv_name = xstrdup(row[JOB_REQ_RESV_NAME]);

		job->cluster = xstrdup(cluster_name);

		/* we want a blank wckey if the name is null */
		if (row[JOB_REQ_WCKEY])
			job->wckey = xstrdup(row[JOB_REQ_WCKEY]);
		else
			job->wckey = xstrdup("");
		job->wckeyid = slurm_atoul(row[JOB_REQ_WCKEYID]);

		if (row[JOB_REQ_USER_NAME])
			job->user = xstrdup(row[JOB_REQ_USER_NAME]);
		else
			job->uid = slurm_atoul(row[JOB_REQ_UID]);

		if (row[JOB_REQ_LFT])
			job->lft = slurm_atoul(row[JOB_REQ_LFT]);

		if (row[JOB_REQ_ACCOUNT] && row[JOB_REQ_ACCOUNT][0])
			job->account = xstrdup(row[JOB_REQ_ACCOUNT]);
		else if (row[JOB_REQ_ACCOUNT1] && row[JOB_REQ_ACCOUNT1][0])
			job->account = xstrdup(row[JOB_REQ_ACCOUNT1]);

		if (row[JOB_REQ_ARRAY_STR] && row[JOB_REQ_ARRAY_STR][0])
			job->array_task_str = xstrdup(row[JOB_REQ_ARRAY_STR]);

		if (row[JOB_REQ_ARRAY_MAX])
			job->array_max_tasks =
				slurm_atoul(row[JOB_REQ_ARRAY_MAX]);

		if (row[JOB_REQ_BLOCKID])
			job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);

		job->eligible = slurm_atoul(row[JOB_REQ_ELIGIBLE]);
		job->submit = slurm_atoul(row[JOB_REQ_SUBMIT]);
		job->start = start;
		job->end = slurm_atoul(row[JOB_REQ_END]);
		job->timelimit = slurm_atoul(row[JOB_REQ_TIMELIMIT]);

		/* since the job->end could be set later end it here */
		if (job->end) {
			job_ended = 1;
			if (!job->start || (job->start > job->end))
				job->start = job->end;
		}

		if (job_cond && !job_cond->without_usage_truncation
		    && job_cond->usage_start) {
			if (job->start && (job->start < job_cond->usage_start))
				job->start = job_cond->usage_start;

			if (!job->end || job->end > job_cond->usage_end)
				job->end = job_cond->usage_end;

			if (!job->start)
				job->start = job->end;

			job->elapsed = job->end - job->start;

			if (row[JOB_REQ_SUSPENDED]) {
				MYSQL_RES *result2 = NULL;
				MYSQL_ROW row2;
				/* get the suspended time for this job */
				query = xstrdup_printf(
					"select time_start, time_end from "
					"\"%s_%s\" where "
					"(time_start < %ld && (time_end >= %ld "
					"|| time_end = 0)) && job_db_inx=%s "
					"order by time_start",
					cluster_name, suspend_table,
					job_cond->usage_end,
					job_cond->usage_start,
					id);

				debug4("%d(%s:%d) query\n%s",
				       mysql_conn->conn, THIS_FILE,
				       __LINE__, query);
				if (!(result2 = mysql_db_query_ret(
					      mysql_conn,
					      query, 0))) {
					FREE_NULL_LIST(job_list);
					job_list = NULL;
					break;
				}
				xfree(query);
				while ((row2 = mysql_fetch_row(result2))) {
					time_t local_start =
						slurm_atoul(row2[0]);
					time_t local_end =
						slurm_atoul(row2[1]);

					if (!local_start)
						continue;

					if (job->start > local_start)
						local_start = job->start;
					if (job->end < local_end)
						local_end = job->end;

					if ((local_end - local_start) < 1)
						continue;

					job->elapsed -=
						(local_end - local_start);
					job->suspended +=
						(local_end - local_start);
				}
				mysql_free_result(result2);

			}
		} else {
			job->suspended = slurm_atoul(row[JOB_REQ_SUSPENDED]);

			/* fix the suspended number to be correct */
			if (job->state == JOB_SUSPENDED)
				job->suspended = now - job->suspended;
			if (!job->start) {
				job->elapsed = 0;
			} else if (!job->end) {
				job->elapsed = now - job->start;
			} else {
				job->elapsed = job->end - job->start;
			}

			job->elapsed -= job->suspended;
		}

		if ((int)job->elapsed < 0)
			job->elapsed = 0;

		job->jobid = curr_id;
		job->jobname = xstrdup(row[JOB_REQ_NAME]);
		job->gid = slurm_atoul(row[JOB_REQ_GID]);
		job->exitcode = slurm_atoul(row[JOB_REQ_EXIT_CODE]);
		job->derived_ec = slurm_atoul(row[JOB_REQ_DERIVED_EC]);
		job->derived_es = xstrdup(row[JOB_REQ_DERIVED_ES]);

		if (row[JOB_REQ_PARTITION])
			job->partition = xstrdup(row[JOB_REQ_PARTITION]);

		if (row[JOB_REQ_NODELIST])
			job->nodes = xstrdup(row[JOB_REQ_NODELIST]);

		if (!job->nodes || !xstrcmp(job->nodes, "(null)")) {
			xfree(job->nodes);
			job->nodes = xstrdup("(unknown)");
		}

		job->track_steps = slurm_atoul(row[JOB_REQ_TRACKSTEPS]);
		job->priority = slurm_atoul(row[JOB_REQ_PRIORITY]);
		job->req_cpus = slurm_atoul(row[JOB_REQ_REQ_CPUS]);
		if (row[JOB_REQ_GRES_REQ])
			job->req_gres = xstrdup(row[JOB_REQ_GRES_REQ]);
		else
			job->req_gres = xstrdup("");
		job->req_mem = slurm_atoul(row[JOB_REQ_REQ_MEM]);
		job->requid = slurm_atoul(row[JOB_REQ_KILL_REQUID]);
		job->qosid = slurm_atoul(row[JOB_REQ_QOS]);
		job->show_full = 1;

		if (row[JOB_REQ_TRESA])
			job->tres_alloc_str = xstrdup(row[JOB_REQ_TRESA]);
		if (row[JOB_REQ_TRESR])
			job->tres_req_str = xstrdup(row[JOB_REQ_TRESR]);

		if (only_pending || (job_cond && job_cond->without_steps))
			goto skip_steps;

		if (job_cond && job_cond->step_list
		    && list_count(job_cond->step_list)) {
			set = 0;
			itr = list_iterator_create(job_cond->step_list);
			while ((selected_step = list_next(itr))) {
				if ((selected_step->jobid != job->jobid) &&
				    (selected_step->jobid !=
				     job->array_job_id)) {
					continue;
				} else if ((selected_step->array_task_id !=
					    INFINITE) &&
					   (selected_step->array_task_id !=
					    job->array_task_id))
					continue;
				else if (selected_step->stepid == NO_VAL) {
					job->show_full = 1;
					break;
				} else if (selected_step->stepid == INFINITE)
					selected_step->stepid =
						SLURM_BATCH_SCRIPT;

				if (set)
					xstrcat(extra, " || ");
				else
					xstrcat(extra, " && (");

				/* The stepid could be -2 so use %d not %u */
				xstrfmtcat(extra, "t1.id_step=%d",
					   selected_step->stepid);
				set = 1;
				job->show_full = 0;
				/* Set it back just in case we are
				   looking at a job array.
				*/
				if (selected_step->stepid == SLURM_BATCH_SCRIPT)
					selected_step->stepid = INFINITE;
			}
			list_iterator_destroy(itr);
			if (set)
				xstrcat(extra, ")");
		}

		query =	xstrdup_printf("select %s from \"%s_%s\" as t1 "
				       "where t1.job_db_inx=%s",
				       step_fields, cluster_name,
				       step_table, id);
		if (extra) {
			xstrcat(query, extra);
			xfree(extra);
		}

		if (debug_flags & DEBUG_FLAG_DB_STEP)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);

		if (!(step_result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(query);
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);

		/* Querying the steps in this fashion was faster than
		   doing only one query and then matching the steps up
		   later with the job.
		*/
		while ((step_row = mysql_fetch_row(step_result))) {
			/* check the bitmap to see if this is one of the steps
			   we are looking for */
			if (!good_nodes_from_inx(local_cluster_list,
						 (void **)&curr_cluster,
						 step_row[STEP_REQ_NODE_INX],
						 start))
				continue;

			step = slurmdb_create_step_rec();
			step->tot_cpu_sec = 0;
			step->tot_cpu_usec = 0;
			step->job_ptr = job;
			if (!job->first_step_ptr)
				job->first_step_ptr = step;
			list_append(job->steps, step);
			step->stepid = slurm_atoul(step_row[STEP_REQ_STEPID]);
			/* info("got step %u.%u", */
/* 			     job->header.jobnum, step->stepnum); */
			step->state = slurm_atoul(step_row[STEP_REQ_STATE]);
			step->exitcode =
				slurm_atoul(step_row[STEP_REQ_EXIT_CODE]);
			step->nnodes = slurm_atoul(step_row[STEP_REQ_NODES]);

			step->ntasks = slurm_atoul(step_row[STEP_REQ_TASKS]);
			step->task_dist =
				slurm_atoul(step_row[STEP_REQ_TASKDIST]);

			step->start = slurm_atoul(step_row[STEP_REQ_START]);

			step->end = slurm_atoul(step_row[STEP_REQ_END]);
			/* if the job has ended end the step also */
			if (!step->end && job_ended) {
				step->end = job->end;
				step->state = job->state;
			}

			if (job_cond && !job_cond->without_usage_truncation
			    && job_cond->usage_start) {
				if (step->start
				    && (step->start < job_cond->usage_start))
					step->start = job_cond->usage_start;

				if (!step->start && step->end)
					step->start = step->end;

				if (!step->end
				    || (step->end > job_cond->usage_end))
					step->end = job_cond->usage_end;
			}

			/* figure this out by start stop */
			step->suspended =
				slurm_atoul(step_row[STEP_REQ_SUSPENDED]);
			if (!step->start) {
				step->elapsed = 0;
			} else if (!step->end) {
				step->elapsed = now - step->start;
			} else {
				step->elapsed = step->end - step->start;
			}
			step->elapsed -= step->suspended;

			if ((int)step->elapsed < 0)
				step->elapsed = 0;

			step->req_cpufreq_min = slurm_atoul(
				step_row[STEP_REQ_REQ_CPUFREQ_MIN]);
			step->req_cpufreq_max = slurm_atoul(
				step_row[STEP_REQ_REQ_CPUFREQ_MAX]);
			step->req_cpufreq_gov =	slurm_atoul(
				step_row[STEP_REQ_REQ_CPUFREQ_GOV]);

			step->stepname = xstrdup(step_row[STEP_REQ_NAME]);
			step->nodes = xstrdup(step_row[STEP_REQ_NODELIST]);
			step->requid =
				slurm_atoul(step_row[STEP_REQ_KILL_REQUID]);

			step->stats.cpu_min = slurm_atoul(
				step_row[STEP_REQ_MIN_CPU]);

			if (step->stats.cpu_min != NO_VAL) {
				step->user_cpu_sec = slurm_atoul(
					step_row[STEP_REQ_USER_SEC]);
				step->user_cpu_usec = slurm_atoul(
					step_row[STEP_REQ_USER_USEC]);
				step->sys_cpu_sec =
					slurm_atoul(step_row[STEP_REQ_SYS_SEC]);
				step->sys_cpu_usec = slurm_atoul(
					step_row[STEP_REQ_SYS_USEC]);
				step->tot_cpu_sec +=
					step->user_cpu_sec + step->sys_cpu_sec;
				step->tot_cpu_usec += step->user_cpu_usec +
					step->sys_cpu_usec;
				step->stats.disk_read_max =
					atof(step_row[STEP_REQ_MAX_DISK_READ]);
				step->stats.disk_read_max_taskid = slurm_atoul(
					step_row[STEP_REQ_MAX_DISK_READ_TASK]);
				step->stats.disk_read_ave =
					atof(step_row[STEP_REQ_AVE_DISK_READ]);
				step->stats.disk_write_max =
					atof(step_row[STEP_REQ_MAX_DISK_WRITE]);
				step->stats.disk_write_max_taskid = slurm_atoul(
					step_row[STEP_REQ_MAX_DISK_WRITE_TASK]);
				step->stats.disk_write_ave =
					atof(step_row[STEP_REQ_AVE_DISK_WRITE]);
				step->stats.vsize_max = slurm_atoul(
					step_row[STEP_REQ_MAX_VSIZE]);
				step->stats.vsize_max_taskid = slurm_atoul(
					step_row[STEP_REQ_MAX_VSIZE_TASK]);
				step->stats.vsize_ave =
					atof(step_row[STEP_REQ_AVE_VSIZE]);
				step->stats.rss_max =
					slurm_atoul(step_row[STEP_REQ_MAX_RSS]);
				step->stats.rss_max_taskid = slurm_atoul(
					step_row[STEP_REQ_MAX_RSS_TASK]);
				step->stats.rss_ave =
					atof(step_row[STEP_REQ_AVE_RSS]);
				step->stats.pages_max = slurm_atoul(
					step_row[STEP_REQ_MAX_PAGES]);
				step->stats.pages_max_taskid = slurm_atoul(
					step_row[STEP_REQ_MAX_PAGES_TASK]);
				step->stats.pages_ave =
					atof(step_row[STEP_REQ_AVE_PAGES]);
				step->stats.cpu_min_taskid = slurm_atoul(
					step_row[STEP_REQ_MIN_CPU_TASK]);
				step->stats.cpu_ave =
					atof(step_row[STEP_REQ_AVE_CPU]);
				step->stats.act_cpufreq =
					atof(step_row[STEP_REQ_ACT_CPUFREQ]);
				step->stats.consumed_energy = atof(
					step_row[STEP_REQ_CONSUMED_ENERGY]);
				step->stats.vsize_max_nodeid = slurm_atoul(
					step_row[STEP_REQ_MAX_VSIZE_NODE]);
				step->stats.rss_max_nodeid = slurm_atoul(
					step_row[STEP_REQ_MAX_RSS_NODE]);
				step->stats.pages_max_nodeid = slurm_atoul(
					step_row[STEP_REQ_MAX_PAGES_NODE]);
				step->stats.cpu_min_nodeid = slurm_atoul(
					step_row[STEP_REQ_MIN_CPU_NODE]);
			}

			if (step_row[STEP_REQ_TRES])
				step->tres_alloc_str =
					xstrdup(step_row[STEP_REQ_TRES]);
		}
		mysql_free_result(step_result);

		if (!job->track_steps) {
			uint64_t j_cpus, s_cpus;
			/* If we don't have track_steps we want to see
			   if we have multiple steps.  If we only have
			   one step, check the job name against the
			   step name; in almost all cases it will be
			   different.  If it is different, print out
			   the step separately.  It could also be a
			   single step/allocation where the job was
			   allocated more than the step requested
			   (e.g. CR_Socket).
			*/
			if (list_count(job->steps) > 1)
				job->track_steps = 1;
			else if (step &&
				 (xstrcmp(step->stepname, job->jobname) ||
				  (((j_cpus = slurmdb_find_tres_count_in_string(
					     job->tres_alloc_str, TRES_CPU))
				    != INFINITE64) &&
				   ((s_cpus = slurmdb_find_tres_count_in_string(
					     step->tres_alloc_str, TRES_CPU))
				    != INFINITE64) &&
				  j_cpus != s_cpus)))
					job->track_steps = 1;
		}
	skip_steps:
		/* need to reset here to make the above test valid */
		step = NULL;
	}
	mysql_free_result(result);

end_it:
	if (itr2)
		list_iterator_destroy(itr2);

	FREE_NULL_LIST(local_cluster_list);

	if (rc == SLURM_SUCCESS)
		list_transfer(sent_list, job_list);

	FREE_NULL_LIST(job_list);
	return rc;
}
Example #17
plugin_handle_t
plugin_load_and_link(const char *type_name, int n_syms,
		     const char *names[], void *ptrs[])
{
	plugin_handle_t plug = PLUGIN_INVALID_HANDLE;
	struct stat st;
	char *head = NULL, *dir_array = NULL, *so_name = NULL;
	char *file_name = NULL;
	int i = 0;
	plugin_err_t err = EPLUGIN_NOTFOUND;

	if (!type_name)
		return plug;
	so_name = xstrdup_printf("%s.so", type_name);
	while (so_name[i]) {
		if (so_name[i] == '/')
			so_name[i] = '_';
		i++;
	}
	if (!(dir_array = slurm_get_plugin_dir())) {
		error("plugin_load_and_link: No plugin dir given");
		xfree(so_name);
		return plug;
	}

	head = dir_array;
	for (i = 0; ; i++) {
		bool got_colon = 0;
		if (dir_array[i] == ':') {
			dir_array[i] = '\0';
			got_colon = 1;
		} else if (dir_array[i] != '\0')
			continue;

		file_name = xstrdup_printf("%s/%s", head, so_name);
		debug3("Trying to load plugin %s", file_name);
		if ((stat(file_name, &st) < 0) || (!S_ISREG(st.st_mode))) {
			debug4("%s: Does not exist or not a regular file.",
			       file_name);
			xfree(file_name);
			err = EPLUGIN_NOTFOUND;
		} else {
			if ((err = plugin_load_from_file(&plug, file_name))
			   == EPLUGIN_SUCCESS) {
				if (plugin_get_syms(plug, n_syms,
						    names, ptrs) >= n_syms) {
					debug3("Success.");
					xfree(file_name);
					break;
				} else {
					(void) dlclose(plug);
					err = EPLUGIN_MISSING_SYMBOL;
					plug = PLUGIN_INVALID_HANDLE;
				}
			} else
				plug = PLUGIN_INVALID_HANDLE;
			xfree(file_name);
		}

		if (got_colon) {
			head = dir_array + i + 1;
		} else
			break;
	}

	xfree(dir_array);
	xfree(so_name);
	errno = err;
	return plug;
}
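A hypothetical caller sketch, resolving two entry points from a plugin; the plugin type and symbol names are illustrative only:

static const char *example_syms[] = { "init", "fini" };
static void *example_ops[2];

static int load_example_plugin(void)
{
	plugin_handle_t h;

	h = plugin_load_and_link("auth/munge", 2, example_syms, example_ops);
	if (h == PLUGIN_INVALID_HANDLE) {
		error("cannot find a usable plugin of type auth/munge");
		return SLURM_ERROR;
	}
	return SLURM_SUCCESS;
}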
Example #18
File: z19.c Project: thektulu/lout
int AttachGalley(OBJECT hd, OBJECT *inners, OBJECT *suspend_pt)
{ OBJECT hd_index;		/* the index of hd in the enclosing galley   */
  OBJECT hd_inners;		/* inner galleys of hd, if unsized           */
  OBJECT dest;			/* the target @Galley hd empties into        */
  OBJECT dest_index;		/* the index of dest                         */
  OBJECT target;		/* the target indefinite containing dest     */
  OBJECT target_index;		/* the index of target                       */
  OBJECT target_galley;		/* the body of target, made into a galley    */
  OBJECT tg_inners;		/* inner galleys of target_galley            */
  BOOLEAN need_precedes = FALSE;/* true if destination lies before galley    */
  OBJECT recs;			/* list of recursive definite objects        */
  OBJECT link, y = nilobj;	/* for scanning through the components of hd */
  CONSTRAINT c;			/* temporary variable holding a constraint   */
  OBJECT env, n1, tmp, zlink, z, sym;	/* placeholders and temporaries	     */
  BOOLEAN was_sized;		/* true if sized(hd) initially               */
  int dim;			/* the galley direction                      */
  FULL_LENGTH perp_back, perp_fwd;
  OBJECT why, junk;

  debug2(DGA, D, "[ AttachGalley(Galley %s into %s)",
	SymName(actual(hd)), SymName(whereto(hd)));
  ifdebug(DGA, DD, DebugGalley(hd, nilobj, 4));
  assert( Up(hd) != hd, "AttachGalley: no index!" );
  Parent(hd_index, Up(hd));
  assert( type(hd_index) == UNATTACHED, "AttachGalley: not UNATTACHED!" );
  hd_inners = tg_inners = nilobj;
  was_sized = sized(hd);
  dim = gall_dir(hd);

  for(;;)
  {
    /*************************************************************************/
    /*                                                                       */
    /*  Search for a destination for hd.  If hd is unsized, search for       */
    /*  inner galleys preceding it first of all, then for receptive objects  */
    /*  following it, possibly in inner galleys.  If no luck, exit.          */
    /*  If hd is sized, search only for receptive objects in the current     */
    /*  galley below the current spot, and fail if cannot find any.          */
    /*                                                                       */
    /*************************************************************************/

    sym = whereto(hd);
    if( sized(hd) )
    {
      /* sized galley case: search on from current spot */
      target_index = SearchGalley(Up(hd_index), sym, TRUE, FALSE, TRUE, TRUE);
      if( target_index == nilobj )
      {	
	/* search failed to find any new target, so kill the galley */
	for( link = Down(hd); link != hd; link = NextDown(link) )
	{ Child(y, link);
	  if( type(y) == SPLIT )  Child(y, DownDim(y, dim));
	  if( is_definite(type(y)) )  break;
	}
	if( link != hd )
	  Error(19, 1, "galley %s deleted from here (no target)",
	    WARN, &fpos(y), SymName(actual(hd)));
	if( hd_inners != nilobj )  DisposeObject(hd_inners), hd_inners=nilobj;
	if( tg_inners != nilobj )  DisposeObject(tg_inners), tg_inners=nilobj;
	KillGalley(hd, FALSE);
	*inners = nilobj;
	debug0(DGA, D, "] AttachGalley returning ATTACH_KILLED");
	return ATTACH_KILLED;
      }
      else if( actual(actual(target_index)) == InputSym )
      {
	/* search found input object, so suspend on that */
	DeleteNode(hd_index);
	Link(target_index, hd);
	*inners = nilobj;
	debug0(DGA, D, "] AttachGalley returning ATTACH_INPUT");
	return ATTACH_INPUT;
      }

    }
    else /* unsized galley, either backwards or normal */
    {
      if( foll_or_prec(hd) == GALL_PREC )
      {	target_index= SearchGalley(Up(hd_index), sym, FALSE, TRUE,TRUE,FALSE);
	need_precedes = FALSE;
      }
      else
      {	target_index = SearchGalley(Up(hd_index), sym, FALSE,TRUE,FALSE,FALSE);
	need_precedes = (target_index != nilobj);
	if( target_index == nilobj )
	  target_index = SearchGalley(Up(hd_index), sym, TRUE,TRUE,TRUE,FALSE);
      }

      /* if no luck, exit without error */
      if( target_index == nilobj )
      {	*inners = nilobj;
	debug0(DGA, D, "] AttachGalley returning ATTACH_NOTARGET");
	return ATTACH_NOTARGET;
      }
    }
    assert( type(target_index) == RECEPTIVE, "AttachGalley: target_index!" );
    target = actual(target_index);
    assert( type(target) == CLOSURE, "AttachGalley: target!" );

    /* set target_galley to the expanded value of target */
    debug1(DYY, D, "[ EnterErrorBlock(FALSE) (expanding target %s)",
      SymName(actual(target)));
    EnterErrorBlock(FALSE);
    New(target_galley, HEAD);
    force_gall(target_galley) = FALSE;
    enclose_obj(target_galley) = limiter(target_galley) = nilobj;
    ClearHeaders(target_galley);
    opt_components(target_galley) = opt_constraints(target_galley) = nilobj;
    gall_dir(target_galley) = external_hor(target) ? COLM : ROWM;
    FposCopy(fpos(target_galley), fpos(target));
    actual(target_galley) = actual(target);
    whereto(target_galley) = ready_galls(target_galley) = nilobj;
    foll_or_prec(target_galley) = GALL_FOLL;
    must_expand(target_galley) = FALSE;
    sized(target_galley) = FALSE;

    /* get perpendicular constraint (none if horizontal galley) */
    if( dim == ROWM )
    {
      Constrained(target, &c, 1-dim, &junk);
      if( !constrained(c) )
        Error(19, 2, "receptive symbol %s has unconstrained width",
	  FATAL, &fpos(target), SymName(actual(target)));
      debug2(DSC, DD, "Constrained( %s, 1-dim ) = %s",
	EchoObject(target), EchoConstraint(&c));
      if( !FitsConstraint(0, 0, c) )
      { debug0(DGA, D, "  reject: target_galley horizontal constraint is -1");
	y = nilobj;
        goto REJECT;
      }
    }
    else /* actually unused */
      SetConstraint(c, MAX_FULL_LENGTH, MAX_FULL_LENGTH, MAX_FULL_LENGTH);

    debug1(DGA, DDD, "  expanding %s", EchoObject(target));
    tmp = CopyObject(target, no_fpos);
    Link(target_galley, tmp);
    env = DetachEnv(tmp);
    debug4(DGM, D, "  external_ver(%s) = %s, external_hor(%s) = %s",
      SymName(actual(target)), bool(external_ver(target)),
      SymName(actual(target)), bool(external_hor(target)));
    SizeGalley(target_galley, env,
	external_ver(target) || external_hor(target),
	threaded(target), non_blocking(target_index),
	trigger_externs(target_index), &save_style(target),
	&c, whereto(hd), &dest_index, &recs, &tg_inners,
	enclose_obj(hd) != nilobj ? CopyObject(enclose_obj(hd), no_fpos):nilobj);
    debug1(DGA, DD, "  SizeGalley tg_inners: %s", DebugInnersNames(tg_inners));
    if( recs != nilobj )  ExpandRecursives(recs);
    dest = actual(dest_index);
    if( underline(dest) == UNDER_UNDEF )  underline(dest) = UNDER_OFF;

    /* verify that hd satisfies any horizontal constraint on dest */
    if( dim == ROWM )
    {
      debug1(DGA, DDD, "  checking hor fit of hd in %s",SymName(actual(dest)));
      Constrained(dest, &c, 1-dim, &junk);
      debug3(DSC, DD, "Constrained( %s, %s ) = %s",
	EchoObject(dest), dimen(1-dim), EchoConstraint(&c));
      assert( constrained(c), "AttachGalley: dest unconstrained!" );
      if( !FitsConstraint(0, 0, c) )
      { debug0(DGA, D, "  reject: hd horizontal constraint is -1");
	y = nilobj;
        goto REJECT;
      }
    }

    /* manifest and size the galley if not done yet */
    if( !sized(hd) )
    {
      debug2(DYY, D, "[ EnterErrorBlock(TRUE) (sizing galley %s into %s)",
	SymName(actual(hd)), SymName(whereto(hd)));
      EnterErrorBlock(TRUE);
      n1 = nilobj;
      Child(y, Down(hd));
      env = DetachEnv(y);
      /*** threaded() only defined in ROWM case
      SizeGalley(hd, env, TRUE, threaded(dest), non_blocking(target_index),
	TRUE, &save_style(dest), &c, nilobj, &n1, &recs, &hd_inners);
      *** */
      SizeGalley(hd, env, TRUE, dim == ROWM ? threaded(dest) : FALSE,
	non_blocking(target_index), TRUE, &save_style(dest), &c, nilobj,
	&n1, &recs, &hd_inners, nilobj);
      debug1(DGA,DD,"  SizeGalley hd_inners: %s", DebugInnersNames(hd_inners));
      if( recs != nilobj )  ExpandRecursives(recs);
      if( need_precedes )		/* need an ordering constraint */
      {	OBJECT index1, index2;
        New(index1, PRECEDES);
	New(index2, FOLLOWS);
	blocked(index2) = FALSE;
	tmp = MakeWord(WORD, STR_EMPTY, no_fpos);
	Link(index1, tmp);  Link(index2, tmp);
	Link(Up(hd_index), index1);
	Link(Down(hd), index2);
	debug0(DGA, D, "  inserting PRECEDES and FOLLOWS");
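	/* Both indexes share the one word object (tmp); the FOLLOWS case
	   further down uses that word's parent count to tell whether its
	   PRECEDES partner still exists. */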
      }
      LeaveErrorBlock(TRUE);
      debug0(DYY, D, "] LeaveErrorBlock(TRUE) (finished sizing galley)");
    }

    if( dim == ROWM )
    { if( !FitsConstraint(back(hd, 1-dim), fwd(hd, 1-dim), c) )
      { debug3(DGA, D, "  reject: hd %s,%s does not fit target_galley %s",
	  EchoLength(back(hd, 1-dim)), EchoLength(fwd(hd, 1-dim)),
	  EchoConstraint(&c));
        Error(19, 3, "too little horizontal space for galley %s at %s",
	  WARN, &fpos(hd), SymName(actual(hd)), SymName(actual(dest)));
        goto REJECT;
      }
    }

    /* check status of first component of hd */
    debug0(DGA, DDD, "  now ready to attach; hd =");
    ifdebug(DGA, DDD, DebugObject(hd));
    for( link = Down(hd);  link != hd;  link = NextDown(link) )
    {
      Child(y, link);
      debug1(DGA, DDD, "  examining %s", EchoIndex(y));
      if( type(y) == SPLIT )  Child(y, DownDim(y, dim));
      switch( type(y) )
      {

	case EXPAND_IND:
	case SCALE_IND:
	case COVER_IND:
	case GALL_PREC:
	case GALL_FOLL:
	case GALL_FOLL_OR_PREC:
	case GALL_TARG:
	case CROSS_PREC:
	case CROSS_FOLL:
	case CROSS_FOLL_OR_PREC:
	case CROSS_TARG:
	case PAGE_LABEL_IND:
	    
	  break;


	case PRECEDES:
	case UNATTACHED:
	    
	  if( was_sized )
	  { /* SizeGalley was not called, so hd_inners was not set by it */
	    if( hd_inners == nilobj )  New(hd_inners, ACAT);
	    Link(hd_inners, y);
	  }
	  break;


	case RECEPTIVE:

	  goto SUSPEND;


	case RECEIVING:
	    
	  goto SUSPEND;


	case FOLLOWS:
	    
	  Child(tmp, Down(y));
	  if( Up(tmp) == LastUp(tmp) )
	  { link = pred(link, CHILD);
	    debug0(DGA, DD, "  disposing FOLLOWS");
	    DisposeChild(NextDown(link));
	    break;
	  }
	  Parent(tmp, Up(tmp));
	  assert(type(tmp) == PRECEDES, "Attach: PRECEDES!");
	  switch( CheckComponentOrder(tmp, target_index) )
	  {
	    case CLEAR:		DeleteNode(tmp);
				link = pred(link, CHILD);
				DisposeChild(NextDown(link));
				break;

	    case PROMOTE:	break;

	    case BLOCK:		debug0(DGA, DD, "CheckComponentOrder: BLOCK");
				goto SUSPEND;

	    case CLOSE:		debug0(DGA, D, "  reject: CheckComponentOrder");
				goto REJECT;
	  }
	  break;


	case GAP_OBJ:

	  underline(y) = underline(dest);
	  if( !join(gap(y)) )  seen_nojoin(hd) = TRUE;
	  break;


	case BEGIN_HEADER:
	case END_HEADER:
	case SET_HEADER:
	case CLEAR_HEADER:

	  /* do nothing until actually promoted out of here */
	  underline(y) = underline(dest);
	  break;


	case CLOSURE:
	case CROSS:
	case FORCE_CROSS:
	case NULL_CLOS:
	case PAGE_LABEL:

	  underline(y) = underline(dest);
	  break;


	case WORD:
	case QWORD:
	case ONE_COL:
	case ONE_ROW:
	case WIDE:
	case HIGH:
	case HSHIFT:
	case VSHIFT:
	case HMIRROR:
	case VMIRROR:
	case HSCALE:
	case VSCALE:
	case HCOVER:
	case VCOVER:
	case HCONTRACT:
	case VCONTRACT:
	case HLIMITED:
	case VLIMITED:
	case HEXPAND:
	case VEXPAND:
	case START_HVSPAN:
	case START_HSPAN:
	case START_VSPAN:
	case HSPAN:
	case VSPAN:
	case ROTATE:
	case BACKGROUND:
	case SCALE:
	case KERN_SHRINK:
	case INCGRAPHIC:
	case SINCGRAPHIC:
	case PLAIN_GRAPHIC:
	case GRAPHIC:
	case LINK_SOURCE:
	case LINK_DEST:
	case LINK_DEST_NULL:
	case LINK_URL:
	case ACAT:
	case HCAT:
	case VCAT:
	case ROW_THR:
	case COL_THR:
	    

	  underline(y) = underline(dest);
	  if( dim == ROWM )
	  {
	    /* make sure y is not joined to a target below (vertical only) */
	    for( zlink = NextDown(link); zlink != hd; zlink = NextDown(zlink) )
	    { Child(z, zlink);
	      switch( type(z) )
	      {
	        case RECEPTIVE:
		
		  if( non_blocking(z) )
		  { zlink = PrevDown(zlink);
		    DeleteNode(z);
		  }
		  else
		  { y = z;
		    goto SUSPEND;
		  }
		  break;


	        case RECEIVING:
		
		  if( non_blocking(z) )
		  { zlink = PrevDown(zlink);
		    while( Down(z) != z )
		    { Child(tmp, Down(z));
		      if( opt_components(tmp) != nilobj )
		      { DisposeObject(opt_components(tmp));
		        opt_components(tmp) = nilobj;
		        debug3(DOG, D, "AttachGalley(%s) de-optimizing %s %s",
			  SymName(actual(hd)), SymName(actual(tmp)), "(join)");
		      }
		      DetachGalley(tmp);
		      KillGalley(tmp, FALSE);
		    }
		    DeleteNode(z);
		  }
		  else
		  { y = z;
		    goto SUSPEND;
		  }
		  break;


	        case GAP_OBJ:
		
		  if( !join(gap(z)) )  zlink = PrevDown(hd);
		  break;


	        default:	break;
	      }
	    }

	    /* if HCAT, try vertical hyphenation (vertical galleys only) */
	    if( type(y) == HCAT )  VerticalHyphenate(y);
	  }


	  /* check availability of parallel space for the first component */
	  why = nilobj;
	  Constrained(dest, &c, dim, &why);
	  debug3(DGF, DD, "  dest parallel Constrained(%s, %s) = %s",
	    EchoObject(dest), dimen(dim), EchoConstraint(&c));
	  if( !FitsConstraint(back(y, dim), fwd(y, dim), c) )
	  { BOOLEAN scaled;

	    /* if forcing galley doesn't fit, try scaling first component */
	    scaled = FALSE;
	    if( force_gall(hd) && size(y, dim) > 0 )
	    { int scale_factor;
	      scale_factor = ScaleToConstraint(back(y,dim), fwd(y,dim), &c);
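	      /* ScaleToConstraint apparently returns a fixed-point factor in
	         units of SF; only factors above 0.5*SF (shrinking to no less
	         than half size) are applied, otherwise we fall through to the
	         rejection below. */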
	      if( scale_factor > 0.5 * SF )
	      {	char num1[20], num2[20];
		sprintf(num1, "%.1fc", (float) size(y, dim) / CM);
		sprintf(num2, "%.1fc", (float) bfc(c) / CM);
		if( dim == ROWM )
		  Error(19, 4, "%s object too high for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		else
		  Error(19, 5, "%s object too wide for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		y = InterposeScale(y, scale_factor, dim);
		scaled = TRUE;
	      }
	    }

	    /* otherwise we must reject, and warn the user */
	    if( !scaled )
	    { char num1[20], num2[20];
	      debug3(DGA, D, "  reject: vsize %s,%s in %s; y=",
		EchoLength(back(y, dim)), EchoLength(fwd(y, dim)),
		EchoConstraint(&c));
	      ifdebug(DGA, D, DebugObject(y));
	      if( size(y, dim) > 0 )
	      { sprintf(num1, "%.1fc", (float) size(y, dim) / CM);
	        sprintf(num2, "%.1fc", (float) bfc(c) / CM);
	        if( dim == ROWM )
		  Error(19, 12, "%s object too high for %s space; will try elsewhere",
		    WARN, &fpos(y), num1, num2);
	        else
		  Error(19, 13, "%s object too wide for %s space; will try elsewhere",
		    WARN, &fpos(y), num1, num2);
	      }
	      goto REJECT;
	    }

	  }

	  /* check availability of perpendicular space for first component */
	  if( dim == ROWM )
	  { perp_back = back(hd, 1-dim);  perp_fwd = fwd(hd, 1-dim);
	  }
	  else
	  { perp_back = back(y, 1-dim);  perp_fwd = fwd(y, 1-dim);
	  }
	  Constrained(dest, &c, 1-dim, &junk);
	  debug3(DGF, DD, "  dest perpendicular Constrained(%s, %s) = %s",
	    EchoObject(dest), dimen(1-dim), EchoConstraint(&c));
	  if( !FitsConstraint(perp_back, perp_fwd, c) )
	  { BOOLEAN scaled;

	    /* if forcing galley doesn't fit, try scaling first component */
	    scaled = FALSE;
	    if( force_gall(hd) && perp_back + perp_fwd > 0 )
	    { int scale_factor;
	      scale_factor = ScaleToConstraint(perp_back, perp_fwd, &c);
	      if( scale_factor > 0.5 * SF )
	      {	char num1[20], num2[20];
		sprintf(num1, "%.1fc", (float) (perp_back + perp_fwd) / CM);
		sprintf(num2, "%.1fc", (float) bfc(c) / CM);
		if( 1-dim == ROWM )
		  Error(19, 6, "%s object too high for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		else
		  Error(19, 7, "%s object too wide for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		y = InterposeScale(y, scale_factor, 1-dim);
		scaled = TRUE;
	      }
	    }

	    /* otherwise we must reject, and warn the user */
	    if( !scaled )
	    {
	      debug3(DGA, D, "  reject: vsize %s,%s in %s; y=",
		EchoLength(perp_back), EchoLength(perp_fwd),
		EchoConstraint(&c));
	      ifdebug(DGA, D, DebugObject(y));
	      goto REJECT;
	    }

	  }

	  /* dest seems OK, so perform its size adjustments */
	  debug0(DSA, D, "calling AdjustSize from AttachGalley (a)");
	  AdjustSize(dest, back(y, dim), fwd(y, dim), dim);
	  debug0(DSA, D, "calling AdjustSize from AttachGalley (b)");
	  AdjustSize(dest, perp_back, perp_fwd, 1-dim);


	  /* now check parallel space for target_galley in target */
	  Constrained(target, &c, dim, &why);
	  debug3(DGF, DD, "  target parallel Constrained(%s, %s) = %s",
	    EchoObject(target), dimen(dim), EchoConstraint(&c));
	  Child(z, LastDown(target_galley));  /* works in all cases? */
	  assert( !is_index(type(z)), "AttachGalley: is_index(z)!" );
	  assert( back(z, dim)>=0 && fwd(z, dim)>=0, "AttachGalley: z size!" );
	  if( !FitsConstraint(back(z, dim), fwd(z, dim), c) )
	  { BOOLEAN scaled;

	    debug2(DGA, D, "  why     = %d %s", (int) why, EchoObject(why));
	    debug2(DGA, D, "  limiter = %d %s", (int) limiter(hd),
	      EchoObject(limiter(hd)));

	    /* if forcing galley doesn't fit, try scaling z */
	    scaled = FALSE;
	    if( force_gall(hd) && size(z, dim) > 0 && limiter(hd) != why )
	    { int scale_factor;
	      scale_factor = ScaleToConstraint(back(z,dim), fwd(z,dim), &c);
	      if( scale_factor > 0.5 * SF )
	      {	char num1[20], num2[20];
		sprintf(num1, "%.1fc", (float) size(z, dim) / CM);
		sprintf(num2, "%.1fc", (float) bfc(c) / CM);
		if( dim == ROWM )
		  Error(19, 8, "%s object too high for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		else
		  Error(19, 9, "%s object too wide for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		z = InterposeWideOrHigh(z, dim);
		z = InterposeScale(z, scale_factor, dim);
		scaled = TRUE;
	      }
	    }

	    if( !scaled )
	    { char num1[20], num2[20];
	      limiter(hd) = why;
	      debug3(DGA, D, "  set limiter(%s) = %d %s", SymName(actual(hd)),
		(int) limiter(hd), EchoObject(limiter(hd)));
	      debug3(DGA, D, "  reject: size was %s,%s in %s; y =",
		EchoLength(back(z, dim)), EchoLength(fwd(z, dim)),
		EchoConstraint(&c));
	      ifdebug(DGA, D, DebugObject(y));
	      if( size(z, dim) > 0 )
	      { sprintf(num1, "%.1fc", (float) size(z, dim) / CM);
	        sprintf(num2, "%.1fc", (float) bfc(c) / CM);
	        if( dim == ROWM )
		  Error(19, 14, "%s object too high for %s space; will try elsewhere",
		    WARN, &fpos(y), num1, num2);
	        else
		  Error(19, 15, "%s object too wide for %s space; will try elsewhere",
		    WARN, &fpos(y), num1, num2);
	      }
	      goto REJECT;
	    }
	  }
	  limiter(hd) = why;
	  debug3(DGA, D, "  set limiter(%s) = %d %s", SymName(actual(hd)),
	    (int) limiter(hd), EchoObject(limiter(hd)));

	  /* now check perpendicular space for target_galley in target */
	  Constrained(target, &c, 1-dim, &junk);
	  debug3(DGF, DD, "  target perpendicular Constrained(%s, %s) = %s",
	    EchoObject(target), dimen(1-dim), EchoConstraint(&c));
	  Child(z, LastDown(target_galley));  /* works in all cases? */
	  assert( !is_index(type(z)), "AttachGalley: is_index(z)!" );
	  assert( back(z, 1-dim)>=0 && fwd(z, 1-dim)>=0,
	    "AttachGalley: z size (perpendicular)!" );
	  if( !FitsConstraint(back(z, 1-dim), fwd(z, 1-dim), c) )
	  { BOOLEAN scaled;

	    /* if forcing galley doesn't fit, try scaling z */
	    scaled = FALSE;
	    if( force_gall(hd) && size(z, 1-dim) > 0 )
	    { int scale_factor;
	      scale_factor = ScaleToConstraint(back(z,1-dim), fwd(z,1-dim), &c);
	      if( scale_factor > 0.5 * SF )
	      {	char num1[20], num2[20];
		sprintf(num1, "%.1fc", (float) size(z, 1-dim) / CM);
		sprintf(num2, "%.1fc", (float) bfc(c) / CM);
		if( 1-dim == ROWM )
		  Error(19, 10, "%s object too high for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		else
		  Error(19, 11, "%s object too wide for %s space; %s inserted",
		    WARN, &fpos(y), num1, num2, KW_SCALE);
		z = InterposeWideOrHigh(z, 1-dim);
		z = InterposeScale(z, scale_factor, 1-dim);
		scaled = TRUE;
	      }
	    }

	    if( !scaled )
	    {
	      debug3(DGA, D, "  reject: size was %s,%s in %s; y =",
		EchoLength(back(z, 1-dim)), EchoLength(fwd(z, 1-dim)),
		EchoConstraint(&c));
	      ifdebug(DGA, D, DebugObject(y));
	      goto REJECT;
	    }
	  }

	  /* target seems OK, so adjust sizes and accept */
	  if( external_hor(target) )
	  {
	    /* don't adjust any sizes, none to adjust */
	    debug0(DSA, D, "not calling AdjustSize from AttachGalley (c)");
	  }
	  else if( external_ver(target) )
	  {
	    /* adjust perp size only, to galley size */
	    debug0(DSA, D, "calling AdjustSize from AttachGalley (d)");
	    AdjustSize(target, back(target_galley, 1-dim),
	      fwd(target_galley, 1-dim), 1-dim);
	  }
	  else
	  {
	    /* adjust both directions, using z (last component) */
	    Child(z, LastDown(target_galley));
	    debug0(DSA, D, "AttachGalley AdjustSize using z =");
	    ifdebug(DSA, D, DebugObject(z));
	    debug0(DSA, D, "calling AdjustSize from AttachGalley (e)");
	    AdjustSize(target, back(z, dim), fwd(z, dim), dim);
	    debug0(DSA, D, "calling AdjustSize from AttachGalley (f)");
	    AdjustSize(target, back(z, 1-dim), fwd(z, 1-dim), 1-dim);
	  }

	  goto ACCEPT;


	default:
	    
	  assert1(FALSE, "AttachGalley:", Image(type(y)));
	  break;

      } /* end switch */
    } /* end for */

    /* null galley: promote whole galley without expanding the target */
    debug0(DGA, D, "  null galley");
    if( tg_inners != nilobj )  DisposeObject(tg_inners), tg_inners = nilobj;
    DisposeObject(target_galley);
    LeaveErrorBlock(FALSE);
    debug0(DYY, D, "] LeaveErrorBlock(FALSE) (null galley)");

    /* kill off any null objects within the galley, then transfer it */
    /* don't use Promote() since it does extra unwanted things here  */
    for( link = Down(hd);  link != hd;  link = NextDown(link) )
    { Child(y, link);
      switch( type(y) )
      {

	case GAP_OBJ:
	case CLOSURE:
	case CROSS:
	case FORCE_CROSS:
	case NULL_CLOS:
	case PAGE_LABEL:
	
	  link = PrevDown(link);
	  debug1(DGA, D, "  null galley, disposing %s", Image(type(y)));
	  DisposeChild(NextDown(link));
	  break;

	
	default:
	
	  break;
      }
    }
    TransferLinks(NextDown(hd), hd, Up(target_index));

    /* attach hd temporarily to target_index */
    MoveLink(Up(hd), target_index, PARENT);
    assert( type(hd_index) == UNATTACHED, "AttachGalley: type(hd_index)!" );
    DeleteNode(hd_index);

    /* return; only hd_inners needs to be flushed now */
    *inners = hd_inners;
    debug0(DGA, D, "] AttachGalley returning ATTACH_NULL");
    return ATTACH_NULL;


    REJECT:
	
      /* reject first component */
      /* debug1(DGA, D, "  reject %s", EchoObject(y)); */
      debug0(DGA, D, "  reject first component");
      LeaveErrorBlock(TRUE);
      debug0(DYY, D, "] LeaveErrorBlock(TRUE) (REJECT)");
      if( tg_inners != nilobj )  DisposeObject(tg_inners), tg_inners = nilobj;
      DisposeObject(target_galley);
      if( foll_or_prec(hd) == GALL_PREC && !sized(hd) )
      {
	/* move to just before the failed target */
	MoveLink(Up(hd_index), Up(target_index), PARENT);
      }
      else
      {
	/* move to just after the failed target */
	MoveLink(Up(hd_index), NextDown(Up(target_index)), PARENT);
      }
      continue;


    SUSPEND:
	
      /* suspend at first component */
      debug1(DGA, D, "  suspend %s", EchoIndex(y));
      blocked(y) = TRUE;
      LeaveErrorBlock(FALSE);
      debug0(DYY, D, "] LeaveErrorBlock(FALSE) (SUSPEND)");
      if( tg_inners != nilobj )  DisposeObject(tg_inners), tg_inners = nilobj;
      DisposeObject(target_galley);
      MoveLink(Up(hd_index), Up(target_index), PARENT);
      if( was_sized )
      { /* nothing new to flush if suspending and already sized */
	if( hd_inners != nilobj )  DisposeObject(hd_inners), hd_inners=nilobj;
	*inners = nilobj;
      }
      else
      { /* flush newly discovered inners if not sized before */
	*inners = hd_inners;
      }
      debug0(DGA, D, "] AttachGalley returning ATTACH_SUSPEND");
      *suspend_pt = y;
      return ATTACH_SUSPEND;


    ACCEPT:
	
      /* accept first component; now committed to the attach */
      debug3(DGA, D, "  accept %s %s %s", Image(type(y)), EchoObject(y),
	EchoFilePos(&fpos(y)));
      LeaveErrorBlock(TRUE);
      debug0(DYY, D, "] LeaveErrorBlock(TRUE) (ACCEPT)");

      /* attach hd to dest */
      MoveLink(Up(hd), dest_index, PARENT);
      assert( type(hd_index) == UNATTACHED, "AttachGalley: type(hd_index)!" );
      DeleteNode(hd_index);

      /* move first component of hd into dest */
      /* nb Interpose must be done after all AdjustSize calls */
      if( dim == ROWM && !external_ver(dest) )
	Interpose(dest, VCAT, hd, y);
      else if( dim == COLM && !external_hor(dest) )
      { Interpose(dest, ACAT, y, y);
	Parent(junk, Up(dest));
	assert( type(junk) == ACAT, "AttachGalley: type(junk) != ACAT!" );
	StyleCopy(save_style(junk), save_style(dest));
	adjust_cat(junk) = padjust(save_style(junk));
      }
      debug1(DGS, D, "calling Promote(hd, %s) from AttachGalley/ACCEPT",
	link == hd ? "hd" : "NextDown(link)");
      Promote(hd, link == hd ? hd : NextDown(link), dest_index, TRUE);

      /* move target_galley into target */
      /* nb Interpose must be done after all AdjustSize calls */
      if( !(external_ver(target) || external_hor(target)) )
      {	Child(z, LastDown(target_galley));
	Interpose(target, VCAT, z, z);
      }
      debug0(DGS, D, "calling Promote(target_galley) from AttachGalley/ACCEPT");
      Promote(target_galley, target_galley, target_index, TRUE);
      DeleteNode(target_galley);
      assert(Down(target_index)==target_index, "AttachGalley: target_ind");
      if( blocked(target_index) )  blocked(dest_index) = TRUE;
      DeleteNode(target_index);

      /* return; both tg_inners and hd_inners need to be flushed now;        */
      /* if was_sized, hd_inners contains the inners of the first component; */
      /* otherwise it contains the inners of all components, from SizeGalley */
      if( tg_inners == nilobj ) *inners = hd_inners;
      else if( hd_inners == nilobj ) *inners = tg_inners;
      else
      {	TransferLinks(Down(hd_inners), hd_inners, tg_inners);
	DeleteNode(hd_inners);
	*inners = tg_inners;
      }
      debug0(DGA, D, "] AttachGalley returning ATTACH_ACCEPT");
      ifdebug(DGA, D,
	if( dim == COLM && !external_hor(dest) )
	{ OBJECT z;
	  Parent(z, Up(dest));
	  debug2(DGA, D, "  COLM dest_encl on exit = %s %s",
	    Image(type(z)), EchoObject(z));
	}
      )
      return ATTACH_ACCEPT;

  } /* end for */
Example No. 19
/* dump_all_part_state - save the state of all partitions to file */
int dump_all_part_state(void)
{
	/* Save high-water mark to avoid buffer growth with copies */
	static int high_buffer_size = BUF_SIZE;
	ListIterator part_iterator;
	struct part_record *part_ptr;
	int error_code = 0, log_fd;
	char *old_file, *new_file, *reg_file;
	/* Locks: Read partition */
	slurmctld_lock_t part_read_lock =
	    { READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
	Buf buffer = init_buf(high_buffer_size);
	DEF_TIMERS;

	START_TIMER;
	/* write header: time */
	packstr(PART_STATE_VERSION, buffer);
	pack_time(time(NULL), buffer);

	/* write partition records to buffer */
	lock_slurmctld(part_read_lock);
	part_iterator = list_iterator_create(part_list);
	while ((part_ptr = (struct part_record *) list_next(part_iterator))) {
		xassert (part_ptr->magic == PART_MAGIC);
		_dump_part_state(part_ptr, buffer);
	}
	list_iterator_destroy(part_iterator);

	old_file = xstrdup(slurmctld_conf.state_save_location);
	xstrcat(old_file, "/part_state.old");
	reg_file = xstrdup(slurmctld_conf.state_save_location);
	xstrcat(reg_file, "/part_state");
	new_file = xstrdup(slurmctld_conf.state_save_location);
	xstrcat(new_file, "/part_state.new");
	unlock_slurmctld(part_read_lock);

	/* write the buffer to file */
	lock_state_files();
	log_fd = creat(new_file, 0600);
	if (log_fd < 0) {
		error("Can't save state, error creating file %s, %m",
		      new_file);
		error_code = errno;
	} else {
		int pos = 0, nwrite = get_buf_offset(buffer), amount, rc;
		char *data = (char *)get_buf_data(buffer);
		high_buffer_size = MAX(nwrite, high_buffer_size);
		while (nwrite > 0) {
			amount = write(log_fd, &data[pos], nwrite);
			if ((amount < 0) && (errno != EINTR)) {
				error("Error writing file %s, %m", new_file);
				error_code = errno;
				break;
			}
			nwrite -= amount;
			pos    += amount;
		}

		rc = fsync_and_close(log_fd, "partition");
		if (rc && !error_code)
			error_code = rc;
	}
	if (error_code)
		(void) unlink(new_file);
	else {			/* file shuffle */
		(void) unlink(old_file);
		if (link(reg_file, old_file)) {
			debug4("unable to create link for %s -> %s: %m",
			       reg_file, old_file);
		}
		(void) unlink(reg_file);
		if (link(new_file, reg_file)) {
			debug4("unable to create link for %s -> %s: %m",
			       new_file, reg_file);
		}
		(void) unlink(new_file);
	}
	xfree(old_file);
	xfree(reg_file);
	xfree(new_file);
	unlock_state_files();

	free_buf(buffer);
	END_TIMER2("dump_all_part_state");
	return 0;
}
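
The "file shuffle" above is the usual slurmctld state-save pattern: the fresh
image is written to part_state.new, the previous image is rotated to
part_state.old via link(2), and only then does the new file take the canonical
name.  A minimal standalone sketch of just that rotation (a hypothetical
helper using plain libc, not part of the SLURM sources):

	#include <stdio.h>
	#include <unistd.h>

	/* Rotate reg -> old and new -> reg with hard links, mirroring the
	 * shuffle in dump_all_part_state(); failures are reported but are
	 * not fatal, matching the permissive handling above. */
	static void rotate_state_files(const char *old_f, const char *reg_f,
				       const char *new_f)
	{
		(void) unlink(old_f);		/* drop the stale backup */
		if (link(reg_f, old_f))		/* current image -> backup */
			fprintf(stderr, "link %s -> %s failed\n", reg_f, old_f);
		(void) unlink(reg_f);
		if (link(new_f, reg_f))		/* new image -> current */
			fprintf(stderr, "link %s -> %s failed\n", new_f, reg_f);
		(void) unlink(new_f);
	}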
Example No. 20
/*
 * addto_update_list - add object updated to list
 * IN/OUT update_list: list of updated objects
 * IN type: update type
 * IN object: object updated
 * RET: error code
 *
 * NOTE: This function takes the given object and frees it later, so the
 *       object must be removed from any existing lists beforehand.
 */
extern int addto_update_list(List update_list, slurmdb_update_type_t type,
			     void *object)
{
	slurmdb_update_object_t *update_object = NULL;
	slurmdb_assoc_rec_t *assoc = object;
	slurmdb_qos_rec_t *qos = object;
#ifndef NDEBUG
	slurmdb_tres_rec_t *tres = object;
	slurmdb_res_rec_t *res = object;
	slurmdb_wckey_rec_t *wckey = object;
#endif
	ListIterator itr = NULL;

	if (!update_list) {
		error("no update list given");
		return SLURM_ERROR;
	}

	itr = list_iterator_create(update_list);
	while((update_object = list_next(itr))) {
		if (update_object->type == type)
			break;
	}
	list_iterator_destroy(itr);

	if (update_object) {
		/* here we prepend, primarily for association removal,
		   since parents need to be removed last, yet they are
		   removed first in the calling code */
		if (type == SLURMDB_UPDATE_FEDS) {
			FREE_NULL_LIST(update_object->objects);
			update_object->objects = object;
		} else
			list_prepend(update_object->objects, object);
		return SLURM_SUCCESS;
	}

	update_object = xmalloc(sizeof(slurmdb_update_object_t));

	list_append(update_list, update_object);

	update_object->type = type;

	list_sort(update_list, (ListCmpF)_sort_update_object_dec);

	switch(type) {
	case SLURMDB_MODIFY_USER:
	case SLURMDB_ADD_USER:
	case SLURMDB_REMOVE_USER:
	case SLURMDB_ADD_COORD:
	case SLURMDB_REMOVE_COORD:
		update_object->objects = list_create(slurmdb_destroy_user_rec);
		break;
	case SLURMDB_ADD_TRES:
		xassert(tres->id);
		update_object->objects = list_create(slurmdb_destroy_tres_rec);
		break;
	case SLURMDB_ADD_ASSOC:
		/* We are going to send these to the slurmctld daemons,
		   so let's set any unset limits to INFINITE instead
		   of NO_VAL */
		if (assoc->grp_jobs == NO_VAL)
			assoc->grp_jobs = INFINITE;
		if (assoc->grp_submit_jobs == NO_VAL)
			assoc->grp_submit_jobs = INFINITE;
		if (assoc->grp_wall == NO_VAL)
			assoc->grp_wall = INFINITE;

		if (assoc->max_jobs == NO_VAL)
			assoc->max_jobs = INFINITE;
		if (assoc->max_jobs_accrue == NO_VAL)
			assoc->max_jobs_accrue = INFINITE;
		if (assoc->min_prio_thresh == NO_VAL)
			assoc->min_prio_thresh = INFINITE;
		if (assoc->max_submit_jobs == NO_VAL)
			assoc->max_submit_jobs = INFINITE;
		if (assoc->max_wall_pj == NO_VAL)
			assoc->max_wall_pj = INFINITE;
		/* fall through */
	case SLURMDB_MODIFY_ASSOC:
	case SLURMDB_REMOVE_ASSOC:
		xassert(assoc->cluster);
		update_object->objects = list_create(
			slurmdb_destroy_assoc_rec);
		break;
	case SLURMDB_ADD_QOS:
		/* We are going to send these to the slurmctld daemons,
		   so let's set any unset limits to INFINITE instead
		   of NO_VAL */
		if (qos->grp_jobs == NO_VAL)
			qos->grp_jobs = INFINITE;
		if (qos->grp_submit_jobs == NO_VAL)
			qos->grp_submit_jobs = INFINITE;
		if (qos->grp_wall == NO_VAL)
			qos->grp_wall = INFINITE;

		if (qos->max_jobs_pu == NO_VAL)
			qos->max_jobs_pu = INFINITE;
		if (qos->max_submit_jobs_pu == NO_VAL)
			qos->max_submit_jobs_pu = INFINITE;
		if (qos->max_wall_pj == NO_VAL)
			qos->max_wall_pj = INFINITE;
		/* fall through */
	case SLURMDB_MODIFY_QOS:
	case SLURMDB_REMOVE_QOS:
		update_object->objects = list_create(
			slurmdb_destroy_qos_rec);
		break;
	case SLURMDB_ADD_WCKEY:
	case SLURMDB_MODIFY_WCKEY:
	case SLURMDB_REMOVE_WCKEY:
		xassert(wckey->cluster);
		update_object->objects = list_create(
			slurmdb_destroy_wckey_rec);
		break;
	case SLURMDB_ADD_CLUSTER:
	case SLURMDB_REMOVE_CLUSTER:
		/* This should only be the name of the cluster, and is
		   only used in the plugin for rollback purposes.
		*/
		update_object->objects = list_create(slurm_destroy_char);
		break;
	case SLURMDB_ADD_RES:
		xassert(res->name);
		xassert(res->server);
		/* fall through */
	case SLURMDB_MODIFY_RES:
	case SLURMDB_REMOVE_RES:
		xassert(res->id != NO_VAL);
		update_object->objects = list_create(
			slurmdb_destroy_res_rec);
		break;
	case SLURMDB_UPDATE_FEDS:
		update_object->objects = object;
		return SLURM_SUCCESS;
	case SLURMDB_UPDATE_NOTSET:
	default:
		error("unknown type set in update_object: %d", type);
		return SLURM_ERROR;
	}
	debug4("XXX: update object with type %d added", type);
	list_append(update_object->objects, object);
	return SLURM_SUCCESS;
}
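
A hedged usage sketch (not from this source, assuming the usual slurmdb
helpers such as list_create(), slurmdb_destroy_update_object(), xmalloc() and
FREE_NULL_LIST()): the caller owns the update list, and per the NOTE above the
object passed in must already be unlinked from any other list, since the
update list frees it when it is destroyed.

	List update_list = list_create(slurmdb_destroy_update_object);
	slurmdb_user_rec_t *user = xmalloc(sizeof(*user));	/* zeroed */

	user->name = xstrdup("alice");
	if (addto_update_list(update_list, SLURMDB_ADD_USER, user)
	    != SLURM_SUCCESS)
		slurmdb_destroy_user_rec(user);	/* list did not take it */

	/* ... ship or apply the accumulated updates ... */
	FREE_NULL_LIST(update_list);	/* frees user via the update object */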
Example No. 21
extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
					slurmdb_job_cond_t *job_cond,
					void **curr_cluster)
{
	List local_cluster_list = NULL;
	time_t now = time(NULL);
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	hostlist_t temp_hl = NULL;
	hostlist_iterator_t h_itr = NULL;
	char *query = NULL;
	int dims = 0;

	if (!job_cond || !job_cond->used_nodes)
		return NULL;

	if (!job_cond->cluster_list
	    || list_count(job_cond->cluster_list) != 1) {
		error("A query against nodes must be limited to "
		      "exactly one cluster.");
		return NULL;
	}

	/* get the dimensions of this cluster so we know how to deal
	   with the hostlists */
	query = xstrdup_printf("select dimensions, flags from %s where "
			       "name='%s'",
			       cluster_table,
			       (char *)list_peek(job_cond->cluster_list));

	debug4("%d(%s:%d) query\n%s",
	       mysql_conn->conn, THIS_FILE, __LINE__, query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		return NULL;
	}
	xfree(query);

	if (!(row = mysql_fetch_row(result))) {
		error("Couldn't get the dimensions of cluster '%s'.",
		      (char *)list_peek(job_cond->cluster_list));
		mysql_free_result(result);
		return NULL;
	}

	/* On a Cray system, when dealing with hostlists as we are
	   here, this always needs to be 1.
	*/
	if (slurm_atoul(row[1]) & CLUSTER_FLAG_CRAY_A)
		dims = 1;
	else
		dims = atoi(row[0]);

	mysql_free_result(result);

	temp_hl = hostlist_create_dims(job_cond->used_nodes, dims);
	if (hostlist_count(temp_hl) <= 0) {
		error("we didn't get any real hosts to look for.");
		goto no_hosts;
	}
	h_itr = hostlist_iterator_create(temp_hl);

	query = xstrdup_printf("select cluster_nodes, time_start, "
			       "time_end from \"%s_%s\" where node_name='' "
			       "&& cluster_nodes !=''",
			       (char *)list_peek(job_cond->cluster_list),
			       event_table);

	if (job_cond->usage_start) {
		if (!job_cond->usage_end)
			job_cond->usage_end = now;

		xstrfmtcat(query,
			   " && ((time_start < %ld) "
			   "&& (time_end >= %ld || time_end = 0))",
			   job_cond->usage_end, job_cond->usage_start);
	}

	if (debug_flags & DEBUG_FLAG_DB_JOB)
		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		goto no_hosts;
	}
	xfree(query);

	local_cluster_list = list_create(_destroy_local_cluster);
	while ((row = mysql_fetch_row(result))) {
		char *host = NULL;
		int loc = 0;
		local_cluster_t *local_cluster =
			xmalloc(sizeof(local_cluster_t));
		local_cluster->hl = hostlist_create_dims(row[0], dims);
		local_cluster->start = slurm_atoul(row[1]);
		local_cluster->end   = slurm_atoul(row[2]);
		local_cluster->asked_bitmap =
			bit_alloc(hostlist_count(local_cluster->hl));
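		/* asked_bitmap marks which of the requested hosts appear in
		 * this row's cluster_nodes set; rows matching none of the
		 * requested hosts are discarded below (bit_ffs() == -1). */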
		while ((host = hostlist_next_dims(h_itr, dims))) {
			if ((loc = hostlist_find(
				     local_cluster->hl, host)) != -1)
				bit_set(local_cluster->asked_bitmap, loc);
			free(host);
		}
		hostlist_iterator_reset(h_itr);
		if (bit_ffs(local_cluster->asked_bitmap) != -1) {
			list_append(local_cluster_list, local_cluster);
			if (local_cluster->end == 0) {
				local_cluster->end = now;
				(*curr_cluster) = local_cluster;
			} else if (!(*curr_cluster)
				   || (((local_cluster_t *)(*curr_cluster))->end
				       < local_cluster->end)) {
				(*curr_cluster) = local_cluster;
			}
		} else
			_destroy_local_cluster(local_cluster);
	}
	mysql_free_result(result);

	if (!list_count(local_cluster_list)) {
		FREE_NULL_LIST(local_cluster_list);
		local_cluster_list = NULL;
		goto no_hosts;
	}

no_hosts:

	hostlist_iterator_destroy(h_itr);
	hostlist_destroy(temp_hl);

	return local_cluster_list;
}
Example No. 22
/* NOTE: Ensure that mysql_conn->lock is NOT set on function entry */
static int _mysql_make_table_current(mysql_conn_t *mysql_conn, char *table_name,
				     storage_field_t *fields, char *ending)
{
	char *query = NULL;
	char *correct_query = NULL;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	int i = 0;
	List columns = NULL;
	ListIterator itr = NULL;
	char *col = NULL;
	int adding = 0;
	int run_update = 0;
	char *primary_key = NULL;
	char *unique_index = NULL;
	int old_primary = 0;
	char *old_index = NULL;
	char *temp = NULL, *temp2 = NULL;
	List keys_list = NULL;
	db_key_t *db_key = NULL;

	DEF_TIMERS;

	/* figure out the unique keys in the table */
	query = xstrdup_printf("show index from %s where non_unique=0",
			       table_name);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		return SLURM_ERROR;
	}
	xfree(query);
	while ((row = mysql_fetch_row(result))) {
		// row[2] is the key name
		if (!strcasecmp(row[2], "PRIMARY"))
			old_primary = 1;
		else if (!old_index)
			old_index = xstrdup(row[2]);
	}
	mysql_free_result(result);

	/* figure out the non-unique keys in the table */
	query = xstrdup_printf("show index from %s where non_unique=1",
			       table_name);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		return SLURM_ERROR;
	}
	xfree(query);

	itr = NULL;
	keys_list = list_create(_destroy_db_key);
	while ((row = mysql_fetch_row(result))) {
		if (!itr)
			itr = list_iterator_create(keys_list);
		else
			list_iterator_reset(itr);
		while ((db_key = list_next(itr))) {
			if (!strcmp(db_key->name, row[2]))
				break;
		}

		if (db_key) {
			xstrfmtcat(db_key->columns, ", %s", row[4]);
		} else {
			db_key = xmalloc(sizeof(db_key_t));
			db_key->name = xstrdup(row[2]); // name
			db_key->columns = xstrdup(row[4]); // column name
			list_append(keys_list, db_key); // don't use list_push
		}
	}
	mysql_free_result(result);

	if (itr) {
		list_iterator_destroy(itr);
		itr = NULL;
	}

	/* figure out the existing columns in the table */
	query = xstrdup_printf("show columns from %s", table_name);
	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		xfree(old_index);
		FREE_NULL_LIST(keys_list);
		return SLURM_ERROR;
	}
	xfree(query);
	columns = list_create(slurm_destroy_char);
	while ((row = mysql_fetch_row(result))) {
		col = xstrdup(row[0]); //Field
		list_append(columns, col);
	}
	mysql_free_result(result);


	itr = list_iterator_create(columns);
	query = xstrdup_printf("alter ignore table %s", table_name);
	correct_query = xstrdup_printf("alter ignore table %s", table_name);
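	/* 'query' accumulates the ALTER actions that will actually be run
	 * against the table, while 'correct_query' accumulates the same
	 * changes expressed against an already up-to-date table (columns as
	 * "modify" rather than "add"/"drop"); correct_query is what gets
	 * stored in table_defs_table below as the canonical definition. */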
	START_TIMER;
	while (fields[i].name) {
		int found = 0;

		list_iterator_reset(itr);
		while ((col = list_next(itr))) {
			if (!strcmp(col, fields[i].name)) {
				xstrfmtcat(query, " modify `%s` %s,",
					   fields[i].name,
					   fields[i].options);
				xstrfmtcat(correct_query, " modify `%s` %s,",
					   fields[i].name,
					   fields[i].options);
				list_delete_item(itr);
				found = 1;
				break;
			}
		}
		if (!found) {
			if (i) {
				info("adding column %s after %s in table %s",
				     fields[i].name,
				     fields[i-1].name,
				     table_name);
				xstrfmtcat(query, " add `%s` %s after %s,",
					   fields[i].name,
					   fields[i].options,
					   fields[i-1].name);
				xstrfmtcat(correct_query, " modify `%s` %s,",
					   fields[i].name,
					   fields[i].options);
			} else {
				info("adding column %s at the beginning "
				     "of table %s",
				     fields[i].name,
				     table_name);
				xstrfmtcat(query, " add `%s` %s first,",
					   fields[i].name,
					   fields[i].options);
				xstrfmtcat(correct_query, " modify `%s` %s,",
					   fields[i].name,
					   fields[i].options);
			}
			adding = 1;
		}

		i++;
	}

	list_iterator_reset(itr);
	while ((col = list_next(itr))) {
		adding = 1;
		info("dropping column %s from table %s", col, table_name);
		xstrfmtcat(query, " drop %s,", col);
	}

	list_iterator_destroy(itr);
	list_destroy(columns);

	if ((temp = strstr(ending, "primary key ("))) {
		int open = 0, close =0;
		int end = 0;
		while (temp[end++]) {
			if (temp[end] == '(')
				open++;
			else if (temp[end] == ')')
				close++;
			else
				continue;
			if (open == close)
				break;
		}
		if (temp[end]) {
			end++;
			primary_key = xstrndup(temp, end);
			if (old_primary) {
				xstrcat(query, " drop primary key,");
				xstrcat(correct_query, " drop primary key,");
			}
			xstrfmtcat(query, " add %s,",  primary_key);
			xstrfmtcat(correct_query, " add %s,",  primary_key);

			xfree(primary_key);
		}
	}

	if ((temp = strstr(ending, "unique index ("))) {
		int open = 0, close =0;
		int end = 0;
		while (temp[end++]) {
			if (temp[end] == '(')
				open++;
			else if (temp[end] == ')')
				close++;
			else
				continue;
			if (open == close)
				break;
		}
		if (temp[end]) {
			end++;
			unique_index = xstrndup(temp, end);
			if (old_index) {
				xstrfmtcat(query, " drop index %s,",
					   old_index);
				xstrfmtcat(correct_query, " drop index %s,",
					   old_index);
			}
			xstrfmtcat(query, " add %s,", unique_index);
			xstrfmtcat(correct_query, " add %s,", unique_index);
			xfree(unique_index);
		}
	}
	xfree(old_index);

	temp2 = ending;
	itr = list_iterator_create(keys_list);
	while ((temp = strstr(temp2, ", key "))) {
		int open = 0, close = 0, name_end = 0;
		int end = 5;
		char *new_key_name = NULL, *new_key = NULL;
		while (temp[end++]) {
			if (!name_end && (temp[end] == ' ')) {
				name_end = end;
				continue;
			} else if (temp[end] == '(') {
				open++;
				if (!name_end)
					name_end = end;
			} else if (temp[end] == ')')
				close++;
			else
				continue;
			if (open == close)
				break;
		}
		if (temp[end]) {
			end++;
			new_key_name = xstrndup(temp+6, name_end-6);
			new_key = xstrndup(temp+2, end-2); // skip ', '
			while ((db_key = list_next(itr))) {
				if (!strcmp(db_key->name, new_key_name)) {
					list_remove(itr);
					break;
				}
			}
			list_iterator_reset(itr);
			if (db_key) {
				xstrfmtcat(query,
					   " drop key %s,", db_key->name);
				xstrfmtcat(correct_query,
					   " drop key %s,", db_key->name);
				_destroy_db_key(db_key);
			} else
				info("adding %s to table %s",
				     new_key, table_name);

			xstrfmtcat(query, " add %s,",  new_key);
			xstrfmtcat(correct_query, " add %s,",  new_key);

			xfree(new_key);
			xfree(new_key_name);
		}
		temp2 = temp + end;
	}

	/* flush extra (old) keys */
	while ((db_key = list_next(itr))) {
		info("dropping key %s from table %s", db_key->name, table_name);
		xstrfmtcat(query, " drop key %s,", db_key->name);
	}
	list_iterator_destroy(itr);

	list_destroy(keys_list);

	query[strlen(query)-1] = ';';
	correct_query[strlen(correct_query)-1] = ';';
	//info("%d query\n%s", __LINE__, query);

	/* see if we have already done this definition */
	if (!adding) {
		char *quoted = slurm_add_slash_to_quotes(query);
		char *query2 = xstrdup_printf("select table_name from "
					      "%s where definition='%s'",
					      table_defs_table, quoted);
		MYSQL_RES *result = NULL;
		MYSQL_ROW row;

		xfree(quoted);
		run_update = 1;
		if ((result = mysql_db_query_ret(mysql_conn, query2, 0))) {
			if ((row = mysql_fetch_row(result)))
				run_update = 0;
			mysql_free_result(result);
		}
		xfree(query2);
		if (run_update) {
			run_update = 2;
			query2 = xstrdup_printf("select table_name from "
						"%s where table_name='%s'",
						table_defs_table, table_name);
			if ((result = mysql_db_query_ret(
				     mysql_conn, query2, 0))) {
				if ((row = mysql_fetch_row(result)))
					run_update = 1;
				mysql_free_result(result);
			}
			xfree(query2);
		}
	}

	/* if something has changed run the alter line */
	if (run_update || adding) {
		time_t now = time(NULL);
		char *query2 = NULL;
		char *quoted = NULL;

		if (run_update == 2)
			debug4("Table %s doesn't exist, adding", table_name);
		else
			debug("Table %s has changed.  Updating...", table_name);
		if (mysql_db_query(mysql_conn, query)) {
			xfree(query);
			return SLURM_ERROR;
		}
		quoted = slurm_add_slash_to_quotes(correct_query);
		query2 = xstrdup_printf("insert into %s (creation_time, "
					"mod_time, table_name, definition) "
					"values (%ld, %ld, '%s', '%s') "
					"on duplicate key update "
					"definition='%s', mod_time=%ld;",
					table_defs_table, now, now,
					table_name, quoted,
					quoted, now);
		xfree(quoted);
		if (mysql_db_query(mysql_conn, query2)) {
			xfree(query2);
			return SLURM_ERROR;
		}
		xfree(query2);
	}

	xfree(query);
	xfree(correct_query);
	query = xstrdup_printf("make table current %s", table_name);
	END_TIMER2(query);
	xfree(query);
	return SLURM_SUCCESS;
}
Example No. 23
File: eio.c Project: BYUHPC/slurm
int eio_handle_mainloop(eio_handle_t *eio)
{
	int            retval  = 0;
	struct pollfd *pollfds = NULL;
	eio_obj_t    **map     = NULL;
	unsigned int   maxnfds = 0, nfds = 0;
	unsigned int   n       = 0;

	xassert (eio != NULL);
	xassert (eio->magic == EIO_MAGIC);

	for (;;) {

		/* Alloc memory for pfds and map if needed */
		n = list_count(eio->obj_list);
		if (maxnfds < n) {
			maxnfds = n;
			xrealloc(pollfds, (maxnfds+1) * sizeof(struct pollfd));
			xrealloc(map,     maxnfds     * sizeof(eio_obj_t *  ));
			/*
			 * Note: xrealloc() also handles initial malloc
			 */
		}

		debug4("eio: handling events for %d objects",
		       list_count(eio->obj_list));
		nfds = _poll_setup_pollfds(pollfds, map, eio->obj_list);
		if ((nfds <= 0) ||
		    (pollfds == NULL))	/* Fix for CLANG false positive */
			goto done;

		/*
		 *  Setup eio handle signalling fd
		 */
		pollfds[nfds].fd     = eio->fds[0];
		pollfds[nfds].events = POLLIN;
		nfds++;

		xassert(nfds <= maxnfds + 1);

		if (_poll_internal(pollfds, nfds) < 0)
			goto error;

		if (pollfds[nfds-1].revents & POLLIN)
			_eio_wakeup_handler(eio);

		_poll_dispatch(pollfds, nfds-1, map, eio->obj_list);

		if (eio_shutdown_time &&
		    (difftime(time(NULL), eio_shutdown_time) >=
		     EIO_SHUTDOWN_WAIT)) {
			error("Abandoning IO %d secs after job shutdown "
			      "initiated", EIO_SHUTDOWN_WAIT);
			break;
		}
	}
  error:
	retval = -1;
  done:
	xfree(pollfds);
	xfree(map);
	return retval;
}
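
A hedged usage sketch (not from this source) of how a caller typically feeds
objects into this loop, assuming the eio_obj_create()/eio_new_obj() helpers
and the io_operations callback table declared in src/common/eio.h; the exact
field names and signatures below are assumptions:

	static bool _readable(eio_obj_t *obj)
	{
		return !obj->shutdown;	/* keep polling until shut down */
	}

	static int _handle_read(eio_obj_t *obj, List objs)
	{
		char buf[256];
		if (read(obj->fd, buf, sizeof(buf)) <= 0)
			obj->shutdown = true;	/* let the loop drop it */
		return 0;
	}

	static struct io_operations ops = {
		.readable    = _readable,
		.handle_read = _handle_read,
	};

	/* eio assumed to come from eio_handle_create() */
	eio_new_obj(eio, eio_obj_create(fd, &ops, NULL));
	eio_handle_mainloop(eio);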
Example No. 24
/* Fill in all the accounts this user is coordinator over.  This
 * will fill in all the sub accounts they are coordinator over also.
 */
static int _get_user_coords(mysql_conn_t *mysql_conn, slurmdb_user_rec_t *user)
{
	char *query = NULL;
	slurmdb_coord_rec_t *coord = NULL;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	ListIterator itr = NULL, itr2 = NULL;
	char *cluster_name = NULL;

	if (!user) {
		error("We need a user to fill in.");
		return SLURM_ERROR;
	}

	if (!user->coord_accts)
		user->coord_accts = list_create(slurmdb_destroy_coord_rec);

	query = xstrdup_printf(
		"select acct from %s where user='%s' && deleted=0",
		acct_coord_table, user->name);

	if (!(result =
	      mysql_db_query_ret(mysql_conn, query, 0))) {
		xfree(query);
		return SLURM_ERROR;
	}
	xfree(query);

	while ((row = mysql_fetch_row(result))) {
		coord = xmalloc(sizeof(slurmdb_coord_rec_t));
		list_append(user->coord_accts, coord);
		coord->name = xstrdup(row[0]);
		coord->direct = 1;
	}
	mysql_free_result(result);

	if (!list_count(user->coord_accts))
		return SLURM_SUCCESS;

	slurm_mutex_lock(&as_mysql_cluster_list_lock);
	itr2 = list_iterator_create(as_mysql_cluster_list);
	itr = list_iterator_create(user->coord_accts);
	while ((cluster_name = list_next(itr2))) {
		int set = 0;
		if (query)
			xstrcat(query, " union ");

		while ((coord = list_next(itr))) {
			if (set)
				xstrcat(query, " || ");
			else
				xstrfmtcat(query,
					   "select distinct t1.acct from "
					   "\"%s_%s\" as t1, \"%s_%s\" "
					   "as t2 where t1.deleted=0 && (",
					   cluster_name, assoc_table,
					   cluster_name, assoc_table);
			/* Make sure we don't get the same
			 * account back since we want to keep
			 * track of the sub-accounts.
			 */
			xstrfmtcat(query, "(t2.acct='%s' "
				   "&& t1.lft between t2.lft "
				   "and t2.rgt && t1.user='' "
				   "&& t1.acct!='%s')",
				   coord->name, coord->name);
			set = 1;
		}
		list_iterator_reset(itr);
		if (set)
			xstrcat(query, ")");

	}
	list_iterator_destroy(itr2);
	slurm_mutex_unlock(&as_mysql_cluster_list_lock);

	if (query) {
		debug4("%d(%s:%d) query\n%s",
		       mysql_conn->conn, THIS_FILE, __LINE__, query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(query);
			return SLURM_ERROR;
		}
		xfree(query);

		while ((row = mysql_fetch_row(result))) {
			list_iterator_reset(itr);
			while ((coord = list_next(itr))) {
				if (!strcmp(coord->name, row[0]))
					break;
			}

			if (coord)
				continue;

			coord = xmalloc(sizeof(slurmdb_coord_rec_t));
			list_append(user->coord_accts, coord);
			coord->name = xstrdup(row[0]);
			coord->direct = 0;
		}
		mysql_free_result(result);
	}
	list_iterator_destroy(itr);

	return SLURM_SUCCESS;
}
Example No. 25
extern int as_mysql_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
				 List cluster_list)
{
	ListIterator itr = NULL;
	int rc = SLURM_SUCCESS;
	slurmdb_cluster_rec_t *object = NULL;
	char *cols = NULL, *vals = NULL, *extra = NULL,
		*query = NULL, *tmp_extra = NULL;
	time_t now = time(NULL);
	char *user_name = NULL;
	int affect_rows = 0;
	int added = 0;
	List assoc_list = NULL;
	slurmdb_assoc_rec_t *assoc = NULL;

	if (check_connection(mysql_conn) != SLURM_SUCCESS)
		return ESLURM_DB_CONNECTION;

	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER))
		return ESLURM_ACCESS_DENIED;

	assoc_list = list_create(slurmdb_destroy_assoc_rec);

	user_name = uid_to_string((uid_t) uid);
	/* Adding tables cannot be rolled back, and if an error occurs
	   there is no easy way to remove entries already made in the
	   database, so we create all the tables first and only add the
	   clusters to the mix once that has worked out.
	*/
	itr = list_iterator_create(cluster_list);
	while ((object = list_next(itr))) {
		if (!object->name || !object->name[0]) {
			error("We need a cluster name to add.");
			rc = SLURM_ERROR;
			list_remove(itr);
			continue;
		}
		if ((rc = create_cluster_tables(mysql_conn,
						object->name))
		    != SLURM_SUCCESS) {
			xfree(extra);
			xfree(cols);
			xfree(vals);
			added = 0;
			if (mysql_errno(mysql_conn->db_conn)
			    == ER_WRONG_TABLE_NAME)
				rc = ESLURM_BAD_NAME;
			goto end_it;
		}
	}

	/* Now that all the tables were created successfully lets go
	   ahead and add it to the system.
	*/
	list_iterator_reset(itr);
	while ((object = list_next(itr))) {
		xstrcat(cols, "creation_time, mod_time, acct");
		xstrfmtcat(vals, "%ld, %ld, 'root'", now, now);
		xstrfmtcat(extra, ", mod_time=%ld", now);
		if (object->root_assoc)
			setup_assoc_limits(object->root_assoc, &cols,
					   &vals, &extra,
					   QOS_LEVEL_SET, 1);
		xstrfmtcat(query,
			   "insert into %s (creation_time, mod_time, "
			   "name, classification) "
			   "values (%ld, %ld, '%s', %u) "
			   "on duplicate key update deleted=0, mod_time=%ld, "
			   "control_host='', control_port=0, "
			   "classification=%u, flags=0",
			   cluster_table,
			   now, now, object->name, object->classification,
			   now, object->classification);
		if (debug_flags & DEBUG_FLAG_DB_ASSOC)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		rc = mysql_db_query(mysql_conn, query);
		xfree(query);
		if (rc != SLURM_SUCCESS) {
			error("Couldn't add cluster %s", object->name);
			xfree(extra);
			xfree(cols);
			xfree(vals);
			added=0;
			break;
		}

		affect_rows = last_affected_rows(mysql_conn);

		if (!affect_rows) {
			debug2("nothing changed %d", affect_rows);
			xfree(extra);
			xfree(cols);
			xfree(vals);
			continue;
		}

		xstrfmtcat(query,
			   "insert into \"%s_%s\" (%s, lft, rgt) "
			   "values (%s, 1, 2) "
			   "on duplicate key update deleted=0, "
			   "id_assoc=LAST_INSERT_ID(id_assoc)%s;",
			   object->name, assoc_table, cols,
			   vals,
			   extra);

		xfree(cols);
		xfree(vals);
		if (debug_flags & DEBUG_FLAG_DB_ASSOC)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);

		rc = mysql_db_query(mysql_conn, query);
		xfree(query);

		if (rc != SLURM_SUCCESS) {
			error("Couldn't add cluster root assoc");
			xfree(extra);
			added=0;
			break;
		}

		/* we always have a ', ' as the first 2 chars */
		tmp_extra = slurm_add_slash_to_quotes(extra+2);

		xstrfmtcat(query,
			   "insert into %s "
			   "(timestamp, action, name, actor, info) "
			   "values (%ld, %u, '%s', '%s', '%s');",
			   txn_table, now, DBD_ADD_CLUSTERS,
			   object->name, user_name, tmp_extra);
		xfree(tmp_extra);
		xfree(extra);
		debug4("%d(%s:%d) query\n%s",
		       mysql_conn->conn, THIS_FILE, __LINE__, query);

		rc = mysql_db_query(mysql_conn, query);
		xfree(query);
		if (rc != SLURM_SUCCESS) {
			error("Couldn't add txn");
		} else {
			ListIterator check_itr;
			char *tmp_name;

			added++;
			/* add it to the list and sort */
			slurm_mutex_lock(&as_mysql_cluster_list_lock);
			check_itr = list_iterator_create(as_mysql_cluster_list);
			while ((tmp_name = list_next(check_itr))) {
				if (!strcmp(tmp_name, object->name))
					break;
			}
			list_iterator_destroy(check_itr);
			if (!tmp_name) {
				list_append(as_mysql_cluster_list,
					    xstrdup(object->name));
				list_sort(as_mysql_cluster_list,
					  (ListCmpF)slurm_sort_char_list_asc);
			} else
				error("Cluster %s(%s) appears to already be in "
				      "our cache list, not adding.", tmp_name,
				      object->name);
			slurm_mutex_unlock(&as_mysql_cluster_list_lock);
		}
		/* Add user root by default to run from the root
		 * association.  The assoc gets popped off the list when
		 * it is added, so we have to rebuild it on every
		 * iteration here.
		 */
		assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
		slurmdb_init_assoc_rec(assoc, 0);
		list_append(assoc_list, assoc);

		assoc->cluster = xstrdup(object->name);
		assoc->user = xstrdup("root");
		assoc->acct = xstrdup("root");
		assoc->is_def = 1;

		if (as_mysql_add_assocs(mysql_conn, uid, assoc_list)
		    == SLURM_ERROR) {
			error("Problem adding root user association");
			rc = SLURM_ERROR;
		}
	}
end_it:
	list_iterator_destroy(itr);
	xfree(user_name);

	FREE_NULL_LIST(assoc_list);

	if (!added)
		reset_mysql_conn(mysql_conn);

	return rc;
}
Example No. 26
extern int as_mysql_modify_resv(mysql_conn_t *mysql_conn,
				slurmdb_reservation_rec_t *resv)
{
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	int rc = SLURM_SUCCESS;
	char *cols = NULL, *vals = NULL, *extra = NULL,
		*query = NULL;
	time_t start = 0, now = time(NULL);
	int i;
	int set = 0;

	char *resv_req_inx[] = {
		"assoclist",
		"time_start",
		"time_end",
		"resv_name",
		"nodelist",
		"node_inx",
		"flags",
		"tres"
	};
	enum {
		RESV_ASSOCS,
		RESV_START,
		RESV_END,
		RESV_NAME,
		RESV_NODES,
		RESV_NODE_INX,
		RESV_FLAGS,
		RESV_TRES,
		RESV_COUNT
	};

	if (!resv) {
		error("No reservation was given to edit");
		return SLURM_ERROR;
	}

	if (!resv->id) {
		error("We need an id to edit a reservation.");
		return SLURM_ERROR;
	}
	if (!resv->time_start) {
		error("We need a start time to edit a reservation.");
		return SLURM_ERROR;
	}
	if (!resv->cluster || !resv->cluster[0]) {
		error("We need a cluster name to edit a reservation.");
		return SLURM_ERROR;
	}

	if (!resv->time_start_prev) {
		error("We need a time to check for last "
		      "start of reservation.");
		return SLURM_ERROR;
	}

	xstrfmtcat(cols, "%s", resv_req_inx[0]);
	for (i=1; i<RESV_COUNT; i++) {
		xstrfmtcat(cols, ", %s", resv_req_inx[i]);
	}

	/* check for both the previous start and the current start: most
	   likely the start time hasn't changed, but something else may
	   have changed since the last time we updated the
	   reservation. */
	query = xstrdup_printf("select %s from \"%s_%s\" where id_resv=%u "
			       "and (time_start=%ld || time_start=%ld) "
			       "and deleted=0 order by time_start desc "
			       "limit 1 FOR UPDATE;",
			       cols, resv->cluster, resv_table, resv->id,
			       resv->time_start, resv->time_start_prev);
try_again:
	debug4("%d(%s:%d) query\n%s",
	       mysql_conn->conn, THIS_FILE, __LINE__, query);
	if (!(result = mysql_db_query_ret(
		      mysql_conn, query, 0))) {
		rc = SLURM_ERROR;
		goto end_it;
	}
	if (!(row = mysql_fetch_row(result))) {
		rc = SLURM_ERROR;
		mysql_free_result(result);
		error("There is no reservation by id %u, "
		      "time_start %ld, and cluster '%s'", resv->id,
		      resv->time_start_prev, resv->cluster);
		if (!set && resv->time_end) {
			/* This should never really happen,
			   but just in case the controller and the
			   database get out of sync we check to see
			   if there is a reservation that is not
			   deleted and hasn't ended yet. */
			xfree(query);
			query = xstrdup_printf(
				"select %s from \"%s_%s\" where id_resv=%u "
				"and time_start <= %ld and deleted=0 "
				"order by time_start desc "
				"limit 1;",
				cols, resv->cluster, resv_table, resv->id,
				resv->time_end);
			set = 1;
			goto try_again;
		}
		goto end_it;
	}

	start = slurm_atoul(row[RESV_START]);

	xfree(query);
	xfree(cols);

	set = 0;

	/* check differences here */

	if (!resv->name
	    && row[RESV_NAME] && row[RESV_NAME][0])
		// if this changes we just update the
		// record; no need to create a new one since
		// this doesn't really affect the
		// reservation, accounting-wise
		resv->name = xstrdup(row[RESV_NAME]);

	if (resv->assocs)
		set = 1;
	else if (row[RESV_ASSOCS] && row[RESV_ASSOCS][0])
		resv->assocs = xstrdup(row[RESV_ASSOCS]);

	if (resv->flags != NO_VAL)
		set = 1;
	else
		resv->flags = slurm_atoul(row[RESV_FLAGS]);

	if (resv->nodes)
		set = 1;
	else if (row[RESV_NODES] && row[RESV_NODES][0]) {
		resv->nodes = xstrdup(row[RESV_NODES]);
		resv->node_inx = xstrdup(row[RESV_NODE_INX]);
	}

	if (!resv->time_end)
		resv->time_end = slurm_atoul(row[RESV_END]);

	if (resv->tres_str)
		set = 1;
	else if (row[RESV_TRES] && row[RESV_TRES][0])
		resv->tres_str = xstrdup(row[RESV_TRES]);

	mysql_free_result(result);

	_setup_resv_limits(resv, &cols, &vals, &extra);
	/* use start below instead of resv->time_start_prev
	 * just in case we have a different one from being out
	 * of sync
	 */
	if ((start > now) || !set) {
		/* we haven't started the reservation yet, or we are
		   only changing the associations or end time, which
		   we can just update in place */
		query = xstrdup_printf("update \"%s_%s\" set deleted=0%s "
				       "where deleted=0 and id_resv=%u "
				       "and time_start=%ld;",
				       resv->cluster, resv_table,
				       extra, resv->id, start);
	} else {
		/* time_start was already handled above and we
		 * changed something that requires a new
		 * entry. */
		query = xstrdup_printf("update \"%s_%s\" set time_end=%ld "
				       "where deleted=0 && id_resv=%u "
				       "and time_start=%ld;",
				       resv->cluster, resv_table,
				       resv->time_start-1,
				       resv->id, start);
		xstrfmtcat(query,
			   "insert into \"%s_%s\" (id_resv%s) "
			   "values (%u%s) "
			   "on duplicate key update deleted=0%s;",
			   resv->cluster, resv_table, cols, resv->id,
			   vals, extra);
	}

	if (debug_flags & DEBUG_FLAG_DB_RESV)
		DB_DEBUG(mysql_conn->conn, "query\n%s", query);

	rc = mysql_db_query(mysql_conn, query);

end_it:

	xfree(query);
	xfree(cols);
	xfree(vals);
	xfree(extra);

	return rc;
}
Example No. 27
static int _attempt_backfill(void)
{
	DEF_TIMERS;
	bool filter_root = false;
	List job_queue;
	job_queue_rec_t *job_queue_rec;
	slurmdb_qos_rec_t *qos_ptr = NULL;
	int i, j, node_space_recs;
	struct job_record *job_ptr;
	struct part_record *part_ptr;
	uint32_t end_time, end_reserve;
	uint32_t time_limit, comp_time_limit, orig_time_limit;
	uint32_t min_nodes, max_nodes, req_nodes;
	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
	bitstr_t *exc_core_bitmap = NULL;
	time_t now, sched_start, later_start, start_res, resv_end;
	node_space_map_t *node_space;
	struct timeval bf_time1, bf_time2;
	int sched_timeout = 2, yield_sleep = 1;
	int rc = 0;
	int job_test_count = 0;
	uint32_t *uid = NULL, nuser = 0;
	uint16_t *njobs = NULL;
	bool already_counted;
	uint32_t reject_array_job_id = 0;

#ifdef HAVE_CRAY
	/*
	 * Run a Basil Inventory immediately before setting up the schedule
	 * plan, to avoid race conditions caused by ALPS node state change.
	 * Needs to be done with the node-state lock taken.
	 */
	START_TIMER;
	if (select_g_reconfigure()) {
		debug4("backfill: not scheduling due to ALPS");
		return SLURM_SUCCESS;
	}
	END_TIMER;
	if (debug_flags & DEBUG_FLAG_BACKFILL)
		info("backfill: ALPS inventory completed, %s", TIME_STR);

	/* The Basil inventory can take a long time to complete. Process
	 * pending RPCs before starting the backfill scheduling logic */
	_yield_locks(1);
#endif

	START_TIMER;
	if (debug_flags & DEBUG_FLAG_BACKFILL)
		info("backfill: beginning");
	sched_start = now = time(NULL);

	if (slurm_get_root_filter())
		filter_root = true;

	job_queue = build_job_queue(true);
	if (list_count(job_queue) == 0) {
		debug("backfill: no jobs to backfill");
		list_destroy(job_queue);
		return 0;
	}

	gettimeofday(&bf_time1, NULL);

	slurmctld_diag_stats.bf_queue_len = list_count(job_queue);
	slurmctld_diag_stats.bf_queue_len_sum += slurmctld_diag_stats.
						 bf_queue_len;
	slurmctld_diag_stats.bf_last_depth = 0;
	slurmctld_diag_stats.bf_last_depth_try = 0;
	slurmctld_diag_stats.bf_when_last_cycle = now;
	bf_last_yields = 0;
	slurmctld_diag_stats.bf_active = 1;

	node_space = xmalloc(sizeof(node_space_map_t) *
			     (max_backfill_job_cnt + 3));
	node_space[0].begin_time = sched_start;
	node_space[0].end_time = sched_start + backfill_window;
	node_space[0].avail_bitmap = bit_copy(avail_node_bitmap);
	node_space[0].next = 0;
	node_space_recs = 1;
	if (debug_flags & DEBUG_FLAG_BACKFILL)
		_dump_node_space_table(node_space);

	if (max_backfill_job_per_user) {
		uid = xmalloc(BF_MAX_USERS * sizeof(uint32_t));
		njobs = xmalloc(BF_MAX_USERS * sizeof(uint16_t));
	}
	while ((job_queue_rec = (job_queue_rec_t *)
				list_pop_bottom(job_queue, sort_job_queue2))) {
		job_ptr  = job_queue_rec->job_ptr;
		orig_time_limit = job_ptr->time_limit;

		if ((time(NULL) - sched_start) >= sched_timeout) {
			uint32_t save_time_limit = job_ptr->time_limit;
			job_ptr->time_limit = orig_time_limit;
			if (debug_flags & DEBUG_FLAG_BACKFILL) {
				END_TIMER;
				info("backfill: completed yielding locks "
				     "after testing %d jobs, %s",
				     job_test_count, TIME_STR);
			}
			if (_yield_locks(yield_sleep) && !backfill_continue) {
				if (debug_flags & DEBUG_FLAG_BACKFILL) {
					info("backfill: system state changed, "
					     "breaking out after testing %d "
					     "jobs", job_test_count);
				}
				rc = 1;
				break;
			}
			job_ptr->time_limit = save_time_limit;
			/* Reset backfill scheduling timers, resume testing */
			sched_start = time(NULL);
			job_test_count = 0;
			START_TIMER;
		}

		part_ptr = job_queue_rec->part_ptr;
		job_test_count++;

		xfree(job_queue_rec);
		if (!IS_JOB_PENDING(job_ptr))
			continue;	/* started in other partition */
		if (!avail_front_end(job_ptr))
			continue;	/* No available frontend for this job */
		if (job_ptr->array_task_id != (uint16_t) NO_VAL) {
			if (reject_array_job_id == job_ptr->array_job_id)
				continue;  /* already rejected array element */
			/* assume reject whole array for now, clear if OK */
			reject_array_job_id = job_ptr->array_job_id;
		}
		job_ptr->part_ptr = part_ptr;

		if (debug_flags & DEBUG_FLAG_BACKFILL)
			info("backfill test for job %u", job_ptr->job_id);

		slurmctld_diag_stats.bf_last_depth++;
		already_counted = false;

		if (max_backfill_job_per_user) {
			for (j = 0; j < nuser; j++) {
				if (job_ptr->user_id == uid[j]) {
					njobs[j]++;
					if (debug_flags & DEBUG_FLAG_BACKFILL)
						debug("backfill: user %u: "
						      "#jobs %u",
						      uid[j], njobs[j]);
					break;
				}
			}
			if (j == nuser) { /* user not found */
				if (nuser < BF_MAX_USERS) {
					uid[j] = job_ptr->user_id;
					njobs[j] = 1;
					nuser++;
				} else {
					error("backfill: too many users in "
					      "queue. Consider increasing "
					      "BF_MAX_USERS");
				}
				if (debug_flags & DEBUG_FLAG_BACKFILL)
					debug2("backfill: found new user %u. "
					       "Total #users now %u",
					       job_ptr->user_id, nuser);
			} else {
				if (njobs[j] > max_backfill_job_per_user) {
					/* skip job */
					if (debug_flags & DEBUG_FLAG_BACKFILL)
						debug("backfill: have already "
						      "checked %u jobs for "
						      "user %u; skipping "
						      "job %u",
						      max_backfill_job_per_user,
						      job_ptr->user_id,
						      job_ptr->job_id);
					continue;
				}
			}
		}

		if (((part_ptr->state_up & PARTITION_SCHED) == 0) ||
		    (part_ptr->node_bitmap == NULL))
			continue;
		if ((part_ptr->flags & PART_FLAG_ROOT_ONLY) && filter_root)
			continue;

		if ((!job_independent(job_ptr, 0)) ||
		    (license_job_test(job_ptr, time(NULL)) != SLURM_SUCCESS))
			continue;

		/* Determine minimum and maximum node counts */
		min_nodes = MAX(job_ptr->details->min_nodes,
				part_ptr->min_nodes);
		if (job_ptr->details->max_nodes == 0)
			max_nodes = part_ptr->max_nodes;
		else
			max_nodes = MIN(job_ptr->details->max_nodes,
					part_ptr->max_nodes);
		max_nodes = MIN(max_nodes, 500000);     /* prevent overflows */
		if (job_ptr->details->max_nodes)
			req_nodes = max_nodes;
		else
			req_nodes = min_nodes;
		if (min_nodes > max_nodes) {
			/* job's min_nodes exceeds partition's max_nodes */
			continue;
		}

		/* Determine job's expected completion time */
		if (job_ptr->time_limit == NO_VAL) {
			if (part_ptr->max_time == INFINITE)
				time_limit = 365 * 24 * 60; /* one year */
			else
				time_limit = part_ptr->max_time;
		} else {
			if (part_ptr->max_time == INFINITE)
				time_limit = job_ptr->time_limit;
			else
				time_limit = MIN(job_ptr->time_limit,
						 part_ptr->max_time);
		}
		comp_time_limit = time_limit;
		qos_ptr = job_ptr->qos_ptr;
		if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE) &&
		    slurm_get_preempt_mode())
			time_limit = job_ptr->time_limit = 1;
		else if (job_ptr->time_min && (job_ptr->time_min < time_limit))
			time_limit = job_ptr->time_limit = job_ptr->time_min;

		/* Determine impact of any resource reservations */
		later_start = now;
 TRY_LATER:
		if ((time(NULL) - sched_start) >= sched_timeout) {
			uint32_t save_time_limit = job_ptr->time_limit;
			job_ptr->time_limit = orig_time_limit;
			if (debug_flags & DEBUG_FLAG_BACKFILL) {
				END_TIMER;
				info("backfill: completed yielding locks 2 "
				     "after testing %d jobs, %s",
				     job_test_count, TIME_STR);
			}
			if (_yield_locks(yield_sleep) && !backfill_continue) {
				if (debug_flags & DEBUG_FLAG_BACKFILL) {
					info("backfill: system state changed, "
					     "breaking out after testing %d "
					     "jobs", job_test_count);
				}
				rc = 1;
				break;
			}
			job_ptr->time_limit = save_time_limit;
			/* Reset backfill scheduling timers, resume testing */
			sched_start = time(NULL);
			job_test_count = 1;
			START_TIMER;
		}

		FREE_NULL_BITMAP(avail_bitmap);
		FREE_NULL_BITMAP(exc_core_bitmap);
		start_res   = later_start;
		later_start = 0;
		j = job_test_resv(job_ptr, &start_res, true, &avail_bitmap,
				  &exc_core_bitmap);
		if (j != SLURM_SUCCESS) {
			job_ptr->time_limit = orig_time_limit;
			continue;
		}
		if (start_res > now)
			end_time = (time_limit * 60) + start_res;
		else
			end_time = (time_limit * 60) + now;
		resv_end = find_resv_end(start_res);
		/* Identify usable nodes for this job */
		bit_and(avail_bitmap, part_ptr->node_bitmap);
		bit_and(avail_bitmap, up_node_bitmap);
		for (j=0; ; ) {
			if ((node_space[j].end_time > start_res) &&
			     node_space[j].next && (later_start == 0))
				later_start = node_space[j].end_time;
			if (node_space[j].end_time <= start_res)
				;
			else if (node_space[j].begin_time <= end_time) {
				bit_and(avail_bitmap,
					node_space[j].avail_bitmap);
			} else
				break;
			if ((j = node_space[j].next) == 0)
				break;
		}
		if ((resv_end++) &&
		    ((later_start == 0) || (resv_end < later_start))) {
			later_start = resv_end;
		}

		if (job_ptr->details->exc_node_bitmap) {
			bit_not(job_ptr->details->exc_node_bitmap);
			bit_and(avail_bitmap,
				job_ptr->details->exc_node_bitmap);
			bit_not(job_ptr->details->exc_node_bitmap);
		}

		/* Test if insufficient nodes remain OR
		 *	required nodes missing OR
		 *	nodes lack features */
		if ((bit_set_count(avail_bitmap) < min_nodes) ||
		    ((job_ptr->details->req_node_bitmap) &&
		     (!bit_super_set(job_ptr->details->req_node_bitmap,
				     avail_bitmap))) ||
		    (job_req_node_filter(job_ptr, avail_bitmap))) {
			if (later_start) {
				job_ptr->start_time = 0;
				goto TRY_LATER;
			}
			/* Job can not start until too far in the future */
			job_ptr->time_limit = orig_time_limit;
			job_ptr->start_time = sched_start + backfill_window;
			continue;
		}

		/* Identify nodes which are definitely off limits */
		FREE_NULL_BITMAP(resv_bitmap);
		resv_bitmap = bit_copy(avail_bitmap);
		bit_not(resv_bitmap);

		/* this is the time consuming operation */
		debug2("backfill: entering _try_sched for job %u.",
		       job_ptr->job_id);

		if (!already_counted) {
			slurmctld_diag_stats.bf_last_depth_try++;
			already_counted = true;
		}

		j = _try_sched(job_ptr, &avail_bitmap, min_nodes, max_nodes,
			       req_nodes, exc_core_bitmap);

		now = time(NULL);
		if (j != SLURM_SUCCESS) {
			job_ptr->time_limit = orig_time_limit;
			job_ptr->start_time = 0;
			continue;	/* not runnable */
		}

		if (start_res > job_ptr->start_time) {
			job_ptr->start_time = start_res;
			last_job_update = now;
		}
		if (job_ptr->start_time <= now) {
			uint32_t save_time_limit = job_ptr->time_limit;
			int rc = _start_job(job_ptr, resv_bitmap);
			if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE)) {
				if (orig_time_limit == NO_VAL)
					job_ptr->time_limit = comp_time_limit;
				else
					job_ptr->time_limit = orig_time_limit;
				job_ptr->end_time = job_ptr->start_time +
						    (job_ptr->time_limit * 60);
			} else if ((rc == SLURM_SUCCESS) && job_ptr->time_min) {
				/* Set time limit as high as possible */
				job_ptr->time_limit = comp_time_limit;
				job_ptr->end_time = job_ptr->start_time +
						    (comp_time_limit * 60);
				_reset_job_time_limit(job_ptr, now,
						      node_space);
				time_limit = job_ptr->time_limit;
			} else {
				job_ptr->time_limit = orig_time_limit;
			}
			if (rc == ESLURM_ACCOUNTING_POLICY) {
				/* Unknown future start time, just skip job */
				job_ptr->start_time = 0;
				continue;
			} else if (rc != SLURM_SUCCESS) {
				/* Planned to start job, but something bad
				 * happened. */
				job_ptr->start_time = 0;
				break;
			} else {
				/* Started this job, move to next one */
				reject_array_job_id = 0;

				/* Update the database if job time limit
				 * changed and move to next job */
				if (save_time_limit != job_ptr->time_limit)
					jobacct_storage_g_job_start(acct_db_conn,
								    job_ptr);
				continue;
			}
		} else
			job_ptr->time_limit = orig_time_limit;

		if (later_start && (job_ptr->start_time > later_start)) {
			/* Try later when some nodes currently reserved for
			 * pending jobs are free */
			job_ptr->start_time = 0;
			goto TRY_LATER;
		}

		if (job_ptr->start_time > (sched_start + backfill_window)) {
			/* Starts too far in the future to worry about */
			continue;
		}

		if (node_space_recs >= max_backfill_job_cnt) {
			/* Already have too many jobs to deal with */
			break;
		}

		end_reserve = job_ptr->start_time + (time_limit * 60);
		if (_test_resv_overlap(node_space, avail_bitmap,
				       job_ptr->start_time, end_reserve)) {
			/* This job overlaps with an existing reservation for
			 * a job to be backfill scheduled, which the sched
			 * plugin does not know about. Try again later. */
			later_start = job_ptr->start_time;
			job_ptr->start_time = 0;
			goto TRY_LATER;
		}

		/*
		 * Add reservation to scheduling table if appropriate
		 */
		if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE))
			continue;
		reject_array_job_id = 0;
		bit_not(avail_bitmap);
		_add_reservation(job_ptr->start_time, end_reserve,
				 avail_bitmap, node_space, &node_space_recs);
		if (debug_flags & DEBUG_FLAG_BACKFILL)
			_dump_node_space_table(node_space);
	}
	xfree(uid);
	xfree(njobs);
	FREE_NULL_BITMAP(avail_bitmap);
	FREE_NULL_BITMAP(exc_core_bitmap);
	FREE_NULL_BITMAP(resv_bitmap);

	for (i=0; ; ) {
		FREE_NULL_BITMAP(node_space[i].avail_bitmap);
		if ((i = node_space[i].next) == 0)
			break;
	}
	xfree(node_space);
	list_destroy(job_queue);
	gettimeofday(&bf_time2, NULL);
	_do_diag_stats(&bf_time1, &bf_time2, yield_sleep);
	if (debug_flags & DEBUG_FLAG_BACKFILL) {
		END_TIMER;
		info("backfill: completed testing %d jobs, %s",
		     job_test_count, TIME_STR);
	}
	return rc;
}
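
The backfill pass above plans jobs against a node_space table: a time-ordered, index-linked list of records, each holding a window (begin_time, end_time) and a bitmap of nodes still free in that window, terminated by next == 0. A minimal sketch of the traversal, assuming an illustrative struct in place of node_space_map_t and a uint64_t mask in place of SLURM's bitstr_t:

/* Sketch only: traversal of a node_space-style timeline like the loops
 * above and in the cleanup at the end.  The struct mirrors what
 * node_space_map_t appears to hold; avail_mask stands in for the
 * bitstr_t avail_bitmap. */
#include <stdint.h>
#include <time.h>

typedef struct {
	time_t   begin_time;
	time_t   end_time;
	uint64_t avail_mask;	/* stand-in for avail_bitmap */
	int      next;		/* index of next record; 0 terminates */
} node_space_rec_t;

/* AND together the availability of every record overlapping
 * [start, end), mirroring the bit_and() loop in _attempt_backfill(). */
static uint64_t usable_nodes(const node_space_rec_t *node_space,
			     time_t start, time_t end)
{
	uint64_t avail = ~UINT64_C(0);
	int j = 0;

	while (1) {
		if (node_space[j].end_time <= start)
			;	/* record ends before our window */
		else if (node_space[j].begin_time <= end)
			avail &= node_space[j].avail_mask;
		else
			break;	/* records are time ordered; done */
		if ((j = node_space[j].next) == 0)
			break;
	}
	return avail;
}
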
Exemplo n.º 28
0
/*
 * _get_process_data() - Build a table of all current processes
 *
 * IN:	none
 *
 * OUT:	none
 *
 * THREADSAFE! Only one thread ever gets here.
 *
 * Assumption:
 *    Any file with a name of the form "/proc/[0-9]+/stat"
 *    is a Linux-style stat entry. We disregard the data if they look
 *    wrong.
 */
static void _get_process_data(void)
{
	struct procsinfo proc;
	pid_t *pids = NULL;
	int npids = 0;
	int i;
	uint32_t total_job_mem = 0, total_job_vsize = 0;
	int pid = 0;
	static int processing = 0;
	prec_t *prec = NULL;
	struct jobacctinfo *jobacct = NULL;
	List prec_list = NULL;
	ListIterator itr;
	ListIterator itr2;

	if (!pgid_plugin && (cont_id == (uint64_t)NO_VAL)) {
		debug("cont_id hasn't been set yet not running poll");
		return;
	}

	if(processing) {
		debug("already running, returning");
		return;
	}

	processing = 1;
	prec_list = list_create(_destroy_prec);

	if(!pgid_plugin) {
		/* get only the processes in the proctrack container */
		slurm_container_get_pids(cont_id, &pids, &npids);
		if (!npids) {
			debug4("no pids in this container %"PRIu64"", cont_id);
			goto finished;
		}
		for (i = 0; i < npids; i++) {
			pid = pids[i];
			if(!getprocs(&proc, sizeof(proc), 0, 0, &pid, 1))
				continue; /* Assume the process went away */
			prec = xmalloc(sizeof(prec_t));
			list_append(prec_list, prec);
			prec->pid = proc.pi_pid;
			prec->ppid = proc.pi_ppid;
			prec->usec = proc.pi_ru.ru_utime.tv_sec +
				proc.pi_ru.ru_utime.tv_usec * 1e-6;
			prec->ssec = proc.pi_ru.ru_stime.tv_sec +
				proc.pi_ru.ru_stime.tv_usec * 1e-6;
			prec->pages = proc.pi_majflt;
			prec->rss = (proc.pi_trss + proc.pi_drss) * pagesize;
			//prec->rss *= 1024;
			prec->vsize = (proc.pi_tsize / 1024);
			prec->vsize += (proc.pi_dvm * pagesize);
			//prec->vsize *= 1024;
			/*  debug("vsize = %f = (%d/1024)+(%d*%d)",   */
/*    		      prec->vsize, proc.pi_tsize, proc.pi_dvm, pagesize);  */
		}
	} else {
		while(getprocs(&proc, sizeof(proc), 0, 0, &pid, 1) == 1) {
			prec = xmalloc(sizeof(prec_t));
			list_append(prec_list, prec);
			prec->pid = proc.pi_pid;
			prec->ppid = proc.pi_ppid;
			prec->usec = proc.pi_ru.ru_utime.tv_sec +
				proc.pi_ru.ru_utime.tv_usec * 1e-6;
			prec->ssec = proc.pi_ru.ru_stime.tv_sec +
				proc.pi_ru.ru_stime.tv_usec * 1e-6;
			prec->pages = proc.pi_majflt;
			prec->rss = (proc.pi_trss + proc.pi_drss) * pagesize;
			//prec->rss *= 1024;
			prec->vsize = (proc.pi_tsize / 1024);
			prec->vsize += (proc.pi_dvm * pagesize);
			//prec->vsize *= 1024;
			/*  debug("vsize = %f = (%d/1024)+(%d*%d)",   */
/*    		      prec->vsize, proc.pi_tsize, proc.pi_dvm, pagesize);  */
		}
	}
	if(!list_count(prec_list))
		goto finished;

	slurm_mutex_lock(&jobacct_lock);
	if(!task_list || !list_count(task_list)) {
		slurm_mutex_unlock(&jobacct_lock);
		goto finished;
	}
	itr = list_iterator_create(task_list);
	while((jobacct = list_next(itr))) {
		itr2 = list_iterator_create(prec_list);
		while((prec = list_next(itr2))) {
			//debug2("pid %d ? %d", prec->ppid, jobacct->pid);
			if (prec->pid == jobacct->pid) {
				/* find all my descendents */
				_get_offspring_data(prec_list, prec,
						    prec->pid);

				/* tally their usage */
				jobacct->max_rss = jobacct->tot_rss =
					MAX(jobacct->max_rss, (int)prec->rss);
				total_job_mem += jobacct->max_rss;
				jobacct->max_vsize = jobacct->tot_vsize =
					MAX(jobacct->max_vsize,
					    (int)prec->vsize);
				total_job_vsize += prec->vsize;
				jobacct->max_pages = jobacct->tot_pages =
					MAX(jobacct->max_pages, prec->pages);
				jobacct->min_cpu = jobacct->tot_cpu =
					MAX(jobacct->min_cpu,
					    (prec->usec + prec->ssec));
				debug2("%d size now %d %d time %d",
				      jobacct->pid, jobacct->max_rss,
				      jobacct->max_vsize, jobacct->tot_cpu);

				break;
			}
		}
		list_iterator_destroy(itr2);
	}
	list_iterator_destroy(itr);
	slurm_mutex_unlock(&jobacct_lock);

	if (jobacct_mem_limit) {
		debug("Step %u.%u memory used:%u limit:%u KB",
		      jobacct_job_id, jobacct_step_id, 
		      total_job_mem, jobacct_mem_limit);
	}
	if (jobacct_job_id && jobacct_mem_limit &&
	    (total_job_mem > jobacct_mem_limit)) {
		if (jobacct_step_id == NO_VAL) {
			error("Job %u exceeded %u KB memory limit, being "
 			      "killed", jobacct_job_id, jobacct_mem_limit);
		} else {
			error("Step %u.%u exceeded %u KB memory limit, being "
			      "killed", jobacct_job_id, jobacct_step_id, 
			      jobacct_mem_limit);
		}
		_acct_kill_step();
	} else if (jobacct_job_id && jobacct_vmem_limit &&
	    (total_job_vsize > jobacct_vmem_limit)) {
		if (jobacct_step_id == NO_VAL) {
			error("Job %u exceeded %u KB virtual memory limit, "
			      "being killed", jobacct_job_id, 
			      jobacct_vmem_limit);
		} else {
			error("Step %u.%u exceeded %u KB virtual memory "
			      "limit, being killed", jobacct_job_id, 
			      jobacct_step_id, jobacct_vmem_limit);
		}
		_acct_kill_step();
	}

finished:
	list_destroy(prec_list);
	processing = 0;

	return;
}
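
After tallying per-task usage, the code above enforces the configured RSS and virtual-memory limits and kills the step when either is exceeded. A minimal sketch of that check, with plain parameters standing in for the file-scope jobacct_* globals; the helper name and signature are illustrative, and the real code calls _acct_kill_step() rather than just reporting:

/* Sketch only: the memory-limit enforcement at the end of
 * _get_process_data(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Return true if the step should be killed.  A limit of 0 means
 * "no limit", matching the checks above; all values are in KB. */
static bool over_mem_limits(uint32_t job_id,
			    uint32_t total_mem, uint32_t mem_limit,
			    uint32_t total_vsize, uint32_t vmem_limit)
{
	if (job_id && mem_limit && (total_mem > mem_limit)) {
		fprintf(stderr, "Job %u exceeded %u KB memory limit\n",
			job_id, mem_limit);
		return true;
	}
	if (job_id && vmem_limit && (total_vsize > vmem_limit)) {
		fprintf(stderr,
			"Job %u exceeded %u KB virtual memory limit\n",
			job_id, vmem_limit);
		return true;
	}
	return false;
}
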
Exemplo n.º 29
0
extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
				  char *cluster_name,
				  time_t start, time_t end,
				  uint16_t archive_data)
{
	int rc = SLURM_SUCCESS;
	int add_sec = 3600;
	int i=0;
	time_t now = time(NULL);
	time_t curr_start = start;
	time_t curr_end = curr_start + add_sec;
	char *query = NULL;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	ListIterator a_itr = NULL;
	ListIterator c_itr = NULL;
	ListIterator w_itr = NULL;
	ListIterator r_itr = NULL;
	List assoc_usage_list = list_create(_destroy_local_id_usage);
	List cluster_down_list = list_create(_destroy_local_cluster_usage);
	List wckey_usage_list = list_create(_destroy_local_id_usage);
	List resv_usage_list = list_create(_destroy_local_resv_usage);
	uint16_t track_wckey = slurm_get_track_wckey();
	/* char start_char[20], end_char[20]; */

	char *job_req_inx[] = {
		"job.job_db_inx",
		"job.id_job",
		"job.id_assoc",
		"job.id_wckey",
		"job.array_task_pending",
		"job.time_eligible",
		"job.time_start",
		"job.time_end",
		"job.time_suspended",
		"job.cpus_alloc",
		"job.cpus_req",
		"job.id_resv",
		"SUM(step.consumed_energy)"
	};
	char *job_str = NULL;
	enum {
		JOB_REQ_DB_INX,
		JOB_REQ_JOBID,
		JOB_REQ_ASSOCID,
		JOB_REQ_WCKEYID,
		JOB_REQ_ARRAY_PENDING,
		JOB_REQ_ELG,
		JOB_REQ_START,
		JOB_REQ_END,
		JOB_REQ_SUSPENDED,
		JOB_REQ_ACPU,
		JOB_REQ_RCPU,
		JOB_REQ_RESVID,
		JOB_REQ_ENERGY,
		JOB_REQ_COUNT
	};

	char *suspend_req_inx[] = {
		"time_start",
		"time_end"
	};
	char *suspend_str = NULL;
	enum {
		SUSPEND_REQ_START,
		SUSPEND_REQ_END,
		SUSPEND_REQ_COUNT
	};

	char *resv_req_inx[] = {
		"id_resv",
		"assoclist",
		"cpus",
		"flags",
		"time_start",
		"time_end"
	};
	char *resv_str = NULL;
	enum {
		RESV_REQ_ID,
		RESV_REQ_ASSOCS,
		RESV_REQ_CPU,
		RESV_REQ_FLAGS,
		RESV_REQ_START,
		RESV_REQ_END,
		RESV_REQ_COUNT
	};

	i=0;
	xstrfmtcat(job_str, "%s", job_req_inx[i]);
	for(i=1; i<JOB_REQ_COUNT; i++) {
		xstrfmtcat(job_str, ", %s", job_req_inx[i]);
	}

	i=0;
	xstrfmtcat(suspend_str, "%s", suspend_req_inx[i]);
	for(i=1; i<SUSPEND_REQ_COUNT; i++) {
		xstrfmtcat(suspend_str, ", %s", suspend_req_inx[i]);
	}

	i=0;
	xstrfmtcat(resv_str, "%s", resv_req_inx[i]);
	for(i=1; i<RESV_REQ_COUNT; i++) {
		xstrfmtcat(resv_str, ", %s", resv_req_inx[i]);
	}

/* 	info("begin start %s", slurm_ctime(&curr_start)); */
/* 	info("begin end %s", slurm_ctime(&curr_end)); */
	a_itr = list_iterator_create(assoc_usage_list);
	c_itr = list_iterator_create(cluster_down_list);
	w_itr = list_iterator_create(wckey_usage_list);
	r_itr = list_iterator_create(resv_usage_list);
	while (curr_start < end) {
		int last_id = -1;
		int last_wckeyid = -1;
		int seconds = 0;
		int tot_time = 0;
		local_cluster_usage_t *loc_c_usage = NULL;
		local_cluster_usage_t *c_usage = NULL;
		local_resv_usage_t *r_usage = NULL;
		local_id_usage_t *a_usage = NULL;
		local_id_usage_t *w_usage = NULL;

		if (debug_flags & DEBUG_FLAG_DB_USAGE)
			DB_DEBUG(mysql_conn->conn,
				 "%s curr hour is now %ld-%ld",
				 cluster_name, curr_start, curr_end);
/* 		info("start %s", slurm_ctime(&curr_start)); */
/* 		info("end %s", slurm_ctime(&curr_end)); */

		c_usage = _setup_cluster_usage(mysql_conn, cluster_name,
					       curr_start, curr_end,
					       cluster_down_list);

		// now get the reservations during this time
		/* If a reservation has the IGNORE_JOBS flag we don't
		 * have an easy way to distinguish the cpus used by a
		 * job that is not running in the reservation but is
		 * sitting on its cpus.  So we will just ignore these
		 * reservations for accounting purposes.
		 */
		query = xstrdup_printf("select %s from \"%s_%s\" where "
				       "(time_start < %ld && time_end >= %ld) "
				       "&& !(flags & %u)"
				       "order by time_start",
				       resv_str, cluster_name, resv_table,
				       curr_end, curr_start,
				       RESERVE_FLAG_IGN_JOBS);

		if (debug_flags & DEBUG_FLAG_DB_USAGE)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(query);
			_destroy_local_cluster_usage(c_usage);
			return SLURM_ERROR;
		}
		xfree(query);

		/* If a reservation overlaps another reservation we
		   total up everything here as if they didn't, but when
		   calculating the total time for a cluster we will
		   remove the extra time received.  This may result in
		   unexpected results with association based reports
		   since the association is given the total amount of
		   time of each reservation, thus equaling more time
		   than is available.  Job/Cluster/Reservation reports
		   should be fine though since we really don't over
		   allocate resources.  The reason we can't handle
		   overlapping reservations here is that, unless one
		   reservation completely overlaps the other, we have
		   no idea how many cpus should be removed since this
		   could be a heterogeneous system.  The same problem
		   exists when a reservation is created with the
		   ignore_jobs option, which allows jobs that aren't
		   supposed to be in the reservation to keep running
		   there.
		*/
		while ((row = mysql_fetch_row(result))) {
			time_t row_start = slurm_atoul(row[RESV_REQ_START]);
			time_t row_end = slurm_atoul(row[RESV_REQ_END]);
			uint32_t row_cpu = slurm_atoul(row[RESV_REQ_CPU]);
			uint32_t row_flags = slurm_atoul(row[RESV_REQ_FLAGS]);

			if (row_start < curr_start)
				row_start = curr_start;

			if (!row_end || row_end > curr_end)
				row_end = curr_end;

			/* Don't worry about it if the time is less
			 * than 1 second.
			 */
			if ((row_end - row_start) < 1)
				continue;

			r_usage = xmalloc(sizeof(local_resv_usage_t));
			r_usage->id = slurm_atoul(row[RESV_REQ_ID]);

			r_usage->local_assocs = list_create(slurm_destroy_char);
			slurm_addto_char_list(r_usage->local_assocs,
					      row[RESV_REQ_ASSOCS]);

			r_usage->total_time = (row_end - row_start) * row_cpu;
			r_usage->start = row_start;
			r_usage->end = row_end;
			list_append(resv_usage_list, r_usage);

			/* Since this reservation was added to the
			   cluster and only certain people could run
			   there, we count it as allocated time on
			   the system.  If the reservation was for
			   maintenance then we add the time to planned
			   down time.
			*/


			/* only record time for the clusters that have
			   registered.  This continue should rarely if
			   ever happen.
			*/
			if (!c_usage)
				continue;
			else if (row_flags & RESERVE_FLAG_MAINT)
				c_usage->pd_cpu += r_usage->total_time;
			else
				c_usage->a_cpu += r_usage->total_time;
			/* slurm_make_time_str(&r_usage->start, start_char, */
			/* 		    sizeof(start_char)); */
			/* slurm_make_time_str(&r_usage->end, end_char, */
			/* 		    sizeof(end_char)); */
			/* info("adding this much %lld to cluster %s " */
			/*      "%d %d %s - %s", */
			/*      r_usage->total_time, c_usage->name, */
			/*      (row_flags & RESERVE_FLAG_MAINT),  */
			/*      r_usage->id, start_char, end_char); */
		}
		mysql_free_result(result);

		/* now get the jobs during this time only  */
		query = xstrdup_printf("select %s from \"%s_%s\" as job "
				       "left outer join \"%s_%s\" as step on "
				       "job.job_db_inx=step.job_db_inx "
				       "and (step.id_step>=0) "
				       "where (job.time_eligible < %ld && "
				       "(job.time_end >= %ld || "
				       "job.time_end = 0)) "
				       "group by job.job_db_inx "
				       "order by job.id_assoc, "
				       "job.time_eligible",
				       job_str, cluster_name, job_table,
				       cluster_name, step_table,
				       curr_end, curr_start);

		if (debug_flags & DEBUG_FLAG_DB_USAGE)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		if (!(result = mysql_db_query_ret(
			      mysql_conn, query, 0))) {
			xfree(query);
			_destroy_local_cluster_usage(c_usage);
			return SLURM_ERROR;
		}
		xfree(query);

		while ((row = mysql_fetch_row(result))) {
			uint32_t job_id = slurm_atoul(row[JOB_REQ_JOBID]);
			uint32_t assoc_id = slurm_atoul(row[JOB_REQ_ASSOCID]);
			uint32_t wckey_id = slurm_atoul(row[JOB_REQ_WCKEYID]);
			uint32_t array_pending =
				slurm_atoul(row[JOB_REQ_ARRAY_PENDING]);
			uint32_t resv_id = slurm_atoul(row[JOB_REQ_RESVID]);
			time_t row_eligible = slurm_atoul(row[JOB_REQ_ELG]);
			time_t row_start = slurm_atoul(row[JOB_REQ_START]);
			time_t row_end = slurm_atoul(row[JOB_REQ_END]);
			uint32_t row_acpu = slurm_atoul(row[JOB_REQ_ACPU]);
			uint32_t row_rcpu = slurm_atoul(row[JOB_REQ_RCPU]);
			uint64_t row_energy = 0;
			int loc_seconds = 0;
			seconds = 0;

			if (row[JOB_REQ_ENERGY])
				row_energy = slurm_atoull(row[JOB_REQ_ENERGY]);
			if (row_start && (row_start < curr_start))
				row_start = curr_start;

			if (!row_start && row_end)
				row_start = row_end;

			if (!row_end || row_end > curr_end)
				row_end = curr_end;

			if (!row_start || ((row_end - row_start) < 1))
				goto calc_cluster;

			seconds = (row_end - row_start);

			if (slurm_atoul(row[JOB_REQ_SUSPENDED])) {
				MYSQL_RES *result2 = NULL;
				MYSQL_ROW row2;
				/* get the suspended time for this job */
				query = xstrdup_printf(
					"select %s from \"%s_%s\" where "
					"(time_start < %ld && (time_end >= %ld "
					"|| time_end = 0)) && job_db_inx=%s "
					"order by time_start",
					suspend_str, cluster_name,
					suspend_table,
					curr_end, curr_start,
					row[JOB_REQ_DB_INX]);

				debug4("%d(%s:%d) query\n%s",
				       mysql_conn->conn, THIS_FILE,
				       __LINE__, query);
				if (!(result2 = mysql_db_query_ret(
					      mysql_conn,
					      query, 0))) {
					xfree(query);
					_destroy_local_cluster_usage(c_usage);
					return SLURM_ERROR;
				}
				xfree(query);
				while ((row2 = mysql_fetch_row(result2))) {
					time_t local_start = slurm_atoul(
						row2[SUSPEND_REQ_START]);
					time_t local_end = slurm_atoul(
						row2[SUSPEND_REQ_END]);

					if (!local_start)
						continue;

					if (row_start > local_start)
						local_start = row_start;
					if (row_end < local_end)
						local_end = row_end;
					tot_time = (local_end - local_start);
					if (tot_time < 1)
						continue;

					seconds -= tot_time;
				}
				mysql_free_result(result2);
			}
			if (seconds < 1) {
				debug4("This job (%u) was suspended "
				       "the entire hour", job_id);
				continue;
			}

			if (last_id != assoc_id) {
				a_usage = xmalloc(sizeof(local_id_usage_t));
				a_usage->id = assoc_id;
				list_append(assoc_usage_list, a_usage);
				last_id = assoc_id;
			}

			a_usage->a_cpu += seconds * row_acpu;
			a_usage->energy += row_energy;

			if (!track_wckey)
				goto calc_cluster;

			/* do the wckey calculation */
			if (last_wckeyid != wckey_id) {
				list_iterator_reset(w_itr);
				while ((w_usage = list_next(w_itr)))
					if (w_usage->id == wckey_id)
						break;

				if (!w_usage) {
					w_usage = xmalloc(
						sizeof(local_id_usage_t));
					w_usage->id = wckey_id;
					list_append(wckey_usage_list,
						    w_usage);
				}

				last_wckeyid = wckey_id;
			}
			w_usage->a_cpu += seconds * row_acpu;
			w_usage->energy += row_energy;
			/* do the cluster allocated calculation */
		calc_cluster:

			/* Now figure out whether there was a disconnected
			   slurmctld during this job.
			*/
			list_iterator_reset(c_itr);
			while ((loc_c_usage = list_next(c_itr))) {
				int temp_end = row_end;
				int temp_start = row_start;
				if (loc_c_usage->start > temp_start)
					temp_start = loc_c_usage->start;
				if (loc_c_usage->end < temp_end)
					temp_end = loc_c_usage->end;
				loc_seconds = (temp_end - temp_start);
				if (loc_seconds < 1)
					continue;

				loc_seconds *= row_acpu;
				/* info(" Job %u was running for " */
				/*      "%d seconds while " */
				/*      "cluster %s's slurmctld " */
				/*      "wasn't responding", */
				/*      job_id, loc_seconds, cluster_name); */
				if (loc_seconds >= loc_c_usage->total_time)
					loc_c_usage->total_time = 0;
				else {
					/* loc_seconds already includes
					 * the row_acpu factor */
					loc_c_usage->total_time -=
						loc_seconds;
				}
			}

			/* first figure out the reservation */
			if (resv_id) {
				if (seconds <= 0)
					continue;
				/* Since we have already added the
				   entire reservation as used time on
				   the cluster we only need to
				   calculate the used time for the
				   reservation and then divvy up the
				   unused time over the associations
				   able to run in the reservation.
				   Since the job was to run, or ran, in
				   a reservation we don't care about
				   eligible time, since that could
				   totally skew the cluster's reserved
				   time given the job may be able to run
				   outside of the reservation. */
				list_iterator_reset(r_itr);
				while ((r_usage = list_next(r_itr))) {
					/* since the reservation could
					   have changed in some way,
					   thus making a new
					   reservation record in the
					   database, we have to make
					   sure all the reservations
					   are checked to see if such
					   a thing has happened */
					if (r_usage->id == resv_id) {
						int temp_end = row_end;
						int temp_start = row_start;
						if (r_usage->start > temp_start)
							temp_start =
								r_usage->start;
						if (r_usage->end < temp_end)
							temp_end = r_usage->end;

						if ((temp_end - temp_start)
						    > 0) {
							r_usage->a_cpu +=
								(temp_end
								 - temp_start)
								* row_acpu;
						}
					}
				}
				continue;
			}

			/* only record time for the clusters that have
			   registered.  This continue should rarely if
			   ever happen.
			*/
			if (!c_usage)
				continue;

			if (row_start && (seconds > 0)) {
				/* info("%d assoc %d adds " */
				/*      "(%d)(%d-%d) * %d = %d " */
				/*      "to %d", */
				/*      job_id, */
				/*      a_usage->id, */
				/*      seconds, */
				/*      row_end, row_start, */
				/*      row_acpu, */
				/*      seconds * row_acpu, */
				/*      row_acpu); */

				c_usage->a_cpu += seconds * row_acpu;
				c_usage->energy += row_energy;
			}

			/* now reserved time */
			if (!row_start || (row_start >= c_usage->start)) {
				int temp_end = row_start;
				int temp_start = row_eligible;
				if (c_usage->start > temp_start)
					temp_start = c_usage->start;
				if (c_usage->end < temp_end)
					temp_end = c_usage->end;
				loc_seconds = (temp_end - temp_start);
				if (loc_seconds > 0) {
					/* If we have pending jobs in
					   an array they haven't been
					   inserted into the database
					   yet as proper job records,
					   so handle them here.
					*/
					if (array_pending)
						loc_seconds *= array_pending;

					/* info("%d assoc %d reserved " */
					/*      "(%d)(%d-%d) * %d * %d = %d " */
					/*      "to %d", */
					/*      job_id, */
					/*      assoc_id, */
					/*      temp_end - temp_start, */
					/*      temp_end, temp_start, */
					/*      row_rcpu, */
					/*      array_pending, */
					/*      loc_seconds, */
					/*      row_rcpu); */

					c_usage->r_cpu +=
						loc_seconds * row_rcpu;
				}
			}
		}
		mysql_free_result(result);

		/* now figure out how much more to add to the
		   associations that could have run in the reservation
		*/
		list_iterator_reset(r_itr);
		while ((r_usage = list_next(r_itr))) {
			int64_t idle = r_usage->total_time - r_usage->a_cpu;
			char *assoc = NULL;
			ListIterator tmp_itr = NULL;

			if (idle <= 0)
				continue;

			/* now divide that time by the number of
			   associations in the reservation and add
			   that share to each association */
			seconds = idle / list_count(r_usage->local_assocs);
/* 			info("resv %d got %d for seconds for %d assocs", */
/* 			     r_usage->id, seconds, */
/* 			     list_count(r_usage->local_assocs)); */
			tmp_itr = list_iterator_create(r_usage->local_assocs);
			while ((assoc = list_next(tmp_itr))) {
				uint32_t associd = slurm_atoul(assoc);
				if (last_id != associd) {
					list_iterator_reset(a_itr);
					while ((a_usage = list_next(a_itr))) {
						if (a_usage->id == associd) {
							last_id = a_usage->id;
							break;
						}
					}
				}

				if (!a_usage) {
					a_usage = xmalloc(
						sizeof(local_id_usage_t));
					a_usage->id = associd;
					list_append(assoc_usage_list, a_usage);
					last_id = associd;
				}

				a_usage->a_cpu += seconds;
			}
			list_iterator_destroy(tmp_itr);
		}

		/* now apply the down time from the slurmctld disconnects */
		if (c_usage) {
			list_iterator_reset(c_itr);
			while ((loc_c_usage = list_next(c_itr)))
				c_usage->d_cpu += loc_c_usage->total_time;

			if ((rc = _process_cluster_usage(
				     mysql_conn, cluster_name, curr_start,
				     curr_end, now, c_usage))
			    != SLURM_SUCCESS) {
				_destroy_local_cluster_usage(c_usage);
				goto end_it;
			}
		}

		list_iterator_reset(a_itr);
		while ((a_usage = list_next(a_itr))) {
/* 			info("association (%d) %d alloc %d", */
/* 			     a_usage->id, last_id, */
/* 			     a_usage->a_cpu); */
			if (query) {
				xstrfmtcat(query,
					   ", (%ld, %ld, %d, %ld, %"PRIu64", "
					   "%"PRIu64")",
					   now, now,
					   a_usage->id, curr_start,
					   a_usage->a_cpu, a_usage->energy);
			} else {
				xstrfmtcat(query,
					   "insert into \"%s_%s\" "
					   "(creation_time, "
					   "mod_time, id_assoc, time_start, "
					   "alloc_cpu_secs, consumed_energy) "
					   "values "
					   "(%ld, %ld, %d, %ld, %"PRIu64", "
					   "%"PRIu64")",
					   cluster_name, assoc_hour_table,
					   now, now,
					   a_usage->id, curr_start,
					   a_usage->a_cpu, a_usage->energy);
			}
		}
		if (query) {
			xstrfmtcat(query,
				   " on duplicate key update "
				   "mod_time=%ld, "
				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
				   "consumed_energy=VALUES(consumed_energy);",
				   now);

			if (debug_flags & DEBUG_FLAG_DB_USAGE)
				DB_DEBUG(mysql_conn->conn, "query\n%s", query);
			rc = mysql_db_query(mysql_conn, query);
			xfree(query);
			if (rc != SLURM_SUCCESS) {
				error("Couldn't add assoc hour rollup");
				_destroy_local_cluster_usage(c_usage);
				goto end_it;
			}
		}

		if (!track_wckey)
			goto end_loop;

		list_iterator_reset(w_itr);
		while ((w_usage = list_next(w_itr))) {
/* 			info("association (%d) %d alloc %d", */
/* 			     w_usage->id, last_id, */
/* 			     w_usage->a_cpu); */
			if (query) {
				xstrfmtcat(query,
					   ", (%ld, %ld, %d, %ld, "
					   "%"PRIu64", %"PRIu64")",
					   now, now,
					   w_usage->id, curr_start,
					   w_usage->a_cpu, w_usage->energy);
			} else {
				xstrfmtcat(query,
					   "insert into \"%s_%s\" "
					   "(creation_time, "
					   "mod_time, id_wckey, time_start, "
					   "alloc_cpu_secs, consumed_energy) "
					   "values "
					   "(%ld, %ld, %d, %ld, "
					   "%"PRIu64", %"PRIu64")",
					   cluster_name, wckey_hour_table,
					   now, now,
					   w_usage->id, curr_start,
					   w_usage->a_cpu, w_usage->energy);
			}
		}
		if (query) {
			xstrfmtcat(query,
				   " on duplicate key update "
				   "mod_time=%ld, "
				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
				   "consumed_energy=VALUES(consumed_energy);",
				   now);

			if (debug_flags & DEBUG_FLAG_DB_USAGE)
				DB_DEBUG(mysql_conn->conn, "query\n%s", query);
			rc = mysql_db_query(mysql_conn, query);
			xfree(query);
			if (rc != SLURM_SUCCESS) {
				error("Couldn't add wckey hour rollup");
				_destroy_local_cluster_usage(c_usage);
				goto end_it;
			}
		}

	end_loop:
		_destroy_local_cluster_usage(c_usage);
		list_flush(assoc_usage_list);
		list_flush(cluster_down_list);
		list_flush(wckey_usage_list);
		list_flush(resv_usage_list);
		curr_start = curr_end;
		curr_end = curr_start + add_sec;
	}
end_it:
	xfree(suspend_str);
	xfree(job_str);
	xfree(resv_str);
	list_iterator_destroy(a_itr);
	list_iterator_destroy(c_itr);
	list_iterator_destroy(w_itr);
	list_iterator_destroy(r_itr);

	list_destroy(assoc_usage_list);
	list_destroy(cluster_down_list);
	list_destroy(wckey_usage_list);
	list_destroy(resv_usage_list);

/* 	info("stop start %s", slurm_ctime(&curr_start)); */
/* 	info("stop end %s", slurm_ctime(&curr_end)); */

	/* go check to see if we archive and purge */

	if (rc == SLURM_SUCCESS)
		rc = _process_purge(mysql_conn, cluster_name, archive_data,
				    SLURMDB_PURGE_HOURS);

	return rc;
}
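
Each pass of the rollup above clamps every job, suspend and reservation row to the current hourly window [curr_start, curr_end) before charging seconds times cpus to the cluster, association and wckey tallies. A minimal sketch of that clamping arithmetic, assuming an illustrative helper name (a row_end of 0 meaning the record is still open, as in the queries above):

/* Sketch only: the window-clamping arithmetic the rollup applies to
 * each job, suspend and reservation row. */
#include <time.h>

/* Seconds of [row_start, row_end) falling inside [win_start, win_end);
 * row_end == 0 means the record is still open. */
static time_t seconds_in_window(time_t row_start, time_t row_end,
				time_t win_start, time_t win_end)
{
	if (row_start < win_start)
		row_start = win_start;
	if (!row_end || (row_end > win_end))
		row_end = win_end;
	if (row_end <= row_start)
		return 0;
	return row_end - row_start;
}

/* Typical use mirrors the loops above, e.g.:
 *   seconds = seconds_in_window(row_start, row_end, curr_start, curr_end);
 *   c_usage->a_cpu += seconds * row_acpu;
 */
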
Exemplo n.º 30
0
static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
		       jag_callbacks_t *callbacks)
{
	List prec_list = list_create(destroy_jag_prec);
	char	proc_stat_file[256];	/* Allow ~20x extra length */
	char	proc_io_file[256];	/* Allow ~20x extra length */
	static	int	slash_proc_open = 0;
	int i;

	if (!pgid_plugin) {
		pid_t *pids = NULL;
		int npids = 0;
		/* get only the processes in the proctrack container */
		proctrack_g_get_pids(cont_id, &pids, &npids);
		if (!npids) {
			/* update consumed energy even if pids do not exist */
			ListIterator itr = list_iterator_create(task_list);
			struct jobacctinfo *jobacct = NULL;
			if ((jobacct = list_next(itr))) {
				acct_gather_energy_g_get_data(
					energy_profile,
					&jobacct->energy);
				debug2("getjoules_task energy = %u",
				       jobacct->energy.consumed_energy);
			}
			list_iterator_destroy(itr);

			debug4("no pids in this container %"PRIu64"", cont_id);
			goto finished;
		}
		for (i = 0; i < npids; i++) {
			snprintf(proc_stat_file, 256, "/proc/%d/stat", pids[i]);
			snprintf(proc_io_file, 256, "/proc/%d/io", pids[i]);
			_handle_stats(prec_list, proc_stat_file, proc_io_file,
				      callbacks);
		}
		xfree(pids);
	} else {
		struct dirent *slash_proc_entry;
		char  *iptr = NULL, *optr = NULL, *optr2 = NULL;

		if (slash_proc_open) {
			rewinddir(slash_proc);
		} else {
			slash_proc=opendir("/proc");
			if (slash_proc == NULL) {
				perror("opening /proc");
				goto finished;
			}
			slash_proc_open=1;
		}
		strcpy(proc_stat_file, "/proc/");
		strcpy(proc_io_file, "/proc/");

		while ((slash_proc_entry = readdir(slash_proc))) {

			/* Save a few cycles by simulating
			 * strcat(statFileName, slash_proc_entry->d_name);
			 * strcat(statFileName, "/stat");
			 * while checking for a numeric filename (which really
			 * should be a pid). Then do the same for the
			 * /proc/<pid>/io file name.
			 */
			optr = proc_stat_file + sizeof("/proc");
			iptr = slash_proc_entry->d_name;
			i = 0;
			do {
				if ((*iptr < '0') ||
				    ((*optr++ = *iptr++) > '9')) {
					i = -1;
					break;
				}
			} while (*iptr);

			if (i == -1)
				continue;
			iptr = (char*)"/stat";

			do {
				*optr++ = *iptr++;
			} while (*iptr);
			*optr = 0;

			optr2 = proc_io_file + sizeof("/proc");
			iptr = slash_proc_entry->d_name;
			i = 0;
			do {
				if ((*iptr < '0') ||
				    ((*optr2++ = *iptr++) > '9')) {
					i = -1;
					break;
				}
			} while (*iptr);
			if (i == -1)
				continue;
			iptr = (char*)"/io";

			do {
				*optr2++ = *iptr++;
			} while (*iptr);
			*optr2 = 0;

			_handle_stats(prec_list, proc_stat_file, proc_io_file,
				      callbacks);
		}
	}

finished:

	return prec_list;
}
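
The readdir() loop above builds "/proc/<pid>/stat" and "/proc/<pid>/io" by hand while rejecting non-numeric directory names. A simpler, functionally equivalent sketch using strspn() and snprintf(), trading the saved cycles for clarity (the helper name is illustrative):

/* Sketch only: a simpler equivalent of the hand-rolled path building in
 * the readdir() loop of _get_precs(). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Accept the entry only if its name is all digits (i.e. looks like a
 * pid), then format both /proc paths.  Returns false for non-pids. */
static bool build_proc_paths(const char *d_name,
			     char *stat_path, size_t stat_len,
			     char *io_path, size_t io_len)
{
	if (!*d_name || d_name[strspn(d_name, "0123456789")] != '\0')
		return false;
	snprintf(stat_path, stat_len, "/proc/%s/stat", d_name);
	snprintf(io_path, io_len, "/proc/%s/io", d_name);
	return true;
}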