Example #1
static const cst_val *find_rewrite_rule(const cst_val *LC,
					const cst_val *RC,
					const cst_lts_rewrites *r)
{
    /* Search through the rewrite rules and return the first one that matches */
    const cst_val *i, *RLC, *RA, *RRC;

    for (i=r->rules; i; i=val_cdr(i))
    {
	/* val_print(stdout, val_car(i)); printf("\n"); */
	RLC = val_car(val_car(i));                     /* rule's left-context pattern         */
	RA  = val_car(val_cdr(val_car(i)));            /* rule's target (the part rewritten)  */
	RRC = val_car(val_cdr(val_cdr(val_car(i))));   /* rule's right-context pattern        */
	if (rule_matches(LC,RC,RLC,RA,RRC,r->sets))
	    return val_car(i);
    }

    fprintf(stderr,"LTS_REWRITES: unable to find a matching rule for:\n");
    fprintf(stderr,"LC: ");
    val_print(stderr,LC);
    fprintf(stderr,"\n");
    fprintf(stderr,"RC: ");
    val_print(stderr,RC);
    fprintf(stderr,"\n");

    return NULL;
}
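A minimal caller sketch (not part of the original source): it shows how the NULL-on-failure contract and the rule layout unpacked above might be consumed. Only val_car/val_cdr and the types already used in the function are assumed; the wrapper name apply_one_rewrite() is made up for illustration.

/* Hypothetical caller sketch: LC is the left context, RC the remaining
 * right context, and r the rewrite rule set, exactly as in the function
 * above.  Returns 1 if a rule matched, 0 otherwise. */
static int apply_one_rewrite(const cst_val *LC, const cst_val *RC,
                             const cst_lts_rewrites *r)
{
    const cst_val *rule, *RA;

    rule = find_rewrite_rule(LC, RC, r);
    if (rule == NULL)
        return 0;   /* no rule matched; details were already printed to stderr */

    /* The matched rule has the (RLC RA RRC ...) layout unpacked above;
     * a real caller would consume RA from RC and emit the rewrite. */
    RA = val_car(val_cdr(rule));
    (void)RA;
    return 1;
}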
Example #2
/*
 * Check rules for all the (valid) backends.
 *
 * TODO This pretty much replicates most of check_rules(), so maybe the two
 *      could be merged or refactored to share code.
 *
 * TODO This is called only from connection_limits(), which may hold the lock
 *      for quite a long time. Move the lock acquire/release here, and copy all
 *      the data instead of looping through the shared memory.
 */
static void
check_all_rules(void)
{
	/* index of the backend process */
	int		index;

	/* rule index */
	int		r;

	for (index = 0; index < procArray->numProcs; index++)
	{

#if (PG_VERSION_NUM <= 90200)
		volatile PGPROC *proc = procArray->procs[index];
#else
		volatile PGPROC *proc = &ProcGlobal->allProcs[procArray->procs[index]];
#endif

		/* do not count prepared xacts */
		if (proc->pid == 0)
			continue;

		/* do this only for valid backends */
		if (backend_info_is_valid(backends[proc->backendId], proc))
		{

			/* FIXME This should probably refresh the hostname (using pg_getnameinfo_all) */

			/* check all the rules for this backend */
			for (r = 0; r < rules->n_rules; r++)
			{

				/*
				 * Check the rule for a backend - if the PID is different, the
				 * backend is waiting on the lock (and will be processed soon).
				 */
				if (rule_matches(rules->rules[r], backends[proc->backendId].database,
									backends[proc->backendId].role, backends[proc->backendId].socket,
									backends[proc->backendId].hostname))

					/* increment the count */
					++rules->rules[r].count;

			}
		}
	}
}
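A hedged sketch of the calling pattern described in the first TODO above: the caller (connection_limits(), not shown here) serializes on ProcArrayLock before recounting. Only functions that actually appear in these examples are used; the wrapper name recount_rule_matches() is invented for illustration.

/* Hypothetical calling pattern, per the TODO above: serialize on
 * ProcArrayLock, reset the shared per-rule counters, recount matches for
 * every valid backend, then release the lock. */
static void
recount_rule_matches(void)
{
	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

	reset_rules();         /* zero rules->rules[r].count (see check_rules() below) */
	attach_procarray();    /* make sure procArray points at the shared segment     */
	check_all_rules();     /* increment the per-rule counts, as shown above        */

	LWLockRelease(ProcArrayLock);
}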
Example #3
static void
check_rules(Port *port, int status)
{

	int r;

	/* index of the backend process */
	int		index;

	/* limits */
	bool	per_user_overridden = false,
			per_database_overridden = false,
			per_ip_overridden = false;

	/* counters */
	int		per_user = 0,
			per_database = 0,
			per_ip = 0;

	/*
	 * Any other plugins which use ClientAuthentication_hook.
	 */
	if (prev_client_auth_hook)
		prev_client_auth_hook(port, status);

	/* No point in checking the connection rules after failed authentication. */
	if (status != STATUS_OK)
		return;

	/*
	 * Lock ProcArray (serialize the processes, so that we can use the
	 * counters stored in the rule_r struct).
	 *
	 * TODO Use a private array of counters (same number of rules), so
	 *      that we don't need an exclusive lock.
	 */
	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

	/* reset the rule counters */
	reset_rules();

	/* attach the shared segment */
	attach_procarray();

	/*
	 * Perform the actual check - loop through the backends (procArray), and
	 * compare each valid backend against each rule. If it matches, increment
	 * the counter (and if value exceeds the limit, make a failure).
	 *
	 * TODO First check the rules for the current backend, and then only check
	 *      those rules that match (because those are the only rules that may
	 *      be violated by this new connection).
	 */
	for (index = 0; index < procArray->numProcs; index++)
	{

#if (PG_VERSION_NUM <= 90200)
		volatile PGPROC *proc = procArray->procs[index];
#else
		volatile PGPROC *proc = &ProcGlobal->allProcs[procArray->procs[index]];
#endif

		/* do not count prepared xacts */
		if (proc->pid == 0)
			continue;

		/*
		 * If this is the current backend, then update the local info. This
		 * effectively resets info for crashed backends.
		 *
		 * FIXME Maybe this should happen explicitly before the loop.
		 */
		if (proc->backendId == MyBackendId)
		{
			/* lookup remote host name (unless already done) */
			if (! port->remote_hostname)
			{
				char	remote_hostname[NI_MAXHOST];

				if (! pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
									remote_hostname, sizeof(remote_hostname),
									NULL, 0, 0))
					port->remote_hostname = pstrdup(remote_hostname);
			}

			/* store the backend info into a cache */
			backend_update_info(&backends[proc->backendId], proc,
								port->database_name, port->user_name,
								port->raddr, port->remote_hostname);
		}

		/* if the backend info is valid, check the counters and rules */
		if (backend_info_is_valid(backends[proc->backendId], proc))
		{

			/* see if the database/user/IP matches */
			per_database += (strcmp(backends[proc->backendId].database, port->database_name) == 0) ? 1 : 0;
			per_user     += (strcmp(backends[proc->backendId].role, port->user_name) == 0) ? 1 : 0;
			per_ip       += (memcmp(&backends[proc->backendId].socket, &port->raddr, sizeof(SockAddr)) == 0) ? 1 : 0;

			/* check all the rules for this backend */
			for (r = 0; r < rules->n_rules; r++)
			{

				/*
				 * The rule has to be matched by both the current and new session, otherwise
				 * it can't be violated by the new one.
				 *
				 * FIXME This repeatedly checks all the rules for the current backend, which is not
				 *       needed. We only need to do this check (for the new session) once, and then
				 *       walk only the rules that match it. Although that may not detect the
				 *       default rules (per db, ...).
				 */
				if (rule_matches(rules->rules[r], port->database_name, port->user_name, port->raddr, port->remote_host))
				{

					/* check if this rule overrides per-db, per-user or per-ip limits */
					per_database_overridden |= rule_is_per_database(&rules->rules[r]);
					per_user_overridden     |= rule_is_per_user(&rules->rules[r]);
					per_ip_overridden       |= rule_is_per_ip(&rules->rules[r]);

					/* Check the rule for an existing backend (we assume it's valid thanks to backend_info_is_valid()). */
					if (rule_matches(rules->rules[r], backends[proc->backendId].database,
									 backends[proc->backendId].role, backends[proc->backendId].socket,
									 backends[proc->backendId].hostname))
					{

						/* increment the match count for this rule */
						++rules->rules[r].count;

						/*
						 * We're looping over all backends (including the current backend), so the
						 * rule is only violated if the limit is actually exceeded.
						 */
						if (rules->rules[r].count > rules->rules[r].limit)
						{

							if (! is_super_user(port->user_name))
								elog(ERROR, "connection limit reached (rule %d, line %d, limit %d)",
											r, rules->rules[r].line, rules->rules[r].limit);
							else
								elog(WARNING, "connection limit reached (rule %d, line %d, limit %d), but the user is a superuser",
											r, rules->rules[r].line, rules->rules[r].limit);

						}
					}
				}

			}
		}
	}

	/*
	 * Check the per-db/user/IP limits, unless there was an exact rule overriding
	 * the defaults for that object, or unless the default was disabled (set to 0).
	 */

	/* check per-database limit */
	if ((! per_database_overridden) && (default_per_database != 0) && (per_database > default_per_database))
	{
		if (! is_super_user(port->user_name))
			elog(ERROR, "per-database connection limit reached (limit %d)",
				 default_per_database);
		else
			elog(WARNING, "per-database connection limit reached (limit %d), but the user is a superuser",
				 default_per_database);
	}

	/* check per-user limit */
	if ((! per_user_overridden) && (default_per_role != 0) && (per_user > default_per_role))
	{
		if (! is_super_user(port->user_name))
			elog(ERROR, "per-user connection limit reached (limit %d)",
				 default_per_role);
		else
			elog(WARNING, "per-user connection limit reached (limit %d), but the user is a superuser",
				 default_per_role);
	}

	/* check per-IP limit */
	if ((! per_ip_overridden) && (default_per_ip != 0) && (per_ip > default_per_ip))
	{
		if (! is_super_user(port->user_name))
			elog(ERROR, "per-IP connection limit reached (limit %d)",
				 default_per_ip);
		else
			elog(WARNING, "per-IP connection limit reached (limit %d), but the user is a superuser",
				 default_per_ip);
	}

	LWLockRelease(ProcArrayLock);

}
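check_rules() is written as a ClientAuthentication_hook (it chains prev_client_auth_hook above). A minimal, hypothetical installation sketch follows; ClientAuthentication_hook, ClientAuthentication_hook_type and _PG_init() are standard PostgreSQL extension symbols, and the static variable mirrors the one the code above already relies on.

#include "postgres.h"
#include "libpq/auth.h"		/* ClientAuthentication_hook */

/* Sketch only: in the real extension this variable and _PG_init() presumably
 * already exist; they are shown here to illustrate how check_rules() gets
 * wired into the authentication path. */
static ClientAuthentication_hook_type prev_client_auth_hook = NULL;

void
_PG_init(void)
{
	/* remember any previously installed hook so check_rules() can chain it */
	prev_client_auth_hook = ClientAuthentication_hook;
	ClientAuthentication_hook = check_rules;
}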
Example #4
void terrain_builder::build_terrains()
{
	log_scope("terrain_builder::build_terrains");

	// Builds the terrain_by_type_ cache
	for(int x = -2; x <= map().w(); ++x) {
		for(int y = -2; y <= map().h(); ++y) {
			const map_location loc(x,y);
			const t_translation::t_terrain t = map().get_terrain(loc);

			terrain_by_type_[t].push_back(loc);
		}
	}

	int rule_index = 0;
	building_ruleset::const_iterator r;

	for(r = building_rules_.begin(); r != building_rules_.end(); ++r) {

		const building_rule& rule = r->second;

		// Find the constraint of this rule that matches the fewest terrains.
		// We keep track of the terrains matching this constraint and later
		// try to apply the rule only on their locations.
		size_t min_size = INT_MAX;
		t_translation::t_list min_types;
		constraint_set::const_iterator min_constraint = rule.constraints.end();

		for(constraint_set::const_iterator constraint = rule.constraints.begin();
				constraint != rule.constraints.end(); ++constraint) {

			const t_translation::t_match& match = constraint->second.terrain_types_match;
			t_translation::t_list matching_types;
			size_t constraint_size = 0;

			for (terrain_by_type_map::iterator type_it = terrain_by_type_.begin();
					 type_it != terrain_by_type_.end(); ++type_it) {

				const t_translation::t_terrain t = type_it->first;
				if (terrain_matches(t, match)) {
					const size_t match_size = type_it->second.size();
					constraint_size += match_size;
					if (constraint_size >= min_size) {
						break; // not a minimum, bail out
					}
					matching_types.push_back(t);
				}
			}

			if (constraint_size < min_size) {
				min_size = constraint_size;
				min_types = matching_types;
				min_constraint = constraint;
				if (min_size == 0) {
					// a constraint is never matched on this map;
					// we break with an empty type list
					break;
				}
			}
		}

		// NOTE: if min_types is not empty, we have found a valid min_constraint.
		for(t_translation::t_list::const_iterator t = min_types.begin();
				t != min_types.end(); ++t) {

			const std::vector<map_location>* locations = &terrain_by_type_[*t];

			for(std::vector<map_location>::const_iterator itor = locations->begin();
					itor != locations->end(); ++itor) {
				const map_location loc = itor->legacy_difference(min_constraint->second.loc);

				if(rule_matches(rule, loc, rule_index, min_constraint)) {
					apply_rule(rule, loc);
				}
			}
		}

		++rule_index;
	}
}