Example #1
File: gpmondb.c Project: karthijrk/gpdb
static bool gpdb_get_gpperfmon_owner(PGconn *conn, char *owner, size_t owner_length)
{
	ASSERT(conn);
	ASSERT(owner);

	PGresult *result = NULL;
	const char *query = "select pg_catalog.pg_get_userbyid(d.datdba) as "
						"owner from pg_catalog.pg_database d where "
						"d.datname = 'gpperfmon'";
	const char *errmsg = gpdb_exec_only(conn, &result, query);
	bool ret = true;

	if (errmsg != NULL)
	{
		gpmon_warning(FLINE, "GPDB error %s\n\tquery: %s\n", errmsg, query);
		ret = false;
	}
	else
	{
		if (PQntuples(result) > 0)
		{
			const char* owner_field = PQgetvalue(result, 0, 0);
			strncpy(owner, owner_field, owner_length);
			ret = true;
		}
		else
		{
			TR0(("could not find owner for 'gpperfmon' database\n"));
			ret = false;
		}
	}
	PQclear(result);
	return ret;
}
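
Note that strncpy does not NUL-terminate owner when owner_field fills the whole buffer, so callers must size the buffer generously or terminate it themselves. A minimal standalone sketch of the defensive copy a caller might use (copy_owner is a hypothetical helper, not from gpmondb.c):

#include <stdio.h>
#include <string.h>

/* Hypothetical helper (not from gpmondb.c): copy src into dst of size
 * dst_len and always leave a terminating NUL, which strncpy alone does
 * not guarantee when src fills the whole buffer. */
static void copy_owner(char *dst, const char *src, size_t dst_len)
{
	if (dst_len == 0)
		return;
	strncpy(dst, src, dst_len - 1);
	dst[dst_len - 1] = '\0';
}

int main(void)
{
	char owner[8];
	copy_owner(owner, "a_rather_long_role_name", sizeof(owner));
	printf("owner = '%s'\n", owner);	/* truncated but NUL-terminated */
	return 0;
}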
Example #2
File: gpmondb.c Project: karthijrk/gpdb
// Return 1 if not an appliance and 0 if it is an appliance
int get_appliance_hosts_and_add_to_hosts(apr_pool_t* tmp_pool, apr_hash_t* htab)
{
	// open devices.cnf and then start reading the data
	// populate all relevant hosts: Spidey0001, Spidey0002, EtlHost
	FILE* fd = fopen(PATH_TO_APPLIANCE_VERSION_FILE, "r");
	if (!fd)
	{
		TR0(("not an appliance ... not reading devices.cnf\n"));
		return 1;
	}
	fclose(fd);

	fd = fopen(PATH_TO_APPLAINCE_DEVICES_FILE, "r");
	if (!fd)
	{
		gpmon_warningx(FLINE, 0, "can not read %s, ignoring\n", PATH_TO_APPLAINCE_DEVICES_FILE);
		return 0;
	}

	char* line;
	char buffer[1024];

	while (NULL != fgets(buffer, sizeof(buffer), fd))
	{
		// remove new line
		line = gpmon_trim(buffer);
		process_line_in_devices_cnf(tmp_pool, htab, line);
	}

	fclose(fd);
	return 0;
}
Example #3
File: gpmondb.c Project: karthijrk/gpdb
static bool get_encoding_from_result(PGresult	*result,
									 char		*encoding,
									 size_t		encoding_len,
									 int		*encoding_num)
{
	ASSERT(result);
	ASSERT(encoding);
	ASSERT(encoding_num);
	if (PQntuples(result) > 0)
	{
		const char* encoding_str = PQgetvalue(result, 0, 0);
		*encoding_num = atoi(encoding_str);
		const char *encoding_item = find_encoding(*encoding_num);
		if (encoding_item)
		{
			strncpy(encoding, encoding_item, encoding_len);
		}
		else
		{
			gpmon_warning(FLINE, "GPDB bad encoding: %d\n", *encoding_num);
			return false;
		}
	}
	else
	{
		TR0(("could not find owner for 'gpperfmon' database\n"));
		return false;
	}
	return true;
}
Example #4
File: gpmondb.c Project: karthijrk/gpdb
// Return 1 if not a hadoop software-only cluster and 0 if it is a hadoop software-only cluster
int get_hadoop_hosts_and_add_to_hosts(apr_pool_t* tmp_pool, apr_hash_t* htab, mmon_options_t* opt)
{
	if (!opt->smon_hadoop_swonly_binfile)
	{
		TR0(("hadoop_smon_path not specified in gpmmon config. not processing hadoop nodes\n"));
		return 1;
	}

	char* smon_log_dir;
	char* hadoop_cluster_file;
	if (opt->smon_hadoop_swonly_logdir)
	{
		smon_log_dir = opt->smon_hadoop_swonly_logdir;
	}
	else
	{
		smon_log_dir = (char*)PATH_TO_HADOOP_SMON_LOGS;
	}
	if (opt->smon_hadoop_swonly_clusterfile)
	{
		hadoop_cluster_file = opt->smon_hadoop_swonly_clusterfile;
	}
	else
	{
		hadoop_cluster_file = (char*)DEFAULT_PATH_TO_HADOOP_HOST_FILE;
	}

	FILE* fd = fopen(hadoop_cluster_file, "r");
	if (!fd)
	{
		TR0(("not a hadoop software only cluster ... not reading %s\n", hadoop_cluster_file));
		return 1;
	}

	char* line;
	char buffer[1024];

	// process the hostlines
	while (NULL != fgets(buffer, sizeof(buffer), fd))
	{
		line = gpmon_trim(buffer); // remove newline
		process_line_in_hadoop_cluster_info(tmp_pool, htab, line, opt->smon_hadoop_swonly_binfile, smon_log_dir);
	}

	fclose(fd);
	return 0;
}
Example #5
File: gpsmon.c Project: phan-pivotal/gpdb
// Helper function to calculate cpu percentage during a period
static float calc_diff_percentage(sigar_uint64_t newvalue, sigar_uint64_t oldvalue, int total_diff, const char *itemname)
{
	float result = ((float) (newvalue - oldvalue) * 100 / total_diff);
	if (newvalue < oldvalue)
	{
		TR0(("calc_diff_percentage: new value %" APR_UINT64_T_FMT " is less than old value %" APR_UINT64_T_FMT " for metric %s; set to 0.\n",
				newvalue, oldvalue, itemname));
		result = 0.0;
	}
	else if (result > 100)
	{
		TR0(("calc_diff_percentage: new value %" APR_UINT64_T_FMT " old value %" APR_UINT64_T_FMT " total diff %d for metric %s; set to 100.\n",
				newvalue, oldvalue, total_diff, itemname));
		result = 100;
	}
	return result;
}
Example #6
File: gpmondb.c Project: karthijrk/gpdb
/*
 * Upgrade: alter distributed key of log_alert_history from logsegment to logtime
 */
void upgrade_log_alert_table_distributed_key(PGconn* conn)
{
	if (conn == NULL)
	{
		TR0(("Can't upgrade log_alert_history: conn is NULL\n"));
		return;
	}

	const char* qry = "SELECT d.nspname||'.'||a.relname as tablename, b.attname as distributed_key\
	    FROM pg_class  a\
	    INNER JOIN pg_attribute b on a.oid=b.attrelid\
	    INNER JOIN gp_distribution_policy c on a.oid = c.localoid\
	    INNER JOIN pg_namespace d on a.relnamespace = d.oid\
	    WHERE a.relkind = 'r' AND b.attnum = any(c.attrnums) AND a.relname = 'log_alert_history'";

	PGresult* result = NULL;
	const char* errmsg = gpdb_exec_only(conn, &result, qry);

	if (errmsg != NULL)
	{
		gpmon_warning(FLINE, "GPDB error %s\n\tquery: %s\n", errmsg, qry);
	}
	else
	{
		if (PQntuples(result) > 0)
		{
			// check if current distributed key is logsegment
			const char* current_distributed_key = PQgetvalue(result, 0, 1);
			if (current_distributed_key == NULL)
			{
				TR0(("could not find distributed key of log_alert_history\n"));
				PQclear(result);
				return;
			}
			if (strcmp(current_distributed_key, "logsegment") == 0)
			{
				TR0(("[INFO] log_alert_history: Upgrading log_alert_history table to use logsegment as distributed key\n"));
				qry = "alter table public.log_alert_history set distributed by (logtime);";
				gpdb_exec_ddl(conn, qry);
			}
		}
	}

	PQclear(result);
	return;
}
Example #7
File: gpsmon.c Project: phan-pivotal/gpdb
// Helper function to calculate the metric differences
static apr_uint64_t metric_diff_calc(sigar_uint64_t newval, apr_uint64_t oldval, const char *name_for_log, const char *value_name_for_log)
{
	apr_uint64_t diff;

	if (newval < oldval) // assume that the value was reset and we are starting over
	{
		TR0(("metric_diff_calc: new value %" APR_UINT64_T_FMT " is less than old value %" APR_UINT64_T_FMT " for %s metric %s; assume the value was reset and set diff to new value.\n",
				newval, oldval, name_for_log, value_name_for_log));
		diff = newval;
	}
	else
	{
		diff = newval - oldval;
	}
#if defined(rhel5_x86_64) || defined(rhel7_x86_64) || defined(rhel6_x86_64) || defined(suse10_x86_64)
	// Extra debug output on 64-bit builds to help track down the strange values we are seeing
	if (diff > 1000000000000000000)
	{
		TR0(("Crazy high value for diff! new value=%" APR_UINT64_T_FMT ", old value=%" APR_UINT64_T_FMT ", diff=%" APR_UINT64_T_FMT "  for %s metric %s; assume the value was reset and set diff to new value.\n",
				newval, oldval, name_for_log, value_name_for_log));
	}
#endif
	return diff;
}
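
For a quick feel of the reset handling above, here is a standalone version using plain stdint types instead of the sigar/apr typedefs (counter_diff is illustrative only, not the repo's function):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the reset handling in metric_diff_calc:
 * if the counter went backwards, assume it was reset and report the new
 * value itself as the delta. */
static uint64_t counter_diff(uint64_t newval, uint64_t oldval)
{
	return (newval < oldval) ? newval : newval - oldval;
}

int main(void)
{
	printf("%" PRIu64 "\n", counter_diff(150, 100));	/* 50 */
	printf("%" PRIu64 "\n", counter_diff(30, 100));		/* 30: counter was reset */
	return 0;
}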
Example #8
File: gpmondb.c Project: karthijrk/gpdb
// Drop partitions that are too old, if any exist.
static void drop_old_partitions(PGconn* conn, const char* tbl, mmon_options_t *opt)
{
	const int QRYBUFSIZ = 1024;
	PGresult* result = NULL;
	const char* errmsg;
	char qry[QRYBUFSIZ];

	const char* SELECT_QRYFMT = "SELECT partitiontablename, partitionrangestart FROM pg_partitions "
						        "WHERE tablename = '%s_history' "
								"ORDER BY partitionrangestart DESC OFFSET %d;";
	const char* DROP_QRYFMT   = "ALTER TABLE %s_history DROP PARTITION IF EXISTS FOR (%s);";

	int partition_age = opt->partition_age;

	if (partition_age <= 0)
		return;

	// partition_age + 1 because we always add 2 partitions for the boundary case
	snprintf(qry, QRYBUFSIZ, SELECT_QRYFMT, tbl, partition_age + 1);

	TR2(("drop partition: executing select query '%s\n'", qry));
	errmsg = gpdb_exec_only(conn, &result, qry);
	if (errmsg)
	{
		gpmon_warning(FLINE, "drop partition: select query '%s' response from server: %s\n", qry, errmsg);
	}
	else
	{
		int rowcount = PQntuples(result);
		int i = 0;
		for (; i < rowcount; i++)
		{
			PGresult* dropResult = NULL;
			char* partitiontablename  = PQgetvalue(result, i, 0);
			char* partitionrangestart = PQgetvalue(result, i, 1);
			snprintf(qry, QRYBUFSIZ, DROP_QRYFMT, tbl, partitionrangestart);
			TR0(("Dropped partition table '%s\n'", partitiontablename));
			errmsg = gpdb_exec_only(conn, &dropResult, qry);
			PQclear(dropResult);
			if (errmsg)
			{
				gpmon_warning(FLINE, "drop partion: drop query '%s' response from server: %s\n", qry, errmsg);
				break;
			}
		}
	}
	PQclear(result);
}
Example #9
File: gpmondb.c Project: karthijrk/gpdb
static void gpdb_change_alert_table_owner(PGconn *conn, const char *owner)
{
	ASSERT(conn);
	ASSERT(owner);

	// change the owner away from gpmon; otherwise gpperfmon_install
	// might quit with an error when it executes 'drop role gpmon if exists'
	const char* query_pattern = "ALTER TABLE public.log_alert_history owner to %s;"
								"ALTER EXTERNAL TABLE public.log_alert_tail "
								"owner to %s;ALTER EXTERNAL TABLE public.log_alert_now"
								" owner to %s;";
	const int querybufsize = 512;
	char query[querybufsize];
	snprintf(query, querybufsize, query_pattern, owner, owner, owner);
	TR0(("change owner to %s\n", owner));
	gpdb_exec_ddl(conn, query);
}
Example #10
File: gpsmon.c Project: phan-pivotal/gpdb
static void gx_exit(const char* reason)
{
	TR0(("exit %s\n", reason ? reason : "1"));
	exit(reason ? 1 : 0);
}
Example #11
File: gpsmon.c Project: phan-pivotal/gpdb
void gx_main(int port, apr_int64_t signature)
{
	/* set up our log files */
	if (opt.log_dir)
	{
		mkdir(opt.log_dir, S_IRWXU | S_IRWXG);

		if (0 != chdir(opt.log_dir))
		{
			/* Invalid dir for log file, try home dir */
			char *home_dir = NULL;
			if (0 == apr_env_get(&home_dir, "HOME", gx.pool))
			{
				if (home_dir)
					chdir(home_dir);
			}
		}
	}

	update_log_filename();
	freopen(log_filename, "w", stdout);
	setlinebuf(stdout);

	if (!get_and_allocate_hostname())
		gpsmon_fatalx(FLINE, 0, "failed to allocate memory for hostname");
	TR0(("HOSTNAME = '%s'\n", gx.hostname));



	// first chance to write to the log file
	TR2(("signature = %" FMT64 "\n", signature));
	TR1(("detected %d cpu cores\n", number_cpu_cores));

	setup_gx(port, signature);
	setup_sigar();
	setup_udp();
	setup_tcp();

	gx.tick = 0;
	for (;;)
	{
		struct timeval tv;
		apr_hash_index_t* hi;

		/* serve events every 2 seconds */
		gx.tick++;
		gx.now = time(NULL);
		tv.tv_sec = 2;
		tv.tv_usec = 0;

		/* event dispatch blocks for a certain time based on the seconds given
		 * to event_loopexit */
		if (-1 == event_loopexit(&tv))
		{
			gpmon_warningx(FLINE, APR_FROM_OS_ERROR(errno),
					"event_loopexit failed");
		}

		if (-1 == event_dispatch())
		{
			gpsmon_fatalx(FLINE, APR_FROM_OS_ERROR(errno), "event_dispatch failed");
		}

		/* get pid metrics */
		for (hi = apr_hash_first(0, gx.qexectab); hi; hi = apr_hash_next(hi))
		{
			void* vptr;
			gpmon_qexec_t* rec;
			apr_hash_this(hi, 0, 0, &vptr);
			rec = vptr;
			get_pid_metrics(rec->key.hash_key.pid,
					rec->key.tmid,
					rec->key.ssid,
					rec->key.ccnt);
		}

		/* check log size */
		if (gx.tick % 60 == 0)
		{
			apr_finfo_t finfo;
			if (0 == apr_stat(&finfo, log_filename, APR_FINFO_SIZE, gx.pool))
			{
				if (opt.max_log_size != 0 && finfo.size > opt.max_log_size)
				{
					update_log_filename();
					freopen(log_filename, "w", stdout);
					setlinebuf(stdout);
				}
			}
		}
	}
}
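
The loop above rotates the log purely by size: every 60 iterations of the 2-second loop it stats the current log file and, when it has grown past opt.max_log_size, reopens stdout onto a fresh filename and restores line buffering. A standalone sketch of that check with plain POSIX calls (rotate_log_if_needed and its parameters are illustrative stand-ins for update_log_filename()/apr_stat() in the real code):

#include <stdio.h>
#include <sys/stat.h>

/* Illustrative sketch of the size check in the loop above: if the log file
 * has grown past max_size bytes, reopen stdout onto the given (new) path
 * and restore line buffering. Not the repo's helpers. */
static void rotate_log_if_needed(const char *current_path, const char *next_path, long max_size)
{
	struct stat st;

	if (max_size <= 0)
		return;				/* rotation disabled */
	if (stat(current_path, &st) != 0)
		return;				/* cannot stat, skip this round */
	if (st.st_size <= max_size)
		return;				/* still under the limit */

	if (freopen(next_path, "w", stdout) != NULL)
		setlinebuf(stdout);		/* keep line-buffered output, as gx_main does */
}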
Example #12
File: gpmondb.c Project: karthijrk/gpdb
void gpdb_get_hostlist(int* hostcnt, host_t** host_table, apr_pool_t* global_pool, mmon_options_t* opt)
{
	apr_pool_t* pool;
	PGconn* conn = 0;
	PGresult* result = 0;
	int rowcount, i;
	unsigned int unique_hosts = 0;
	apr_hash_t* htab;
	struct hostinfo_holder_t* hostinfo_holder = NULL;
	host_t* hosts = NULL;
	int e;

	// 0 -- hostname, 1 -- address, 2 -- is_master, 3 -- datadir
	const char *QUERY = "SELECT distinct hostname, address, case when content < 0 then 1 else 0 end as is_master, MAX(fselocation) as datadir FROM pg_filespace_entry "
			    "JOIN gp_segment_configuration on (dbid = fsedbid) WHERE fsefsoid = (select oid from pg_filespace where fsname='pg_system') "
		  	    "GROUP BY (hostname, address, is_master) order by hostname";

	if (0 != (e = apr_pool_create_alloc(&pool, NULL)))
	{
		gpmon_fatalx(FLINE, e, "apr_pool_create_alloc failed");
	}

	const char* errmsg = gpdb_exec(&conn, &result, QUERY);

	TR2((QUERY));
	TR2(("\n"));

	if (errmsg)
	{
		gpmon_warning(FLINE, "GPDB error %s\n\tquery: %s\n", errmsg, QUERY);
	}
	else
	{
		// hash of hostnames to addresses
		htab = apr_hash_make(pool);

		rowcount = PQntuples(result);

		for (i = 0; i < rowcount; i++)
		{
			char* curr_hostname = PQgetvalue(result, i, 0);

			hostinfo_holder = apr_hash_get(htab, curr_hostname, APR_HASH_KEY_STRING);

			if (!hostinfo_holder)
			{
				hostinfo_holder = apr_pcalloc(pool, sizeof(struct hostinfo_holder_t));
				CHECKMEM(hostinfo_holder);

				apr_hash_set(htab, curr_hostname, APR_HASH_KEY_STRING, hostinfo_holder);

				hostinfo_holder->hostname = curr_hostname;
				hostinfo_holder->is_master = atoi(PQgetvalue(result, i, 2));
				hostinfo_holder->datadir = PQgetvalue(result, i, 3);

				// use permanent memory for address list -- stored for duration

				// populate 1st on list and save to head and tail
				hostinfo_holder->addressinfo_head = hostinfo_holder->addressinfo_tail = calloc(1, sizeof(addressinfo_holder_t));
				CHECKMEM(hostinfo_holder->addressinfo_tail);

				// first is the hostname
				hostinfo_holder->addressinfo_tail->address = strdup(hostinfo_holder->hostname);
				CHECKMEM(hostinfo_holder->addressinfo_tail->address);


				// add a 2nd to the list
				hostinfo_holder->addressinfo_tail->next = calloc(1, sizeof(addressinfo_holder_t));
				CHECKMEM(hostinfo_holder->addressinfo_tail->next);
				hostinfo_holder->addressinfo_tail = hostinfo_holder->addressinfo_tail->next;

				// second is address
				hostinfo_holder->addressinfo_tail->address = strdup(PQgetvalue(result, i, 1));
				CHECKMEM(hostinfo_holder->addressinfo_tail->address);

				// one for hostname one for address
				hostinfo_holder->address_count = 2;
			}
			else
			{
				// permanent memory for address list -- stored for duration
				hostinfo_holder->addressinfo_tail->next = calloc(1, sizeof(addressinfo_holder_t));
				CHECKMEM(hostinfo_holder->addressinfo_tail->next);

				hostinfo_holder->addressinfo_tail = hostinfo_holder->addressinfo_tail->next;

				// permanent memory for address list -- stored for duration
				hostinfo_holder->addressinfo_tail->address = strdup(PQgetvalue(result, i, 1));
				CHECKMEM(hostinfo_holder->addressinfo_tail->address);

				hostinfo_holder->address_count++;
			}

		}

		// if we have any appliance specific hosts such as hadoop nodes add them to the hash table
		if (get_appliance_hosts_and_add_to_hosts(pool, htab))
		{
			TR0(("Not an appliance: checking for SW Only hadoop hosts.\n"));
			get_hadoop_hosts_and_add_to_hosts(pool, htab, opt); // Not an appliance, so check for SW only hadoop nodes.
		}

		unique_hosts = apr_hash_count(htab);

		// allocate memory for host list (not freed ever)
		hosts = calloc(unique_hosts, sizeof(host_t));

		apr_hash_index_t* hi;
		void* vptr;
		int hostcounter = 0;
		for (hi = apr_hash_first(0, htab); hi; hi = apr_hash_next(hi))
		{
			// sanity check
			if (hostcounter >= unique_hosts)
			{
				gpmon_fatalx(FLINE, 0, "host counter exceeds unique hosts");
			}

			apr_hash_this(hi, 0, 0, &vptr);
			hostinfo_holder = vptr;

			hosts[hostcounter].hostname = strdup(hostinfo_holder->hostname);
			hosts[hostcounter].data_dir = strdup(hostinfo_holder->datadir);
			if (hostinfo_holder->smon_dir)
			{
				hosts[hostcounter].smon_bin_location = strdup(hostinfo_holder->smon_dir);
			}
			hosts[hostcounter].is_master = hostinfo_holder->is_master;
			hosts[hostcounter].addressinfo_head = hostinfo_holder->addressinfo_head;
			hosts[hostcounter].addressinfo_tail = hostinfo_holder->addressinfo_tail;
			hosts[hostcounter].address_count = hostinfo_holder->address_count;
			hosts[hostcounter].connection_hostname.current = hosts[hostcounter].addressinfo_head;
			hosts[hostcounter].snmp_hostname.current = hosts[hostcounter].addressinfo_head;

			if (hostinfo_holder->is_hdm)
				hosts[hostcounter].is_hdm = 1;

			if (hostinfo_holder->is_hdw)
				hosts[hostcounter].is_hdw = 1;

			if (hostinfo_holder->is_etl)
				hosts[hostcounter].is_etl = 1;

			if (hostinfo_holder->is_hbw)
				hosts[hostcounter].is_hbw = 1;

			if (hostinfo_holder->is_hdc)
				hosts[hostcounter].is_hdc = 1;

			apr_thread_mutex_create(&hosts[hostcounter].mutex, APR_THREAD_MUTEX_UNNESTED, global_pool); // use the global pool so the mutexes last beyond this function

			hostcounter++;
		}

		*hostcnt = hostcounter;
	}

	apr_pool_destroy(pool);
	PQclear(result);
	PQfinish(conn);

	if (!hosts || *hostcnt < 1)
	{
		gpmon_fatalx(FLINE, 0, "no valid hosts found");
	}

	*host_table = hosts;
}
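
The per-host address list built above is a plain singly linked list with head/tail pointers, allocated with calloc/strdup so it outlives the temporary APR pool. A minimal standalone sketch of that append pattern (addr_node and append_address are simplified stand-ins, not the repo's addressinfo_holder_t):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the address list above: append one node at the
 * tail and advance the tail pointer. */
typedef struct addr_node
{
	char *address;
	struct addr_node *next;
} addr_node;

static addr_node *append_address(addr_node **head, addr_node **tail, const char *addr)
{
	addr_node *node = calloc(1, sizeof(addr_node));
	if (!node)
		return NULL;
	node->address = strdup(addr);
	if (*tail)
		(*tail)->next = node;	/* link after the current tail */
	else
		*head = node;		/* first node becomes the head */
	*tail = node;
	return node;
}

int main(void)
{
	addr_node *head = NULL;
	addr_node *tail = NULL;
	addr_node *p;

	append_address(&head, &tail, "sdw1");		/* hostname first */
	append_address(&head, &tail, "10.0.0.1");	/* then its address */

	for (p = head; p; p = p->next)
		printf("%s\n", p->address);
	return 0;
}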
Example #13
File: gpmondb.c Project: karthijrk/gpdb
// Create or repair the log alert tables, to mitigate upgrade hassle.
void create_log_alert_table()
{
	PGconn *conn = PQconnectdb(GPDB_CONNECTION_STRING);
	if (PQstatus(conn) != CONNECTION_OK)
	{
		gpmon_warning(FLINE,
			"error creating gpdb client connection to dynamically "
			"check/create gpperfmon partitions: %s",
		PQerrorMessage(conn));
		PQfinish(conn);
		return;
	}

	const char *qry= "SELECT tablename FROM pg_tables "
					"WHERE tablename = 'log_alert_history' "
					"AND schemaname = 'public' ;";

	const bool has_history_table = gpdb_exec_search_for_at_least_one_row(qry, conn);

	char owner[MAX_OWNER_LENGTH] = {};
	bool success_get_owner = gpdb_get_gpperfmon_owner(conn, owner, sizeof(owner));

	// log_alert_history: create table if not exist or alter it to use correct
	// distribution key.
	if (!has_history_table)
	{
		qry = "BEGIN; CREATE TABLE public.log_alert_history (LIKE "
			"gp_toolkit.__gp_log_master_ext) DISTRIBUTED BY (logtime) "
			"PARTITION BY range (logtime)(START (date '2010-01-01') "
			"END (date '2010-02-01') EVERY (interval '1 month')); COMMIT;";

		TR0(("sounds like you have just upgraded your database, creating"
			 " newer tables\n"));

		gpdb_exec_ddl(conn, qry);
	}
	else
	{
		/*
		* Upgrade: alter distributed key of log_alert_history from logsegment to logtime
		*/
		upgrade_log_alert_table_distributed_key(conn);
	}

	// log_alert_now/log_alert_tail: change to use 'gpperfmoncat.sh' from 'iconv/cat' to handle
	// encoding issue.
	if (recreate_alert_tables_if_needed(conn, owner))
	{
		if (success_get_owner)
		{
			gpdb_change_alert_table_owner(conn, owner);
		}
	}
	else
	{
		TR0(("recreate alert_tables failed\n"));
	}

	PQfinish(conn);
	return;
}
Example #14
File: gpmondb.c Project: karthijrk/gpdb
static apr_status_t check_partition(const char* tbl, apr_pool_t* pool, PGconn* conn, mmon_options_t *opt)
{
	struct tm tm;
	time_t now;

	unsigned short year[3];
	unsigned char month[3];

	TR0(("check partitions on %s_history\n", tbl));

	if (!conn)
		return APR_ENOMEM;

	now = time(NULL);
	if (!localtime_r(&now, &tm))
	{
		gpmon_warning(FLINE, "error in check_partition getting current time\n");
		return APR_EGENERAL;
	}

	year[0] = 1900 + tm.tm_year;
	month[0] = tm.tm_mon+1;

	if (year[0] < 1 || month[0] < 1 || year[0] > 2030 || month[0] > 12)
	{
		gpmon_warning(FLINE, "invalid current month/year in check_partition %u/%u\n", month, year);
		return APR_EGENERAL;
	}

	if (month[0] < 11)
	{
		month[1] = month[0] + 1;
		month[2] = month[0] + 2;

		year[1] = year[0];
		year[2] = year[0];
	}
	else if (month[0] == 11)
	{
		month[1] = 12;
		month[2] = 1;

		year[1] = year[0];
		year[2] = year[0] + 1;
	}
	else
	{
		month[1] = 1;
		month[2] = 2;

		year[1] = year[0] + 1;
		year[2] = year[0] + 1;
	}

	check_and_add_partition(conn, tbl, year[0], month[0], year[1], month[1]);
	check_and_add_partition(conn, tbl, year[1], month[1], year[2], month[2]);

	drop_old_partitions(conn, tbl, opt);

	TR0(("check partitions on %s_history done\n", tbl));
	return APR_SUCCESS;
}
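
The month arithmetic above can also be expressed as a small rollover loop; an equivalent standalone sketch (next_two_months is a hypothetical helper, not part of gpmondb.c):

#include <stdio.h>

/* Hypothetical helper (not part of gpmondb.c): given the current year/month
 * in slot 0, fill slots 1 and 2 with the next two months, wrapping December
 * into January of the following year. Equivalent to the three-way branch in
 * check_partition above. */
static void next_two_months(unsigned short year[3], unsigned char month[3])
{
	int i;

	for (i = 1; i < 3; i++)
	{
		month[i] = month[i - 1] + 1;
		year[i] = year[i - 1];
		if (month[i] > 12)
		{
			month[i] = 1;
			year[i] = year[i - 1] + 1;
		}
	}
}

int main(void)
{
	unsigned short year[3] = { 2015, 0, 0 };
	unsigned char month[3] = { 12, 0, 0 };

	next_two_months(year, month);
	printf("%u/%u -> %u/%u, %u/%u\n",
		(unsigned)year[0], (unsigned)month[0],
		(unsigned)year[1], (unsigned)month[1],
		(unsigned)year[2], (unsigned)month[2]);	/* 2015/12 -> 2016/1, 2016/2 */
	return 0;
}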