int task_execute(oph_operator_struct * handle)
{
	if (!handle || !handle->operator_handle) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Null Handle\n");
		logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_NULL_OPERATOR_HANDLE);
		return OPH_ANALYTICS_OPERATOR_NULL_OPERATOR_HANDLE;
	}
	//Only master process has to continue
	if (handle->proc_rank != 0)
		return OPH_ANALYTICS_OPERATOR_SUCCESS;

	ophidiadb *oDB = &((OPH_CUBEIO_operator_handle *) handle->operator_handle)->oDB;
	int direction = ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->direction;
	char *datacube_name = ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->datacube_name;
	char **objkeys = ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->objkeys;
	int objkeys_num = ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->objkeys_num;

	char operation[OPH_ODB_CUBE_OPERATOR_SIZE];
	memset(operation, 0, OPH_ODB_CUBE_OPERATOR_SIZE);
	char parentcube_name[OPH_COMMON_BUFFER_LEN];
	memset(parentcube_name, 0, OPH_COMMON_BUFFER_LEN);
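	// Scratch buffer shared with the hierarchy-walk helpers below; its address is passed so the helpers
	// can presumably resize it, while ownership stays here (hence the free() on every exit path).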
	int *type = (int *) malloc(sizeof(int));
	if (!type) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Error allocating memory\n");
		logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "type buffer");
		return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
	}
	type[0] = 0;

	oph_odb_datacube cube;
	oph_odb_cube_init_datacube(&cube);

	//retrieve input datacube
	if (oph_odb_cube_retrieve_datacube(oDB, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_datacube, &cube)) {
		oph_odb_cube_free_datacube(&cube);
		free(type);
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while retrieving input datacube\n");
		logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_CUBEIO_DATACUBE_READ_ERROR);
		return OPH_ANALYTICS_OPERATOR_MYSQL_ERROR;
	}

	char source_buffer[OPH_PID_SIZE];
	char source_buffer_json[OPH_PID_SIZE];
	oph_odb_source src;
	if (cube.id_source) {
		//retrieve the source of the input datacube
		if (oph_odb_cube_retrieve_source(oDB, cube.id_source, &src)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while retrieving source name\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_CUBEIO_SOURCE_READ_ERROR);
			oph_odb_cube_free_datacube(&cube);
			free(type);
			return OPH_ANALYTICS_OPERATOR_MYSQL_ERROR;
		} else {
			snprintf(source_buffer, OPH_PID_SIZE, "- SOURCE: %s", src.uri);
			snprintf(source_buffer_json, OPH_PID_SIZE, "%s", src.uri);
		}
	} else {
		source_buffer[0] = 0;
		source_buffer_json[0] = 0;
	}

	oph_odb_cube_free_datacube(&cube);

	char *tmp_uri = NULL;
	if (oph_pid_get_uri(&tmp_uri)) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to retrieve web server URI.\n");
		logging(LOG_WARNING, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_PID_URI_ERROR);
		free(type);
		if (tmp_uri)
			free(tmp_uri);
		return OPH_ANALYTICS_OPERATOR_UTILITY_ERROR;
	}
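	// JSON output objects: a provenance grid (one row per edge: input cube, operation, output cube, source)
	// and a provenance graph whose nodes carry the cube PID and its source.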

	if (oph_json_is_objkey_printable(objkeys, objkeys_num, OPH_JSON_OBJKEY_CUBEIO)) {
		int num_fields = 4;
		char *keys[4] = { "INPUT CUBE", "OPERATION", "OUTPUT CUBE", "SOURCE" };
		char *fieldtypes[4] = { "string", "string", "string", "string" };
		if (oph_json_add_grid(handle->operator_json, OPH_JSON_OBJKEY_CUBEIO, "Cube Provenance", NULL, keys, num_fields, fieldtypes, num_fields)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "ADD GRID error\n");
			logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "output grid");
			free(type);
			if (tmp_uri)
				free(tmp_uri);
			return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
		}
	}
	if (oph_json_is_objkey_printable(objkeys, objkeys_num, OPH_JSON_OBJKEY_CUBEIO_GRAPH)) {
		int num_fields = 2;
		char *keys[2] = { "PID", "SOURCE" };
		if (oph_json_add_graph(handle->operator_json, OPH_JSON_OBJKEY_CUBEIO_GRAPH, 1, "Cube Provenance Graph", NULL, keys, num_fields)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "ADD GRAPH error\n");
			logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "output graph");
			free(type);
			if (tmp_uri)
				free(tmp_uri);
			return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
		}
	}

	if (direction != OPH_CUBEIO_BRANCH_OUT) {
		//Retrieve upper hierarchy
		if (oph_json_is_objkey_printable(objkeys, objkeys_num, OPH_JSON_OBJKEY_CUBEIO_GRAPH)) {
			unsigned int nn, nnn;
			int found = 0;
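			// Scan the nodes already in the provenance graph to avoid adding the current cube twice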
			for (nn = 0; nn < handle->operator_json->response_num; nn++) {
				if (!strcmp(handle->operator_json->response[nn].objkey, OPH_JSON_OBJKEY_CUBEIO_GRAPH)) {
					for (nnn = 0; nnn < ((oph_json_obj_graph *) (handle->operator_json->response[nn].objcontent))[0].nodevalues_num1; nnn++) {
						if (!strcmp(datacube_name, ((oph_json_obj_graph *) (handle->operator_json->response[nn].objcontent))[0].nodevalues[nnn][0])) {
							found = 1;
							break;
						}
					}
					break;
				}
			}
			char *my_row[2] = { datacube_name, source_buffer_json };
			if (!found) {
				if (oph_json_add_graph_node(handle->operator_json, OPH_JSON_OBJKEY_CUBEIO_GRAPH, my_row)) {
					pmesg(LOG_ERROR, __FILE__, __LINE__, "ADD GRAPH NODE error\n");
					logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "output graph node");
					free(type);
					if (tmp_uri)
						free(tmp_uri);
					return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
				}
			}
		}
		if (_oph_cubeio_print_parents
		    (tmp_uri, oDB, 0, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_datacube, &type, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container,
		     &operation, handle->operator_json, objkeys, objkeys_num, datacube_name, &parentcube_name)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error launching parents search function\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_CUBEIO_HIERARCHY_FUNCT_ERROR, "parent",
				datacube_name);
			free(type);
			if (tmp_uri)
				free(tmp_uri);
			return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
		}
		printf("--> %s (%s) %s\n", datacube_name, operation, source_buffer);

		if (oph_json_is_objkey_printable(objkeys, objkeys_num, OPH_JSON_OBJKEY_CUBEIO)) {
			char *my_row[4] = { parentcube_name, operation, datacube_name, source_buffer_json };
			if (oph_json_add_grid_row(handle->operator_json, OPH_JSON_OBJKEY_CUBEIO, my_row)) {
				pmesg(LOG_ERROR, __FILE__, __LINE__, "ADD GRID ROW error\n");
				logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "output grid row");
				free(type);
				if (tmp_uri)
					free(tmp_uri);
				return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
			}
		}

	}
	if (direction != OPH_CUBEIO_BRANCH_IN) {
		//Retrieve lower hierarchy
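		// If only this branch was requested (direction != 0, presumably "descendants only"), the current cube
		// has not been emitted by the ancestors branch above, so print it here as the ROOT row/node.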
		if (direction != 0) {
			printf("--> %s (%s) %s\n", datacube_name, "ROOT", source_buffer);
			if (oph_json_is_objkey_printable(objkeys, objkeys_num, OPH_JSON_OBJKEY_CUBEIO)) {
				char *my_row[4] = { "", "ROOT", datacube_name, source_buffer_json };
				if (oph_json_add_grid_row(handle->operator_json, OPH_JSON_OBJKEY_CUBEIO, my_row)) {
					pmesg(LOG_ERROR, __FILE__, __LINE__, "ADD GRID ROW error\n");
					logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "output grid row");
					free(type);
					if (tmp_uri)
						free(tmp_uri);
					return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
				}
			}
			if (oph_json_is_objkey_printable(objkeys, objkeys_num, OPH_JSON_OBJKEY_CUBEIO_GRAPH)) {
				unsigned int nn, nnn;
				int found = 0;
				for (nn = 0; nn < handle->operator_json->response_num; nn++) {
					if (!strcmp(handle->operator_json->response[nn].objkey, OPH_JSON_OBJKEY_CUBEIO_GRAPH)) {
						for (nnn = 0; nnn < ((oph_json_obj_graph *) (handle->operator_json->response[nn].objcontent))[0].nodevalues_num1; nnn++) {
							if (!strcmp(datacube_name, ((oph_json_obj_graph *) (handle->operator_json->response[nn].objcontent))[0].nodevalues[nnn][0])) {
								found = 1;
								break;
							}
						}
						break;
					}
				}
				char *my_row[2] = { datacube_name, source_buffer_json };
				if (!found) {
					if (oph_json_add_graph_node(handle->operator_json, OPH_JSON_OBJKEY_CUBEIO_GRAPH, my_row)) {
						pmesg(LOG_ERROR, __FILE__, __LINE__, "ADD GRAPH NODE error\n");
						logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_CUBEIO_MEMORY_ERROR, "output graph node");
						free(type);
						if (tmp_uri)
							free(tmp_uri);
						return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
					}
				}
			}
		}
		if (_oph_cubeio_print_children
		    (tmp_uri, oDB, 0, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_datacube, &type, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container,
		     handle->operator_json, objkeys, objkeys_num)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error launching children search function\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_CUBEIO_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_CUBEIO_HIERARCHY_FUNCT_ERROR, "children",
				datacube_name);
			free(type);
			if (tmp_uri)
				free(tmp_uri);
			return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
		}
	}
	if (tmp_uri)
		free(tmp_uri);
	free(type);

	return OPH_ANALYTICS_OPERATOR_SUCCESS;
}
int task_init(oph_operator_struct * handle)
{
	if (!handle || !handle->operator_handle) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Null Handle\n");
		logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_SPLIT_NULL_OPERATOR_HANDLE);
		return OPH_ANALYTICS_OPERATOR_NULL_OPERATOR_HANDLE;
	}
	//For error checking
	char id_string[4][OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE];
	memset(id_string, 0, sizeof(id_string));
	id_string[0][0] = 0;

	if (handle->proc_rank == 0) {
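		// Only the master rank touches the OphidiaDB; any failure below jumps to __OPH_EXIT_1, leaving
		// id_string empty so that every rank bails out after the MPI_Bcast.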
		ophidiadb *oDB = &((OPH_SPLIT_operator_handle *) handle->operator_handle)->oDB;
		oph_odb_datacube cube;
		oph_odb_cube_init_datacube(&cube);

		int datacube_id = ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_datacube;

		//retrieve input datacube
		if (oph_odb_cube_retrieve_datacube(oDB, datacube_id, &cube)) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while retrieving input datacube\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_DATACUBE_READ_ERROR);
			goto __OPH_EXIT_1;
		}
		//Bind the new datacube to the output container and scale the number of fragments per DB
		cube.id_container = ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_container;
		cube.fragmentxdb = cube.fragmentxdb * ((OPH_SPLIT_operator_handle *) handle->operator_handle)->split_number;

		//For new fragments, tuplexfragment is floor of old tuplexfragment/split_number; only 'remainder' fragments have one more tuple
		((OPH_SPLIT_operator_handle *) handle->operator_handle)->new_tuplexfragment = cube.tuplexfragment / ((OPH_SPLIT_operator_handle *) handle->operator_handle)->split_number;
		((OPH_SPLIT_operator_handle *) handle->operator_handle)->new_remainder = cube.tuplexfragment % ((OPH_SPLIT_operator_handle *) handle->operator_handle)->split_number;
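		// Example: tuplexfragment = 10 and split_number = 3 give new_tuplexfragment = 3 and new_remainder = 1,
		// i.e. one output fragment holds 4 tuples and the other two hold 3 (the catalogued ceiling below is 4).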

		//Check that the split number does not exceed the number of tuples per fragment
		if (cube.tuplexfragment < ((OPH_SPLIT_operator_handle *) handle->operator_handle)->split_number) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to split datacube in more than %d parts.\n", cube.tuplexfragment);
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_SPLITTING_NUMBER_ERROR,
				cube.tuplexfragment);
			goto __OPH_EXIT_1;
		}
		//Store in the OphidiaDB the ceiling value (nearest integer >= tuplexfragment/split_number)
		cube.tuplexfragment = (int) ceilf((float) cube.tuplexfragment / (float) ((OPH_SPLIT_operator_handle *) handle->operator_handle)->split_number);

		//Compute new fragment relative index set
		int id_number;
		char *tmp = cube.frag_relative_index_set;
		if (oph_ids_count_number_of_ids(cube.frag_relative_index_set, &id_number)) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to get total number of IDs\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_RETREIVE_IDS_ERROR);
			goto __OPH_EXIT_1;
		}
		//Copy fragment id relative index set 
		if (!(((OPH_SPLIT_operator_handle *) handle->operator_handle)->fragment_ids = (char *) strndup(cube.frag_relative_index_set, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE))) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error allocating memory\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_MEMORY_ERROR_INPUT, "fragment ids");
			goto __OPH_EXIT_1;
		}
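		// Rebuild the fragment relative index set so that it presumably covers indexes 1 .. id_number * split_number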

		if (oph_ids_create_new_id_string(&tmp, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE, 1, id_number * ((OPH_SPLIT_operator_handle *) handle->operator_handle)->split_number)) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to extract fragment relative index set\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_CREATE_ID_STRING_ERROR);
			goto __OPH_EXIT_1;
		}
		//New fields
		cube.id_source = 0;
		cube.level++;
		if (((OPH_SPLIT_operator_handle *) handle->operator_handle)->description)
			snprintf(cube.description, OPH_ODB_CUBE_DESCRIPTION_SIZE, "%s", ((OPH_SPLIT_operator_handle *) handle->operator_handle)->description);
		else
			*cube.description = 0;

		//Insert new datacube
		if (oph_odb_cube_insert_into_datacube_partitioned_tables(oDB, &cube, &(((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube))) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to update datacube table\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_DATACUBE_INSERT_ERROR);
			goto __OPH_EXIT_1;
		}
		oph_odb_cube_free_datacube(&cube);

		oph_odb_cubehasdim *cubedims = NULL;
		int number_of_dimensions = 0;
		int last_insertd_id = 0;
		int l;

		//Read old cube - dimension relation rows
		if (oph_odb_cube_retrieve_cubehasdim_list(oDB, datacube_id, &cubedims, &number_of_dimensions)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to retreive datacube - dimension relations.\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_CUBEHASDIM_READ_ERROR);
			free(cubedims);
			goto __OPH_EXIT_1;
		}
		// Copy the dimensions when the output cube has to be stored in a different container
		if (((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container != ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_container) {
			oph_odb_dimension dim[number_of_dimensions];
			oph_odb_dimension_instance dim_inst[number_of_dimensions];
			for (l = 0; l < number_of_dimensions; ++l) {
				if (oph_odb_dim_retrieve_dimension_instance(oDB, cubedims[l].id_dimensioninst, &(dim_inst[l]), datacube_id)) {
					pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in reading dimension information.\n");
					logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_READ_ERROR);
					free(cubedims);
					goto __OPH_EXIT_1;
				}
				if (oph_odb_dim_retrieve_dimension(oDB, dim_inst[l].id_dimension, &(dim[l]), datacube_id)) {
					pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in reading dimension information.\n");
					logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_READ_ERROR);
					free(cubedims);
					goto __OPH_EXIT_1;
				}
			}

			oph_odb_db_instance db_;
			oph_odb_db_instance *db = &db_;
			if (oph_dim_load_dim_dbinstance(db)) {
				pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while loading dimension db paramters\n");
				logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_LOAD_ERROR);
				oph_dim_unload_dim_dbinstance(db);
				free(cubedims);
				goto __OPH_EXIT_1;
			}
			if (oph_dim_connect_to_dbms(db->dbms_instance, 0)) {
				pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while connecting to dimension dbms\n");
				logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_CONNECT_ERROR);
				oph_dim_disconnect_from_dbms(db->dbms_instance);
				oph_dim_unload_dim_dbinstance(db);
				free(cubedims);
				goto __OPH_EXIT_1;
			}
			if (oph_dim_use_db_of_dbms(db->dbms_instance, db)) {
				pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while opening dimension db\n");
				logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_USE_DB_ERROR);
				oph_dim_disconnect_from_dbms(db->dbms_instance);
				oph_dim_unload_dim_dbinstance(db);
				free(cubedims);
				goto __OPH_EXIT_1;
			}

			char index_dimension_table_name[OPH_COMMON_BUFFER_LEN], label_dimension_table_name[OPH_COMMON_BUFFER_LEN], operation[OPH_COMMON_BUFFER_LEN];
			snprintf(index_dimension_table_name, OPH_COMMON_BUFFER_LEN, OPH_DIM_TABLE_NAME_MACRO, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container);
			snprintf(label_dimension_table_name, OPH_COMMON_BUFFER_LEN, OPH_DIM_TABLE_LABEL_MACRO, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container);
			char o_index_dimension_table_name[OPH_COMMON_BUFFER_LEN], o_label_dimension_table_name[OPH_COMMON_BUFFER_LEN];
			snprintf(o_index_dimension_table_name, OPH_COMMON_BUFFER_LEN, OPH_DIM_TABLE_NAME_MACRO, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_container);
			snprintf(o_label_dimension_table_name, OPH_COMMON_BUFFER_LEN, OPH_DIM_TABLE_LABEL_MACRO, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_container);

			char *dim_row, *cl_value;
			int compressed = 0;
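			// For each dimension: copy its label and index arrays into the output container's dimension
			// tables and register a new dimension instance bound to the output datacube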
			for (l = 0; l < number_of_dimensions; ++l) {
				if (!dim_inst[l].fk_id_dimension_index) {
					pmesg(LOG_WARNING, __FILE__, __LINE__, "Dimension FK not set in cubehasdim.\n");
					break;
				}

				if (dim_inst[l].size) {
					// Load input labels
					dim_row = NULL;
					if (oph_dim_read_dimension_data(db, index_dimension_table_name, dim_inst[l].fk_id_dimension_index, MYSQL_DIMENSION, compressed, &dim_row) || !dim_row) {
						pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in reading a row from dimension table.\n");
						logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_READ_ERROR);
						if (dim_row)
							free(dim_row);
						oph_dim_disconnect_from_dbms(db->dbms_instance);
						oph_dim_unload_dim_dbinstance(db);
						free(cubedims);
						goto __OPH_EXIT_1;
					}
					if (dim_inst[l].fk_id_dimension_label) {
						if (oph_dim_read_dimension_filtered_data
						    (db, label_dimension_table_name, dim_inst[l].fk_id_dimension_label, MYSQL_DIMENSION, compressed, &dim_row, dim[l].dimension_type, dim_inst[l].size)
						    || !dim_row) {
							pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in reading a row from dimension table.\n");
							logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container,
								OPH_LOG_GENERIC_DIM_READ_ERROR);
							if (dim_row)
								free(dim_row);
							oph_dim_disconnect_from_dbms(db->dbms_instance);
							oph_dim_unload_dim_dbinstance(db);
							free(cubedims);
							goto __OPH_EXIT_1;
						}
					} else {
						strncpy(dim[l].dimension_type, OPH_DIM_INDEX_DATA_TYPE, OPH_ODB_DIM_DIMENSION_TYPE_SIZE);	// A reduced dimension is handled by indexes
						dim[l].dimension_type[OPH_ODB_DIM_DIMENSION_TYPE_SIZE] = 0;
					}
					// Store output labels
					if (oph_dim_insert_into_dimension_table
					    (db, o_label_dimension_table_name, dim[l].dimension_type, dim_inst[l].size, dim_row, &(dim_inst[l].fk_id_dimension_label))) {
						pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in inserting a new row in dimension table.\n");
						logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_ROW_ERROR);
						if (dim_row)
							free(dim_row);
						oph_dim_disconnect_from_dbms(db->dbms_instance);
						oph_dim_unload_dim_dbinstance(db);
						free(cubedims);
						goto __OPH_EXIT_1;
					}
					if (dim_row)
						free(dim_row);

					// Set indexes
					snprintf(operation, OPH_COMMON_BUFFER_LEN, MYSQL_DIM_INDEX_ARRAY, OPH_DIM_INDEX_DATA_TYPE, 1, dim_inst[l].size);
					dim_row = NULL;
					if (oph_dim_read_dimension_data(db, index_dimension_table_name, dim_inst[l].fk_id_dimension_index, operation, compressed, &dim_row) || !dim_row) {
						pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in reading a row from dimension table.\n");
						logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_READ_ERROR);
						if (dim_row)
							free(dim_row);
						oph_dim_disconnect_from_dbms(db->dbms_instance);
						oph_dim_unload_dim_dbinstance(db);
						free(cubedims);
						goto __OPH_EXIT_1;
					}
					// Store output indexes
					if (oph_dim_insert_into_dimension_table
					    (db, o_index_dimension_table_name, OPH_DIM_INDEX_DATA_TYPE, dim_inst[l].size, dim_row, &(dim_inst[l].fk_id_dimension_index))) {
						pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in inserting a new row in dimension table.\n");
						logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_ROW_ERROR);
						if (dim_row)
							free(dim_row);
						oph_dim_disconnect_from_dbms(db->dbms_instance);
						oph_dim_unload_dim_dbinstance(db);
						free(cubedims);
						goto __OPH_EXIT_1;
					}
					if (dim_row)
						free(dim_row);
				} else
					dim_inst[l].fk_id_dimension_index = dim_inst[l].fk_id_dimension_label = 0;

				dim_inst[l].id_grid = 0;
				cl_value = NULL;
				if (oph_odb_dim_insert_into_dimensioninstance_table
				    (oDB, &(dim_inst[l]), &(cubedims[l].id_dimensioninst), ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube, dim[l].dimension_name,
				     cl_value)) {
					pmesg(LOG_ERROR, __FILE__, __LINE__, "Error in inserting a new dimension instance\n");
					logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_DIM_INSTANCE_STORE_ERROR);
					if (cl_value)
						free(cl_value);
					oph_dim_disconnect_from_dbms(db->dbms_instance);
					oph_dim_unload_dim_dbinstance(db);
					free(cubedims);
					goto __OPH_EXIT_1;
				}
				if (cl_value)
					free(cl_value);
			}
			oph_dim_disconnect_from_dbms(db->dbms_instance);
			oph_dim_unload_dim_dbinstance(db);
		}
		//Write new cube - dimension relation rows
		for (l = 0; l < number_of_dimensions; l++) {
			//Change iddatacube in cubehasdim
			cubedims[l].id_datacube = ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube;

			if (oph_odb_cube_insert_into_cubehasdim_table(oDB, &(cubedims[l]), &last_insertd_id)) {
				pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to insert new datacube - dimension relations.\n");
				logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_CUBEHASDIM_INSERT_ERROR);
				free(cubedims);
				goto __OPH_EXIT_1;
			}
		}
		free(cubedims);

		if (oph_odb_meta_copy_from_cube_to_cube
		    (oDB, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_datacube, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube,
		     ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_user)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to copy metadata.\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_GENERIC_METADATA_COPY_ERROR);
			goto __OPH_EXIT_1;
		}

		last_insertd_id = 0;
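		// Provenance: register a task linking the input cube to the new output cube in the task table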
		oph_odb_task new_task;
		new_task.id_outputcube = ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube;
		new_task.id_job = ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_job;
		strncpy(new_task.operator, handle->operator_type, OPH_ODB_CUBE_OPERATOR_SIZE);
		new_task.operator[OPH_ODB_CUBE_OPERATOR_SIZE] = 0;
		memset(new_task.query, 0, OPH_ODB_CUBE_OPERATION_QUERY_SIZE * sizeof(char));
		if (!(new_task.id_inputcube = (int *) malloc(1 * sizeof(int)))) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error allocating memory\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_MEMORY_ERROR_STRUCT, "task");
			goto __OPH_EXIT_1;
		}
		new_task.id_inputcube[0] = ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_datacube;
		new_task.input_cube_number = 1;

		if (oph_odb_cube_insert_into_task_table(oDB, &new_task, &last_insertd_id)) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Unable to insert new task.\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_TASK_INSERT_ERROR, new_task.operator);
			free(new_task.id_inputcube);
			goto __OPH_EXIT_1;
		}
		free(new_task.id_inputcube);
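		// Pack what the non-master ranks need after the broadcast: fragment ids, output cube id,
		// tuples per fragment and remainder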

		strncpy(id_string[0], ((OPH_SPLIT_operator_handle *) handle->operator_handle)->fragment_ids, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE);
		snprintf(id_string[1], OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE, "%d", ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube);
		snprintf(id_string[2], OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE, "%d", ((OPH_SPLIT_operator_handle *) handle->operator_handle)->new_tuplexfragment);
		snprintf(id_string[3], OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE, "%d", ((OPH_SPLIT_operator_handle *) handle->operator_handle)->new_remainder);
	}
      __OPH_EXIT_1:
	//Broadcast the fragment relative index set and the new datacube parameters to all other processes
	MPI_Bcast(id_string, 4 * OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE, MPI_CHAR, 0, MPI_COMM_WORLD);

	//Check if sequential part has been completed
	if (id_string[0][0] == 0) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Master procedure or broadcasting has failed\n");
		logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_MASTER_TASK_INIT_FAILED);
		((OPH_SPLIT_operator_handle *) handle->operator_handle)->execute_error = 1;
		return OPH_ANALYTICS_OPERATOR_UTILITY_ERROR;
	}

	if (handle->proc_rank != 0) {
		if (!(((OPH_SPLIT_operator_handle *) handle->operator_handle)->fragment_ids = (char *) strndup(id_string[0], OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE))) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error allocating memory\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_SPLIT_MEMORY_ERROR_INPUT, "fragment ids");
			((OPH_SPLIT_operator_handle *) handle->operator_handle)->execute_error = 1;
			return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
		}
		((OPH_SPLIT_operator_handle *) handle->operator_handle)->id_output_datacube = (int) strtol(id_string[1], NULL, 10);
		((OPH_SPLIT_operator_handle *) handle->operator_handle)->new_tuplexfragment = (int) strtol(id_string[2], NULL, 10);
		((OPH_SPLIT_operator_handle *) handle->operator_handle)->new_remainder = (int) strtol(id_string[3], NULL, 10);
	}
	return OPH_ANALYTICS_OPERATOR_SUCCESS;
}
int task_init(oph_operator_struct * handle)
{
	if (!handle || !handle->operator_handle) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Null Handle\n");
		logging(LOG_ERROR, __FILE__, __LINE__, OPH_GENERIC_CONTAINER_ID, OPH_LOG_OPH_DELETE_NULL_OPERATOR_HANDLE);
		return OPH_ANALYTICS_OPERATOR_NULL_OPERATOR_HANDLE;
	}

	//For error checking
	char id_string[OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE];
	memset(id_string, 0, sizeof(id_string));
	id_string[0] = 0;
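	// Note: the error paths inside the master block below do not return early; they leave id_string empty
	// so that the MPI_Bcast further down still runs and every rank detects the failure consistently.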

	if (handle->proc_rank == 0) {
		ophidiadb *oDB = &((OPH_DELETE_operator_handle *) handle->operator_handle)->oDB;
		oph_odb_datacube cube;
		oph_odb_cube_init_datacube(&cube);

		int datacube_id = ((OPH_DELETE_operator_handle *) handle->operator_handle)->id_input_datacube;

		//retrieve input datacube
		if (oph_odb_cube_retrieve_datacube(oDB, datacube_id, &cube)) {
			oph_odb_cube_free_datacube(&cube);
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error while retrieving input datacube\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_DELETE_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_DELETE_DATACUBE_READ_ERROR);
		} else {
			//Copy fragment id relative index set
			if (!(((OPH_DELETE_operator_handle *) handle->operator_handle)->fragment_ids = (char *) strndup(cube.frag_relative_index_set, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE))) {
				oph_odb_cube_free_datacube(&cube);
				pmesg(LOG_ERROR, __FILE__, __LINE__, "Error allocating memory\n");
				logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_DELETE_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_DELETE_MEMORY_ERROR_INPUT, "fragment ids");
			} else {
				oph_odb_cube_free_datacube(&cube);
				strncpy(id_string, ((OPH_DELETE_operator_handle *) handle->operator_handle)->fragment_ids, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE);
			}
		}
	}

	//Broadcast to all other processes the fragment relative index
	MPI_Bcast(id_string, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE, MPI_CHAR, 0, MPI_COMM_WORLD);

	//Check if sequential part has been completed
	if (id_string[0] == 0) {
		pmesg(LOG_ERROR, __FILE__, __LINE__, "Master procedure or broadcasting has failed\n");
		logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_DELETE_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_DELETE_MASTER_TASK_INIT_FAILED);
		return OPH_ANALYTICS_OPERATOR_UTILITY_ERROR;
	}

	if (handle->proc_rank != 0) {
		if (!(((OPH_DELETE_operator_handle *) handle->operator_handle)->fragment_ids = (char *) strndup(id_string, OPH_ODB_CUBE_FRAG_REL_INDEX_SET_SIZE))) {
			pmesg(LOG_ERROR, __FILE__, __LINE__, "Error allocating memory\n");
			logging(LOG_ERROR, __FILE__, __LINE__, ((OPH_DELETE_operator_handle *) handle->operator_handle)->id_input_container, OPH_LOG_OPH_DELETE_MEMORY_ERROR_INPUT, "fragment ids");
			return OPH_ANALYTICS_OPERATOR_MEMORY_ERR;
		}
	}

	return OPH_ANALYTICS_OPERATOR_SUCCESS;
}