Code Example #1
File: TileLoader.cpp Project: PayalPradhan/marble
GeoDataDocument *TileLoader::loadTileVectorData( GeoSceneVectorTileDataset const *textureLayer, TileId const & tileId, DownloadUsage const usage )
{
    // FIXME: textureLayer->fileFormat() could be used in the future to pick just that parser, instead of trying all available parsers

    QString const fileName = tileFileName( textureLayer, tileId );

    TileStatus status = tileStatus( textureLayer, tileId );
    if ( status != Missing ) {
        // check if an update should be triggered

        if ( status == Available ) {
            mDebug() << Q_FUNC_INFO << tileId << "StateUptodate";
        } else {
            Q_ASSERT( status == Expired );
            mDebug() << Q_FUNC_INFO << tileId << "StateExpired";
            triggerDownload( textureLayer, tileId, usage );
        }

        if ( QFile::exists( fileName ) ) {
            // The file is available locally, so parse it and return the vector data
            GeoDataDocument* document = openVectorFile(fileName);
            if (document) {
                return document;
            }
        }
    }

    // tile was not locally available (or failed to parse) => trigger download
    triggerDownload( textureLayer, tileId, usage );
    return nullptr;
}
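
This method implements a read-through cache: a tile that is available locally is parsed and returned immediately, an expired tile is still served while a refresh download is triggered, and a missing (or unparseable) tile yields nullptr after queueing a download. The sketch below illustrates that contract from a caller's point of view; the MyVectorLayer class, its members, and renderDocument()/showPlaceholder() are hypothetical and not part of Marble (only loadTileVectorData() and Marble's DownloadBrowse usage value are taken from the listing above):

// Hypothetical caller sketch (not Marble API): a non-null document can be
// rendered right away, while nullptr means the tile arrives asynchronously
// via the tileCompleted() signal once the triggered download finishes.
void MyVectorLayer::requestTile( TileId const &id )
{
    GeoDataDocument *document =
        m_tileLoader->loadTileVectorData( m_dataset, id, DownloadBrowse );
    if ( document ) {
        renderDocument( document );   // cache hit, possibly refreshing in background
    } else {
        showPlaceholder( id );        // cache miss, wait for tileCompleted()
    }
}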
Code Example #2
File: TileLoader.cpp Project: PayalPradhan/marble
void TileLoader::updateTile(const QString &fileName, const QString &idStr)
{
    QStringList const components = idStr.split( ':', QString::SkipEmptyParts );
    Q_ASSERT( components.size() == 5 );

    QString const origin = components[0];
    QString const sourceDir = components[1];
    int const zoomLevel = components[2].toInt();
    int const tileX = components[3].toInt();
    int const tileY = components[4].toInt();

    TileId const id( sourceDir, zoomLevel, tileX, tileY );
    if (origin == GeoSceneTypes::GeoSceneVectorTileType) {
        GeoDataDocument* document = openVectorFile(MarbleDirs::path(fileName));
        if (document) {
            emit tileCompleted(id, document);
        }
    }
}
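
updateTile() relies on an idStr of exactly five colon-separated fields, origin:sourceDir:zoomLevel:tileX:tileY, as the Q_ASSERT enforces. A minimal sketch of building a matching id follows; the sourceDir and tile coordinates are hypothetical illustrative values, only the field layout is taken from the code above:

#include <QString>
#include "GeoSceneTypes.h"  // GeoSceneTypes::GeoSceneVectorTileType

// Illustrative only: assemble an id in the "origin:sourceDir:zoom:x:y" layout
// that updateTile() splits and reassembles into a TileId.
QString const idStr = QString::fromLatin1( "%1:%2:%3:%4:%5" )
        .arg( GeoSceneTypes::GeoSceneVectorTileType ) // origin
        .arg( "earth/vectorosm" )                     // sourceDir (hypothetical)
        .arg( 11 )                                    // zoomLevel
        .arg( 1205 )                                  // tileX
        .arg( 707 );                                  // tileY
// updateTile(fileName, idStr) would then parse the file and emit
// tileCompleted( TileId( "earth/vectorosm", 11, 1205, 707 ), document ).

Note that this format assumes none of the fields contains a ':' itself; a sourceDir with a colon would change the field count and trip the Q_ASSERT.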
Code Example #3
File: mpi_psrs.c Project: c0mpsc1/MPIProgLib
int main(int argc, char** argv) {
	pvector_t v, tmp = NULL, samples = NULL;
	index_t i, length, step;
	unit_t min, max;
	MPI_Status status;
	MPI_Datatype sampleDatatype;

	if (initMPI(&argc, &argv) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "Cannot initialize MPI.");

	if (argc < 3) {
		fprintf(stderr, "MPI Parallel Sorting by Regular Sampling implementation.\nUsage:\n\t%s <data set (to read)> <result file (to write)>\n", argv[0]);
		MPI_Finalize(); return 1;
	}

	if (ID == ROOT_ID) {
		tmp = openVectorFile(ARGV_FILE_NAME);
		printf("Data set size: %d, process number: %d\n", tmp->length, PROCESS_NUMBER);
		if ((tmp->length/PROCESS_NUMBER) <= PROCESS_NUMBER)
			return AbortAndExit(ERRORCODE_SIZE_DONT_MATCH, "Process count is too big or the data set is too small for a correct calculation.\n");
		ELEMENTS_NUMBER = tmp->length;
	}

	if (MPI_Bcast(tableOfConstants, TABLE_OF_CONSTANTS_SIZE, MPI_INT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

	ELEMENTS_PER_PROCESS = listLength(ID);
	initVector(&v, ELEMENTS_PER_PROCESS);

	if (ID == ROOT_ID) { /* Bcast data set */
		copyVector(tmp, v, v->length);
		for(i = 1, step = ELEMENTS_PER_PROCESS; i < PROCESS_NUMBER; i++) {
			if (MPI_Send(&(tmp->vector[step]), listLength(i), MPI_UNIT, i, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
			step += listLength(i);
		}
	} else if (MPI_Recv(v->vector, ELEMENTS_PER_PROCESS, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");

	quicksortVector(v);

	if (initVector(&samples, PROCESS_NUMBER -1) == NULL)
		return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for samples vector.");

	MPI_Type_vector(PROCESS_NUMBER, 1, ELEMENTS_NUMBER / SQR_PROCESS_NUMBER, MPI_UNIT, &sampleDatatype);
	MPI_Type_commit(&sampleDatatype);

	if (ID != ROOT_ID) { /* Sending samples to the root process */
		if (MPI_Send(v->vector, 1, sampleDatatype, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
		if (initVector(&tmp, listLength(PROCESS_NUMBER -1)) == NULL)
			return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for temporary vector.");
	} else { /* Receiving samples */
		copySampleToVector(v, tmp, (v->length)/PROCESS_NUMBER, PROCESS_NUMBER);
		for(step = PROCESS_NUMBER, i = 1; i < PROCESS_NUMBER; i++, step += PROCESS_NUMBER)
			if (MPI_Recv(&(tmp->vector[step]), PROCESS_NUMBER, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
		quicksort(tmp->vector, 0, SQR_PROCESS_NUMBER);
		copySampleToVector(tmp, samples, SQR_PROCESS_NUMBER / (PROCESS_NUMBER - 1), PROCESS_NUMBER - 1);
	}

	/* Broadcast selected samples to processors */
	if (MPI_Bcast(samples->vector, PROCESS_NUMBER-1, MPI_UNIT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

	if ((i = dataExchange((ID == 0) ? UNITT_MIN : getFromVector(samples, ID -1), (ID == (PROCESS_NUMBER - 1)) ? UNITT_MAX : getFromVector(samples, ID), &v, tmp)) != ERRORCODE_NOERRORS)
		return AbortAndExit(i, "Error during data exchange.");

	/* Sorting new data */
	quicksortVector(v);

	if (ID != ROOT_ID) { /* Sending sorted data */
		if (MPI_Send(&(v->length), 1, MPI_INT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send (sending size of data) error.");
		if (MPI_Send(v->vector, v->length, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
	} else { /* Receiving sorted data */
		copyVector(v, tmp, v->length);
		for(step = v->length, i = 1; i < PROCESS_NUMBER; i++) {
			if (MPI_Recv(&length, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv (sending size of data) error.");
			if (MPI_Recv(&(tmp->vector[step]), length, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
			step += length;
		}
		writeVectorToFile(tmp, ARGV_RESULT_NAME);
		freeVector(&tmp);
	}
	freeVector(&v);
	MPI_Finalize();
	return 0;
}
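
The program is a straightforward PSRS pipeline: scatter the input, sort each block locally, gather p regular samples per process (via the strided sampleDatatype), sort the p² samples on the root and broadcast p−1 pivots, repartition in dataExchange(), sort the new blocks locally, and gather the sorted pieces at the root. Below is a serial model of the sampling and pivot-selection phase in plain C++ with hypothetical names; it uses the classic stride-p pivot rule, whereas the listing above strides by p²/(p−1), which picks slightly different positions to the same effect:

#include <algorithm>
#include <cstddef>
#include <vector>

// Serial model of PSRS phases 2-3: each of the p locally sorted blocks
// contributes p regular samples (stride n/p^2, like sampleDatatype above);
// the root sorts all p^2 samples and keeps p - 1 pivots at stride p.
// Assumes n is divisible enough that n/p^2 >= 1, mirroring the size check
// in main().
std::vector<int> pickPivots(const std::vector<std::vector<int>> &sortedBlocks,
                            std::size_t n)
{
    const std::size_t p = sortedBlocks.size();
    std::vector<int> gathered;                    // plays the role of tmp on the root
    for (const auto &block : sortedBlocks)
        for (std::size_t i = 0; i < p; ++i)
            gathered.push_back(block[i * (n / (p * p))]);
    std::sort(gathered.begin(), gathered.end());  // quicksort(tmp->vector, ...)
    std::vector<int> pivots;                      // plays the role of samples
    for (std::size_t i = 1; i < p; ++i)
        pivots.push_back(gathered[i * p]);        // p - 1 pivots
    return pivots;
}

Each process then keeps the elements below pivots[0] for rank 0, those between pivots[0] and pivots[1] for rank 1, and so on, which is the partitioning that dataExchange() performs over MPI.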