/* Serialize a public-key-encrypted session key packet ENC into OUT.
   CTB is the cipher type byte for the packet header.  Returns 0 on
   success or the error code from mpi_write()/iobuf_write_temp().  */
static int
do_pubkey_enc (IOBUF out, int ctb, PKT_pubkey_enc *enc)
{
    int rc = 0;
    int idx, nmpis;
    IOBUF tmp = iobuf_temp ();   /* Body is built in a temporary buffer
                                    because the length is only known at
                                    the end. */

    write_version (tmp, ctb);
    if (enc->throw_keyid)
      {
        /* Don't tell Eve who can decrypt the message: emit an
           all-zero (wildcard) key ID instead of the real one.  */
        write_32 (tmp, 0);
        write_32 (tmp, 0);
      }
    else
      {
        write_32 (tmp, enc->keyid[0]);
        write_32 (tmp, enc->keyid[1]);
      }
    iobuf_put (tmp, enc->pubkey_algo);

    nmpis = pubkey_get_nenc (enc->pubkey_algo);
    if (!nmpis)
      write_fake_data (tmp, enc->data[0]); /* Unknown algorithm: the
                                              data is one opaque blob. */
    for (idx = 0; idx < nmpis && !rc; idx++)
      rc = mpi_write (tmp, enc->data[idx]);

    if (!rc)
      {
        /* Now that the body length is known, emit header then body. */
        write_header (out, ctb, iobuf_get_temp_length (tmp));
        rc = iobuf_write_temp (out, tmp);
      }
    iobuf_close (tmp);
    return rc;
}
/* Serialize a public key packet PK into OUT.  CTB is the cipher type
   byte for the packet header.  Returns 0 on success, an error code
   from mpi_write(), or G10ERR_WRITE_FILE on output failure.  */
static int
do_public_key (IOBUF out, int ctb, PKT_public_key *pk)
{
    int rc = 0;
    int n, i;
    IOBUF a = iobuf_temp ();

    /* Write the version number - if none is specified, use 3.  */
    if (!pk->version)
      iobuf_put (a, 3);
    else
      iobuf_put (a, pk->version);
    write_32 (a, pk->timestamp);

    /* v3 keys carry the validity period in days.  */
    if (pk->version < 4)
      {
        u16 ndays;
        if (pk->expiredate)
          ndays = (u16)((pk->expiredate - pk->timestamp) / 86400L);
        else
          ndays = 0;
        write_16 (a, ndays);
      }

    iobuf_put (a, pk->pubkey_algo);
    n = pubkey_get_npkey (pk->pubkey_algo);
    if (!n)
      write_fake_data (a, pk->pkey[0]); /* Unknown algorithm: opaque blob. */
    /* FIX: propagate mpi_write() failures instead of silently
       discarding them, and skip emitting the packet on error - this
       matches the error handling in do_pubkey_enc/do_secret_key.  */
    for (i = 0; i < n && !rc; i++)
      rc = mpi_write (a, pk->pkey[i]);

    if (!rc)
      {
        write_header2 (out, ctb, iobuf_get_temp_length (a), pk->hdrbytes, 1);
        if (iobuf_write_temp (out, a))
          rc = G10ERR_WRITE_FILE;
      }
    iobuf_close (a);
    return rc;
}
/* Serialize a secret key packet SK into OUT.  CTB is the cipher type
   byte for the packet header.  The public parameters are written as
   MPIs, followed by the secret parameters whose encoding depends on
   the protection mode.  Returns 0 on success or an error code.  */
static int
do_secret_key( IOBUF out, int ctb, PKT_secret_key *sk )
{
    int rc = 0;
    int i, nskey, npkey;
    IOBUF a = iobuf_temp(); /* Build in a self-enlarging buffer. */

    /* Write the version number - if none is specified, use 3 */
    if ( !sk->version )
      iobuf_put ( a, 3 );
    else
      iobuf_put ( a, sk->version );
    write_32 (a, sk->timestamp );

    /* v3 needs the expiration time (in days after creation). */
    if ( sk->version < 4 )
      {
        u16 ndays;
        if ( sk->expiredate )
          ndays = (u16)((sk->expiredate - sk->timestamp) / 86400L);
        else
          ndays = 0;
        write_16(a, ndays);
      }

    iobuf_put (a, sk->pubkey_algo );

    /* Get number of secret and public parameters.  They are held in
       one array: first the public ones, then the secret ones.  */
    nskey = pubkey_get_nskey ( sk->pubkey_algo );
    npkey = pubkey_get_npkey ( sk->pubkey_algo );

    /* If we don't have any public parameters - which is the case if
       we don't know the algorithm used - the parameters are stored as
       one blob in a faked (opaque) MPI. */
    if ( !npkey )
      {
        write_fake_data( a, sk->skey[0] );
        goto leave;
      }
    assert ( npkey < nskey );

    /* Writing the public parameters is easy.  Note that after this
       loop I equals NPKEY; the later branches rely on that to iterate
       over the secret half of skey[].  */
    for (i=0; i < npkey; i++ )
      if ((rc = mpi_write (a, sk->skey[i])))
        goto leave;

    /* Build the header for protected (encrypted) secret parameters.  */
    if ( sk->is_protected )
      {
        if ( is_RSA(sk->pubkey_algo) && sk->version < 4
             && !sk->protect.s2k.mode )
          {
            /* The simple rfc1991 (v3) way: just the cipher algo and IV. */
            iobuf_put (a, sk->protect.algo );
            iobuf_write (a, sk->protect.iv, sk->protect.ivlen );
          }
        else
          {
            /* OpenPGP protection according to rfc2440.  0xfe flags
               SHA-1 checksum protection, 0xff the simple checksum.  */
            iobuf_put(a, sk->protect.sha1chk? 0xfe : 0xff );
            iobuf_put(a, sk->protect.algo );
            if ( sk->protect.s2k.mode >= 1000 )
              {
                /* These modes are not possible in OpenPGP, we use them
                   to implement our extensions, 101 can be seen as a
                   private/experimental extension (this is not specified
                   in rfc2440 but the same scheme is used for all other
                   algorithm identifiers) */
                iobuf_put(a, 101 );
                iobuf_put(a, sk->protect.s2k.hash_algo );
                iobuf_write(a, "GNU", 3 );
                iobuf_put(a, sk->protect.s2k.mode - 1000 );
              }
            else
              {
                iobuf_put(a, sk->protect.s2k.mode );
                iobuf_put(a, sk->protect.s2k.hash_algo );
              }
            /* Modes 1 (salted) and 3 (iterated+salted) carry a salt. */
            if ( sk->protect.s2k.mode == 1 || sk->protect.s2k.mode == 3 )
              iobuf_write (a, sk->protect.s2k.salt, 8 );
            /* Mode 3 additionally carries the iteration count byte. */
            if ( sk->protect.s2k.mode == 3 )
              iobuf_put (a, sk->protect.s2k.count );

            /* For our special modes 1001, 1002 we do not need an IV. */
            if ( sk->protect.s2k.mode != 1001
                 && sk->protect.s2k.mode != 1002 )
              iobuf_write (a, sk->protect.iv, sk->protect.ivlen );
          }
      }
    else
      iobuf_put (a, 0 ); /* Not protected: S2K usage octet is zero. */

    if ( sk->protect.s2k.mode == 1001 )
      ; /* GnuPG extension - don't write a secret key at all. */
    else if ( sk->protect.s2k.mode == 1002 )
      {
        /* GnuPG extension - divert to OpenPGP smartcard. */
        iobuf_put(a, sk->protect.ivlen ); /* Length of the serial number
                                             or 0 for no serial number. */
        /* The serial number gets stored in the IV field. */
        iobuf_write(a, sk->protect.iv, sk->protect.ivlen);
      }
    else if ( sk->is_protected && sk->version >= 4 )
      {
        /* The secret key is protected - write it out as it is.  All
           protected v4 material lives in one opaque MPI at skey[npkey]. */
        byte *p;
        unsigned int ndatabits;
        assert (gcry_mpi_get_flag (sk->skey[npkey], GCRYMPI_FLAG_OPAQUE));
        p = gcry_mpi_get_opaque (sk->skey[npkey], &ndatabits );
        iobuf_write (a, p, (ndatabits+7)/8 );
      }
    else if ( sk->is_protected )
      {
        /* The secret key is protected the old v3 way: each secret
           parameter is a separate opaque MPI.  I is still NPKEY here. */
        for ( ; i < nskey; i++ )
          {
            byte *p;
            unsigned int ndatabits;
            assert (gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE));
            p = gcry_mpi_get_opaque (sk->skey[i], &ndatabits);
            iobuf_write (a, p, (ndatabits+7)/8);
          }
        write_16(a, sk->csum );
      }
    else
      {
        /* Non-protected key: plain MPIs followed by the checksum. */
        for ( ; i < nskey; i++ )
          if ( (rc = mpi_write (a, sk->skey[i])))
            goto leave;
        write_16 (a, sk->csum );
      }

  leave:
    if (!rc)
      {
        /* Build the header of the packet - which we must do after
           writing all the other stuff, so that we know the length of
           the packet */
        write_header2(out, ctb, iobuf_get_temp_length(a), sk->hdrbytes);
        /* And finally write it out to the real stream */
        rc = iobuf_write_temp( out, a );
      }

    iobuf_close(a); /* Close the temporary buffer */
    return rc;
}
/*---< main() >-------------------------------------------------------------*/ int main(int argc, char **argv) { int opt; extern char *optarg; extern int optind; int i, j; int isInFileBinary, isOutFileBinary, do_pnetcdf; int is_output_timing, is_print_usage, verbose; int numClusters, numCoords, numObjs, totalNumObjs; int *membership; /* [numObjs] */ char *filename; // TODO >> modified by VL: new variable representing the centers file name char *centers_filename; // TODO << end of the modification char *var_name; float **objects; /* [numObjs][numCoords] data objects */ float **clusters; /* [numClusters][numCoords] cluster center */ float threshold; double timing, io_timing, clustering_timing; int rank, nproc, mpi_namelen; char mpi_name[MPI_MAX_PROCESSOR_NAME]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nproc); MPI_Get_processor_name(mpi_name,&mpi_namelen); /* some default values */ _debug = 0; verbose = 1; threshold = 0.001; numClusters = 0; isInFileBinary = 0; isOutFileBinary = 0; is_output_timing = 0; is_print_usage = 0; filename = NULL; // TODO >> modified by VL: initialization of the new variable centers_filename = NULL; // TODO << end of the modification do_pnetcdf = 0; var_name = NULL; // TODO >> modified by VL: added the letter z while ( (opt=getopt(argc,argv,"i:z:n:t:v:c:abdorhq"))!= EOF) { // TODO << end of the modification switch (opt) { case 'i': filename=optarg; break; // TODO >> modified by VL: initialize centers filename case 'z': centers_filename=optarg; break; // TODO << end of the modification case 'b': isInFileBinary = 1; break; case 'r': isOutFileBinary = 1; break; case 't': threshold=atof(optarg); break; case 'n': numClusters = atoi(optarg); break; case 'o': is_output_timing = 1; break; case 'c': do_pnetcdf = 1; var_name = optarg; break; case 'q': verbose = 0; break; case 'd': _debug = 1; break; case 'h': default: is_print_usage = 1; break; } } if (filename == 0 || numClusters <= 1 || 
is_print_usage == 1 || (do_pnetcdf && var_name == NULL)) { if (rank == 0) usage(argv[0], threshold); MPI_Finalize(); exit(1); } if (_debug) printf("Proc %d of %d running on %s\n", rank, nproc, mpi_name); #ifndef _PNETCDF_BUILT if (do_pnetcdf) { if (rank == 0) printf("Error: PnetCDF feature is not built\n"); MPI_Finalize(); exit(1); } #endif MPI_Barrier(MPI_COMM_WORLD); io_timing = MPI_Wtime(); /* read data points from file ------------------------------------------*/ #ifdef _PNETCDF_BUILT if (do_pnetcdf) objects = pnetcdf_read(filename, var_name, &numObjs, &numCoords, MPI_COMM_WORLD); else #endif objects = mpi_read(isInFileBinary, filename, &numObjs, &numCoords, MPI_COMM_WORLD); if (_debug) { /* print the first 4 objects' coordinates */ int num = (numObjs < 4) ? numObjs : 4; for (i=0; i<num; i++) { char strline[1024], strfloat[16]; sprintf(strline,"%d: objects[%d]= ",rank,i); for (j=0; j<numCoords; j++) { sprintf(strfloat,"%10f",objects[i][j]); strcat(strline, strfloat); } strcat(strline, "\n"); printf("%s",strline); } } timing = MPI_Wtime(); io_timing = timing - io_timing; clustering_timing = timing; /* allocate a 2D space for clusters[] (coordinates of cluster centers) this array should be the same across all processes */ clusters = (float**) malloc(numClusters * sizeof(float*)); assert(clusters != NULL); clusters[0] = (float*) malloc(numClusters * numCoords * sizeof(float)); assert(clusters[0] != NULL); for (i=1; i<numClusters; i++) clusters[i] = clusters[i-1] + numCoords; MPI_Allreduce(&numObjs, &totalNumObjs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); /* checking if numObjs < nproc is done in the I/O routine */ // TODO >> modified by VL: initialize variable "clusters" // possibly load the centers from a file if (centers_filename != NULL) { int num_centers, num_coords; clusters = file_read(0, centers_filename, &num_centers, &num_coords); // no control over the numbers of centers and coordinates } // otherwise, pick the first numClusters elements of objects[] as 
initial cluster centers else // TODO << end of the modification /* pick first numClusters elements in feature[] as initial cluster centers*/ if (rank == 0) { if (numObjs < numClusters) { /* read the first numClusters data points from file */ read_n_objects(isInFileBinary, filename, numClusters, numCoords, clusters); } else { /* copy the first numClusters elements in feature[] */ for (i=0; i<numClusters; i++) for (j=0; j<numCoords; j++) clusters[i][j] = objects[i][j]; } } MPI_Bcast(clusters[0], numClusters*numCoords, MPI_FLOAT, 0, MPI_COMM_WORLD); /* membership: the cluster id for each data object */ membership = (int*) malloc(numObjs * sizeof(int)); assert(membership != NULL); /* start the core computation -------------------------------------------*/ mpi_kmeans(objects, numCoords, numObjs, numClusters, threshold, membership, clusters, MPI_COMM_WORLD); free(objects[0]); free(objects); timing = MPI_Wtime(); clustering_timing = timing - clustering_timing; /* output: the coordinates of the cluster centres ----------------------*/ #ifdef _PNETCDF_BUILT if (do_pnetcdf) pnetcdf_write(filename, 1, numClusters, numObjs, numCoords, clusters, membership, totalNumObjs, MPI_COMM_WORLD, verbose); else #endif mpi_write(isOutFileBinary, filename, numClusters, numObjs, numCoords, clusters, membership, totalNumObjs, MPI_COMM_WORLD, verbose); free(membership); free(clusters[0]); free(clusters); /*---- output performance numbers ---------------------------------------*/ if (is_output_timing) { double max_io_timing, max_clustering_timing; io_timing += MPI_Wtime() - timing; /* get the max timing measured among all processes */ MPI_Reduce(&io_timing, &max_io_timing, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); MPI_Reduce(&clustering_timing, &max_clustering_timing, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (rank == 0) { printf("\nPerforming **** Simple Kmeans (MPI) ****\n"); printf("Num of processes = %d\n", nproc); printf("Input file: %s\n", filename); // TODO >> modified by VL: 
display centers filename if(centers_filename!=NULL) printf("Centers file: %s\n", centers_filename); // TODO << end of the modifications printf("numObjs = %d\n", totalNumObjs); printf("numCoords = %d\n", numCoords); printf("numClusters = %d\n", numClusters); printf("threshold = %.4f\n", threshold); printf("I/O time = %10.4f sec\n", max_io_timing); printf("Computation timing = %10.4f sec\n", max_clustering_timing); } } MPI_Finalize(); return(0); }
int main(int argc, char **argv) { int opt; extern char *optarg; extern int optind; int i, j; int isInFileBinary, isOutFileBinary; int is_output_timing, is_print_usage; int numClusters, numCoords, numObjs, totalNumObjs; int *membership; /* [numObjs] */ char *filename; float **objects; /* [numObjs][numCoords] data objects */ float **clusters; /* [numClusters][numCoords] cluster center */ float threshold; double timing, io_timing, clustering_timing; int rank, nproc, mpi_namelen; char mpi_name[MPI_MAX_PROCESSOR_NAME]; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nproc); MPI_Get_processor_name(mpi_name,&mpi_namelen); /* some default values */ _debug = 0; threshold = 0.001; numClusters = 0; isInFileBinary = 0; isOutFileBinary = 0; is_output_timing = 0; is_print_usage = 0; filename = NULL; while ( (opt=getopt(argc,argv,"p:i:n:t:abdorh"))!= EOF) { switch (opt) { case 'i': filename=optarg; break; case 'b': isInFileBinary = 1; break; case 'r': isOutFileBinary = 1; break; case 't': threshold=atof(optarg); break; case 'n': numClusters = atoi(optarg); break; case 'o': is_output_timing = 1; break; case 'd': _debug = 1; break; case 'h': is_print_usage = 1; break; default: is_print_usage = 1; break; } } if (filename == 0 || numClusters <= 1 || is_print_usage == 1) { if (rank == 0) usage(argv[0], threshold); MPI_Finalize(); exit(1); } if (_debug) printf("Proc %d of %d running on %s\n", rank, nproc, mpi_name); MPI_Barrier(MPI_COMM_WORLD); io_timing = MPI_Wtime(); /* read data points from file ------------------------------------------*/ objects = mpi_read(filename, &numObjs, &numCoords, MPI_COMM_WORLD); if (_debug) { /* print the first 4 objects' coordinates */ int num = (numObjs < 4) ? 
numObjs : 4; for (i=0; i<num; i++) { char strline[1024], strfloat[16]; sprintf(strline,"%d: objects[%d]= ",rank,i); for (j=0; j<numCoords; j++) { sprintf(strfloat,"%10f",objects[i][j]); strcat(strline, strfloat); } strcat(strline, "\n"); printf("%s",strline); } } timing = MPI_Wtime(); io_timing = timing - io_timing; clustering_timing = timing; /* allocate a 2D space for clusters[] (coordinates of cluster centers) this array should be the same across all processes */ clusters = (float**) malloc(numClusters * sizeof(float*)); assert(clusters != NULL); clusters[0] = (float*) malloc(numClusters * numCoords * sizeof(float)); assert(clusters[0] != NULL); for (i=1; i<numClusters; i++) clusters[i] = clusters[i-1] + numCoords; MPI_Allreduce(&numObjs, &totalNumObjs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); /* pick first numClusters elements in feature[] as initial cluster centers*/ if (rank == 0) { for (i=0; i<numClusters; i++) for (j=0; j<numCoords; j++) clusters[i][j] = objects[i][j]; } MPI_Bcast(clusters[0], numClusters*numCoords, MPI_FLOAT, 0, MPI_COMM_WORLD); /* membership: the cluster id for each data object */ membership = (int*) malloc(numObjs * sizeof(int)); assert(membership != NULL); /* start the core computation -------------------------------------------*/ mpi_kmeans(objects, numCoords, numObjs, numClusters, threshold, membership, clusters, MPI_COMM_WORLD); free(objects[0]); free(objects); timing = MPI_Wtime(); clustering_timing = timing - clustering_timing; /* output: the coordinates of the cluster centres ----------------------*/ mpi_write(filename, numClusters, numObjs, numCoords, clusters, membership, totalNumObjs, MPI_COMM_WORLD); free(membership); free(clusters[0]); free(clusters); /*---- output performance numbers ---------------------------------------*/ if (is_output_timing) { double max_io_timing, max_clustering_timing; io_timing += MPI_Wtime() - timing; /* get the max timing measured among all processes */ MPI_Reduce(&io_timing, &max_io_timing, 1, 
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); MPI_Reduce(&clustering_timing, &max_clustering_timing, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (rank == 0) { printf("\nPerforming **** Simple Kmeans (MPI) ****\n"); printf("Num of processes = %d\n", nproc); printf("Input file: %s\n", filename); printf("numObjs = %d\n", totalNumObjs); printf("numCoords = %d\n", numCoords); printf("numClusters = %d\n", numClusters); printf("threshold = %.4f\n", threshold); printf("I/O time = %10.4f sec\n", max_io_timing); printf("Computation timing = %10.4f sec\n", max_clustering_timing); } } MPI_Finalize(); return(0); }