T_VOID BackGroundTask(T_U32 sp_svc)
{
    T_U8 i;

#if (MICRO_TASK_DEBUG)
    akerror("sp_svc:", sp_svc, 1);
    for (i = 0; i < 16; i++)
    {
        print_x(TaskContext[i]);
        Fwl_ConsoleWriteChr(' ');
    }
    Fwl_ConsoleWriteChr('\n');
#endif

    for (i = 0; i < MAX_MICRO_TASK; i++)
    {
        if (TaskActiveMap & (1 << i))
        {
            TaskActiveMap &= ~(1 << i);
            if (task_param[i].TaskCalBak)
            {
                task_param[i].TaskCalBak();
            }
        }
    }
    //Do_IRQ_Return(sp_svc);
}
int print_p(t_data *data, va_list arg)
{
    ft_printchar('0', data);
    ft_printchar('x', data);
    data->len_mod = 8;
    return (print_x(data, arg));
}
void print(char *text)
{
    int i, len;

    len = 0;
    for (i = 0; i < MAX_LEN; i++)
    {
        if (text[i] != '\0')
            ++len;
        else
            break;
    }
    print_x(text, len);
}
void Distribution2_Cylinder::print_x (const string & filename) const
{
    FILE * fp = fopen (filename.c_str(), "w");
    if (fp == NULL) {
        std::cerr << "cannot open file " << filename << std::endl;
        return;
    }
    print_x (fp);
    fclose(fp);
}
/* Recursively try values 1..9 in each empty cell of the 9x9 grid;
 * print and count every complete assignment. */
void condition(int a, int b)
{
    int i;

    if (x[a][b] == 0)
    {
        for (i = 1; i <= 9; i++)
        {
            if (check_legal(a, b, i))
            {
                x[a][b] = i;
                if (a == 8 && b == 8)
                {
                    print_x();
                    total++;
                    return;
                }
                else
                {
                    if (b == 8)
                        condition(a + 1, 0);
                    else
                        condition(a, b + 1);
                }
                x[a][b] = 0;    /* backtrack */
            }
        }
    }
    else
    {
        if (a == 8 && b == 8)
        {
            print_x();
            total++;
            return;
        }
        else if (b == 8)
            condition(a + 1, 0);
        else
            condition(a, b + 1);
    }
    return;
}
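/*
 * check_legal(a, b, i) is used by condition() above but is not defined in this
 * excerpt. A minimal sketch, assuming a standard 9x9 Sudoku rule over the same
 * global grid x[9][9]: value i may be placed at (a, b) only if it does not
 * already appear in row a, column b, or the surrounding 3x3 box. The original
 * helper may implement a different constraint.
 */
int check_legal(int a, int b, int i)
{
    int r, c;
    int br = (a / 3) * 3;   /* top row of the 3x3 box containing (a, b) */
    int bc = (b / 3) * 3;   /* left column of that box */

    for (c = 0; c < 9; c++)
        if (x[a][c] == i)
            return 0;       /* already present in the row */
    for (r = 0; r < 9; r++)
        if (x[r][b] == i)
            return 0;       /* already present in the column */
    for (r = br; r < br + 3; r++)
        for (c = bc; c < bc + 3; c++)
            if (x[r][c] == i)
                return 0;   /* already present in the box */
    return 1;
}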
template <typename T, typename X>
void core_solver_pretty_printer<T, X>::print() {
    for (unsigned i = 0; i < nrows(); i++) {
        print_row(i);
    }
    print_bottom_line();
    print_cost();
    print_x();
    print_basis_heading();
    print_lows();
    print_upps();
    print_exact_norms();
    print_approx_norms();
    m_out << std::endl;
}
int main()
{
    int i;
    int x = 999;                /* B: block scope */

    print_x();
    printf("x = %d\n", x);

    for (i = 0; i < 5; i++) {
        int x = i * 100;        /* C: block scope */
        printf("x = %d\n", x);
    }
    printf("x = %d\n", x);

    getchar();
    return 0;
}
T_BOOL TaskPending(T_VOID)
{
#if (MICRO_TASK_DEBUG)
    T_U8 i;
#endif

    if (TaskActiveMap && (TaskMutex == 0))
    {
#if (MICRO_TASK_DEBUG)
        akerror("TaskPending succeeded:", TaskActiveMap, 1);
        for (i = 0; i < 14; i++)
        {
            print_x(Data[i]);
            Fwl_ConsoleWriteChr(' ');
        }
        Fwl_ConsoleWriteChr('\n');
#endif
        return AK_TRUE;
    }
    //akerror("TaskPending Failed:", TaskActiveMap, 1);
    return AK_FALSE;
}
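/*
 * print_x() is referenced by the two firmware excerpts above but not defined
 * in them. A minimal sketch, assuming it dumps a 32-bit value as hexadecimal
 * through the same Fwl_ConsoleWriteChr() console hook used above; the exact
 * prototype and formatting in the original firmware may differ.
 */
static T_VOID print_x(T_U32 value)
{
    const char digits[] = "0123456789ABCDEF";
    int shift;

    /* emit eight hex digits, most significant nibble first */
    for (shift = 28; shift >= 0; shift -= 4)
    {
        Fwl_ConsoleWriteChr(digits[(value >> shift) & 0xF]);
    }
}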
int print_p(t_data *data, va_list arg)
{
    data->flag = ((data->flag | 1) | 32);
    data->len_mod = 8;
    return (print_x(data, arg));
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int proc_num, my_rank;
    MPI_Status status;
    MPI_Request request;
    MPI_File fh;
    int i, j, k, iter;
    int **mymat, *allmat;
    double compute_time, compute_time_start, compute_time_end;
    double io_time, io_time_start, io_time_end;
    double total_time_start, total_time_end;
    double sum, temp, diff, bb, allbb;
    double e = 0.00001;
    double *x, *myx;
    // whether to use prefetch or not, see below
    int compare_mode = 0;

    // Init MPI
    MPI_Init(&argc, &argv);

    // record start time of program
    total_time_start = MPI_Wtime();

    // get the number of procs and rank in the comm
    MPI_Comm_size(MPI_COMM_WORLD, &proc_num);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    if (argc < 4) {
        printf("Usage: jacobi mat.bin dim mat_num [enable prefetch]\n");
        return -1;
    }
    else if (argc == 5) {
        // compare mode: 0 means no prefetch, 1 means prefetch
        compare_mode = atoi(argv[4]);
        if (my_rank == 0) {
            if (compare_mode == 1)
                printf("Using prefetching\n");
            else
                printf("No prefetching\n");
        }
    }

    // broadcast prefetch mode
    MPI_Bcast(&compare_mode, 1, MPI_INT, 0, MPI_COMM_WORLD);

    // n is the dimension of the input matrix
    int n = atoi(argv[2]);
    // each proc gets myrows rows of the matrix
    int myrows = n / proc_num;

    // cache allocation for prefetching
    int *prefetch_cache = malloc(myrows * (n+1) * sizeof(int));

    // allmat is a 1-D array storing this proc's rows of the matrix
    allmat = (int*) malloc(myrows * (n+1) * sizeof(int));
    // mymat makes it a 2-D array
    mymat = (int**) malloc(myrows * sizeof(int *));
    for (i = 0; i < myrows; i++) {
        mymat[i] = &allmat[i * (n+1)];
    }

    // x stores the global result, myx stores the local result
    x = (double*) malloc(n * sizeof(double));
    myx = (double*) malloc(myrows * sizeof(double));

    // how many matrices to solve in total
    int mat_num = atoi(argv[3]);

#if DEBUG
    printf("I'm proc%d, n=%d, myrows=%d, mat_num=%d\n", my_rank, n, myrows, mat_num);
    if (my_rank == -1) {
        int dowait = 1;
        while (dowait) {
            ;
        }
    }
#endif

    // open the input file, checking the return code rather than the handle
    int rc = MPI_File_open(MPI_COMM_WORLD, argv[1], MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    if (rc != MPI_SUCCESS) {
        printf("File does not exist\n");
        return -1;
    }

    io_time = 0.0;
    compute_time = 0.0;
    double start, finish;

    // each round: read an entire matrix and prefetch the next round's matrix
    for (k = 0; k < mat_num; k++) {
        MPI_Barrier(MPI_COMM_WORLD);

        // I/O time
        io_time_start = MPI_Wtime();
        if (compare_mode == 1) {
            // use prefetch
            if (k == 0) {
                start = MPI_Wtime();
                // first read: no pattern information known, so just a normal read
                MPI_File_read_at(fh, (myrows * my_rank + k * n) * (n+1) * sizeof(int),
                                 allmat, myrows * (n+1), MPI_INT, &status);
                finish = MPI_Wtime();
                if (my_rank == 0)
                    printf("First read time %lf\n", finish - start);

                // According to the previous read, predict the next read.
                // Use a non-blocking read so computation can overlap with I/O.
                MPI_File_iread_at(fh, (myrows * my_rank + (k+1) * n) * (n+1) * sizeof(int),
                                  prefetch_cache, myrows * (n+1), MPI_INT, &request);
            }
            else {
                start = MPI_Wtime();
                // wait for the previous iread (prefetch of the predicted access) to complete
                MPI_Wait(&request, &status);
                finish = MPI_Wtime();
                if (my_rank == 0)
                    printf("Wait time %lf\n", finish - start);

                start = MPI_Wtime();
                // copy prefetched data from the cache to the target buffer
                memcpy(allmat, prefetch_cache, myrows * (n+1) * sizeof(int));
                finish = MPI_Wtime();
                if (my_rank == 0)
                    printf("Memcpy time %lf\n", finish - start);

                // the next read is predicted, so prefetch it
                if (k != mat_num - 1) {
                    MPI_File_iread_at(fh, (myrows * my_rank + (k+1) * n) * (n+1) * sizeof(int),
                                      prefetch_cache, myrows * (n+1), MPI_INT, &request);
                }
            }
        }
        else {
            // normal read
            MPI_File_read_at(fh, (myrows * my_rank + k * n) * (n+1) * sizeof(int),
                             allmat, myrows * (n+1), MPI_INT, &status);
        }
        MPI_Barrier(MPI_COMM_WORLD);

        if (my_rank == 0) {
            io_time_end = MPI_Wtime();
            printf("I/O time of %d round: %lf\n", k, io_time_end - io_time_start);
            io_time += io_time_end - io_time_start;
        }

#if PRINTMAT
        // print matrix
        printf("rank %d:\n", my_rank);
        for (i = 0; i < myrows; i++) {
            for (j = 0; j < n+1; j++) {
                printf(" %4d", mymat[i][j]);
            }
            printf("\n");
        }
#endif

        // set local and global x to zero for each iteration
        memset(myx, 0, sizeof(myx[0]) * myrows);
        memset(x, 0, sizeof(x[0]) * myrows);

        compute_time_start = MPI_Wtime();

        // iterate until convergence
        iter = 0;
        do {
            bb = 0.0;
            // every proc gets the full x
            MPI_Allgather(myx, myrows, MPI_DOUBLE, x, myrows, MPI_DOUBLE, MPI_COMM_WORLD);
            for (i = 0; i < myrows; i++) {
                sum = 0.0;
                for (j = 0; j < n; j++) {
                    if (j != i + myrows * my_rank) {
                        sum = sum + (double)mymat[i][j] * x[j];
                    }
                }
                temp = ((double)mymat[i][n] - sum) / (double)mymat[i][i + myrows * my_rank];
                diff = fabs(x[i] - temp);
                if (diff > bb) {
                    bb = diff;
                }
                myx[i] = temp;
            }
            // each process gets the same bb value so all leave the loop together
            MPI_Allreduce(&bb, &allbb, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
            iter++;
            // if (my_rank == 0)
            //     printf("iter = %d, bb = %lf\n", iter, allbb);
        } while (allbb >= e);

        // gather the final x so it can be printed for each matrix
        MPI_Allgather(myx, myrows, MPI_DOUBLE, x, myrows, MPI_DOUBLE, MPI_COMM_WORLD);

        if (my_rank == 0) {
            // record end time of computation
            compute_time_end = MPI_Wtime();
            printf("Compute time of %d round: %lf\n", k, compute_time_end - compute_time_start);
            compute_time += compute_time_end - compute_time_start;
            // append result to file
            print_x(iter, n, x);
        }
    } // k

    MPI_File_close(&fh);

    if (my_rank == 0) {
        printf("Total I/O time: %lf bandwidth: %.2lf MB/s\n",
               io_time, n * (n+1) * 4.0 / (io_time * 1024 * 1024));
        printf("Total compute time: %lf\n", compute_time);
    }

    total_time_end = MPI_Wtime();
    if (my_rank == 0)
        printf("Total time: %lf\n", total_time_end - total_time_start);

    // free allocated memory
    free(allmat);
    free(mymat);
    free(myx);
    free(x);
    free(prefetch_cache);

    MPI_Finalize();
    return 0;
}
static int certificate_signer_tag(cxml_handler_t* const _h, cxml_tag_t * const tag)
{
    int rc = 0;
    // write signer info
    cert_cxml_handler_t * h = (cert_cxml_handler_t *)_h;
    if (cxml_tag_is_open(tag)) {
        h->signer_type = 1; // digest by default
        const char * v = cxml_tag_attr_value(tag, "type");
        if (v) {
            h->signer_type = STR2ENUM(_signer_types, v);
            if (h->signer_type < 0) {
                fprintf(stderr, "%s: Unknown signer type\n", v);
                return -1;
            }
        }
        cint8_write(h->signer_type, &h->ptr, h->end, &rc);
        if (h->signer_type > 0) {
            if (_signerName) {
                h->signer = _signerName;
            }
            else {
                v = cxml_tag_attr_value(tag, "name");
                if (v == NULL) {
                    fprintf(stderr, "Signer name shall be provided\n");
                    return -1;
                }
                h->signer = v;
            }
        }
    }
    else {
        // write signer info
        if (h->signer_type > 0) {
            if (h->signer_type > 2) {
                fprintf(stderr, "%d: signer method unsupported\n", h->signer_type);
                rc = -1;
            }
            else {
                // load signer certificate
                int plen = strlen(_searchPath) + strlen(h->signer);
                char * path = malloc(plen + 16);
                cvstrncpy(path, plen + 16, _searchPath, "/", h->signer, ".crt", NULL);
                // use a signed type so the error check below can see a negative size
                int size = load_certificate(path, h->ptr, h->end);
                if (size < 0) {
                    fprintf(stderr, "%s: signer certificate not found or error\n", h->signer);
                    rc = -1;
                }
                else {
                    if (h->signer_type == 1) { // digest
                        char hash[sha256_hash_size];
                        // change eccpoint type of the signature to x_coordinate_only(0)
                        // to follow canonical encoding
                        h->ptr[size-65] = 0;
                        sha256_calculate(hash, h->ptr, size);
#ifdef DEBUG_DATA
                        fprintf(stderr, "HASH (%s): ", h->signer);
                        print_x(stderr, hash, sha256_hash_size);
                        fprintf(stderr, "\n");
                        fprintf(stderr, "DIGEST (%s): ", h->signer);
                        print_x(stderr, &hash[sha256_hash_size - 8], 8);
                        fprintf(stderr, "\n");
#endif
                        cbuf_write(hash + sha256_hash_size - 8, 8, &h->ptr, h->end, &rc);
                    }
                    else { // certificate
                        h->ptr += size;
                    }
                }
                free(path);
            }
        }
    }
    return rc;
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int i, j, n;
    double **mat, *x, **mymat, *myx;
    double sum, temp, diff, bb;
    double e;
    int iter = 0;
    int proc_num, my_rank;

    MPI_Init(&argc, &argv);
    // get the number of procs and rank in the comm
    MPI_Comm_size(MPI_COMM_WORLD, &proc_num);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    double start_time, end_time, total_time;
    MPI_Status status;

    if (my_rank == 0) {
        // Proc 0 is in charge of reading the matrix and distributing the data
        if (argc != 3) {
            printf("Usage: mpirun -np 4 hw1_3b_mpi <filename> <error>\n");
            return 1;
        }
        printf("\nInput File: %s\n", argv[1]);
        e = (double)atof(argv[2]);
        printf("error= %f\n", e);

        /* Open the input file */
        FILE *fp = fopen(argv[1], "r");
        if (fp == NULL) {
            printf("Error in opening a file: %s", argv[1]);
            return 0;
        }

        /* Read the matrix dimension */
        fscanf(fp, "%d", &n);
        printf("n= %d\n", n);

        // store in row-major order
        mat = (double**) malloc(n * sizeof(double*));
        for (i = 0; i < n; i++)
            mat[i] = (double*) malloc((n+1) * sizeof(double));

        /* Read the input matrix */
        for (i = 0; i < n; i++) {
            for (j = 0; j < n+1; j++) {
                fscanf(fp, "%lf", &mat[i][j]);
            }
        }
        fclose(fp);
    }

    /* Solve the given matrix iteratively */
    /*
    if (my_rank == 1) {
        int dowait = 1;
        while (dowait) {
            ;
        }
    }
    */

    // Broadcast matrix dimension 'n' and convergence threshold 'e'
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&e, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    int myrows = n / proc_num;

    // mymat stores each proc's share of the matrix rows
    mymat = (double**) malloc(myrows * sizeof(double*));
    for (i = 0; i < myrows; i++)
        mymat[i] = (double*) malloc((n+1) * sizeof(double));
    myx = (double*) malloc(myrows * sizeof(double));
    x = (double*) malloc(n * sizeof(double));

    for (i = 0; i < myrows; i++) {
        myx[i] = 0.0;
    }

    // start time; total time should include distributing the data
    // to other processes as part of the parallelization
    start_time = MPI_Wtime();

    // make sure every proc has myrows rows of mat
    if (my_rank == 0) {
        int dest = 0;
        for (i = myrows; i < n; i++) {
            dest = i / myrows;
            MPI_Send(&mat[i][0], n + 1, MPI_DOUBLE, dest, i, MPI_COMM_WORLD);
        }
        for (i = 0; i < myrows; i++) {
            for (j = 0; j < n + 1; j++)
                mymat[i][j] = mat[i][j];
        }
    }
    else {
        for (i = 0; i < myrows; i++) {
            MPI_Recv(&mymat[i][0], n + 1, MPI_DOUBLE, 0, my_rank * myrows + i,
                     MPI_COMM_WORLD, &status);
        }
    }

    iter = 0;
    double allbb;
    double compute_time = MPI_Wtime();
    do {
        bb = 0;
        // every proc gets the full x
        MPI_Allgather(myx, myrows, MPI_DOUBLE, x, myrows, MPI_DOUBLE, MPI_COMM_WORLD);
        for (i = 0; i < myrows; i++) {
            sum = 0;
            for (j = 0; j < n; j++) {
                if (j != i + myrows * my_rank) {
                    sum = sum + mymat[i][j] * x[j];
                }
            }
            temp = (mymat[i][n] - sum) / mymat[i][i + myrows * my_rank];
            diff = fabs(x[i] - temp);
            if (diff > bb) {
                bb = diff;
            }
            myx[i] = temp;
        }
        // each process gets the same bb value so all leave the loop together
        MPI_Allreduce(&bb, &allbb, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        iter++;
    } while (allbb >= e);

    // gather the final x for printing
    MPI_Allgather(myx, myrows, MPI_DOUBLE, x, myrows, MPI_DOUBLE, MPI_COMM_WORLD);

    if (my_rank == 0) {
        // record end time of computation
        end_time = MPI_Wtime();
        total_time = end_time - start_time;
        printf("Total time:%lf; Computation time is:%lf\n",
               total_time, end_time - compute_time);
    }

#if DEBUG
    /* print the solution */
    printf("\nAnswer >>");
    for (i = 0; i < n; i++) {
        printf("\nx[%d]=%f", i, x[i]);
    }
    printf("\n");
#endif

    if (my_rank == 0) {
        print_x(iter, n, x);
        printf("\ndone\n");
    }

    // free allocated memory
    for (i = 0; i < myrows; i++) {
        free(mymat[i]);
    }
    free(mymat);
    free(myx);
    free(x);
    if (my_rank == 0) {
        for (i = 0; i < n; i++) {
            free(mat[i]);
        }
        free(mat);
    }

    MPI_Finalize();
    return 0;
}
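/*
 * print_x(iter, n, x) is called by both MPI programs above but is not defined
 * in these excerpts; the surrounding comments only say it appends the result
 * to a file. A minimal sketch, assuming a hypothetical output file
 * "result.txt" and this exact argument order (iteration count, dimension,
 * solution vector); the original helper may format its output differently.
 */
#include <stdio.h>

void print_x(int iter, int n, double *x)
{
    int i;
    FILE *fp = fopen("result.txt", "a");   /* append one record per solved matrix */
    if (fp == NULL)
        return;
    fprintf(fp, "iterations: %d\n", iter);
    for (i = 0; i < n; i++)
        fprintf(fp, "x[%d] = %f\n", i, x[i]);
    fclose(fp);
}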