/*
 * jacobi7_3 - 7-point Jacobi stencil sweep over an nx x ny x nz grid,
 * with checkpoint/restart support provided by a background logger thread.
 *
 * Each timestep rewrites every interior point of the write grid as the sum
 * of its six axis neighbours minus fac times the centre value, ping-ponging
 * between A0 and Anext.  On the first call (log_id == 0) it attempts to
 * resume from previously logged data and spawns the logger thread.
 *
 * NOTE(review): alpha, B, ldb, ldc and the local temp_ptr are never used in
 * this body -- presumably kept for signature compatibility; confirm.
 * NOTE(review): log_id, log_ready, log_data, recover_data(), log_all_data()
 * and the Index3D() macro are declared elsewhere in this project; Index3D
 * presumably maps (i,j,k) to a linear row-major index -- confirm.
 * The  @;BEGIN(NestN=Nest)@  comment markers glued to the for-statements are
 * annotations for a loop-transformation tool; do not move or remove them.
 */
void jacobi7_3(const int nx,const int ny, int nz, const double alpha,double* A0,const int timesteps,const double* B,const int ldb, double* Anext,const int ldc) {
    double fac;
    double *temp_ptr;            /* NOTE(review): dead local, never used */
    int i, j, k, t, tstart=0;    /* tstart > 0 when resuming from a checkpoint */
    /* Stencil scaling factor derived from the first grid value (as in the
       original stencil-probe jacobi7 kernel).  Assumes A0[0] != 0 -- confirm. */
    fac = 6.0/(A0[0]*A0[0]);
    double *l0, *lnext;          /* per-timestep read / write grid pointers */
    /* Total number of grid points.  NOTE(review): nx*ny*nz is evaluated in
       int before widening to unsigned, so it can overflow for large grids
       -- confirm intended limits. */
    unsigned size = nx*ny*nz;
    /* First call only: try to restart from logged data, then start the
       logger thread that periodically persists snapshots. */
    if (log_id == 0) {
        /* recover_data() presumably returns the last logged timestep,
           or 0 when there is nothing to resume from -- confirm. */
        tstart=recover_data(A0, size);
        /* When resuming, both buffers must hold the recovered state so the
           even/odd ping-pong below starts from consistent data. */
        if (tstart > 0) memcpy((void*)Anext,(void*)A0,size * sizeof(double));
        log_id = (pthread_t*) malloc(sizeof(pthread_t));
        /* NOTE(review): malloc and pthread_create results are unchecked, and
           the thread receives a pointer to the local `size`, so it must not
           outlive this call (or must copy the value immediately) -- confirm. */
        pthread_create(log_id,NULL, log_all_data, &size);
    }
    /*@;BEGIN(Nest1=Nest)@*/for (t = tstart; t < timesteps; t++) {
        /* Snapshot the logger state once per timestep: log only when the
           logger has consumed the previous snapshot (log_ready cleared).
           NOTE(review): log_ready is shared with the logger thread with no
           visible synchronization -- data race; confirm intended. */
        int do_log = !(log_ready);
        /* Ping-pong buffers: even timesteps read A0 / write Anext,
           odd timesteps the reverse. */
        if (t%2 == 0) { l0 = A0; lnext = Anext; } else {lnext = A0; l0 = Anext; }
        /*@;BEGIN(Nest2=Nest)@*/for (k = 1; k < nz - 1; k++) {
            /*@;BEGIN(Nest3=Nest)@*/for (j = 1; j < ny - 1; j++) {
                /*@;BEGIN(Nest4=Nest)@*/for (i = 1; i < nx - 1; i++) {
                    /* 7-point stencil: six axis neighbours minus the
                       scaled centre value. */
                    lnext[Index3D (nx, ny, i, j, k)] =
                        l0[Index3D (nx, ny, i, j, k + 1)] +
                        l0[Index3D (nx, ny, i, j, k - 1)] +
                        l0[Index3D (nx, ny, i, j + 1, k)] +
                        l0[Index3D (nx, ny, i, j - 1, k)] +
                        l0[Index3D (nx, ny, i + 1, j, k)] +
                        l0[Index3D (nx, ny, i - 1, j, k)]
                        - l0[Index3D (nx, ny, i, j, k)] *fac ;
                }
            }
        }
        /* Publish a snapshot for the logger: copy the freshly written grid
           into log_data, then mark it ready with the (1-based) timestep.
           NOTE(review): the copy and the flag store are unsynchronized with
           the logger thread -- confirm the handshake protocol. */
        if (do_log) {
            memcpy((void*)log_data,(void*)lnext,size * sizeof(double));
            log_ready=t+1;
        }
    }
}
/*
 * f2fs_recover_fsync_data - find inodes that were fsync'ed but not covered
 * by the last checkpoint (via find_fsync_dnodes()) and replay their data
 * (via recover_data()), then write a recovery checkpoint.
 *
 * @sbi:        f2fs superblock info
 * @check_only: when true, only report whether recovery would be needed;
 *              skip the actual replay.
 *
 * Return: 1 when @check_only is set and fsync'ed inodes were found,
 *         otherwise 0 on success / nothing to recover, or a negative error
 *         code on failure.
 *
 * The filesystem's s_flags are saved on entry and restored on exit; the
 * read-only bit is temporarily cleared so recovery can write.  Checkpointing
 * is blocked for the whole recovery via cp_mutex.
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	/* Saved verbatim so every exit path can restore the caller's flags. */
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	/* Recovery must write; temporarily lift read-only. */
	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	/* Slab for the per-inode tracking entries used by the steps below. */
	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		/* Caller only asked whether recovery is needed. */
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	/* Release tracking entries on both success and error paths. */
	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	} else {
		/* Recovery finished cleanly: power-off-recovery is done. */
		clear_sbi_flag(sbi, SBI_POR_DOING);
	}
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			/* Persist the replayed state with a recovery checkpoint. */
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret: err;
}