/*
 * Block-walk callback: OR the 0x55555555 pattern into every 32-bit
 * word of the block and add the resulting population counts to
 * wb->used.  The modified block is written straight back to disk.
 *
 * Returns 0 on success, OCFS2_BLOCK_ABORT on any I/O error.
 */
static int walk_blocks_func(ocfs2_filesys *fs, uint64_t blkno, uint64_t bcount, uint16_t ext_flags, void *priv_data)
{
	struct walk_block *wb = priv_data;
	uint32_t *word = (uint32_t *) wb->buf;
	uint32_t nwords = fs->fs_blocksize / sizeof(uint32_t);
	uint32_t idx;
	errcode_t err;

	err = ocfs2_read_blocks(fs, blkno, 1, wb->buf);
	if (err) {
		com_err("walk_blocks_func", err,
			"while reading block %"PRIu64, blkno);
		return OCFS2_BLOCK_ABORT;
	}

	/* set every other bit */
	for (idx = 0; idx < nwords; idx++) {
		word[idx] |= 0x55555555;
		wb->used += BITCOUNT(word[idx]);
	}

	err = io_write_block(fs->fs_io, blkno, 1, wb->buf);
	if (err) {
		com_err("walk_blocks_func", err,
			"while writing block %"PRIu64, blkno);
		return OCFS2_BLOCK_ABORT;
	}

	return 0;
}
/*
 * Zero out num_blocks disk blocks starting at start_blk, one
 * blocksize-sized write at a time.
 *
 * Returns 0 on success or the first allocation/write error.
 */
static errcode_t empty_blocks(ocfs2_filesys *fs, uint64_t start_blk, uint64_t num_blocks)
{
	errcode_t ret;
	char *zero_buf = NULL;
	uint64_t blk;

	ret = ocfs2_malloc_block(fs->fs_io, &zero_buf);
	if (ret)
		goto bail;

	memset(zero_buf, 0, fs->fs_blocksize);

	for (blk = start_blk; num_blocks; num_blocks--, blk++) {
		ret = io_write_block(fs->fs_io, blk, 1, zero_buf);
		if (ret)
			goto bail;
	}

bail:
	if (zero_buf)
		ocfs2_free(&zero_buf);
	return ret;
}
/*
 * Install a list of backup-superblock locations on the volume.
 *
 * 'blocks' holds up to 'len' candidate block numbers (silently capped
 * at OCFS2_MAX_BACKUP_SUPERBLOCKS).  When the BACKUP_SB compat feature
 * is not yet set, each candidate cluster is first verified via
 * check_cluster() before anything is written.  The candidate clusters
 * are then zeroed, the backup list refreshed through
 * ocfs2_refresh_backup_super_list(), and finally the clusters are
 * claimed with ocfs2_new_specific_cluster().
 *
 * Returns 0 on success (including the no-op empty/NULL-list case) or
 * an ocfs2 error code.
 */
errcode_t ocfs2_set_backup_super_list(ocfs2_filesys *fs, uint64_t *blocks, size_t len)
{
	size_t i;
	errcode_t ret = 0;
	char *buf = NULL;
	uint64_t *blkno = blocks;
	uint32_t cluster, bpc = fs->fs_clustersize / fs->fs_blocksize;

	/* Nothing to do for an empty list; ret is still 0. */
	if (!len || !blocks || !*blocks)
		goto bail;
	len = ocfs2_min(len,(size_t)OCFS2_MAX_BACKUP_SUPERBLOCKS);

	if (!OCFS2_HAS_COMPAT_FEATURE(OCFS2_RAW_SB(fs->fs_super),
				      OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		/* check all the blkno to see whether it is used. */
		for (i = 0; i < len; i++, blkno++) {
			ret = check_cluster(fs,
					ocfs2_blocks_to_clusters(fs, *blkno));
			if (ret)
				goto bail;
		}
	}

	/* One cluster's worth (bpc blocks) of zeroes, reused per write. */
	ret = ocfs2_malloc_blocks(fs->fs_io, bpc, &buf);
	if (ret)
		goto bail;
	memset(buf, 0, fs->fs_clustersize);

	/* zero all the clusters at first */
	blkno = blocks;
	for (i = 0; i < len; i++, blkno++) {
		/* cluster*bpc rounds the blkno down to its cluster start. */
		cluster = ocfs2_blocks_to_clusters(fs, *blkno);
		ret = io_write_block(fs->fs_io, cluster*bpc, bpc, buf);
		if (ret)
			goto bail;
	}

	ret = ocfs2_refresh_backup_super_list(fs, blocks, len);
	if (ret)
		goto bail;

	/* We just tested the clusters, so the allocation can't fail */
	blkno = blocks;
	for (i = 0; i < len; i++, blkno++)
		ocfs2_new_specific_cluster(fs,
				ocfs2_blocks_to_clusters(fs, *blkno));

bail:
	if (buf)
		ocfs2_free(&buf);
	return ret;
}
/*
 * Deliberately corrupt the superblock's i_clusters field (fswreck-style
 * fault injection).  A private copy of the superblock is modified and
 * written back; the in-memory fs->fs_super is left untouched.
 *
 * excess != 0 inflates i_clusters, otherwise it is shrunk, in both
 * cases by 2.5x clusters-per-group so fsck has something to find.
 * Any allocation or write failure is fatal via FSWRK_COM_FATAL.
 */
static void mess_up_superblock_clusters(ocfs2_filesys *fs, int excess)
{
	errcode_t ret;
	char *copy = NULL;
	struct ocfs2_dinode *di;
	uint32_t cpg, delta, corrupted;

	/* corrupt superblock, just copy the superblock, change it and
	 * write it back to disk. */
	ret = ocfs2_malloc_block(fs->fs_io, &copy);
	if (ret)
		FSWRK_COM_FATAL(progname, ret);

	memcpy(copy, (char *)fs->fs_super, fs->fs_blocksize);
	di = (struct ocfs2_dinode *)copy;

	cpg = 8 * ocfs2_group_bitmap_size(fs->fs_blocksize, 0,
			OCFS2_RAW_SB(fs->fs_super)->s_feature_incompat);
	/* make the wrong value to 2.5 times of cluster_per_group. */
	delta = cpg * 2 + cpg / 2;

	corrupted = excess ? di->i_clusters + delta : di->i_clusters - delta;

	fprintf(stdout, "Corrupt SUPERBLOCK_CLUSTERS: "
		"change superblock i_clusters from %u to %u.\n",
		di->i_clusters, corrupted);
	di->i_clusters = corrupted;

	ret = io_write_block(fs->fs_io, di->i_blkno, 1, copy);
	if (ret)
		FSWRK_COM_FATAL(progname, ret);

	if (copy)
		ocfs2_free(&copy);
	return;
}
/*
 * Write a group descriptor to disk at blkno.
 *
 * The caller's gd_buf is left untouched: it is copied into a scratch
 * block, byte-swapped to on-disk endianness, has its ECC computed, and
 * only then written.  Marks the filesystem changed on success.
 *
 * Returns 0, OCFS2_ET_RO_FILESYS, OCFS2_ET_BAD_BLKNO, or an I/O error.
 */
errcode_t ocfs2_write_group_desc(ocfs2_filesys *fs, uint64_t blkno, char *gd_buf)
{
	errcode_t ret;
	char *blk;
	struct ocfs2_group_desc *gd;

	if (!(fs->fs_flags & OCFS2_FLAG_RW))
		return OCFS2_ET_RO_FILESYS;

	/*
	 * Valid block numbers run from OCFS2_SUPER_BLOCK_BLKNO up to
	 * fs_blocks - 1.  The previous check used '>', which let
	 * blkno == fs_blocks (one past the end of the device) through.
	 */
	if ((blkno < OCFS2_SUPER_BLOCK_BLKNO) || (blkno >= fs->fs_blocks))
		return OCFS2_ET_BAD_BLKNO;

	ret = ocfs2_malloc_block(fs->fs_io, &blk);
	if (ret)
		return ret;

	memcpy(blk, gd_buf, fs->fs_blocksize);

	gd = (struct ocfs2_group_desc *)blk;
	/* Swap to little-endian on-disk order before checksumming. */
	ocfs2_swap_group_desc_from_cpu(fs, gd);

	ocfs2_compute_meta_ecc(fs, blk, &gd->bg_check);
	ret = io_write_block(fs->fs_io, blkno, 1, blk);
	if (ret)
		goto out;

	fs->fs_flags |= OCFS2_FLAG_CHANGED;
	ret = 0;

out:
	ocfs2_free(&blk);
	return ret;
}
/*
 * Write an extent block to disk at blkno.
 *
 * The caller's eb_buf is left untouched: it is copied into a scratch
 * block, byte-swapped to on-disk endianness, has its ECC computed, and
 * only then written.  Marks the filesystem changed on success.
 *
 * Returns 0, OCFS2_ET_RO_FILESYS, OCFS2_ET_BAD_BLKNO, or an I/O error.
 */
errcode_t ocfs2_write_extent_block(ocfs2_filesys *fs, uint64_t blkno, char *eb_buf)
{
	errcode_t ret;
	char *blk;
	struct ocfs2_extent_block *eb;

	if (!(fs->fs_flags & OCFS2_FLAG_RW))
		return OCFS2_ET_RO_FILESYS;

	/*
	 * Valid block numbers run from OCFS2_SUPER_BLOCK_BLKNO up to
	 * fs_blocks - 1.  The previous check used '>', which let
	 * blkno == fs_blocks (one past the end of the device) through.
	 */
	if ((blkno < OCFS2_SUPER_BLOCK_BLKNO) || (blkno >= fs->fs_blocks))
		return OCFS2_ET_BAD_BLKNO;

	ret = ocfs2_malloc_block(fs->fs_io, &blk);
	if (ret)
		return ret;

	memcpy(blk, eb_buf, fs->fs_blocksize);

	eb = (struct ocfs2_extent_block *) blk;
	/* Swap to little-endian on-disk order before checksumming. */
	ocfs2_swap_extent_block_from_cpu(fs, eb);

	ocfs2_compute_meta_ecc(fs, blk, &eb->h_check);
	ret = io_write_block(fs->fs_io, blkno, 1, blk);
	if (ret)
		goto out;

	fs->fs_flags |= OCFS2_FLAG_CHANGED;
	ret = 0;

out:
	ocfs2_free(&blk);
	return ret;
}
/*
 * Zero the area past i_size but still within an allocated
 * cluster. This avoids exposing nonzero data on subsequent file
 * extends.
 *
 * Returns 0 on success (including the new_size == 0 and tail-is-a-hole
 * cases) or an ocfs2 error code.
 */
static errcode_t ocfs2_zero_tail_for_truncate(ocfs2_cached_inode *ci, uint64_t new_size)
{
	errcode_t ret;
	char *buf = NULL;
	ocfs2_filesys *fs = ci->ci_fs;
	uint64_t start_blk, p_blkno, contig_blocks, start_off;
	int count, byte_counts, bpc = fs->fs_clustersize /fs->fs_blocksize;
	uint16_t ext_flags;

	if (new_size == 0)
		return 0;

	/* Virtual block containing the new EOF. */
	start_blk = new_size / fs->fs_blocksize;

	ret = ocfs2_extent_map_get_blocks(ci, start_blk, 1, &p_blkno, &contig_blocks, &ext_flags);
	if (ret)
		goto out;

	/* Tail is a hole. */
	if (!p_blkno)
		goto out;

	if (ext_flags & OCFS2_EXT_REFCOUNTED) {
		/*
		 * The tail cluster is shared: CoW it before dirtying,
		 * then re-look-up the now-private physical block.
		 */
		uint32_t cpos = ocfs2_blocks_to_clusters(fs, start_blk);
		ret = ocfs2_refcount_cow(ci, cpos, 1, cpos + 1);
		if (ret)
			goto out;

		ret = ocfs2_extent_map_get_blocks(ci, start_blk, 1, &p_blkno, &contig_blocks, &ext_flags);
		if (ret)
			goto out;

		/* CoW must have produced an unshared, mapped block. */
		assert(!(ext_flags & OCFS2_EXT_REFCOUNTED) && p_blkno);
	}

	/* calculate the total blocks we need to empty. */
	count = bpc - (p_blkno & (bpc - 1));

	ret = ocfs2_malloc_blocks(fs->fs_io, count, &buf);
	if (ret)
		goto out;

	/* Read-modify-write: keep the bytes before new_size intact. */
	ret = ocfs2_read_blocks(fs, p_blkno, count, buf);
	if (ret)
		goto out;

	/* empty the content after the new_size and within the same cluster. */
	start_off = new_size % fs->fs_blocksize;
	byte_counts = count * fs->fs_blocksize - start_off;
	memset(buf + start_off, 0, byte_counts);

	ret = io_write_block(fs->fs_io, p_blkno, count, buf);

out:
	if (buf)
		ocfs2_free(&buf);
	return ret;
}
/*
 * Replay one journal descriptor block: walk its block tags and copy
 * each journalled data block to its final on-disk location.
 *
 * 'buf' holds the descriptor block, 'seq' is the sequence number of the
 * transaction being replayed, and *next_block is the journal-relative
 * block number of the first data block after the descriptor; it is
 * advanced (wrapping via jwrap()) as tags are consumed.
 *
 * Returns 0 or the last I/O error seen; a failing block is skipped so
 * replay continues with the remaining tags.
 */
static errcode_t replay_blocks(ocfs2_filesys *fs, struct journal_info *ji, char *buf, uint64_t seq, uint64_t *next_block)
{
	char *tagp;
	journal_block_tag_t *tag;
	size_t i, num;
	char *io_buf = NULL;
	errcode_t err, ret = 0;
	int tag_bytes = ocfs2_journal_tag_bytes(ji->ji_jsb);
	uint32_t t_flags;
	uint64_t block64;

	/* Tags start right after the descriptor's journal header. */
	tagp = buf + sizeof(journal_header_t);
	num = (ji->ji_jsb->s_blocksize - sizeof(journal_header_t)) / tag_bytes;

	ret = ocfs2_malloc_blocks(fs->fs_io, 1, &io_buf);
	if (ret) {
		com_err(whoami, ret, "while allocating a block buffer");
		goto out;
	}

	for(i = 0; i < num; i++, tagp += tag_bytes, (*next_block)++) {
		tag = (journal_block_tag_t *)tagp;
		t_flags = be32_to_cpu(tag->t_flags);
		block64 = ocfs2_journal_tag_block(tag, tag_bytes);

		*next_block = jwrap(ji->ji_jsb, *next_block);

		verbosef("recovering journal block %"PRIu64" to disk block "
			 "%"PRIu64"\n", *next_block, block64);

		/* A revoked block must not be written back into place. */
		if (revoke_this_block(&ji->ji_revoke, block64, seq))
			goto skip_io;

		err = read_journal_block(fs, ji, *next_block, io_buf, 1);
		if (err) {
			ret = err;
			goto skip_io;
		}

		if (t_flags & JBD2_FLAG_ESCAPE) {
			/*
			 * Escaped block: the journal replaced its leading
			 * magic with zeroes when it was logged; restore it.
			 */
			uint32_t magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
			memcpy(io_buf, &magic, sizeof(magic));
		}

		err = io_write_block(fs->fs_io, block64, 1, io_buf);
		if (err)
			ret = err;

skip_io:
		/* Last tag: force loop exit via i, not break, so the
		 * for-clause still runs. */
		if (t_flags & JBD2_FLAG_LAST_TAG)
			i = num; /* be sure to increment next_block */

		/* Tags without SAME_UUID are followed by a 16-byte UUID. */
		if (!(t_flags & JBD2_FLAG_SAME_UUID))
			tagp += 16;
	}

out:
	if (io_buf)
		ocfs2_free(&io_buf);
	return ret;
}
/*
 * SWI dispatcher for the CM920 IAP board.  Decodes the SLOS software
 * interrupt group: r0 selects the event, r1/r2/r3 carry arguments, and
 * results are returned in r0.  Routes either to the bring-up trace
 * hooks or to the I/O device-driver layer.
 *
 * NOTE(review): assumes entry in SVC mode; device-driver calls run in
 * SYSTEM mode with IRQs enabled unless STATE == 1 — confirm STATE's
 * meaning against the scheduler before relying on this.
 */
CallBack events_swi_cm920iap_handler (int swi_number, SwiRegs *r)
{
  switch (swi_number) {
  /**********************************************************************
   * SLOS SWI's
   **********************************************************************/
  case /* SWI */ SLOS:
    switch (r->r[0]) {
    /* -----------------------------------------------------------------
     * Low level Debug SWI for debugging before the operating system is
     * up and running. The following register hold the debug information.
     *
     * r8fiq  - fatal error
     * r9fiq  - trace entry value
     * r10fiq - trace exit value
     * ----------------------------------------------------------------- */
    case /* SWI */ BringUp_Trace:
      switch (r->r[1]) {
      case 123: /* ENTRY into routine ...................... */
        bringupSetR9fiq (r->r[2]);
        break;
      case 321: /* EXIT into routine ....................... */
        bringupSetR10fiq (r->r[2]);
        break;
      }
      break;
    case /* SWI */ BringUp_FatalError:
      bringupSetR8fiq (r->r[1]);
      /* Deliberately hang so the fatal code stays readable in r8fiq. */
      fatalerror: goto fatalerror; /* setup infinite loop ... */
      break;
    /* -----------------------------------------------------------------
     * Device Driver SWI's for controlling and initializing device
     * drivers
     * ----------------------------------------------------------------- */
    case /* SWI */ Event_IODeviceInit:
      io_initialize_drivers ();
      break;
    default:
      /* ----------------------------------------------------------------
       * switch to SYSTEM mode and switch ON IRQ interrupts.
       * ---------------------------------------------------------------- */
      if (STATE!=1) {modifyControlCPSR (SYSTEM|IRQoN);}
      switch (r->r[0]) {
      case /* SWI */ Event_IODeviceOpen:
        r->r[0] = (unsigned int) io_open_driver ((UID *)r->r[1],r->r[2],r->r[3]);
        break;
      case /* SWI */ Event_IODeviceClose:
        r->r[0] = (unsigned int) io_close_driver ((device_treestr *)r->r[1],(UID)r->r[2]);
        break;
      case /* SWI */ Event_IODeviceWriteByte:
        io_write_byte ((device_treestr *)r->r[1],(UID)r->r[2],(BYTE)r->r[3]);
        break;
      case /* SWI */ Event_IODeviceReadByte:
        r->r[0] = (unsigned int) io_read_byte ((device_treestr *)r->r[1],(UID)r->r[2]);
        break;
      case /* SWI */ Event_IODeviceWriteBit:
        io_write_bit ((device_treestr *)r->r[1],(UID)r->r[2],(UINT)r->r[3]);
        break;
      case /* SWI */ Event_IODeviceReadBit:
        r->r[0] = (unsigned int) io_read_bit ((device_treestr *)r->r[1],(UID)r->r[2]);
        break;
      case /* SWI */ Event_IODeviceWriteBlock:
        io_write_block ((device_treestr *)r->r[1],(UID)r->r[2],(void *)r->r[3]);
        break;
      case /* SWI */ Event_IODeviceReadBlock:
        r->r[0] = (unsigned int) io_read_block ((device_treestr *)r->r[1],(UID)r->r[2]);
        break;
      }
      /* Back to SVC mode with IRQs off before returning from the SWI. */
      if (STATE!=1) {modifyControlCPSR (SVC|IRQoFF);}
    }
  }
  return ReportOK();
}
/*
 * Convert an inline-data inode to an extent-based one: allocate a
 * cluster, copy the inline bytes into its first block, and insert it
 * as extent 0.  Directories additionally get their last dirent
 * expanded to fill the block and, when supported, a dir trailer.
 *
 * Returns 0 on success or an ocfs2 error code.
 */
errcode_t ocfs2_convert_inline_data_to_extents(ocfs2_cached_inode *ci)
{
	errcode_t ret;
	uint32_t bytes, n_clusters;
	uint64_t p_start;
	char *inline_data = NULL;
	struct ocfs2_dinode *di = ci->ci_inode;
	ocfs2_filesys *fs = ci->ci_fs;
	uint64_t bpc = fs->fs_clustersize/fs->fs_blocksize;
	unsigned int new_size;

	/* Snapshot the inline bytes before the inode is restructured. */
	if (di->i_size) {
		ret = ocfs2_malloc_block(fs->fs_io, &inline_data);
		if (ret)
			goto out;

		ret = ocfs2_inline_data_read(di, inline_data, fs->fs_blocksize, 0, &bytes);
		if (ret)
			goto out;
	}

	/* Replace the inline area with an empty extent list. */
	ocfs2_dinode_new_extent_list(fs, di);
	di->i_dyn_features &= ~OCFS2_INLINE_DATA_FL;

	ret = ocfs2_new_clusters(fs, 1, 1, &p_start, &n_clusters);
	if (ret || n_clusters == 0)
		goto out;

	/* Zero the whole fresh cluster before any data lands in it. */
	ret = empty_blocks(fs, p_start, bpc);
	if (ret)
		goto out;

	if (di->i_size) {
		if (S_ISDIR(di->i_mode)) {
			/* Room left for the trailer, when one is in use. */
			if (ocfs2_supports_dir_trailer(fs))
				new_size = ocfs2_dir_trailer_blk_off(fs);
			else
				new_size = fs->fs_blocksize;
			/* Grow the final dirent to cover the new block. */
			ocfs2_expand_last_dirent(inline_data, di->i_size, new_size);
			if (ocfs2_supports_dir_trailer(fs))
				ocfs2_init_dir_trailer(fs, di, p_start, inline_data);

			di->i_size = fs->fs_blocksize;
			ret = ocfs2_write_dir_block(fs, di, p_start, inline_data);
		} else
			ret = io_write_block(fs->fs_io, p_start, 1, inline_data);
		if (ret)
			goto out;
	}

	ret = ocfs2_cached_inode_insert_extent(ci, 0, p_start, n_clusters, 0);
	if (ret)
		goto out;

	ret = ocfs2_write_cached_inode(fs, ci);

out:
	if (inline_data)
		ocfs2_free(&inline_data);
	return ret;
}
/*
 * Write 'count' bytes from 'buf' at file offset 'offset', block-aligned
 * (o_direct style: count, offset and the buffer address must all be
 * blocksize-aligned).  Holes are filled by allocating clusters and
 * unwritten extents are marked written; in both cases the blocks at the
 * cluster edges that the caller does not cover are zeroed first.
 *
 * The write never extends the file: it is clipped to i_size, and
 * *wrote reports the bytes actually written.
 *
 * Returns 0 or an ocfs2 error code.
 */
static errcode_t ocfs2_file_block_write(ocfs2_cached_inode *ci, void *buf, uint32_t count, uint64_t offset, uint32_t *wrote)
{
	ocfs2_filesys *fs = ci->ci_fs;
	errcode_t ret = 0;
	char *ptr = (char *) buf;
	uint32_t wanted_blocks;
	uint64_t contig_blocks;
	uint64_t v_blkno;
	uint64_t p_blkno, p_start, p_end;
	uint64_t begin_blocks = 0, end_blocks = 0;
	uint32_t tmp;
	uint64_t num_blocks;
	int bs_bits = OCFS2_RAW_SB(fs->fs_super)->s_blocksize_bits;
	uint64_t ino = ci->ci_blkno;
	uint32_t n_clusters, cluster_begin, cluster_end;
	uint64_t bpc = fs->fs_clustersize/fs->fs_blocksize;
	int insert = 0;
	uint16_t extent_flags = 0;

	/* o_direct requires aligned io */
	tmp = fs->fs_blocksize - 1;
	if ((count & tmp) || (offset & (uint64_t)tmp) ||
	    ((unsigned long)ptr & tmp))
		return OCFS2_ET_INVALID_ARGUMENT;

	wanted_blocks = count >> bs_bits;
	v_blkno = offset >> bs_bits;
	*wrote = 0;

	/* Blocks covered by i_size, rounding the tail block up. */
	num_blocks = (ci->ci_inode->i_size + fs->fs_blocksize - 1) >> bs_bits;

	/* Writes entirely past EOF are a no-op (*wrote stays 0). */
	if (v_blkno >= num_blocks)
		return 0;

	/* Clip the write so it never extends the file. */
	if (v_blkno + wanted_blocks > num_blocks)
		wanted_blocks = (uint32_t) (num_blocks - v_blkno);

	while(wanted_blocks) {
		ret = ocfs2_extent_map_get_blocks(ci, v_blkno, 1,
						  &p_blkno, &contig_blocks,
						  &extent_flags);
		if (ret)
			return ret;

		if (contig_blocks > wanted_blocks)
			contig_blocks = wanted_blocks;

		begin_blocks = 0;
		end_blocks = 0;
		p_end = 0;

		if (!p_blkno) {
			/*
			 * We meet with a hole here, so we allocate clusters
			 * and empty the both ends in case.
			 *
			 * We will postpone the extent insertion after we
			 * successfully write the extent block, so that and
			 * problems happens in block writing would not affect
			 * the file.
			 */
			cluster_begin = ocfs2_blocks_to_clusters(fs, v_blkno);
			cluster_end = ocfs2_blocks_to_clusters(fs,
						v_blkno + contig_blocks -1);
			n_clusters = cluster_end - cluster_begin + 1;
			ret = ocfs2_new_clusters(fs, 1, n_clusters, &p_start,
						 &n_clusters);
			if (ret || n_clusters == 0)
				return ret;

			/* Offset of the write within its first cluster. */
			begin_blocks = v_blkno & (bpc - 1);
			p_blkno = p_start + begin_blocks;
			contig_blocks = n_clusters * bpc - begin_blocks;
			if (contig_blocks > wanted_blocks) {
				end_blocks = contig_blocks - wanted_blocks;
				contig_blocks = wanted_blocks;
				p_end = p_blkno + wanted_blocks;
			}

			insert = 1;
		} else if (extent_flags & OCFS2_EXT_UNWRITTEN) {
			/* Unwritten extent: zero the uncovered edges of the
			 * clusters we are about to mark written. */
			begin_blocks = v_blkno & (bpc - 1);
			p_start = p_blkno - begin_blocks;
			p_end = p_blkno + wanted_blocks;
			end_blocks = (p_end & (bpc - 1)) ?
					bpc - (p_end & (bpc - 1 )) : 0;
		}

		if (begin_blocks) {
			/*
			 * The user don't write the first blocks,
			 * so we have to empty them.
			 */
			ret = empty_blocks(fs, p_start, begin_blocks);
			if (ret)
				return ret;
		}

		if (end_blocks) {
			/*
			 * we don't need to write that many blocks,
			 * so empty the blocks at the bottom.
			 */
			ret = empty_blocks(fs, p_end, end_blocks);
			if (ret)
				return ret;
		}

		ret = io_write_block(fs->fs_io, p_blkno, contig_blocks, ptr);
		if (ret)
			return ret;

		if (insert) {
			/* Data is safely on disk; now publish the extent. */
			ret = ocfs2_cached_inode_insert_extent(ci,
					ocfs2_blocks_to_clusters(fs,v_blkno),
					p_start, n_clusters, 0);
			if (ret) {
				/*
				 * XXX: We don't wan't to overwrite the error
				 * from insert_extent(). But we probably need
				 * to BE LOUDLY UPSET.
				 */
				ocfs2_free_clusters(fs, n_clusters, p_start);
				return ret;
			}

			/* save up what we have done. */
			ret = ocfs2_write_cached_inode(fs, ci);
			if (ret)
				return ret;

			ret = ocfs2_extent_map_get_blocks(ci, v_blkno, 1,
							  &p_blkno, NULL, NULL);
			/* now we shouldn't find a hole. */
			if (!p_blkno || p_blkno != p_start + begin_blocks)
				ret = OCFS2_ET_INTERNAL_FAILURE;
			if (ret)
				return ret;

			insert = 0;
		} else if (extent_flags & OCFS2_EXT_UNWRITTEN) {
			cluster_begin = ocfs2_blocks_to_clusters(fs, v_blkno);
			cluster_end = ocfs2_blocks_to_clusters(fs,
						v_blkno + contig_blocks -1);
			n_clusters = cluster_end - cluster_begin + 1;
			ret = ocfs2_mark_extent_written(fs, ci->ci_inode,
						cluster_begin, n_clusters,
						p_blkno & ~(bpc - 1));
			if (ret)
				return ret;
			/* Re-read the inode: the extent tree just changed
			 * underneath our cached copy. */
			ocfs2_free_cached_inode(fs, ci);
			ocfs2_read_cached_inode(fs,ino, &ci);
		}

		*wrote += (contig_blocks << bs_bits);
		wanted_blocks -= contig_blocks;

		if (wanted_blocks) {
			ptr += (contig_blocks << bs_bits);
			v_blkno += (uint64_t)contig_blocks;
		} else {
			/* Report only bytes inside i_size. */
			if (*wrote + offset > ci->ci_inode->i_size)
				*wrote = (uint32_t) (ci->ci_inode->i_size -
						     offset);
			/* break */
		}
	}

	return ret;
}