/*
 * Default FAT "set uptodate" hook: forwards straight to the buffer
 * cache.  The superblock argument is part of the hook signature but
 * is not used by this default implementation.
 */
void default_fat_set_uptodate(struct super_block *sb, struct buffer_head *bh,
			      int val)
{
	mark_buffer_uptodate(bh, val);
}
/*
 * Conditionally write @bh to disk.  When the global "nowrite" option
 * is set, the buffer is left untouched and 0 is returned; otherwise
 * the buffer is flagged and the result of bwrite() is returned.
 */
static int bwrite_cond(struct buffer_head *bh)
{
	if (opt_nowrite)
		return 0;

	mark_buffer_uptodate(bh, 0);
	mark_buffer_dirty(bh, 0);
	return bwrite(bh);
}
/*
 * Write 'count' bytes from the user buffer 'buf' into a regular MINIX
 * file, honouring O_APPEND.  Returns the number of bytes written, or a
 * negative errno (-EINVAL/-ENOSPC/-EIO; errors are only returned when
 * nothing at all was written).
 */
static int minix_file_write(struct inode * inode, struct file * filp,
			    const char * buf, int count)
{
	off_t pos;
	int written,c;
	struct buffer_head * bh;
	char * p;

	if (!inode) {
		printk("minix_file_write: inode = NULL\n");
		return -EINVAL;
	}
	if (!S_ISREG(inode->i_mode)) {
		printk("minix_file_write: mode = %07o\n",inode->i_mode);
		return -EINVAL;
	}
	/* O_APPEND always writes at end-of-file, ignoring f_pos */
	if (filp->f_flags & O_APPEND)
		pos = inode->i_size;
	else
		pos = filp->f_pos;
	written = 0;
	while (written < count) {
		/* get (allocating if necessary) the block covering 'pos' */
		bh = minix_getblk(inode,pos/BLOCK_SIZE,1);
		if (!bh) {
			if (!written)
				written = -ENOSPC;
			break;
		}
		/* bytes that fit in this block from 'pos' onwards */
		c = BLOCK_SIZE - (pos % BLOCK_SIZE);
		if (c > count-written)
			c = count-written;
		/* partial-block write: must read old contents first */
		if (c != BLOCK_SIZE && !buffer_uptodate(bh)) {
			ll_rw_block(READ, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				brelse(bh);
				if (!written)
					written = -EIO;
				break;
			}
		}
		p = (pos % BLOCK_SIZE) + bh->b_data;
		memcpy_fromfs(p,buf,c);
		/* keep the page cache coherent with the buffer cache */
		update_vm_cache(inode, pos, p, c);
		mark_buffer_uptodate(bh, 1);
		mark_buffer_dirty(bh, 0);
		brelse(bh);
		pos += c;
		written += c;
		buf += c;
	}
	if (pos > inode->i_size)
		inode->i_size = pos;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	filp->f_pos = pos;
	inode->i_dirt = 1;
	return written;
}
int ext_new_block(struct super_block * sb) { struct buffer_head * bh; struct ext_free_block * efb; int j; if (!sb) { printk("trying to get new block from non-existent device\n"); return 0; } if (!sb->u.ext_sb.s_firstfreeblock) return 0; lock_super (sb); efb = (struct ext_free_block *) sb->u.ext_sb.s_firstfreeblock->b_data; if (efb->count) { j = efb->free[--efb->count]; mark_buffer_dirty(sb->u.ext_sb.s_firstfreeblock, 1); } else { #ifdef EXTFS_DEBUG printk("ext_new_block: block empty, skipping to %d\n", efb->next); #endif j = sb->u.ext_sb.s_firstfreeblocknumber; sb->u.ext_sb.s_firstfreeblocknumber = efb->next; brelse (sb->u.ext_sb.s_firstfreeblock); if (!sb->u.ext_sb.s_firstfreeblocknumber) { sb->u.ext_sb.s_firstfreeblock = NULL; } else { if (!(sb->u.ext_sb.s_firstfreeblock = bread (sb->s_dev, sb->u.ext_sb.s_firstfreeblocknumber, sb->s_blocksize))) panic ("ext_new_block: unable to read next free block\n"); } } if (j < sb->u.ext_sb.s_firstdatazone || j > sb->u.ext_sb.s_nzones) { printk ("ext_new_block: blk = %d\n", j); printk("allocating block not in data zone\n"); return 0; } sb->u.ext_sb.s_freeblockscount --; sb->s_dirt = 1; if (!(bh=getblk(sb->s_dev, j, sb->s_blocksize))) { printk("new_block: cannot get block"); return 0; } memset(bh->b_data, 0, BLOCK_SIZE); mark_buffer_uptodate(bh, 1); mark_buffer_dirty(bh, 1); brelse(bh); #ifdef EXTFS_DEBUG printk("ext_new_block: allocating block %d\n", j); #endif unlock_super (sb); return j; }
/* block moving */ static unsigned long move_generic_block(unsigned long block, unsigned long bnd, int h) { struct buffer_head * bh, * bh2; /* primitive fsck */ if (block > rs_block_count(rs)) { fprintf(stderr, "resize_reiserfs: invalid block number (%lu) found.\n", block); quit_resizer(); } /* progress bar, 3D style :) */ if (opt_verbose) print_how_far(&total_node_cnt, blocks_used, 1, 0); else total_node_cnt ++; /* infinite loop check */ if( total_node_cnt > blocks_used && !block_count_mismatch) { fputs("resize_reiserfs: warning: block count exeeded\n",stderr); block_count_mismatch = 1; } if (block < bnd) /* block will not be moved */ return 0; /* move wrong block */ bh = bread(fs->s_dev, block, fs->s_blocksize); reiserfs_bitmap_find_zero_bit(bmp, &unused_block); if (unused_block == 0 || unused_block >= bnd) { fputs ("resize_reiserfs: can\'t find free block\n", stderr); quit_resizer(); } /* blocknr changing */ bh2 = getblk(fs->s_dev, unused_block, fs->s_blocksize); memcpy(bh2->b_data, bh->b_data, bh2->b_size); reiserfs_bitmap_clear_bit(bmp, block); reiserfs_bitmap_set_bit(bmp, unused_block); brelse(bh); mark_buffer_uptodate(bh2,1); mark_buffer_dirty(bh2); bwrite(bh2); brelse(bh2); total_moved_cnt++; return unused_block; }
/*
 * hfs_buffer_get()
 *
 * Obtain a buffer for the 'block'th block of the media.  When 'read'
 * is non-zero the block is read from disk; otherwise the buffer is
 * merely allocated and immediately flagged uptodate so that no stale
 * disk read is triggered later.  Logs an error and returns whatever
 * the buffer-cache call produced (a failed lookup yields no buffer).
 */
hfs_buffer hfs_buffer_get(hfs_sysmdb sys_mdb, int block, int read)
{
	hfs_buffer buf;

	if (read) {
		buf = sb_bread(sys_mdb, block);
	} else {
		buf = sb_getblk(sys_mdb, block);
		if (buf)
			mark_buffer_uptodate(buf, 1);
	}

	if (!buf)
		hfs_error("hfs_fs: unable to read block 0x%08x from dev %s\n",
			  block, hfs_mdb_name(sys_mdb));

	return buf;
}
/*
 * Transfer a buffer directly, without going through the request queue.
 * Copies between the buffer head and the device's in-memory backing
 * store, completing the buffer via b_end_io.  Always returns 0
 * ("nonzero return means we're done" per the make_request contract).
 */
int sbull_make_request(request_queue_t *queue, int rw, struct buffer_head *bh)
{
	Sbull_Dev *device;
	u8 *disk;

	/* Locate the backing store for this minor and the target offset */
	device = sbull_devices + MINOR(bh->b_rdev);
	disk = device->data + bh->b_rsector * sbull_hardsect;

	printk (KERN_DEBUG "make_rq: dev %x, sect %ld, size %d, cmd %d\n",
		bh->b_rdev, bh->b_rsector, bh->b_size, rw);

	/* Paranoid check, this apparently can really happen */
	if (disk + bh->b_size > device->data + sbull_blksize*sbull_size) {
		static int count = 0;
		if (count++ < 5)
			printk(KERN_WARNING "sbull: request past end of device\n");
		bh->b_end_io(bh, 0);
		return 0;
	}

	/* Do the transfer */
	switch (rw) {
	case READ:
	case READA:				/* Readahead */
		memcpy(bh->b_data, disk, bh->b_size);	/* from sbull to buffer */
		bh->b_end_io(bh, 1);
		break;

	case WRITE:
		refile_buffer(bh);
		memcpy(disk, bh->b_data, bh->b_size);	/* from buffer to sbull */
		mark_buffer_uptodate(bh, 1);
		bh->b_end_io(bh, 1);
		break;

	default:
		/* can't happen */
		bh->b_end_io(bh, 0);
		break;
	}

	return 0;
}
/*
 * Allocate 'num' blocks forming a chain of indirect blocks described
 * by 'offsets', filling in the Indirect 'branch' array.  branch[0]
 * receives the key of the first allocated block; each later child's
 * key is written into its PARENT block at offsets[n].  Returns 0 on
 * success, or -ENOSPC after releasing everything already allocated.
 */
static int alloc_branch(struct inode *inode, int num, int *offsets, Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		/* note: the buffer fetched here is the PARENT block —
		 * the new child's key is stored inside it at offsets[n] */
		bh = getblk(inode->i_dev, parent, BLOCK_SIZE);
		lock_buffer(bh);
		memset(bh->b_data, 0, BLOCK_SIZE);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		mark_buffer_uptodate(bh, 1);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}
/*
 * On-line grow of a mounted reiserfs to 'block_count_new' blocks.
 * Shrinking is rejected.  Extends the journal bitmaps and on-disk
 * block bitmaps as needed, then fixes the last bitmap blocks and the
 * superblock counters inside a single transaction.
 * Returns 0 on success or a negative errno.
 */
int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
{
	struct reiserfs_super_block * sb;
	struct buffer_head ** bitmap, * bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	struct reiserfs_list_bitmap * jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size ;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size: the prospective last block must be
	 * readable, otherwise the device is too small */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES ) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) - (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			/* NOTE(review): unlock_super() with no visible matching
			 * lock_super() in this function — verify against the
			 * caller's locking protocol */
			unlock_super(s) ;
			return -ENOMEM ;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		** node pointers from the old journal bitmap structs, and then
		** transfer the new data structures into the journal struct.
		**
		** using the copy_size var below allows this code to work for
		** both shrinking and expanding the FS.
		*/
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
		copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
		for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
			struct reiserfs_bitmap_node **node_tmp ;
			jb = SB_JOURNAL(s)->j_list_bitmap + i ;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;

			/* just in case vfree schedules on us, copy the new
			** pointer into the journal struct before freeing the
			** old one
			*/
			node_tmp = jb->bitmaps ;
			jb->bitmaps = jbitmap[i].bitmaps ;
			vfree(node_tmp) ;
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap = reiserfs_kmalloc(sizeof(struct buffer_head *) * bmap_nr_new, GFP_KERNEL, s);
		if (!bitmap) {
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = SB_AP_BITMAP(s)[i];
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* new bitmap blocks live at fixed positions: every
			 * (blocksize * 8)-th block */
			bitmap[i] = getblk(s->s_dev, i * s->s_blocksize * 8, s->s_blocksize);
			memset(bitmap[i]->b_data, 0, sb->s_blocksize);
			/* bit 0 marks the bitmap block itself as in use */
			reiserfs_test_and_set_le_bit(0, bitmap[i]->b_data);

			mark_buffer_dirty(bitmap[i]) ;
			mark_buffer_uptodate(bitmap[i], 1);
			ll_rw_block(WRITE, 1, bitmap + i);
			wait_on_buffer(bitmap[i]);
		}
		/* free old bitmap blocks array */
		reiserfs_kfree(SB_AP_BITMAP(s),
			       sizeof(struct buffer_head *) * bmap_nr, s);
		SB_AP_BITMAP(s) = bitmap;
	}

	/* begin transaction */
	journal_begin(&th, s, 10);

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1], 1);
	/* bits past the old end of device in the old last bitmap block
	 * become available again */
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_test_and_clear_le_bit(i,
					       SB_AP_BITMAP(s)[bmap_nr - 1]->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1]);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1], 1);
	/* bits past the new end of device in the new last bitmap block
	 * are marked used */
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_test_and_set_le_bit(i,
					     SB_AP_BITMAP(s)[bmap_nr_new - 1]->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1]);

	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
	free_blocks = SB_FREE_BLOCKS(s);
	/* new free count: grown area minus the blocks consumed by the
	 * newly added bitmap blocks */
	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	journal_end(&th, s, 10);

	return 0;
}
/*
 * Write 'count' bytes to a regular bext2 file, honouring O_APPEND and
 * O_SYNC.  With O_SYNC, dirty buffers are batched in 'bufferlist' and
 * flushed NBUF at a time.  Returns bytes written, or a negative errno
 * when nothing could be written (file size is capped at 2GB).
 */
static int bext2_file_write (struct inode * inode, struct file * filp,
			     const char * buf, int count)
{
	const loff_t two_gb = 2147483647;
	loff_t pos;
	off_t pos2;
	long block;
	int offset;
	int written, c;
	struct buffer_head * bh, *bufferlist[NBUF];
	struct super_block * sb;
	int err;
	int i,buffercount,write_error;

	write_error = buffercount = 0;
	if (!inode) {
		printk("bext2_file_write: inode = NULL\n");
		return -EINVAL;
	}
	sb = inode->i_sb;
	if (sb->s_flags & MS_RDONLY)
		/*
		 * This fs has been automatically remounted ro because of errors
		 */
		return -ENOSPC;

	if (!S_ISREG(inode->i_mode)) {
		bext2_warning (sb, "bext2_file_write", "mode = %07o", inode->i_mode);
		return -EINVAL;
	}
	if (filp->f_flags & O_APPEND)
		pos = inode->i_size;
	else
		pos = filp->f_pos;
	pos2 = (off_t) pos;
	/*
	 * If a file has been opened in synchronous mode, we have to ensure
	 * that meta-data will also be written synchronously.  Thus, we
	 * set the i_osync field.  This field is tested by the allocation
	 * routines.
	 */
	if (filp->f_flags & O_SYNC)
		inode->u.ext2_i.i_osync++;
	block = pos2 >> EXT2_BLOCK_SIZE_BITS(sb);
	offset = pos2 & (sb->s_blocksize - 1);
	c = sb->s_blocksize - offset;	/* bytes that fit in the first block */
	written = 0;
	while (count > 0) {
		/* refuse to grow past the 32-bit offset limit */
		if (pos > two_gb) {
			if (!written)
				written = -EFBIG;
			break;
		}
		bh = bext2_getblk (inode, block, 1, &err);
		if (!bh) {
			if (!written)
				written = err;
			break;
		}
		count -= c;
		if (count < 0)
			c += count;	/* clamp c to the bytes actually remaining */
		/* partial block: read the old contents before overwriting */
		if (c != sb->s_blocksize && !buffer_uptodate(bh)) {
			ll_rw_block (READ, 1, &bh);
			wait_on_buffer (bh);
			if (!buffer_uptodate(bh)) {
				brelse (bh);
				if (!written)
					written = -EIO;
				break;
			}
		}
		memcpy_fromfs (bh->b_data + offset, buf, c);
		/* keep the page cache coherent with the buffer cache */
		update_vm_cache(inode, pos, bh->b_data + offset, c);
		pos2 += c;
		pos += c;
		written += c;
		buf += c;
		mark_buffer_uptodate(bh, 1);
		mark_buffer_dirty(bh, 0);
		/* O_SYNC: hold buffers and write them out NBUF at a time */
		if (filp->f_flags & O_SYNC)
			bufferlist[buffercount++] = bh;
		else
			brelse(bh);
		if (buffercount == NBUF){
			ll_rw_block(WRITE, buffercount, bufferlist);
			for(i=0; i<buffercount; i++){
				wait_on_buffer(bufferlist[i]);
				if (!buffer_uptodate(bufferlist[i]))
					write_error=1;
				brelse(bufferlist[i]);
			}
			buffercount=0;
		}
		if(write_error)
			break;
		block++;
		offset = 0;
		c = sb->s_blocksize;
	}
	/* flush any remaining O_SYNC buffers */
	if ( buffercount ){
		ll_rw_block(WRITE, buffercount, bufferlist);
		for(i=0; i<buffercount; i++){
			wait_on_buffer(bufferlist[i]);
			if (!buffer_uptodate(bufferlist[i]))
				write_error=1;
			brelse(bufferlist[i]);
		}
	}
	if (pos > inode->i_size)
		inode->i_size = pos;
	if (filp->f_flags & O_SYNC)
		inode->u.ext2_i.i_osync--;
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	filp->f_pos = pos;
	inode->i_dirt = 1;
	return written;
}
/*
 * Write the commit block that closes the open transaction 'trans'.
 * If the journal has the CHECKSUM compat feature, the transaction's
 * body blocks are read back and a CRC32 is stored in the commit
 * header.  On success the transaction is marked committed and the
 * filesystem is flagged as needing recovery.  Returns 0 or an error.
 */
static errcode_t journal_commit_trans(journal_transaction_t *trans)
{
	struct buffer_head *bh, *cbh = NULL;
	struct commit_header *commit;
#ifdef HAVE_SYS_TIME_H
	struct timeval tv;
#endif
	errcode_t err;

	JOURNAL_CHECK_TRANS_MAGIC(trans);

	if ((trans->flags & J_TRANS_COMMITTED) ||
	    !(trans->flags & J_TRANS_OPEN))
		return EXT2_ET_INVALID_ARGUMENT;

	bh = getblk(trans->journal->j_dev, 0, trans->journal->j_blocksize);
	if (bh == NULL)
		return ENOMEM;

	/* write the commit block header */
	commit = (struct commit_header *)bh->b_data;
	commit->h_magic = ext2fs_cpu_to_be32(JFS_MAGIC_NUMBER);
	commit->h_blocktype = ext2fs_cpu_to_be32(JFS_COMMIT_BLOCK);
	commit->h_sequence = ext2fs_cpu_to_be32(trans->tid);
	if (JFS_HAS_COMPAT_FEATURE(trans->journal,
				   JFS_FEATURE_COMPAT_CHECKSUM)) {
		__u32 csum_v1 = ~0;
		blk64_t cblk;

		cbh = getblk(trans->journal->j_dev, 0,
			     trans->journal->j_blocksize);
		if (cbh == NULL) {
			err = ENOMEM;
			goto error;
		}
		/* read back and checksum every block of the transaction body */
		for (cblk = trans->start; cblk < trans->block; cblk++) {
			err = journal_bmap(trans->journal, cblk,
					   &cbh->b_blocknr);
			if (err)
				goto error;
			/* force a real read of the reused buffer */
			mark_buffer_uptodate(cbh, 0);
			ll_rw_block(READ, 1, &cbh);
			err = cbh->b_err;
			if (err)
				goto error;
			csum_v1 = ext2fs_crc32_be(csum_v1,
					(unsigned char const *)cbh->b_data,
					cbh->b_size);
		}

		commit->h_chksum_type = JFS_CRC32_CHKSUM;
		commit->h_chksum_size = JFS_CRC32_CHKSUM_SIZE;
		commit->h_chksum[0] = ext2fs_cpu_to_be32(csum_v1);
	} else {
		commit->h_chksum_type = 0;
		commit->h_chksum_size = 0;
		commit->h_chksum[0] = 0;
	}
#ifdef HAVE_SYS_TIME_H
	gettimeofday(&tv, NULL);
	commit->h_commit_sec = ext2fs_cpu_to_be32(tv.tv_sec);
	commit->h_commit_nsec = ext2fs_cpu_to_be32(tv.tv_usec * 1000);
#else
	commit->h_commit_sec = 0;
	commit->h_commit_nsec = 0;
#endif

	/* Write block */
	jbd2_commit_block_csum_set(trans->journal, bh);
	err = journal_bmap(trans->journal, trans->block, &bh->b_blocknr);
	if (err)
		goto error;

	dbg_printf("Writing commit block at %llu:%llu\n", trans->block,
		   bh->b_blocknr);
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
	err = bh->b_err;
	if (err)
		goto error;
	trans->flags |= J_TRANS_COMMITTED;
	trans->flags &= ~J_TRANS_OPEN;
	trans->block++;

	/* mark the fs as needing journal recovery until cleanly unmounted */
	trans->fs->super->s_feature_incompat |= EXT3_FEATURE_INCOMPAT_RECOVER;
	ext2fs_mark_super_dirty(trans->fs);
error:
	if (cbh)
		brelse(cbh);
	brelse(bh);
	return err;
}
/*
 * Create a symbolic link 'dentry' in directory 'dip' pointing at
 * 'name'.  Short targets (<= IDATASIZE) are stored inline in the
 * inode (fast symlink); longer targets get a single allocated extent
 * written through the metapage cache.  Returns 0 or -errno.
 */
int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name)
{
	int rc;
	tid_t tid;
	ino_t ino = 0;
	struct component_name dname;
	int ssize;		/* source pathname size */
	struct btstack btstack;
	struct inode *ip = dentry->d_inode;
	unchar *i_fastsymlink;
	s64 xlen = 0;
	int bmask = 0, xsize;
	s64 xaddr;
	struct metapage *mp;
	struct super_block *sb;
	struct tblock *tblk;
	struct inode *iplist[2];

	jFYI(1, ("jfs_symlink: dip:0x%p name:%s\n", dip, name));

	ssize = strlen(name) + 1;	/* include the NUL */

	/*
	 * search parent directory for entry/freespace
	 * (dtSearch() returns parent directory page pinned)
	 */
	if ((rc = get_UCSname(&dname, dentry, JFS_SBI(dip->i_sb)->nls_tab)))
		goto out1;

	/*
	 * allocate on-disk/in-memory inode for symbolic link:
	 * (iAlloc() returns new, locked inode)
	 */
	ip = ialloc(dip, S_IFLNK | 0777);
	if (ip == NULL) {
		rc = ENOSPC;
		goto out2;
	}

	tid = txBegin(dip->i_sb, 0);

	down(&JFS_IP(dip)->commit_sem);
	down(&JFS_IP(ip)->commit_sem);

	if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE)))
		goto out3;

	tblk = tid_to_tblock(tid);
	tblk->xflag |= COMMIT_CREATE;
	tblk->ip = ip;

	/*
	 * create entry for symbolic link in parent directory
	 */
	ino = ip->i_ino;
	if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) {
		jERROR(1, ("jfs_symlink: dtInsert returned %d\n", rc));
		/* discard new inode */
		goto out3;
	}

	/* fix symlink access permission
	 * (dir_create() ANDs in the u.u_cmask,
	 * but symlinks really need to be 777 access)
	 */
	ip->i_mode |= 0777;

	/*
	 * write symbolic link target path name
	 */
	xtInitRoot(tid, ip);

	/*
	 * write source path name inline in on-disk inode (fast symbolic link)
	 */
	if (ssize <= IDATASIZE) {
		ip->i_op = &jfs_symlink_inode_operations;

		i_fastsymlink = JFS_IP(ip)->i_inline;
		memcpy(i_fastsymlink, name, ssize);
		ip->i_size = ssize - 1;

		/*
		 * if symlink is > 128 bytes, we don't have the space to
		 * store inline extended attributes
		 */
		if (ssize > sizeof (JFS_IP(ip)->i_inline))
			JFS_IP(ip)->mode2 &= ~INLINEEA;

		jFYI(1, ("jfs_symlink: fast symlink added ssize:%d name:%s \n", ssize, name));
	}
	/*
	 * write source path name in a single extent
	 */
	else {
		jFYI(1, ("jfs_symlink: allocate extent ip:0x%p\n", ip));

		ip->i_op = &page_symlink_inode_operations;
		ip->i_mapping->a_ops = &jfs_aops;

		/*
		 * even though the data of symlink object (source
		 * path name) is treated as non-journaled user data,
		 * it is read/written thru buffer cache for performance.
		 */
		sb = ip->i_sb;
		bmask = JFS_SBI(sb)->bsize - 1;
		xsize = (ssize + bmask) & ~bmask;	/* round up to block size */
		xaddr = 0;
		xlen = xsize >> JFS_SBI(sb)->l2bsize;
		if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0)) == 0) {
			ip->i_size = ssize - 1;
			/* copy the target path page by page */
			while (ssize) {
				int copy_size = min(ssize, PSIZE);

				mp = get_metapage(ip, xaddr, PSIZE, 1);

				if (mp == NULL) {
					/* back out the directory entry */
					dtDelete(tid, dip, &dname, &ino,
						 JFS_REMOVE);
					rc = EIO;
					goto out3;
				}
				memcpy(mp->data, name, copy_size);
				flush_metapage(mp);
#if 0
				mark_buffer_uptodate(bp, 1);
				mark_buffer_dirty(bp, 1);
				if (IS_SYNC(dip)) {
					ll_rw_block(WRITE, 1, &bp);
					wait_on_buffer(bp);
				}
				brelse(bp);
#endif				/* 0 */
				ssize -= copy_size;
				xaddr += JFS_SBI(sb)->nbperpage;
			}
			ip->i_blocks = LBLK2PBLK(sb, xlen);
		} else {
			dtDelete(tid, dip, &dname, &ino, JFS_REMOVE);
			rc = ENOSPC;
			goto out3;
		}
	}

	insert_inode_hash(ip);
	mark_inode_dirty(ip);
	d_instantiate(dentry, ip);

	/*
	 * commit update of parent directory and link object
	 *
	 * if extent allocation failed (ENOSPC),
	 * the parent inode is committed regardless to avoid
	 * backing out parent directory update (by dtInsert())
	 * and subsequent dtDelete() which is harmless wrt
	 * integrity concern.
	 * the symlink inode will be freed by iput() at exit
	 * as it has a zero link count (by dtDelete()) and
	 * no permanent resources.
	 */
	iplist[0] = dip;
	if (rc == 0) {
		iplist[1] = ip;
		rc = txCommit(tid, 2, &iplist[0], 0);
	} else
		rc = txCommit(tid, 1, &iplist[0], 0);

      out3:
	txEnd(tid);
	up(&JFS_IP(dip)->commit_sem);
	up(&JFS_IP(ip)->commit_sem);
	if (rc) {
		/* drop the half-created inode */
		ip->i_nlink = 0;
		iput(ip);
	}

      out2:
	free_UCSname(&dname);

      out1:
	jFYI(1, ("jfs_symlink: rc:%d\n", -rc));
	return -rc;
}
/*
 * Write to an AFFS "old filesystem" (OFS) file.  OFS data blocks carry
 * a 24-byte header, so only (blocksize - 24) payload bytes fit per
 * block, and each block's data_size field and checksum must be kept
 * up to date.  Returns bytes written or a negative errno.
 */
static ssize_t affs_file_write_ofs(struct file *filp, const char *buf,
				   size_t count, loff_t *ppos)
{
	struct inode		*inode = filp->f_dentry->d_inode;
	off_t			 pos;
	ssize_t			 written;
	ssize_t			 c;
	ssize_t			 blocksize;
	struct buffer_head	*bh;
	char			*p;

	pr_debug("AFFS: file_write_ofs(ino=%lu,pos=%lu,count=%d)\n",inode->i_ino,
		 (unsigned long)*ppos,count);

	if (!count)
		return 0;
	if (!inode) {
		affs_error(inode->i_sb,"file_write_ofs","Inode = NULL");
		return -EINVAL;
	}
	if (!S_ISREG(inode->i_mode)) {
		affs_error(inode->i_sb,"file_write_ofs",
			   "Trying to write to non-regular file (mode=%07o)",
			   inode->i_mode);
		return -EINVAL;
	}
	/* make sure the in-core extension-block cache exists */
	if (!inode->u.affs_i.i_ec && alloc_ext_cache(inode))
		return -ENOMEM;
	if (filp->f_flags & O_APPEND)
		pos = inode->i_size;
	else
		pos = *ppos;

	bh = NULL;
	blocksize = AFFS_I2BSIZE(inode) - 24;	/* payload bytes per OFS block */
	written = 0;
	while (written < count) {
		bh = affs_getblock(inode,pos / blocksize);
		if (!bh) {
			if (!written)
				written = -ENOSPC;
			break;
		}
		c = blocksize - (pos % blocksize);
		if (c > count - written)
			c = count - written;
		/* partial block: read the existing contents first */
		if (c != blocksize && !buffer_uptodate(bh)) {
			ll_rw_block(READ,1,&bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				affs_brelse(bh);
				if (!written)
					written = -EIO;
				break;
			}
		}
		/* +24 skips the OFS per-block header */
		p = (pos % blocksize) + bh->b_data + 24;
		c -= copy_from_user(p,buf,c);
		if (!c) {
			affs_brelse(bh);
			if (!written)
				written = -EFAULT;
			break;
		}
		update_vm_cache(inode,pos,p,c);
		pos += c;
		buf += c;
		written += c;
		/* keep the OFS per-block byte count and checksum valid */
		DATA_FRONT(bh)->data_size = cpu_to_be32(be32_to_cpu(DATA_FRONT(bh)->data_size) + c);
		affs_fix_checksum(AFFS_I2BSIZE(inode),bh->b_data,5);
		mark_buffer_uptodate(bh,1);
		mark_buffer_dirty(bh,0);
		affs_brelse(bh);
	}
	if (pos > inode->i_size)
		inode->i_size = pos;
	*ppos = pos;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return written;
}
/* recursive function processing all tree nodes */
/*
 * Recursively relocate the formatted node at 'block' (and everything
 * it references) so that nothing remains at or beyond the new
 * boundary 'bnd'.  Returns the node's new block number if it was
 * moved, 0 otherwise.  'h' is the current tree depth.
 */
static unsigned long move_formatted_block(unsigned long block, unsigned long bnd, int h)
{
	struct buffer_head * bh;
	struct item_head *ih;
	unsigned long new_blocknr = 0;
	int node_is_internal = 0;
	int i, j;

	bh = bread(fs->s_dev, block, fs->s_blocksize);

	if (is_leaf_node (bh)) {
		leaf_node_cnt++;
		/* relocate every unformatted block referenced by indirect items */
		for (i=0; i < B_NR_ITEMS(bh); i++) {
			ih = B_N_PITEM_HEAD(bh, i);
			if (is_indirect_ih(ih)) {
				__u32 * indirect;

				indirect = (__u32 *)B_I_PITEM (bh, ih);
				for (j = 0; j < I_UNFM_NUM(ih); j++) {
					unsigned long unfm_block;

					if (indirect [j] == 0) /* hole */
						continue;
					unfm_block = move_unformatted_block(le32_to_cpu (indirect [j]), bnd, h + 1);
					if (unfm_block) {
						indirect [j] = cpu_to_le32 (unfm_block);
						mark_buffer_dirty(bh);
					}
				}
			}
		}
	} else if (is_internal_node (bh)) { /* internal node */
		int_node_cnt++;
		node_is_internal = 1;
		/* recurse into children first, updating child pointers */
		for (i=0; i <= B_NR_ITEMS(bh); i++) {
			unsigned long moved_block;
			moved_block = move_formatted_block(B_N_CHILD_NUM(bh, i), bnd, h+1);
			if (moved_block) {
				set_child_block_number (bh, i, moved_block);
				mark_buffer_dirty(bh);
			}
		}
	} else {
		die ("resize_reiserfs: block (%lu) has invalid format\n", block);
	}

	if (buffer_dirty(bh)) {
		/* write the modified node back in place before (possibly)
		 * moving the node itself */
		mark_buffer_uptodate(bh,1);
		bwrite(bh);
	}

	brelse(bh);

	/* finally move this node itself if it is beyond the boundary */
	new_blocknr = move_generic_block(block, bnd, h);
	if (new_blocknr) {
		if (node_is_internal)
			int_moved_cnt++;
		else
			leaf_moved_cnt++;
	}

	return new_blocknr;
}
/*
 * Scan the journal from s_start, transaction by transaction, to
 * locate the head of the log (the block following the last valid
 * commit).  On success (returns 0) it updates
 * journal->j_transaction_sequence and journal->j_head; reaching the
 * end of the valid log (bad magic / wrong sequence) counts as
 * success.  Returns an error code otherwise.
 */
errcode_t journal_find_head(journal_t *journal)
{
	unsigned int		next_commit_ID;
	blk64_t			next_log_block, head_block;
	int			err;
	journal_superblock_t	*sb;
	journal_header_t	*tmp;
	struct buffer_head	*bh;
	unsigned int		sequence;
	int			blocktype;

	/*
	 * First thing is to establish what we expect to find in the log
	 * (in terms of transaction IDs), and where (in terms of log
	 * block offsets): query the superblock.
	 */
	sb = journal->j_superblock;
	next_commit_ID = ext2fs_be32_to_cpu(sb->s_sequence);
	next_log_block = ext2fs_be32_to_cpu(sb->s_start);
	head_block = next_log_block;

	if (next_log_block == 0)
		return 0;	/* journal is empty: nothing to scan */

	bh = getblk(journal->j_dev, 0, journal->j_blocksize);
	if (bh == NULL)
		return ENOMEM;

	/*
	 * Now we walk through the log, transaction by transaction,
	 * making sure that each transaction has a commit block in the
	 * expected place.  Each complete transaction gets replayed back
	 * into the main filesystem.
	 */
	while (1) {
		dbg_printf("Scanning for sequence ID %u at %lu/%lu\n",
			   next_commit_ID, (unsigned long)next_log_block,
			   journal->j_last);

		/* Skip over each chunk of the transaction looking
		 * either the next descriptor block or the final commit
		 * record. */
		err = journal_bmap(journal, next_log_block, &bh->b_blocknr);
		if (err)
			goto err;
		/* force a fresh read of the (reused) buffer */
		mark_buffer_uptodate(bh, 0);
		ll_rw_block(READ, 1, &bh);
		err = bh->b_err;
		if (err)
			goto err;

		next_log_block++;
		wrap(journal, next_log_block);

		/* What kind of buffer is it?
		 *
		 * If it is a descriptor block, check that it has the
		 * expected sequence number.  Otherwise, we're all done
		 * here. */
		tmp = (journal_header_t *)bh->b_data;

		if (tmp->h_magic != ext2fs_cpu_to_be32(JFS_MAGIC_NUMBER)) {
			dbg_printf("JBD2: wrong magic 0x%x\n", tmp->h_magic);
			/* end of valid log: err is still 0, so this is
			 * reported as a successful scan */
			goto err;
		}

		blocktype = ext2fs_be32_to_cpu(tmp->h_blocktype);
		sequence = ext2fs_be32_to_cpu(tmp->h_sequence);
		dbg_printf("Found magic %d, sequence %d\n",
			   blocktype, sequence);

		if (sequence != next_commit_ID) {
			dbg_printf("JBD2: Wrong sequence %d (wanted %d)\n",
				   sequence, next_commit_ID);
			goto err;
		}

		/* OK, we have a valid descriptor block which matches
		 * all of the sequence number checks.  What are we going
		 * to do with it?  That depends on the pass... */
		switch (blocktype) {
		case JFS_DESCRIPTOR_BLOCK:
			/* skip the data blocks this descriptor covers */
			next_log_block += count_tags(journal, bh->b_data);
			wrap(journal, next_log_block);
			continue;

		case JFS_COMMIT_BLOCK:
			/* a complete transaction: advance the head */
			head_block = next_log_block;
			next_commit_ID++;
			continue;

		case JFS_REVOKE_BLOCK:
			continue;

		default:
			dbg_printf("Unrecognised magic %d, end of scan.\n",
				   blocktype);
			err = -EINVAL;
			goto err;
		}
	}

err:
	if (err == 0) {
		dbg_printf("head seq=%d blk=%llu\n", next_commit_ID,
			   head_block);
		journal->j_transaction_sequence = next_commit_ID;
		journal->j_head = head_block;
	}
	brelse(bh);
	return err;
}
/*
 * Shrink the filesystem to 'blocks' blocks.  Moves every tree node
 * and unformatted block that lies beyond the new boundary, truncates
 * the block bitmaps and rewrites the superblock counters.
 * Returns 0 on success, -1 if shrinking is impossible; exits on a
 * declined interactive confirmation.
 *
 * Fixes: the loop marking the unusable tail of the last bitmap block
 * iterated up to bmap_nr_new * blocksize (bytes) instead of
 * bmap_nr_new * blocksize * 8 (bits), so those trailing bits were
 * never marked in-use; also corrected wording of the warning prompt.
 */
int shrink_fs(reiserfs_filsys_t reiserfs, unsigned long blocks)
{
	unsigned long n_root_block;
	unsigned int bmap_nr_new;
	unsigned long int i;

	fs = reiserfs;
	rs = fs->s_rs;

	/* warn about alpha version */
	{
		int c;

		printf(
			"You are running BETA version of reiserfs shrinker.\n"
			"This version is only for testing or VERY CAREFUL use.\n"
			"Backup of your data is recommended.\n\n"
			"Do you want to continue? [y/N]:"
			);
		c = getchar();
		if (c != 'y' && c != 'Y')
			exit(1);
	}

	bmap_nr_new = (blocks - 1) / (8 * fs->s_blocksize) + 1;

	/* is shrinking possible ? */
	if (rs_block_count(rs) - blocks > rs_free_blocks(rs) + rs_bmap_nr(rs) - bmap_nr_new) {
		fprintf(stderr, "resize_reiserfs: can\'t shrink fs; too many blocks already allocated\n");
		return -1;
	}

	reiserfs_reopen(fs, O_RDWR);
	/* mark the fs "dirty" on disk while we operate on it */
	set_state (fs->s_rs, REISERFS_ERROR_FS);
	mark_buffer_uptodate(SB_BUFFER_WITH_SB(fs), 1);
	mark_buffer_dirty(SB_BUFFER_WITH_SB(fs));
	bwrite(SB_BUFFER_WITH_SB(fs));

	/* calculate number of data blocks */
	blocks_used =
	    SB_BLOCK_COUNT(fs)
	    - SB_FREE_BLOCKS(fs)
	    - SB_BMAP_NR(fs)
	    - SB_JOURNAL_SIZE(fs)
	    - REISERFS_DISK_OFFSET_IN_BYTES / fs->s_blocksize
	    - 2; /* superblock itself and 1 descriptor after the journal */

	bmp = reiserfs_create_bitmap(rs_block_count(rs));
	reiserfs_fetch_disk_bitmap(bmp, fs);

	unused_block = 1;

	if (opt_verbose) {
		printf("Processing the tree: ");
		fflush(stdout);
	}

	/* relocate everything beyond the new boundary, starting at the root */
	n_root_block = move_formatted_block(rs_root_block(rs), blocks, 0);
	if (n_root_block) {
		set_root_block (rs, n_root_block);
	}

	if (opt_verbose)
		printf ("\n\nnodes processed (moved):\n"
			"int %lu (%lu),\n"
			"leaves %lu (%lu),\n"
			"unfm %lu (%lu),\n"
			"total %lu (%lu).\n\n",
			int_node_cnt, int_moved_cnt,
			leaf_node_cnt, leaf_moved_cnt,
			unfm_node_cnt, unfm_moved_cnt,
			(unsigned long)total_node_cnt, total_moved_cnt);

	if (block_count_mismatch) {
		fprintf(stderr, "resize_reiserfs: data block count %lu"
			" doesn\'t match data block count %lu from super block\n",
			(unsigned long)total_node_cnt, blocks_used);
	}
#if 0
	printf("check for used blocks in truncated region\n");
	{
		unsigned long l;
		for (l = blocks; l < rs_block_count(rs); l++)
			if (is_block_used(bmp, l))
				printf("<%lu>", l);
		printf("\n");
	}
#endif /* 0 */

	reiserfs_free_bitmap_blocks(fs);

	set_free_blocks (rs, rs_free_blocks(rs) - (rs_block_count(rs) - blocks) + (rs_bmap_nr(rs) - bmap_nr_new));
	set_block_count (rs, blocks);
	set_bmap_nr (rs, bmap_nr_new);

	reiserfs_read_bitmap_blocks(fs);

	/* mark the unusable tail of the last bitmap block as allocated;
	 * the bound is in BITS — each bitmap block covers blocksize * 8
	 * blocks (was missing the "* 8", leaving the tail unmarked) */
	for (i = blocks; i < bmap_nr_new * fs->s_blocksize * 8; i++)
		reiserfs_bitmap_set_bit(bmp, i);
#if 0
	PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (SB_BLOCK_COUNT(s) - blocks) + (SB_BMAP_NR(s) - bmap_nr_new));
	PUT_SB_BLOCK_COUNT(s, blocks);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
#endif

	reiserfs_flush_bitmap(bmp, fs);
	return 0;
}
// // set up start journal block and journal size // make journal unreplayable by kernel replay routine // void reset_journal (struct super_block * s) { int i ; struct buffer_head *bh ; int done = 0; int len; int start; /* first block of journal */ s->u.reiserfs_sb.s_rs->s_journal_block = get_journal_start (s); start = s->u.reiserfs_sb.s_rs->s_journal_block; /* journal size */ s->u.reiserfs_sb.s_rs->s_orig_journal_size = get_journal_size (s); len = s->u.reiserfs_sb.s_rs->s_orig_journal_size + 1; printf ("Resetting journal - "); fflush (stdout); for (i = 0 ; i < len ; i++) { print_how_far (&done, len); bh = getblk (s->s_dev, start + i, s->s_blocksize) ; memset(bh->b_data, 0, s->s_blocksize) ; mark_buffer_dirty(bh,0) ; mark_buffer_uptodate(bh,0) ; bwrite (bh); brelse(bh) ; } printf ("\n"); fflush (stdout); #if 0 /* need better way to make journal unreplayable */ /* have journal_read to replay nothing: look for first non-desc block and set j_first_unflushed_offset to it */ { int offset; struct buffer_head * bh, *jh_bh; struct reiserfs_journal_header * j_head; struct reiserfs_journal_desc * desc; jh_bh = bread (s->s_dev, s->u.reiserfs_sb.s_rs->s_journal_block + s->u.reiserfs_sb.s_rs->s_orig_journal_size, s->s_blocksize); j_head = (struct reiserfs_journal_header *)(jh_bh->b_data); for (offset = 0; offset < s->u.reiserfs_sb.s_rs->s_orig_journal_size; offset ++) { bh = bread (s->s_dev, s->u.reiserfs_sb.s_rs->s_journal_block + offset, s->s_blocksize); desc = (struct reiserfs_journal_desc *)((bh)->b_data); if (memcmp(desc->j_magic, JOURNAL_DESC_MAGIC, 8)) { /* not desc block found */ j_head->j_first_unflushed_offset = offset; brelse (bh); break; } brelse (bh); } mark_buffer_uptodate (jh_bh, 1); mark_buffer_dirty (jh_bh, 1); bwrite (jh_bh); brelse (jh_bh); } #endif }
/*
 * Walk the whole journal once for the given recovery pass.  In
 * PASS_SCAN the end of the valid log is located; in PASS_REVOKE
 * revoke records are collected; in PASS_REPLAY described data blocks
 * are copied back to their home locations (unless revoked).
 * Returns 0 on success or a negative error.
 *
 * Fix: when restoring an escaped block (JFS_FLAG_ESCAPE), the magic
 * number must be written into the NEW buffer being restored
 * (nbh->b_data), not into the descriptor block (bh->b_data) as the
 * old code did — matching the upstream jbd recovery code.
 */
static int do_one_pass(journal_t *journal, struct recovery_info *info,
		       enum passtype pass)
{
	unsigned int		first_commit_ID, next_commit_ID;
	unsigned long		next_log_block;
	int			err, success = 0;
	journal_superblock_t *	sb;
	journal_header_t *	tmp;
	struct buffer_head *	bh;
	unsigned int		sequence;
	int			blocktype;

	/* Precompute the maximum metadata descriptors in a descriptor block */
	int			MAX_BLOCKS_PER_DESC;
	MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
			       / sizeof(journal_block_tag_t));

	/*
	 * First thing is to establish what we expect to find in the log
	 * (in terms of transaction IDs), and where (in terms of log
	 * block offsets): query the superblock.
	 */
	sb = journal->j_superblock;
	next_commit_ID = ntohl(sb->s_sequence);
	next_log_block = ntohl(sb->s_start);

	first_commit_ID = next_commit_ID;
	if (pass == PASS_SCAN)
		info->start_transaction = first_commit_ID;

	jfs_debug(1, "Starting recovery pass %d\n", pass);

	/*
	 * Now we walk through the log, transaction by transaction,
	 * making sure that each transaction has a commit block in the
	 * expected place.  Each complete transaction gets replayed back
	 * into the main filesystem.
	 */
	while (1) {
		int			flags;
		char *			tagp;
		journal_block_tag_t *	tag;
		struct buffer_head *	obh;
		struct buffer_head *	nbh;

		/* If we already know where to stop the log traversal,
		 * check right now that we haven't gone past the end of
		 * the log. */
		if (pass != PASS_SCAN)
			if (tid_geq(next_commit_ID, info->end_transaction))
				break;

		jfs_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
			  next_commit_ID, next_log_block, journal->j_last);

		/* Skip over each chunk of the transaction looking
		 * either the next descriptor block or the final commit
		 * record. */
		jfs_debug(3, "JFS: checking block %ld\n", next_log_block);
		err = jread(&bh, journal, next_log_block);
		if (err)
			goto failed;

		next_log_block++;
		wrap(journal, next_log_block);

		/* What kind of buffer is it?
		 *
		 * If it is a descriptor block, check that it has the
		 * expected sequence number.  Otherwise, we're all done
		 * here. */
		tmp = (journal_header_t *) bh->b_data;

		if (tmp->h_magic != htonl(JFS_MAGIC_NUMBER)) {
			brelse(bh);
			break;
		}

		blocktype = ntohl(tmp->h_blocktype);
		sequence = ntohl(tmp->h_sequence);
		jfs_debug(3, "Found magic %d, sequence %d\n",
			  blocktype, sequence);

		if (sequence != next_commit_ID) {
			brelse(bh);
			break;
		}

		/* OK, we have a valid descriptor block which matches
		 * all of the sequence number checks.  What are we going
		 * to do with it?  That depends on the pass... */
		switch(blocktype) {
		case JFS_DESCRIPTOR_BLOCK:
			/* If it is a valid descriptor block, replay it
			 * in pass REPLAY; otherwise, just skip over the
			 * blocks it describes. */
			if (pass != PASS_REPLAY) {
				next_log_block += count_tags(bh, journal->j_blocksize);
				wrap(journal, next_log_block);
				brelse(bh);
				continue;
			}

			/* A descriptor block: we can now write all of
			 * the data blocks.  Yay, useful work is finally
			 * getting done here! */
			tagp = &bh->b_data[sizeof(journal_header_t)];
			while ((tagp - bh->b_data +sizeof(journal_block_tag_t))
			       <= journal->j_blocksize) {
				unsigned long io_block;

				tag = (journal_block_tag_t *) tagp;
				flags = ntohl(tag->t_flags);

				io_block = next_log_block++;
				wrap(journal, next_log_block);
				err = jread(&obh, journal, io_block);
				if (err) {
					/* Recover what we can, but
					 * report failure at the end. */
					success = err;
					printk (KERN_ERR
						"JFS: IO error %d recovering "
						"block %ld in log\n",
						err, io_block);
				} else {
					unsigned long blocknr;

					J_ASSERT(obh != NULL);
					blocknr = ntohl(tag->t_blocknr);

					/* If the block has been
					 * revoked, then we're all done
					 * here. */
					if (journal_test_revoke
					    (journal, blocknr,
					     next_commit_ID)) {
						brelse(obh);
						++info->nr_revoke_hits;
						goto skip_write;
					}

					/* Find a buffer for the new
					 * data being restored */
					nbh = getblk(journal->j_dev, blocknr,
						     journal->j_blocksize);
					if (nbh == NULL) {
						printk(KERN_ERR
						       "JFS: Out of memory "
						       "during recovery.\n");
						err = -ENOMEM;
						brelse(bh);
						brelse(obh);
						goto failed;
					}

					memcpy(nbh->b_data, obh->b_data,
					       journal->j_blocksize);
					if (flags & JFS_FLAG_ESCAPE) {
						/* FIX: un-escape in the restored
						 * buffer (nbh), not the descriptor
						 * block (bh) */
						*((unsigned int *) nbh->b_data) =
							htonl(JFS_MAGIC_NUMBER);
					}

					mark_buffer_dirty(nbh, 1);
					mark_buffer_uptodate(nbh, 1);
					++info->nr_replays;
					/* ll_rw_block(WRITE, 1, &nbh); */
					brelse(obh);
					brelse(nbh);
				}

			skip_write:
				tagp += sizeof(journal_block_tag_t);
				if (!(flags & JFS_FLAG_SAME_UUID))
					tagp += 16;

				if (flags & JFS_FLAG_LAST_TAG)
					break;
			}

			brelse(bh);
			continue;

		case JFS_COMMIT_BLOCK:
			/* Found an expected commit block: not much to
			 * do other than move on to the next sequence
			 * number. */
			brelse(bh);
			next_commit_ID++;
			continue;

		case JFS_REVOKE_BLOCK:
			/* If we aren't in the REVOKE pass, then we can
			 * just skip over this block. */
			if (pass != PASS_REVOKE) {
				brelse(bh);
				continue;
			}

			err = scan_revoke_records(journal, bh,
						  next_commit_ID, info);
			brelse(bh);
			if (err)
				goto failed;
			continue;

		default:
			jfs_debug(3, "Unrecognised magic %d, end of scan.\n",
				  blocktype);
			goto done;
		}
	}

 done:
	/*
	 * We broke out of the log scan loop: either we came to the
	 * known end of the log or we found an unexpected block in the
	 * log.  If the latter happened, then we know that the "current"
	 * transaction marks the end of the valid log.
	 */
	if (pass == PASS_SCAN)
		info->end_transaction = next_commit_ID;
	else {
		/* It's really bad news if different passes end up at
		 * different places (but possible due to IO errors). */
		if (info->end_transaction != next_commit_ID) {
			printk (KERN_ERR "JFS: recovery pass %d ended at "
				"transaction %u, expected %u\n",
				pass, next_commit_ID, info->end_transaction);
			if (!success)
				success = -EIO;
		}
	}

	return success;

 failed:
	return err;
}