Example #1
nh_t
node_alloc( void )
{
    nix_t nix;
    u_char_t *p;
    nh_t nh;
    register nix_t *linkagep;
#ifdef NODECHK
    register u_char_t *hkpp;
    register u_char_t gen;
    register u_char_t unq;
#endif /* NODECHK */

    /* if free list is depleted, map in a new window at the
     * end of backing store. put all nodes on free list.
     * initialize the gen count to the node index, and the unique
     * pattern to the free pattern.
     */
    if ( node_hdrp->nh_freenix == NIX_NULL ) {
        nix_t virgbegnix; /* abs. nix of first node in virg seg */
        nix_t virgendnix; /* abs. nix of next node after last */
        nix_t sacrcnt; /* how many virgins to put on free list */
        nix_t sacrnix;

        ASSERT( node_hdrp->nh_virgrelnix
                <
                ( nix_t )node_hdrp->nh_nodesperseg );
        virgbegnix = OFF2NIX( node_hdrp->nh_virgsegreloff )
                     +
                     node_hdrp->nh_virgrelnix;
        virgendnix =
            OFF2NIX( ( node_hdrp->nh_virgsegreloff
                       +
                       ( off64_t )node_hdrp->nh_segsz ) );
#ifdef TREE_DEBUG
        mlog(MLOG_DEBUG | MLOG_TREE,
             "node_alloc(): create freelist - "
             "virg_begin=%lld virg_end=%lld\n",
             virgbegnix, virgendnix);
#endif
        ASSERT( virgendnix > virgbegnix );
        sacrcnt = min( VIRGSACRMAX, virgendnix - virgbegnix );
        ASSERT( sacrcnt >= 1 );
        p = 0; /* keep lint happy */
        win_map( NIX2OFF( virgbegnix ), ( void ** )&p );
        if (p == NULL)
            return NH_NULL;
        node_hdrp->nh_freenix = virgbegnix;
        for ( sacrnix = virgbegnix
                        ;
                sacrnix < virgbegnix + sacrcnt - 1
                ;
                p += node_hdrp->nh_nodesz, sacrnix++ ) {
            linkagep = ( nix_t * )p;
            *linkagep = sacrnix + 1;
#ifdef NODECHK
            hkpp = p + node_hdrp->nh_nodehkix;
            gen = ( u_char_t )sacrnix;
            *hkpp = ( u_char_t )HKPMKHKP( ( size_t )gen,
                                          NODEUNQFREE );
#endif /* NODECHK */
        }
        linkagep = ( nix_t * )p;
        *linkagep = NIX_NULL;
#ifdef NODECHK
        hkpp = p + node_hdrp->nh_nodehkix;
        gen = ( u_char_t )sacrnix;
        *hkpp = HKPMKHKP( gen, NODEUNQFREE );
#endif /* NODECHK */
        node_hdrp->nh_virgrelnix += sacrcnt;
        win_unmap( node_hdrp->nh_virgsegreloff, ( void ** )&p );

        if ( node_hdrp->nh_virgrelnix
                >=
                ( nix_t )node_hdrp->nh_nodesperseg ) {
            intgen_t rval;
            ASSERT( node_hdrp->nh_virgrelnix
                    ==
                    ( nix_t )node_hdrp->nh_nodesperseg );
            ASSERT( node_hdrp->nh_virgsegreloff
                    <=
                    OFF64MAX - ( off64_t )node_hdrp->nh_segsz );
#ifdef TREE_DEBUG
            mlog(MLOG_DEBUG | MLOG_TREE,
                 "node_alloc(): runout of nodes for freelist in "
                 "this segment - nodes used = %lld\n",
                 node_hdrp->nh_virgrelnix);
#endif
            node_hdrp->nh_virgsegreloff +=
                ( off64_t )node_hdrp->nh_segsz;
            node_hdrp->nh_virgrelnix = 0;
            mlog( MLOG_DEBUG,
                  "pre-growing new node array segment at %lld "
                  "size %lld\n",
                  node_hdrp->nh_firstsegoff
                  +
                  node_hdrp->nh_virgsegreloff
                  +
                  ( off64_t )node_hdrp->nh_segsz,
                  ( off64_t )node_hdrp->nh_segsz );
            rval = ftruncate64( node_fd,
                                node_hdrp->nh_firstsegoff
                                +
                                node_hdrp->nh_virgsegreloff
                                +
                                ( off64_t )node_hdrp->nh_segsz );
            if ( rval ) {
                mlog( MLOG_NORMAL | MLOG_WARNING | MLOG_TREE, _(
                          "unable to autogrow node segment %llu: "
                          "%s (%d)\n"),
                      node_hdrp->nh_virgsegreloff
                      /
                      ( off64_t )node_hdrp->nh_segsz,
                      strerror( errno ),
                      errno );
            }
        }
    }

    /* map in window containing node at top of free list,
     * and adjust free list.
     */
    nix = node_hdrp->nh_freenix;
#ifdef TREE_DEBUG
    mlog(MLOG_DEBUG | MLOG_TREE,
         "node_alloc(): win_map(%llu) and get head from node freelist\n",
         NIX2OFF(nix));
#endif
    win_map( NIX2OFF( nix ), ( void ** )&p );
    if (p == NULL)
        return NH_NULL;
#ifdef NODECHK
    hkpp = p + node_hdrp->nh_nodehkix;
    unq = HKPGETUNQ( *hkpp );
    ASSERT( unq != NODEUNQALCD );
    ASSERT( unq == NODEUNQFREE );
#endif /* NODECHK */
    linkagep = ( nix_t * )p;
    node_hdrp->nh_freenix = *linkagep;

    /* clean the node
     */
    memset( ( void * )p, 0, node_hdrp->nh_nodesz );

    /* build a handle for node
     */
    ASSERT( nix <= NIX_MAX );
#ifdef NODECHK
    hkpp = p + ( int )node_hdrp->nh_nodehkix;
    gen = ( u_char_t )( HKPGETGEN( *p ) + ( u_char_t )1 );
    nh = HDLMKHDL( gen, nix );
    *hkpp = HKPMKHKP( gen, NODEUNQALCD );
#else /* NODECHK */
    nh = ( nh_t )nix;
#endif /* NODECHK */

    /* unmap window
     */
#ifdef TREE_DEBUG
    mlog(MLOG_DEBUG | MLOG_TREE,
         "node_alloc(): win_unmap(%llu)\n", NIX2OFF(nix));
#endif
    win_unmap( NIX2OFF( nix ), ( void ** )&p );

    return nh;
}
Example #2
static off64_t
quantity2offset( jdm_fshandle_t *fshandlep, xfs_bstat_t *statp, off64_t qty )
{
	intgen_t fd;
	getbmapx_t bmap[ BMAP_LEN ];
	off64_t offset;
	off64_t offset_next;
	off64_t qty_accum;

	/* If GETOPT_DUMPASOFFLINE was specified and the HSM provided an
	 * estimate, then use it.
	 */

	if (hsm_fs_ctxtp) {
		if (HsmEstimateFileOffset(hsm_fs_ctxtp, statp, qty, &offset))
			return offset;
	}

	offset = 0;
	offset_next = 0;
	qty_accum = 0;
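	/* bmap[0] is the GETBMAPX header entry: request a map of the whole
	 * file (bmv_length == -1); on return the kernel fills the extents
	 * into bmap[1..bmv_entries].
	 */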
	bmap[ 0 ].bmv_offset = 0;
	bmap[ 0 ].bmv_length = -1;
	bmap[ 0 ].bmv_count = BMAP_LEN;
	bmap[ 0 ].bmv_iflags = BMV_IF_NO_DMAPI_READ;
	bmap[ 0 ].bmv_entries = -1;
	fd = jdm_open( fshandlep, statp, O_RDONLY );
	if ( fd < 0 ) {
		mlog( MLOG_NORMAL | MLOG_WARNING | MLOG_INOMAP, _(
		      "could not open ino %llu to read extent map: %s\n"),
		      statp->bs_ino,
		      strerror( errno ));
		return 0;
	}

	for ( ; ; ) {
		intgen_t eix;
		intgen_t rval;

		rval = ioctl( fd, XFS_IOC_GETBMAPX, bmap );
		if ( rval ) {
			mlog( MLOG_NORMAL | MLOG_WARNING | MLOG_INOMAP, _(
			      "could not read extent map for ino %llu: %s\n"),
			      statp->bs_ino,
			      strerror( errno ));
			( void )close( fd );
			return 0;
		}

		if ( bmap[ 0 ].bmv_entries <= 0 ) {
			ASSERT( bmap[ 0 ].bmv_entries == 0 );
			( void )close( fd );
			return offset_next;
		}

		for ( eix = 1 ; eix <= bmap[ 0 ].bmv_entries ; eix++ ) {
			getbmapx_t *bmapp = &bmap[ eix ];
			off64_t qty_new;
			if ( bmapp->bmv_block == -1 ) {
				continue; /* hole */
			}
			offset = bmapp->bmv_offset * BBSIZE;
			qty_new = qty_accum + bmapp->bmv_length * BBSIZE;
			if ( qty_new >= qty ) {
				( void )close( fd );
				return offset + ( qty - qty_accum );
			}
			offset_next = offset + bmapp->bmv_length * BBSIZE;
			qty_accum = qty_new;
		}
	}
	/* NOTREACHED */
}
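
The heart of quantity2offset() above is a running accumulation over the
file's extents: holes are skipped, and as soon as the accumulated data
length reaches the requested quantity the corresponding file offset is
returned. A minimal standalone sketch of that accumulation (the extent_t
type and the sample values are hypothetical, not part of xfsdump):

#include <stdio.h>

typedef struct {
    long long off;      /* byte offset of the extent in the file */
    long long len;      /* byte length of the extent */
    int is_hole;        /* nonzero if this range is a hole */
} extent_t;

/* Return the file offset at which 'qty' bytes of allocated data have been
 * accumulated, or the offset just past the last data extent if the file
 * holds fewer than 'qty' allocated bytes.
 */
static long long quantity2offset_sketch(const extent_t *ext, int n, long long qty)
{
    long long offset_next = 0;
    long long qty_accum = 0;

    for (int i = 0; i < n; i++) {
        if (ext[i].is_hole)
            continue;                       /* holes contribute no data */
        if (qty_accum + ext[i].len >= qty)
            return ext[i].off + (qty - qty_accum);
        offset_next = ext[i].off + ext[i].len;
        qty_accum += ext[i].len;
    }
    return offset_next;
}

int main(void)
{
    extent_t ext[] = { { 0, 4096, 0 }, { 4096, 8192, 1 }, { 12288, 4096, 0 } };

    /* 4096 data bytes in the first extent, the rest in the third:
     * 6000 bytes of data end at offset 12288 + (6000 - 4096) = 14192.
     */
    printf("%lld\n", quantity2offset_sketch(ext, 3, 6000));
    return 0;
}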
Example #3
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
#else
			unlock_buffer(bh);
			continue;
#endif
		}

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
	}

	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			put_bh(bh);
			bhs[i - 1] = NULL;
		}
	}

bail:
	return status;
}
Example #4
int ocfs2_load_local_alloc(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_dinode *alloc = NULL;
	struct buffer_head *alloc_bh = NULL;
	u32 num_used;
	struct inode *inode = NULL;
	struct ocfs2_local_alloc *la;

	mlog_entry_void();

	ocfs2_init_la_debug(osb);

	if (osb->local_alloc_bits == 0)
		goto bail;

	if (osb->local_alloc_bits >= osb->bitmap_cpg) {
		mlog(ML_NOTICE, "Requested local alloc window %d is larger "
		     "than max possible %u. Using defaults.\n",
		     osb->local_alloc_bits, (osb->bitmap_cpg - 1));
		osb->local_alloc_bits =
			ocfs2_megabytes_to_clusters(osb->sb,
						    OCFS2_DEFAULT_LOCAL_ALLOC_SIZE);
	}

	/* read the alloc off disk */
	inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	if (!(le32_to_cpu(alloc->i_flags) &
	    (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
		mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		status = -EINVAL;
		goto bail;
	}

	if ((la->la_size == 0) ||
	    (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
		mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
		     le16_to_cpu(la->la_size));
		status = -EINVAL;
		goto bail;
	}

	/* do a little verification. */
	num_used = ocfs2_local_alloc_count_bits(alloc);

	/* hopefully the local alloc has always been recovered before
	 * we load it. */
	if (num_used
	    || alloc->id1.bitmap1.i_used
	    || alloc->id1.bitmap1.i_total
	    || la->la_bm_off)
		mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
		     "found = %u, set = %u, taken = %u, off = %u\n",
		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
		     le32_to_cpu(alloc->id1.bitmap1.i_total),
		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);

	osb->local_alloc_bh = alloc_bh;
	osb->local_alloc_state = OCFS2_LA_ENABLED;

bail:
	if (status < 0)
		brelse(alloc_bh);
	if (inode)
		iput(inode);

	if (status < 0)
		ocfs2_shutdown_la_debug(osb);

	mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits);

	mlog_exit(status);
	return status;
}
Example #5
/*
 * sync the local alloc to main bitmap.
 *
 * assumes you've already locked the main bitmap -- the bitmap inode
 * passed is used for caching.
 */
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh)
{
	int status = 0;
	int bit_off, left, count, start;
	u64 la_start_blk;
	u64 blkno;
	void *bitmap;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	mlog_entry("total = %u, used = %u\n",
		   le32_to_cpu(alloc->id1.bitmap1.i_total),
		   le32_to_cpu(alloc->id1.bitmap1.i_used));

	if (!alloc->id1.bitmap1.i_total) {
		mlog(0, "nothing to sync!\n");
		goto bail;
	}

	if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
	    le32_to_cpu(alloc->id1.bitmap1.i_total)) {
		mlog(0, "all bits were taken!\n");
		goto bail;
	}

	la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
						le32_to_cpu(la->la_bm_off));
	bitmap = la->la_bitmap;
	start = count = bit_off = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);

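	/* Walk the local alloc bitmap: runs of consecutive zero (unused) bits
	 * are accumulated in 'count'/'start', and each completed run is freed
	 * back to the main bitmap.
	 */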
	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
	       != -1) {
		if ((bit_off < left) && (bit_off == start)) {
			count++;
			start++;
			continue;
		}
		if (count) {
			blkno = la_start_blk +
				ocfs2_clusters_to_blocks(osb->sb,
							 start - count);

			mlog(0, "freeing %u bits starting at local alloc bit "
			     "%u (la_start_blk = %llu, blkno = %llu)\n",
			     count, start - count,
			     (unsigned long long)la_start_blk,
			     (unsigned long long)blkno);

			status = ocfs2_free_clusters(handle, main_bm_inode,
						     main_bm_bh, blkno, count);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		if (bit_off >= left)
			break;
		count = 1;
		start = bit_off + 1;
	}

bail:
	mlog_exit(status);
	return status;
}
Example #6
void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
			  int create_ino)
{
	struct super_block *sb;
	struct ocfs2_super *osb;
	int use_plocks = 1;

	sb = inode->i_sb;
	osb = OCFS2_SB(sb);

	if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
	    ocfs2_mount_local(osb) || !ocfs2_stack_supports_plocks())
		use_plocks = 0;

	/*
	 * These have all been checked by ocfs2_read_inode_block() or set
	 * by ocfs2_mknod_locked(), so a failure is a code bug.
	 */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));  /* This means that read_inode
						cannot create a superblock
						inode today.  change if
						that is needed. */
	BUG_ON(!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)));
	BUG_ON(le32_to_cpu(fe->i_fs_generation) != osb->fs_generation);


	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
	OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);

	inode_set_iversion(inode, 1);
	inode->i_generation = le32_to_cpu(fe->i_generation);
	inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
	inode->i_mode = le16_to_cpu(fe->i_mode);
	i_uid_write(inode, le32_to_cpu(fe->i_uid));
	i_gid_write(inode, le32_to_cpu(fe->i_gid));

	/* Fast symlinks will have i_size but no allocated clusters. */
	if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
	} else {
		inode->i_blocks = ocfs2_inode_sector_count(inode);
		inode->i_mapping->a_ops = &ocfs2_aops;
	}
	inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
	inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
	inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
	inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
	inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
	inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);

	if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
		mlog(ML_ERROR,
		     "ip_blkno %llu != i_blkno %llu!\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     (unsigned long long)le64_to_cpu(fe->i_blkno));

	set_nlink(inode, ocfs2_read_links_count(fe));

	trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
				   le32_to_cpu(fe->i_flags));
	if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
		inode->i_flags |= S_NOQUOTA;
	}
  
	if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
		inode->i_flags |= S_NOQUOTA;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
		/* we can't actually hit this as read_inode can't
		 * handle superblocks today ;-) */
		BUG();
	}

	switch (inode->i_mode & S_IFMT) {
	    case S_IFREG:
		    if (use_plocks)
			    inode->i_fop = &ocfs2_fops;
		    else
			    inode->i_fop = &ocfs2_fops_no_plocks;
		    inode->i_op = &ocfs2_file_iops;
		    i_size_write(inode, le64_to_cpu(fe->i_size));
		    break;
	    case S_IFDIR:
		    inode->i_op = &ocfs2_dir_iops;
		    if (use_plocks)
			    inode->i_fop = &ocfs2_dops;
		    else
			    inode->i_fop = &ocfs2_dops_no_plocks;
		    i_size_write(inode, le64_to_cpu(fe->i_size));
		    OCFS2_I(inode)->ip_dir_lock_gen = 1;
		    break;
	    case S_IFLNK:
		    inode->i_op = &ocfs2_symlink_inode_operations;
		    inode_nohighmem(inode);
		    i_size_write(inode, le64_to_cpu(fe->i_size));
		    break;
	    default:
		    inode->i_op = &ocfs2_special_file_iops;
		    init_special_inode(inode, inode->i_mode,
				       inode->i_rdev);
		    break;
	}

	if (create_ino) {
		inode->i_ino = ino_from_blkno(inode->i_sb,
			       le64_to_cpu(fe->i_blkno));

		/*
		 * If we ever want to create system files from kernel,
		 * the generation argument to
		 * ocfs2_inode_lock_res_init() will have to change.
		 */
		BUG_ON(le32_to_cpu(fe->i_flags) & OCFS2_SYSTEM_FL);

		ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres,
					  OCFS2_LOCK_TYPE_META, 0, inode);

		ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
					  OCFS2_LOCK_TYPE_OPEN, 0, inode);
	}

	ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres,
				  OCFS2_LOCK_TYPE_RW, inode->i_generation,
				  inode);

	ocfs2_set_inode_flags(inode);

	OCFS2_I(inode)->ip_last_used_slot = 0;
	OCFS2_I(inode)->ip_last_used_group = 0;

	if (S_ISDIR(inode->i_mode))
		ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
				    OCFS2_RESV_FLAG_DIR);
}
Example #7
/*
 * This function validates existing check information on a list of
 * buffer_heads.  Like _compute_bhs, the function will take care of
 * zeroing bc before calculating check codes.  If bc is not a pointer
 * inside data, the caller must have zeroed any inline
 * ocfs2_block_check structures.
 *
 * Again, the data passed in should be the on-disk endian.
 */
int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
				   struct ocfs2_block_check *bc,
				   struct ocfs2_blockcheck_stats *stats)
{
	int i, rc = 0;
	struct ocfs2_block_check check;
	u32 crc, ecc, fix;

	BUG_ON(nr < 0);

	if (!nr)
		return 0;

	ocfs2_blockcheck_inc_check(stats);

	check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
	check.bc_ecc = le16_to_cpu(bc->bc_ecc);

	memset(bc, 0, sizeof(struct ocfs2_block_check));

	/* Fast path - if the crc32 validates, we're good to go */
	for (i = 0, crc = ~0; i < nr; i++)
		crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
	if (crc == check.bc_crc32e)
		goto out;

	ocfs2_blockcheck_inc_failure(stats);
	mlog(ML_ERROR,
	     "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
	     (unsigned int)check.bc_crc32e, (unsigned int)crc);

	/* Ok, try ECC fixups */
	for (i = 0, ecc = 0; i < nr; i++) {
		/*
		 * The number of bits in a buffer is obviously b_size*8.
		 * The offset of this buffer is b_size*i, so the bit offset
		 * of this buffer is b_size*8*i.
		 */
		ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data,
						bhs[i]->b_size * 8,
						bhs[i]->b_size * 8 * i);
	}
	fix = ecc ^ check.bc_ecc;
	for (i = 0; i < nr; i++) {
		/*
		 * Try the fix against each buffer.  It will only affect
		 * one of them.
		 */
		ocfs2_hamming_fix(bhs[i]->b_data, bhs[i]->b_size * 8,
				  bhs[i]->b_size * 8 * i, fix);
	}

	/* And check the crc32 again */
	for (i = 0, crc = ~0; i < nr; i++)
		crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
	if (crc == check.bc_crc32e) {
		ocfs2_blockcheck_inc_recover(stats);
		goto out;
	}

	mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
	     (unsigned int)check.bc_crc32e, (unsigned int)crc);

	rc = -EIO;

out:
	bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
	bc->bc_ecc = cpu_to_le16(check.bc_ecc);

	return rc;
}
Example #8
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
{
	int status = -1;
	struct inode *inode = NULL; /* the journal inode */
	journal_t *j_journal = NULL;
	struct ocfs2_dinode *di = NULL;
	struct buffer_head *bh = NULL;
	struct ocfs2_super *osb;
	int inode_lock = 0;

	mlog_entry_void();

	BUG_ON(!journal);

	osb = journal->j_osb;

	/* already have the inode for our journal */
	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    osb->slot_num);
	if (inode == NULL) {
		status = -EACCES;
		mlog_errno(status);
		goto done;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto done;
	}

	SET_INODE_JOURNAL(inode);
	OCFS2_I(inode)->ip_open_count++;

	/* Skip recovery waits here - journal inode metadata never
	 * changes in a live cluster so it can be considered an
	 * exception to the rule. */
	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not get lock on journal!\n");
		goto done;
	}

	inode_lock = 1;
	di = (struct ocfs2_dinode *)bh->b_data;

	if (inode->i_size <  OCFS2_MIN_JOURNAL_SIZE) {
		mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
		     inode->i_size);
		status = -EINVAL;
		goto done;
	}

	mlog(0, "inode->i_size = %lld\n", inode->i_size);
	mlog(0, "inode->i_blocks = %llu\n",
			(unsigned long long)inode->i_blocks);
	mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);

	/* call the kernels journal init function now */
	j_journal = journal_init_inode(inode);
	if (j_journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EINVAL;
		goto done;
	}

	mlog(0, "Returned from journal_init_inode\n");
	mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);

	*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
		  OCFS2_JOURNAL_DIRTY_FL);

	journal->j_journal = j_journal;
	journal->j_inode = inode;
	journal->j_bh = bh;

	ocfs2_set_journal_params(osb);

	journal->j_state = OCFS2_JOURNAL_LOADED;

	status = 0;
done:
	if (status < 0) {
		if (inode_lock)
			ocfs2_inode_unlock(inode, 1);
		if (bh != NULL)
			brelse(bh);
		if (inode) {
			OCFS2_I(inode)->ip_open_count--;
			iput(inode);
		}
	}

	mlog_exit(status);
	return status;
}
Example #9
/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * call.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
	struct ocfs2_journal *journal = NULL;
	int status = 0;
	struct inode *inode = NULL;
	int num_running_trans = 0;

	mlog_entry_void();

	BUG_ON(!osb);

	journal = osb->journal;
	if (!journal)
		goto done;

	inode = journal->j_inode;

	if (journal->j_state != OCFS2_JOURNAL_LOADED)
		goto done;

	/* need to inc inode use count as journal_destroy will iput. */
	if (!igrab(inode))
		BUG();

	num_running_trans = atomic_read(&(osb->journal->j_num_trans));
	if (num_running_trans > 0)
		mlog(0, "Shutting down journal: must wait on %d "
		     "running transactions!\n",
		     num_running_trans);

	/* Do a commit_cache here. It will flush our journal, *and*
	 * release any locks that are still held.
	 * set the SHUTDOWN flag and release the trans lock.
	 * the commit thread will take the trans lock for us below. */
	journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

	/* The OCFS2_JOURNAL_IN_SHUTDOWN flag signals commit_cache not to
	 * drop the trans_lock (which we want to hold until we
	 * completely destroy the journal). */
	if (osb->commit_task) {
		/* Wait for the commit thread */
		mlog(0, "Waiting for ocfs2commit to exit....\n");
		kthread_stop(osb->commit_task);
		osb->commit_task = NULL;
	}

	BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

	if (ocfs2_mount_local(osb)) {
		journal_lock_updates(journal->j_journal);
		status = journal_flush(journal->j_journal);
		journal_unlock_updates(journal->j_journal);
		if (status < 0)
			mlog_errno(status);
	}

	if (status == 0) {
		/*
		 * Do not toggle if flush was unsuccessful otherwise
		 * will leave dirty metadata in a "clean" journal
		 */
		status = ocfs2_journal_toggle_dirty(osb, 0);
		if (status < 0)
			mlog_errno(status);
	}

	/* Shutdown the kernel journal system */
	journal_destroy(journal->j_journal);

	OCFS2_I(inode)->ip_open_count--;

	/* unlock our journal */
	ocfs2_inode_unlock(inode, 1);

	brelse(journal->j_bh);
	journal->j_bh = NULL;

	journal->j_state = OCFS2_JOURNAL_FREE;

//	up_write(&journal->j_trans_barrier);
done:
	if (inode)
		iput(inode);
	mlog_exit_void();
}
Example #10
bool DBQuestCachingData::DoUpdateDBCharQuestItemInfo()
{
	// First, check whether this is the quest server.
	if( MSM_TEST != MGetServerConfig()->GetServerMode() ) 
		return false;

	// Check that the object is valid.
	if( !IsEnabledObject(m_pObject) )
		return false;

	// Check whether the current state allows an update.
	if( !IsRequestUpdate() ) 
	{
		// Save the time of this check so the next update check can be scheduled.
		m_dwLastUpdateTime = timeGetTime();
		return false;
	}

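	// Create an async DB job that writes this character's cached quest
	// item info, then hand it to the match server's async job queue.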
	MAsyncDBJob_UpdateQuestItemInfo* pAsyncJob = new MAsyncDBJob_UpdateQuestItemInfo(m_pObject->GetUID());
	if( 0 == pAsyncJob )
	{
		mlog( "DBQuestCachingData::DoUpdateDBCharQuestItemInfo - QuestItemUpdate async작업 실패.\n" );
		return false;
	}
	if( !pAsyncJob->Input(m_pObject->GetCharInfo()->m_nCID, 
		m_pObject->GetCharInfo()->m_QuestItemList, 
		m_pObject->GetCharInfo()->m_QMonsterBible) )
	{
		delete pAsyncJob;	// don't leak the job if Input() fails
		return false;
	}

	MMatchServer::GetInstance()->PostAsyncJob( pAsyncJob );

#ifdef _DEBUG
	{
		// Log the update data so it can be verified.
		char szDbgOut[ 1000 ] = {0};
		MQuestItemMap::iterator it, end;

		strcat( szDbgOut, "Quest Item Caching UpdateDB\n" );
		strcat( szDbgOut, m_pObject->GetName() );
		strcat( szDbgOut, "\n" );

		it = m_pObject->GetCharInfo()->m_QuestItemList.begin();
		end = m_pObject->GetCharInfo()->m_QuestItemList.end();

		for( ; it != end; ++it )
		{
			char tmp[ 100 ] = {0};
			sprintf( tmp, "%s : %d\n", it->second->GetDesc()->m_szQuestItemName, it->second->GetCount() );
			strcat( szDbgOut, tmp );
		}
		strcat( szDbgOut, "\n" );
		MMatchServer::GetInstance()->LOG( MMatchServer::LOG_PROG, szDbgOut );
	}
#endif

	// After a successful update, reset for the next check.
	Reset();

	return true;
}
Example #11
static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
				  struct ocfs2_slot_info *si)
{
	int status = 0;
	u64 blkno;
	unsigned long long blocks, bytes;
	unsigned int i;
	struct buffer_head *bh;

	status = ocfs2_slot_map_physical_size(osb, si->si_inode, &bytes);
	if (status)
		goto bail;

	blocks = ocfs2_blocks_for_bytes(si->si_inode->i_sb, bytes);
	BUG_ON(blocks > UINT_MAX);
	si->si_blocks = blocks;
	if (!si->si_blocks)
		goto bail;

	if (si->si_extended)
		si->si_slots_per_block =
			(osb->sb->s_blocksize /
			 sizeof(struct ocfs2_extended_slot));
	else
		si->si_slots_per_block = osb->sb->s_blocksize / sizeof(__le16);

	/* The size checks above should ensure this */
	BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks);

	mlog(0, "Slot map needs %u buffers for %llu bytes\n",
	     si->si_blocks, bytes);

	si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks,
			    GFP_KERNEL);
	if (!si->si_bh) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

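	/* For each slot-map block, translate the logical block number to a
	 * physical block and read it in, bypassing the uptodate cache.
	 */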
	for (i = 0; i < si->si_blocks; i++) {
		status = ocfs2_extent_map_get_blocks(si->si_inode, i,
						     &blkno, NULL, NULL);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		mlog(0, "Reading slot map block %u at %llu\n", i,
		     (unsigned long long)blkno);

		bh = NULL;  /* Acquire a fresh bh */
		status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno,
					   1, &bh, OCFS2_BH_IGNORE_CACHE, NULL);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		si->si_bh[i] = bh;
	}

bail:
	return status;
}
Example #12
void MMatchClient::OnAllowTunnelingUDP()
{
	SetAllowTunneling(true);
	SetAgentPeerFlag(true);
	mlog("TUNNELING_UDP_ALLOWED \n");
}
Example #13
MMatchPeerInfoList::~MMatchPeerInfoList()
{
	Clear();
	DeleteCriticalSection(&m_csLock);
	mlog("PeerInfoList Released\n");
}
Example #14
void MMatchClient::ParseUDPPacket(char* pData, MPacketHeader* pPacketHeader, DWORD dwIP, unsigned int nPort)
{
	switch (pPacketHeader->nMsg)
	{
	case MSGID_RAWCOMMAND:
		{
			unsigned short nCheckSum = MBuildCheckSum(pPacketHeader, pPacketHeader->nSize);
			if (pPacketHeader->nCheckSum != nCheckSum) {
				static int nLogCount = 0;
				if (nLogCount++ < 100) {	// prevent log flooding
					mlog("MMatchClient::ParseUDPPacket() -> CHECKSUM ERROR(R=%u/C=%u)\n", 
						pPacketHeader->nCheckSum, nCheckSum);
				}
				return;
			} else {
				MCommand* pCmd = new MCommand();
				if (!pCmd->SetData(pData, &m_CommandManager))
				{
					mlog("MMatchClient::ParseUDPPacket() -> SetData Error\n");

					delete pCmd;
					return;
				}

				MUID uidPeer = FindPeerUID(dwIP, nPort);
				if (uidPeer != MUID(0,0))
				{
					pCmd->m_Sender = uidPeer;
				} else {
					// TODO: This needs to be fixed.
					sockaddr_in Addr;
					Addr.sin_addr.S_un.S_addr = dwIP;
					Addr.sin_port = nPort;
					char* pszIP = inet_ntoa(Addr.sin_addr);

					if (strcmp(pszIP, GetAgentIP()) == 0) 
					{
						pCmd->m_Sender = GetAgentServerUID();
					}
					else if( (MC_RESPONSE_SERVER_LIST_INFO == pCmd->GetID()) ||
						(MC_RESPONSE_BLOCK_COUNTRY_CODE_IP == pCmd->GetID()) )
					{
						// Nothing special to do.
						// The locator never gets a peer entry, so it is handled separately here.
					}
					else if (pCmd->GetID() == MC_UDP_PONG) 
					{
						// Nothing special to do; just let the command through (it would otherwise be deleted below).
					}	
					else 
					{
						delete pCmd; pCmd = NULL;
						return;
					}
				}

				pCmd->m_Receiver = m_This;

				if( IsUDPCommandValidationCheck(pCmd->GetID()) ) {
					LockRecv();				
					m_CommandManager.Post(pCmd);
					UnlockRecv();
				} else {
#ifdef _DEBUG
					mlog("%s(ID:%d) is Denied Command!\n"
						, pCmd->m_pCommandDesc->GetName(), pCmd->GetID());
#endif
				}
			}
		}
		break;
	case MSGID_COMMAND:
		{
			int nPacketSize = pPacketHeader->CalcPacketSize(&m_PeerPacketCrypter);
			unsigned short nCheckSum = MBuildCheckSum(pPacketHeader, nPacketSize);

			if (pPacketHeader->nCheckSum != nCheckSum) {
				static int nLogCount = 0;
				if (nLogCount++ < 100) {	// prevent log flooding
					mlog("MMatchClient::ParseUDPPacket() -> CHECKSUM ERROR(R=%u/C=%u)\n", 
						pPacketHeader->nCheckSum, nCheckSum);
				}
				return;
			} else {
				MCommand* pCmd = new MCommand();

				int nCmdSize = nPacketSize - sizeof(MPacketHeader);

				if (!m_PeerPacketCrypter.Decrypt(pData, nCmdSize))
				{
					mlog("MMatchClient::ParseUDPPacket() -> Decrypt Error\n");

					delete pCmd; pCmd = NULL;
					return;
				}

				if (!pCmd->SetData(pData, &m_CommandManager))
				{
					// TODO: This needs to be fixed.
					sockaddr_in Addr;
					Addr.sin_addr.S_un.S_addr = dwIP;
					Addr.sin_port = nPort;
					char* pszIP = inet_ntoa(Addr.sin_addr);

					mlog("MMatchClient::ParseUDPPacket() -> MSGID_COMMAND SetData Error(%s:%d), size=%d\n", 
						pszIP, nPort, nCmdSize);

					delete pCmd; pCmd = NULL;
					return;
				}

				MUID uidPeer = FindPeerUID(dwIP, nPort);
				if (uidPeer != MUID(0,0)) {
					pCmd->m_Sender = uidPeer;
				} else {
					// Encrypted commands are not used with the agent.
					delete pCmd;
					return;
/*
					// TODO: This needs to be fixed.
					sockaddr_in Addr;
					Addr.sin_addr.S_un.S_addr = dwIP;
					Addr.sin_port = nPort;
					char* pszIP = inet_ntoa(Addr.sin_addr);

					if (strcmp(pszIP, GetAgentIP()) == 0) {
						pCmd->m_Sender = GetAgentServerUID();
					}else {
						delete pCmd; pCmd = NULL;
						return;
					}
*/
				}

				pCmd->m_Receiver = m_This;

				if( IsUDPCommandValidationCheck(pCmd->GetID()) ) {
					LockRecv();				
					m_CommandManager.Post(pCmd);
					UnlockRecv();
				} else {
#ifdef _DEBUG
					mlog("%s(ID:%d) is Denied Command!\n"
						, pCmd->m_pCommandDesc->GetName(), pCmd->GetID());
#endif
				}
			}
		}
		break;
	default:
		{
			Log("MatchClient: Parse Packet Error");
		}
		break;
	}
}
Example #15
static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
						struct buffer_head *bh)
{
	int rc = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	trace_ocfs2_filecheck_validate_inode_block(
		(unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * Call ocfs2_validate_meta_ecc() first since it can repair ecc
	 * errors, but do not return an error immediately when ecc
	 * validation fails: the likely cause is an invalid inode number
	 * having been passed in.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
	if (rc) {
		mlog(ML_ERROR,
		     "Filecheck: checksum failed for dinode %llu\n",
		     (unsigned long long)bh->b_blocknr);
		rc = -OCFS2_FILECHECK_ERR_BLOCKECC;
	}

	if (!OCFS2_IS_VALID_DINODE(di)) {
		mlog(ML_ERROR,
		     "Filecheck: invalid dinode #%llu: signature = %.*s\n",
		     (unsigned long long)bh->b_blocknr, 7, di->i_signature);
		rc = -OCFS2_FILECHECK_ERR_INVALIDINO;
		goto bail;
	} else if (rc)
		goto bail;

	if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
		mlog(ML_ERROR,
		     "Filecheck: invalid dinode #%llu: i_blkno is %llu\n",
		     (unsigned long long)bh->b_blocknr,
		     (unsigned long long)le64_to_cpu(di->i_blkno));
		rc = -OCFS2_FILECHECK_ERR_BLOCKNO;
		goto bail;
	}

	if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
		mlog(ML_ERROR,
		     "Filecheck: invalid dinode #%llu: OCFS2_VALID_FL "
		     "not set\n",
		     (unsigned long long)bh->b_blocknr);
		rc = -OCFS2_FILECHECK_ERR_VALIDFLAG;
		goto bail;
	}

	if (le32_to_cpu(di->i_fs_generation) !=
	    OCFS2_SB(sb)->fs_generation) {
		mlog(ML_ERROR,
		     "Filecheck: invalid dinode #%llu: fs_generation is %u\n",
		     (unsigned long long)bh->b_blocknr,
		     le32_to_cpu(di->i_fs_generation));
		rc = -OCFS2_FILECHECK_ERR_GENERATION;
	}

bail:
	return rc;
}
Example #16
/*
 * Using one journal handle to guarantee the data consistency in case
 * crash happens anywhere.
 *
 *  XXX: defrag can end up finishing only part of the extent that was
 * requested, when not enough contiguous clusters can be found in the
 * allocator.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
                               u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
    int ret, credits = 0, extra_blocks = 0, partial = context->partial;
    handle_t *handle;
    struct inode *inode = context->inode;
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct inode *tl_inode = osb->osb_tl_inode;
    struct ocfs2_refcount_tree *ref_tree = NULL;
    u32 new_phys_cpos, new_len;
    u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

    if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {

        BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
                 OCFS2_HAS_REFCOUNT_FL));

        BUG_ON(!context->refcount_loc);

        ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
                                       &ref_tree, NULL);
        if (ret) {
            mlog_errno(ret);
            return ret;
        }

        ret = ocfs2_prepare_refcount_change_for_del(inode,
                context->refcount_loc,
                phys_blkno,
                *len,
                &credits,
                &extra_blocks);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }
    }

    ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
            &context->meta_ac,
            &context->data_ac,
            extra_blocks, &credits);
    if (ret) {
        mlog_errno(ret);
        goto out;
    }

    /*
     * should be using allocation reservation strategy there?
     *
     * if (context->data_ac)
     *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
     */

    mutex_lock(&tl_inode->i_mutex);

    if (ocfs2_truncate_log_needs_flush(osb)) {
        ret = __ocfs2_flush_truncate_log(osb);
        if (ret < 0) {
            mlog_errno(ret);
            goto out_unlock_mutex;
        }
    }

    handle = ocfs2_start_trans(osb, credits);
    if (IS_ERR(handle)) {
        ret = PTR_ERR(handle);
        mlog_errno(ret);
        goto out_unlock_mutex;
    }

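    /*
     * Claim up to *len clusters for the new location; the allocator may
     * return a shorter contiguous run in new_len.
     */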
    ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
                                 &new_phys_cpos, &new_len);
    if (ret) {
        mlog_errno(ret);
        goto out_commit;
    }

    /*
     * Allowing partial extent moves is a trade-off: it makes the whole
     * defragmentation less likely to fail, but it may leave the fs even
     * more fragmented after the move. Let userspace make the call here.
     */
    if (new_len != *len) {
        mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
        if (!partial) {
            context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
            ret = -ENOSPC;
            goto out_commit;
        }
    }

    mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
         phys_cpos, new_phys_cpos);

    ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
                              new_phys_cpos, ext_flags);
    if (ret)
        mlog_errno(ret);

    if (partial && (new_len != *len))
        *len = new_len;

    /*
     * Here we should write the new page out first if we are
     * in write-back mode.
     */
    ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
    if (ret)
        mlog_errno(ret);

out_commit:
    ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
    mutex_unlock(&tl_inode->i_mutex);

    if (context->data_ac) {
        ocfs2_free_alloc_context(context->data_ac);
        context->data_ac = NULL;
    }

    if (context->meta_ac) {
        ocfs2_free_alloc_context(context->meta_ac);
        context->meta_ac = NULL;
    }

out:
    if (ref_tree)
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

    return ret;
}
Example #17
static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
					      struct buffer_head *bh)
{
	int changed = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	if (!ocfs2_filecheck_validate_inode_block(sb, bh))
		return 0;

	trace_ocfs2_filecheck_repair_inode_block(
		(unsigned long long)bh->b_blocknr);

	if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) ||
	    ocfs2_is_soft_readonly(OCFS2_SB(sb))) {
		mlog(ML_ERROR,
		     "Filecheck: cannot repair dinode #%llu "
		     "on readonly filesystem\n",
		     (unsigned long long)bh->b_blocknr);
		return -OCFS2_FILECHECK_ERR_READONLY;
	}

	if (buffer_jbd(bh)) {
		mlog(ML_ERROR,
		     "Filecheck: cannot repair dinode #%llu, "
		     "its buffer is in jbd\n",
		     (unsigned long long)bh->b_blocknr);
		return -OCFS2_FILECHECK_ERR_INJBD;
	}

	if (!OCFS2_IS_VALID_DINODE(di)) {
		/* Cannot fix invalid inode block */
		return -OCFS2_FILECHECK_ERR_INVALIDINO;
	}

	if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
		/* Cannot just add VALID_FL flag back as a fix,
		 * need more things to check here.
		 */
		return -OCFS2_FILECHECK_ERR_VALIDFLAG;
	}

	if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
		di->i_blkno = cpu_to_le64(bh->b_blocknr);
		changed = 1;
		mlog(ML_ERROR,
		     "Filecheck: reset dinode #%llu: i_blkno to %llu\n",
		     (unsigned long long)bh->b_blocknr,
		     (unsigned long long)le64_to_cpu(di->i_blkno));
	}

	if (le32_to_cpu(di->i_fs_generation) !=
	    OCFS2_SB(sb)->fs_generation) {
		di->i_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
		changed = 1;
		mlog(ML_ERROR,
		     "Filecheck: reset dinode #%llu: fs_generation to %u\n",
		     (unsigned long long)bh->b_blocknr,
		     le32_to_cpu(di->i_fs_generation));
	}

	if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) {
		ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check);
		mark_buffer_dirty(bh);
		mlog(ML_ERROR,
		     "Filecheck: reset dinode #%llu: compute meta ecc\n",
		     (unsigned long long)bh->b_blocknr);
	}

	return 0;
}
Example #18
/*
 * find the victim alloc group, where #blkno fits.
 */
static int ocfs2_find_victim_alloc_group(struct inode *inode,
        u64 vict_blkno,
        int type, int slot,
        int *vict_bit,
        struct buffer_head **ret_bh)
{
    int ret, i, bits_per_unit = 0;
    u64 blkno;
    char namebuf[40];

    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
    struct ocfs2_chain_list *cl;
    struct ocfs2_chain_rec *rec;
    struct ocfs2_dinode *ac_dinode;
    struct ocfs2_group_desc *bg;

    ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
    ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
                                     strlen(namebuf), &blkno);
    if (ret) {
        ret = -ENOENT;
        goto out;
    }

    ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
    if (ret) {
        mlog_errno(ret);
        goto out;
    }

    ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
    cl = &(ac_dinode->id2.i_chain);
    rec = &(cl->cl_recs[0]);

    if (type == GLOBAL_BITMAP_SYSTEM_INODE)
        bits_per_unit = osb->s_clustersize_bits -
                        inode->i_sb->s_blocksize_bits;
    /*
     * 'vict_blkno' was out of the valid range.
     */
    if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
            (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
                            bits_per_unit))) {
        ret = -EINVAL;
        goto out;
    }

    for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

        rec = &(cl->cl_recs[i]);
        if (!rec)
            continue;

        bg = NULL;

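        /*
         * Walk this chain's group descriptors via bg_next_group until the
         * group containing vict_blkno is found.
         */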
        do {
            if (!bg)
                blkno = le64_to_cpu(rec->c_blkno);
            else
                blkno = le64_to_cpu(bg->bg_next_group);

            if (gd_bh) {
                brelse(gd_bh);
                gd_bh = NULL;
            }

            ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
            if (ret) {
                mlog_errno(ret);
                goto out;
            }

            bg = (struct ocfs2_group_desc *)gd_bh->b_data;

            if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
                              le16_to_cpu(bg->bg_bits))) {

                *ret_bh = gd_bh;
                *vict_bit = (vict_blkno - blkno) >>
                            bits_per_unit;
                mlog(0, "find the victim group: #%llu, "
                     "total_bits: %u, vict_bit: %u\n",
                     blkno, le16_to_cpu(bg->bg_bits),
                     *vict_bit);
                goto out;
            }

        } while (le64_to_cpu(bg->bg_next_group));
    }

    ret = -EINVAL;
out:
    brelse(ac_bh);

    /*
     * caller has to release the gd_bh properly.
     */
    return ret;
}
Example #19
/* Query the cluster to determine whether we should wipe an inode from
 * disk or not.
 *
 * Requires the inode to have the cluster lock. */
static int ocfs2_query_inode_wipe(struct inode *inode,
				  struct buffer_head *di_bh,
				  int *wipe)
{
	int status = 0, reason = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di;

	*wipe = 0;

	trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno,
					   inode->i_nlink);

	/* While we were waiting for the cluster lock in
	 * ocfs2_delete_inode, another node might have asked to delete
	 * the inode. Recheck our flags to catch this. */
	if (!ocfs2_inode_is_valid_to_delete(inode)) {
		reason = 1;
		goto bail;
	}

	/* Now that we have an up to date inode, we can double check
	 * the link count. */
	if (inode->i_nlink)
		goto bail;

	/* Do some basic inode verification... */
	di = (struct ocfs2_dinode *) di_bh->b_data;
	if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL)) &&
	    !(oi->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) {
		/*
		 * Inodes in the orphan dir must have ORPHANED_FL.  The only
		 * inodes that come back out of the orphan dir are reflink
		 * targets. A reflink target may be moved out of the orphan
		 * dir between the time we scan the directory and the time we
		 * process it. This would lead to HAS_REFCOUNT_FL being set but
		 * ORPHANED_FL not.
		 */
		if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
			reason = 2;
			goto bail;
		}

		/* for lack of a better error? */
		status = -EEXIST;
		mlog(ML_ERROR,
		     "Inode %llu (on-disk %llu) not orphaned! "
		     "Disk flags  0x%x, inode flags 0x%x\n",
		     (unsigned long long)oi->ip_blkno,
		     (unsigned long long)le64_to_cpu(di->i_blkno),
		     le32_to_cpu(di->i_flags), oi->ip_flags);
		goto bail;
	}

	/* has someone already deleted us?! baaad... */
	if (di->i_dtime) {
		status = -EEXIST;
		mlog_errno(status);
		goto bail;
	}

	/*
	 * This is how ocfs2 determines whether an inode is still live
	 * within the cluster. Every node takes a shared read lock on
	 * the inode open lock in ocfs2_read_locked_inode(). When we
	 * get to ->delete_inode(), each node tries to convert its
	 * lock to an exclusive. Trylocks are serialized by the inode
	 * meta data lock. If the upconvert succeeds, we know the inode
	 * is no longer live and can be deleted.
	 *
	 * Though we call this with the meta data lock held, the
	 * trylock keeps us from ABBA deadlock.
	 */
	status = ocfs2_try_open_lock(inode, 1);
	if (status == -EAGAIN) {
		status = 0;
		reason = 3;
		goto bail;
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*wipe = 1;
	trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot));

bail:
	trace_ocfs2_query_inode_wipe_end(status, reason);
	return status;
}
Example #20
static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
                             u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
                             u32 len, int ext_flags)
{
    int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
    handle_t *handle;
    struct inode *inode = context->inode;
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct inode *tl_inode = osb->osb_tl_inode;
    struct inode *gb_inode = NULL;
    struct buffer_head *gb_bh = NULL;
    struct buffer_head *gd_bh = NULL;
    struct ocfs2_group_desc *gd;
    struct ocfs2_refcount_tree *ref_tree = NULL;
    u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
                       context->range->me_threshold);
    u64 phys_blkno, new_phys_blkno;

    phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

    if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {

        BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
                 OCFS2_HAS_REFCOUNT_FL));

        BUG_ON(!context->refcount_loc);

        ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
                                       &ref_tree, NULL);
        if (ret) {
            mlog_errno(ret);
            return ret;
        }

        ret = ocfs2_prepare_refcount_change_for_del(inode,
                context->refcount_loc,
                phys_blkno,
                len,
                &credits,
                &extra_blocks);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }
    }

    ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
            &context->meta_ac,
            NULL, extra_blocks, &credits);
    if (ret) {
        mlog_errno(ret);
        goto out;
    }

    /*
     * need to count 2 extra credits for global_bitmap inode and
     * group descriptor.
     */
    credits += OCFS2_INODE_UPDATE_CREDITS + 1;

    /*
     * ocfs2_move_extent() didn't reserve any clusters in lock_allocators()
     * logic, while we still need to lock the global_bitmap.
     */
    gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
                                           OCFS2_INVALID_SLOT);
    if (!gb_inode) {
        mlog(ML_ERROR, "unable to get global_bitmap inode\n");
        ret = -EIO;
        goto out;
    }

    mutex_lock(&gb_inode->i_mutex);

    ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
    if (ret) {
        mlog_errno(ret);
        goto out_unlock_gb_mutex;
    }

    mutex_lock(&tl_inode->i_mutex);

    handle = ocfs2_start_trans(osb, credits);
    if (IS_ERR(handle)) {
        ret = PTR_ERR(handle);
        mlog_errno(ret);
        goto out_unlock_tl_inode;
    }

    new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
    ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
                                        GLOBAL_BITMAP_SYSTEM_INODE,
                                        OCFS2_INVALID_SLOT,
                                        &goal_bit, &gd_bh);
    if (ret) {
        mlog_errno(ret);
        goto out_commit;
    }

    /*
     * Probe the victim cluster group for a region that fits the wanted
     * move; it will even perform a best-effort attempt by compromising
     * to a threshold around the goal.
     */
    ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
                            new_phys_cpos);
    if (!*new_phys_cpos) {
        ret = -ENOSPC;
        goto out_commit;
    }

    ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
                              *new_phys_cpos, ext_flags);
    if (ret) {
        mlog_errno(ret);
        goto out_commit;
    }

    gd = (struct ocfs2_group_desc *)gd_bh->b_data;
    ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
                                           le16_to_cpu(gd->bg_chain));
    if (ret) {
        mlog_errno(ret);
        goto out_commit;
    }

    ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
                                     goal_bit, len);
    if (ret) {
        ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
                                           le16_to_cpu(gd->bg_chain));
        mlog_errno(ret);
    }

    /*
     * Here we should write the new page out first if we are
     * in write-back mode.
     */
    ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
    if (ret)
        mlog_errno(ret);

out_commit:
    ocfs2_commit_trans(osb, handle);
    brelse(gd_bh);

out_unlock_tl_inode:
    mutex_unlock(&tl_inode->i_mutex);

    ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
    mutex_unlock(&gb_inode->i_mutex);
    brelse(gb_bh);
    iput(gb_inode);

out:
    if (context->meta_ac) {
        ocfs2_free_alloc_context(context->meta_ac);
        context->meta_ac = NULL;
    }

    if (ref_tree)
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

    return ret;
}
Example #21
bool MMatchConfig::Create()
{
	int nTemp = 0;

	GetPrivateProfileString("DB", "DNS", "gunzdb", m_szDB_DNS, 64, SERVER_CONFIG_FILENAME);
	GetPrivateProfileString("DB", "USERNAME", "gunzdb", m_szDB_UserName, 64, SERVER_CONFIG_FILENAME);
	GetPrivateProfileString("DB", "PASSWORD", "gunzdb", m_szDB_Password, 64, SERVER_CONFIG_FILENAME);

	m_nMaxUser = GetPrivateProfileInt("SERVER", "MAXUSER", 1500, SERVER_CONFIG_FILENAME);
	m_nServerID = GetPrivateProfileInt("SERVER", "SERVERID", 1500, SERVER_CONFIG_FILENAME);
	GetPrivateProfileString("SERVER", "SERVERNAME", "matchserver", m_szServerName, 256, SERVER_CONFIG_FILENAME);
	m_nServerPort = GetPrivateProfileInt("SERVER", "SERVERPORT", 6000, SERVER_CONFIG_FILENAME);
	m_nServerUDPPort = GetPrivateProfileInt("SERVER", "SERVERUDPPORT", 7777, SERVER_CONFIG_FILENAME);

	char szServerMode[128] = "";
	GetPrivateProfileString("SERVER", "MODE", SERVER_CONFIG_SERVERMODE_NORMAL, szServerMode, 128, SERVER_CONFIG_FILENAME);

	if (!stricmp(szServerMode, SERVER_CONFIG_SERVERMODE_NORMAL)) m_nServerMode = MSM_NORMAL;
	else if (!stricmp(szServerMode, SERVER_CONFIG_SERVERMODE_CLAN)) m_nServerMode = MSM_CLAN;
	else if (!stricmp(szServerMode, SERVER_CONFIG_SERVERMODE_LADDER)) m_nServerMode = MSM_LADDER;
	else if (!stricmp(szServerMode, SERVER_CONFIG_SERVERMODE_EVENT)) m_nServerMode = MSM_EVENT;
	else if (!stricmp(szServerMode, SERVER_CONFIG_SERVERMODE_TEST)) m_nServerMode = MSM_TEST;
	else { _ASSERT(0); }

	m_bEnabledSurvivalMode = (0 != GetPrivateProfileInt("SERVER", "SURVIVALENABLE", 1, SERVER_CONFIG_FILENAME));

	m_dwSurvivalRankingDailyRequestHour = GetPrivateProfileInt("SERVER", "SURVIVALRANKING_DAILY_REQUEST_HOUR", 5, SERVER_CONFIG_FILENAME);
	m_dwSurvivalRankingDailyRequestMin = GetPrivateProfileInt("SERVER", "SURVIVALRANKING_DAILY_REQUEST_MINUTE", 0, SERVER_CONFIG_FILENAME);
	if (m_dwSurvivalRankingDailyRequestHour < 0 || m_dwSurvivalRankingDailyRequestHour >= 24) {
		mlog("[ASSERTION FAILED!] %s : SURVIVALRANKING_DAILY_REQUEST_HOUR invalid!\n", SERVER_CONFIG_FILENAME);
		_ASSERT(0);
	}
	if (m_dwSurvivalRankingDailyRequestMin < 0 || m_dwSurvivalRankingDailyRequestMin >= 60) {
		mlog("[ASSERTION FAILED!] %s : SURVIVALRANKING_DAILY_REQUEST_MINUTE invalid!\n", SERVER_CONFIG_FILENAME);
		_ASSERT(0);
	}

	m_dwDuelTournamentMatchMakingInterval = GetPrivateProfileInt("SERVER", "DUELTOURNAMENT_MATCHMAKING_INTERVAL", 1000, SERVER_CONFIG_FILENAME);
	m_dwDuelTournamentMatchMaingAcceptableTpGap = GetPrivateProfileInt("SERVER", "DUELTOURNAMENT_MATCHMAKING_ACCEPTABLE_TP_GAP", 10, SERVER_CONFIG_FILENAME);
	m_dwDuelTournamentMatchMaingWaitLimit = GetPrivateProfileInt("SERVER", "DUELTOURNAMENT_MATCHMAKING_WAIT_LIMIT", 10000, SERVER_CONFIG_FILENAME);
	if (m_dwDuelTournamentMatchMakingInterval > 10000)
		mlog("[WARNING] %s : DUELTOURNAMENT_MATCHMAKING_INTERVAL is too big.\n", SERVER_CONFIG_FILENAME);
	if (m_dwDuelTournamentMatchMaingAcceptableTpGap > 1000)
		mlog("[WARNING] %s : DUELTOURNAMENT_MATCHMAKING_ACCEPTABLE_TP_GAP is too big.\n", SERVER_CONFIG_FILENAME);
	if (m_dwDuelTournamentMatchMaingWaitLimit > 60000)
		mlog("[WARNING] %s : DUELTOURNAMENT_MATCHMAKING_WAIT_LIMIT is too big.\n", SERVER_CONFIG_FILENAME);

	m_dwDuelTournamentServiceStartTime = GetPrivateProfileInt("SERVER", "DUELTOURNAMENT_SERVICE_START_TIME ", 0, SERVER_CONFIG_FILENAME);
	m_dwDuelTournamentServiceEndTime = GetPrivateProfileInt("SERVER", "DUELTOURNAMENT_SERVICE_END_TIME ", 23, SERVER_CONFIG_FILENAME);
	if(m_dwDuelTournamentServiceStartTime > 23) {
		m_dwDuelTournamentServiceStartTime = 23;
		mlog("[WARNING] %s : DUELTOURNAMENT_SERVICE_START_TIME is too big. max is 23.\n", SERVER_CONFIG_FILENAME);_ASSERT(0);
	}
	if(m_dwDuelTournamentServiceEndTime > 23) {
		m_dwDuelTournamentServiceEndTime = 23;
		mlog("[WARNING] %s : DUELTOURNAMENT_SERVICE_END_TIME is too big. max is 23.\n", SERVER_CONFIG_FILENAME);_ASSERT(0);
	}
	if( 0 > m_dwDuelTournamentServiceStartTime) {
		m_dwDuelTournamentServiceStartTime = 0;
		mlog("[WARNING] %s : DUELTOURNAMENT_SERVICE_START_TIME must be a positive value.\n", SERVER_CONFIG_FILENAME);_ASSERT(0);
	}
	if( 0 > m_dwDuelTournamentServiceEndTime)	{
		m_dwDuelTournamentServiceEndTime = 0;
		mlog("[WARNING] %s : DUELTOURNAMENT_SERVICE_END_TIME must be a positive value.\n", SERVER_CONFIG_FILENAME);_ASSERT(0);
	}

	m_bSendLoginUserToDuelTournamentChannel = (0 != GetPrivateProfileInt("SERVER", "SEND_LOGINUSER_TO_DUELTOURNAMENT_CHANNEL", 1, SERVER_CONFIG_FILENAME));

	m_bEnabledDuelTournament = (0 != GetPrivateProfileInt("SERVER", "DUELTOURNAMENT_ENABLE", 1, SERVER_CONFIG_FILENAME));

	m_dwBRDescriptionRefreshInterval = GetPrivateProfileInt("BATTLETIMEREWARD", "BATTLETIMEREWARD_REFRESH_INTERVAL", 5, SERVER_CONFIG_FILENAME);
	m_dwBRDescriptionRefreshInterval = m_dwBRDescriptionRefreshInterval * 60 * 1000;

	// IPs allowed to log in regardless of the user limit
	char szAllowIP[1024] = "";
	char* pNextArg = szAllowIP;
	GetPrivateProfileString("SERVER", "FREELOGINIP", "", szAllowIP, 1024, SERVER_CONFIG_FILENAME);
	MLex lex;
	while(true) {
		char szIP[128] = "";
		pNextArg = lex.GetOneArg(pNextArg, szIP);
		if (*szIP == '\0')
			break;
		AddFreeLoginIP(szIP);
	}

	char szDebug[4] = {0,};
	GetPrivateProfileString( "SERVER", "DEBUG", SERVER_CONFIG_DEBUG_DEFAULT, szDebug, 4, SERVER_CONFIG_FILENAME );
	if( 0 == stricmp("0", szDebug) )
		m_bIsDebugServer = false;
	else
		m_bIsDebugServer = true;

	// Debug ip.
	char szDebugIP[ 1024 ] = {0,};
	char* pNextDbgIP = szDebugIP;
	GetPrivateProfileString( "SERVER", "DEBUGIP", "", szDebugIP, 1024, SERVER_CONFIG_FILENAME );
	while(true) {
		char szIP[128] = "";
		pNextDbgIP = lex.GetOneArg(pNextDbgIP, szIP);
		if (*szIP == '\0')
			break;
		AddDebugLoginIP(szIP);
	}

	if( !LoadMonitorIPnPort() )
	{
		mlog( "server.ini - monitor ip not setting\n" );
		return false;
	}

	if( !LoadKeeperIP() )
	{
		mlog( "server.ini - Keeper ip not setting\n" );
		return false;
	}
	
	
	// Premium IP check
	int nCheckPremiumIP = GetPrivateProfileInt("SERVER", "CheckPremiumIP", 0, SERVER_CONFIG_FILENAME);
	if (nCheckPremiumIP != 0) m_bCheckPremiumIP = true;

	char szCountry[ 32 ] = "";
	GetPrivateProfileString( "SERVER", "COUNTRY", "", szCountry, 31, SERVER_CONFIG_FILENAME );
	if( 0 != strlen(szCountry) )
		m_strCountry = szCountry;
	else
	{
		ASSERT( 0 );
		mlog( "server.ini - Invalid country type.\n" );
		return false;
	}

	char szLanguage[ 32 ] = "";
	GetPrivateProfileString( "SERVER", "LANGUAGE", "", szLanguage, 31, SERVER_CONFIG_FILENAME );
	if( 0 != strlen(szLanguage) )
		m_strLanguage = szLanguage;
	else
	{
		ASSERT( 0 );
		mlog( "server.ini - Invalid language type.\n" );
		return false;
	}

	char szIsUseTicket[ 2 ] = "";
	GetPrivateProfileString( "SERVER", "USETICKET", SERVER_CONFIG_USE_TICKET, szIsUseTicket, 2, SERVER_CONFIG_FILENAME );
	if( 0 != strlen(szIsUseTicket) )
		m_bIsUseTicket = static_cast< bool >( atoi(szIsUseTicket) );
	else
	{
		ASSERT( 0 );
		mlog( "server.ini - invalid ticket setting.\n" );
		return false;
	}
	
	
	char szGameguard[ 2 ] = "";
	GetPrivateProfileString( "SERVER", "GAMEGUARD", "1", szGameguard, 2, SERVER_CONFIG_FILENAME );
	if( 1 == atoi(szGameguard) )
		 m_bIsUseGameguard = true;
	else
	{
		mlog( "gameguard not running\n" );
		m_bIsUseGameguard = false;
	}

	char szNHNUSAAuth[ 2 ] = "";
	GetPrivateProfileString( "SERVER", "NHNUSA_AUTH", "1", szNHNUSAAuth, 2, SERVER_CONFIG_FILENAME );
	if( 1 == atoi(szNHNUSAAuth) )
		m_bIsUseNHNUSAAuth = true;
	else
	{
		m_bIsUseNHNUSAAuth = false;
		mlog( "nhn usa auth not running.\n" );
	}

	char szNHNServerMode[ 2 ] = "";
	GetPrivateProfileString( "SERVER", "NHN_SERVERMODE", "r", szNHNServerMode, 2, SERVER_CONFIG_FILENAME );
	if( 0 == stricmp("r", szNHNServerMode) )
	{
		m_NHNServerMode = NSM_REAL;
		//mlog( "nhn server mode is real\n" );
	}
	else if( 0 == stricmp("a", szNHNServerMode) )
	{
		m_NHNServerMode = NSM_ALPHA;
		//mlog( "nhn server mode is alpha\n" );
	}

	// Japan Netmarble only
	GetPrivateProfileString("LOCALE", "DBAgentIP",						SERVER_CONFIG_DEFAULT_NJ_DBAGENT_IP, m_NJ_szDBAgentIP, 64, SERVER_CONFIG_FILENAME);
	m_NJ_nDBAgentPort = GetPrivateProfileInt("LOCALE", "DBAgentPort",	SERVER_CONFIG_DEFAULT_NJ_DBAGENT_PORT, SERVER_CONFIG_FILENAME);
	m_NJ_nGameCode = GetPrivateProfileInt("LOCALE", "GameCode",			SERVER_CONFIG_DEFAULT_NJ_DBAGENT_GAMECODE, SERVER_CONFIG_FILENAME);


	ReadEnableMaps();

	// filter.
	char szUse[ 2 ] = {0,};
	GetPrivateProfileString( "FILTER", "USE", "0", szUse, 2, SERVER_CONFIG_FILENAME );
	SetUseFilterState( atoi(szUse) );
	
	char szAccept[ 2 ] = {0,};
	GetPrivateProfileString( "FILTER", "ACCEPT_INVALID_IP", "1", szAccept, 2, SERVER_CONFIG_FILENAME );
	SetAcceptInvalidIPState( atoi(szAccept) );

	// environment
	char szUseHShield[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "USE_HSHIELD", SERVER_CONFIG_DEFAULT_USE_HSHIELD, szUseHShield, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szUseHShield) );
	if( 0 == stricmp("1", szUseHShield) )
		m_bIsUseHShield = true;
	else
		m_bIsUseHShield = false;

	char szUseXTrap[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "USE_XTRAP", SERVER_CONFIG_DEFAULT_USE_XTRAP, szUseXTrap, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szUseXTrap) );
	if( 0 == stricmp("1", szUseXTrap) )
		m_bIsUseXTrap = true;
	else
		m_bIsUseXTrap = false;

	char szUseEvent[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "USE_EVENT", SERVER_CONFIG_DEFAULT_USE_EVENT, szUseEvent, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szUseEvent) );
	if( 0 == stricmp("1", szUseEvent) )
		m_bIsUseEvent = true;
	else
		m_bIsUseEvent = false;

	char szUseFileCrc[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "USE_FILECRC", SERVER_CONFIG_DEFAULT_USE_FILECRC, szUseFileCrc, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szUseFileCrc) );
	if( 0 == stricmp("1", szUseFileCrc) )
		m_bIsUseFileCrc = true;
	else
		m_bIsUseFileCrc = false;

	char szUseMD5[ 2 ] = {0,};
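	// Note: the default value here reuses SERVER_CONFIG_DEFAULT_USE_FILECRC rather than a dedicated MD5 default.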
	GetPrivateProfileString("ENVIRONMENT", "USE_MD5", SERVER_CONFIG_DEFAULT_USE_FILECRC, szUseMD5, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szUseMD5) );
	if( 0 == stricmp("1", szUseMD5) )
		m_bIsUseMD5 = true;
	else
		m_bIsUseMD5 = false;

	char szBlockFlooding[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "BLOCK_FLOODING", SERVER_CONFIG_DEFAULT_BLOCK_FLOODING, szBlockFlooding, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szBlockFlooding) );
	if( 0 == stricmp("1", szBlockFlooding) )
		m_bBlockFlooding = true;
	else
		m_bBlockFlooding = false;

	if( m_bIsUseHShield && m_bIsUseXTrap )
	{
		ASSERT( 0 && "HackShield and X-Trap cannot be used together." );
		mlog( "server.ini - HackShield and XTrap cannot both be enabled\n" );
		return false;
	}

	char szBlockHacking[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "USE_BLOCKHACKING", 0, szBlockHacking, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szBlockHacking) );
	if( 0 == stricmp("1", szBlockHacking) )
		m_bIsUseBlockHancking = true;
	else
		m_bIsUseBlockHancking = false;


	// New item policy 1: guarantee item consistency
	// Added by 홍기주 (2010-04-07)
	char szItemConsistency[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT", "USE_ITEM_CONSISTENCY", "0", szItemConsistency, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szItemConsistency) );
	if( 0 == stricmp("1", szItemConsistency) )
		m_bIsUseItemConsistency = true;
	else
		m_bIsUseItemConsistency = false;
	
	
	InitKillTrackerConfig();
	InitPowerLevelingConfig();

	char szUseResourceCRC32CacheCheck[ 2 ] = {0,};
	GetPrivateProfileString("ENVIRONMENT"
		, "USE_RESOURCECRC32CACHECKECK"
		, SERVER_CONFIG_DEFAULT_USE_RESOURCECRC32CACHECHECK
		, szUseResourceCRC32CacheCheck, 2, SERVER_CONFIG_FILENAME);
	ASSERT( 0 != strlen(szUseResourceCRC32CacheCheck) );
	if( 0 == stricmp("1", szUseResourceCRC32CacheCheck) )
		m_bIsUseResourceCRC32CacheCheck = true;
	else
		m_bIsUseResourceCRC32CacheCheck = false;

	InitLoopLogConfig();

	m_bIsComplete = true;
	return m_bIsComplete;
}
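The service window loaded above is only range-clamped to 0..23; nothing in this function relates the start hour to the end hour. A minimal sketch of how a caller might consume the pair, assuming a wrap-past-midnight interpretation when start > end (the helper name and its semantics are assumptions, not code from this codebase):

// Hypothetical helper (not in the original source): decide whether the
// duel tournament service is open at dwHour, given the configured window.
// A window with start > end is treated as wrapping past midnight.
static bool IsDuelTournamentServiceHour(DWORD dwHour, DWORD dwStart, DWORD dwEnd)
{
	if (dwStart <= dwEnd)
		return (dwHour >= dwStart) && (dwHour <= dwEnd);
	return (dwHour >= dwStart) || (dwHour <= dwEnd);	// wraps past midnight
}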
Example #22
0
void MAsyncDBJob_InsertQuestGameLog::Run( void* pContext )
{
	if( MSM_TEST == MGetServerConfig()->GetServerMode() ) 
	{
		MMatchDBMgr* pDBMgr = reinterpret_cast< MMatchDBMgr* >( pContext );

		int nQGLID;

		// Save the quest game log first.
		if( !pDBMgr->InsertQuestGameLog(m_szStageName, 
			m_nScenarioID,
			m_nMasterCID, m_PlayersCID[0], m_PlayersCID[1], m_PlayersCID[2],
			m_nTotalRewardQItemCount,
			m_nElapsedPlayTime,
			nQGLID) )
		{
			SetResult(MASYNC_RESULT_FAILED);
			return;
		}

		// Unique item data must be logged separately in QUniqueItemLog.
		int											i;
		int											nCID;
		int											nQIID;
		int											nQUItemCount;
		QItemLogMapIter								itQUItem, endQUItem;
		vector< MQuestPlayerLogInfo* >::iterator	itPlayer, endPlayer;

		itPlayer  = m_Player.begin();
		endPlayer = m_Player.end();

		for( ; itPlayer != endPlayer; ++itPlayer )
		{
			if( (*itPlayer)->GetUniqueItemList().empty() )
				continue;	// skip players with no unique items.

			nCID		= (*itPlayer)->GetCID();
			itQUItem	= (*itPlayer)->GetUniqueItemList().begin();
			endQUItem	= (*itPlayer)->GetUniqueItemList().end();

			for( ; itQUItem != endQUItem; ++itQUItem )
			{
				nQIID			= itQUItem->first;
				nQUItemCount	= itQUItem->second;

				for( i = 0; i < nQUItemCount; ++i )
				{
					if( !pDBMgr->InsertQUniqueGameLog(nQGLID, nCID, nQIID) )
					{
						mlog( "MAsyncDBJob_InsertQuestGameLog::Run - 유니크 아이템 로그 저장 실패. CID:%d QIID:%d\n", 
							nCID, nQIID );

						SetResult(MASYNC_RESULT_FAILED);
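						// Note: no early return here, so the final
						// SetResult(MASYNC_RESULT_SUCCEED) at the end of Run()
						// will overwrite this failure status.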
					}
				}
			}

			// Free this player's entry now that it has been processed.
			delete (*itPlayer);
		}
	}

	m_Player.clear();
	
	SetResult(MASYNC_RESULT_SUCCEED);
}
Example #23
0
/*
 * make sure we've got at least bits_wanted contiguous bits in the
 * local alloc. You lose them when you drop i_mutex.
 *
 * We will add ourselves to the transaction passed in, but may start
 * our own in order to shift windows.
 */
int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
				   u32 bits_wanted,
				   struct ocfs2_alloc_context *ac)
{
	int status;
	struct ocfs2_dinode *alloc;
	struct inode *local_alloc_inode;
	unsigned int free_bits;

	mlog_entry_void();

	BUG_ON(!ac);

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto bail;
	}

	mutex_lock(&local_alloc_inode->i_mutex);

	/*
	 * We must double check state and allocator bits because
	 * another process may have changed them while holding i_mutex.
	 */
	spin_lock(&osb->osb_lock);
	if (!ocfs2_la_state_enabled(osb) ||
	    (bits_wanted > osb->local_alloc_bits)) {
		spin_unlock(&osb->osb_lock);
		status = -ENOSPC;
		goto bail;
	}
	spin_unlock(&osb->osb_lock);

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

#ifdef CONFIG_OCFS2_DEBUG_FS
	if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
	    ocfs2_local_alloc_count_bits(alloc)) {
		ocfs2_error(osb->sb, "local alloc inode %llu says it has "
			    "%u free bits, but a count shows %u",
			    (unsigned long long)le64_to_cpu(alloc->i_blkno),
			    le32_to_cpu(alloc->id1.bitmap1.i_used),
			    ocfs2_local_alloc_count_bits(alloc));
		status = -EIO;
		goto bail;
	}
#endif

	free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
		le32_to_cpu(alloc->id1.bitmap1.i_used);
	if (bits_wanted > free_bits) {
		/* uhoh, window change time. */
		status =
			ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}

		/*
		 * Under certain conditions, the window slide code
		 * might have reduced the number of bits available or
		 * disabled the local alloc entirely. Re-check
		 * here and return -ENOSPC if necessary.
		 */
		status = -ENOSPC;
		if (!ocfs2_la_state_enabled(osb))
			goto bail;

		free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
			le32_to_cpu(alloc->id1.bitmap1.i_used);
		if (bits_wanted > free_bits)
			goto bail;
	}

	if (ac->ac_max_block)
		mlog(0, "Calling in_range for max block %llu\n",
		     (unsigned long long)ac->ac_max_block);

	if (!ocfs2_local_alloc_in_range(local_alloc_inode, ac,
					bits_wanted)) {
		/*
		 * The window is outside ac->ac_max_block.
		 * This errno tells the caller to keep localalloc enabled
		 * but to get the allocation from the main bitmap.
		 */
		status = -EFBIG;
		goto bail;
	}

	ac->ac_inode = local_alloc_inode;
	/* We should never use localalloc from another slot */
	ac->ac_alloc_slot = osb->slot_num;
	ac->ac_which = OCFS2_AC_USE_LOCAL;
	get_bh(osb->local_alloc_bh);
	ac->ac_bh = osb->local_alloc_bh;
	status = 0;
bail:
	if (status < 0 && local_alloc_inode) {
		mutex_unlock(&local_alloc_inode->i_mutex);
		iput(local_alloc_inode);
	}

	mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num,
	     status);

	mlog_exit(status);
	return status;
}
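A hedged sketch of the calling pattern implied by the comments above: -ENOSPC means the local alloc cannot satisfy this request, while -EFBIG asks the caller to keep the local alloc enabled but take this particular allocation from the main bitmap. The fallback function named below is hypothetical, used only to illustrate the flow.

/* Hypothetical caller, illustration only: try the local alloc first and
 * fall back to the global bitmap on -ENOSPC or -EFBIG, as described by
 * the comments in ocfs2_reserve_local_alloc_bits(). */
static int reserve_bits_with_fallback(struct ocfs2_super *osb, u32 bits_wanted,
				      struct ocfs2_alloc_context *ac)
{
	int status = ocfs2_reserve_local_alloc_bits(osb, bits_wanted, ac);

	if (status == -ENOSPC || status == -EFBIG)
		status = reserve_from_global_bitmap(osb, bits_wanted, ac); /* hypothetical */
	else if (status < 0)
		mlog_errno(status);

	return status;
}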
Example #24
0
void MAsyncDBJob_GetCharInfo::Run(void* pContext)
{
	_ASSERT(m_pCharInfo);
	MMatchDBMgr* pDBMgr = (MMatchDBMgr*)pContext;

	//int nWaitHourDiff;

	if (!pDBMgr->GetCharInfoByAID(m_nAID, m_nCharIndex, m_pCharInfo)) {
		SetResult(MASYNC_RESULT_FAILED);
		return;
	}

	unsigned int nEquipedItemID[MMCIP_END];
	unsigned int nEquipedItemCIID[MMCIP_END];

	if( !pDBMgr->GetCharEquipmentInfoByAID(m_nAID, m_nCharIndex, nEquipedItemID, nEquipedItemCIID)) {
		SetResult(MASYNC_RESULT_FAILED);
		return;
	}

	for(int i = 0; i < MMCIP_END; i++) {
		m_pCharInfo->m_nEquipedItemCIID[i] = nEquipedItemCIID[i];
	}
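	// Note: nEquipedItemID is also fetched above but is not used further in this job.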

#ifdef _DELETE_CLAN
	// If the character belongs to a clan, check whether that clan has a pending close request.
	if( 0 != m_pCharInfo->m_ClanInfo.m_nClanID ) 
	{
		/*
		// If nWaitHourDiff is 0, the clan is normal.
		// If it is 0 or greater, a close request has been filed for the clan.
		// To turn a close-requested clan back into a normal clan,
		//  set DeleteDate to NULL in the Clan table. - by SungE.

		if( UNDEFINE_DELETE_HOUR == nWaitHourDiff )
		{
			// Normal clan.
		}
		else if( 0 > nWaitHourDiff )
		{
			SetDeleteState( MMCDS_WAIT );
		}
		//else if( MAX_WAIT_CLAN_DELETE_HOUR < nWaitHourDiff )
		//{
		//	// Delete the clan info from the DB.
		//	// This is handled in batch by the DB agent server job.
		//}
		else if( 0 <= nWaitHourDiff)
		{
			// Do not delete the DB record yet; just treat the user as a regular user.
			SetDeleteState( MMCDS_NORMAL );
			m_pCharInfo->m_ClanInfo.Clear();
		}
		*/
	}
#endif

	// Fetch the character's item info from the DB.
	// For performance, this should probably be deferred until the player actually opens their item list.
	m_pCharInfo->ClearItems();
	if (!pDBMgr->GetCharItemInfo(m_pCharInfo))
	{
		SetResult(MASYNC_RESULT_FAILED);
		return;
	}

#ifdef _QUEST_ITEM
	if( MSM_TEST == MGetServerConfig()->GetServerMode() ) 
	{
		m_pCharInfo->m_QuestItemList.Clear();
		if( !pDBMgr->GetCharQuestItemInfo(m_pCharInfo) )
		{
			mlog( "MAsyncDBJob_GetCharInfo::Run - 디비에서 퀘스트 아이템 목록을 가져오는데 실패했음.\n" );
			SetResult(MASYNC_RESULT_FAILED);
			return;
		}
	}
#endif

	if( !pDBMgr->GetCharBRInfoAll(m_pCharInfo->m_nCID, m_pCharInfo->GetBRInfoMap()) )
	{
		SetResult(MASYNC_RESULT_FAILED);
		return;
	}

	SetResult(MASYNC_RESULT_SUCCEED);
}
Example #25
0
/* ARGSUSED */
bool_t
inomap_build( jdm_fshandle_t *fshandlep,
	      intgen_t fsfd,
	      xfs_bstat_t *rootstatp,
	      bool_t last,
	      time32_t lasttime,
	      bool_t resume,
	      time32_t resumetime,
	      size_t resumerangecnt,
	      drange_t *resumerangep,
	      char *subtreebuf[],
	      ix_t subtreecnt,
	      bool_t skip_unchanged_dirs,
	      startpt_t *startptp,
	      size_t startptcnt,
	      ix_t *statphasep,
	      ix_t *statpassp,
	      size64_t statcnt,
	      size64_t *statdonep )
{
	xfs_bstat_t *bstatbufp;
	size_t bstatbuflen;
	bool_t pruneneeded = BOOL_FALSE;
	intgen_t igrpcnt = 0;
	intgen_t stat;
	intgen_t rval;

        /* do a sync so that bulkstat will pick up inode changes
         * that are currently in the inode cache. this is necessary
         * for incremental dumps in order to have the dump time
         * accurately reflect what inodes were included in this dump.
         * (PV 881455)
         */
	sync();

	/* copy stat ptrs
	 */
	inomap_statphasep = statphasep;
	inomap_statpassp = statpassp;
	inomap_statdonep = statdonep;

	/* allocate a bulkstat buf
	 */
	bstatbuflen = BSTATBUFLEN;
	bstatbufp = ( xfs_bstat_t * )memalign( pgsz,
					       bstatbuflen
					       *
					       sizeof( xfs_bstat_t ));
	ASSERT( bstatbufp );

	/* count the number of inode groups, which will serve as a
	 * starting point for the size of the inomap.
	 */
	rval = inogrp_iter( fsfd, cb_count_inogrp, (void *)&igrpcnt, &stat );
	if ( rval || stat ) {
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	/* initialize the callback context
	 */
	rval = cb_context( last,
			   lasttime,
			   resume,
			   resumetime,
			   resumerangecnt,
			   resumerangep,
			   startptp,
			   startptcnt,
			   igrpcnt,
			   skip_unchanged_dirs,
			   &pruneneeded );
 	if ( rval ) {
 		free( ( void * )bstatbufp );
 		return BOOL_FALSE;
 	}

	/* the inode map requires that inodes are added in increasing
	 * ino order. in the case of a subtree dump, inodes would be
	 * added in whatever order they were discovered when walking the
	 * subtrees. so pre-populate the inomap with all the inode groups
	 * in this filesystem. each inode will be marked unused until its
	 * correct state is set in cb_add.
	 */
	rval = inogrp_iter( fsfd, cb_add_inogrp, NULL, &stat );
 	if ( rval || stat ) {
		cb_context_free();
 		free( ( void * )bstatbufp );
 		return BOOL_FALSE;
 	}

	/* construct the ino map, based on the last dump time, resumed
	 * dump info, and subtree list. place all unchanged directories
	 * in the "needed for children" state (MAP_DIR_SUPPRT). these will be
	 * dumped even though they have not changed. a later pass will move
	 * some of these to "not dumped", such that only those necessary
	 * to represent the minimal tree containing only changes will remain.
	 * for subtree dumps, recurse over the specified subtrees calling
	 * the inomap constructor (cb_add). otherwise, if dumping the entire
	 * filesystem, use the bigstat iterator to add inos to the inomap.
	 * set a flag if any ino not put in a dump state. This will be used
	 * to decide if any pruning can be done.
	 */
	mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
	      "ino map phase 1: "
	      "constructing initial dump list\n") );

	*inomap_statdonep = 0;
	*inomap_statphasep = 1;
	stat = 0;
	cb_accuminit_sz( );

	if ( subtreecnt ) {
		rval = subtreelist_parse( fshandlep,
					  fsfd,
					  rootstatp,
					  subtreebuf,
					  subtreecnt );
	} else {
		rval = bigstat_iter( fshandlep,
				     fsfd,
				     BIGSTAT_ITER_ALL,
				     ( xfs_ino_t )0,
				     cb_add,
				     NULL,
				     NULL,
				     NULL,
				     &stat,
				     preemptchk,
				     bstatbufp,
				     bstatbuflen );
	}
	*inomap_statphasep = 0;
	if ( rval || preemptchk( PREEMPT_FULL )) {
		cb_context_free();
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	if ( inomap_exclude_filesize > 0 ) {
		mlog( MLOG_NOTE | MLOG_VERBOSE, _(
		      "pruned %llu files: maximum size exceeded\n"),
		      inomap_exclude_filesize );
	}
	if ( inomap_exclude_skipattr > 0 ) {
		mlog( MLOG_NOTE | MLOG_VERBOSE, _(
		      "pruned %llu files: skip attribute set\n"),
		      inomap_exclude_skipattr );
	}

	/* prune directories unchanged since the last dump and containing
	 * no children needing dumping.
	 */
	if ( pruneneeded ) {
		bool_t	rootdump = BOOL_FALSE;

		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 2: "
		      "pruning unneeded subtrees\n") );
		*inomap_statdonep = 0;
		*inomap_statpassp = 0;
		*inomap_statphasep = 2;

		(void) supprt_prune( &rootdump,
				     fshandlep,
				     fsfd,
				     rootstatp,
				     NULL );
		*inomap_statphasep = 0;

		if ( preemptchk( PREEMPT_FULL )) {
			cb_context_free();
			free( ( void * )bstatbufp );
			return BOOL_FALSE;
		}

	} else {
		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 2: "
		      "skipping (no pruning necessary)\n") );
	}

	/* initialize the callback context for startpoint calculation
	 */
	cb_spinit( );

	/* identify dump stream startpoints
	 */
	if ( startptcnt > 1 ) {
		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 3: "
		      "identifying stream starting points\n") );
	} else {
		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 3: "
		      "skipping (only one dump stream)\n") );
	}
	stat = 0;
	*inomap_statdonep = 0;
	*inomap_statphasep = 3;
	rval = bigstat_iter( fshandlep,
			     fsfd,
			     BIGSTAT_ITER_NONDIR,
			     ( xfs_ino_t )0,
			     cb_startpt,
			     NULL,
			     inomap_next_nondir,
			     inomap_alloc_context(),
			     &stat,
			     preemptchk,
			     bstatbufp,
			     bstatbuflen );
	*inomap_statphasep = 0;
	
	if ( rval ) {
		cb_context_free();
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	if ( startptcnt > 1 ) {
		ix_t startptix;
		for ( startptix = 0 ; startptix < startptcnt ; startptix++ ) {
			startpt_t *p;
			startpt_t *ep;

			p = &startptp[ startptix ];
			if ( startptix == startptcnt - 1 ) {
				ep = 0;
			} else {
				ep = &startptp[ startptix + 1 ];
			}
			ASSERT( ! p->sp_flags );
			mlog( MLOG_VERBOSE | MLOG_INOMAP,
			      _("stream %u: ino %llu offset %lld to "),
			      startptix,
			      p->sp_ino,
			      p->sp_offset );
			if ( ! ep ) {
				mlog( MLOG_VERBOSE | MLOG_BARE | MLOG_INOMAP,
				      _("end\n") );
			} else {
				mlog( MLOG_VERBOSE |  MLOG_BARE | MLOG_INOMAP,
				      _("ino %llu offset %lld\n"),
				      ep->sp_ino,
				      ep->sp_offset );
			}
		}
	}

	cb_context_free();
	free( ( void * )bstatbufp );
	mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
	      "ino map construction complete\n") );
	return BOOL_TRUE;
}
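The stat pointers copied at the top of inomap_build() are updated in place as the phases run, so the caller can surface progress while the map is being built. A minimal, hypothetical reporter (not xfsdump code) that could be fed from those same counters:

/* Hypothetical progress reporter, illustration only: prints the values
 * behind the statphasep/statpassp/statdonep pointers handed to
 * inomap_build(), together with the caller-supplied total (statcnt). */
static void
inomap_report_progress( ix_t phase, ix_t pass, size64_t done, size64_t total )
{
	if ( phase == 0 ) {
		return;	/* between phases; nothing in progress */
	}
	mlog( MLOG_DEBUG | MLOG_INOMAP,
	      "ino map phase %u pass %u: %llu of %llu inos examined\n",
	      ( unsigned int )phase,
	      ( unsigned int )pass,
	      ( unsigned long long )done,
	      ( unsigned long long )total );
}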
Example #26
0
static void ocfs2_delete_inode(struct inode *inode)
{
	int wipe, status;
	sigset_t oldset;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_delete_inode(inode->i_ino,
				 (unsigned long long)OCFS2_I(inode)->ip_blkno,
				 is_bad_inode(inode));

	/* When we fail in read_inode() we mark inode as bad. The second test
	 * catches the case when inode allocation fails before allocating
	 * a block for inode. */
	if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
		goto bail;

	if (!ocfs2_inode_is_valid_to_delete(inode)) {
		/* It's probably not necessary to truncate_inode_pages
		 * here but we do it for safety anyway (it will most
		 * likely be a no-op anyway) */
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail;
	}

	dquot_initialize(inode);

	/* We want to block signals in delete_inode as the lock and
	 * messaging paths may return us -ERESTARTSYS. Which would
	 * cause us to exit early, resulting in inodes being orphaned
	 * forever. */
	ocfs2_block_signals(&oldset);

	/*
	 * Synchronize us against ocfs2_get_dentry. We take this in
	 * shared mode so that all nodes can still concurrently
	 * process deletes.
	 */
	status = ocfs2_nfs_sync_lock(OCFS2_SB(inode->i_sb), 0);
	if (status < 0) {
		mlog(ML_ERROR, "getting nfs sync lock(PR) failed %d\n", status);
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail_unblock;
	}
	/* Lock down the inode. This gives us an up to date view of
	 * its metadata (for verification), and allows us to
	 * serialize delete_inode on multiple nodes.
	 *
	 * Even though we might be doing a truncate, we don't take the
	 * allocation lock here as it won't be needed - nobody will
	 * have the file open.
	 */
	status = ocfs2_inode_lock(inode, &di_bh, 1);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail_unlock_nfs_sync;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	/* Skip inode deletion and wait for the dio orphan entry to be
	 * recovered first */
	if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
		ocfs2_cleanup_delete_inode(inode, 0);
		goto bail_unlock_inode;
	}

	/* Query the cluster. This will be the final decision made
	 * before we go ahead and wipe the inode. */
	status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
	if (!wipe || status < 0) {
		/* Error and remote inode busy both mean we won't be
		 * removing the inode, so they take almost the same
		 * path. */
		if (status < 0)
			mlog_errno(status);

		/* Someone in the cluster has disallowed a wipe of
		 * this inode, or it was never completely
		 * orphaned. Write out the pages and exit now. */
		ocfs2_cleanup_delete_inode(inode, 1);
		goto bail_unlock_inode;
	}

	ocfs2_cleanup_delete_inode(inode, 0);

	status = ocfs2_wipe_inode(inode, di_bh);
	if (status < 0) {
		if (status != -EDEADLK)
			mlog_errno(status);
		goto bail_unlock_inode;
	}

	/*
	 * Mark the inode as successfully deleted.
	 *
	 * This is important for ocfs2_clear_inode() as it will check
	 * this flag and skip any checkpointing work
	 *
	 * ocfs2_stuff_meta_lvb() also uses this flag to invalidate
	 * the LVB for other nodes.
	 */
	OCFS2_I(inode)->ip_flags |= OCFS2_INODE_DELETED;

bail_unlock_inode:
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);

bail_unlock_nfs_sync:
	ocfs2_nfs_sync_unlock(OCFS2_SB(inode->i_sb), 0);

bail_unblock:
	ocfs2_unblock_signals(&oldset);
bail:
	return;
}
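ocfs2_block_signals()/ocfs2_unblock_signals() are defined elsewhere in ocfs2; what their use above implies is simply "block everything, remember the old mask, restore it on the way out". A sketch of that shape (not the verified upstream definitions, hence the sketch_ names):

/* Sketch of the signal-blocking helpers as implied by their use in
 * ocfs2_delete_inode(); the real definitions live elsewhere in ocfs2. */
static void sketch_block_signals(sigset_t *oldset)
{
	sigset_t blocked;

	sigfillset(&blocked);				/* block every signal */
	sigprocmask(SIG_BLOCK, &blocked, oldset);	/* and remember the old mask */
}

static void sketch_unblock_signals(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);		/* restore the old mask */
}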
Example #27
0
/* ARGSUSED */
static intgen_t
cb_add( void *arg1,
	jdm_fshandle_t *fshandlep,
	intgen_t fsfd,
	xfs_bstat_t *statp )
{
	register time32_t mtime = statp->bs_mtime.tv_sec;
	register time32_t ctime = statp->bs_ctime.tv_sec;
	register time32_t ltime = max( mtime, ctime );
	register mode_t mode = statp->bs_mode & S_IFMT;
	xfs_off_t estimated_size = 0;
	xfs_ino_t ino = statp->bs_ino;
	bool_t changed;
	bool_t resumed;

	( *inomap_statdonep )++;

	/* skip if no links
	 */
	if ( statp->bs_nlink == 0 ) {
		return 0;
	}

	/* if no portion of this ino is in the resume range,
	 * then only dump it if it has changed since the interrupted
	 * dump.
	 *
	 * otherwise, if some or all of this ino is in the resume range,
	 * and has changed since the base dump upon which the original
	 * increment was based, dump it if it has changed since that
	 * original base dump.
	 */
	if ( cb_resume && ! cb_inoinresumerange( ino )) {
		if ( ltime >= cb_resumetime ) {
			changed = BOOL_TRUE;
		} else {
			changed = BOOL_FALSE;
		}
	} else if ( cb_last ) {
		if ( ltime >= cb_lasttime ) {
			changed = BOOL_TRUE;
		} else {
			changed = BOOL_FALSE;
		}
	} else {
		changed = BOOL_TRUE;
	}

	/* this is redundant: make sure any ino partially dumped
	 * is completed.
	 */
	if ( cb_resume && cb_inoresumed( ino )) {
		resumed = BOOL_TRUE;
	} else {
		resumed = BOOL_FALSE;
	}

	if ( changed ) {
		if ( mode == S_IFDIR ) {
			inomap_add( cb_inomap_contextp,
				    ino,
				    (gen_t)statp->bs_gen,
				    MAP_DIR_CHANGE );
			cb_dircnt++;
		} else {
			estimated_size = estimate_dump_space( statp );

			/* skip if size is greater than prune size. quota
			 * files are exempt from the check.
			 */
			if ( maxdumpfilesize > 0 &&
			     estimated_size > maxdumpfilesize &&
			     !is_quota_file(statp->bs_ino) ) {
				mlog( MLOG_DEBUG | MLOG_EXCLFILES,
				      "pruned ino %llu, owner %u, estimated size %llu: maximum size exceeded\n",
				      statp->bs_ino,
				      statp->bs_uid,
				      estimated_size );
				inomap_add( cb_inomap_contextp,
					    ino,
					    (gen_t)statp->bs_gen,
					    MAP_NDR_NOCHNG );
				inomap_exclude_filesize++;
				return 0;
			}

			if (allowexcludefiles_pr && statp->bs_xflags & XFS_XFLAG_NODUMP) {
				mlog( MLOG_DEBUG | MLOG_EXCLFILES,
				      "pruned ino %llu, owner %u, estimated size %llu: skip flag set\n",
				      statp->bs_ino,
				      statp->bs_uid,
				      estimated_size );
				inomap_add( cb_inomap_contextp,
					    ino,
					    (gen_t)statp->bs_gen,
					    MAP_NDR_NOCHNG );
				inomap_exclude_skipattr++;
				return 0;
			}

			inomap_add( cb_inomap_contextp,
				    ino,
				    (gen_t)statp->bs_gen,
				    MAP_NDR_CHANGE );
			cb_nondircnt++;
			cb_datasz += estimated_size;
			cb_hdrsz += ( EXTENTHDR_SZ * (statp->bs_extents + 1) );
		}
	} else if ( resumed ) {
		ASSERT( mode != S_IFDIR );
		ASSERT( changed );
	} else {
		if ( mode == S_IFDIR ) {
			if ( cb_skip_unchanged_dirs ) {
				inomap_add( cb_inomap_contextp,
					    ino,
					    (gen_t)statp->bs_gen,
					    MAP_DIR_NOCHNG );
			} else {
				*cb_pruneneededp = BOOL_TRUE;
				inomap_add( cb_inomap_contextp,
					    ino,
					    (gen_t)statp->bs_gen,
					    MAP_DIR_SUPPRT );
				cb_dircnt++;
			}
		} else {
			inomap_add( cb_inomap_contextp,
				    ino,
				    (gen_t)statp->bs_gen,
				    MAP_NDR_NOCHNG );
		}
	}

	return 0;
}
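The changed/unchanged decision above can be read as a single predicate over the inode's latest timestamp. A restatement of that logic as a standalone helper, using the same globals as cb_add(); illustration only:

/* Restatement of cb_add()'s change test, using the same globals. */
static bool_t
cb_ino_changed( xfs_bstat_t *statp )
{
	time32_t ltime = max( statp->bs_mtime.tv_sec, statp->bs_ctime.tv_sec );

	if ( cb_resume && ! cb_inoinresumerange( statp->bs_ino )) {
		return ( ltime >= cb_resumetime ) ? BOOL_TRUE : BOOL_FALSE;
	}
	if ( cb_last ) {
		return ( ltime >= cb_lasttime ) ? BOOL_TRUE : BOOL_FALSE;
	}
	return BOOL_TRUE;
}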
Example #28
0
int ocfs2_validate_inode_block(struct super_block *sb,
			       struct buffer_head *bh)
{
	int rc;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
		     (unsigned long long)bh->b_blocknr);
		goto bail;
	}

	/*
	 * Errors after here are fatal.
	 */

	rc = -EINVAL;

	if (!OCFS2_IS_VALID_DINODE(di)) {
		rc = ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
				 (unsigned long long)bh->b_blocknr, 7,
				 di->i_signature);
		goto bail;
	}

	if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
		rc = ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
				 (unsigned long long)bh->b_blocknr,
				 (unsigned long long)le64_to_cpu(di->i_blkno));
		goto bail;
	}

	if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
		rc = ocfs2_error(sb,
				 "Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
				 (unsigned long long)bh->b_blocknr);
		goto bail;
	}

	if (le32_to_cpu(di->i_fs_generation) !=
	    OCFS2_SB(sb)->fs_generation) {
		rc = ocfs2_error(sb,
				 "Invalid dinode #%llu: fs_generation is %u\n",
				 (unsigned long long)bh->b_blocknr,
				 le32_to_cpu(di->i_fs_generation));
		goto bail;
	}

	rc = 0;

bail:
	return rc;
}
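Callers do not usually invoke this validator directly; it is passed as the validate callback to the block-read path, matching the ocfs2_read_blocks() signature shown in the next example. A minimal sketch of that wiring (INODE_CACHE() is assumed here to map an inode to its metadata cache, per ocfs2 conventions):

/* Sketch: read a single inode block and have it checked by
 * ocfs2_validate_inode_block() once the I/O completes. */
static int read_one_dinode_block(struct inode *inode, u64 blkno,
				 struct buffer_head **bh)
{
	return ocfs2_read_blocks(INODE_CACHE(inode), blkno, 1, bh, 0,
				 ocfs2_validate_inode_block);
}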
Example #29
0
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			clear_buffer_uptodate(bh);
			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ, 0, bh);
			continue;
		}
	}

	status = 0;

	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (status) {
				/* Clear the rest of the buffers on error */
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status) {
					put_bh(bh);
					bhs[i] = NULL;
					continue;
				}
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}
Example #30
0
/* ARGSUSED */
bool_t
node_init( intgen_t fd,
           off64_t off,
           size_t usrnodesz,
           ix_t nodehkix,
           size_t nodealignsz,
           size64_t vmsz,
           size64_t dirs_nondirs_cnt )
{
    size64_t nodesz;
    size64_t winmap_mem;
    size64_t segsz;
    size64_t segtablesz;
    size64_t nodesperseg;
    size64_t minsegsz;
    size64_t winmapmax;
    intgen_t rval;

    /* sanity checks
     */
    ASSERT( sizeof( node_hdr_t ) <= NODE_HDRSZ );
    ASSERT( sizeof( nh_t ) < sizeof( off64_t ));
    ASSERT( nodehkix < usrnodesz );
    ASSERT( usrnodesz >= sizeof( char * ) + 1 );
    /* so node is at least big enough to hold
     * the free list linkage and the housekeeping byte
     */
    ASSERT( nodehkix > sizeof( char * ));
    /* since beginning of each node is used to
     * link it in the free list.
     */

    /* adjust the user's node size to meet user's alignment constraint
    */
    nodesz = ( usrnodesz + nodealignsz - 1 ) & ~( nodealignsz - 1 );

#define	WINMAP_MAX	20	/* maximum number of windows to use */
#define	WINMAP_MIN	4	/* minimum number of windows to use */
#define	HARDLINK_FUDGE	1.2	/* approx 1.2 hard links per file */

    /* Calculate the expected size of the segment table using the number
     * of dirs and non-dirs.  Since we don't know how many hard-links
     * there will be, scale the size upward using HARDLINK_FUDGE.
     */

    segtablesz = ( (size64_t)(HARDLINK_FUDGE * (double)dirs_nondirs_cnt) * nodesz);

    /* Figure out how much memory is available for use by winmaps, and
     * use that to pick an appropriate winmapmax, segsz, and nodesperseg,
     * the goal being that if at all possible we want the entire segment
     * table to be mapped so that we aren't constantly mapping and
     * unmapping winmaps.  There must be at least WINMAP_MIN winmaps
     * because references can be held on more than one winmap at the
     * same time.  More winmaps are generally better to reduce the
     * number of nodes that are unmapped if unmapping does occur.
     */

    minsegsz = pgsz * nodesz;	/* must be pgsz and nodesz multiple */
    winmap_mem = min(vmsz, segtablesz);
    segsz = (((winmap_mem / WINMAP_MAX) + minsegsz - 1) / minsegsz) * minsegsz;
    segsz = max(segsz, minsegsz);

    nodesperseg = segsz / nodesz;

    winmapmax = min(WINMAP_MAX, vmsz / segsz);
    winmapmax = max(winmapmax, WINMAP_MIN);

    /* map the abstraction header
     */
    ASSERT( ( NODE_HDRSZ & pgmask ) == 0 );
    ASSERT( ! ( NODE_HDRSZ % pgsz ));
    ASSERT( off <= OFF64MAX );
    ASSERT( ! ( off % ( off64_t )pgsz ));
    node_hdrp = ( node_hdr_t * )mmap_autogrow(
                    NODE_HDRSZ,
                    fd,
                    off );
    if ( node_hdrp == (node_hdr_t *)-1 ) {
        mlog( MLOG_NORMAL | MLOG_ERROR, _(
                  "unable to map node hdr of size %d: %s\n"),
              NODE_HDRSZ,
              strerror( errno ));
        return BOOL_FALSE;
    }

    /* initialize and save persistent context.
     */
    node_hdrp->nh_nodesz = nodesz;
    node_hdrp->nh_nodehkix = nodehkix;
    node_hdrp->nh_segsz = segsz;
    node_hdrp->nh_segtblsz = segtablesz;
    node_hdrp->nh_winmapmax = winmapmax;
    node_hdrp->nh_nodesperseg = nodesperseg;
    node_hdrp->nh_nodealignsz = nodealignsz;
    node_hdrp->nh_freenix = NIX_NULL;
    node_hdrp->nh_firstsegoff = off + ( off64_t )NODE_HDRSZ;
    node_hdrp->nh_virgsegreloff = 0;
    node_hdrp->nh_virgrelnix = 0;

    /* save transient context
     */
    node_fd = fd;

    /* autogrow the first segment
     */
    mlog( MLOG_DEBUG,
          "pre-growing new node array segment at %lld "
          "size %lld\n",
          node_hdrp->nh_firstsegoff,
          ( off64_t )node_hdrp->nh_segsz );
    rval = ftruncate64( node_fd,
                        node_hdrp->nh_firstsegoff
                        +
                        ( off64_t )node_hdrp->nh_segsz );
    if ( rval ) {
        mlog( MLOG_NORMAL | MLOG_ERROR | MLOG_TREE, _(
                  "unable to autogrow first node segment: %s (%d)\n"),
              strerror( errno ),
              errno );
        return BOOL_FALSE;
    }

    /* initialize the window abstraction
     */
    win_init( fd,
              node_hdrp->nh_firstsegoff,
              segsz,
              segtablesz,
              winmapmax );

    /* announce the results
     */
    mlog( MLOG_DEBUG | MLOG_TREE,
          "node_init:"
          " vmsz = %llu (0x%llx)"
          " segsz = %u (0x%x)"
          " segtblsz = %llu (0x%llx)"
          " nodesperseg = %u (0x%x)"
          " winmapmax = %llu (0x%llx)"
          "\n",
          vmsz, vmsz,
          segsz, segsz,
          segtablesz, segtablesz,
          nodesperseg, nodesperseg,
          winmapmax, winmapmax );

    return BOOL_TRUE;
}
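A worked example of the sizing arithmetic above, with assumed inputs (4 KiB pages, 256-byte nodes after alignment, one million dirs plus non-dirs, and 64 MiB of usable vm). The program below is illustration only, not part of xfsdump:

/* Standalone worked example of node_init()'s segment sizing, with
 * assumed inputs; prints the derived segtablesz, segsz, nodesperseg,
 * and winmapmax. */
#include <stdint.h>
#include <stdio.h>

#define WINMAP_MAX	20
#define WINMAP_MIN	4
#define HARDLINK_FUDGE	1.2

int main(void)
{
    uint64_t pgsz = 4096, nodesz = 256;               /* assumed */
    uint64_t dirs_nondirs_cnt = 1000000;              /* assumed */
    uint64_t vmsz = 64ULL << 20;                      /* assumed: 64 MiB */

    uint64_t segtablesz = (uint64_t)(HARDLINK_FUDGE * (double)dirs_nondirs_cnt) * nodesz;
    uint64_t minsegsz = pgsz * nodesz;                /* 1 MiB: pgsz and nodesz multiple */
    uint64_t winmap_mem = vmsz < segtablesz ? vmsz : segtablesz;
    uint64_t segsz = ((winmap_mem / WINMAP_MAX + minsegsz - 1) / minsegsz) * minsegsz;
    if (segsz < minsegsz)
        segsz = minsegsz;
    uint64_t nodesperseg = segsz / nodesz;
    uint64_t winmapmax = vmsz / segsz < WINMAP_MAX ? vmsz / segsz : WINMAP_MAX;
    if (winmapmax < WINMAP_MIN)
        winmapmax = WINMAP_MIN;

    /* prints: segtablesz=307200000 segsz=4194304 nodesperseg=16384 winmapmax=16 */
    printf("segtablesz=%llu segsz=%llu nodesperseg=%llu winmapmax=%llu\n",
           (unsigned long long)segtablesz, (unsigned long long)segsz,
           (unsigned long long)nodesperseg, (unsigned long long)winmapmax);
    return 0;
}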