Example #1
static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_header *header;
	struct smem_global_entry *entry;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return -EINVAL;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}
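A note on the helper that all of these examples revolve around: le32_add_cpu() performs a read-modify-write of a little-endian 32-bit field using a CPU-native delta. Its generic kernel definition (include/linux/byteorder/generic.h) is essentially the one-liner below; because the arithmetic is unsigned and modular, passing a negative delta such as -1 (as many of the examples do) decrements the stored value correctly. le16_add_cpu() and le64_add_cpu() follow the same pattern for 16- and 64-bit fields.

static inline void le32_add_cpu(__le32 *var, u32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}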
Example #2
File: ialloc.c Project: 274914765/C
void udf_free_inode(struct inode *inode)
{
    struct super_block *sb = inode->i_sb;
    struct udf_sb_info *sbi = UDF_SB(sb);

    /*
     * Note: we must free any quota before locking the superblock,
     * as writing the quota to disk may need the lock as well.
     */
    DQUOT_FREE_INODE(inode);
    DQUOT_DROP(inode);

    clear_inode(inode);

    mutex_lock(&sbi->s_alloc_mutex);
    if (sbi->s_lvid_bh) {
        struct logicalVolIntegrityDescImpUse *lvidiu =
                            udf_sb_lvidiu(sbi);
        if (S_ISDIR(inode->i_mode))
            le32_add_cpu(&lvidiu->numDirs, -1);
        else
            le32_add_cpu(&lvidiu->numFiles, -1);

        mark_buffer_dirty(sbi->s_lvid_bh);
    }
    mutex_unlock(&sbi->s_alloc_mutex);

    udf_free_blocks(sb, NULL, UDF_I(inode)->i_location, 0, 1);
}
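Example #3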
static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
				       handle_t *handle,
				       struct buffer_head *di_bh,
				       u32 num_bits,
				       u16 chain)
{
	int ret;
	u32 tmp_used;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
	struct ocfs2_chain_list *cl =
				(struct ocfs2_chain_list *) &di->id2.i_chain;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
	di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
	ocfs2_journal_dirty(handle, di_bh);

out:
	return ret;
}
Example #4
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct ocfs2_alloc_context *ac,
				 u32 bits_wanted,
				 u32 *bit_off,
				 u32 *num_bits)
{
	int status, start;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	mlog_entry_void();
	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
	if (start == -1) {
		/* TODO: Shouldn't we just BUG here? */
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	bitmap = la->la_bitmap;
	*bit_off = le32_to_cpu(la->la_bm_off) + start;
	/* local alloc is always contiguous by nature -- we never
	 * delete bits from it! */
	*num_bits = bits_wanted;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	while(bits_wanted--)
		ocfs2_set_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);

	status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = 0;
bail:
	mlog_exit(status);
	return status;
}
Example #5
void udf_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);

	if (lvidiu) {
		mutex_lock(&sbi->s_alloc_mutex);
		if (S_ISDIR(inode->i_mode))
			le32_add_cpu(&lvidiu->numDirs, -1);
		else
			le32_add_cpu(&lvidiu->numFiles, -1);
		udf_updated_lvid(sb);
		mutex_unlock(&sbi->s_alloc_mutex);
	}

	udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
Example #6
/**
 * nilfs_palloc_group_desc_add_entries - adjust count of free entries
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @desc: pointer to descriptor structure for the group
 * @n: delta to be added
 */
static void
nilfs_palloc_group_desc_add_entries(struct inode *inode,
				    unsigned long group,
				    struct nilfs_palloc_group_desc *desc,
				    u32 n)
{
	spin_lock(nilfs_mdt_bgl_lock(inode, group));
	le32_add_cpu(&desc->pg_nfrees, n);
	spin_unlock(nilfs_mdt_bgl_lock(inode, group));
}
Example #7
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct ocfs2_alloc_context *ac,
				 u32 bits_wanted,
				 u32 *bit_off,
				 u32 *num_bits)
{
	int status, start;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
						  ac->ac_resv);
	if (start == -1) {
		/* TODO: Shouldn't we just BUG here? */
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	bitmap = la->la_bitmap;
	*bit_off = le32_to_cpu(la->la_bm_off) + start;
	*num_bits = bits_wanted;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
				  bits_wanted);

	while(bits_wanted--)
		ocfs2_set_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	if (status)
		mlog_errno(status);
	return status;
}
Example #8
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	int data_start;
	int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
	int i;

	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_pblk_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
				line->id, emeta_buf->nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}
Example #9
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
Example #10
static bool udf_add_free_space(struct udf_sb_info *sbi,
				u16 partition, u32 cnt)
{
	struct logicalVolIntegrityDesc *lvid;

	if (sbi->s_lvid_bh == NULL)
		return false;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	return true;
}
Example #11
/**
 * nilfs_palloc_group_desc_add_entries - adjust count of free entries
 * @desc: pointer to descriptor structure for the group
 * @lock: spin lock protecting @desc
 * @n: delta to be added
 */
static u32
nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
				    spinlock_t *lock, u32 n)
{
	u32 nfree;

	spin_lock(lock);
	le32_add_cpu(&desc->pg_nfrees, n);
	nfree = le32_to_cpu(desc->pg_nfrees);
	spin_unlock(lock);
	return nfree;
}
Example #12
static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	phdr = smem->partitions[host];
	hdr = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);
	cached = phdr_to_first_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = private_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size >= cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
}
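The wmb() above (like the one in Example #1) ensures the new entry's fields are visible to the remote processor before the free offset that publishes them is advanced. A minimal sketch of that publish/observe pairing, with illustrative names that are not part of the smem driver:

struct demo_entry {
	u32 payload;
	u32 published;	/* read by the remote side without taking the spinlock */
};

static void demo_publish(struct demo_entry *e, u32 value)
{
	e->payload = value;
	wmb();				/* payload must be visible before the flag */
	WRITE_ONCE(e->published, 1);
}

static u32 demo_observe(struct demo_entry *e)
{
	if (!READ_ONCE(e->published))
		return 0;
	rmb();				/* pairs with the wmb() in demo_publish() */
	return e->payload;
}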
Example #13
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			" during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it\n",
			osb->dev_str);
	return;
}
Example #14
int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
				handle_t *handle,
				struct ocfs2_alloc_context *ac,
				u32 bit_off,
				u32 num_bits)
{
	int status, start;
	u32 clear_bits;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	bitmap = la->la_bitmap;
	start = bit_off - le32_to_cpu(la->la_bm_off);
	clear_bits = num_bits;

	status = ocfs2_journal_access_di(handle,
			INODE_CACHE(local_alloc_inode),
			osb->local_alloc_bh,
			OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	while (clear_bits--)
		ocfs2_clear_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	return status;
}
Example #15
/*
 * Append this record to the tail of the extent map.  It must be
 * tree_depth 0.  The record might be an extension of an existing
 * record, and as such that needs to be handled.  eg:
 *
 * Existing record in the extent map:
 *
 *	cpos = 10, len = 10
 *	|---------|
 *
 * New Record:
 *
 *	cpos = 10, len = 20
 *	|------------------|
 *
 * The passed record is the new on-disk record.  The new_clusters value
 * is how many clusters were added to the file.  If the append is a
 * contiguous append, the new_clusters has been added to
 * rec->e_clusters.  If the append is an entirely new extent, then
 * rec->e_clusters is == new_clusters.
 */
int ocfs2_extent_map_append(struct inode *inode,
			    struct ocfs2_extent_rec *rec,
			    u32 new_clusters)
{
	int ret;
	struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
	struct ocfs2_extent_map_entry *ent;
	struct ocfs2_extent_rec *old;

	BUG_ON(!new_clusters);
	BUG_ON(le32_to_cpu(rec->e_clusters) < new_clusters);

	if (em->em_clusters < OCFS2_I(inode)->ip_clusters) {
		/*
		 * Size changed underneath us on disk.  Drop any
		 * straddling records and update our idea of
		 * i_clusters
		 */
		ocfs2_extent_map_drop(inode, em->em_clusters - 1);
		em->em_clusters = OCFS2_I(inode)->ip_clusters;
	}

	mlog_bug_on_msg((le32_to_cpu(rec->e_cpos) +
			 le32_to_cpu(rec->e_clusters)) !=
			(em->em_clusters + new_clusters),
			"Inode %llu:\n"
			"rec->e_cpos = %u + rec->e_clusters = %u = %u\n"
			"em->em_clusters = %u + new_clusters = %u = %u\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			le32_to_cpu(rec->e_cpos), le32_to_cpu(rec->e_clusters),
			le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters),
			em->em_clusters, new_clusters,
			em->em_clusters + new_clusters);

	em->em_clusters += new_clusters;

	ret = -ENOENT;
	if (le32_to_cpu(rec->e_clusters) > new_clusters) {
		/* This is a contiguous append */
		ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos), 1,
					      NULL, NULL);
		if (ent) {
			old = &ent->e_rec;
			BUG_ON((le32_to_cpu(rec->e_cpos) +
				le32_to_cpu(rec->e_clusters)) !=
				 (le32_to_cpu(old->e_cpos) +
				  le32_to_cpu(old->e_clusters) +
				  new_clusters));
			if (ent->e_tree_depth == 0) {
				BUG_ON(le32_to_cpu(old->e_cpos) !=
				       le32_to_cpu(rec->e_cpos));
				BUG_ON(le64_to_cpu(old->e_blkno) !=
				       le64_to_cpu(rec->e_blkno));
				ret = 0;
			}
			/*
			 * Let non-leafs fall through as -ENOENT to
			 * force insertion of the new leaf.
			 */
			le32_add_cpu(&old->e_clusters, new_clusters);
		}
	}

	if (ret == -ENOENT)
		ret = ocfs2_extent_map_insert(inode, rec, 0);
	if (ret < 0)
		mlog_errno(ret);
	return ret;
}
Example #16
struct inode *udf_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct inode *inode;
	udf_pblk_t block;
	uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
	struct udf_inode_info *iinfo;
	struct udf_inode_info *dinfo = UDF_I(dir);
	struct logicalVolIntegrityDescImpUse *lvidiu;
	int err;

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	iinfo = UDF_I(inode);
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
		iinfo->i_efe = 1;
		if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
			sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					    sizeof(struct extendedFileEntry),
					    GFP_KERNEL);
	} else {
		iinfo->i_efe = 0;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					    sizeof(struct fileEntry),
					    GFP_KERNEL);
	}
	if (!iinfo->i_ext.i_data) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}

	err = -ENOSPC;
	block = udf_new_block(dir->i_sb, NULL,
			      dinfo->i_location.partitionReferenceNum,
			      start, &err);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}

	lvidiu = udf_sb_lvidiu(sb);
	if (lvidiu) {
		iinfo->i_unique = lvid_get_unique_id(sb);
		inode->i_generation = iinfo->i_unique;
		mutex_lock(&sbi->s_alloc_mutex);
		if (S_ISDIR(mode))
			le32_add_cpu(&lvidiu->numDirs, 1);
		else
			le32_add_cpu(&lvidiu->numFiles, 1);
		udf_updated_lvid(sb);
		mutex_unlock(&sbi->s_alloc_mutex);
	}

	inode_init_owner(inode, dir, mode);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
		inode->i_uid = sbi->s_uid;
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
		inode->i_gid = sbi->s_gid;

	iinfo->i_location.logicalBlockNum = block;
	iinfo->i_location.partitionReferenceNum =
				dinfo->i_location.partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, &iinfo->i_location, 0);
	inode->i_blocks = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_use = 0;
	iinfo->i_checkpoint = 1;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		iinfo->i_crtime = current_time(inode);
	if (unlikely(insert_inode_locked(inode) < 0)) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EIO);
	}
	mark_inode_dirty(inode);

	return inode;
}
Example #17
/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u32 bit_off, num_bits;
	u16 alloc_rec;
	u64 bg_blkno;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	mlog_entry_void();

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters(osb,
					le16_to_cpu(cl->cl_cpg),
					&ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
						 le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_claim_clusters(osb,
				      handle,
				      ac,
				      le16_to_cpu(cl->cl_cpg),
				      &bit_off,
				      &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	alloc_rec = ocfs2_find_smallest_chain(cl);

	/* setup the group */
	bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "new descriptor, record %u, at block %llu\n",
	     alloc_rec, (unsigned long long)bg_blkno);

	bg_bh = sb_getblk(osb->sb, bg_blkno);
	if (!bg_bh) {
		status = -EIO;
		mlog_errno(status);
		goto bail;
	}
	ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);

	status = ocfs2_block_group_fill(handle,
					alloc_inode,
					bg_bh,
					bg_blkno,
					alloc_rec,
					cl);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access(handle, alloc_inode,
				      bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno  = cpu_to_le64(bg_blkno);
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
					le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
					     le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);

	status = 0;
bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (bg_bh)
		brelse(bg_bh);

	mlog_exit(status);
	return status;
}
Example #18
/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh,
				   u64 max_block,
				   u64 *last_alloc_group,
				   int flags)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u16 alloc_rec;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters_with_limit(osb,
						   le16_to_cpu(cl->cl_cpg),
						   max_block, flags, &ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
						 le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	if (last_alloc_group && *last_alloc_group != 0) {
		trace_ocfs2_block_group_alloc(
				(unsigned long long)*last_alloc_group);
		ac->ac_last_group = *last_alloc_group;
	}

	bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
					       ac, cl);
	if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
		bg_bh = ocfs2_block_group_alloc_discontig(handle,
							  alloc_inode,
							  ac, cl);
	if (IS_ERR(bg_bh)) {
		status = PTR_ERR(bg_bh);
		bg_bh = NULL;
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
					 bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	alloc_rec = le16_to_cpu(bg->bg_chain);
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
		     le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
					le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
					     le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
	ocfs2_update_inode_fsync_trans(handle, alloc_inode, 0);

	status = 0;

	/* save the new last alloc group so that the caller can cache it. */
	if (last_alloc_group)
		*last_alloc_group = ac->ac_last_group;

bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	brelse(bg_bh);

	if (status)
		mlog_errno(status);
	return status;
}
Example #19
int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

	/* the device has written an Ethernet frame in the data area
	 * of the sk_buff without updating the structure, do it now */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* delete the streaming DMA mapping before processing the skb */
	pci_unmap_single(priv->pdev,
			 priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

	/* update the skb structure and align the buffer */
	skb_put(skb, size);
	if (offset) {
		/* shift the buffer allocation offset bytes to get the right frame */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* check whether WDS is enabled and whether the data frame is a WDS frame */

	if (init_wds) {
		/* WDS enabled, check for the wds address on the first 6 bytes of the buffer */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif
	/* take care of monitor mode and spy monitoring. */
	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* The packet has a rx_annex. Read it for spy monitoring, then
			 * remove it, while keeping the 2 leading MAC addr.
			 */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;
			/* The noise value can be a bit outdated if nobody's
			 * reading wireless stats... */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;
			/* Update spy records */
			wireless_spy_update(ndev, annex->addr2, &wstats);

			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* increment the read index for the rx data low queue */
	priv->free_data_rx++;

	/* add one or more sk_buff structures */
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frame storage,
		 * including any required alignment operations */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* error allocating an sk_buff structure elements */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
			break;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* set the streaming DMA mapping for proper PCI bus operation */
		priv->pci_map_rx_address[index] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (unlikely(!priv->pci_map_rx_address[index])) {
			/* error mapping the buffer to device accessible memory address */
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			/* free the skbuf structure before aborting */
			dev_kfree_skb_irq(skb);
			skb = NULL;
			break;
		}
		/* update the fragment address */
		control_block->rx_data_low[index].address =
			cpu_to_le32((u32)priv->pci_map_rx_address[index]);
		wmb();

		/* increment the driver read pointer */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}

	/* trigger the device */
	islpci_trigger(priv);

	return 0;
}
Example #20
/**
 * Set attributes of object during write bulk IO processing.
 *
 * Change object attributes and write parent FID into extended
 * attributes when needed.
 *
 * \param[in] env	execution environment
 * \param[in] ofd	OFD device
 * \param[in] ofd_obj	OFD object
 * \param[in] la	object attributes
 * \param[in] oa	obdo
 *
 * \retval		0 on successful attributes update
 * \retval		negative value on error
 */
static int
ofd_write_attr_set(const struct lu_env *env, struct ofd_device *ofd,
		   struct ofd_object *ofd_obj, struct lu_attr *la,
		   struct obdo *oa)
{
	struct ofd_thread_info	*info = ofd_info(env);
	struct filter_fid	*ff = &info->fti_mds_fid;
	__u64			 valid = la->la_valid;
	struct thandle		*th;
	struct dt_object	*dt_obj;
	int			 fl = 0;
	int			 rc;

	ENTRY;

	LASSERT(la);

	dt_obj = ofd_object_child(ofd_obj);
	LASSERT(dt_obj != NULL);

	la->la_valid &= LA_UID | LA_GID | LA_PROJID;

	rc = ofd_attr_handle_id(env, ofd_obj, la, 0 /* !is_setattr */);
	if (rc != 0)
		GOTO(out, rc);

	fl = ofd_object_ff_update(env, ofd_obj, oa, ff);
	if (fl < 0)
		GOTO(out, rc = fl);

	if (!la->la_valid && !fl)
		/* no attributes to set */
		GOTO(out, rc = 0);

	th = ofd_trans_create(env, ofd);
	if (IS_ERR(th))
		GOTO(out, rc = PTR_ERR(th));

	if (la->la_valid) {
		rc = dt_declare_attr_set(env, dt_obj, la, th);
		if (rc)
			GOTO(out_tx, rc);
	}

	if (fl) {
		if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_UNMATCHED_PAIR1))
			ff->ff_parent.f_oid = cpu_to_le32(1UL << 31);
		else if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_UNMATCHED_PAIR2))
			le32_add_cpu(&ff->ff_parent.f_oid, -1);

		rc = dt_declare_xattr_set(env, dt_obj, &info->fti_buf,
					  XATTR_NAME_FID, 0, th);
		if (rc)
			GOTO(out_tx, rc);
	}

	/* We don't need a transno for this operation which will be re-executed
	 * anyway when the OST_WRITE (with a transno assigned) is replayed */
	rc = dt_trans_start_local(env, ofd->ofd_osd, th);
	if (rc)
		GOTO(out_tx, rc);

	/* set uid/gid/projid */
	if (la->la_valid) {
		rc = dt_attr_set(env, dt_obj, la, th);
		if (rc)
			GOTO(out_tx, rc);
	}

	/* set filter fid EA.
	 * FIXME: it holds read lock of ofd object to modify the XATTR_NAME_FID
	 * while the write lock should be held. However, it should work because
	 * write RPCs only modify ff_{parent,layout} and that information will
	 * be the same from all the write RPCs. The reason that fl is not used
	 * in dt_xattr_set() is to allow this race. */
	if (fl) {
		if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NOPFID))
			GOTO(out_tx, rc);

		info->fti_buf.lb_buf = ff;
		info->fti_buf.lb_len = sizeof(*ff);
		rc = dt_xattr_set(env, dt_obj, &info->fti_buf, XATTR_NAME_FID,
				  0, th);
		if (rc == 0)
			filter_fid_le_to_cpu(&ofd_obj->ofo_ff, ff, sizeof(*ff));
	}

	GOTO(out_tx, rc);

out_tx:
	dt_trans_stop(env, ofd->ofd_osd, th);
out:
	la->la_valid = valid;
	return rc;
}
Example #21
/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	//return;
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/* start at the beginning of the objectid map (i = 0) and go to
	   the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	   what we use, though it is possible that binary search would be
	   more efficient after performing lots of deletions (which is
	   when oids is large.)  We only check even i's. */
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			//map[i]++;
			le32_add_cpu(&map[i], 1);

			/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				//disk_sb->s_oid_cursize -= 2;
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				//objectid_map[i+1]--;
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/* JDM comparing two little-endian values for equality -- safe */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/* objectid map must be expanded, but there is no space */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}
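The search loop above only inspects even indices because the map is a sorted list of boundaries: each even entry starts a run of in-use objectids and the following odd entry starts a run of free ones. A hedged, userspace-only illustration of the two le32_add_cpu() branches (made-up values, endianness and the kernel helpers dropped for brevity):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* in use: 1..4 and 9..11, free: 5..8 and 12 onwards (cursize == 4) */
	uint32_t map[] = { 1, 5, 9, 12 };

	map[2] += 1;	/* release id 9, first id of an in-use run, like le32_add_cpu(&map[i], 1)      */
	map[3] -= 1;	/* release id 11, last id of the same run, like le32_add_cpu(&map[i + 1], -1)  */

	printf("remaining in-use run: [%u, %u)\n", map[2], map[3]);	/* prints [10, 11) */
	return 0;
}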
Example #22
/*
 * expects the suballoc inode to already be locked.
 */
static int ocfs2_free_suballoc_bits(handle_t *handle,
				    struct inode *alloc_inode,
				    struct buffer_head *alloc_bh,
				    unsigned int start_bit,
				    u64 bg_blkno,
				    unsigned int count)
{
	int status = 0;
	u32 tmp_used;
	struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
		status = -EIO;
		goto bail;
	}
	BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));

	mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
	     (unsigned long long)bg_blkno, start_bit);

	status = ocfs2_read_block(osb, bg_blkno, &group_bh, OCFS2_BH_CACHED,
				  alloc_inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	group = (struct ocfs2_group_desc *) group_bh->b_data;
	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group);
	if (status) {
		mlog_errno(status);
		goto bail;
	}
	BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));

	status = ocfs2_block_group_clear_bits(handle, alloc_inode,
					      group, group_bh,
					      start_bit, count);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
		     count);
	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);

	status = ocfs2_journal_dirty(handle, alloc_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	if (group_bh)
		brelse(group_bh);

	mlog_exit(status);
	return status;
}
Example #23
/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			       input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}
Example #24
File: ialloc.c Project: 274914765/C
struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
{
    struct super_block *sb = dir->i_sb;
    struct udf_sb_info *sbi = UDF_SB(sb);
    struct inode *inode;
    int block;
    uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
    struct udf_inode_info *iinfo;
    struct udf_inode_info *dinfo = UDF_I(dir);

    inode = new_inode(sb);

    if (!inode) {
        *err = -ENOMEM;
        return NULL;
    }
    *err = -ENOSPC;

    iinfo = UDF_I(inode);
    iinfo->i_unique = 0;
    iinfo->i_lenExtents = 0;
    iinfo->i_next_alloc_block = 0;
    iinfo->i_next_alloc_goal = 0;
    iinfo->i_strat4096 = 0;

    block = udf_new_block(dir->i_sb, NULL,
                  dinfo->i_location.partitionReferenceNum,
                  start, err);
    if (*err) {
        iput(inode);
        return NULL;
    }

    mutex_lock(&sbi->s_alloc_mutex);
    if (sbi->s_lvid_bh) {
        struct logicalVolIntegrityDesc *lvid =
            (struct logicalVolIntegrityDesc *)
            sbi->s_lvid_bh->b_data;
        struct logicalVolIntegrityDescImpUse *lvidiu =
                            udf_sb_lvidiu(sbi);
        struct logicalVolHeaderDesc *lvhd;
        uint64_t uniqueID;
        lvhd = (struct logicalVolHeaderDesc *)
                (lvid->logicalVolContentsUse);
        if (S_ISDIR(mode))
            le32_add_cpu(&lvidiu->numDirs, 1);
        else
            le32_add_cpu(&lvidiu->numFiles, 1);
        iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
        if (!(++uniqueID & 0x00000000FFFFFFFFUL))
            uniqueID += 16;
        lvhd->uniqueID = cpu_to_le64(uniqueID);
        mark_buffer_dirty(sbi->s_lvid_bh);
    }
    inode->i_mode = mode;
    inode->i_uid = current->fsuid;
    if (dir->i_mode & S_ISGID) {
        inode->i_gid = dir->i_gid;
        if (S_ISDIR(mode))
            mode |= S_ISGID;
    } else {
        inode->i_gid = current->fsgid;
    }

    iinfo->i_location.logicalBlockNum = block;
    iinfo->i_location.partitionReferenceNum =
                dinfo->i_location.partitionReferenceNum;
    inode->i_ino = udf_get_lb_pblock(sb, iinfo->i_location, 0);
    inode->i_blocks = 0;
    iinfo->i_lenEAttr = 0;
    iinfo->i_lenAlloc = 0;
    iinfo->i_use = 0;
    if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
        iinfo->i_efe = 1;
        if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
            sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
        iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
                        sizeof(struct extendedFileEntry),
                        GFP_KERNEL);
    } else {
        iinfo->i_efe = 0;
        iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
                        sizeof(struct fileEntry),
                        GFP_KERNEL);
    }
    if (!iinfo->i_ext.i_data) {
        iput(inode);
        *err = -ENOMEM;
        mutex_unlock(&sbi->s_alloc_mutex);
        return NULL;
    }
    if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
        iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
    else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
        iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
    else
        iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
    inode->i_mtime = inode->i_atime = inode->i_ctime =
        iinfo->i_crtime = current_fs_time(inode->i_sb);
    insert_inode_hash(inode);
    mark_inode_dirty(inode);
    mutex_unlock(&sbi->s_alloc_mutex);

    if (DQUOT_ALLOC_INODE(inode)) {
        DQUOT_DROP(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        iput(inode);
        *err = -EDQUOT;
        return NULL;
    }

    *err = 0;
    return inode;
}
Example #25
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * check whether any new backup superblocks exist in this group
	 * and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						     group,
						     new_clusters,
						     first_new_cluster,
						     cl_cpg, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

out_rollback:
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    new_clusters,
					    first_new_cluster,
					    cl_cpg, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}
Example #26
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
			      handle_t *handle,
			      u32 bits_wanted,
			      u32 min_bits,
			      u16 *bit_off,
			      unsigned int *num_bits,
			      u64 *bg_blkno,
			      u16 *bits_left)
{
	int status;
	u16 chain, tmp_bits;
	u32 tmp_used;
	u64 next_group;
	struct inode *alloc_inode = ac->ac_inode;
	struct buffer_head *group_bh = NULL;
	struct buffer_head *prev_group_bh = NULL;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
	struct ocfs2_group_desc *bg;

	chain = ac->ac_chain;
	mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
	     bits_wanted, chain,
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);

	status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
				  le64_to_cpu(cl->cl_recs[chain].c_blkno),
				  &group_bh, OCFS2_BH_CACHED, alloc_inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) group_bh->b_data;
	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = -ENOSPC;
	/* for now, the chain search is a bit simplistic. We just use
	 * the 1st group with any empty bits. */
	while ((status = ac->ac_group_search(alloc_inode, group_bh,
					     bits_wanted, min_bits, bit_off,
					     &tmp_bits)) == -ENOSPC) {
		if (!bg->bg_next_group)
			break;

		if (prev_group_bh) {
			brelse(prev_group_bh);
			prev_group_bh = NULL;
		}
		next_group = le64_to_cpu(bg->bg_next_group);
		prev_group_bh = group_bh;
		group_bh = NULL;
		status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
					  next_group, &group_bh,
					  OCFS2_BH_CACHED, alloc_inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		bg = (struct ocfs2_group_desc *) group_bh->b_data;
		status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
	     tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));

	*num_bits = tmp_bits;

	BUG_ON(*num_bits == 0);

	/*
	 * Keep track of previous block descriptor read. When
	 * we find a target, if we have read more than X
	 * number of descriptors, and the target is reasonably
	 * empty, relink him to top of his chain.
	 *
	 * We've read 0 extra blocks and only send one more to
	 * the transaction, yet the next guy to search has a
	 * much easier time.
	 *
	 * Do this *after* figuring out how many bits we're taking out
	 * of our target group.
	 */
	if (ac->ac_allow_chain_relink &&
	    (prev_group_bh) &&
	    (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
		status = ocfs2_relink_block_group(handle, alloc_inode,
						  ac->ac_bh, group_bh,
						  prev_group_bh, chain);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Ok, claim our bits now: set the info on dinode, chainlist
	 * and then the group */
	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      ac->ac_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));

	status = ocfs2_journal_dirty(handle,
				     ac->ac_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_block_group_set_bits(handle,
					    alloc_inode,
					    bg,
					    group_bh,
					    *bit_off,
					    *num_bits);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
	     (unsigned long long)le64_to_cpu(fe->i_blkno));

	*bg_blkno = le64_to_cpu(bg->bg_blkno);
	*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
	if (group_bh)
		brelse(group_bh);
	if (prev_group_bh)
		brelse(prev_group_bh);

	mlog_exit(status);
	return status;
}