Example #1
int __init qpnpint_of_init(struct device_node *node, struct device_node *parent)
{
	struct q_chip_data *chip_d;

	chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL);
	if (!chip_d)
		return -ENOMEM;

	chip_d->domain = irq_domain_add_tree(node,
					&qpnpint_irq_domain_ops, chip_d);
	if (!chip_d->domain) {
		pr_err("Unable to allocate irq_domain\n");
		kfree(chip_d);
		return -ENOMEM;
	}

	INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC);
	list_add(&chip_d->list, &qpnpint_chips);

	return 0;
}
Example #2
File: swap_state.c  Project: mdamt/linux
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}
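
The GFP_ATOMIC mask passed to INIT_RADIX_TREE above matters because insertions into page_tree happen under space->tree_lock, where sleeping is not allowed. A minimal sketch of the usual kernel pattern for that situation, using the stock radix-tree API (my_tree, my_lock and my_insert are hypothetical names, not part of the example above):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* node allocations must not sleep */
static DEFINE_SPINLOCK(my_lock);

/* Preload outside the lock so the atomic allocation under the lock
 * can fall back to the per-CPU node reserve. */
static int my_insert(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep */
	if (err)
		return err;

	spin_lock(&my_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	radix_tree_preload_end();
	return err;
}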
Example #3
File: blk-ioc.c  Project: CSCLOG/beaglebone
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ioc) {
		atomic_long_set(&ioc->refcount, 1);
		atomic_set(&ioc->nr_tasks, 1);
		spin_lock_init(&ioc->lock);
		ioc->ioprio_changed = 0;
		ioc->ioprio = 0;
		ioc->last_waited = 0; /* doesn't matter... */
		ioc->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ioc->cic_list);
		ioc->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ioc->cgroup_changed = 0;
#endif
	}

	return ioc;
}
Example #4
File: blkdev.c  Project: kennethlyn/mylinux
int alloc_diskmem(void)
{
	int ret, i;
	void *p;

	INIT_RADIX_TREE(&blkdev_data, GFP_KERNEL);

	/* Allocate one page per PAGE_SIZE chunk of the disk and index it
	 * in the radix tree by chunk number. */
	for (i = 0; i < (BLKDEV_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
		p = (void *)__get_free_page(GFP_KERNEL);
		if (!p) {
			ret = -ENOMEM;
			goto err_alloc;
		}
		ret = radix_tree_insert(&blkdev_data, i, p);
		if (IS_ERR_VALUE(ret))
			goto err_radix_tree_insert;
	}
	return 0;

err_radix_tree_insert:
	free_page((unsigned long)p);
err_alloc:
	free_diskmem();
	return ret;
}
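
free_diskmem() is referenced above but not shown. A plausible counterpart under the same assumptions (one page per chunk indexed in blkdev_data); this sketch is not from the original project:

void free_diskmem(void)
{
	int i;
	void *p;

	for (i = 0; i < (BLKDEV_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
		/* radix_tree_delete() returns the removed item, or NULL if
		 * the allocation loop stopped before reaching this index. */
		p = radix_tree_delete(&blkdev_data, i);
		if (p)
			free_page((unsigned long)p);
	}
}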
Example #5
int au_si_alloc(struct super_block *sb)
{
	int err;
	struct au_sbinfo *sbinfo;
	static struct lock_class_key aufs_si;

	err = -ENOMEM;
	sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
	if (unlikely(!sbinfo))
		goto out;

	BUILD_BUG_ON(sizeof(unsigned long) !=
		     sizeof(*sbinfo->au_si_pid.bitmap));
	sbinfo->au_si_pid.bitmap = kcalloc(BITS_TO_LONGS(PID_MAX_DEFAULT),
					sizeof(*sbinfo->au_si_pid.bitmap),
					GFP_NOFS);
	if (unlikely(!sbinfo->au_si_pid.bitmap))
		goto out_sbinfo;

	/* will be reallocated separately */
	sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
	if (unlikely(!sbinfo->si_branch))
		goto out_pidmap;

	err = sysaufs_si_init(sbinfo);
	if (unlikely(err))
		goto out_br;

	au_nwt_init(&sbinfo->si_nowait);
	au_rw_init_wlock(&sbinfo->si_rwsem);
	au_rw_class(&sbinfo->si_rwsem, &aufs_si);
	spin_lock_init(&sbinfo->au_si_pid.tree_lock);
	INIT_RADIX_TREE(&sbinfo->au_si_pid.tree, GFP_ATOMIC | __GFP_NOFAIL);

	atomic_long_set(&sbinfo->si_ninodes, 0);
	atomic_long_set(&sbinfo->si_nfiles, 0);

	sbinfo->si_bend = -1;

	sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
	sbinfo->si_wbr_create = AuWbrCreate_Def;
	sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
	sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;

	sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);

	mutex_init(&sbinfo->si_xib_mtx);
	sbinfo->si_xino_brid = -1;
	/* leave si_xib_last_pindex and si_xib_next_bit */

	sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
	sbinfo->si_rdblk = AUFS_RDBLK_DEF;
	sbinfo->si_rdhash = AUFS_RDHASH_DEF;
	sbinfo->si_dirwh = AUFS_DIRWH_DEF;

	au_spl_init(&sbinfo->si_plink);
	init_waitqueue_head(&sbinfo->si_plink_wq);
	spin_lock_init(&sbinfo->si_plink_maint_lock);

	/* leave other members for sysaufs and si_mnt. */
	sbinfo->si_sb = sb;
	sb->s_fs_info = sbinfo;
	si_pid_set(sb);
	au_debug_sbinfo_init(sbinfo);
	return 0; /* success */

out_br:
	kfree(sbinfo->si_branch);
out_pidmap:
	kfree(sbinfo->au_si_pid.bitmap);
out_sbinfo:
	kfree(sbinfo);
out:
	return err;
}
Example #6
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}
Example #7
/*
 * Open NVMe device
 */
NVMED* nvmed_open(char* path, int flags) {
	char* admin_path;
	int result;
	NVMED_DEVICE_INFO *dev_info;
	NVMED* nvmed;
	int fd;
	int ret;
	unsigned int num_cache;

	if(access(path, F_OK) < 0) {
		nvmed_err("%s: fail to open device file\n", path);
		return NULL;
	}

	result = get_path_from_blkdev(path, &admin_path);
	if(result < 0) {
		nvmed_printf("%s: fail to open nvme device file\n", path);
		return NULL;
	}
	
	fd = open(admin_path, 0);
	if(fd < 0) {
		nvmed_printf("%s: fail to open nvme device file\n", admin_path);
		free(admin_path);
		return NULL;
	}

	//IOCTL - Get Device Info
	dev_info = malloc(sizeof(*dev_info));
	if(dev_info == NULL) {
		free(admin_path);
		close(fd);
		return NULL;
	}
	ret = ioctl(fd, NVMED_IOCTL_NVMED_INFO, dev_info);
	if(ret < 0) {
		free(admin_path);
		free(dev_info);
		close(fd);
		return NULL;
	}

	//nvmed = malloc(sizeof(NVMED));
	nvmed = malloc(sizeof(*nvmed));
	if(nvmed==NULL) {
		free(admin_path);
		free(dev_info);
		close(fd);
		
		nvmed_printf("%s: fail to allocate nvmed buffer\n", admin_path);

		return NULL;
	}

	// Getting NVMe Device Info
	nvmed->ns_path = admin_path;
	nvmed->ns_fd = fd;
	nvmed->dev_info = dev_info;
	nvmed->flags = flags;

	// QUEUE
	nvmed->numQueue = 0;
	LIST_INIT(&nvmed->queue_head);
	
	pthread_spin_init(&nvmed->mngt_lock, 0);

	// PROCESS_CQ THREAD
	nvmed->process_cq_status = TD_STATUS_STOP;

	pthread_cond_init(&nvmed->process_cq_cond, NULL);
	pthread_mutex_init(&nvmed->process_cq_mutex, NULL);
	//pthread_create(&nvmed->process_cq_td, NULL, &nvmed_process_cq, (void*)nvmed);

	if(!__FLAG_ISSET(flags, NVMED_NO_CACHE)) {
		// CACHE
		if((nvmed->dev_info->max_hw_sectors * 512) / PAGE_SIZE > NVMED_CACHE_INIT_NUM_PAGES)
			num_cache = (nvmed->dev_info->max_hw_sectors * 512) / PAGE_SIZE;
		else
			num_cache = NVMED_CACHE_INIT_NUM_PAGES;

		nvmed->num_cache_usage = 0;
	
		TAILQ_INIT(&nvmed->lru_head);
		TAILQ_INIT(&nvmed->free_head);
		pthread_rwlock_init(&nvmed->cache_radix_lock, 0);
		pthread_spin_init(&nvmed->cache_list_lock, 0);
	
		// CACHE - INIT
		LIST_INIT(&nvmed->slot_head);
		nvmed_cache_alloc(nvmed, num_cache, 
				__FLAG_ISSET(flags, NVMED_CACHE_LAZY_INIT));

		INIT_RADIX_TREE(&nvmed->cache_root);
		radix_tree_init();
	}

	return nvmed;
}
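
Note that Example #7 is userspace code: NVMeDirect appears to ship its own port of the kernel radix tree, which would explain why INIT_RADIX_TREE takes no gfp argument here and why radix_tree_init() is called explicitly rather than by the kernel at boot.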
Example #8
File: init.c  Project: kenhys/partclone
/*
 * Mount structure initialization, provides a filled-in xfs_mount_t
 * such that the numerous XFS_* macros can be used.  If dev is zero,
 * no IO will be performed (no size checks, read root inodes).
 */
xfs_mount_t *
libxfs_mount(
	xfs_mount_t	*mp,
	xfs_sb_t	*sb,
	dev_t		dev,
	dev_t		logdev,
	dev_t		rtdev,
	int		flags)
{
	xfs_daddr_t	d;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;
	int		error;

	libxfs_buftarg_init(mp, dev, logdev, rtdev);

	mp->m_flags = (LIBXFS_MOUNT_32BITINODES|LIBXFS_MOUNT_32BITINOOPT);
	mp->m_sb = *sb;
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_KERNEL);
	sbp = &(mp->m_sb);

	xfs_sb_mount_common(mp, sb);

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	if (sbp->sb_imax_pct) {
		/* Make sure the maximum inode count is a multiple of the
		 * units we allocate inodes in.
		 */
		mp->m_maxicount = (sbp->sb_dblocks * sbp->sb_imax_pct) / 100;
		mp->m_maxicount = ((mp->m_maxicount / mp->m_ialloc_blks) *
				  mp->m_ialloc_blks)  << sbp->sb_inopblog;
	} else
		mp->m_maxicount = 0;

	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;

	/*
	 * Set whether we're using stripe alignment.
	 */
	if (xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	/*
	 * Set whether we're using inode alignment.
	 */
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
					!(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;

	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	d = (xfs_daddr_t) XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		fprintf(stderr, _("%s: size check failed\n"), progname);
		if (!(flags & LIBXFS_MOUNT_DEBUGGER))
			return NULL;
	}

	/*
	 * We automatically convert v1 inodes to v2 inodes now, so if
	 * the NLINK bit is not set we can't operate on the filesystem.
	 */
	if (!(sbp->sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {

		fprintf(stderr, _(
	"%s: V1 inodes unsupported. Please try an older xfsprogs.\n"),
				 progname);
		exit(1);
	}

	/* Check for supported directory formats */
	if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT)) {

		fprintf(stderr, _(
	"%s: V1 directories unsupported. Please try an older xfsprogs.\n"),
				 progname);
		exit(1);
	}

	/* check for unsupported other features */
	if (!xfs_sb_good_version(sbp)) {
		fprintf(stderr, _(
	"%s: Unsupported features detected. Please try a newer xfsprogs.\n"),
				 progname);
		exit(1);
	}

	xfs_da_mount(mp);

	if (xfs_sb_version_hasattr2(&mp->m_sb))
		mp->m_flags |= LIBXFS_MOUNT_ATTR2;

	/* Initialize the precomputed transaction reservations values */
	xfs_trans_init(mp);

	if (dev == 0)	/* maxtrres, we have no device so leave now */
		return mp;

	bp = libxfs_readbuf(mp->m_dev,
			d - XFS_FSS_TO_BB(mp, 1), XFS_FSS_TO_BB(mp, 1),
			!(flags & LIBXFS_MOUNT_DEBUGGER), NULL);
	if (!bp) {
		fprintf(stderr, _("%s: data size check failed\n"), progname);
		if (!(flags & LIBXFS_MOUNT_DEBUGGER))
			return NULL;
	} else
		libxfs_putbuf(bp);

	if (mp->m_logdev_targp->dev &&
	    mp->m_logdev_targp->dev != mp->m_ddev_targp->dev) {
		d = (xfs_daddr_t) XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if ( (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) ||
		     (!(bp = libxfs_readbuf(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1),
					!(flags & LIBXFS_MOUNT_DEBUGGER), NULL))) ) {
			fprintf(stderr, _("%s: log size checks failed\n"),
					progname);
			if (!(flags & LIBXFS_MOUNT_DEBUGGER))
				return NULL;
		}
		if (bp)
			libxfs_putbuf(bp);
	}

	/* Initialize realtime fields in the mount structure */
	if (rtmount_init(mp, flags)) {
		fprintf(stderr, _("%s: realtime device init failed\n"),
			progname);
		return NULL;
	}

	error = libxfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		fprintf(stderr, _("%s: perag init failed\n"),
			progname);
		exit(1);
	}

	return mp;
}