Example #1
int lfsck_namespace_setup(const struct lu_env *env,
			  struct lfsck_instance *lfsck)
{
	struct lfsck_component	*com;
	struct lfsck_namespace	*ns;
	struct dt_object	*root = NULL;
	struct dt_object	*obj;
	int			 rc;
	ENTRY;

	LASSERT(lfsck->li_master);

	OBD_ALLOC_PTR(com);
	if (com == NULL)
		RETURN(-ENOMEM);

	CFS_INIT_LIST_HEAD(&com->lc_link);
	CFS_INIT_LIST_HEAD(&com->lc_link_dir);
	init_rwsem(&com->lc_sem);
	atomic_set(&com->lc_ref, 1);
	com->lc_lfsck = lfsck;
	com->lc_type = LT_NAMESPACE;
	com->lc_ops = &lfsck_namespace_ops;
	com->lc_file_size = sizeof(struct lfsck_namespace);
	OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
	if (com->lc_file_ram == NULL)
		GOTO(out, rc = -ENOMEM);

	OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
	if (com->lc_file_disk == NULL)
		GOTO(out, rc = -ENOMEM);

	root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
	if (IS_ERR(root))
		GOTO(out, rc = PTR_ERR(root));

	if (unlikely(!dt_try_as_dir(env, root)))
		GOTO(out, rc = -ENOTDIR);

	obj = local_index_find_or_create(env, lfsck->li_los, root,
					 lfsck_namespace_name,
					 S_IFREG | S_IRUGO | S_IWUSR,
					 &dt_lfsck_features);
	if (IS_ERR(obj))
		GOTO(out, rc = PTR_ERR(obj));

	com->lc_obj = obj;
	rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_features);
	if (rc != 0)
		GOTO(out, rc);

	rc = lfsck_namespace_load(env, com);
	if (rc > 0)
		rc = lfsck_namespace_reset(env, com, true);
	else if (rc == -ENODATA)
		rc = lfsck_namespace_init(env, com);
	if (rc != 0)
		GOTO(out, rc);

	ns = com->lc_file_ram;
	switch (ns->ln_status) {
	case LS_INIT:
	case LS_COMPLETED:
	case LS_FAILED:
	case LS_STOPPED:
		spin_lock(&lfsck->li_lock);
		cfs_list_add_tail(&com->lc_link, &lfsck->li_list_idle);
		spin_unlock(&lfsck->li_lock);
		break;
	default:
		CERROR("%s: unknown lfsck_namespace status: rc = %u\n",
		       lfsck_lfsck2name(lfsck), ns->ln_status);
		/* fall through */
	case LS_SCANNING_PHASE1:
	case LS_SCANNING_PHASE2:
		/* No need to store the status to disk right now.
		 * If the system crashed before the status was stored,
		 * it will be reloaded on the next mount. */
		ns->ln_status = LS_CRASHED;
		/* fall through */
	case LS_PAUSED:
	case LS_CRASHED:
		spin_lock(&lfsck->li_lock);
		cfs_list_add_tail(&com->lc_link, &lfsck->li_list_scan);
		cfs_list_add_tail(&com->lc_link_dir, &lfsck->li_list_dir);
		spin_unlock(&lfsck->li_lock);
		break;
	}

	GOTO(out, rc = 0);

out:
	if (root != NULL && !IS_ERR(root))
		lu_object_put(env, &root->do_lu);
	if (rc != 0)
		lfsck_component_cleanup(env, com);
	return rc;
}
Example #2
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 *	Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this cpu. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace id. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPUs CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
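
The probe publishes its results through per-CPU variables (per_cpu(cpc_desc_ptr, pr->id) and per_cpu(cpu_pcc_subspace_idx, pr->id)). A minimal sketch of that bookkeeping pattern, using a hypothetical demo_cpc_ptr slot rather than the driver's real variables:

#include <linux/percpu.h>

struct cpc_desc;			/* only stored as a pointer here */

/* One pointer slot per logical CPU; demo_cpc_ptr is illustrative only. */
static DEFINE_PER_CPU(struct cpc_desc *, demo_cpc_ptr);

/* Publish a parsed descriptor for one CPU, as the probe above does once
 * everything has been validated. */
static void demo_publish(int cpu, struct cpc_desc *desc)
{
	per_cpu(demo_cpc_ptr, cpu) = desc;
}

/* Look it up again from any context that knows the CPU id. */
static struct cpc_desc *demo_lookup(int cpu)
{
	return per_cpu(demo_cpc_ptr, cpu);
}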
Example #3
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
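
alloc_dev() above is a textbook instance of the goto-ladder unwind idiom: each acquired resource gets a cleanup label, and a failure jumps to the label that releases exactly what has been set up so far, in reverse order. A stripped-down sketch of the idiom; the demo_* names and acquire_*/release_* helpers are hypothetical stand-ins for calls like blk_alloc_queue() and mempool_create():

#include <linux/slab.h>

struct res_a;
struct res_b;

/* Hypothetical resource helpers, declared only to keep the sketch
 * self-contained. */
struct res_a *acquire_a(void);
void release_a(struct res_a *a);
struct res_b *acquire_b(void);

struct demo {
	struct res_a *a;
	struct res_b *b;
};

static struct demo *demo_alloc(void)
{
	struct demo *d = kmalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	d->a = acquire_a();
	if (!d->a)
		goto bad1;		/* only d itself to undo */

	d->b = acquire_b();
	if (!d->b)
		goto bad2;		/* undo a, then free d */

	return d;

 bad2:
	release_a(d->a);
 bad1:
	kfree(d);
	return NULL;
}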
Example #4
static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
{
	struct ncp_mount_data_kernel data;
	struct ncp_server *server;
	struct file *ncp_filp;
	struct inode *root_inode;
	struct inode *sock_inode;
	struct socket *sock;
	int error;
	int default_bufsize;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
	int options;
#endif
	struct ncp_entry_info finfo;

	memset(&data, 0, sizeof(data));
	server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
	if (!server)
		return -ENOMEM;
	sb->s_fs_info = server;

	error = -EFAULT;
	if (raw_data == NULL)
		goto out;
	switch (*(int*)raw_data) {
		case NCP_MOUNT_VERSION:
			{
				struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data;

				data.flags = md->flags;
				data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
				data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid);
				data.wdog_pid = find_get_pid(md->wdog_pid);
				data.ncp_fd = md->ncp_fd;
				data.time_out = md->time_out;
				data.retry_count = md->retry_count;
				data.uid = make_kuid(current_user_ns(), md->uid);
				data.gid = make_kgid(current_user_ns(), md->gid);
				data.file_mode = md->file_mode;
				data.dir_mode = md->dir_mode;
				data.info_fd = -1;
				memcpy(data.mounted_vol, md->mounted_vol,
					NCP_VOLNAME_LEN+1);
			}
			break;
		case NCP_MOUNT_VERSION_V4:
			{
				struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;

				data.flags = md->flags;
				data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid);
				data.wdog_pid = find_get_pid(md->wdog_pid);
				data.ncp_fd = md->ncp_fd;
				data.time_out = md->time_out;
				data.retry_count = md->retry_count;
				data.uid = make_kuid(current_user_ns(), md->uid);
				data.gid = make_kgid(current_user_ns(), md->gid);
				data.file_mode = md->file_mode;
				data.dir_mode = md->dir_mode;
				data.info_fd = -1;
			}
			break;
		default:
			error = -ECHRNG;
			if (memcmp(raw_data, "vers", 4) == 0) {
				error = ncp_parse_options(&data, raw_data);
			}
			if (error)
				goto out;
			break;
	}
	error = -EINVAL;
	if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) ||
	    !gid_valid(data.gid))
		goto out;
	error = -EBADF;
	ncp_filp = fget(data.ncp_fd);
	if (!ncp_filp)
		goto out;
	error = -ENOTSOCK;
	sock_inode = file_inode(ncp_filp);
	if (!S_ISSOCK(sock_inode->i_mode))
		goto out_fput;
	sock = SOCKET_I(sock_inode);
	if (!sock)
		goto out_fput;
		
	if (sock->type == SOCK_STREAM)
		default_bufsize = 0xF000;
	else
		default_bufsize = 1024;

	sb->s_flags |= MS_NODIRATIME;	/* probably even noatime */
	sb->s_maxbytes = 0xFFFFFFFFU;
	sb->s_blocksize = 1024;	/* Eh...  Is this correct? */
	sb->s_blocksize_bits = 10;
	sb->s_magic = NCP_SUPER_MAGIC;
	sb->s_op = &ncp_sops;
	sb->s_d_op = &ncp_dentry_operations;
	sb->s_bdi = &server->bdi;

	server = NCP_SBP(sb);
	memset(server, 0, sizeof(*server));

	error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
	if (error)
		goto out_fput;

	server->ncp_filp = ncp_filp;
	server->ncp_sock = sock;
	
	if (data.info_fd != -1) {
		struct socket *info_sock;

		error = -EBADF;
		server->info_filp = fget(data.info_fd);
		if (!server->info_filp)
			goto out_bdi;
		error = -ENOTSOCK;
		sock_inode = file_inode(server->info_filp);
		if (!S_ISSOCK(sock_inode->i_mode))
			goto out_fput2;
		info_sock = SOCKET_I(sock_inode);
		if (!info_sock)
			goto out_fput2;
		error = -EBADFD;
		if (info_sock->type != SOCK_STREAM)
			goto out_fput2;
		server->info_sock = info_sock;
	}

/*	server->lock = 0;	*/
	mutex_init(&server->mutex);
	server->packet = NULL;
/*	server->buffer_size = 0;	*/
/*	server->conn_status = 0;	*/
/*	server->root_dentry = NULL;	*/
/*	server->root_setuped = 0;	*/
	mutex_init(&server->root_setup_lock);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
/*	server->sign_wanted = 0;	*/
/*	server->sign_active = 0;	*/
#endif
	init_rwsem(&server->auth_rwsem);
	server->auth.auth_type = NCP_AUTH_NONE;
/*	server->auth.object_name_len = 0;	*/
/*	server->auth.object_name = NULL;	*/
/*	server->auth.object_type = 0;		*/
/*	server->priv.len = 0;			*/
/*	server->priv.data = NULL;		*/

	server->m = data;
	/* Although anything producing this is buggy, it happens
	   now because of PATH_MAX changes.. */
	if (server->m.time_out < 1) {
		server->m.time_out = 10;
		printk(KERN_INFO "You need to recompile your ncpfs utils..\n");
	}
	server->m.time_out = server->m.time_out * HZ / 100;
	server->m.file_mode = (server->m.file_mode & S_IRWXUGO) | S_IFREG;
	server->m.dir_mode = (server->m.dir_mode & S_IRWXUGO) | S_IFDIR;

#ifdef CONFIG_NCPFS_NLS
	/* load the default NLS charsets */
	server->nls_vol = load_nls_default();
	server->nls_io = load_nls_default();
#endif /* CONFIG_NCPFS_NLS */

	atomic_set(&server->dentry_ttl, 0);	/* no caching */

	INIT_LIST_HEAD(&server->tx.requests);
	mutex_init(&server->rcv.creq_mutex);
	server->tx.creq		= NULL;
	server->rcv.creq	= NULL;

	init_timer(&server->timeout_tm);
#undef NCP_PACKET_SIZE
#define NCP_PACKET_SIZE 131072
	error = -ENOMEM;
	server->packet_size = NCP_PACKET_SIZE;
	server->packet = vmalloc(NCP_PACKET_SIZE);
	if (server->packet == NULL)
		goto out_nls;
	server->txbuf = vmalloc(NCP_PACKET_SIZE);
	if (server->txbuf == NULL)
		goto out_packet;
	server->rxbuf = vmalloc(NCP_PACKET_SIZE);
	if (server->rxbuf == NULL)
		goto out_txbuf;

	lock_sock(sock->sk);
	server->data_ready	= sock->sk->sk_data_ready;
	server->write_space	= sock->sk->sk_write_space;
	server->error_report	= sock->sk->sk_error_report;
	sock->sk->sk_user_data	= server;
	sock->sk->sk_data_ready	  = ncp_tcp_data_ready;
	sock->sk->sk_error_report = ncp_tcp_error_report;
	if (sock->type == SOCK_STREAM) {
		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
		server->rcv.len = 10;
		server->rcv.state = 0;
		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
		sock->sk->sk_write_space = ncp_tcp_write_space;
	} else {
		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
		server->timeout_tm.data = (unsigned long)server;
		server->timeout_tm.function = ncpdgram_timeout_call;
	}
	release_sock(sock->sk);

	ncp_lock_server(server);
	error = ncp_connect(server);
	ncp_unlock_server(server);
	if (error < 0)
		goto out_rxbuf;
	DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb));

	error = -EMSGSIZE;	/* -EREMOTESIDEINCOMPATIBLE */
#ifdef CONFIG_NCPFS_PACKET_SIGNING
	if (ncp_negotiate_size_and_options(server, default_bufsize,
		NCP_DEFAULT_OPTIONS, &(server->buffer_size), &options) == 0)
	{
		if (options != NCP_DEFAULT_OPTIONS)
		{
			if (ncp_negotiate_size_and_options(server,
				default_bufsize,
				options & 2,
				&(server->buffer_size), &options) != 0)
			{
				goto out_disconnect;
			}
		}
		ncp_lock_server(server);
		if (options & 2)
			server->sign_wanted = 1;
		ncp_unlock_server(server);
	}
	else
#endif	/* CONFIG_NCPFS_PACKET_SIGNING */
	if (ncp_negotiate_buffersize(server, default_bufsize,
  				     &(server->buffer_size)) != 0)
		goto out_disconnect;
	DPRINTK("ncpfs: bufsize = %d\n", server->buffer_size);

	memset(&finfo, 0, sizeof(finfo));
	finfo.i.attributes	= aDIR;
	finfo.i.dataStreamSize	= 0;	/* ignored */
	finfo.i.dirEntNum	= 0;
	finfo.i.DosDirNum	= 0;
#ifdef CONFIG_NCPFS_SMALLDOS
	finfo.i.NSCreator	= NW_NS_DOS;
#endif
	finfo.volume		= NCP_NUMBER_OF_VOLUMES;
	/* set dates of mountpoint to Jan 1, 1986; 00:00 */
	finfo.i.creationTime	= finfo.i.modifyTime
				= cpu_to_le16(0x0000);
	finfo.i.creationDate	= finfo.i.modifyDate
				= finfo.i.lastAccessDate
				= cpu_to_le16(0x0C21);
	finfo.i.nameLen		= 0;
	finfo.i.entryName[0]	= '\0';

	finfo.opened		= 0;
	finfo.ino		= 2;	/* tradition */

	server->name_space[finfo.volume] = NW_NS_DOS;

	error = -ENOMEM;
	root_inode = ncp_iget(sb, &finfo);
	if (!root_inode)
		goto out_disconnect;
	DPRINTK("ncp_fill_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber);
	sb->s_root = d_make_root(root_inode);
	if (!sb->s_root)
		goto out_disconnect;
	return 0;

out_disconnect:
	ncp_lock_server(server);
	ncp_disconnect(server);
	ncp_unlock_server(server);
out_rxbuf:
	ncp_stop_tasks(server);
	vfree(server->rxbuf);
out_txbuf:
	vfree(server->txbuf);
out_packet:
	vfree(server->packet);
out_nls:
#ifdef CONFIG_NCPFS_NLS
	unload_nls(server->nls_io);
	unload_nls(server->nls_vol);
#endif
	mutex_destroy(&server->rcv.creq_mutex);
	mutex_destroy(&server->root_setup_lock);
	mutex_destroy(&server->mutex);
out_fput2:
	if (server->info_filp)
		fput(server->info_filp);
out_bdi:
	bdi_destroy(&server->bdi);
out_fput:
	/* 23/12/1998 Marcin Dalecki <*****@*****.**>:
	 * 
	 * The previously used put_filp(ncp_filp); was bogus, since
	 * it doesn't perform proper unlocking.
	 */
	fput(ncp_filp);
out:
	put_pid(data.wdog_pid);
	sb->s_fs_info = NULL;
	kfree(server);
	return error;
}
Example #5
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a temporary block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE))
		goto free_sbi;

	/* read f2fs raw super block */
	raw_super_buf = sb_bread(sb, F2FS_SUPER_OFFSET);
	if (!raw_super_buf)
		goto free_sbi;
	raw_super = (struct f2fs_super_block *) ((char *)raw_super_buf->b_data);

	/* init some FS parameters */
	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sbi, (char *)data))
		goto free_sb_buf;

	/* sanity checking of raw super */
	if (sanity_check_raw_super(raw_super))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(raw_super->log_blocksize);
	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode))
		goto free_sb_buf;

	if (get_valid_checkpoint(sbi))
		goto free_meta_inode;

	/* sanity checking of checkpoint */
	if (sanity_check_ckpt(raw_super, sbi->ckpt))
		goto free_cp;

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	/* init super block */
	if (!sb_set_blocksize(sb, sbi->blocksize))
		goto free_cp;

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	if (build_segment_manager(sbi))
		goto free_sm;
	if (build_node_manager(sbi))
		goto free_nm;
	if (build_gc_manager(sbi))
		goto free_gc;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode))
		goto free_gc;

	/* if there are any orphan nodes, free them */
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root))
		goto free_node_inode;
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_alloc_root(root); /* allocate root dentry */
	if (!sb->s_root)
		goto free_root_inode;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	if (start_gc_thread(sbi))
		goto fail;

#ifdef CONFIG_F2FS_STAT_FS
	if (f2fs_proc_root) {
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
		if (f2fs_stat_init(sbi))
			goto fail;
	}
#endif
	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	make_bad_inode(root);
	iput(root);
free_node_inode:
	make_bad_inode(sbi->node_inode);
	iput(sbi->node_inode);
free_gc:
	destroy_gc_manager(sbi);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return -EINVAL;
}
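
Note the le32_to_cpu()/le64_to_cpu() calls when the checkpoint counters are copied into sbi: the on-disk fields are fixed little-endian and are converted to native byte order exactly once, at load time. A minimal sketch of that split; struct demo_ckpt is a hypothetical layout, not the real struct f2fs_checkpoint:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical on-disk record: fields are fixed little-endian. */
struct demo_ckpt {
	__le32 valid_node_count;
	__le64 valid_block_count;
};

/* The in-core mirror uses native integers. */
struct demo_info {
	unsigned int total_valid_node_count;
	u64 total_valid_block_count;
};

static void demo_load_ckpt(const struct demo_ckpt *ckpt,
			   struct demo_info *info)
{
	info->total_valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	info->total_valid_block_count = le64_to_cpu(ckpt->valid_block_count);
}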
Example #6
/*
 * our custom d_alloc_root work-alike
 *
 * we can't use d_alloc_root if we want to use our own interpose function
 * unchanged, so we simply call our own "fake" d_alloc_root
 */
static struct dentry *unionfs_d_alloc_root(struct super_block *sb)
{
	struct dentry *ret = NULL;

	if (sb) {
		static const struct qstr name = {
			.name = "/",
			.len = 1
		};

		ret = d_alloc(NULL, &name);
		if (likely(ret)) {
			ret->d_op = &unionfs_dops;
			ret->d_sb = sb;
			ret->d_parent = ret;
		}
	}
	return ret;
}

/*
 * There is no need to lock the unionfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;
	struct unionfs_dentry_info *lower_root_info = NULL;
	int bindex, bstart, bend;

	if (!raw_data) {
		printk(KERN_ERR
		       "unionfs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* Allocate superblock private data */
	sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (unlikely(!UNIONFS_SB(sb))) {
		printk(KERN_CRIT "unionfs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out;
	}

	UNIONFS_SB(sb)->bend = -1;
	atomic_set(&UNIONFS_SB(sb)->generation, 1);
	init_rwsem(&UNIONFS_SB(sb)->rwsem);
	UNIONFS_SB(sb)->high_branch_id = -1; /* -1 == invalid branch ID */

	lower_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
		       "unionfs: read_super: error while parsing options "
		       "(err = %ld)\n", PTR_ERR(lower_root_info));
		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}
	if (lower_root_info->bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	bstart = lower_root_info->bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = lower_root_info->bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
		atomic_inc(&d->d_sb->s_active);
		unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
	}

	/* max Bytes is the maximum bytes from highest priority branch */
	sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	 * Our c/m/atime granularity is 1 ns because we may stack on file
	 * systems whose granularity is as good.  This is important for our
	 * time-based cache coherency.
	 */
	sb->s_time_gran = 1;

	sb->s_op = &unionfs_sops;

	/* See comment next to the definition of unionfs_d_alloc_root */
	sb->s_root = unionfs_d_alloc_root(sb);
	if (unlikely(!sb->s_root)) {
		err = -ENOMEM;
		goto out_dput;
	}

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
	if (unlikely(err))
		goto out_freedpd;

	/* Set the lower dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;
		struct vfsmount *m;

		d = lower_root_info->lower_paths[bindex].dentry;
		m = lower_root_info->lower_paths[bindex].mnt;

		unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
		unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
	}
	dbstart(sb->s_root) = bstart;
	dbend(sb->s_root) = bend;

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);

	/*
	 * Call interpose to create the upper level inode.  Only
	 * INTERPOSE_LOOKUP can return a value other than 0 on err.
	 */
	err = PTR_ERR(unionfs_interpose(sb->s_root, sb, 0));
	unionfs_unlock_dentry(sb->s_root);
	if (!err)
		goto out;
	/* else fall through */

out_freedpd:
	if (UNIONFS_D(sb->s_root)) {
		kfree(UNIONFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	dput(sb->s_root);

out_dput:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (bindex = lower_root_info->bstart;
		     bindex <= lower_root_info->bend; bindex++) {
			struct dentry *d;
			struct vfsmount *m;

			d = lower_root_info->lower_paths[bindex].dentry;
			m = lower_root_info->lower_paths[bindex].mnt;

			dput(d);
			/* initializing: can't use unionfs_mntput here */
			mntput(m);
			/* drop refs we took earlier */
			atomic_dec(&d->d_sb->s_active);
		}
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
		lower_root_info = NULL;
	}

out_free:
	kfree(UNIONFS_SB(sb)->data);
	kfree(UNIONFS_SB(sb));
	sb->s_fs_info = NULL;

out:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
Example #7
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;
	struct siw_uresp_create_qp	uresp;

	unsigned long flags;
	int kernel_verbs = ofa_pd->uobject ? 0 : 1;
	int rv = 0;

	dprint(DBG_OBJ|DBG_CM, ": new QP on device %s\n",
		ofa_dev->name);

	if (attrs->qp_type != IB_QPT_RC) {
		dprint(DBG_ON, ": Only RC QP's supported\n");
		return ERR_PTR(-ENOSYS);
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE)  ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		dprint(DBG_ON, ": QP Size!\n");
		return ERR_PTR(-EINVAL);
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		dprint(DBG_ON, ": Max Inline Send %d > %d!\n",
		       attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		return ERR_PTR(-EINVAL);
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0)
		return ERR_PTR(-EINVAL);

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		dprint(DBG_ON, ": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		dprint(DBG_OBJ, ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		dprint(DBG_ON, ": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	INIT_LIST_HEAD(&qp->freeq);
	INIT_LIST_HEAD(&qp->sq);
	INIT_LIST_HEAD(&qp->rq);
	INIT_LIST_HEAD(&qp->orq);
	INIT_LIST_HEAD(&qp->irq);

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->freeq_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	init_waitqueue_head(&qp->tx_ctx.waitq);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	if (kernel_verbs) {
		int num_wqe = attrs->cap.max_send_wr + attrs->cap.max_recv_wr;
		while (num_wqe--) {
			struct siw_wqe *wqe = kzalloc(sizeof *wqe, GFP_KERNEL);
			if (!wqe) {
				rv = -ENOMEM;
				goto err_out_idr;
			}
			SIW_INC_STAT_WQE;
			INIT_LIST_HEAD(&wqe->list);
			list_add(&wqe->list, &qp->freeq);
		}
		qp->attrs.flags |= SIW_KERNEL_VERBS;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out;
		}
	}
	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = siw_srq_ofa2siw(attrs->srq);
		qp->attrs.rq_size = 0;
		atomic_set(&qp->rq_space, 0);
		dprint(DBG_OBJ, " QP(%d): SRQ(%p) attached\n",
			QP_ID(qp), qp->srq);
	} else {
		qp->srq = NULL;
		qp->attrs.rq_size = attrs->cap.max_recv_wr;
		atomic_set(&qp->rq_space, qp->attrs.rq_size);
	}
	qp->attrs.sq_size = attrs->cap.max_send_wr;
	atomic_set(&qp->sq_space, qp->attrs.sq_size);
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	/*
	 * ofed has no max_send_sge_rdmawrite
	 */
	qp->attrs.sq_max_sges_rdmaw = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		uresp.sq_size = qp->attrs.sq_size;
		uresp.rq_size = qp->attrs.rq_size;
		uresp.qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	atomic_set(&qp->tx_ctx.in_use, 0);

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->idr_lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->idr_lock, flags);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	if (qp)
		siw_drain_wq(&qp->freeq);

	kfree(qp);
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}
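
The final insertion into sdev->qp_list uses spin_lock_irqsave(), the variant chosen when a lock may also be taken from interrupt context: it disables local interrupts and remembers their previous state in flags. A minimal sketch of the pattern with illustrative demo_* names:

#include <linux/spinlock.h>
#include <linux/list.h>

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Local interrupts are disabled while the lock is held and their
 * previous state is restored afterwards, so this is safe even if the
 * lock is also taken from an interrupt handler. */
static void demo_add_tail(struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(item, &demo_list);
	spin_unlock_irqrestore(&demo_lock, flags);
}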
Example #8
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	err = parse_options(sb, (char *)data);
	if (err)
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	mutex_init(&sbi->node_write);
	sbi->por_doing = false;
	spin_lock_init(&sbi->stat_lock);

	mutex_init(&sbi->read_io.io_mutex);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		mutex_init(&sbi->write_io[i].io_mutex);

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		err = -EINVAL;
		goto free_root_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (!(sb->s_flags & MS_RDONLY)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_gc;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_gc;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto fail;

	return 0;
fail:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_gc:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
Example #9
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;
	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
	if (!sdp->sd_lkstats) {
		kfree(sdp);
		return NULL;
	}

	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	gfs2_tune_init(&sdp->sd_tune);

	init_waitqueue_head(&sdp->sd_glock_wait);
	atomic_set(&sdp->sd_glock_disposal, 0);
	init_completion(&sdp->sd_locking_init);
	spin_lock_init(&sdp->sd_statfs_spin);

	spin_lock_init(&sdp->sd_rindex_spin);
	sdp->sd_rindex_tree.rb_node = NULL;

	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);

	INIT_LIST_HEAD(&sdp->sd_quota_list);
	mutex_init(&sdp->sd_quota_mutex);
	init_waitqueue_head(&sdp->sd_quota_wait);
	INIT_LIST_HEAD(&sdp->sd_trunc_list);
	spin_lock_init(&sdp->sd_trunc_lock);

	spin_lock_init(&sdp->sd_log_lock);
	atomic_set(&sdp->sd_log_pinned, 0);
	INIT_LIST_HEAD(&sdp->sd_log_le_buf);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_rg);
	INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);

	init_waitqueue_head(&sdp->sd_log_waitq);
	init_waitqueue_head(&sdp->sd_logd_waitq);
	spin_lock_init(&sdp->sd_ail_lock);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	atomic_set(&sdp->sd_log_in_flight, 0);
	init_waitqueue_head(&sdp->sd_log_flush_wait);

	INIT_LIST_HEAD(&sdp->sd_revoke_list);

	mutex_init(&sdp->sd_freeze_lock);

	return sdp;
}
Example #10
static struct dentry *unionfs_d_alloc_root(struct super_block *sb)
{
	struct dentry *ret = NULL;

	if (sb) {
		static const struct qstr name = { .name = "/", .len = 1 };

		ret = d_alloc(NULL, &name);
		if (ret) {
			ret->d_op = &unionfs_dops;
			ret->d_sb = sb;
			ret->d_parent = ret;
		}
	}
	return ret;
}

static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;

	struct unionfs_dentry_info *hidden_root_info = NULL;
	int bindex, bstart, bend;
	unsigned long long maxbytes;

	print_entry_location();

	if (!raw_data) {
		printk(KERN_WARNING
		       "unionfs_read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate superblock private data
	 */
	stopd_lhs(sb) = KZALLOC(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (!stopd(sb)) {
		printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	stopd(sb)->b_end = -1;
	atomic_set(&stopd(sb)->usi_generation, 1);
	init_rwsem(&stopd(sb)->usi_rwsem);

	hidden_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(hidden_root_info)) {
		printk(KERN_WARNING
		       "unionfs_read_super: error while parsing options (err = %ld)\n",
		       PTR_ERR(hidden_root_info));
		err = PTR_ERR(hidden_root_info);
		hidden_root_info = NULL;
		goto out_free;
	}
	if (hidden_root_info->udi_bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the hidden superblock field of upper superblock */
	bstart = hidden_root_info->udi_bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = hidden_root_info->udi_bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;

		d = hidden_root_info->udi_dentry[bindex];

		set_stohs_index(sb, bindex, d->d_sb);
	}

	/* Unionfs: Max Bytes is the maximum bytes from among all the branches */
	maxbytes = -1;
	for (bindex = bstart; bindex <= bend; bindex++)
		if (maxbytes < stohs_index(sb, bindex)->s_maxbytes)
			maxbytes = stohs_index(sb, bindex)->s_maxbytes;
	sb->s_maxbytes = maxbytes;

	sb->s_op = &unionfs_sops;
	sb->s_export_op = &unionfs_export_ops;

	/*
	 * we can't use d_alloc_root if we want to use
	 * our own interpose function unchanged,
	 * so we simply call our own "fake" d_alloc_root
	 */
	sb->s_root = unionfs_d_alloc_root(sb);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_dput;
	}

	/* link the upper and lower dentries */
	dtopd_lhs(sb->s_root) = NULL;
	if ((err = new_dentry_private_data(sb->s_root)))
		goto out_freedpd;

	/* Set the hidden dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;

		d = hidden_root_info->udi_dentry[bindex];

		set_dtohd_index(sb->s_root, bindex, d);
	}
	set_dbstart(sb->s_root, bstart);
	set_dbend(sb->s_root, bend);

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&dtopd(sb->s_root)->udi_generation, 1);

	/* call interpose to create the upper level inode */
	if ((err = unionfs_interpose(sb->s_root, sb, 0)))
		goto out_freedpd;
	unlock_dentry(sb->s_root);
	goto out;

out_freedpd:
	if (dtopd(sb->s_root)) {
		KFREE(dtohd_ptr(sb->s_root));
		free_dentry_private_data(dtopd(sb->s_root));
	}
	DPUT(sb->s_root);
out_dput:
	if (hidden_root_info && !IS_ERR(hidden_root_info)) {
		for (bindex = hidden_root_info->udi_bstart;
		     bindex <= hidden_root_info->udi_bend; bindex++) {
			struct dentry *d;

			d = hidden_root_info->udi_dentry[bindex];

			if (d)
				DPUT(d);

			if (stopd(sb) && stohiddenmnt_index(sb, bindex))
				mntput(stohiddenmnt_index(sb, bindex));
		}
		KFREE(hidden_root_info->udi_dentry);
		KFREE(hidden_root_info);
		hidden_root_info = NULL;
	}
out_free:
	KFREE(stopd(sb)->usi_data);
	KFREE(stopd(sb));
	stopd_lhs(sb) = NULL;
out:
	if (hidden_root_info && !IS_ERR(hidden_root_info)) {
		KFREE(hidden_root_info->udi_dentry);
		KFREE(hidden_root_info);
	}
	print_exit_status(err);
	return err;
}
Example #11
/* Inode operations */
static void gfs_init_inode_always(struct gfs_inode_info *i)
{
	init_rwsem(&i->i_rwsem);
	i->i_bh = NULL;
}
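
This helper only initialises the semaphore; the readers and writers come later. A minimal sketch of how such a per-inode rwsem is typically used after init_rwsem() (the demo_* wrappers are hypothetical, not GFS code):

#include <linux/rwsem.h>

struct buffer_head;			/* only stored as a pointer here */

/* Hypothetical mirror of the gfs_inode_info fields above. */
struct demo_inode_info {
	struct rw_semaphore i_rwsem;
	struct buffer_head *i_bh;
};

static void demo_init(struct demo_inode_info *i)
{
	init_rwsem(&i->i_rwsem);	/* must precede any down_*() call */
	i->i_bh = NULL;
}

/* Readers may run concurrently with each other... */
static struct buffer_head *demo_peek_bh(struct demo_inode_info *i)
{
	struct buffer_head *bh;

	down_read(&i->i_rwsem);
	bh = i->i_bh;
	up_read(&i->i_rwsem);
	return bh;
}

/* ...while a writer holds the semaphore exclusively. */
static void demo_set_bh(struct demo_inode_info *i, struct buffer_head *bh)
{
	down_write(&i->i_rwsem);
	i->i_bh = bh;
	up_write(&i->i_rwsem);
}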
Example #12
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;

	int rv = 0;

	pr_debug(DBG_OBJ DBG_CM ": new QP on device %s\n",
		ofa_dev->name);

	if (!ofa_pd->uobject) {
		pr_debug(": This driver does not support kernel clients\n");
		return ERR_PTR(-EINVAL);
	}

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		pr_debug(": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		pr_debug(": Only RC QP's supported\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->srq) {
		pr_debug(": SRQ is not supported\n");
		rv = -EINVAL;
		goto err_out;
	}

	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		pr_debug(DBG_OBJ ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp) {
		pr_debug(": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	init_rwsem(&qp->state_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;
	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct urdma_udata_create_qp ureq;
		struct urdma_uresp_create_qp uresp;

		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out_idr;
		qp->attrs.irq_size = ureq.ird_max;
		qp->attrs.orq_size = ureq.ord_max;
		qp->attrs.urdma_devid = ureq.urdmad_dev_id;
		qp->attrs.urdma_qp_id = ureq.urdmad_qp_id;
		qp->attrs.urdma_rxq = ureq.rxq;
		qp->attrs.urdma_txq = ureq.txq;

		memset(&uresp, 0, sizeof uresp);
		uresp.kmod_qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	kfree(qp);
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}
Example #13
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;
	sig->tty = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
	sig->prev_utime = sig->prev_stime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	sig->maxrss = sig->cmaxrss = 0;
	task_io_accounting_init(&sig->ioac);
	sig->sum_sched_runtime = 0;
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->threadgroup_fork_lock);
#endif

	sig->oom_adj = current->signal->oom_adj;
	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	return 0;
}
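
copy_signal() allocates from signal_cachep, a slab cache dedicated to struct signal_struct objects (the cache itself is created elsewhere during fork-infrastructure setup). A minimal sketch of creating and using such a cache; demo_cachep and struct demo_struct are illustrative only:

#include <linux/init.h>
#include <linux/slab.h>

struct demo_struct {
	int payload;
};

static struct kmem_cache *demo_cachep;

/* Create the cache once at init time. */
static int __init demo_cache_init(void)
{
	demo_cachep = kmem_cache_create("demo_struct",
					sizeof(struct demo_struct), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}

/* Fixed-size allocations come from the cache thereafter. */
static struct demo_struct *demo_alloc(void)
{
	return kmem_cache_alloc(demo_cachep, GFP_KERNEL);
}

static void demo_free(struct demo_struct *p)
{
	kmem_cache_free(demo_cachep, p);
}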
Example #14
/*
 * lookup a volume by name
 * - this can be one of the following:
 *    "%[cell:]volume[.]"        R/W volume
 *    "#[cell:]volume[.]"        R/O or R/W volume (rwparent=0),
 *                     or R/W (rwparent=1) volume
 *    "%[cell:]volume.readonly"    R/O volume
 *    "#[cell:]volume.readonly"    R/O volume
 *    "%[cell:]volume.backup"        Backup volume
 *    "#[cell:]volume.backup"        Backup volume
 *
 * The cell name is optional, and defaults to the current cell.
 *
 * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin
 * Guide
 * - Rule 1: Explicit type suffix forces access of that type or nothing
 *           (no suffix, then use Rule 2 & 3)
 * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W
 *           if not available
 * - Rule 3: If parent volume is R/W, then only mount R/W volume unless
 *           explicitly told otherwise
 */
struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
{
    struct afs_vlocation *vlocation = NULL;
    struct afs_volume *volume = NULL;
    struct afs_server *server = NULL;
    char srvtmask;
    int ret, loop;

    _enter("{%*.*s,%d}",
           params->volnamesz, params->volnamesz, params->volname, params->rwpath);

    /* lookup the volume location record */
    vlocation = afs_vlocation_lookup(params->cell, params->key,
                     params->volname, params->volnamesz);
    if (IS_ERR(vlocation)) {
        ret = PTR_ERR(vlocation);
        vlocation = NULL;
        goto error;
    }

    /* make the final decision on the type we want */
    ret = -ENOMEDIUM;
    if (params->force && !(vlocation->vldb.vidmask & (1 << params->type)))
        goto error;

    srvtmask = 0;
    for (loop = 0; loop < vlocation->vldb.nservers; loop++)
        srvtmask |= vlocation->vldb.srvtmask[loop];

    if (params->force) {
        if (!(srvtmask & (1 << params->type)))
            goto error;
    } else if (srvtmask & AFS_VOL_VTM_RO) {
        params->type = AFSVL_ROVOL;
    } else if (srvtmask & AFS_VOL_VTM_RW) {
        params->type = AFSVL_RWVOL;
    } else {
        goto error;
    }

    down_write(&params->cell->vl_sem);

    /* is the volume already active? */
    if (vlocation->vols[params->type]) {
        /* yes - re-use it */
        volume = vlocation->vols[params->type];
        afs_get_volume(volume);
        goto success;
    }

    /* create a new volume record */
    _debug("creating new volume record");

    ret = -ENOMEM;
    volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
    if (!volume)
        goto error_up;

    atomic_set(&volume->usage, 1);
    volume->type        = params->type;
    volume->type_force    = params->force;
    volume->cell        = params->cell;
    volume->vid        = vlocation->vldb.vid[params->type];

    init_rwsem(&volume->server_sem);

    /* look up all the applicable server records */
    for (loop = 0; loop < 8; loop++) {
        if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
            server = afs_lookup_server(
                   volume->cell, &vlocation->vldb.servers[loop]);
            if (IS_ERR(server)) {
                ret = PTR_ERR(server);
                goto error_discard;
            }

            volume->servers[volume->nservers] = server;
            volume->nservers++;
        }
    }

    /* attach the cache and volume location */
#ifdef AFS_CACHING_SUPPORT
    cachefs_acquire_cookie(vlocation->cache,
                   &afs_vnode_cache_index_def,
                   volume,
                   &volume->cache);
#endif

    afs_get_vlocation(vlocation);
    volume->vlocation = vlocation;

    vlocation->vols[volume->type] = volume;

success:
    _debug("kAFS selected %s volume %08x",
           afs_voltypes[volume->type], volume->vid);
    up_write(&params->cell->vl_sem);
    afs_put_vlocation(vlocation);
    _leave(" = %p", volume);
    return volume;

    /* clean up */
error_up:
    up_write(&params->cell->vl_sem);
error:
    afs_put_vlocation(vlocation);
    _leave(" = %d", ret);
    return ERR_PTR(ret);

error_discard:
    up_write(&params->cell->vl_sem);

    for (loop = volume->nservers - 1; loop >= 0; loop--)
        afs_put_server(volume->servers[loop]);

    kfree(volume);
    goto error;
}
Example #15
/*
 * There is no need to lock the unionfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;
	struct unionfs_dentry_info *lower_root_info = NULL;
	int bindex, bstart, bend;
	struct inode *inode = NULL;

	if (!raw_data) {
		printk(KERN_ERR
		       "unionfs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* Allocate superblock private data */
	sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (unlikely(!UNIONFS_SB(sb))) {
		printk(KERN_CRIT "unionfs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out;
	}

	UNIONFS_SB(sb)->bend = -1;
	atomic_set(&UNIONFS_SB(sb)->generation, 1);
	init_rwsem(&UNIONFS_SB(sb)->rwsem);
	UNIONFS_SB(sb)->high_branch_id = -1; /* -1 == invalid branch ID */

	lower_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
		       "unionfs: read_super: error while parsing options "
		       "(err = %ld)\n", PTR_ERR(lower_root_info));
		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}
	if (lower_root_info->bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	bstart = lower_root_info->bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = lower_root_info->bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
		atomic_inc(&d->d_sb->s_active);
		unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
	}

	/* max Bytes is the maximum bytes from highest priority branch */
	sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	 * Our c/m/atime granularity is 1 ns because we may stack on file
	 * systems whose granularity is as good.  This is important for our
	 * time-based cache coherency.
	 */
	sb->s_time_gran = 1;

	sb->s_op = &unionfs_sops;

	/* get a new inode and allocate our root dentry */
	inode = unionfs_iget(sb, iunique(sb, UNIONFS_ROOT_INO));
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_dput;
	}
	sb->s_root = d_make_root(inode);
	if (unlikely(!sb->s_root)) {
		err = -ENOMEM;
		goto out_iput;
	}
	d_set_d_op(sb->s_root, &unionfs_dops);

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
	if (unlikely(err))
		goto out_freedpd;

	/* if we get here, there can be no error */

	/* Set the lower dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;
		struct vfsmount *m;

		d = lower_root_info->lower_paths[bindex].dentry;
		m = lower_root_info->lower_paths[bindex].mnt;

		unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
		unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
	}
	dbstart(sb->s_root) = bstart;
	dbend(sb->s_root) = bend;

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);

	if (atomic_read(&inode->i_count) <= 1)
		unionfs_fill_inode(sb->s_root, inode);

	/*
	 * No need to call interpose because we already have a positive
	 * dentry, which was instantiated by d_make_root.  Just need to
	 * d_rehash it.
	 */
	d_rehash(sb->s_root);

	unionfs_unlock_dentry(sb->s_root);
	goto out; /* all is well */

out_freedpd:
	if (UNIONFS_D(sb->s_root)) {
		kfree(UNIONFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	dput(sb->s_root);

out_iput:
	iput(inode);

out_dput:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (bindex = lower_root_info->bstart;
		     bindex <= lower_root_info->bend; bindex++) {
			struct dentry *d;
			d = lower_root_info->lower_paths[bindex].dentry;
			/* drop refs we took earlier */
			atomic_dec(&d->d_sb->s_active);
			path_put(&lower_root_info->lower_paths[bindex]);
		}
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
		lower_root_info = NULL;
	}

out_free:
	kfree(UNIONFS_SB(sb)->data);
	kfree(UNIONFS_SB(sb));
	sb->s_fs_info = NULL;

out:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
Example #16
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
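Note how alloc_dev() reserves the minor first, finishes initialising md privately, and only publishes the real pointer with idr_replace() at the very end, which is why the BUG_ON can insist the slot still held MINOR_ALLOCED. A kernel-style sketch of the same reserve-then-publish idea, using the current idr API with hypothetical names (registry, PLACEHOLDER, struct object and object_init are illustrative, not from dm):

#include <linux/idr.h>
#include <linux/spinlock.h>

#define PLACEHOLDER ((void *)1)	/* illustrative sentinel */

static DEFINE_IDR(registry);
static DEFINE_SPINLOCK(registry_lock);

/* Reserve an ID with a placeholder, initialise the object in
 * private, then swap the real pointer in as the last step so a
 * concurrent lookup never sees a half-built object. */
static int publish_object(struct object *obj)
{
	void *old;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&registry_lock);
	id = idr_alloc(&registry, PLACEHOLDER, 0, 0, GFP_NOWAIT);
	spin_unlock(&registry_lock);
	idr_preload_end();
	if (id < 0)
		return id;

	object_init(obj, id);	/* obj is still invisible here */

	spin_lock(&registry_lock);
	old = idr_replace(&registry, obj, id);	/* publish */
	spin_unlock(&registry_lock);
	BUG_ON(old != PLACEHOLDER);
	return id;
}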
Example #17
static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct ubi_volume_desc *ubi = sb->s_fs_info;
	struct ubifs_info *c;
	struct inode *root;
	int err;

	c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	spin_lock_init(&c->cnt_lock);
	spin_lock_init(&c->cs_lock);
	spin_lock_init(&c->buds_lock);
	spin_lock_init(&c->space_lock);
	spin_lock_init(&c->orphan_lock);
	init_rwsem(&c->commit_sem);
	mutex_init(&c->lp_mutex);
	mutex_init(&c->tnc_mutex);
	mutex_init(&c->log_mutex);
	mutex_init(&c->mst_mutex);
	mutex_init(&c->umount_mutex);
	init_waitqueue_head(&c->cmt_wq);
	c->buds = RB_ROOT;
	c->old_idx = RB_ROOT;
	c->size_tree = RB_ROOT;
	c->orph_tree = RB_ROOT;
	INIT_LIST_HEAD(&c->infos_list);
	INIT_LIST_HEAD(&c->idx_gc);
	INIT_LIST_HEAD(&c->replay_list);
	INIT_LIST_HEAD(&c->replay_buds);
	INIT_LIST_HEAD(&c->uncat_list);
	INIT_LIST_HEAD(&c->empty_list);
	INIT_LIST_HEAD(&c->freeable_list);
	INIT_LIST_HEAD(&c->frdi_idx_list);
	INIT_LIST_HEAD(&c->unclean_leb_list);
	INIT_LIST_HEAD(&c->old_buds);
	INIT_LIST_HEAD(&c->orph_list);
	INIT_LIST_HEAD(&c->orph_new);

	c->highest_inum = UBIFS_FIRST_INO;
	c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;

	ubi_get_volume_info(ubi, &c->vi);
	ubi_get_device_info(c->vi.ubi_num, &c->di);

	/* Re-open the UBI device in read-write mode */
	c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
	if (IS_ERR(c->ubi)) {
		err = PTR_ERR(c->ubi);
		goto out_free;
	}

	c->vfs_sb = sb;

	sb->s_fs_info = c;
	sb->s_magic = UBIFS_SUPER_MAGIC;
	sb->s_blocksize = UBIFS_BLOCK_SIZE;
	sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT;
	sb->s_dev = c->vi.cdev;
	sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c);
	if (c->max_inode_sz > MAX_LFS_FILESIZE)
		sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;

	if (c->rw_incompat) {
		ubifs_err("the file-system is not R/W-compatible");
		ubifs_msg("on-flash format version is w%d/r%d, but software "
			  "only supports up to version w%d/r%d", c->fmt_version,
			  c->ro_compat_version, UBIFS_FORMAT_VERSION,
			  UBIFS_RO_COMPAT_VERSION);
		err = -EROFS;
		goto out_close;
	}

	mutex_lock(&c->umount_mutex);
	err = mount_ubifs(c);
	if (err) {
		ubifs_assert(err < 0);
		goto out_unlock;
	}

	/* Read the root inode */
	root = ubifs_iget(sb, UBIFS_ROOT_INO);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out_umount;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_umount;
	}

	mutex_unlock(&c->umount_mutex);
	return 0;

out_umount:
	ubifs_umount(c);
out_unlock:
	mutex_unlock(&c->umount_mutex);
out_close:
	ubi_close_volume(c->ubi);
out_free:
	kfree(c);
	return err;
}
Example #18
int tracefile_init_arch(void)
{
	init_rwsem(&tracefile_sem);
	/* TODO: initialise per-cpu console buffers */
	return 0;
}
Example #19
/**
 * led_classdev_register - register a new object of led_classdev class.
 * @parent: The device to register.
 * @led_cdev: the led_classdev structure for this device.
 */
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
	int rc;

	led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
				      "%s", led_cdev->name);
	if (IS_ERR(led_cdev->dev))
		return PTR_ERR(led_cdev->dev);

	/* register the attributes */
	rc = device_create_file(led_cdev->dev, &led_class_attrs[2]);
	if (rc)
		goto err_out;

#ifdef CONFIG_LEDS_TRIGGERS
	init_rwsem(&led_cdev->trigger_lock);
#endif
	/* add to the list of leds */
	down_write(&leds_list_lock);
	list_add_tail(&led_cdev->node, &leds_list);
	up_write(&leds_list_lock);

	if (!led_cdev->max_brightness)
		led_cdev->max_brightness = LED_FULL;

	rc = device_create_file(led_cdev->dev, &led_class_attrs[3]);
	if (rc)
		goto err_out_attr_max;

	led_update_brightness(led_cdev);

#ifdef CONFIG_LEDS_TRIGGERS
#ifndef SUPPORT_LCD_ACL_CTL
	rc = device_create_file(led_cdev->dev, &led_class_attrs[4]);
#else
	rc = device_create_file(led_cdev->dev, &led_class_attrs[5]);
#endif
	if (rc)
		goto err_out_led_list;

	led_trigger_set_default(led_cdev);
#endif

#if 1	/* Archer custom feature */
	/* register the attributes */
	rc = device_create_file(led_cdev->dev, &led_class_attrs[1]);
	if (rc)
		goto err_out;

	led_update_lcd_gamma(led_cdev);

#ifdef SUPPORT_LCD_ACL_CTL
	/* register the attributes */
	rc = device_create_file(led_cdev->dev, &led_class_attrs[4]);
	if (rc)
		goto err_out;

	lcd_update_ACL_state(led_cdev);
#endif

	rc = device_create_file(led_cdev->dev, &led_class_attrs[0]);
	if (rc)
		goto err_out;

	led_update_flashlight(led_cdev);
#endif	/* Archer custom feature */
	printk(KERN_INFO "Registered led device: %s\n",	led_cdev->name);

	return 0;

#ifdef CONFIG_LEDS_TRIGGERS
err_out_led_list:
	device_remove_file(led_cdev->dev, &led_class_attrs[3]);

#endif
err_out_attr_max:
	device_remove_file(led_cdev->dev, &led_class_attrs[2]);
	device_remove_file(led_cdev->dev, &led_class_attrs[1]);
#ifdef SUPPORT_LCD_ACL_CTL	
	device_remove_file(led_cdev->dev, &led_class_attrs[4]);
#endif	
	list_del(&led_cdev->node);
err_out:
	device_unregister(led_cdev->dev);
	return rc;
}
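Registering attributes one device_create_file() at a time forces the driver to mirror every call in the unwind path, which is exactly where the vendor additions above get fragile. A hedged alternative sketch using an attribute group, so the files are created and removed as a unit (the demo_* names and the two attributes are illustrative, not from this driver):

/* attributes would be declared elsewhere with DEVICE_ATTR() */
static struct attribute *demo_led_attrs[] = {
	&dev_attr_brightness.attr,
	&dev_attr_max_brightness.attr,
	NULL,				/* list must be NULL-terminated */
};

static const struct attribute_group demo_led_group = {
	.attrs = demo_led_attrs,
};

/* in probe/register: all files are created, or none are */
rc = sysfs_create_group(&led_cdev->dev->kobj, &demo_led_group);
if (rc)
	goto err_out;

/* in the error/removal path: one call removes the whole group */
sysfs_remove_group(&led_cdev->dev->kobj, &demo_led_group);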
Example #20
static int init_libcfs_module(void)
{
	int rc;

	libcfs_arch_init();
	libcfs_init_nidstrings();
	init_rwsem(&cfs_tracefile_sem);
	mutex_init(&cfs_trace_thread_mutex);
	init_rwsem(&ioctl_list_sem);
	CFS_INIT_LIST_HEAD(&ioctl_list);
	init_waitqueue_head(&cfs_race_waitq);

	rc = libcfs_debug_init(5 * 1024 * 1024);
	if (rc < 0) {
		printk(KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
		return (rc);
	}

	rc = cfs_cpu_init();
	if (rc != 0)
		goto cleanup_debug;

#if LWT_SUPPORT
	rc = lwt_init();
	if (rc != 0) {
		CERROR("lwt_init: error %d\n", rc);
		goto cleanup_debug;
	}
#endif
	rc = misc_register(&libcfs_dev);
	if (rc) {
		CERROR("misc_register: error %d\n", rc);
		goto cleanup_lwt;
	}

	rc = cfs_wi_startup();
	if (rc) {
		CERROR("initialize workitem: error %d\n", rc);
		goto cleanup_deregister;
	}

	/* cap the thread count at 4; that should be enough for rehash */
	rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
	rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
				 rc, &cfs_sched_rehash);
	if (rc != 0) {
		CERROR("Startup workitem scheduler: error: %d\n", rc);
		goto cleanup_deregister;
	}

	rc = cfs_crypto_register();
	if (rc) {
		CERROR("cfs_crypto_register: error %d\n", rc);
		goto cleanup_wi;
	}

	rc = insert_proc();
	if (rc) {
		CERROR("insert_proc: error %d\n", rc);
		goto cleanup_crypto;
	}

	CDEBUG (D_OTHER, "portals setup OK\n");
	return 0;
 cleanup_crypto:
	cfs_crypto_unregister();
 cleanup_wi:
	cfs_wi_shutdown();
 cleanup_deregister:
	misc_deregister(&libcfs_dev);
 cleanup_lwt:
#if LWT_SUPPORT
	lwt_fini();
#endif
 cleanup_debug:
	libcfs_debug_cleanup();
	return rc;
}
Example #21
struct inode * ext2_new_inode (struct inode * dir, int mode)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int group, i;
	ino_t ino;
	struct inode * inode;
	struct ext2_group_desc * desc;
	struct ext2_super_block * es;
	int err;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
repeat:
	if (S_ISDIR(mode))
		group = find_group_dir(sb, dir->u.ext2_i.i_block_group);
	else 
		group = find_group_other(sb, dir->u.ext2_i.i_block_group);

	err = -ENOSPC;
	if (group == -1)
		goto fail;

	err = -EIO;
	bh = load_inode_bitmap (sb, group);
	if (IS_ERR(bh))
		goto fail2;

	i = ext2_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT2_INODES_PER_GROUP(sb));
	if (i >= EXT2_INODES_PER_GROUP(sb))
		goto bad_count;
	ext2_set_bit (i, bh->b_data);

	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}

	ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
	if (ino < EXT2_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%ld", group, ino);
		err = -EIO;
		goto fail2;
	}

	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	sb->s_dirt = 1;
	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext2_i.i_new_inode = 1;
	inode->u.ext2_i.i_flags = dir->u.ext2_i.i_flags & ~EXT2_BTREE_FL;
	if (S_ISLNK(mode))
		inode->u.ext2_i.i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
	inode->u.ext2_i.i_block_group = group;
	ext2_set_inode_flags(inode);
	insert_inode_hash(inode);
	inode->i_generation = event++;
	mark_inode_dirty(inode);

#ifdef CONFIG_EXT2_FS_XATTR
	init_rwsem(&inode->u.ext2_i.xattr_sem);
#endif

	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		err = -EDQUOT;
		goto fail3;
	}
	err = ext2_init_acl(inode, dir);
	if (err) {
		DQUOT_FREE_INODE(inode);
		goto fail3;
	}

	ext2_debug ("allocating inode %lu\n", inode->i_ino);
	return inode;

fail3:
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	return ERR_PTR(err);

fail2:
	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
	if (S_ISDIR(mode))
		desc->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
	mark_buffer_dirty(bh2);
fail:
	unlock_super(sb);
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(err);

bad_count:
	ext2_error (sb, "ext2_new_inode",
		    "Free inodes count corrupted in group %d",
		    group);
	/* Is it really ENOSPC? */
	err = -ENOSPC;
	if (sb->s_flags & MS_RDONLY)
		goto fail;

	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count = 0;
	mark_buffer_dirty(bh2);
	goto repeat;
}
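The inode number computed above is 1-based: ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1, so bit 0 of group 0 is inode 1 (a reserved inode). A tiny standalone check of the arithmetic, assuming an illustrative 1824 inodes per group:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long inodes_per_group = 1824;	/* assumed geometry */
	unsigned long group = 3, bit = 17;
	unsigned long ino = group * inodes_per_group + bit + 1;

	/* 3 * 1824 + 17 + 1 == 5490 */
	printf("group %lu, bit %lu -> inode %lu\n", group, bit, ino);
	assert(ino == 5490);
	return 0;
}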
Example #22
static int __devinit bd2802_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct bd2802_led *led;
	int ret, i;

	pr_warning("%s() -- start\n", __func__);

	led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
	if (!led) {
		dev_err(&client->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	led->client = client;
	i2c_set_clientdata(client, led);

	INIT_WORK(&led->work, bd2802_work_func);
	INIT_WORK(&led->touchkey_work, bd2802_touchkey_work_func);
	INIT_WORK(&led->ledmin_work, bd2802_ledmin_work_func);

	led->bd2802_wq = create_singlethread_workqueue("bd2802_wq");
	if (!led->bd2802_wq) {
		ret = -ENOMEM;
		goto failed_free_led;
	}

	led->touchkey_wq = create_singlethread_workqueue("touchkey_wq");
	if (!led->touchkey_wq) {
		ret = -ENOMEM;
		goto failed_destroy_wq;
	}

	led->ledmin_wq = create_singlethread_workqueue("ledmin_wq");
	if (!led->ledmin_wq) {
		ret = -ENOMEM;
		goto failed_destroy_touchkey_wq;
	}

	bd2802_i2c_client = led->client;
	/* Default attributes */
	led->wave_pattern = BD2802_PATTERN_FULL;
	led->blink_enable = 0;
	led->led_state = BD2802_SEQ;
	led->key_led = ALL;
	led->key_direction = FORWARD;
	led->led_counter = 0;
#if defined(BLINK_ON_BOOTING)
	led->white_current = BD2802_CURRENT_WHITE_MAX;
	led->blue_current = BD2802_CURRENT_000;
#else
	led->white_current = BD2802_CURRENT_WHITE_MAX;
	led->blue_current = BD2802_CURRENT_BLUE_MAX;
#endif

	init_rwsem(&led->rwsem);

	for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
		ret = device_create_file(&led->client->dev,
						bd2802_attributes[i]);
		if (ret) {
			dev_err(&led->client->dev, "failed: sysfs file %s\n",
					bd2802_attributes[i]->attr.name);
			goto failed_unregister_dev_file;
		}
	}

	hrtimer_init(&led->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	led->timer.function = bd2802_timer_func;
#if !defined(BLINK_ON_BOOTING)
	hrtimer_start(&led->timer, ktime_set(4, 0), HRTIMER_MODE_REL);
#endif
	
	hrtimer_init(&led->touchkey_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	led->touchkey_timer.function = bd2802_touchkey_timer_func;
	
	hrtimer_init(&led->ledmin_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	led->ledmin_timer.function = bd2802_ledmin_timer_func;
	

#ifdef CONFIG_HAS_EARLYSUSPEND
	led->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
	led->early_suspend.suspend = bd2802_early_suspend;
	led->early_suspend.resume = bd2802_late_resume;
	register_early_suspend(&led->early_suspend);
#endif

	bd2802_configure(led);
#if defined(BLINK_ON_BOOTING)
	led->blink_enable = 1;
#endif
	bd2802_on(led);
	bd2802_enable(led);

//LGE_UPDATE
	led->led_state=BD2802_ON;
//LGE_UPDATE
	return 0;

failed_unregister_dev_file:
	for (i--; i >= 0; i--)
		device_remove_file(&led->client->dev, bd2802_attributes[i]);
	destroy_workqueue(led->ledmin_wq);
failed_destroy_touchkey_wq:
	destroy_workqueue(led->touchkey_wq);
failed_destroy_wq:
	destroy_workqueue(led->bd2802_wq);
failed_free_led:
	kfree(led);
	return ret;
}
Example #23
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}
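The wmb() above orders the two groups of FIFO stores: the device must observe MIN/MAX before it observes NEXT_CMD/STOP, which point into the region those bounds describe. A generic kernel-style sketch of the same publish-bounds-before-pointers pattern (the regs pointer and RING_* offsets are hypothetical):

	/* phase 1: make the ring bounds visible to the device */
	iowrite32(lo, regs + RING_MIN);
	iowrite32(hi, regs + RING_MAX);
	wmb();	/* bounds must reach the device before the pointers */
	/* phase 2: only now expose pointers into that region */
	iowrite32(lo, regs + RING_HEAD);
	iowrite32(lo, regs + RING_TAIL);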
Example #24
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not taken by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the background GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* after POR, we can run the background GC thread */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
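The try_onemore/retry pair above gives the mount exactly one second attempt, with need_fsck forced on for that attempt. The control flow distilled into a runnable sketch (do_mount here is a stand-in for the whole fill routine, failing once and succeeding under fsck):

#include <stdbool.h>
#include <stdio.h>

static int do_mount(bool need_fsck)
{
	return need_fsck ? 0 : -1;	/* hypothetical fallible step */
}

int main(void)
{
	bool retry = true, need_fsck = false;
	int err;

try_onemore:
	err = do_mount(need_fsck);
	if (err) {
		need_fsck = true;	/* force fsck on the next pass */
		if (retry) {
			retry = false;	/* give only one more chance */
			goto try_onemore;
		}
	}
	printf("mount %s\n", err ? "failed" : "succeeded");
	return err ? 1 : 0;
}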
Example #25
static int bd2802_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct bd2802_led *led;
	struct bd2802_led_platform_data *pdata;
	int ret, i;

	led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL);
	if (!led) {
		dev_err(&client->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	led->client = client;
	pdata = led->pdata = client->dev.platform_data;
	i2c_set_clientdata(client, led);

	/* Configure RESET GPIO (L: RESET, H: RESET cancel) */
	gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");

	/* Tacss = min 0.1ms */
	udelay(100);

	/* Detect BD2802GU */
	ret = bd2802_write_byte(client, BD2802_REG_CLKSETUP, 0x00);
	if (ret < 0) {
		dev_err(&client->dev, "failed to detect device\n");
		return ret;
	} else
		dev_info(&client->dev, "return 0x%02x\n", ret);

	/* To save the power, reset BD2802 after detecting */
	gpio_set_value(led->pdata->reset_gpio, 0);

	/* Default attributes */
	led->wave_pattern = BD2802_PATTERN_HALF;
	led->rgb_current = BD2802_CURRENT_032;

	init_rwsem(&led->rwsem);

	for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
		ret = device_create_file(&led->client->dev,
						bd2802_attributes[i]);
		if (ret) {
			dev_err(&led->client->dev, "failed: sysfs file %s\n",
					bd2802_attributes[i]->attr.name);
			goto failed_unregister_dev_file;
		}
	}

	ret = bd2802_register_led_classdev(led);
	if (ret < 0)
		goto failed_unregister_dev_file;

	return 0;

failed_unregister_dev_file:
	for (i--; i >= 0; i--)
		device_remove_file(&led->client->dev, bd2802_attributes[i]);
	return ret;
}
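Unlike the older probe in Example #22, this version allocates with devm_kzalloc(), so the driver core frees the allocation automatically when probe fails or the device is unbound, and the error paths need no kfree(). A minimal sketch of the managed-allocation pattern (the demo_* names are illustrative):

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct demo_data *data;	/* hypothetical driver state */

	/* freed by the driver core on probe failure or unbind */
	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	return 0;	/* later errors can simply be returned */
}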
Example #26
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;
	struct address_space *mapping;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;
	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
	if (!sdp->sd_lkstats) {
		kfree(sdp);
		return NULL;
	}

	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	gfs2_tune_init(&sdp->sd_tune);

	init_waitqueue_head(&sdp->sd_glock_wait);
	atomic_set(&sdp->sd_glock_disposal, 0);
	init_completion(&sdp->sd_locking_init);
	init_completion(&sdp->sd_wdack);
	spin_lock_init(&sdp->sd_statfs_spin);

	spin_lock_init(&sdp->sd_rindex_spin);
	sdp->sd_rindex_tree.rb_node = NULL;

	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);
	init_completion(&sdp->sd_journal_ready);

	INIT_LIST_HEAD(&sdp->sd_quota_list);
	mutex_init(&sdp->sd_quota_mutex);
	mutex_init(&sdp->sd_quota_sync_mutex);
	init_waitqueue_head(&sdp->sd_quota_wait);
	INIT_LIST_HEAD(&sdp->sd_trunc_list);
	spin_lock_init(&sdp->sd_trunc_lock);
	spin_lock_init(&sdp->sd_bitmap_lock);

	mapping = &sdp->sd_aspace;

	address_space_init_once(mapping);
	mapping->a_ops = &gfs2_rgrp_aops;
	mapping->host = sb->s_bdev->bd_inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;

	spin_lock_init(&sdp->sd_log_lock);
	atomic_set(&sdp->sd_log_pinned, 0);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
	spin_lock_init(&sdp->sd_ordered_lock);

	init_waitqueue_head(&sdp->sd_log_waitq);
	init_waitqueue_head(&sdp->sd_logd_waitq);
	spin_lock_init(&sdp->sd_ail_lock);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	atomic_set(&sdp->sd_log_in_flight, 0);
	atomic_set(&sdp->sd_reserving_log, 0);
	init_waitqueue_head(&sdp->sd_reserving_log_wait);
	init_waitqueue_head(&sdp->sd_log_flush_wait);
	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
	mutex_init(&sdp->sd_freeze_mutex);

	return sdp;
}
Example #27
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
		brelse(raw_super_buf);
		if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
			goto free_sb_buf;
	}
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sb, sbi, (char *)data))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	err = start_gc_thread(sbi);
	if (err)
		goto fail;

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
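Every example above calls init_rwsem() while the containing object is still private, before any other thread can reach the semaphore. A self-contained module sketch of the full lifecycle (the demo_* names are illustrative):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/rwsem.h>

static struct rw_semaphore demo_sem;	/* hypothetical example lock */
static int demo_value;

static int __init demo_init(void)
{
	/* initialise before the semaphore is visible to anyone else,
	 * exactly as the fill_super/probe functions above do */
	init_rwsem(&demo_sem);

	down_write(&demo_sem);		/* exclusive writer */
	demo_value = 42;
	up_write(&demo_sem);

	down_read(&demo_sem);		/* concurrent readers allowed */
	pr_info("demo_value = %d\n", demo_value);
	up_read(&demo_sem);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");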