Example #1
/**
 *	ipc_init_proc_interface	-  Create a proc interface for sysipc types using a seq_file interface.
 *	@path: Path in procfs
 *	@header: Banner to be printed at the beginning of the file.
 *	@ids: ipc id table to iterate.
 *	@show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path	= path;
	iface->header	= header;
	iface->ids	= ids;
	iface->show	= show;

	pde = proc_create_data(path,
			       S_IRUGO,        /* world readable */
			       NULL,           /* parent dir */
			       &sysvipc_proc_fops,
			       iface);
	if (!pde) {
		kfree(iface);
	}
}
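The kernel-doc above only covers the creation side. As a companion, here is a minimal, self-contained sketch of the consumer side of proc_create_data() — not taken from any project on this page; the names example_msg, example_show, example_fops and the entry name "proc_data_example" are illustrative. The data pointer passed as the last argument comes back in the open handler via PDE_DATA() and is handed to the seq_file machinery with single_open(). The sketch follows the pre-5.6 file_operations API used throughout these examples; newer kernels take a struct proc_ops and spell the accessor pde_data().

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char example_msg[] = "hello from procfs\n";

/* seq_file show callback: single_open() stored our data pointer in
 * m->private, so it is available on every read. */
static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, m->private);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns the pointer passed to proc_create_data() */
	return single_open(file, example_show, PDE_DATA(inode));
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	if (!proc_create_data("proc_data_example", 0444, NULL,
			      &example_fops, example_msg))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("proc_data_example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");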
Example #2
File: ipsec_proc.c Project: dkg/libreswan
int ipsec_proc_init(void)
{
	int error = 0;
	struct proc_dir_entry *item;
	struct ipsec_proc_list *it;

	/* create /proc/net/ipsec */
	proc_net_ipsec_dir = proc_mkdir("ipsec", PROC_NET);
	if (proc_net_ipsec_dir == NULL) {
		/* no point in continuing */
		return 1;
	}

	for (it = proc_items; it->name; it++) {
		if (it->dir) {
			item = proc_mkdir(it->name, *it->parent);
			*it->dir = item;
		} else
			item = proc_create_data(it->name, it->mode, *it->parent,
						&ipsec_proc_fops, it);
		if (!item)
			error |= 1;
	}

	/* now create some symlinks to provide compatibility */
	proc_symlink("ipsec_eroute", PROC_NET, "ipsec/eroute/all");
	proc_symlink("ipsec_spi",    PROC_NET, "ipsec/spi/all");
	proc_symlink("ipsec_spigrp", PROC_NET, "ipsec/spigrp/all");
#ifdef IPSEC_SA_RECOUNT_DEBUG
	proc_symlink("ipsec_saraw",  PROC_NET, "ipsec/saraw/all");
#endif
	proc_symlink("ipsec_tncfg",  PROC_NET, "ipsec/tncfg");
	proc_symlink("ipsec_version", PROC_NET, "ipsec/version");
	proc_symlink("ipsec_klipsdebug", PROC_NET, "ipsec/klipsdebug");

	return error;
}
Example #3
static void usbrh_create_proc(struct usbrh *dev)
{
    struct proc_dir_entry *proc_dir;
    struct proc_dir_entry *proc_file;
    unsigned int index;
    umode_t mode;
    int i;

    index = dev->index;

    if (index >= ARRAY_SIZE(USBRH_DIGIT_STR)) {
        pr_err("too many USBRH: %d", index);
        return;
    }

    proc_dir = proc_mkdir(USBRH_DIGIT_STR[index], usbrh_proc_base);
    if (proc_dir == NULL) {
        pr_err("Failed to create /proc/" USBRH_NAME "/%d", index);
        return;
    }

    for (i = 0; USBRH_ENTRY_LIST[i].name != NULL; i++) {
        mode = S_IFREG|S_IRUGO;
        if (usbrh_proc_ops[i].write != NULL) {
            mode |= S_IWUSR;
        }

        proc_file = proc_create_data(USBRH_ENTRY_LIST[i].name, mode, proc_dir,
                                     &usbrh_proc_ops[i], dev);

        if (proc_file == NULL) {
            pr_err("Failed to create /proc/" USBRH_NAME "/%d/%s",
                   index, USBRH_ENTRY_LIST[i].name);
        }
    }

    dev->proc_dir = proc_dir;
}
Example #4
static int __devinit msm_gpio_probe(struct platform_device *dev)
{
	int i, j = 0;
	int grp_irq;

	for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
		if (i - FIRST_GPIO_IRQ >=
			msm_gpio_chips[j].chip.base +
			msm_gpio_chips[j].chip.ngpio)
			j++;
		irq_set_chip_data(i, &msm_gpio_chips[j]);
		irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
					 handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	for (i = 0; i < dev->num_resources; i++) {
		grp_irq = platform_get_irq(dev, i);
		if (grp_irq < 0)
			return -ENXIO;

		irq_set_chained_handler(grp_irq, msm_gpio_irq_handler);
		irq_set_irq_wake(grp_irq, (i + 1));
	}

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		spin_lock_init(&msm_gpio_chips[i].lock);
		__raw_writel(0, msm_gpio_chips[i].regs.int_en);
		gpiochip_add(&msm_gpio_chips[i].chip);
	}
	proc_create_data("gdump", 0, NULL, &gdump_proc_fops, NULL);
	msm_gpio_buf = kzalloc(512, GFP_KERNEL);

	mb();
	return 0;
}
Example #5
int atm_proc_dev_register(struct atm_dev *dev)
{
	int error;

	/* No proc info */
	if (!dev->ops->proc_read)
		return 0;

	error = -ENOMEM;
	dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
	if (!dev->proc_name)
		goto err_out;

	dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
					   &proc_atm_dev_ops, dev);
	if (!dev->proc_entry)
		goto err_free_name;
	return 0;

err_free_name:
	kfree(dev->proc_name);
err_out:
	return error;
}
Example #6
File: jit_sched.c Project: lym/dd_primer
static void jit_create_proc(void)
{
	struct proc_dir_entry *entry;
	entry = proc_create_data("jit_sched", 0, NULL, &jit_proc_ops, NULL);
}
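This LDD-style snippet only creates the entry and discards the returned pointer. The matching cleanup is not shown in the snippet; a minimal module-exit counterpart (the function name is assumed for illustration) would simply be:

static void jit_remove_proc(void)
{
	/* remove the entry created by jit_create_proc(); NULL parent = /proc root */
	remove_proc_entry("jit_sched", NULL);
}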
Example #7
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	clear_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
		clear_opt(sbi, DISCARD);
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}

	/*
	 * If the filesystem is not mounted read-only,
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi);
	}

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give it only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
Example #8
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
#ifdef CONFIG_ACPI_PROCFS
	struct proc_dir_entry *entry = NULL;
#endif

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
	}
#ifdef CONFIG_ACPI_PROCFS
	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
#endif
	return 0;
}
Example #9
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	err = parse_options(sb, (char *)data);
	if (err)
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
//	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	mutex_init(&sbi->node_write);
	sbi->por_doing = false;
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		err = -EINVAL;
		goto free_root_inode;
	}

	sb->s_root = d_alloc_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
	}

	/*
	 * If the filesystem is not mounted read-only,
	 * start the gc_thread.
	 */
	if (!(sb->s_flags & MS_RDONLY)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
Example #10
static void add_file(struct super_block *sb, char *name,
		     int (*func) (struct seq_file *, void *))
{
	proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
			 &r_file_operations, func);
}
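Note that this reiserfs helper passes the show() routine itself as the data pointer. The matching r_file_operations is not reproduced on this page; the sketch below is illustrative only, assuming the per-superblock procdir was created with the super_block as its data (reiserfs uses proc_mkdir_data for this): the open handler recovers the function with PDE_DATA() and passes it straight to single_open(), while proc_get_parent_data() supplies the super_block as the seq_file private data.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Illustrative open handler: PDE_DATA(inode) is the show() routine that
 * add_file() registered; the parent directory's data becomes m->private. */
static int r_open_sketch(struct inode *inode, struct file *file)
{
	return single_open(file, PDE_DATA(inode),
			   proc_get_parent_data(inode));
}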
Example #11
/**
 *	Initialize logging configuration.  Schedules a work thread to
 *	load the configuration file once the file system is readable.
 **/
void BCMLOG_InitConfig(void *h)
{
	int value;
	struct device *dev = (struct device *)h;
	/*
	 *      disable all AP logging (CP logging is
	 *      handled by CP) [MobC00126731]
	 */
	memset(&g_config, 0x00, sizeof(g_config));

	/*
	 *      set default configuration
	 */
	SetConfigDefaults();

	/*
	 *      create the procfs entry
	 */
	g_proc_dir_entry =
	    proc_create_data(BCMLOG_CONFIG_PROC_FILE,
			      S_IRWXU | S_IRWXG | S_IRWXO, NULL,
				&g_proc_dir_fops, NULL);

	if (g_proc_dir_entry == NULL) {
		remove_proc_entry(BCMLOG_CONFIG_PROC_FILE, NULL);
	}

	strncpy(g_config.file_base, BCMLOG_DEFAULT_FILE_BASE, MAX_STR_NAME);
	strncpy(g_config.uart_dev, BCMLOG_DEFAULT_UART_DEV, MAX_STR_NAME);
	strncpy(g_config.acm_dev, BCMLOG_DEFAULT_ACM_DEV, MAX_STR_NAME);

	value = device_create_file(dev, &dev_attr_log);
	if (value < 0)
		pr_err("BCMLOG Init failed to create bcmlog log attribute\n");
	value = device_create_file(dev, &dev_attr_log_lock);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog log_lock attribute\n");
	value = device_create_file(dev, &dev_attr_cp_crash);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog cp crash log attribute\n");
	value = device_create_file(dev, &dev_attr_cp_crash_lock);
	if (value < 0)
		pr_err
	("BCMLOG Init failed to create bcmlog cp crash log lock attribute\n");
	value = device_create_file(dev, &dev_attr_ap_crash);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog ap crash log attribute\n");
	value = device_create_file(dev, &dev_attr_ap_crash_lock);
	if (value < 0)
		pr_err
	("BCMLOG Init failed to create bcmlog ap crash log lock attribute\n");
	value = device_create_file(dev, &dev_attr_file_base);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog file_base attribute\n");
	value = device_create_file(dev, &dev_attr_file_max);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog file max attribute\n");
	value = device_create_file(dev, &dev_attr_file_last);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog file last attribute\n");
	value = device_create_file(dev, &dev_attr_uart_dev);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog uart_dev attribute\n");
	value = device_create_file(dev, &dev_attr_acm_dev);
	if (value < 0)
		pr_err
	    ("BCMLOG Init failed to create bcmlog acm_dev attribute\n");
}
Example #12
File: ap8xLnxStat.c Project: 7LK/McWRT
int ap8x_stat_proc_register(struct net_device *dev)
{
#define WL_PROC(x) ap8x_proc_read##x
#define WL_CASE(x) \
	case x: \
	devstat[x].ap8x_proc->read_proc = WL_PROC( x); \
	break

	if(devstat_index >=(WL_MAXIMUM_INSTANCES))
	{
		printk("Error: more than %d instances not supported\n", WL_MAXIMUM_INSTANCES);
		return 1;
	}
	if (ap8x==NULL)
	{
		ap8x = proc_mkdir("ap8x", proc_net);
		if(!ap8x)
			return 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
		ap8x->owner = THIS_MODULE;
#endif
	}

	devstat[devstat_index].netdev = dev;

	sprintf(devstat[devstat_index].filename,  "%s_stats", dev->name);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	devstat[devstat_index].ap8x_proc = proc_create_data(devstat[devstat_index].filename, 0666 , ap8x, &ap8x_proc_file_ops, &devstat[devstat_index]);
#else
	devstat[devstat_index].ap8x_proc = create_proc_entry(devstat[devstat_index].filename, 0666 , ap8x);

	if(!devstat[devstat_index].ap8x_proc)
	{	
		printk("create_procfs_file %s failed\n", devstat[devstat_index].filename);
		return 1;
	}
		
	switch (devstat_index)
	{
		WL_CASE(0);
		WL_CASE(1);
		WL_CASE(2);
		WL_CASE(3);
		WL_CASE(4);
		WL_CASE(5);
		WL_CASE(6);
		WL_CASE(7);
		WL_CASE(8);
		WL_CASE(9);
		WL_CASE(10);
		WL_CASE(11);
		WL_CASE(12);
		WL_CASE(13);
		WL_CASE(14);
		WL_CASE(15);
		WL_CASE(16);
		WL_CASE(17);
		WL_CASE(18);
		WL_CASE(19);
		default:
			break;
	}			
	devstat[devstat_index].ap8x_proc->write_proc = ap8x_proc_write;
	devstat[devstat_index].ap8x_proc->nlink = 1;
#endif
	devstat_index++;
	return 0;
#undef WL_PROC
#undef WL_CASE
}
Example #13
static int _stp_create_procfs(const char *path, int num,
			      const struct file_operations *fops, int perm,
			      void *data) 
{  
	const char *p; char *next;
	struct proc_dir_entry *last_dir, *de;

	if (num >= STP_MAX_PROCFS_FILES) {
		_stp_error("Requested file number %d is larger than max (%d)\n", 
			   num, STP_MAX_PROCFS_FILES);
		return -1;
	}

	last_dir = _stp_proc_root;

	/* if no path, use default one */
	if (strlen(path) == 0)
		p = "command";
	else
		p = path;
	
#ifdef _STP_ALLOW_PROCFS_PATH_SUBDIRS
	while ((next = strchr(p, '/'))) {
		if (_stp_num_pde == STP_MAX_PROCFS_FILES)
			goto too_many;
		*next = 0;
		de = _stp_procfs_lookup(p, last_dir);
		if (de == NULL) {
			last_dir = proc_mkdir(p, last_dir);
			if (!last_dir) {
				_stp_error("Could not create directory \"%s\"\n", p);
				goto err;
			}
			_stp_pde[_stp_num_pde++] = last_dir;
#ifdef STAPCONF_PROCFS_OWNER
			last_dir->owner = THIS_MODULE;
#endif
			proc_set_user(last_dir, KUIDT_INIT(_stp_uid),
				      KGIDT_INIT(_stp_gid));
		}
		else {
			last_dir = de;
		}
		p = next + 1;
	}
#else  /* !_STP_ALLOW_PROCFS_PATH_SUBDIRS */
	if (strchr(p, '/') != NULL) {
		_stp_error("Could not create path \"%s\","
			   " contains subdirectories\n", p);
		goto err;
	}
#endif	/* !_STP_ALLOW_PROCFS_PATH_SUBDIRS */
	
	if (_stp_num_pde == STP_MAX_PROCFS_FILES)
		goto too_many;
	
	de = proc_create_data(p, perm, last_dir, fops, data);
	if (de == NULL) {
		_stp_error("Could not create file \"%s\" in path \"%s\"\n",
			   p, path);
		goto err;
	}
#ifdef STAPCONF_PROCFS_OWNER
	de->owner = THIS_MODULE;
#endif
	proc_set_user(de, KUIDT_INIT(_stp_uid), KGIDT_INIT(_stp_gid));
	_stp_pde[_stp_num_pde++] = de;
	return 0;
	
too_many:
	_stp_error("Attempted to open too many procfs files. Maximum is %d\n",
		   STP_MAX_PROCFS_FILES);
err:
	_stp_close_procfs();
	return -1;
}
Example #14
static int __init vtunerc_init(void)
{
	struct vtunerc_ctx *ctx = NULL;
	struct dvb_demux *dvbdemux;
	struct dmx_demux *dmx;
	int ret = -EINVAL, i, idx;

	printk(KERN_INFO "virtual DVB adapter driver, version "
			VTUNERC_MODULE_VERSION
			", (c) 2010-12 Honza Petrous, SmartImp.cz\n");

	request_module("dvb-core"); /* FIXME: dunno which way it should work :-/ */

	for (idx = 0; idx < config.devices; idx++) {
		ctx = kzalloc(sizeof(struct vtunerc_ctx), GFP_KERNEL);
		if (!ctx) {
			while(idx)
				kfree(vtunerc_tbl[--idx]);
			return -ENOMEM;
		}

		vtunerc_tbl[idx] = ctx;

		ctx->idx = idx;
		ctx->config = &config;
		ctx->ctrldev_request.type = -1;
		ctx->ctrldev_response.type = -1;
		init_waitqueue_head(&ctx->ctrldev_wait_request_wq);
		init_waitqueue_head(&ctx->ctrldev_wait_response_wq);

		// buffer
		ctx->kernel_buf = NULL;
		ctx->kernel_buf_size = 0;

		/* dvb */

		/* create new adapter */
		ret = dvb_register_adapter(&ctx->dvb_adapter, DRIVER_NAME,
					   THIS_MODULE, NULL, adapter_nr);
		if (ret < 0)
			goto err_kfree;

		ctx->dvb_adapter.priv = ctx;

		memset(&ctx->demux, 0, sizeof(ctx->demux));
		dvbdemux = &ctx->demux;
		dvbdemux->priv = ctx;
		dvbdemux->filternum = MAX_PIDTAB_LEN;
		dvbdemux->feednum = MAX_PIDTAB_LEN;
		dvbdemux->start_feed = vtunerc_start_feed;
		dvbdemux->stop_feed = vtunerc_stop_feed;
		dvbdemux->dmx.capabilities = 0;
		ret = dvb_dmx_init(dvbdemux);
		if (ret < 0)
			goto err_dvb_unregister_adapter;

		dmx = &dvbdemux->dmx;

		ctx->hw_frontend.source = DMX_FRONTEND_0;
		ctx->mem_frontend.source = DMX_MEMORY_FE;
		ctx->dmxdev.filternum = MAX_PIDTAB_LEN;
		ctx->dmxdev.demux = dmx;

		ret = dvb_dmxdev_init(&ctx->dmxdev, &ctx->dvb_adapter);
		if (ret < 0)
			goto err_dvb_dmx_release;

		ret = dmx->add_frontend(dmx, &ctx->hw_frontend);
		if (ret < 0)
			goto err_dvb_dmxdev_release;

		ret = dmx->add_frontend(dmx, &ctx->mem_frontend);
		if (ret < 0)
			goto err_remove_hw_frontend;

		ret = dmx->connect_frontend(dmx, &ctx->hw_frontend);
		if (ret < 0)
			goto err_remove_mem_frontend;

		sema_init(&ctx->xchange_sem, 1);
		sema_init(&ctx->ioctl_sem, 1);
		sema_init(&ctx->tswrite_sem, 1);

		/* init pid table */
		for (i = 0; i < MAX_PIDTAB_LEN; i++)
			ctx->pidtab[i] = PID_UNKNOWN;

#ifdef CONFIG_PROC_FS
		{
			char procfilename[64];

			sprintf(procfilename, VTUNERC_PROC_FILENAME,
					ctx->idx);
			ctx->procname = my_strdup(procfilename);
			if (proc_create_data(ctx->procname, 0, NULL,
							&vtunerc_read_proc_fops,
							ctx) == 0)
				printk(KERN_WARNING
					"vtunerc%d: Unable to register '%s' proc file\n",
					ctx->idx, ctx->procname);
		}
#endif
	}

	vtunerc_register_ctrldev(ctx);

out:
	return ret;

	dmx->disconnect_frontend(dmx);
err_remove_mem_frontend:
	dmx->remove_frontend(dmx, &ctx->mem_frontend);
err_remove_hw_frontend:
	dmx->remove_frontend(dmx, &ctx->hw_frontend);
err_dvb_dmxdev_release:
	dvb_dmxdev_release(&ctx->dmxdev);
err_dvb_dmx_release:
	dvb_dmx_release(dvbdemux);
err_dvb_unregister_adapter:
	dvb_unregister_adapter(&ctx->dvb_adapter);
err_kfree:
	kfree(ctx);
	goto out;
}
Example #15
//Camera key bring up -E
static int kpd_pdrv_probe(struct platform_device *pdev)
{

	int i, r;
	int err = 0;
	struct clk *kpd_clk = NULL;
	//Keypad porting - S
  #if 1
	struct pinctrl *pinctrl1;
	struct pinctrl_state *pins_default, *pins_eint_int;
  #endif
        //Keypad porting - E
	kpd_info("Keypad probe start!!!\n");

	/* kpd-clk should be controlled by the kpd driver, not depend on the default clock state */
	kpd_clk = devm_clk_get(&pdev->dev, "kpd-clk");
	if (!IS_ERR(kpd_clk)) {
		clk_prepare(kpd_clk);
		clk_enable(kpd_clk);
	} else {
		kpd_print("get kpd-clk fail, but not return, maybe kpd-clk is set by ccf.\n");
	}

	kp_base = of_iomap(pdev->dev.of_node, 0);
	if (!kp_base) {
		kpd_info("KP iomap failed\n");
		return -ENODEV;
	};

	kp_irqnr = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!kp_irqnr) {
		kpd_info("KP get irqnr failed\n");
		return -ENODEV;
	}
	kpd_info("kp base: 0x%p, addr:0x%p,  kp irq: %d\n", kp_base, &kp_base, kp_irqnr);
	/* initialize and register input device (/dev/input/eventX) */
	kpd_input_dev = input_allocate_device();
	if (!kpd_input_dev) {
		kpd_print("input allocate device fail.\n");
		return -ENOMEM;
	}

	kpd_input_dev->name = KPD_NAME;
	kpd_input_dev->id.bustype = BUS_HOST;
	kpd_input_dev->id.vendor = 0x2454;
	kpd_input_dev->id.product = 0x6500;
	kpd_input_dev->id.version = 0x0010;
	kpd_input_dev->open = kpd_open;

	kpd_get_dts_info(pdev->dev.of_node);

#ifdef CONFIG_ARCH_MT8173
	wake_lock_init(&pwrkey_lock, WAKE_LOCK_SUSPEND, "PWRKEY");
#endif

	/* fulfill custom settings */
	kpd_memory_setting();

	__set_bit(EV_KEY, kpd_input_dev->evbit);
//keypad bring up - S

#if 1  //for volume down key
  pinctrl1 = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(pinctrl1)) {
		err = PTR_ERR(pinctrl1);
		dev_err(&pdev->dev, "fwq Cannot find voldown pinctrl1!\n");
		return err;
	}

	pins_default = pinctrl_lookup_state(pinctrl1, "default");
	if (IS_ERR(pins_default)) {
		err = PTR_ERR(pins_default);
		dev_err(&pdev->dev, "fwq Cannot find voldown pinctrl default!\n");
	}

	pins_eint_int = pinctrl_lookup_state(pinctrl1, "kpd_pins_eint");
	if (IS_ERR(pins_eint_int)) {
		err = PTR_ERR(pins_eint_int);
		dev_err(&pdev->dev, "fwq Cannot find voldown pinctrl state_eint_int!\n");
		return err;
	}
#endif
	#if 0
	gpio_request(KPD_VOLUP , "KPD_KCOL1");
	gpio_direction_input(KPD_VOLUP);
	gpio_free(KPD_VOLUP);
	#endif
	pinctrl_select_state(pinctrl1, pins_eint_int);
//keypad bring up - E

	/**/
	err = hall_gpio_eint_setup(pdev);
	if (err!=0) {
		kpd_print("[Keypad] %s , hall_gpio_eint_setup failed (%d)\n", __FUNCTION__ , err );
	}

	proc_create_data("hall_out_status", 0444, NULL, &hall_out_status_fops, NULL);
	sdev.name = "hall_gpio";
	sdev.index = 0;
	sdev.state = 1;
	r = switch_dev_register(&sdev);
	if (r) {
		kpd_info("[Keypad] %s , register switch device failed (%d)\n", __FUNCTION__ , r);
		switch_dev_unregister(&sdev);
		return r;
	}
	/**/
	switch_set_state((struct switch_dev *)&sdev, 1);	// state initialization
	/**/
	/**/
	mutex_init(&hall_state_mutex);
	INIT_DELAYED_WORK(&hall_work, hall_work_func);
	/**/
	/**/

#if defined(CONFIG_KPD_PWRKEY_USE_EINT) || defined(CONFIG_KPD_PWRKEY_USE_PMIC)
	__set_bit(kpd_dts_data.kpd_sw_pwrkey, kpd_input_dev->keybit);
	kpd_keymap[8] = 0;
#endif
	if (!kpd_dts_data.kpd_use_extend_type) {
		for (i = 17; i < KPD_NUM_KEYS; i += 9)	/* only [8] works for Power key */
			kpd_keymap[i] = 0;
	}
	for (i = 0; i < KPD_NUM_KEYS; i++) {
		if (kpd_keymap[i] != 0)
			__set_bit(kpd_keymap[i], kpd_input_dev->keybit);
	}

#if KPD_AUTOTEST
	for (i = 0; i < ARRAY_SIZE(kpd_auto_keymap); i++)
		__set_bit(kpd_auto_keymap[i], kpd_input_dev->keybit);
#endif

#if KPD_HAS_SLIDE_QWERTY
	__set_bit(EV_SW, kpd_input_dev->evbit);
	__set_bit(SW_LID, kpd_input_dev->swbit);
#endif
	if (kpd_dts_data.kpd_sw_rstkey)
		__set_bit(kpd_dts_data.kpd_sw_rstkey, kpd_input_dev->keybit);
#ifdef KPD_KEY_MAP
	__set_bit(KPD_KEY_MAP, kpd_input_dev->keybit);
#endif
#ifdef CONFIG_MTK_MRDUMP_KEY
		__set_bit(KEY_RESTART, kpd_input_dev->keybit);
#endif
//Camera key porting
#if 1
	for (i = 0; i < KPD_CAMERA_NUM; i++) {
		if (kpd_camerakeymap[i] != 0)
	__set_bit(kpd_camerakeymap[i], kpd_input_dev->keybit);
		kpd_info("[Keypad] set kpd_camerakeymap[%d]" , i);
		}
#endif
	kpd_input_dev->dev.parent = &pdev->dev;
	r = input_register_device(kpd_input_dev);
	if (r) {
		kpd_info("register input device failed (%d)\n", r);
		input_free_device(kpd_input_dev);
		return r;
	}

	/* register device (/dev/mt6575-kpd) */
	kpd_dev.parent = &pdev->dev;
	r = misc_register(&kpd_dev);
	if (r) {
		kpd_info("register device failed (%d)\n", r);
		input_unregister_device(kpd_input_dev);
		return r;
	}

	wake_lock_init(&kpd_suspend_lock, WAKE_LOCK_SUSPEND, "kpd wakelock");

	/* register IRQ and EINT */
	kpd_set_debounce(kpd_dts_data.kpd_key_debounce);
	r = request_irq(kp_irqnr, kpd_irq_handler, IRQF_TRIGGER_NONE, KPD_NAME, NULL);
	if (r) {
		kpd_info("register IRQ failed (%d)\n", r);
		misc_deregister(&kpd_dev);
		input_unregister_device(kpd_input_dev);
		return r;
	}
	mt_eint_register();

        //Camera key bring up -S
        printk("camera_key_setup_eint() START!!\n");
	kpd_camerakey_setup_eint();
	printk("camera_key_setup_eint() Done!!\n");
	//Camera key bring up -E

#ifndef KPD_EARLY_PORTING	/* added to avoid early-porting build errors; the macro is defined in a custom file */
	long_press_reboot_function_setting();	/* /API 4 for kpd long press reboot function setting */
#endif
	hrtimer_init(&aee_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aee_timer.function = aee_timer_func;

#if AEE_ENABLE_5_15
	hrtimer_init(&aee_timer_5s, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aee_timer_5s.function = aee_timer_5s_func;
#endif

#ifdef PWK_DUMP
       hrtimer_init(&aee_timer_powerkey_30s, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
       aee_timer_powerkey_30s.function = aee_timer_30s_func;
#endif

	err = kpd_create_attr(&kpd_pdrv.driver);
	if (err) {
		kpd_info("create attr file fail\n");
		kpd_delete_attr(&kpd_pdrv.driver);
		return err;
	}
	kpd_info("%s Done\n", __func__);
	return 0;
}
Example #16
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %ld",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give it only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
Example #17
int register_tx_isp_vic_device(struct platform_device *pdev, struct v4l2_device *v4l2_dev)
{
	struct tx_isp_subdev_platform_data *pdata = pdev->dev.platform_data;
	struct tx_isp_vic_driver *vsd = NULL;
	struct resource *res = NULL;
	struct v4l2_subdev *sd = NULL;
	struct media_pad *pads = NULL;
	struct media_entity *me = NULL;
	struct proc_dir_entry *proc;
	int ret;

	if(!pdata){
		v4l2_err(v4l2_dev, "The platform_data of csi is NULL!\n");
		ret = -ISP_ERROR;
		goto exit;
	};
	vsd = (struct tx_isp_vic_driver *)kzalloc(sizeof(*vsd), GFP_KERNEL);
	if(!vsd){
		v4l2_err(v4l2_dev, "Failed to allocate sensor device\n");
		ret = -ISP_ERROR;
		goto exit;
	}
	vsd->pdata = pdata;
	vsd->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if(res){
		res = request_mem_region(res->start,
				res->end - res->start + 1, dev_name(&pdev->dev));
		if (!res) {
			v4l2_err(v4l2_dev, "Not enough memory for resources\n");
			ret = -EBUSY;
			goto mem_region_failed;
		}
		vsd->base = ioremap(res->start, res->end - res->start + 1);
		if (!vsd->base) {
			v4l2_err(v4l2_dev, "Unable to ioremap registers!\n");
			ret = -ENXIO;
			goto ioremap_failed;
		}
	}
	vsd->res = res;
	sd = &vsd->sd;
	pads = vsd->pads;
	me = &sd->entity;

	v4l2_subdev_init(sd, &vic_subdev_ops);
	strlcpy(sd->name, "TX-ISP-VIC-SUBDEV ", sizeof(sd->name));

	sd->grp_id = pdata->grp_id ;	/* group ID for isp subdevs */
	v4l2_set_subdevdata(sd, vsd);

	pads[TX_ISP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	pads[TX_ISP_PAD_LINK].flags = MEDIA_PAD_FL_SINK;

	me->ops = &vic_media_ops;
//	me->parent = v4l2_dev->mdev;
	ret = media_entity_init(me, TX_ISP_PADS_NUM, pads, 0);
	if (ret < 0){
		v4l2_err(v4l2_dev, "Failed to init media entity!\n");
		ret = -ISP_ERROR;
		goto entity_init_failed;
	}
	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0){
		v4l2_err(v4l2_dev, "Failed to register vic-subdev!\n");
		ret = -ISP_ERROR;
		goto register_failed;
	}

	ret = isp_vic_init_clk(vsd);
	if(ret < 0){
		v4l2_err(v4l2_dev, "Failed to init isp's clks!\n");
		ret = -ISP_ERROR;
	}
	dump_vsd=vsd;

	/* create the proc node for printing isp info */
	proc = jz_proc_mkdir("vic");
	if (!proc) {
		vsd->proc_vic = NULL;
		v4l2_err(v4l2_dev, "create dev_attr_isp_info failed!\n");
	} else {
		vsd->proc_vic = proc;
	}
	proc_create_data("isp_vic_frd", S_IRUGO, proc, &isp_vic_frd_fops, (void *)vsd);

	return ISP_SUCCESS;
register_failed:
	media_entity_cleanup(me);
entity_init_failed:
	if(vsd->base)
		iounmap(vsd->base);
ioremap_failed:
	if(res)
		release_mem_region(res->start,res->end - res->start + 1);
mem_region_failed:
	kfree(vsd);
exit:
	return ret;
}
Example #18
/*
 * ---------------------------------------------------------------------------
 *  register_unifi_sdio
 *
 *      This function is called from the Probe (or equivalent) method of
 *      the SDIO driver when a UniFi card is detected.
 *      We allocate the Linux net_device struct, initialise the HIP core
 *      lib, create the char device nodes and start the userspace helper
 *      to initialise the device.
 *
 *  Arguments:
 *      sdio_dev        Pointer to SDIO context handle to use for all
 *                      SDIO ops.
 *      bus_id          A small number indicating the SDIO card position on the
 *                      bus. Typically this is the slot number, e.g. 0, 1 etc.
 *                      Valid values are 0 to MAX_UNIFI_DEVS-1.
 *      dev             Pointer to kernel device manager struct.
 *
 *  Returns:
 *      Pointer to the unifi instance, or NULL on error.
 * ---------------------------------------------------------------------------
 */
static unifi_priv_t *
register_unifi_sdio(CsrSdioFunction *sdio_dev, int bus_id, struct device *dev)
{
    unifi_priv_t *priv = NULL;
    int r = -1;
    CsrResult csrResult;

    if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
        unifi_error(priv, "register_unifi_sdio: invalid device %d\n",
                bus_id);
        return NULL;
    }

    down(&Unifi_instance_mutex);

    if (In_use[bus_id] != UNIFI_DEV_NOT_IN_USE) {
        unifi_error(priv, "register_unifi_sdio: device %d is already in use\n",
                bus_id);
        goto failed0;
    }


    /* Allocate device private and net_device structs */
    priv = uf_alloc_netdevice(sdio_dev, bus_id);
    if (priv == NULL) {
        unifi_error(priv, "Failed to allocate driver private\n");
        goto failed0;
    }

    priv->unifi_device = dev;

    SET_NETDEV_DEV(priv->netdev[0], dev);

    /* We are not ready to send data yet. */
    netif_carrier_off(priv->netdev[0]);

    /* Allocate driver context. */
    priv->card = unifi_alloc_card(priv->sdio, priv);
    if (priv->card == NULL) {
        unifi_error(priv, "Failed to allocate UniFi driver card struct.\n");
        goto failed1;
    }

    if (Unifi_instances[bus_id]) {
        unifi_error(priv, "Internal error: instance for slot %d is already taken\n",
                bus_id);
    }
    Unifi_instances[bus_id] = priv;
    In_use[bus_id] = UNIFI_DEV_IN_USE;

    /* Save the netdev_priv for use by the netdev event callback mechanism */
    Unifi_netdev_instances[bus_id * CSR_WIFI_NUM_INTERFACES] = netdev_priv(priv->netdev[0]);

    /* Initialise the mini-coredump capture buffers */
    csrResult = unifi_coredump_init(priv->card, (u16)coredump_max);
    if (csrResult != CSR_RESULT_SUCCESS) {
        unifi_error(priv, "Couldn't allocate mini-coredump buffers\n");
    }

    /* Create the character device nodes */
    r = uf_create_device_nodes(priv, bus_id);
    if (r) {
        goto failed1;
    }

    /*
     * We use the slot number as unifi device index.
     */
    scnprintf(priv->proc_entry_name, 64, "driver/unifi%d", priv->instance);
    /*
     * The following complex casting is in place in order to eliminate 64-bit compilation warning
     * "cast to/from pointer from/to integer of different size"
     */
    if (!proc_create_data(priv->proc_entry_name, 0, NULL,
			  &uf_proc_fops, (void *)(long)priv->instance))
    {
        unifi_error(priv, "unifi: can't create /proc/driver/unifi\n");
    }

    /* Allocate the net_device for interfaces other than 0. */
    {
        int i;
        priv->totalInterfaceCount =0;

        for(i=1;i<CSR_WIFI_NUM_INTERFACES;i++)
        {
            if( !uf_alloc_netdevice_for_other_interfaces(priv,i) )
            {
                /* An error occurred while allocating the net_device for
                 * interface[i]. The net_devices were allocated for the
                 * interfaces with id < i. Don't worry, all the allocated
                 * net_devices will be released when control goes to the
                 * label failed0.
                 */
                unifi_error(priv, "Failed to allocate driver private for interface[%d]\n",i);
                goto failed0;
            }
            else
            {
                SET_NETDEV_DEV(priv->netdev[i], dev);

                /* We are not ready to send data yet. */
                netif_carrier_off(priv->netdev[i]);

                /* Save the netdev_priv for use by the netdev event callback mechanism */
                Unifi_netdev_instances[bus_id * CSR_WIFI_NUM_INTERFACES + i] = netdev_priv(priv->netdev[i]);
            }
        }

        for(i=0;i<CSR_WIFI_NUM_INTERFACES;i++)
        {
            netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
            interfacePriv->netdev_registered=0;
        }
    }

#ifdef CSR_WIFI_RX_PATH_SPLIT
    if (signal_buffer_init(priv, CSR_WIFI_RX_SIGNAL_BUFFER_SIZE))
    {
        unifi_error(priv,"Failed to allocate shared memory for T-H signals\n");
        goto failed2;
    }
    priv->rx_workqueue = create_singlethread_workqueue("rx_workq");
    if (priv->rx_workqueue == NULL) {
        unifi_error(priv,"create_singlethread_workqueue failed \n");
        goto failed3;
    }
    INIT_WORK(&priv->rx_work_struct, rx_wq_handler);
#endif

#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
    if (log_hip_signals)
    {
        uf_register_hip_offline_debug(priv);
    }
#endif

    /* Initialise the SME related threads and parameters */
    r = uf_sme_init(priv);
    if (r) {
        unifi_error(priv, "SME initialisation failed.\n");
        goto failed4;
    }

    /*
     * Run the userspace helper program (unififw) to perform
     * the device initialisation.
     */
    unifi_trace(priv, UDBG1, "run UniFi helper app...\n");
    r = uf_run_unifihelper(priv);
    if (r) {
        unifi_notice(priv, "unable to run UniFi helper app\n");
        /* Not a fatal error. */
    }

    up(&Unifi_instance_mutex);

    return priv;

failed4:
#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
if (log_hip_signals)
{
    uf_unregister_hip_offline_debug(priv);
}
#endif
#ifdef CSR_WIFI_RX_PATH_SPLIT
    flush_workqueue(priv->rx_workqueue);
    destroy_workqueue(priv->rx_workqueue);
failed3:
    signal_buffer_free(priv,CSR_WIFI_RX_SIGNAL_BUFFER_SIZE);
failed2:
#endif
    /* Remove the device nodes */
    uf_destroy_device_nodes(priv);
failed1:
    /* Deregister priv->netdev_client */
    ul_deregister_client(priv->netdev_client);

failed0:
    if (priv && priv->card) {
        unifi_coredump_free(priv->card);
        unifi_free_card(priv->card);
    }
    if (priv) {
        uf_free_netdevice(priv);
    }

    up(&Unifi_instance_mutex);

    return NULL;
} /* register_unifi_sdio() */
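The "complex casting" comment before the proc_create_data() call above packs the small integer priv->instance into the data pointer as (void *)(long) to stay 64-bit clean. The driver's uf_proc_fops is not shown on this page; the sketch below uses made-up names and simply reverses the cast on the reader side.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int uf_show_sketch(struct seq_file *m, void *v)
{
	/* single_open() stashed the packed instance number in m->private */
	int instance = (int)(long)m->private;

	seq_printf(m, "unifi instance %d\n", instance);
	return 0;
}

static int uf_open_sketch(struct inode *inode, struct file *file)
{
	/* undo the (void *)(long)priv->instance packing done at creation time */
	return single_open(file, uf_show_sketch, PDE_DATA(inode));
}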
Example #19
/**
 *  @brief This function initializes proc entry
 *
 *  @param priv     A pointer to bt_private structure
 *  @param m_dev    A pointer to struct m_dev
 *  @param seq      Sequence number
 *
 *  @return	BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_proc_init(bt_private *priv, struct m_dev *m_dev, int seq)
{
	int ret = BT_STATUS_SUCCESS;
	struct proc_dir_entry *entry;
	int i, j;
	ENTER();

	bpriv = priv;
	memset(cmd52_string, 0, CMD52_STR_LEN);
	if (proc_mbt) {
		priv->dev_proc[seq].proc_entry =
			proc_mkdir(m_dev->name, proc_mbt);
		if (!priv->dev_proc[seq].proc_entry) {
			PRINTM(ERROR, "BT: Could not mkdir %s!\n", m_dev->name);
			ret = BT_STATUS_FAILURE;
			goto done;
		}
		priv->dev_proc[seq].pfiles =
			kmalloc(sizeof(proc_files), GFP_ATOMIC);
		if (!priv->dev_proc[seq].pfiles) {
			PRINTM(ERROR,
			       "BT: Could not alloc memory for pfile!\n");
			ret = BT_STATUS_FAILURE;
			goto done;
		}
		memcpy((u8 *)priv->dev_proc[seq].pfiles, (u8 *)proc_files,
		       sizeof(proc_files));
		priv->dev_proc[seq].num_proc_files = ARRAY_SIZE(proc_files);
		for (j = 0; j < priv->dev_proc[seq].num_proc_files; j++)
			priv->dev_proc[seq].pfiles[j].pdata = NULL;
		for (j = 0; j < priv->dev_proc[seq].num_proc_files; j++) {
			priv->dev_proc[seq].pfiles[j].pdata =
				kmalloc(priv->dev_proc[seq].pfiles[j].
					num_items * sizeof(struct item_data),
					GFP_ATOMIC);
			if (!priv->dev_proc[seq].pfiles[j].pdata) {
				PRINTM(ERROR,
				       "BT: Could not alloc memory for pdata!\n");
				ret = BT_STATUS_FAILURE;
				goto done;
			}
			memcpy((u8 *)priv->dev_proc[seq].pfiles[j].pdata,
			       (u8 *)proc_files[j].pdata,
			       priv->dev_proc[seq].pfiles[j].num_items *
			       sizeof(struct item_data));
			for (i = 0; i < priv->dev_proc[seq].pfiles[j].num_items;
			     i++) {
				if (priv->dev_proc[seq].pfiles[j].
				    pdata[i].flag & OFFSET_BT_DEV)
					priv->dev_proc[seq].pfiles[j].pdata[i].
						addr =
						priv->dev_proc[seq].pfiles[j].
						pdata[i].offset +
						(t_ptr)&priv->bt_dev;
				if (priv->dev_proc[seq].pfiles[j].
				    pdata[i].flag & OFFSET_BT_ADAPTER)
					priv->dev_proc[seq].pfiles[j].pdata[i].
						addr =
						priv->dev_proc[seq].pfiles[j].
						pdata[i].offset +
						(t_ptr)priv->adapter;
			}
			priv->dev_proc[seq].pfiles[j].pbt = priv;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
			entry = proc_create_data(proc_files[j].name,
						 S_IFREG | proc_files[j].
						 fileflag,
						 priv->dev_proc[seq].proc_entry,
						 proc_files[j].fops,
						 &priv->dev_proc[seq].
						 pfiles[j]);
			if (entry == NULL)
#else
			entry = create_proc_entry(proc_files[j].name,
						  S_IFREG | proc_files[j].
						  fileflag,
						  priv->dev_proc[seq].
						  proc_entry);
			if (entry) {
				entry->data = &priv->dev_proc[seq].pfiles[j];
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
				entry->owner = THIS_MODULE;
#endif
				entry->proc_fops = proc_files[j].fops;
			} else
#endif
				PRINTM(MSG, "BT: Fail to create proc %s\n",
				       proc_files[j].name);
		}
	}
done:
	if (ret == BT_STATUS_FAILURE) {
		if (priv->dev_proc[seq].proc_entry) {
			remove_proc_entry(m_dev->name, proc_mbt);
			priv->dev_proc[seq].proc_entry = NULL;
		}
		if (priv->dev_proc[seq].pfiles) {
			for (j = 0; j < priv->dev_proc[seq].num_proc_files; j++) {
				if (priv->dev_proc[seq].pfiles[j].pdata) {
					kfree(priv->dev_proc[seq].pfiles[j].
					      pdata);
					priv->dev_proc[seq].pfiles[j].pdata =
						NULL;
				}
			}
			kfree(priv->dev_proc[seq].pfiles);
			priv->dev_proc[seq].pfiles = NULL;
		}
	}
	LEAVE();
	return ret;
}
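bt_proc_init() above only covers setup. The driver's real teardown is not part of this example; the following is a hypothetical mirror-image routine, reusing the names from the example (proc_mbt, proc_files, dev_proc, pfiles), that removes each proc file by name, frees the copied tables, and finally removes the per-device directory.

/* Hypothetical teardown sketch mirroring bt_proc_init(); not the driver's real code. */
static void
bt_proc_remove_sketch(bt_private *priv, struct m_dev *m_dev, int seq)
{
	int j;

	if (priv->dev_proc[seq].pfiles) {
		for (j = 0; j < priv->dev_proc[seq].num_proc_files; j++) {
			if (priv->dev_proc[seq].proc_entry)
				remove_proc_entry(proc_files[j].name,
						  priv->dev_proc[seq].proc_entry);
			kfree(priv->dev_proc[seq].pfiles[j].pdata);
		}
		kfree(priv->dev_proc[seq].pfiles);
		priv->dev_proc[seq].pfiles = NULL;
	}
	if (priv->dev_proc[seq].proc_entry) {
		remove_proc_entry(m_dev->name, proc_mbt);
		priv->dev_proc[seq].proc_entry = NULL;
	}
}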
Example #20
void rtc_proc_add_device(struct rtc_device *rtc)
{
	if (rtc->id == 0)
		proc_create_data("driver/rtc", 0, NULL, &rtc_proc_fops, rtc);
}
Example #21
void rtl_proc_add_one(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct proc_dir_entry *entry;

	snprintf(rtlpriv->dbg.proc_name, 18, "%x-%x-%x-%x-%x-%x",
		rtlefuse->dev_addr[0], rtlefuse->dev_addr[1],
		rtlefuse->dev_addr[2], rtlefuse->dev_addr[3],
		rtlefuse->dev_addr[4], rtlefuse->dev_addr[5]);

	rtlpriv->dbg.proc_dir = proc_mkdir(rtlpriv->dbg.proc_name, proc_topdir);
	if (!rtlpriv->dbg.proc_dir) {
		RT_TRACE(COMP_INIT, DBG_EMERG, ("Unable to init "
			"/proc/net/%s/%s\n", rtlpriv->cfg->name,
			rtlpriv->dbg.proc_name));
		return;
	}

	entry = proc_create_data("mac-0", S_IFREG | S_IRUGO,
				  rtlpriv->dbg.proc_dir, &file_ops_mac_0, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, DBG_EMERG,
			 ("Unable to initialize /proc/net/%s/%s/mac-0\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-1", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_1, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-1\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-2", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_2, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-2\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-3", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_3, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-3\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-4", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_4, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-4\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-5", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_5, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-5\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-6", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_6, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-6\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("mac-7", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_mac_7, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/mac-7\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-8", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_8, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-8\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-9", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_9, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-9\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-a", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_a, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-a\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-b", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_b, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-b\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-c", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_c, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-c\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-d", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_d, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-d\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-e", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_e, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-e\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("bb-f", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_bb_f, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/bb-f\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("rf-a", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_rf_a, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/rf-a\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("rf-b", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_rf_b, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/rf-b\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("cam-1", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_cam_1, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/cam-1\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("cam-2", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_cam_2, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/cam-2\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));

	entry = proc_create_data("cam-3", S_IFREG | S_IRUGO,
				 rtlpriv->dbg.proc_dir, &file_ops_cam_3, hw);
	if (!entry)
		RT_TRACE(COMP_INIT, COMP_ERR,
			 ("Unable to initialize /proc/net/%s/%s/cam-3\n",
			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
}
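Every call above differs only in the entry name and its file_operations, so the function can be collapsed into a table-driven loop without changing which files are created (the per-entry trace level is unified to COMP_ERR here). A sketch reusing the symbols from the example as-is:

static const struct {
	const char *name;
	const struct file_operations *fops;
} rtl_proc_files[] = {
	{ "mac-0", &file_ops_mac_0 }, { "mac-1", &file_ops_mac_1 },
	{ "mac-2", &file_ops_mac_2 }, { "mac-3", &file_ops_mac_3 },
	{ "mac-4", &file_ops_mac_4 }, { "mac-5", &file_ops_mac_5 },
	{ "mac-6", &file_ops_mac_6 }, { "mac-7", &file_ops_mac_7 },
	{ "bb-8", &file_ops_bb_8 },   { "bb-9", &file_ops_bb_9 },
	{ "bb-a", &file_ops_bb_a },   { "bb-b", &file_ops_bb_b },
	{ "bb-c", &file_ops_bb_c },   { "bb-d", &file_ops_bb_d },
	{ "bb-e", &file_ops_bb_e },   { "bb-f", &file_ops_bb_f },
	{ "rf-a", &file_ops_rf_a },   { "rf-b", &file_ops_rf_b },
	{ "cam-1", &file_ops_cam_1 }, { "cam-2", &file_ops_cam_2 },
	{ "cam-3", &file_ops_cam_3 },
};

/* Sketch: create the same entries as above from a table. */
static void rtl_proc_create_files(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int i;

	for (i = 0; i < ARRAY_SIZE(rtl_proc_files); i++) {
		if (!proc_create_data(rtl_proc_files[i].name,
				      S_IFREG | S_IRUGO,
				      rtlpriv->dbg.proc_dir,
				      rtl_proc_files[i].fops, hw))
			RT_TRACE(COMP_INIT, COMP_ERR,
				 ("Unable to initialize /proc/net/%s/%s/%s\n",
				  rtlpriv->cfg->name, rtlpriv->dbg.proc_name,
				  rtl_proc_files[i].name));
	}
}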
Example #22
0
static int mic_dma_reg_show(struct seq_file *m, void *v)
{
	int i, j, chan_num, size, dtpr;
	struct mic_dma_ctx_t *dma_ctx = m->private;
	struct mic_dma_device *dma_dev = &dma_ctx->dma_dev;
	struct dma_channel *curr_chan;
	union md_mic_dma_desc desc;

	seq_printf(m, "========================================"
				"=======================================\n");
	seq_printf(m, "SBOX_DCR: %#x\n",
				mic_sbox_read_mmio(dma_dev->mm_sbox, SBOX_DCR));
	seq_printf(m, "DMA Channel Registers\n");
	seq_printf(m, "========================================"
				"=======================================\n");
	seq_printf(m, "%-10s| %-10s %-10s %-10s %-10s %-10s %-10s"
#ifdef CONFIG_MK1OM
				  " %-10s %-11s %-14s %-10s"
#endif
				"\n", "Channel", "DCAR", "DTPR", "DHPR",
					"DRAR_HI", "DRAR_LO",
#ifdef CONFIG_MK1OM
					"DSTATWB_LO", "DSTATWB_HI", "DSTAT_CHERR", "DSTAT_CHERRMSK",
#endif
					"DSTAT");
	seq_printf(m, "========================================"
				"=======================================\n");

#ifdef _MIC_SCIF_
	for (i = 0; i < MAX_NUM_DMA_CHAN; i++) {
#else
	for (i = first_dma_chan(); i <= last_dma_chan(); i++) {
#endif
		curr_chan = &dma_ctx->dma_channels[i];
		chan_num = curr_chan->ch_num;
		seq_printf(m, "%-10i| %-#10x %-#10x %-#10x %-#10x"
			" %-#10x"
#ifdef CONFIG_MK1OM
			" %-#10x %-#11x %-#10x %-#14x"
#endif
			" %-#10x\n", chan_num,
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCAR),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DTPR),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DHPR),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DRAR_HI),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DRAR_LO),
#ifdef CONFIG_MK1OM
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DSTATWB_LO),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DSTATWB_HI),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCHERR),
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCHERRMSK),
#endif
			md_mic_dma_read_mmio(dma_dev, chan_num, REG_DSTAT));
	}

	seq_printf(m, "\nDMA Channel Descriptor Rings\n");
	seq_printf(m, "========================================"
				"=======================================\n");

	for (i = first_dma_chan(); i <= last_dma_chan(); i++) {
		curr_chan = &dma_ctx->dma_channels[i];
		chan_num = curr_chan->ch_num;
		dtpr = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DTPR);
		seq_printf(m,  "Channel %i: [", chan_num);
		size = ((int) md_mic_dma_read_mmio(dma_dev, chan_num, REG_DHPR)
			- dtpr) % curr_chan->chan->num_desc_in_ring;
		/*
		 * In KNC B0, empty condition is tail = head -1
		 */
		if (mic_hw_family(dma_ctx->device_num) == FAMILY_KNC &&
			mic_hw_stepping(dma_ctx->device_num) >= KNC_B0_STEP)
			size -= 1;

		for (j = 0; j < size; j++) {
			desc = curr_chan->desc_ring[(j+dtpr) %
				curr_chan->chan->num_desc_in_ring];

			switch (desc.desc.nop.type){
			case NOP:
				seq_printf(m," {Type: NOP, %#llx"
					" %#llx} ",  desc.qwords.qw0,
						   desc.qwords.qw1);
				break;
			case MEMCOPY:
				seq_printf(m," {Type: MEMCOPY, SAP:"
					" %#llx, DAP: %#llx, length: %#llx} ",
					  (uint64_t) desc.desc.memcopy.sap,
					  (uint64_t) desc.desc.memcopy.dap,
					  (uint64_t) desc.desc.memcopy.length);
				break;
			case STATUS:
				seq_printf(m," {Type: STATUS, data:"
					" %#llx, DAP: %#llx, intr: %lli} ",
					(uint64_t) desc.desc.status.data,
					(uint64_t) desc.desc.status.dap,
					(uint64_t) desc.desc.status.intr);
				break;
			case GENERAL:
				seq_printf(m," {Type: GENERAL, "
					"DAP: %#llx, dword: %#llx} ",
					(uint64_t) desc.desc.general.dap,
					(uint64_t) desc.desc.general.data);
				break;
			case KEYNONCECNT:
				seq_printf(m," {Type: KEYNONCECNT, sel: "
					"%lli, h: %lli, index: %lli, cs: %lli,"
					" value: %#llx} ",
						(uint64_t) desc.desc.keynoncecnt.sel,
						(uint64_t) desc.desc.keynoncecnt.h,
						(uint64_t) desc.desc.keynoncecnt.index,
						(uint64_t) desc.desc.keynoncecnt.cs,
						(uint64_t) desc.desc.keynoncecnt.data);
				break;
			case KEY:
				seq_printf(m," {Type: KEY, dest_ind"
					   "ex: %lli, ski: %lli, skap: %#llx ",
						(uint64_t) desc.desc.key.di,
						(uint64_t) desc.desc.key.ski,
						(uint64_t) desc.desc.key.skap);
				break;
			default:
				seq_printf(m," {Uknown Type=%lli ,"
				 "%#llx %#llx} ",(uint64_t)  desc.desc.nop.type,
						(uint64_t) desc.qwords.qw0,
						(uint64_t) desc.qwords.qw1);
			}
		}
		seq_printf(m,  "]\n");
		if (mic_hw_family(dma_ctx->device_num) == FAMILY_KNC &&
		    mic_hw_stepping(dma_ctx->device_num) >= KNC_B0_STEP &&
		    curr_chan->chan->dstat_wb_loc)
			seq_printf(m, "DSTAT_WB = 0x%x\n",
				*((uint32_t*)curr_chan->chan->dstat_wb_loc));
	}
	return 0;
}

static int mic_dma_reg_open(struct inode *inode, struct file *file)
{
	return single_open(file, mic_dma_reg_show, PDE_DATA(inode));
}

static const struct file_operations mic_dma_reg_fops = {
	.owner   = THIS_MODULE,
	.open    = mic_dma_reg_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release
};

static void
mic_dma_proc_init(struct mic_dma_ctx_t *dma_ctx)
{
	char name[64];
	snprintf(name, 63, "%s%d", proc_dma_ring, dma_ctx->device_num);
	proc_create_data(name,  S_IFREG | S_IRUGO, NULL, &mic_dma_ring_fops, dma_ctx);
	snprintf(name, 63, "%s%d", proc_dma_reg, dma_ctx->device_num);
	proc_create_data(name,  S_IFREG | S_IRUGO, NULL, &mic_dma_reg_fops, dma_ctx);
}
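Both entries are created directly under /proc with generated names, so the matching cleanup only has to rebuild those names. A minimal sketch under the same assumptions (proc_dma_ring and proc_dma_reg are the name prefixes used above):

/* Sketch: remove the two per-device entries created in mic_dma_proc_init(). */
static void
mic_dma_proc_uninit(struct mic_dma_ctx_t *dma_ctx)
{
	char name[64];

	snprintf(name, sizeof(name), "%s%d", proc_dma_ring, dma_ctx->device_num);
	remove_proc_entry(name, NULL);
	snprintf(name, sizeof(name), "%s%d", proc_dma_reg, dma_ctx->device_num);
	remove_proc_entry(name, NULL);
}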
Example #23
0
void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size)
{
	unsigned int cpu;
	struct proc_dir_entry *d_entry;
	int i = 0;

	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats =
			per_cpu(msm_pm_stats, cpu).stats;

		stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
		stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_SPIN].name = "idle-spin";
		stats[MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
		stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_RETENTION].name = "retention";
		stats[MSM_PM_STAT_RETENTION].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
			"idle-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
			first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].name =
			"idle-failed-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].
			first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
			"idle-power-collapse";
		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
			"idle-failed-power-collapse";
		stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].
			first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_NOT_IDLE].name = "not-idle";
		stats[MSM_PM_STAT_NOT_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		for (i = 0; i < size; i++)
			stats[enable_stats[i]].enabled = true;

	}
	suspend_stats.name = "system_suspend";
	suspend_stats.first_bucket_time =
		CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;

	d_entry = proc_create_data("msm_pm_stats", S_IRUGO | S_IWUSR | S_IWGRP,
			NULL, &msm_pm_stats_fops, NULL);
}
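The d_entry result above is never checked, so a registration failure (duplicate name, out of memory) goes unnoticed. A hedged sketch of how the tail of the function could report it instead:

	/* Sketch: warn if the stats file could not be registered. */
	d_entry = proc_create_data("msm_pm_stats", S_IRUGO | S_IWUSR | S_IWGRP,
				   NULL, &msm_pm_stats_fops, NULL);
	if (!d_entry)
		pr_err("msm_pm_stats: failed to create /proc/msm_pm_stats\n");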
Example #24
0
File: hadm_proc.c Project: forthewatch/xdm
static void hadm_create_proc_file(const char *file_name, struct hadm_struct *hadm)
{
	proc_create_data(file_name, S_IFREG | S_IRUGO, hadm->proc_dir, &hadm_proc_fops, NULL);
}
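Note that the hadm pointer is accepted but not forwarded: the data argument of proc_create_data is NULL, so hadm_proc_fops cannot recover the instance via PDE_DATA() in its open routine. If per-instance data is wanted, a sketch under the same assumptions would be:

/* Sketch: pass the instance so the fops can read it back with PDE_DATA(inode). */
static void hadm_create_proc_file(const char *file_name, struct hadm_struct *hadm)
{
	if (!proc_create_data(file_name, S_IFREG | S_IRUGO,
			      hadm->proc_dir, &hadm_proc_fops, hadm))
		pr_warn("hadm: failed to create proc file %s\n", file_name);
}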
Example #25
0
static int antenna_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int error = 0;

	printk("%s\n", __func__);

	antenna_data = kzalloc(sizeof(struct antenna_dev_data), GFP_KERNEL);
	if (!antenna_data)
		return -ENOMEM;
	antenna_data->dev = dev;

/*
        if (!antenna_class) {
    		antenna_class = class_create(THIS_MODULE, CLASS_NAME);
    		if (IS_ERR(antenna_class))
    			error =  PTR_ERR(antenna_class);
    			//dev_err(dev,"%s: class_create FAILED %d.\n", __func__, error);
    			//goto err_antenna_dev_register;
	    }
	    
	    error = alloc_chrdev_region(&antenna_data->devno,
					antenna_device_count++,
					1,
					DRV_NAME);

	if (error < 0) {
		dev_err(dev,
				"%s: alloc_chrdev_region FAILED %d.\n", __func__, error);
		goto err_class;

	} else {
		dev_info(dev, "%s: major=%d, minor=%d\n",
						__func__,
						MAJOR(antenna_data->devno),
						MINOR(antenna_data->devno));
	}
	antenna_data->device = device_create(antenna_class, NULL, antenna_data->devno,
						NULL, "%s", CDEV_NAME);

	if (IS_ERR(antenna_data->device)) {
		dev_err(dev, "%s: device_create failed.\n",__func__);
		error = PTR_ERR(antenna_data->device);
		goto err_class;
	}
	
	    error =	sysfs_create_file(&antenna_data->device->kobj, &antenna_state_attr.attr);
        //error =	sysfs_create_file(&antenna_data->dev->kobj, &antenna_state_attr.attr);
*/        
#ifndef BUILD_MODULE
	/* parse the device tree node */
	error = antenna_dev_get_devtree_pdata(dev);
#else	/* TODO: adjust GPIO numbers per hardware */
	antenna_data->antenna1_gpio = 998;	/* 34-->912  66-->944 */
	antenna_data->antenna2_gpio = 999;
	antenna_data->antenna3_gpio = 1000;
	antenna_data->antenna4_gpio = 1002;
	/* TODO: may need pinctrl if these GPIOs are not in GPIO mode;
	 * a .ko build would then use ioremap instead of pinctrl to write
	 * the register (0xfd511xxx): bit[0]=0: mode, 4: in/out status 0/1,
	 * 8: INTR status 9f
	 */
#endif
	if (error) {
		dev_err(dev, "failed to parse device tree\n");
		goto err_antenna_dev_register;
	}

	if (!proc_create_data("antenna_switch", 0666, NULL,
			      &antenna_state_proc_fops, antenna_data)) {
		error = -ENOMEM;
		goto err_antenna_dev_register;
	}
		
	error = gpio_request(antenna_data->antenna1_gpio, "antenna1_gpio");
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_request, err=%d", __func__, error);
		goto err_set_gpio;
	}
	error = gpio_direction_output(antenna_data->antenna1_gpio, 0);
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_direction_output, err=%d", __func__, error);
		goto err_set_gpio;
	}

	error = gpio_request(antenna_data->antenna2_gpio, "antenna2_gpio");
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_request, err=%d", __func__, error);
		goto err_set_gpio;
	}
	error = gpio_direction_output(antenna_data->antenna2_gpio, 0);
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_direction_output, err=%d", __func__, error);
		goto err_set_gpio;
	}

	error = gpio_request(antenna_data->antenna3_gpio, "antenna3_gpio");
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_request, err=%d", __func__, error);
		goto err_set_gpio;
	}
	error = gpio_direction_output(antenna_data->antenna3_gpio, 0);
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_direction_output, err=%d", __func__, error);
		goto err_set_gpio;
	}

	error = gpio_request(antenna_data->antenna4_gpio, "antenna4_gpio");
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_request, err=%d", __func__, error);
		goto err_set_gpio;
	}
	error = gpio_direction_output(antenna_data->antenna4_gpio, 0);
	if (error < 0) {
		printk(KERN_ERR "%s: gpio_direction_output, err=%d", __func__, error);
		goto err_set_gpio;
	}

	printk("%s  ok!\n", __func__);
	return 0;

err_set_gpio:
	if (gpio_is_valid(antenna_data->antenna1_gpio))
		gpio_free(antenna_data->antenna1_gpio);
	if (gpio_is_valid(antenna_data->antenna2_gpio))
		gpio_free(antenna_data->antenna2_gpio);
	if (gpio_is_valid(antenna_data->antenna3_gpio))
		gpio_free(antenna_data->antenna3_gpio);
	if (gpio_is_valid(antenna_data->antenna4_gpio))
		gpio_free(antenna_data->antenna4_gpio);

/*
err_device:
 if (!IS_ERR_OR_NULL(antenna_data->device))
    		device_destroy(antenna_class, antenna_data->devno);

err_class:
class_destroy(antenna_class);
		antenna_class = NULL;	
*/		
err_antenna_dev_register:
	kfree(antenna_data);	

	return error;
}
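The antenna_switch entry is registered with antenna_data as its private data, so a handler can fetch it with PDE_DATA() on the kernel versions these examples target. antenna_state_proc_fops itself is not shown above, so the following write handler is only a sketch with assumed names:

/* Sketch: drive the first antenna GPIO from writes such as
 * "echo 1 > /proc/antenna_switch"; assumes the antennaN_gpio fields above. */
static ssize_t antenna_state_proc_write(struct file *file,
					const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct antenna_dev_data *data = PDE_DATA(file_inode(file));
	char kbuf[8] = { 0 };

	if (count >= sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	gpio_set_value(data->antenna1_gpio, kbuf[0] == '1');
	return count;
}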
Example #26
0
static int __init kaodv_init(void)
{
	struct net_device *dev = NULL;
	int i, ret = -ENOMEM;
	const struct kaodv_proc_file *f;

	kaodv_expl_init();

	ret = kaodv_queue_init();

	if (ret < 0)
		return ret;

	ret = kaodv_netlink_init();

	if (ret < 0)
		goto cleanup_queue;

	ret = nf_register_hook(&kaodv_ops[0]);

	if (ret < 0)
		goto cleanup_netlink;

	ret = nf_register_hook(&kaodv_ops[1]);

	if (ret < 0)
		goto cleanup_hook0;

	ret = nf_register_hook(&kaodv_ops[2]);

	if (ret < 0)
		goto cleanup_hook1;

	/* Prefetch network device info (ip, broadcast address, ifindex). */
	for (i = 0; i < MAX_INTERFACES; i++) {
		if (!ifname[i])
			break;

		dev = dev_get_by_name(&init_net, ifname[i]);

		if (!dev) {
			printk("No device %s available, ignoring!\n",
			       ifname[i]);
			continue;
		}
		if_info_add(dev);

		dev_put(dev);
	}
	
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
	proc_net_create("kaodv", 0, kaodv_proc_info);
#else
//    if (!create_proc_read_entry("kaodv", 0, init_net.proc_net, kaodv_read_proc,
//                            NULL))
//	if (!proc_create("kaodv", 0, init_net.proc_net, &kaodv_fops))
//        KAODV_DEBUG("Could not create kaodv proc entry");
	for (f = kaodv_proc_files; f->name[0]; f++) {
		if (!proc_create_data(f->name, 0, init_net.proc_net,
				&kaodv_proc_fops, f->show)) {
			KAODV_DEBUG("Could not create kaodv proc entry");
		}
	}
#endif
	KAODV_DEBUG("Module init OK");

	return ret;

cleanup_hook1:
	nf_unregister_hook(&kaodv_ops[1]);
cleanup_hook0:
	nf_unregister_hook(&kaodv_ops[0]);
cleanup_netlink:
	kaodv_netlink_fini();
cleanup_queue:
	kaodv_queue_fini();

	return ret;
}
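Here the data pointer handed to proc_create_data is the per-file seq_file show callback itself, which lets one file_operations serve every entry. A plausible shape for the shared open routine (not copied from the project):

/* Sketch: generic open routine; the show function was stored as proc data above. */
static int kaodv_proc_open(struct inode *inode, struct file *file)
{
	int (*show)(struct seq_file *, void *) = PDE_DATA(inode);

	return single_open(file, show, NULL);
}

static const struct file_operations kaodv_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = kaodv_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};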
Example #27
0
static int32_t msm_led_trigger_probe(struct platform_device *pdev)
{
	int32_t rc = 0, rc_1 = 0, i = 0;
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *flash_src_node = NULL;
	uint32_t count = 0;
	struct led_trigger *temp = NULL;
	struct proc_dir_entry *rcdir;

	CDBG("called\n");
	rcdir = proc_create_data("CTP_FLASH_CTRL", S_IFREG | S_IWUGO | S_IWUSR,
				 NULL, &proc_flash_led_operations, NULL);
	if (rcdir == NULL)
		CDBG("proc_create_data failed\n");
	if (!of_node) {
		pr_err("of_node NULL\n");
		return -EINVAL;
	}

	fctrl.pdev = pdev;
	fctrl.num_sources = 0;

	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
	if (rc < 0) {
		pr_err("failed\n");
		return -EINVAL;
	}
	CDBG("pdev id %d\n", pdev->id);

	rc = of_property_read_u32(of_node,
			"qcom,flash-type", &flashtype);
	if (rc < 0) {
		pr_err("flash-type: read failed\n");
		return -EINVAL;
	}

	if (of_get_property(of_node, "qcom,flash-source", &count)) {
		count /= sizeof(uint32_t);
		CDBG("count %d\n", count);
		if (count > MAX_LED_TRIGGERS) {
			pr_err("invalid count\n");
			return -EINVAL;
		}
		fctrl.num_sources = count;
		for (i = 0; i < count; i++) {
			flash_src_node = of_parse_phandle(of_node,
				"qcom,flash-source", i);
			if (!flash_src_node) {
				pr_err("flash_src_node NULL\n");
				continue;
			}

			rc = of_property_read_string(flash_src_node,
				"linux,default-trigger",
				&fctrl.flash_trigger_name[i]);
			if (rc < 0) {
				pr_err("default-trigger: read failed\n");
				of_node_put(flash_src_node);
				continue;
			}

			CDBG("default trigger %s\n",
				fctrl.flash_trigger_name[i]);

			if (flashtype == GPIO_FLASH) {
				/* use fake current */
				fctrl.flash_op_current[i] = LED_FULL;
			} else {
				rc = of_property_read_u32(flash_src_node,
					"qcom,current",
					&fctrl.flash_op_current[i]);
				rc_1 = of_property_read_u32(flash_src_node,
					"qcom,max-current",
					&fctrl.flash_max_current[i]);
				if ((rc < 0) || (rc_1 < 0)) {
					pr_err("current: read failed\n");
					of_node_put(flash_src_node);
					continue;
				}
			}

			of_node_put(flash_src_node);

			CDBG("max_current[%d] %d\n",
				i, fctrl.flash_op_current[i]);

			led_trigger_register_simple(fctrl.flash_trigger_name[i],
				&fctrl.flash_trigger[i]);

			if (flashtype == GPIO_FLASH)
				if (fctrl.flash_trigger[i])
					temp = fctrl.flash_trigger[i];
		}

		/* Torch source */
		flash_src_node = of_parse_phandle(of_node, "qcom,torch-source",
			0);
		if (flash_src_node) {
			rc = of_property_read_string(flash_src_node,
				"linux,default-trigger",
				&fctrl.torch_trigger_name);
			if (rc < 0) {
				pr_err("default-trigger: read failed\n");
				goto torch_failed;
			}

			CDBG("default trigger %s\n",
				fctrl.torch_trigger_name);

			if (flashtype == GPIO_FLASH) {
				/* use fake current */
				fctrl.torch_op_current = LED_FULL;
				if (temp)
					fctrl.torch_trigger = temp;
				else
					led_trigger_register_simple(
						fctrl.torch_trigger_name,
						&fctrl.torch_trigger);
			} else {
				rc = of_property_read_u32(flash_src_node,
					"qcom,current",
					&fctrl.torch_op_current);
				rc_1 = of_property_read_u32(flash_src_node,
					"qcom,max-current",
					&fctrl.torch_max_current);

				if ((rc < 0) || (rc_1 < 0)) {
					pr_err("current: read failed\n");
					goto torch_failed;
				}

				CDBG("torch max_current %d\n",
					fctrl.torch_op_current);

				led_trigger_register_simple(
					fctrl.torch_trigger_name,
					&fctrl.torch_trigger);
			}
torch_failed:
			of_node_put(flash_src_node);
		}
	}

	rc = msm_led_flash_create_v4lsubdev(pdev, &fctrl);
	if (!rc)
		msm_led_torch_create_classdev(pdev, &fctrl);

	return rc;
}
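The probe registers /proc/CTP_FLASH_CTRL write-only and world-writable (S_IWUGO already implies S_IWUSR) and never removes it. A hedged sketch of a matching cleanup in the driver's remove path (the function name is an assumption):

/* Sketch: tear down the proc entry created in msm_led_trigger_probe().
 * If world-writability is not intended, S_IFREG | S_IWUSR is enough. */
static int32_t msm_led_trigger_remove(struct platform_device *pdev)
{
	remove_proc_entry("CTP_FLASH_CTRL", NULL);
	return 0;
}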