/**
 * gser_bind_config - add a generic serial function to a configuration
 * @c: the configuration to support the serial instance
 * @port_num: /dev/ttyGS* port this interface will use
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 *
 * Caller must have called @gserial_setup() with enough ports to
 * handle all the ones it binds.  Caller is also responsible
 * for calling @gserial_cleanup() before module unload.
 */
int gser_bind_config(struct usb_configuration *c, u8 port_num)
{
	struct f_gser	*gser;
	int		status;

	/* REVISIT might want instance-specific strings to help
	 * distinguish instances ...
	 */

	/* maybe allocate device-global string ID */
	if (gser_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		gser_string_defs[0].id = status;
	}

	/* allocate and initialize one new instance */
	gser = kzalloc(sizeof *gser, GFP_KERNEL);
	if (!gser)
		return -ENOMEM;

#ifdef CONFIG_MODEM_SUPPORT
	/* guards the modem-control state used by the notify callbacks */
	spin_lock_init(&gser->lock);
#endif
	gser->port_num = port_num;

	/* wire up the USB function callbacks for this instance */
	gser->port.func.name = "gser";
	gser->port.func.strings = gser_strings;
	gser->port.func.bind = gser_bind;
	gser->port.func.unbind = gser_unbind;
	gser->port.func.set_alt = gser_set_alt;
	gser->port.func.disable = gser_disable;
	/* NOTE(review): port_num indexes gserial_ports[] without a bounds
	 * check here — assumes the caller honored the gserial_setup()
	 * contract above; confirm at call sites. */
	gser->transport		= gserial_ports[port_num].transport;
#ifdef CONFIG_MODEM_SUPPORT
	/* We support only three ports for now */
	if (port_num == 0)
		gser->port.func.name = "modem";
	else if (port_num == 1)
		gser->port.func.name = "nmea";
	else
		gser->port.func.name = "modem2";
	/* modem builds also handle class requests and line-state signals */
	gser->port.func.setup = gser_setup;
	gser->port.connect = gser_connect;
	gser->port.get_dtr = gser_get_dtr;
	gser->port.get_rts = gser_get_rts;
	gser->port.send_carrier_detect = gser_send_carrier_detect;
	gser->port.send_ring_indicator = gser_send_ring_indicator;
	gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
	gser->port.disconnect = gser_disconnect;
	gser->port.send_break = gser_send_break;
#endif

#ifdef FEATURE_PANTECH_MODEM_REOPEN_DELAY
	INIT_DELAYED_WORK(&gser->connect_work, gser_connect_work);
#endif

	/* on failure we own gser and must free it; on success the
	 * composite core owns it and frees via unbind */
	status = usb_add_function(c, &gser->port.func);
	if (status)
		kfree(gser);
	return status;
}
/* ---- Example #2 ---- */
/*
 * bcm2079x_probe() - i2c probe routine for the BCM2079x NFC controller.
 *
 * Obtains platform data (from device tree, else legacy board data),
 * claims the interrupt/enable/wake GPIOs, registers a misc character
 * device, and requests the data-ready IRQ.  Returns 0 on success or a
 * negative errno; everything acquired so far is unwound in reverse
 * order via the goto labels at the bottom.
 *
 * Note on labels: each label is named after the step that FAILED, and
 * frees the resources acquired before that step.
 */
static int bcm2079x_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	int ret;
	struct bcm2079x_platform_data *platform_data;
	struct bcm2079x_dev *bcm2079x_dev;
	if (client->dev.of_node) {
		/* DT path: platform data is devm-allocated, freed
		 * automatically on probe failure / device removal */
		platform_data = devm_kzalloc(&client->dev,
			sizeof(struct bcm2079x_platform_data), GFP_KERNEL);
		if (!platform_data) {
			dev_err(&client->dev, "Failed to allocate memory\n");
			return -ENOMEM;
		}
		ret = bcm2079x_parse_dt(&client->dev, platform_data);
		if (ret)
			return ret;
	} else
		/* NOTE(review): legacy path may leave platform_data NULL if
		 * the board file provided none; it is dereferenced unchecked
		 * below — confirm all board configs supply it. */
		platform_data = client->dev.platform_data;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "need I2C_FUNC_I2C\n");
		return -ENODEV;
	}

	/* claim the three GPIOs; later failures free earlier requests */
	ret = gpio_request(platform_data->irq_gpio, "nfc_int");
	if (ret)
		return -ENODEV;
	ret = gpio_request(platform_data->en_gpio, "nfc_ven");
	if (ret)
		goto err_en;
	ret = gpio_request(platform_data->wake_gpio, "nfc_firm");
	if (ret)
		goto err_firm;
	/* start with the chip disabled and not firmware-wake asserted */
	gpio_set_value(platform_data->en_gpio, 0);
	gpio_set_value(platform_data->wake_gpio, 0);
	bcm2079x_dev = kzalloc(sizeof(*bcm2079x_dev), GFP_KERNEL);
	if (bcm2079x_dev == NULL) {
		dev_err(&client->dev,
			"failed to allocate memory for module data\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	bcm2079x_dev->wake_gpio = platform_data->wake_gpio;
	bcm2079x_dev->irq_gpio = platform_data->irq_gpio;
	bcm2079x_dev->en_gpio = platform_data->en_gpio;
	bcm2079x_dev->client = client;

	/* init mutex and queues */
	init_waitqueue_head(&bcm2079x_dev->read_wq);
	mutex_init(&bcm2079x_dev->read_mutex);
	spin_lock_init(&bcm2079x_dev->irq_enabled_lock);

	/* expose the device as /dev/bcm2079x via the misc framework */
	bcm2079x_dev->bcm2079x_device.minor = MISC_DYNAMIC_MINOR;
	bcm2079x_dev->bcm2079x_device.name = "bcm2079x";
	bcm2079x_dev->bcm2079x_device.fops = &bcm2079x_dev_fops;

	ret = misc_register(&bcm2079x_dev->bcm2079x_device);
	if (ret) {
		dev_err(&client->dev, "misc_register failed\n");
		goto err_misc_register;
	}

	dev_info(&client->dev,
		 "%s, saving address 0x%x\n",
		 __func__, client->addr);
    bcm2079x_dev->original_address = client->addr;

	/* request irq.  the irq is set whenever the chip has data available
	 * for reading.  it is cleared when all data has been read.
	 */
	client->irq = gpio_to_irq(platform_data->irq_gpio);
	dev_info(&client->dev, "requesting IRQ %d with IRQF_NO_SUSPEND\n", client->irq);
	/* mark enabled so bcm2079x_disable_irq() below actually disables */
	bcm2079x_dev->irq_enabled = true;
	ret = request_irq(client->irq, bcm2079x_dev_irq_handler,
			  IRQF_TRIGGER_RISING|IRQF_NO_SUSPEND, client->name, bcm2079x_dev);
	if (ret) {
		dev_err(&client->dev, "request_irq failed\n");
		goto err_request_irq_failed;
	}
	/* leave the IRQ masked until a reader opens the device */
	bcm2079x_disable_irq(bcm2079x_dev);
	i2c_set_clientdata(client, bcm2079x_dev);
	dev_info(&client->dev,
		 "%s, probing bcm2079x driver exited successfully\n",
		 __func__);
	return 0;

err_request_irq_failed:
	misc_deregister(&bcm2079x_dev->bcm2079x_device);
err_misc_register:
	mutex_destroy(&bcm2079x_dev->read_mutex);
	kfree(bcm2079x_dev);
err_exit:
	gpio_free(platform_data->wake_gpio);
err_firm:
	gpio_free(platform_data->en_gpio);
err_en:
	gpio_free(platform_data->irq_gpio);
	return ret;
}
/* ---- Example #3 ---- */
/*
 * autofs4_fill_super() - fill in the autofs4 superblock at mount time.
 *
 * Allocates per-sb info, creates the root inode/dentry, parses mount
 * options (including the daemon's pipe fd), validates the protocol
 * version, and attaches the control pipe.  Returns 0 on success and
 * -EINVAL on any failure (specific cause is only logged).
 *
 * Error-path ownership: before d_alloc_root() succeeds the inode and
 * ino must be released individually (fail_iput/fail_ino); afterwards
 * dput(root) drops both, so fail_dput must NOT also iput/kfree them.
 */
int autofs4_fill_super(struct super_block *s, void *data, int silent)
{
	struct inode * root_inode;
	struct dentry * root;
	struct file * pipe;
	int pipefd;
	struct autofs_sb_info *sbi;
	struct autofs_info *ino;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto fail_unlock;
	DPRINTK("starting up, sbi = %p",sbi);

	s->s_fs_info = sbi;
	sbi->magic = AUTOFS_SBI_MAGIC;
	sbi->pipefd = -1;
	sbi->pipe = NULL;
	/* catatonic (no daemon attached) until the pipe is installed */
	sbi->catatonic = 1;
	sbi->exp_timeout = 0;
	/* remember the mounting process group: it identifies the daemon */
	sbi->oz_pgrp = task_pgrp_nr(current);
	sbi->sb = s;
	sbi->version = 0;
	sbi->sub_version = 0;
	set_autofs_type_indirect(&sbi->type);
	sbi->min_proto = 0;
	sbi->max_proto = 0;
	mutex_init(&sbi->wq_mutex);
	spin_lock_init(&sbi->fs_lock);
	sbi->queues = NULL;
	spin_lock_init(&sbi->lookup_lock);
	INIT_LIST_HEAD(&sbi->active_list);
	INIT_LIST_HEAD(&sbi->expiring_list);
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = AUTOFS_SUPER_MAGIC;
	s->s_op = &autofs4_sops;
	s->s_time_gran = 1;

	/*
	 * Get the root inode and dentry, but defer checking for errors.
	 */
	ino = autofs4_mkroot(sbi);
	if (!ino)
		goto fail_free;
	root_inode = autofs4_get_inode(s, ino);
	if (!root_inode)
		goto fail_ino;

	root = d_alloc_root(root_inode);
	if (!root)
		goto fail_iput;
	pipe = NULL;

	root->d_op = &autofs4_sb_dentry_operations;
	root->d_fsdata = ino;

	/* Can this call block? */
	if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid,
				&sbi->oz_pgrp, &sbi->type, &sbi->min_proto,
				&sbi->max_proto)) {
		printk("autofs: called with bogus options\n");
		goto fail_dput;
	}

	root_inode->i_fop = &autofs4_root_operations;
	/* direct/offset mounts trigger on the root itself */
	root_inode->i_op = autofs_type_trigger(sbi->type) ?
			&autofs4_direct_root_inode_operations :
			&autofs4_indirect_root_inode_operations;

	/* Couldn't this be tested earlier? */
	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
	    sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
		printk("autofs: kernel does not match daemon version "
		       "daemon (%d, %d) kernel (%d, %d)\n",
			sbi->min_proto, sbi->max_proto,
			AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
		goto fail_dput;
	}

	/* Establish highest kernel protocol version */
	if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
		sbi->version = AUTOFS_MAX_PROTO_VERSION;
	else
		sbi->version = sbi->max_proto;
	sbi->sub_version = AUTOFS_PROTO_SUBVERSION;

	DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp);
	/* take a reference on the daemon's pipe; dropped in fail_fput
	 * or when the fs is torn down */
	pipe = fget(pipefd);

	if (!pipe) {
		printk("autofs: could not open pipe file descriptor\n");
		goto fail_dput;
	}
	/* the kernel writes wait-queue packets to this pipe */
	if (!pipe->f_op || !pipe->f_op->write)
		goto fail_fput;
	sbi->pipe = pipe;
	sbi->pipefd = pipefd;
	sbi->catatonic = 0;

	/*
	 * Success! Install the root dentry now to indicate completion.
	 */
	s->s_root = root;
	return 0;

	/*
	 * Failure ... clean up.
	 */
fail_fput:
	printk("autofs: pipe file descriptor does not contain proper ops\n");
	fput(pipe);
	/* fall through */
fail_dput:
	/* dput releases root_inode and ino via the dentry */
	dput(root);
	goto fail_free;
fail_iput:
	printk("autofs: get root dentry failed\n");
	iput(root_inode);
fail_ino:
	kfree(ino);
fail_free:
	kfree(sbi);
	s->s_fs_info = NULL;
fail_unlock:
	return -EINVAL;
}
/* ---- Example #4 ---- */
static int zd1201_fw_upload(struct usb_device *dev, int apfw)
{
	const struct firmware *fw_entry;
	char *data;
	unsigned long len;
	int err;
	unsigned char ret;
	char *buf;
	char *fwfile;

	if (apfw)
		fwfile = "zd1201-ap.fw";
	else
		fwfile = "zd1201.fw";

	err = request_firmware(&fw_entry, fwfile, &dev->dev);
	if (err) {
		dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile);
		dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n");
		dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n");
		return err;
	}

	data = fw_entry->data;
        len = fw_entry->size;

	buf = kmalloc(1024, GFP_ATOMIC);
	if (!buf)
		goto exit;
	
	while (len > 0) {
		int translen = (len > 1024) ? 1024 : len;
		memcpy(buf, data, translen);

		err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0,
		    USB_DIR_OUT | 0x40, 0, 0, buf, translen,
		    ZD1201_FW_TIMEOUT);
		if (err < 0)
			goto exit;

		len -= translen;
		data += translen;
	}
                                        
	err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x2,
	    USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT);
	if (err < 0)
		goto exit;

	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
	    USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
	if (err < 0)
		goto exit;

	if (ret & 0x80) {
		err = -EIO;
		goto exit;
	}

	err = 0;
exit:
	kfree(buf);
	release_firmware(fw_entry);
	return err;
}
/*
 * ddr_health_set() - module-param "set" hook that (re)allocates the DDR
 * health buffer and tells the RPM about it.
 *
 * Parses the new mem_size via param_set_uint(), allocates a buffer of
 * that size (or tears the buffer down when mem_size == 0), sends the
 * physical address/size to the RPM, and only then frees the previous
 * buffer.  On RPM failure the previous addr/size are restored.
 * Returns 0 on success, negative errno otherwise.
 */
static int ddr_health_set(const char *val, struct kernel_param *kp)
{
	int	 ret;
	void	 *virt;
	uint64_t old_addr = 0;
	uint32_t old_size = 0;

	/* serializes against concurrent param writes */
	mutex_lock(&lock);
	ret = param_set_uint(val, kp);
	if (ret) {
		pr_err("ddr-health: error setting value %d\n", ret);
		mutex_unlock(&lock);
		return ret;
	}

	if (rpm_kvp.data) {
		/* remember the previous buffer so it can be freed on
		 * success or restored on failure */
		ddr_health = (struct ddr_health *)rpm_kvp.data;
		old_addr = ddr_health->addr;
		old_size = ddr_health->size;
	}

	rpm_kvp.key = RPM_MISC_REQ_DDR_HEALTH;

	/* NOTE(review): if rpm_kvp.data was NULL above, ddr_health is
	 * dereferenced below without being assigned here — this looks
	 * like a NULL/stale pointer dereference on the first call unless
	 * ddr_health is initialized elsewhere; confirm. */
	if (mem_size) {
		virt = kzalloc(mem_size, GFP_KERNEL);
		if (!virt) {
			pr_err("ddr-health: failed to alloc mem request %x\n",
			       mem_size);
			mutex_unlock(&lock);
			return -ENOMEM;
		}

		/* RPM is handed the physical address; the kernel keeps
		 * ownership of the virtual mapping */
		ddr_health->addr = (uint64_t)virt_to_phys(virt);
		ddr_health->size = mem_size;

		rpm_kvp.length = sizeof(struct ddr_health);
		rpm_kvp.data = (void *)ddr_health;

		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					   RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1);
		if (ret) {
			pr_err("ddr-health: send buf to RPM failed %d, %x\n",
			       ret, mem_size);
			kfree(virt);
			goto err;
		}
	} else {
		/* mem_size == 0: tell RPM there is no buffer any more */
		ddr_health->addr = 0;
		ddr_health->size = 0;

		rpm_kvp.length = sizeof(struct ddr_health);
		rpm_kvp.data = (void *)ddr_health;

		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					   RPM_MISC_REQ_TYPE, 0, &rpm_kvp, 1);
		if (ret) {
			pr_err("ddr-health: send nobuf to RPM failed %d, %x\n",
			       ret, mem_size);
			goto err;
		}
	}

	/* RPM accepted the new buffer: the old one can now be released */
	if (old_addr)
		kfree(phys_to_virt((phys_addr_t)old_addr));

	mutex_unlock(&lock);
	return 0;
err:
	/* roll back so the RPM's last-known buffer stays valid */
	ddr_health->addr = old_addr;
	ddr_health->size = old_size;
	mutex_unlock(&lock);
	return ret;
}
/*
 * msm_vb2_mem_ops_put_userptr() - vb2 mem_ops put_userptr hook;
 * releases the private buffer state allocated by the matching
 * get_userptr callback.  buf_priv may be NULL (kfree tolerates it).
 */
static void msm_vb2_mem_ops_put_userptr(void *buf_priv)
{
	kfree(buf_priv);
}
/* ---- Example #7 ---- */
/*
 * ip6_fragment() - split an IPv6 packet that exceeds the path MTU into
 * fragments and hand each one to @output.
 *
 * Two strategies:
 *  - fast path: if the skb already carries a well-formed frag_list
 *    (correct geometry, not cloned/shared), reuse those buffers in
 *    place, prepending a fragment header to each;
 *  - slow path: allocate fresh skbs and copy the payload block by
 *    block.
 *
 * Consumes @skb in all cases.  Returns 0 on success or a negative
 * errno; -EMSGSIZE (plus an ICMPV6_PKT_TOOBIG) when local_df forbids
 * fragmentation.
 */
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	/* hlen = length of the unfragmentable part; prevhdr points at
	 * the nexthdr byte we must rewrite to NEXTHDR_FRAGMENT */
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.  (This last
	 * check should be redundant, but it's free.)
	 */
	if (!skb->local_df) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* honour a socket-requested fragment size smaller than the MTU */
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	/* from here on, mtu is the payload space per fragment */
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frags(skb)) {
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		/* fast path requires: head fits the MTU, payload sizes
		 * are 8-byte aligned (except the last), and nothing is
		 * cloned or shared */
		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
			    goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				/* transfer wmem accounting to the frags */
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				truesizes += frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		/* detach the frag list; each entry becomes its own packet */
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		/* keep a pristine copy of the unfragmentable headers to
		 * replicate into every fragment */
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		/* insert the fragment header between the unfragmentable
		 * part and the payload of the first fragment */
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		/* all fragments share one identification value */
		ipv6_select_ident(fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		/* balanced by dst_release() on both exit paths below */
		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				/* MF set on every fragment except the last */
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		/* output() failed part-way: drop the fragments that were
		 * never sent */
		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		/* first fragment picks the identification; the rest reuse it */
		if (!frag_id) {
			ipv6_select_ident(fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup.  If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more.  If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 *
 * Returns 1 if the dentry is valid, 0 to make the VFS invalidate it,
 * or -ECHILD if called in RCU-walk mode where blocking is forbidden.
 */
static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
	struct inode *inode;

	inode = ACCESS_ONCE(entry->d_inode);
	if (inode && is_bad_inode(inode))
		return 0;
	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
		int err;
		struct fuse_entry_out outarg;
		struct fuse_conn *fc;
		struct fuse_req *req;
		struct fuse_forget_link *forget;
		struct dentry *parent;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			return 0;

		/* everything below may block; bail out under RCU walk */
		if (nd && (nd->flags & LOOKUP_RCU))
			return -ECHILD;

		fc = get_fuse_conn(inode);
		req = fuse_get_req(fc);
		if (IS_ERR(req))
			return 0;

		/* pre-allocate the forget so the failure path after the
		 * lookup never needs to allocate */
		forget = fuse_alloc_forget();
		if (!forget) {
			fuse_put_request(fc, req);
			return 0;
		}

		/* snapshot before the lookup so stale replies are detectable */
		attr_version = fuse_get_attr_version(fc);

		parent = dget_parent(entry);
		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
				 &entry->d_name, &outarg);
		fuse_request_send(fc, req);
		dput(parent);
		err = req->out.h.error;
		fuse_put_request(fc, req);
		/* Zero nodeid is same as -ENOENT */
		if (!err && !outarg.nodeid)
			err = -ENOENT;
		if (!err) {
			struct fuse_inode *fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode)) {
				/* name now refers to a different inode:
				 * forget the new nodeid (queue consumes
				 * 'forget') and invalidate the dentry */
				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
				return 0;
			}
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);
		}
		kfree(forget);
		/* invalidate on error or if the file type changed */
		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
			return 0;

		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	}
	return 1;
}
/*
 * Atomic create+open operation
 *
 * If the filesystem doesn't support this, then fall back to separate
 * 'mknod' + 'open' requests.
 *
 * Sends a single FUSE_CREATE request that both creates the file and
 * opens it.  Returns 0 on success or a negative errno; -ENOSYS tells
 * the caller to fall back (and is remembered in fc->no_create).
 */
static int fuse_create_open(struct inode *dir, struct dentry *entry,
			    umode_t mode, struct nameidata *nd)
{
	int err;
	struct inode *inode;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req;
	struct fuse_forget_link *forget;
	struct fuse_create_in inarg;
	struct fuse_open_out outopen;
	struct fuse_entry_out outentry;
	struct fuse_file *ff;
	struct file *file;
	int flags = nd->intent.open.flags;

	if (fc->no_create)
		return -ENOSYS;

	/* pre-allocate the forget link so later failure paths never
	 * need to allocate */
	forget = fuse_alloc_forget();
	if (!forget)
		return -ENOMEM;

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out_put_forget_req;

	err = -ENOMEM;
	ff = fuse_file_alloc(fc);
	if (!ff)
		goto out_put_request;

	/* apply umask locally unless the server does its own masking */
	if (!fc->dont_mask)
		mode &= ~current_umask();

	flags &= ~O_NOCTTY;
	memset(&inarg, 0, sizeof(inarg));
	memset(&outentry, 0, sizeof(outentry));
	inarg.flags = flags;
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_CREATE;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 2;
	/* older protocol minors use the shorter fuse_open_in layout */
	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	req->out.numargs = 2;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outentry);
	req->out.args[0].value = &outentry;
	req->out.args[1].size = sizeof(outopen);
	req->out.args[1].value = &outopen;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err) {
		/* remember the lack of support so future calls fall back
		 * immediately */
		if (err == -ENOSYS)
			fc->no_create = 1;
		goto out_free_ff;
	}

	err = -EIO;
	/* sanity-check the server's reply: must be a regular file with a
	 * valid node id */
	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
		goto out_free_ff;

	fuse_put_request(fc, req);
	ff->fh = outopen.fh;
	ff->nodeid = outentry.nodeid;
	ff->open_flags = outopen.open_flags;
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr, entry_attr_timeout(&outentry), 0);
	if (!inode) {
		/* server already created+opened the file: release the
		 * handle and tell the server to forget the node */
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		fuse_sync_release(ff, flags);
		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);
	d_instantiate(entry, inode);
	fuse_change_entry_timeout(entry, &outentry);
	fuse_invalidate_attr(dir);
	file = lookup_instantiate_filp(nd, entry, generic_file_open);
	if (IS_ERR(file)) {
		fuse_sync_release(ff, flags);
		return PTR_ERR(file);
	}
	/* file takes its own reference on ff from here on */
	file->private_data = fuse_file_get(ff);
	fuse_finish_open(inode, file);
	return 0;

 out_free_ff:
	fuse_file_free(ff);
 out_put_request:
	fuse_put_request(fc, req);
 out_put_forget_req:
	kfree(forget);
	return err;
}
/*
 * soc_new_compress() - create a compressed-audio (compress offload)
 * device for an ASoC PCM runtime.
 *
 * Allocates the snd_compr instance, picks static vs dynamic (DPCM)
 * ops, optionally creates an internal backend PCM for dynamic links,
 * registers the compress device with ALSA, and calls the platform's
 * pcm_new constructor.  Returns 0 on success or a negative errno.
 *
 * Ownership note: compr is plain kzalloc and freed on error here;
 * compr->ops is devm-allocated against the card device and is
 * intentionally NOT freed on the error path.
 */
int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_platform *platform = rtd->platform;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_compr *compr;
	struct snd_pcm *be_pcm;
	char new_name[64];
	int ret = 0, direction = 0;

	/* NOTE(review): only playback direction is supported here */
	snprintf(new_name, sizeof(new_name), "%s %s-%d",
			rtd->dai_link->stream_name, codec_dai->name, num);
	direction = SND_COMPRESS_PLAYBACK;
	compr = kzalloc(sizeof(*compr), GFP_KERNEL);
	if (compr == NULL) {
		snd_printk(KERN_ERR "Cannot allocate compr\n");
		return -ENOMEM;
	}

	/* per-runtime copy of the ops so ->copy can be patched below */
	compr->ops = devm_kzalloc(rtd->card->dev, sizeof(soc_compr_ops),
				  GFP_KERNEL);
	if (compr->ops == NULL) {
		dev_err(rtd->card->dev, "Cannot allocate compressed ops\n");
		ret = -ENOMEM;
		goto compr_err;
	}

	if (rtd->dai_link->dynamic) {
		/* DPCM front-end: create a hidden backend PCM to route
		 * through and use the dynamic ops */
		snprintf(new_name, sizeof(new_name), "(%s)",
			rtd->dai_link->stream_name);

		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
				1, 0, &be_pcm);
		if (ret < 0) {
			dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
				rtd->dai_link->name);
			goto compr_err;
		}

		rtd->pcm = be_pcm;
		rtd->fe_compr = 1;
		be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
		
		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
	} else
		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));

	/* let the platform hook the copy operation if it provides one */
	if (platform->driver->compr_ops && platform->driver->compr_ops->copy)
		compr->ops->copy = soc_compr_copy;

	mutex_init(&compr->lock);
	ret = snd_compress_new(rtd->card->snd_card, num, direction, compr);
	if (ret < 0) {
		pr_err("compress asoc: can't create compress for codec %s\n",
			codec->name);
		goto compr_err;
	}

	/* deferred close work mirrors the normal PCM path */
	INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);

	rtd->compr = compr;
	compr->private_data = rtd;

	if (platform->driver->pcm_new) {
		ret = platform->driver->pcm_new(rtd);
		if (ret < 0) {
			pr_err("asoc: compress pcm constructor failed\n");
			goto compr_err;
		}
	}

	printk(KERN_INFO "compress asoc: %s <-> %s mapping ok\n", codec_dai->name,
		cpu_dai->name);
	return ret;

compr_err:
	kfree(compr);
	return ret;
}
/* ---- Example #11 ---- */
/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with zlib).
 *
 * Returns the number of bytes placed into @buffer (an array of @pages
 * page-sized buffers), or a negative errno on failure.
 */
int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
			int length, u64 *next_index, int srclength, int pages)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head **bh;
	/* split the 64-bit byte index into device-block + offset */
	int offset = index & ((1 << msblk->devblksize_log2) - 1);
	u64 cur_index = index >> msblk->devblksize_log2;
	int bytes, compressed, b = 0, k = 0, page = 0, avail;

	/* worst-case number of device blocks spanned by srclength bytes,
	 * plus one for the offset straddle */
	bh = kcalloc(((srclength + msblk->devblksize - 1)
		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
	if (bh == NULL)
		return -ENOMEM;

	if (length) {
		/*
		 * Datablock.
		 */
		bytes = -offset;
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		if (next_index)
			*next_index = index + length;

		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, srclength);

		if (length < 0 || length > srclength ||
				(index + length) > msblk->bytes_used)
			goto read_failure;

		/* grab every device block covering [index, index+length) */
		for (b = 0; bytes < length; b++, cur_index++) {
			bh[b] = sb_getblk(sb, cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		ll_rw_block(READ, b, bh);
	} else {
		/*
		 * Metadata block.
		 */
		if ((index + 2) > msblk->bytes_used)
			goto read_failure;

		/* first buffer also yields the 2-byte length prefix;
		 * get_block_length advances cur_index/offset past it */
		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
		if (bh[0] == NULL)
			goto read_failure;
		b = 1;

		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (next_index)
			*next_index = index + length + 2;

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
				compressed ? "" : "un", length);

		if (length < 0 || length > srclength ||
					(index + length) > msblk->bytes_used)
			goto block_release;

		for (; bytes < length; b++) {
			bh[b] = sb_getblk(sb, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		/* bh[0] was already read by get_block_length() */
		ll_rw_block(READ, b - 1, bh + 1);
	}

	if (compressed) {
		/* decompressor waits on and releases the buffer heads */
		length = squashfs_decompress(msblk, buffer, bh, b, offset,
			 length, srclength, pages);
		if (length < 0)
			goto read_failure;
	} else {
		/*
		 * Block is uncompressed.
		 */
		int i, in, pg_offset = 0;

		for (i = 0; i < b; i++) {
			wait_on_buffer(bh[i]);
			if (!buffer_uptodate(bh[i]))
				goto block_release;
		}

		/* copy straight from the buffer heads into the page array */
		for (bytes = length; k < b; k++) {
			in = min(bytes, msblk->devblksize - offset);
			bytes -= in;
			while (in) {
				if (pg_offset == PAGE_CACHE_SIZE) {
					page++;
					pg_offset = 0;
				}
				avail = min_t(int, in, PAGE_CACHE_SIZE -
						pg_offset);
				memcpy(buffer[page] + pg_offset,
						bh[k]->b_data + offset, avail);
				in -= avail;
				pg_offset += avail;
				offset += avail;
			}
			offset = 0;
			put_bh(bh[k]);
		}
	}

	kfree(bh);
	return length;

block_release:
	/* release buffers not yet consumed (k tracks consumption above) */
	for (; k < b; k++)
		put_bh(bh[k]);

read_failure:
	ERROR("squashfs_read_data failed to read block 0x%llx\n",
					(unsigned long long) index);
	kfree(bh);
	return -EIO;
}
/* ---- Example #12 ---- */
/*
 * put_path() - release a path string obtained from the matching
 * get/alloc helper.  Accepts NULL (kfree is a no-op on NULL).
 */
static void put_path(const char *path)
{
	kfree(path);
}
/* ---- Example #13 ---- */
/*
 * sys_execv() - replace the current process image with a new program.
 *
 * @progname: user-space pointer to the program path
 * @args:     user-space argv (NULL-terminated array of pointers)
 *
 * Copies path and arguments into kernel buffers, loads the ELF into a
 * fresh address space, rebuilds argv on the new user stack, and enters
 * the new process.  On success enter_new_process() does not return.
 * On failure returns an error code (or -1 with errno set — NOTE(review):
 * the mixed errno/-1 vs. direct-error-code convention is preserved from
 * the original; callers appear to depend on it, confirm before unifying).
 */
int sys_execv(userptr_t progname, userptr_t args) {
	struct addrspace *as;
	struct vnode *v;
	vaddr_t entrypoint, stackptr;
	int result;

	/* kernel copy of the program path */
	char * path = kmalloc(PATH_MAX);
	if (path == NULL) {		/* was unchecked */
		errno = ENOMEM;
		return -1;
	}

	as = curproc_getas();

	if (as != NULL && !valid_address_check(as, (vaddr_t)args)) { // out of vaddr boundary for this proc
		kfree(path);
		errno = EFAULT;
		return -1;
	}
	if (as != NULL && !valid_address_check(as, (vaddr_t)progname)) { // out of vaddr boundary for this proc
		kfree(path);
		errno = EFAULT;
		return -1;
	}

	result = copyinstr(progname, path, PATH_MAX, NULL);
	if(result) {
		kfree(path);	/* was leaked on this path */
		return result;
	}
	if(*path == '\0') {
		kfree(path);	/* was leaked on this path */
		errno = EISDIR;
		return -1;
	}

	// Open the executable, create a new address space and load the elf into it

	result = vfs_open((char *)path, O_RDONLY, 0, &v);
	if (result) {
		kfree(path);	/* was leaked on this path */
		return result;
	}

	as = as_create();
	if (as == NULL) {
		kfree(path);	/* was leaked on this path */
		vfs_close(v);
		return ENOMEM;
	}

	curproc_setas(as);
	as_activate();

	result = load_elf(v, &entrypoint);
	if (result) {
		/* p_addrspace will go away when curproc is destroyed */
		kfree(path);	/* was leaked on this path */
		vfs_close(v);
		return result;
	}
	vfs_close(v);

	result = as_define_stack(as, &stackptr); // stackptr set
	if (result) {
		/* p_addrspace will go away when curproc is destroyed */
		kfree(path);	/* was leaked on this path */
		return result;
	}
	kfree(path);

	userptr_t karg, uargs, argbase;
	char * buffer, * bufend;
	/* BUG FIX: was kmalloc(sizeof(ARG_MAX)) — that allocates
	 * sizeof(int) bytes, then up to ARG_MAX bytes were copied into
	 * it below: a kernel heap overflow.  Allocate ARG_MAX bytes. */
	buffer = kmalloc(ARG_MAX);
	if(buffer == NULL) {
		errno = ENOMEM;
		return -1;
	}
	bufend = buffer;
	size_t resid, bufsize;
	size_t alen;
	size_t *offsets = kmalloc(NARG_MAX * sizeof(size_t));
	if(offsets == NULL) {
		kfree(buffer);	/* was leaked on this path */
		errno = ENOMEM;
		return -1;
	}
	int size = 0;
	userptr_t arg;


	resid = bufsize = ARG_MAX;

	/* gather the argument strings into one contiguous kernel buffer,
	 * recording each string's offset for the argv rebuild below */
	for(int i = 0 ; i < NARG_MAX ; i++) { // copyin from user.
		copyin(args, &karg, sizeof(userptr_t)); // copy the pointer.
		if(karg == NULL) break; // if NULL, break.
		copyinstr(karg, bufend, resid, &alen);
		offsets[i] = bufsize - resid;
		bufend += alen;
		resid -= alen;
		args += sizeof(userptr_t);
		size++;
	}

	size_t buflen = bufend - buffer; // length of buffer.
	vaddr_t stack = stackptr - buflen; // current stack position.
	stack -= (stack & (sizeof(void *) - 1)); // alignment
	argbase = (userptr_t)stack; // argbase.

	copyout(buffer, argbase, buflen); // copy the arguments into stack.

	stack -= (size + 1)*sizeof(userptr_t); // go to array pointer (bottom of stack).
	uargs = (userptr_t)stack; // got stack.

	/* build the argv pointer array just below the strings */
	for(int i = 0 ; i < size ; i++) { // copy the elements
		arg = argbase + offsets[i];
		copyout(&arg, uargs, sizeof(userptr_t));
		uargs += sizeof(userptr_t); // 4
	}
	arg = NULL;
	copyout(&arg, uargs, sizeof(userptr_t)); // copy the NULL pointer.

	kfree(buffer);
	kfree(offsets);

	/* does not return on success */
	enter_new_process(size /*argc*/, (userptr_t)stack /*userspace addr of argv*/,
			  stack, entrypoint);

	return 0;

}
/* ---- Example #14 ---- */
/* dummy encoder */
/*
 * udl_enc_destroy() - DRM encoder destroy callback: unregister the
 * encoder from the DRM core, then free the kmalloc'd encoder itself.
 */
static void udl_enc_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
/*
 * Probe one Blackfin UART in SIR (slow IrDA) mode: claim the UART pin
 * list for this platform-device id, allocate driver state, size the
 * RX/TX iobufs and register the IrDA network device.
 * Returns 0 on success or a negative errno.
 */
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct bfin_sir_self *self;
	unsigned int baudrate_mask;
	struct bfin_sir_port *sir_port;
	int err;

	/* Sanity-check the id against the per-port pin table before
	 * requesting the peripheral pins. */
	if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) && \
				per[pdev->id][3] == pdev->id) {
		err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
		if (err)
			return err;
	} else {
		dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
		return -ENODEV;
	}

	err = -ENOMEM;
	sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
	if (!sir_port)
		goto err_mem_0;

	bfin_sir_init_ports(sir_port, pdev);

	/* IrDA netdev with a struct bfin_sir_self private area. */
	dev = alloc_irdadev(sizeof(*self));
	if (!dev)
		goto err_mem_1;

	self = netdev_priv(dev);
	self->dev = &pdev->dev;
	self->sir_port = sir_port;
	sir_port->dev = dev;

	err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
	if (err)
		goto err_mem_2;
	err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_3;

	dev->netdev_ops = &bfin_sir_ndo;
	dev->irq = sir_port->irq;

	irda_init_max_qos_capabilies(&self->qos);

	baudrate_mask = IR_9600;

	/* Each case deliberately falls through: selecting a maximum rate
	 * also enables every slower standard rate below it. */
	switch (max_rate) {
	case 115200:
		baudrate_mask |= IR_115200;
		/* fall through */
	case 57600:
		baudrate_mask |= IR_57600;
		/* fall through */
	case 38400:
		baudrate_mask |= IR_38400;
		/* fall through */
	case 19200:
		baudrate_mask |= IR_19200;
		/* fall through */
	case 9600:
		break;
	default:
		dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
	}

	self->qos.baud_rate.bits &= baudrate_mask;

	self->qos.min_turn_time.bits = 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(dev);

	/* Unusual unwinding: the err_mem_* labels live inside this failure
	 * branch so that a register_netdev() failure tears everything down,
	 * while earlier failures jump part-way into the same ladder. */
	if (err) {
		kfree(self->tx_buff.head);
err_mem_3:
		kfree(self->rx_buff.head);
err_mem_2:
		free_netdev(dev);
err_mem_1:
		kfree(sir_port);
err_mem_0:
		peripheral_free_list(per[pdev->id]);
	} else
		platform_set_drvdata(pdev, sir_port);

	return err;
}
/*
 * Code shared between mknod, mkdir, symlink and link.
 *
 * Sends the already-prepared request @req to the userspace filesystem,
 * validates the returned entry, instantiates a dentry/inode pair and
 * arms the entry/attr timeouts.  @req is always consumed here; the
 * preallocated "forget" link is either queued back to the server (when
 * the inode cannot be set up) or freed.  Returns 0 or a negative errno.
 */
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
			    struct inode *dir, struct dentry *entry,
			    umode_t mode)
{
	struct fuse_entry_out outarg;
	struct inode *inode;
	int err;
	struct fuse_forget_link *forget;

	/* Reserve a forget link up front, so that a failure after the
	 * server has created the node can still drop its lookup count. */
	forget = fuse_alloc_forget();
	if (!forget) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}

	memset(&outarg, 0, sizeof(outarg));
	req->in.h.nodeid = get_node_id(dir);
	req->out.numargs = 1;
	/* Protocol minors below 9 use a shorter entry_out layout. */
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err)
		goto out_put_forget_req;

	err = -EIO;
	/* Server must return a usable node id ... */
	if (invalid_nodeid(outarg.nodeid))
		goto out_put_forget_req;

	/* ... whose file type matches what we asked to create. */
	if ((outarg.attr.mode ^ mode) & S_IFMT)
		goto out_put_forget_req;

	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
			  &outarg.attr, entry_attr_timeout(&outarg), 0);
	if (!inode) {
		/* No inode: hand the lookup reference back to the server;
		 * fuse_queue_forget() takes ownership of @forget. */
		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;
		mutex_lock(&fc->inst_mutex);
		alias = d_find_alias(inode);
		if (alias) {
			/* New directory must have moved since mkdir */
			mutex_unlock(&fc->inst_mutex);
			dput(alias);
			iput(inode);
			return -EBUSY;
		}
		d_instantiate(entry, inode);
		mutex_unlock(&fc->inst_mutex);
	} else
		d_instantiate(entry, inode);

	fuse_change_entry_timeout(entry, &outarg);
	fuse_invalidate_attr(dir);
	return 0;

 out_put_forget_req:
	kfree(forget);
	return err;
}
Beispiel #17
0
/*
 * Release handler for the ext_csd debugfs file: free the buffer that
 * was stashed in private_data at open time.
 */
static int mmc_ext_csd_release(struct inode *inode, struct file *file)
{
	void *ext_csd_buf = file->private_data;

	kfree(ext_csd_buf);
	return 0;
}
Beispiel #18
0
/*
 * Build the initial Xen vcpu_guest_context for a secondary CPU and hand
 * it to the hypervisor (VCPUOP_initialise) so the vcpu can later be
 * started in cpu_bringup_and_idle().  Returns 0 (also when the CPU was
 * already initialized) or -ENOMEM.
 */
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* Only initialize each vcpu once. */
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	/* The new vcpu starts executing at the bringup trampoline. */
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	/* The GDT handed to Xen must be page-aligned and read-only. */
	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	/* Point the new vcpu at the kernel's reference page tables. */
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	/* Xen copies the context during the hypercall; free our copy. */
	kfree(ctxt);
	return 0;
}
Beispiel #19
0
/*
 * uvc_bind_config - add a UVC (USB Video Class) function to a configuration
 * @c: the configuration to add the function to
 * @control: class-specific control descriptors, starting with UVC_DT_HEADER
 * @fs_streaming: full-speed streaming descriptors (UVC_DT_INPUT_HEADER first)
 * @hs_streaming: high-speed streaming descriptors (UVC_DT_INPUT_HEADER first)
 *
 * Validates the descriptor arrays, allocates string IDs and registers
 * the function.  Returns zero on success, else a negative errno; the
 * uvc_device allocation is released on every failure path.
 *
 * Fixes vs. the previous version: descriptor-validation failures used to
 * "goto error" with ret still 0, and a usb_add_function() failure freed
 * @uvc but still returned 0 — both silently reported success.
 */
int __init
uvc_bind_config(struct usb_configuration *c,
		const struct uvc_descriptor_header * const *control,
		const struct uvc_descriptor_header * const *fs_streaming,
		const struct uvc_descriptor_header * const *hs_streaming)
{
	struct uvc_device *uvc;
	int ret = 0;

	/* TODO Check if the USB device controller supports the required
	 * features.
	 */
	if (!gadget_is_dualspeed(c->cdev->gadget))
		return -EINVAL;

	uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
	if (uvc == NULL)
		return -ENOMEM;

	uvc->state = UVC_STATE_DISCONNECTED;

	/* Validate the descriptors.  Each array must exist and start with
	 * the expected class-specific header. */
	ret = -EINVAL;
	if (control == NULL || control[0] == NULL ||
	    control[0]->bDescriptorSubType != UVC_DT_HEADER)
		goto error;

	if (fs_streaming == NULL || fs_streaming[0] == NULL ||
	    fs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER)
		goto error;

	if (hs_streaming == NULL || hs_streaming[0] == NULL ||
	    hs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER)
		goto error;

	uvc->desc.control = control;
	uvc->desc.fs_streaming = fs_streaming;
	uvc->desc.hs_streaming = hs_streaming;

	/* Allocate string descriptor numbers. */
	if ((ret = usb_string_id(c->cdev)) < 0)
		goto error;
	uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = ret;
	uvc_iad.iFunction = ret;

	if ((ret = usb_string_id(c->cdev)) < 0)
		goto error;
	uvc_en_us_strings[UVC_STRING_CONTROL_IDX].id = ret;
	uvc_control_intf.iInterface = ret;

	if ((ret = usb_string_id(c->cdev)) < 0)
		goto error;
	uvc_en_us_strings[UVC_STRING_STREAMING_IDX].id = ret;
	uvc_streaming_intf_alt0.iInterface = ret;
	uvc_streaming_intf_alt1.iInterface = ret;

	/* Register the function. */
	uvc->func.name = "uvc";
	uvc->func.strings = uvc_function_strings;
	uvc->func.bind = uvc_function_bind;
	uvc->func.unbind = uvc_function_unbind;
	uvc->func.get_alt = uvc_function_get_alt;
	uvc->func.set_alt = uvc_function_set_alt;
	uvc->func.disable = uvc_function_disable;
	uvc->func.setup = uvc_function_setup;

	ret = usb_add_function(c, &uvc->func);
	if (ret)
		goto error;

	return 0;

error:
	kfree(uvc);
	return ret;
}
Beispiel #20
0
/*
 * rtw_android_priv_cmd - handle the Android wireless "PRIV" ioctl.
 * @net: network interface the command targets
 * @ifr: ioctl request; ifr_data points at an android_wifi_priv_cmd
 *       describing a userspace command buffer (buf/total_len/used_len)
 * @cmd: raw ioctl number (dispatch is done by command string, not @cmd)
 *
 * Copies the command string in from userspace, dispatches it by number
 * and copies any textual response back into the same user buffer.
 * Returns 0 on success or a negative errno.
 *
 * Fixes vs. the previous version: priv_cmd.total_len is user-controlled
 * and was used unchecked as a kmalloc/copy size (zero-sized or huge
 * allocations); the copied command was not guaranteed NUL-terminated
 * before being printed/parsed as a string; kfree() had a redundant
 * NULL guard.
 */
int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
{
    /* Upper bound for the user-supplied buffer length; larger requests
     * are treated as malformed rather than allocated blindly. */
    enum { RTW_PRIV_CMD_MAX_LEN = 8 * 1024 };
    int ret = 0;
    char *command = NULL;
    int cmd_num;
    int bytes_written = 0;
    android_wifi_priv_cmd priv_cmd;

    rtw_lock_suspend();

    if (!ifr->ifr_data) {
        ret = -EINVAL;
        goto exit;
    }
    if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
        ret = -EFAULT;
        goto exit;
    }

    /* total_len comes straight from userspace: validate before using it
     * as an allocation size and copy length. */
    if (priv_cmd.total_len <= 0 || priv_cmd.total_len > RTW_PRIV_CMD_MAX_LEN) {
        ret = -EINVAL;
        goto exit;
    }

    //DBG_871X("%s priv_cmd.buf=%p priv_cmd.total_len=%d  priv_cmd.used_len=%d\n",__func__,priv_cmd.buf,priv_cmd.total_len,priv_cmd.used_len);
    command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
    if (!command)
    {
        DBG_871X("%s: failed to allocate memory\n", __FUNCTION__);
        ret = -ENOMEM;
        goto exit;
    }

    if (!access_ok(VERIFY_READ, priv_cmd.buf, priv_cmd.total_len)) {
        DBG_871X("%s: failed to access memory\n", __FUNCTION__);
        ret = -EFAULT;
        goto exit;
    }
    if (copy_from_user(command, (void *)priv_cmd.buf, priv_cmd.total_len)) {
        ret = -EFAULT;
        goto exit;
    }
    /* The buffer is parsed and logged as a C string below; make sure it
     * is terminated even if userspace did not terminate it. */
    command[priv_cmd.total_len - 1] = '\0';

    DBG_871X("%s: Android private cmd \"%s\" on %s\n"
             , __FUNCTION__, command, ifr->ifr_name);

    cmd_num = rtw_android_cmdstr_to_num(command);

    /* START/SETFWPATH are answered even while the interface is down. */
    switch(cmd_num) {
    case ANDROID_WIFI_CMD_START:
        //bytes_written = wl_android_wifi_on(net);
        goto response;
    case ANDROID_WIFI_CMD_SETFWPATH:
        goto response;
    }

    if (!g_wifi_on) {
        DBG_871X("%s: Ignore private cmd \"%s\" - iface %s is down\n"
                 ,__FUNCTION__, command, ifr->ifr_name);
        ret = 0;
        goto exit;
    }

    switch(cmd_num) {

    case ANDROID_WIFI_CMD_STOP:
        //bytes_written = wl_android_wifi_off(net);
        break;

    case ANDROID_WIFI_CMD_SCAN_ACTIVE:
        //rtw_set_scan_mode((_adapter *)rtw_netdev_priv(net), SCAN_ACTIVE);
#ifdef CONFIG_PLATFORM_MSTAR_TITANIA12
#ifdef CONFIG_IOCTL_CFG80211
        (wdev_to_priv(net->ieee80211_ptr))->bandroid_scan = _TRUE;
#endif //CONFIG_IOCTL_CFG80211
#endif //CONFIG_PLATFORM_MSTAR_TITANIA12
        break;
    case ANDROID_WIFI_CMD_SCAN_PASSIVE:
        //rtw_set_scan_mode((_adapter *)rtw_netdev_priv(net), SCAN_PASSIVE);
        break;

    case ANDROID_WIFI_CMD_RSSI:
        bytes_written = rtw_android_get_rssi(net, command, priv_cmd.total_len);
        break;
    case ANDROID_WIFI_CMD_LINKSPEED:
        bytes_written = rtw_android_get_link_speed(net, command, priv_cmd.total_len);
        break;

    case ANDROID_WIFI_CMD_MACADDR:
        bytes_written = rtw_android_get_macaddr(net, command, priv_cmd.total_len);
        break;

    case ANDROID_WIFI_CMD_BLOCK:
        bytes_written = rtw_android_set_block(net, command, priv_cmd.total_len);
        break;

    case ANDROID_WIFI_CMD_RXFILTER_START:
        //bytes_written = net_os_set_packet_filter(net, 1);
        break;
    case ANDROID_WIFI_CMD_RXFILTER_STOP:
        //bytes_written = net_os_set_packet_filter(net, 0);
        break;
    case ANDROID_WIFI_CMD_RXFILTER_ADD:
        //int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
        //bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
        break;
    case ANDROID_WIFI_CMD_RXFILTER_REMOVE:
        //int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
        //bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
        break;

    case ANDROID_WIFI_CMD_BTCOEXSCAN_START:
        /* TBD: BTCOEXSCAN-START */
        break;
    case ANDROID_WIFI_CMD_BTCOEXSCAN_STOP:
        /* TBD: BTCOEXSCAN-STOP */
        break;
    case ANDROID_WIFI_CMD_BTCOEXMODE:
#if 0
        uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
        if (mode == 1)
            net_os_set_packet_filter(net, 0); /* DHCP starts */
        else
            net_os_set_packet_filter(net, 1); /* DHCP ends */
#ifdef WL_CFG80211
        bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command);
#endif
#endif
        break;

    case ANDROID_WIFI_CMD_SETSUSPENDOPT:
        //bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
        break;

    case ANDROID_WIFI_CMD_SETBAND:
        //uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
        //bytes_written = wldev_set_band(net, band);
        break;
    case ANDROID_WIFI_CMD_GETBAND:
        //bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
        break;

    case ANDROID_WIFI_CMD_COUNTRY:
        bytes_written = rtw_android_set_country(net, command, priv_cmd.total_len);
        break;

#ifdef PNO_SUPPORT
    case ANDROID_WIFI_CMD_PNOSSIDCLR_SET:
        //bytes_written = dhd_dev_pno_reset(net);
        break;
    case ANDROID_WIFI_CMD_PNOSETUP_SET:
        //bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
        break;
    case ANDROID_WIFI_CMD_PNOENABLE_SET:
        //uint pfn_enabled = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
        //bytes_written = dhd_dev_pno_enable(net, pfn_enabled);
        break;
#endif

    case ANDROID_WIFI_CMD_P2P_DEV_ADDR:
        bytes_written = rtw_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
        break;
    case ANDROID_WIFI_CMD_P2P_SET_NOA:
        //int skip = strlen(CMD_P2P_SET_NOA) + 1;
        //bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip, priv_cmd.total_len - skip);
        break;
    case ANDROID_WIFI_CMD_P2P_GET_NOA:
        //bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
        break;
    case ANDROID_WIFI_CMD_P2P_SET_PS:
        //int skip = strlen(CMD_P2P_SET_PS) + 1;
        //bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip, priv_cmd.total_len - skip);
        break;

#ifdef CONFIG_IOCTL_CFG80211
    case ANDROID_WIFI_CMD_SET_AP_WPS_P2P_IE:
    {
        int skip = strlen(android_wifi_cmd_str[ANDROID_WIFI_CMD_SET_AP_WPS_P2P_IE]) + 3;
        bytes_written = rtw_cfg80211_set_mgnt_wpsp2pie(net, command + skip, priv_cmd.total_len - skip, *(command + skip - 2) - '0');
        break;
    }
#endif //CONFIG_IOCTL_CFG80211

#ifdef CONFIG_WFD
    case ANDROID_WIFI_CMD_WFD_ENABLE:
    {
        //	Commented by Albert 2012/07/24
        //	We can enable the WFD function by using the following command:
        //	wpa_cli driver wfd-enable

        struct wifi_display_info		*pwfd_info;
        _adapter*	padapter = ( _adapter * ) rtw_netdev_priv(net);

        pwfd_info = &padapter->wfd_info;
        pwfd_info->wfd_enable = _TRUE;
        break;
    }

    case ANDROID_WIFI_CMD_WFD_DISABLE:
    {
        //	Commented by Albert 2012/07/24
        //	We can disable the WFD function by using the following command:
        //	wpa_cli driver wfd-disable

        struct wifi_display_info		*pwfd_info;
        _adapter*	padapter = ( _adapter * ) rtw_netdev_priv(net);

        pwfd_info = &padapter->wfd_info;
        pwfd_info->wfd_enable = _FALSE;
        break;
    }
    case ANDROID_WIFI_CMD_WFD_SET_TCPPORT:
    {
        //	Commented by Albert 2012/07/24
        //	We can set the tcp port number by using the following command:
        //	wpa_cli driver wfd-set-tcpport = 554

        struct wifi_display_info		*pwfd_info;
        _adapter*	padapter = ( _adapter * ) rtw_netdev_priv(net);

        pwfd_info = &padapter->wfd_info;
        pwfd_info->rtsp_ctrlport = ( u16 ) get_int_from_command( priv_cmd.buf );
        break;
    }
    case ANDROID_WIFI_CMD_WFD_SET_MAX_TPUT:
    {
        /* Not implemented. */
        break;
    }
    case ANDROID_WIFI_CMD_WFD_SET_DEVTYPE:
    {
        //	Commented by Albert 2012/08/28
        //	Specify the WFD device type ( WFD source/primary sink )

        struct wifi_display_info		*pwfd_info;
        _adapter*	padapter = ( _adapter * ) rtw_netdev_priv(net);

        pwfd_info = &padapter->wfd_info;
        pwfd_info->wfd_device_type = ( u8 ) get_int_from_command( priv_cmd.buf );

        pwfd_info->wfd_device_type &= WFD_DEVINFO_DUAL;
        break;
    }
#endif
    default:
        DBG_871X("Unknown PRIVATE command %s - ignored\n", command);
        snprintf(command, 3, "OK");
        bytes_written = strlen("OK");
    }

response:
    if (bytes_written >= 0) {
        if ((bytes_written == 0) && (priv_cmd.total_len > 0))
            command[0] = '\0';
        /* Clamp the reply (plus NUL) to the caller's buffer size. */
        if (bytes_written >= priv_cmd.total_len) {
            DBG_871X("%s: bytes_written = %d\n", __FUNCTION__, bytes_written);
            bytes_written = priv_cmd.total_len;
        } else {
            bytes_written++;
        }
        priv_cmd.used_len = bytes_written;
        if (copy_to_user((void *)priv_cmd.buf, command, bytes_written)) {
            DBG_871X("%s: failed to copy data to user buffer\n", __FUNCTION__);
            ret = -EFAULT;
        }
    }
    else {
        ret = bytes_written;
    }

exit:
    rtw_unlock_suspend();
    kfree(command);	/* kfree(NULL) is a no-op; no guard needed */

    return ret;
}
Beispiel #21
0
/* Incoming data */
static void zd1201_usbrx(struct urb *urb)
{
	struct zd1201 *zd = urb->context;
	int free = 0;
	unsigned char *data = urb->transfer_buffer;
	struct sk_buff *skb;
	unsigned char type;

	if (!zd)
		return;

	switch(urb->status) {
		case -EILSEQ:
		case -ENODEV:
		case -ETIME:
		case -ENOENT:
		case -EPIPE:
		case -EOVERFLOW:
		case -ESHUTDOWN:
			dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
			    zd->dev->name, urb->status);
			free = 1;
			goto exit;
	}
	
	if (urb->status != 0 || urb->actual_length == 0)
		goto resubmit;

	type = data[0];
	if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
		memcpy(zd->rxdata, data, urb->actual_length);
		zd->rxlen = urb->actual_length;
		zd->rxdatas = 1;
		wake_up(&zd->rxdataq);
	}
	/* Info frame */
	if (type == ZD1201_PACKET_INQUIRE) {
		int i = 0;
		unsigned short infotype, framelen, copylen;
		framelen = le16_to_cpu(*(__le16*)&data[4]);
		infotype = le16_to_cpu(*(__le16*)&data[6]);

		if (infotype == ZD1201_INF_LINKSTATUS) {
			short linkstatus;

			linkstatus = le16_to_cpu(*(__le16*)&data[8]);
			switch(linkstatus) {
				case 1:
					netif_carrier_on(zd->dev);
					break;
				case 2:
					netif_carrier_off(zd->dev);
					break;
				case 3:
					netif_carrier_off(zd->dev);
					break;
				case 4:
					netif_carrier_on(zd->dev);
					break;
				default:
					netif_carrier_off(zd->dev);
			}
			goto resubmit;
		}
		if (infotype == ZD1201_INF_ASSOCSTATUS) {
			short status = le16_to_cpu(*(__le16*)(data+8));
			int event;
			union iwreq_data wrqu;

			switch (status) {
				case ZD1201_ASSOCSTATUS_STAASSOC:
				case ZD1201_ASSOCSTATUS_REASSOC:
					event = IWEVREGISTERED;
					break;
				case ZD1201_ASSOCSTATUS_DISASSOC:
				case ZD1201_ASSOCSTATUS_ASSOCFAIL:
				case ZD1201_ASSOCSTATUS_AUTHFAIL:
				default:
					event = IWEVEXPIRED;
			}
			memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;

			/* Send event to user space */
			wireless_send_event(zd->dev, event, &wrqu, NULL);

			goto resubmit;
		}
		if (infotype == ZD1201_INF_AUTHREQ) {
			union iwreq_data wrqu;

			memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;
			/* There isn't a event that trully fits this request.
			   We assume that userspace will be smart enough to
			   see a new station being expired and sends back a
			   authstation ioctl to authorize it. */
			wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
			goto resubmit;
		}
		/* Other infotypes are handled outside this handler */
		zd->rxlen = 0;
		while (i < urb->actual_length) {
			copylen = le16_to_cpu(*(__le16*)&data[i+2]);
			/* Sanity check, sometimes we get junk */
			if (copylen+zd->rxlen > sizeof(zd->rxdata))
				break;
			memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen);
			zd->rxlen += copylen;
			i += 64;
		}
		if (i >= urb->actual_length) {
			zd->rxdatas = 1;
			wake_up(&zd->rxdataq);
		}
		goto  resubmit;
	}
	/* Actual data */
	if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
		int datalen = urb->actual_length-1;
		unsigned short len, fc, seq;
		struct hlist_node *node;

		len = ntohs(*(__be16 *)&data[datalen-2]);
		if (len>datalen)
			len=datalen;
		fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
		seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);

		if (zd->monitor) {
			if (datalen < 24)
				goto resubmit;
			if (!(skb = dev_alloc_skb(datalen+24)))
				goto resubmit;
			
			memcpy(skb_put(skb, 2), &data[datalen-16], 2);
			memcpy(skb_put(skb, 2), &data[datalen-2], 2);
			memcpy(skb_put(skb, 6), &data[datalen-14], 6);
			memcpy(skb_put(skb, 6), &data[datalen-22], 6);
			memcpy(skb_put(skb, 6), &data[datalen-8], 6);
			memcpy(skb_put(skb, 2), &data[datalen-24], 2);
			memcpy(skb_put(skb, len), data, len);
			skb->protocol = eth_type_trans(skb, zd->dev);
			zd->stats.rx_packets++;
			zd->stats.rx_bytes += skb->len;
			netif_rx(skb);
			goto resubmit;
		}
			
		if ((seq & IEEE80211_SCTL_FRAG) ||
		    (fc & IEEE80211_FCTL_MOREFRAGS)) {
			struct zd1201_frag *frag = NULL;
			char *ptr;

			if (datalen<14)
				goto resubmit;
			if ((seq & IEEE80211_SCTL_FRAG) == 0) {
				frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
				if (!frag)
					goto resubmit;
				skb = dev_alloc_skb(IEEE80211_DATA_LEN +14+2);
				if (!skb) {
					kfree(frag);
					goto resubmit;
				}
				frag->skb = skb;
				frag->seq = seq & IEEE80211_SCTL_SEQ;
				skb_reserve(skb, 2);
				memcpy(skb_put(skb, 12), &data[datalen-14], 12);
				memcpy(skb_put(skb, 2), &data[6], 2);
				memcpy(skb_put(skb, len), data+8, len);
				hlist_add_head(&frag->fnode, &zd->fraglist);
				goto resubmit;
			}
			hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
				if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
					break;
			if (!frag)
				goto resubmit;
			skb = frag->skb;
			ptr = skb_put(skb, len);
			if (ptr)
				memcpy(ptr, data+8, len);
			if (fc & IEEE80211_FCTL_MOREFRAGS)
				goto resubmit;
			hlist_del_init(&frag->fnode);
			kfree(frag);
		} else {
			if (datalen<14)
/*
 * debug_read_status - debugfs read handler for one gs_port's statistics.
 *
 * Formats the per-port host<->tty byte counters, the in-flight byte
 * deltas, the tty flags and the DTR state into a temporary buffer and
 * hands it to userspace via simple_read_from_buffer().  Returns the
 * number of bytes copied or a negative errno.
 *
 * Fix vs. the previous version: the late "if (tty)" guard was dead code
 * (tty is verified non-NULL right after it is loaded), and the buffer
 * allocation used a redundant "sizeof(char) *" factor.
 */
static ssize_t debug_read_status(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct gs_port *ui_dev = file->private_data;
	struct tty_struct       *tty;
	struct gserial		*gser;
	char *buf;
	unsigned long flags;
	int i = 0;
	int ret;
	int result = 0;

	if (!ui_dev) {
		printk(KERN_ERR "usb: ui_dev is NULL !!\n");
		return -EINVAL;
	}

	tty = ui_dev->port_tty;
	gser = ui_dev->port_usb;

	if (!tty || !gser) {
		printk(KERN_ERR "usb: tty or gser is NULL !!\n");
		return -EINVAL;
	}

	buf = kzalloc(BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Counters are updated from the USB/tty paths under port_lock;
	 * snapshot them consistently. */
	spin_lock_irqsave(&ui_dev->port_lock, flags);

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);

	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
			(ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);

	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
			(ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));

	/* tty was already checked above, so report its flags
	 * unconditionally (the old "if (tty)" guard was dead). */
	i += scnprintf(buf + i, BUF_SIZE - i,
		"tty_flags: %lu\n", tty->flags);

	if (gser->get_dtr) {
		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
		i += scnprintf(buf + i, BUF_SIZE - i,
			"DTR_status: %d\n", result);
	}

	spin_unlock_irqrestore(&ui_dev->port_lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);

	kfree(buf);

	return ret;
}
Beispiel #23
0
/*
 * mxs_saif_probe - probe one MXS SAIF (Serial Audio Interface) instance.
 *
 * Acquires the clock, MMIO region, DMA channel and IRQs, registers the
 * CPU DAI and creates the companion "mxs-pcm-audio" platform device.
 * Returns 0 on success or a negative errno; every acquired resource is
 * released on failure.
 *
 * Fix vs. the previous version: an invalid master_id from the platform
 * data returned -EINVAL directly, leaking @saif and leaving a dangling
 * pointer in mxs_saif[]; it now unwinds through the error ladder.
 */
static int mxs_saif_probe(struct platform_device *pdev)
{
	struct resource *iores, *dmares;
	struct mxs_saif *saif;
	struct mxs_saif_platform_data *pdata;
	int ret = 0;

	if (pdev->id >= ARRAY_SIZE(mxs_saif))
		return -EINVAL;

	pdata = pdev->dev.platform_data;
	if (pdata && pdata->init) {
		ret = pdata->init();
		if (ret)
			return ret;
	}

	saif = kzalloc(sizeof(*saif), GFP_KERNEL);
	if (!saif)
		return -ENOMEM;

	mxs_saif[pdev->id] = saif;
	saif->id = pdev->id;

	/* A SAIF may be clocked from another instance ("master"); default
	 * to clocking itself. */
	saif->master_id = saif->id;
	if (pdata && pdata->get_master_id) {
		saif->master_id = pdata->get_master_id(saif->id);
		if (saif->master_id < 0 ||
			saif->master_id >= ARRAY_SIZE(mxs_saif)) {
			ret = -EINVAL;
			goto failed_clk;
		}
	}

	saif->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(saif->clk)) {
		ret = PTR_ERR(saif->clk);
		dev_err(&pdev->dev, "Cannot get the clock: %d\n",
			ret);
		goto failed_clk;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to get io resource: %d\n",
			ret);
		goto failed_get_resource;
	}

	if (!request_mem_region(iores->start, resource_size(iores),
				"mxs-saif")) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto failed_get_resource;
	}

	saif->base = ioremap(iores->start, resource_size(iores));
	if (!saif->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENODEV;
		goto failed_ioremap;
	}

	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmares) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to get dma resource: %d\n",
			ret);
		goto failed_ioremap;
	}
	saif->dma_param.chan_num = dmares->start;

	saif->irq = platform_get_irq(pdev, 0);
	if (saif->irq < 0) {
		ret = saif->irq;
		dev_err(&pdev->dev, "failed to get irq resource: %d\n",
			ret);
		goto failed_get_irq1;
	}

	saif->dev = &pdev->dev;
	ret = request_irq(saif->irq, mxs_saif_irq, 0, "mxs-saif", saif);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto failed_get_irq1;
	}

	saif->dma_param.chan_irq = platform_get_irq(pdev, 1);
	if (saif->dma_param.chan_irq < 0) {
		ret = saif->dma_param.chan_irq;
		dev_err(&pdev->dev, "failed to get dma irq resource: %d\n",
			ret);
		goto failed_get_irq2;
	}

	platform_set_drvdata(pdev, saif);

	ret = snd_soc_register_dai(&pdev->dev, &mxs_saif_dai);
	if (ret) {
		dev_err(&pdev->dev, "register DAI failed\n");
		goto failed_register;
	}

	saif->soc_platform_pdev = platform_device_alloc(
					"mxs-pcm-audio", pdev->id);
	if (!saif->soc_platform_pdev) {
		ret = -ENOMEM;
		goto failed_pdev_alloc;
	}

	platform_set_drvdata(saif->soc_platform_pdev, saif);
	ret = platform_device_add(saif->soc_platform_pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to add soc platform device\n");
		goto failed_pdev_add;
	}

	return 0;

failed_pdev_add:
	platform_device_put(saif->soc_platform_pdev);
failed_pdev_alloc:
	snd_soc_unregister_dai(&pdev->dev);
failed_register:
failed_get_irq2:
	free_irq(saif->irq, saif);
failed_get_irq1:
	iounmap(saif->base);
failed_ioremap:
	release_mem_region(iores->start, resource_size(iores));
failed_get_resource:
	clk_put(saif->clk);
failed_clk:
	/* Clear the global slot before freeing so no dangling pointer
	 * remains in mxs_saif[]. */
	mxs_saif[pdev->id] = NULL;
	kfree(saif);

	return ret;
}
/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage.  Use this call to set up the ports you will be
 * exporting through USB.  Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned			i;
	struct usb_cdc_line_coding	coding;
	int				status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
				| TTY_DRIVER_RESET_TERMIOS;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	/* Default CDC line coding, also 9600-8-N-1.  Per the CDC PSTN
	 * spec, bCharFormat carries the stop-bit encoding and bDataBits
	 * the data-bit count; the previous code had the two swapped.
	 */
	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	gserial_wq = create_singlethread_workqueue("k_gserial");
	if (!gserial_wq) {
		status = -ENOMEM;
		goto fail;
	}

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device	*tty_dev;

		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				__func__, i, PTR_ERR(tty_dev));
	}

	for (i = 0; i < count; i++)
		usb_debugfs_init(ports[i].port, i);

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			count, (count == 1) ? "" : "s");

	return status;
fail:
	while (count--)
		kfree(ports[count].port);
	if (gserial_wq)
		destroy_workqueue(gserial_wq);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
Beispiel #25
0
/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			   const struct cpumask *clip_cpus)
{
	struct thermal_cooling_device *cool_dev;
	struct cpufreq_cooling_device *cpufreq_dev = NULL;
	unsigned int min = 0, max = 0;
	char dev_name[THERMAL_NAME_LENGTH];
	int ret = 0, i;
	struct cpufreq_policy policy;

	/* Verify that all the clip cpus have same freq_min, freq_max limit */
	for_each_cpu(i, clip_cpus) {
		/* cpufreq_get_policy() returns 0 on success; skip cpus
		 * without a policy instead of failing.  (The check was
		 * previously inverted, which skipped every valid policy
		 * and disabled this verification entirely.)
		 */
		if (cpufreq_get_policy(&policy, i))
			continue;
		if (min == 0 && max == 0) {
			min = policy.cpuinfo.min_freq;
			max = policy.cpuinfo.max_freq;
		} else {
			if (min != policy.cpuinfo.min_freq ||
			    max != policy.cpuinfo.max_freq)
				return ERR_PTR(-EINVAL);
		}
	}
	cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
			      GFP_KERNEL);
	if (!cpufreq_dev)
		return ERR_PTR(-ENOMEM);

	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
	if (ret) {
		kfree(cpufreq_dev);
		return ERR_PTR(-EINVAL);
	}

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_dev->id);

	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
						      &cpufreq_cooling_ops);
	if (IS_ERR(cool_dev)) {
		release_idr(&cpufreq_idr, cpufreq_dev->id);
		kfree(cpufreq_dev);
		return cool_dev;
	}
	cpufreq_dev->cool_dev = cool_dev;
	cpufreq_dev->cpufreq_state = 0;
	mutex_lock(&cooling_cpufreq_lock);

	/* Register the notifier for first cpufreq cooling device */
	if (cpufreq_dev_count == 0)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);
	cpufreq_dev_count++;

	mutex_unlock(&cooling_cpufreq_lock);

	return cool_dev;
}
/*
 * gs_buf_free
 *
 * Release the storage owned by a circular buffer and clear its data
 * pointer so a stale reference cannot be freed twice.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	char *data = gb->buf_buf;

	gb->buf_buf = NULL;
	kfree(data);
}
static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
     
{				/* @ ADG ou ATO selon le cas */
	int i;
	unsigned char IndexCard;
	void __iomem *pmem;
	int ret = 0;
	volatile unsigned char byte_reset_it;
	struct st_ram_io *adgl;
	void __user *argp = (void __user *)arg;

	/* In general, the device is only openable by root anyway, so we're not
	   particularly concerned that bogus ioctls can flood the console. */

	adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
	if (!adgl)
		return -ENOMEM;

	if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
		kfree(adgl);
		return -EFAULT;
	}
	
	IndexCard = adgl->num_card-1;
	 
	if(cmd != 0 && cmd != 6 &&
	   ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
		static int warncount = 10;
		if (warncount) {
			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
			warncount--;
		}
		kfree(adgl);
		return -EINVAL;
	}

	switch (cmd) {
		
	case 0:
		pmem = apbs[IndexCard].RamIO;
		for (i = 0; i < sizeof(struct st_ram_io); i++)
			((unsigned char *)adgl)[i]=readb(pmem++);
		if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
			ret = -EFAULT;
		break;
	case 1:
		pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
		for (i = 0; i < 4; i++)
			adgl->conf_end_test[i] = readb(pmem++);
		for (i = 0; i < 2; i++)
			adgl->error_code[i] = readb(pmem++);
		for (i = 0; i < 4; i++)
			adgl->parameter_error[i] = readb(pmem++);
		pmem = apbs[IndexCard].RamIO + VERS;
		adgl->vers = readb(pmem);
		pmem = apbs[IndexCard].RamIO + TYPE_CARD;
		for (i = 0; i < 20; i++)
			adgl->reserv1[i] = readb(pmem++);
		*(int *)&adgl->reserv1[20] =  
			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) + 
			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) + 
			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) );

		if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
			ret = -EFAULT;
		break;
	case 2:
		pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
		for (i = 0; i < 10; i++)
			writeb(0xff, pmem++);
		writeb(adgl->data_from_pc_ready, 
		       apbs[IndexCard].RamIO + DATA_FROM_PC_READY);

		writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
		
		for (i = 0; i < MAX_BOARD; i++) {
			if (apbs[i].RamIO) {
				byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC);
			}
		}
		break;
	case 3:
		pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
		writeb(adgl->tic_des_from_pc, pmem);
		break;
	case 4:
		pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
		adgl->tic_owner_to_pc     = readb(pmem++);
		adgl->numcard_owner_to_pc = readb(pmem);
		if (copy_to_user(argp, adgl,sizeof(struct st_ram_io)))
			ret = -EFAULT;
		break;
	case 5:
		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
		writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
		writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
		break;
	case 6:
		printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.3 $)\n");
		printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards);
		printk(KERN_INFO "Segment of board ........... %X\n", (int) mem);
		printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq);
		for (i = 0; i < MAX_BOARD; i++) {
			int serial;
			char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];

			if (!apbs[i].RamIO)
				continue;

			for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
				boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
			boardname[serial] = 0;

			printk(KERN_INFO "Prom version board %d ....... V%d.%d %s",
			       i+1,
			       (int)(readb(apbs[IndexCard].RamIO + VERS) >> 4),
			       (int)(readb(apbs[IndexCard].RamIO + VERS) & 0xF),
			       boardname);


			serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + 
				(readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + 
				(readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );

			if (serial != 0)
				printk(" S/N %d\n", serial);
			else
				printk("\n");
		}
		if (DeviceErrorCount != 0)
			printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
		if (ReadErrorCount != 0)
			printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
		if (WriteErrorCount != 0)
			printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
		if (waitqueue_active(&FlagSleepRec))
			printk(KERN_INFO "Process in read pending\n");
		for (i = 0; i < MAX_BOARD; i++) {
			if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend))
				printk(KERN_INFO "Process in write pending board %d\n",i+1);
		}
		break;
	default:
		printk(KERN_INFO "APPLICOM driver ioctl, unknown function code %d\n",cmd) ;
		ret = -EINVAL;
		break;
	}
	Dummy = readb(apbs[IndexCard].RamIO + VERS);
	kfree(adgl);
	return 0;
}
/*
 * gs_free_req
 *
 * Release a usb_request together with the data buffer attached to it.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	void *payload = req->buf;

	kfree(payload);
	usb_ep_free_request(ep, req);
}
Beispiel #29
0
/*
 * fb_show_logo_line - draw one copy-row of the boot logo.
 *
 * Draws @n instances of @logo side by side at vertical offset @y,
 * optionally rotated.  Temporarily swaps in a private pseudo palette
 * when the visual needs one, and restores it before returning.
 *
 * Returns the logo height on success, 0 if nothing was drawn.
 */
static int fb_show_logo_line(struct fb_info *info, int rotate,
			     const struct linux_logo *logo, int y,
			     unsigned int n)
{
	u32 *cmap_buf = NULL;
	u32 *old_pseudo_palette = NULL;
	unsigned char *depth_mapped = NULL;
	unsigned char *rotated = NULL;
	struct fb_image image;

	/* Nothing to draw when the frame buffer is unmapped/suspended
	 * or the driver is built as a module. */
	if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_MODULE)
		return 0;

	image.depth = 8;
	image.data = logo->data;

	if (fb_logo.needs_cmapreset)
		fb_set_logocmap(info, logo);

	if (fb_logo.needs_truepalette || fb_logo.needs_directpalette) {
		/* Build a temporary 256-entry palette and make it the
		 * active pseudo palette for the duration of the draw. */
		cmap_buf = kmalloc(256 * 4, GFP_KERNEL);
		if (cmap_buf == NULL)
			return 0;

		if (fb_logo.needs_truepalette)
			fb_set_logo_truepalette(info, logo, cmap_buf);
		else
			fb_set_logo_directpalette(info, logo, cmap_buf);

		old_pseudo_palette = info->pseudo_palette;
		info->pseudo_palette = cmap_buf;
	}

	if (fb_logo.depth <= 4) {
		/* Expand the low-depth logo pixels into an 8-bit image. */
		depth_mapped = kmalloc(logo->width * logo->height, GFP_KERNEL);
		if (depth_mapped == NULL) {
			/* Undo the palette swap before giving up. */
			kfree(cmap_buf);
			if (old_pseudo_palette)
				info->pseudo_palette = old_pseudo_palette;
			return 0;
		}
		image.data = depth_mapped;
		fb_set_logo(info, logo, depth_mapped, fb_logo.depth);
	}

	image.dx = 0;
	image.dy = y;
	image.width = logo->width;
	image.height = logo->height;

	if (rotate) {
		/* Rotation is best-effort: on allocation failure the logo
		 * is simply drawn unrotated. */
		rotated = kmalloc(logo->width * logo->height, GFP_KERNEL);
		if (rotated)
			fb_rotate_logo(info, rotated, &image, rotate);
	}

	fb_do_show_logo(info, &image, rotate, n);

	kfree(cmap_buf);
	if (old_pseudo_palette != NULL)
		info->pseudo_palette = old_pseudo_palette;
	kfree(depth_mapped);
	kfree(rotated);
	return logo->height;
}
Beispiel #30
0
static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct INFTLrecord *inftl;
	unsigned long temp;

	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
		return;
	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
	if (memcmp(mtd->name, "DiskOnChip", 10))
		return;

	if (!mtd->block_isbad) {
		printk(KERN_ERR
"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
		return;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);

	inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);

	if (!inftl) {
		printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
		return;
	}

	inftl->mbd.mtd = mtd;
	inftl->mbd.devnum = -1;

	inftl->mbd.tr = tr;

	if (INFTL_mount(inftl) < 0) {
		printk(KERN_WARNING "INFTL: could not mount device\n");
		kfree(inftl);
		return;
	}

	/* OK, it's a new one. Set up all the data structures. */

	/* Calculate geometry */
	inftl->cylinders = 1024;
	inftl->heads = 16;

	temp = inftl->cylinders * inftl->heads;
	inftl->sectors = inftl->mbd.size / temp;
	if (inftl->mbd.size % temp) {
		inftl->sectors++;
		temp = inftl->cylinders * inftl->sectors;
		inftl->heads = inftl->mbd.size / temp;

		if (inftl->mbd.size % temp) {
			inftl->heads++;
			temp = inftl->heads * inftl->sectors;
			inftl->cylinders = inftl->mbd.size / temp;
		}
	}

	if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
		/*
		  Oh no we don't have
		   mbd.size == heads * cylinders * sectors
		*/
		printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
		       "match size of 0x%lx.\n", inftl->mbd.size);
		printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
			"(== 0x%lx sects)\n",
			inftl->cylinders, inftl->heads , inftl->sectors,
			(long)inftl->cylinders * (long)inftl->heads *
			(long)inftl->sectors );
	}

	if (add_mtd_blktrans_dev(&inftl->mbd)) {
		kfree(inftl->PUtable);
		kfree(inftl->VUtable);
		kfree(inftl);
		return;
	}
#ifdef PSYCHO_DEBUG
	printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
#endif
	return;
}