Example #1
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
					 void *priv, const char *name, u32 caps,
					 u8 available_las)
{
	struct cec_adapter *adap;
	int res;

	if (WARN_ON(!caps))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!ops))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS))
		return ERR_PTR(-EINVAL);
	adap = kzalloc(sizeof(*adap), GFP_KERNEL);
	if (!adap)
		return ERR_PTR(-ENOMEM);
	strlcpy(adap->name, name, sizeof(adap->name));
	adap->phys_addr = CEC_PHYS_ADDR_INVALID;
	adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
	adap->capabilities = caps;
	adap->available_log_addrs = available_las;
	adap->sequence = 0;
	adap->ops = ops;
	adap->priv = priv;
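	/* no physical addresses of remote devices are known yet (0xffff = invalid) */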
	memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
	mutex_init(&adap->lock);
	INIT_LIST_HEAD(&adap->transmit_queue);
	INIT_LIST_HEAD(&adap->wait_queue);
	init_waitqueue_head(&adap->kthread_waitq);

	adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
	if (IS_ERR(adap->kthread)) {
		pr_err("cec-%s: kernel_thread() failed\n", name);
		res = PTR_ERR(adap->kthread);
		kfree(adap);
		return ERR_PTR(res);
	}

	if (!(caps & CEC_CAP_RC))
		return adap;

#if IS_REACHABLE(CONFIG_RC_CORE)
	/* Prepare the RC input device */
	adap->rc = rc_allocate_device(RC_DRIVER_SCANCODE);
	if (!adap->rc) {
		pr_err("cec-%s: failed to allocate memory for rc_dev\n",
		       name);
		kthread_stop(adap->kthread);
		kfree(adap);
		return ERR_PTR(-ENOMEM);
	}

	snprintf(adap->input_name, sizeof(adap->input_name),
		 "RC for %s", name);
	snprintf(adap->input_phys, sizeof(adap->input_phys),
		 "%s/input0", name);

	adap->rc->input_name = adap->input_name;
	adap->rc->input_phys = adap->input_phys;
	adap->rc->input_id.bustype = BUS_CEC;
	adap->rc->input_id.vendor = 0;
	adap->rc->input_id.product = 0;
	adap->rc->input_id.version = 1;
	adap->rc->driver_name = CEC_NAME;
	adap->rc->allowed_protocols = RC_BIT_CEC;
	adap->rc->priv = adap;
	adap->rc->map_name = RC_MAP_CEC;
	adap->rc->timeout = MS_TO_NS(100);
#else
	adap->capabilities &= ~CEC_CAP_RC;
#endif
	return adap;
}
Example #2
/*
 * dim2_probe - dim2 probe handler
 * @pdev: platform device structure
 *
 * Register the dim2 interface with mostcore and initialize it.
 * Return 0 on success, negative on failure.
 */
static int dim2_probe(struct platform_device *pdev)
{
	struct dim2_hdm *dev;
	struct resource *res;
	int ret, i;
	struct kobject *kobj;
	int irq;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->atx_idx = -1;

	platform_set_drvdata(pdev, dev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->io_base))
		return PTR_ERR(dev->io_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get ahb0_int irq\n");
		return -ENODEV;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
			       "dim2_ahb0_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
		return ret;
	}

	irq = platform_get_irq(pdev, 1);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get mlb_int irq\n");
		return -ENODEV;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
			       "dim2_mlb_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
		return ret;
	}

	init_waitqueue_head(&dev->netinfo_waitq);
	dev->deliver_netinfo = 0;
	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
					"dim2_netinfo");
	if (IS_ERR(dev->netinfo_task))
		return PTR_ERR(dev->netinfo_task);

	for (i = 0; i < DMA_CHANNELS; i++) {
		struct most_channel_capability *cap = dev->capabilities + i;
		struct hdm_channel *hdm_ch = dev->hch + i;

		INIT_LIST_HEAD(&hdm_ch->pending_list);
		INIT_LIST_HEAD(&hdm_ch->started_list);
		hdm_ch->is_initialized = false;
		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);

		cap->name_suffix = hdm_ch->name;
		cap->direction = MOST_CH_RX | MOST_CH_TX;
		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				 MOST_CH_ISOC | MOST_CH_SYNC;
		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
	}

	{
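		/* pick a printf format that matches the width of res->start */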
		const char *fmt;

		if (sizeof(res->start) == sizeof(long long))
			fmt = "dim2-%016llx";
		else if (sizeof(res->start) == sizeof(long))
			fmt = "dim2-%016lx";
		else
			fmt = "dim2-%016x";

		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
	}

	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
	dev->most_iface.description = dev->name;
	dev->most_iface.num_channels = DMA_CHANNELS;
	dev->most_iface.channel_vector = dev->capabilities;
	dev->most_iface.configure = configure_channel;
	dev->most_iface.enqueue = enqueue;
	dev->most_iface.poison_channel = poison_channel;
	dev->most_iface.request_netinfo = request_netinfo;

	kobj = most_register_interface(&dev->most_iface);
	if (IS_ERR(kobj)) {
		ret = PTR_ERR(kobj);
		dev_err(&pdev->dev, "failed to register MOST interface\n");
		goto err_stop_thread;
	}

	ret = dim2_sysfs_probe(&dev->bus, kobj);
	if (ret)
		goto err_unreg_iface;

	ret = startup_dim(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize DIM2\n");
		goto err_destroy_bus;
	}

	return 0;

err_destroy_bus:
	dim2_sysfs_destroy(&dev->bus);
err_unreg_iface:
	most_deregister_interface(&dev->most_iface);
err_stop_thread:
	kthread_stop(dev->netinfo_task);

	return ret;
}
Example #3
static int ft1000_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_device *dev;
	unsigned numaltsetting;
	int i, ret = 0, size;

	struct ft1000_usb *ft1000dev;
	struct ft1000_info *pft1000info = NULL;
	const struct firmware *dsp_fw;

	ft1000dev = kzalloc(sizeof(struct ft1000_usb), GFP_KERNEL);
	if (!ft1000dev)
		return -ENOMEM;

	dev = interface_to_usbdev(interface);
	DEBUG("ft1000_probe: usb device descriptor info:\n");
	DEBUG("ft1000_probe: number of configuration is %d\n",
	      dev->descriptor.bNumConfigurations);

	ft1000dev->dev = dev;
	ft1000dev->status = 0;
	ft1000dev->net = NULL;
	ft1000dev->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
	ft1000dev->rx_urb = usb_alloc_urb(0, GFP_ATOMIC);

	DEBUG("ft1000_probe is called\n");
	numaltsetting = interface->num_altsetting;
	DEBUG("ft1000_probe: number of alt settings is :%d\n", numaltsetting);
	iface_desc = interface->cur_altsetting;
	DEBUG("ft1000_probe: number of endpoints is %d\n",
	      iface_desc->desc.bNumEndpoints);
	DEBUG("ft1000_probe: descriptor type is %d\n",
	      iface_desc->desc.bDescriptorType);
	DEBUG("ft1000_probe: interface number is %d\n",
	      iface_desc->desc.bInterfaceNumber);
	DEBUG("ft1000_probe: alternatesetting is %d\n",
	      iface_desc->desc.bAlternateSetting);
	DEBUG("ft1000_probe: interface class is %d\n",
	      iface_desc->desc.bInterfaceClass);
	DEBUG("ft1000_probe: control endpoint info:\n");
	DEBUG("ft1000_probe: descriptor0 type -- %d\n",
	      iface_desc->endpoint[0].desc.bmAttributes);
	DEBUG("ft1000_probe: descriptor1 type -- %d\n",
	      iface_desc->endpoint[1].desc.bmAttributes);
	DEBUG("ft1000_probe: descriptor2 type -- %d\n",
	      iface_desc->endpoint[2].desc.bmAttributes);

	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		endpoint =
		    (struct usb_endpoint_descriptor *)&iface_desc->
		    endpoint[i].desc;
		DEBUG("endpoint %d\n", i);
		DEBUG("bEndpointAddress=%x, bmAttributes=%x\n",
		      endpoint->bEndpointAddress, endpoint->bmAttributes);
		if ((endpoint->bEndpointAddress & USB_DIR_IN)
		    && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			USB_ENDPOINT_XFER_BULK)) {
			ft1000dev->bulk_in_endpointAddr =
			    endpoint->bEndpointAddress;
			DEBUG("ft1000_probe: in: %d\n",
			      endpoint->bEndpointAddress);
		}

		if (!(endpoint->bEndpointAddress & USB_DIR_IN)
		    && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			USB_ENDPOINT_XFER_BULK)) {
			ft1000dev->bulk_out_endpointAddr =
			    endpoint->bEndpointAddress;
			DEBUG("ft1000_probe: out: %d\n",
			      endpoint->bEndpointAddress);
		}
	}

	DEBUG("bulk_in=%d, bulk_out=%d\n", ft1000dev->bulk_in_endpointAddr,
	      ft1000dev->bulk_out_endpointAddr);

	ret = request_firmware(&dsp_fw, "ft3000.img", &dev->dev);
	if (ret < 0) {
		pr_err("Error request_firmware().\n");
		goto err_fw;
	}

	size = max_t(uint, dsp_fw->size, 4096);
	pFileStart = kmalloc(size, GFP_KERNEL);

	if (!pFileStart) {
		release_firmware(dsp_fw);
		ret = -ENOMEM;
		goto err_fw;
	}

	memcpy(pFileStart, dsp_fw->data, dsp_fw->size);
	FileLength = dsp_fw->size;
	release_firmware(dsp_fw);

	DEBUG("ft1000_probe: start downloading dsp image...\n");

	ret = init_ft1000_netdev(ft1000dev);
	if (ret)
		goto err_load;

	pft1000info = netdev_priv(ft1000dev->net);

	DEBUG("In probe: pft1000info=%p\n", pft1000info);
	ret = dsp_reload(ft1000dev);
	if (ret) {
		pr_err("Problem with DSP image loading\n");
		goto err_load;
	}

	gPollingfailed = FALSE;
	ft1000dev->pPollThread =
	    kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");

	if (IS_ERR(ft1000dev->pPollThread)) {
		ret = PTR_ERR(ft1000dev->pPollThread);
		goto err_load;
	}

	msleep(500);

	while (!pft1000info->CardReady) {
		if (gPollingfailed) {
			ret = -EIO;
			goto err_thread;
		}
		msleep(100);
		DEBUG("ft1000_probe::Waiting for Card Ready\n");
	}

	DEBUG("ft1000_probe::Card Ready!!!! Registering network device\n");

	ret = reg_ft1000_netdev(ft1000dev, interface);
	if (ret)
		goto err_thread;

	ret = ft1000_init_proc(ft1000dev->net);
	if (ret)
		goto err_proc;

	ft1000dev->NetDevRegDone = 1;

	return 0;

err_proc:
	unregister_netdev(ft1000dev->net);
	free_netdev(ft1000dev->net);
err_thread:
	kthread_stop(ft1000dev->pPollThread);
err_load:
	kfree(pFileStart);
err_fw:
	kfree(ft1000dev);
	return ret;
}
Example #4
static int ucb1400_ts_thread(void *_ucb)
{
	struct ucb1400 *ucb = _ucb;
	struct task_struct *tsk = current;
	int valid = 0;
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler(tsk, SCHED_FIFO, &param);

	set_freezable();

	while (!kthread_should_stop()) {
		unsigned int x, y, p;
		long timeout;
		unsigned int i;

		ucb->ts_restart = 0;

		if (ucb->irq_pending) {
			ucb->irq_pending = 0;
			ucb1400_handle_pending_irq(ucb);
		}

		p = 0;
		x = 0;
		y = 0;
		for (i = 0; i < 8; i++) {
			ucb1400_adc_enable(ucb);
			p += ucb1400_ts_read_pressure(ucb);
			x += ucb1400_ts_read_xpos(ucb);
			y += ucb1400_ts_read_ypos(ucb);
			ucb1400_adc_disable(ucb);
			udelay(30);
		}
		x /= i;
		y /= i;
		p /= i;

		/* Switch back to interrupt mode. */
		ucb1400_ts_mode_int(ucb);

		msleep(10);

		if (ucb1400_ts_pen_down(ucb)) {
			ucb1400_ts_irq_enable(ucb);

			/*
			 * If we spat out a valid sample set last time,
			 * spit out a "pen off" sample here.
			 */
			if (valid) {
				ucb1400_ts_event_release(ucb->ts_idev);
				valid = 0;
			}

			timeout = MAX_SCHEDULE_TIMEOUT;
		} else {
			valid = 1;
			ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
			timeout = msecs_to_jiffies(10);
		}

		wait_event_interruptible_timeout(ucb->ts_wait,
			ucb->irq_pending || ucb->ts_restart || kthread_should_stop(),
			timeout);
		try_to_freeze();
	}

	/* Send the "pen off" if we are stopping with the pen still active */
	if (valid)
		ucb1400_ts_event_release(ucb->ts_idev);

	ucb->ts_task = NULL;
	return 0;
}

/*
 * A restriction with interrupts exists when using the ucb1400, as
 * the codec read/write routines may sleep while waiting for codec
 * access completion and use semaphores for access control to the
 * AC97 bus.  A complete codec read cycle could take anywhere from
 * 60 to 100 usec, so we *definitely* don't want to spin inside the
 * interrupt handler waiting for codec access.  So, we handle the
 * interrupt by scheduling an RT kernel thread to run in process
 * context instead of interrupt context.
 */
static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
{
	struct ucb1400 *ucb = devid;

	if (irqnr == ucb->irq) {
		disable_irq(ucb->irq);
		ucb->irq_pending = 1;
		wake_up(&ucb->ts_wait);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static int ucb1400_ts_open(struct input_dev *idev)
{
	struct ucb1400 *ucb = input_get_drvdata(idev);
	int ret = 0;

	BUG_ON(ucb->ts_task);

	ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
	if (IS_ERR(ucb->ts_task)) {
		ret = PTR_ERR(ucb->ts_task);
		ucb->ts_task = NULL;
	}

	return ret;
}

static void ucb1400_ts_close(struct input_dev *idev)
{
	struct ucb1400 *ucb = input_get_drvdata(idev);

	if (ucb->ts_task) {
		kthread_stop(ucb->ts_task);
		while (ucb->ts_task != NULL)
			udelay(100);
	}
	ucb1400_ts_irq_disable(ucb);
	ucb1400_reg_write(ucb, UCB_TS_CR, 0);
}

#ifdef CONFIG_PM
static int ucb1400_ts_resume(struct device *dev)
{
	struct ucb1400 *ucb = dev_get_drvdata(dev);

	if (ucb->ts_task) {
		/*
		 * Restart the TS thread to ensure the
		 * TS interrupt mode is set up again
		 * after sleep.
		 */
		ucb->ts_restart = 1;
		wake_up(&ucb->ts_wait);
	}
	return 0;
}
#else
#define ucb1400_ts_resume NULL
#endif

#ifndef NO_IRQ
#define NO_IRQ	0
#endif

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.
 */
static int ucb1400_detect_irq(struct ucb1400 *ucb)
{
	unsigned long mask, timeout;
#if CONFIG_TOUCHSCREEN_UCB1400_IRQ == 0
	mask = probe_irq_on();
	if (!mask) {
		probe_irq_off(mask);
		return -EBUSY;
	}

	/* Enable the ADC interrupt. */
	ucb1400_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1400_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0);

	/* Cause an ADC interrupt. */
	ucb1400_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1400_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/* Wait for the conversion to complete. */
	timeout = jiffies + HZ/2;
	while (!(ucb1400_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VALID)) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
			probe_irq_off(mask);
			return -ENODEV;
		}
	}
	ucb1400_reg_write(ucb, UCB_ADC_CR, 0);

	/* Disable and clear interrupt. */
	ucb1400_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1400_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0);

	/* Read triggered interrupt. */
	ucb->irq = probe_irq_off(mask);
#else
	ucb->irq = CONFIG_TOUCHSCREEN_UCB1400_IRQ;
#endif
	if (ucb->irq < 0 || ucb->irq == NO_IRQ)
		return -ENODEV;

	return 0;
}
Example #5
static long
gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long val;

	switch (cmd) {
		case GPIO_CMD_SET_BTN_RST:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			if (val)
				DO_RESET();
			break;
		case GPIO_CMD_GET_BTN_RST:
			val = BUTTON_RESET();
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_SET_LED_BTN_WLAN:
			if (vs_sysid != VS_SYSID_ARETE)
				return -EFAULT;
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			gpio_set(LED_BTN_WLAN_MASK, !val ? LED_BTN_WLAN_MASK : 0UL);
			break;
		case GPIO_CMD_GET_BTN_WLAN:
			if (vs_sysid != VS_SYSID_ARETE)
				return -EFAULT;
			val = BUTTON_WLAN();
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_SET_LEDS:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			gpio_set(MASK_LEDS, led2reg(val));
			break;
		case GPIO_CMD_GET_LEDS:
			val = LEDS(gpio_get());
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_SET_LED_POWER:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			gpio_set(LED_POWER_MASK, val ? LED_POWER_MASK : 0UL);
			break;
		case GPIO_CMD_SET_LED_BLUE:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			gpio_set(LED_BLUE_MASK, val ? LED_BLUE_MASK : 0UL);
			break;
		case GPIO_CMD_SET_LED_GREEN:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			gpio_set(LED_GREEN_MASK, val ? LED_GREEN_MASK : 0UL);
			break;

		// buzzer
		case GPIO_CMD_SET_BUZZER:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;
			if (val == 1)
				set_buzzer(1);
			else if(val == 0)
				set_buzzer(0);
			else
				return -EINVAL;
			break;
		case GPIO_CMD_GET_BUZZER:
			val = (gpio_get() & BUZZER_MASK) ? 1 : 0;
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_SET_BUZZER_FRQ:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
				return -EFAULT;

			if((val < FREQ_MIN) || (val > FREQ_MAX))
				return -EINVAL;

			// check whether the buzzer thread has already been started
			if (freq_thread != NULL) {
				kthread_stop(freq_thread);
				freq_thread = NULL;
			}

			buzzer_freq = val;
			freq_thread = kthread_run(buzzer_thread, NULL, "buzzer thread");
			if (IS_ERR(freq_thread)) {
				printk("Error starting kthread\n");
				freq_thread = NULL;
			}
			break;
		case GPIO_CMD_GET_BUZZER_FRQ:
			val = buzzer_freq;
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;

		// external GPIOs
		case GPIO_CMD_SET:
			{
				struct gpio_struct set;
				
				if (copy_from_user(&set, (struct gpio_struct *)arg, sizeof(set)))
					return -EFAULT;
				
				gpio_ext_set(set.mask, set.value);
			}
			break;
		case GPIO_CMD_GET:
			val = gpio_ext_get();
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_SET_CTRL:
			{
				struct gpio_struct set;

				if (copy_from_user(&set, (struct gpio_struct *)arg, sizeof(set)))
					return -EFAULT;

				// relays are not to be switched to input and isolated inputs are not to be switched to output
				if (vs_sysid == VS_SYSID_ALENA) {
					if ((set.mask & GPIO_BIT_0 && set.value & GPIO_BIT_0) ||
					    (set.mask & GPIO_BIT_1 && set.value & GPIO_BIT_1) ||
					    (set.mask & GPIO_BIT_6 && !(set.value & GPIO_BIT_6)) ||
					    (set.mask & GPIO_BIT_7 && !(set.value & GPIO_BIT_7)))
						return -EINVAL;
				}
				gpio_ext_set_mode(set.mask, set.value);
			}
			break;
		case GPIO_CMD_GET_CTRL:
			val = gpio_ext_get_mode();
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_SET_IRQMASK:
			{
				struct gpio_struct set;

				if (copy_from_user(&set, (struct gpio_struct *)arg, sizeof(set)))
					return -EFAULT;

				gpio_ext_set_irqmask(set.mask, set.value);
			}
			break;
		case GPIO_CMD_GET_IRQMASK:
			val = gpio_ext_get_irqmask();
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_GET_CHANGE:
			val = gpio_ext_get_change();
			if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
				return -EFAULT;
			break;
		case GPIO_CMD_GET_CHANGES:
			if (copy_from_user(&val, (unsigned long *)arg, sizeof(unsigned long)))
				return -EFAULT;
			if (val >= NUMBER_OF_GPIOS)
				return -EFAULT;
			if (copy_to_user((unsigned long *)arg, &gpio_ext_irq_changes[val],
					 sizeof(gpio_ext_irq_changes[0])))
				return -EFAULT;
			break;
		default:
			return -EINVAL;
	}
	return 0;
}
Example #6
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
Example #7
static void __exit xio_hello_cleanup_module(void)
{
	/* Need to gracefully stop the client */
	kthread_stop(xio_main_th);
}
Example #8
static int __init mpq_dmx_tspp_plugin_init(void)
{
	int i;
	int j;
	int ret;

	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);

	for (i = 0; i < TSIF_COUNT; i++) {
		mpq_dmx_tspp_info.tsif[i].buffer_count =
				TSPP_BUFFER_COUNT(tspp_out_buffer_size);

		mpq_dmx_tspp_info.tsif[i].aggregate_ids =
			vzalloc(mpq_dmx_tspp_info.tsif[i].buffer_count *
				sizeof(int));
		if (NULL == mpq_dmx_tspp_info.tsif[i].aggregate_ids) {
			MPQ_DVB_ERR_PRINT(
				"%s: Failed to allocate memory for buffer descriptors aggregation\n",
				__func__);
			for (j = 0; j < i; j++) {
				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
				vfree(mpq_dmx_tspp_info.tsif[j].aggregate_ids);
				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
			}
			return -ENOMEM;
		}
		mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
		mpq_dmx_tspp_info.tsif[i].buff_index = 0;
		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL;
		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL;
		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0;
		atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);

		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
		}

		snprintf(mpq_dmx_tspp_info.tsif[i].name,
				TSIF_NAME_LENGTH,
				"dmx_tsif%d",
				i);

		init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
		mpq_dmx_tspp_info.tsif[i].thread =
			kthread_run(
				mpq_dmx_tspp_thread, (void *)i,
				mpq_dmx_tspp_info.tsif[i].name);

		if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
			vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);

			for (j = 0; j < i; j++) {
				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
				vfree(mpq_dmx_tspp_info.tsif[j].aggregate_ids);
				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
			}

			MPQ_DVB_ERR_PRINT(
				"%s: kthread_run failed\n",
				__func__);

			return -ENOMEM;
		}

		mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);
	}

	ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init);

	if (ret < 0) {
		MPQ_DVB_ERR_PRINT(
			"%s: mpq_dmx_plugin_init failed (errno=%d)\n",
			__func__,
			ret);

		for (i = 0; i < TSIF_COUNT; i++) {
			kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
			vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);
			mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
		}
	}

	return ret;
}
Example #9
/* Function Declaration for threads */
int oneHzDispatch(void* data) {

	int retval = 0;

	struct sched_param param = { .sched_priority = 95 };
	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Wait for semaphore. Get it*/
	while(1) {
		if( ( retval = down_killable( &oneHzSemph ) ))
			return retval;
		
		oneHzFunc( NULL );

		if( !(dispatchFlag & (1 << OVERRUN_FLAG)))
			dispatchFlag |= (1 << ONE_HZ_FLAG);
	}

}

int tenHzDispatch(void* data) {

	int retval = 0;

	struct sched_param param = { .sched_priority = 96 };
	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Wait for semaphore. Get it*/
	/* Call User function here */
	while(1) {
		if( ( retval = down_killable( &tenHzSemph ) ))
			return retval;

		tenHzFunc( NULL );

		if( !(dispatchFlag & (1 << OVERRUN_FLAG)))
			dispatchFlag |= (1 << TEN_HZ_FLAG);
	}
	
}

int hunHzDispatch(void* data) {

	/* Wait for semaphore. Get it*/
	/* Call User function here */
	int retval = 0;

	struct sched_param param = { .sched_priority = 97 };
	sched_setscheduler(current, SCHED_FIFO, &param);

	while(1) {
		if( ( retval = down_killable( &hunHzSemph ) ))
			return retval;

		hunHzFunc( NULL );

		if( !(dispatchFlag & (1 << OVERRUN_FLAG)))
			dispatchFlag |= (1 << HUN_HZ_FLAG);
	}

}

int exitSched(void *data){

	struct sched_param param = { .sched_priority = 98 };
	sched_setscheduler(current, SCHED_FIFO, &param);

	down_killable( &exitSemph );
	

	printk( KERN_ERR "Task overrun!!\n");

	if( !(dispatchFlag & (1 << ONE_HZ_FLAG)))
		printk(KERN_ERR "Possibly 1Hz Task\n");

	if( !(dispatchFlag & (1 << TEN_HZ_FLAG)))
		printk(KERN_ERR "Possibly 10Hz Task\n");


	if( !(dispatchFlag & (1 << HUN_HZ_FLAG)))
		printk(KERN_ERR "Possibly 100Hz Task\n");

	kthread_stop( task1Hz );
	kthread_stop( task10Hz ); 
	kthread_stop( task100Hz ); 

	

	return 0;

}

static int __init p_scheduler_init( void ) {


	/* Create Semaphores here */
	int retval = 0;
	ktime_t k1Hz, k10Hz, k100Hz;

	sema_init( &oneHzSemph, 1 );
	sema_init( &tenHzSemph, 1 );
	sema_init( &hunHzSemph, 1 );
	sema_init( &exitSemph, 1 );

	dispatchFlag = 0xFF;
	dispatchFlag &= ~( 1 << OVERRUN_FLAG );
	


	if( (retval = down_killable( &oneHzSemph)) ) {
		printk(KERN_ERR "Cannot acquire one hertz semaphore\n");
	 	return retval;
	 }

	if( (retval = down_killable( &tenHzSemph)) ) {
		printk(KERN_ERR "Cannot acquire ten hertz semaphore\n");
	 	return retval;
	 }

 	if( (retval = down_killable( &hunHzSemph)) ) {
 		printk(KERN_ERR "Cannot acquire hundred hertz semaphore\n");
	 	return retval;
	 }

	 if( (retval = down_killable( &exitSemph)) ) {
	 	printk(KERN_ERR "Cannot acquire exit semaphore\n");
	 	return retval;
	 }



	/* Start and initialize your timers */

	/* Thread Creation */
	exitThread = kthread_run( exitSched, NULL, "CleanupThread");
	task100Hz = kthread_run( hunHzDispatch, NULL, "Thread100Hz");
	task10Hz = kthread_run( tenHzDispatch, NULL, "Thread10Hz");
	task1Hz = kthread_run( oneHzDispatch, NULL, "Thread1Hz");

	k1Hz   	= ktime_set( 0, MS_TO_NS(997) ); 	//1000ms -> 1hz
	k10Hz  	= ktime_set( 0, MS_TO_NS(97) ); 	//100ms -> 10hz
	k100Hz 	= ktime_set( 0, MS_TO_NS(23) ); 	//10ms -> 100hz

	hrtimer_init( &timer100Hz, CLOCK_MONOTONIC, HRTIMER_MODE_REL );
	hrtimer_init( &timer10Hz, CLOCK_MONOTONIC, HRTIMER_MODE_REL );
	hrtimer_init( &timer1Hz, CLOCK_MONOTONIC, HRTIMER_MODE_REL );

	timer1Hz.function 	= 	&timer1HzCallback;
	timer10Hz.function 	=	&timer10HzCallback;	
	timer100Hz.function =	&timer100HzCallback;

	printk("Starting Timers\n");

	hrtimer_start( &timer100Hz, k100Hz, HRTIMER_MODE_REL);
	hrtimer_start( &timer10Hz, k10Hz, HRTIMER_MODE_REL);
	hrtimer_start( &timer1Hz, k1Hz, HRTIMER_MODE_REL);

	return 0;

}

static void __exit p_scheduler_cleanup( void ) {

	int retAllTimers;

	retAllTimers = hrtimer_cancel( &timer1Hz ) && hrtimer_cancel( &timer10Hz ) && hrtimer_cancel( &timer100Hz );

	if( retAllTimers )
		printk("Timers still in use\n");

	printk("Cancelling All Timers\n");
	up( &exitSemph );

	return;

}
Example #10
static int trace_wakeup_test_thread(void *data)
{
	
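	/* make this an RT thread; the priority doesn't need to be high */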
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	
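	/* let the test know we now have the new priority */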
	complete(x);

	
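	/* now go to sleep and let the test wake us up */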
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	
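	/* we are awake, now wait to disappear */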
	while (!kthread_should_stop()) {
		
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	
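	/* create a high prio thread */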
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	
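	/* make sure the thread is running at an RT prio */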
	wait_for_completion(&isrt);

	
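	/* start the tracing */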
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	
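	/* reset the max latency */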
	tracing_max_latency = 0;

	
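	/* sleep to let the RT thread sleep too */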
	msleep(100);

	

	wake_up_process(p);

	
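	/* give a little time to let the thread wake up */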
	msleep(100);

	
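	/* stop the tracing */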
	tracing_stop();
	
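	/* check both trace buffers */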
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	
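	/* kill the thread */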
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
Example #11
static inline void cryptd_stop_thread(struct cryptd_state *state)
{
    BUG_ON(state->queue.qlen);
    kthread_stop(state->task);
}
Example #12
static void my_exit(void)
{
	kthread_stop(my_thread);
	wake_up(&my_wait);
	misc_deregister(&my_misc);
}
Example #13
/**
 * Module initialization function.
 *
 * Return	error code
 */
static int __init mpq_dmx_tsif_plugin_init(void)
{
	int i;
	int ret;

	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);

	/* check module parameters validity */
	if (threshold < 1) {
		MPQ_DVB_ERR_PRINT(
			"%s: invalid threshold parameter, using %d instead\n",
			__func__, DMX_TSIF_PACKETS_IN_CHUNK_DEF);
		threshold = DMX_TSIF_PACKETS_IN_CHUNK_DEF;
	}
	if ((tsif_mode < 1) || (tsif_mode > 3)) {
		MPQ_DVB_ERR_PRINT(
			"%s: invalid mode parameter, using %d instead\n",
			__func__, DMX_TSIF_DRIVER_MODE_DEF);
		tsif_mode = DMX_TSIF_DRIVER_MODE_DEF;
	}

	for (i = 0; i < TSIF_COUNT; i++) {
		snprintf(mpq_dmx_tsif_info.tsif[i].name,
				TSIF_NAME_LENGTH,
				"dmx_tsif%d",
				i);

		atomic_set(&mpq_dmx_tsif_info.tsif[i].data_cnt, 0);
		init_waitqueue_head(&mpq_dmx_tsif_info.tsif[i].wait_queue);
		mpq_dmx_tsif_info.tsif[i].thread =
			kthread_run(
				mpq_dmx_tsif_thread, (void *)i,
				mpq_dmx_tsif_info.tsif[i].name);

		if (IS_ERR(mpq_dmx_tsif_info.tsif[i].thread)) {
			int j;

			for (j = 0; j < i; j++) {
				kthread_stop(mpq_dmx_tsif_info.tsif[j].thread);
				mutex_destroy(&mpq_dmx_tsif_info.tsif[j].mutex);
			}

			MPQ_DVB_ERR_PRINT(
				"%s: kthread_run failed\n",
				__func__);

			return -ENOMEM;
		}

		mutex_init(&mpq_dmx_tsif_info.tsif[i].mutex);

		mpq_dmx_tsif_info.tsif[i].tsif_driver.tsif_handler = NULL;
		mpq_dmx_tsif_info.tsif[i].ref_count = 0;
	}

	ret = mpq_dmx_plugin_init(mpq_tsif_dmx_init);

	if (ret < 0) {
		MPQ_DVB_ERR_PRINT(
			"%s: mpq_dmx_plugin_init failed (errno=%d)\n",
			__func__,
			ret);

		for (i = 0; i < TSIF_COUNT; i++) {
			kthread_stop(mpq_dmx_tsif_info.tsif[i].thread);
			mutex_destroy(&mpq_dmx_tsif_info.tsif[i].mutex);
		}
	}

	return ret;
}
Example #14
static void __exit my_exit (void)
{
    kthread_stop (tsk);
    my_generic_exit ();
}
Example #15
static int proc_read_status(char *page, char **start, off_t off, int count,
			    int *eof, void *data)
{
	char *p = page;
	int len;

	if (_reads[1] < 2) { /* at least two consecutive readings OK */
		p += sprintf(p, "Not ready\n");
	} else {
		p += sprintf(p, "T     :\t\t%d.%d\n", sns.t / 10, sns.t%10);
		p += sprintf(p, "RH    :\t\t%d.%d\n", sns.rh / 10, sns.rh%10);
		p += sprintf(p, "QUAL  :\t\t%d/%d %d%c\n", _reads[1], _reads[0],
			     _reads[1] * 100 / _reads[0], '\%');
	}
	len = (p - page) - off;
	if (len < 0) {
		len = 0;
	}

	*eof = (len <= count) ? 1 : 0;
	*start = page + off;

	return len;
}
#endif

static int __init am2301_init(void)
{
	int ret;

	printk(KERN_INFO "Init am2301\n");

	ret = gpio_request_one(_pin, GPIOF_OUT_INIT_HIGH, "AM2301");

	if (ret != 0) {
		printk(KERN_ERR "Unable to request GPIO, err: %d\n", ret);
		return ret;
	}

	_irq =  gpio_to_irq(_pin);
	if (_irq < 0) {
		printk(KERN_ERR "am2301: Unable to create IRQ\n");
		goto _cleanup_1;

	}

	init_waitqueue_head(&_queue);

	ret = request_irq(_irq, read_isr,
			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			  "read_isr", NULL);
	if (ret) {
		printk(KERN_ERR "am2301: Unable to request IRQ %d, err: %d\n",
		       _irq, ret);
		goto _cleanup_1;
	}

	ts = kthread_create(read_thread, NULL, "am2301");

	if (!IS_ERR(ts)) {
		wake_up_process(ts);
	} else {
		printk(KERN_ERR "am2301: Unable to create thread\n");
		goto _cleanup_2;
	}

#ifdef CONFIG_PROC_FS
	entry = create_proc_entry("am2301", S_IRUGO, NULL);
	if (!entry) {
		printk(KERN_ERR "am2301: Unable to create proc/am2301\n");
		goto _cleanup_3;
	}
	entry->read_proc = proc_read_status;
#endif
	return 0;

_cleanup_3:
	kthread_stop(ts);
_cleanup_2:
	free_irq(_irq, NULL);
_cleanup_1:
	gpio_free(_pin);

	return -1;
}
Example #16
static int ccp5_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	u64 status;
	u32 status_lo, status_hi;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {

		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;
		mutex_init(&cmd_q->q_mutex);

		/* Page alignment satisfies our needs for N <= 128 */
		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
						   &cmd_q->qbase_dma,
						   GFP_KERNEL);
		if (!cmd_q->qbase) {
			dev_err(dev, "unable to allocate command queue\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q->qidx = 0;
		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_control = ccp->io_regs +
				     CMD5_Q_STATUS_INCR * (i + 1);
		cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
		cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
		cmd_q->reg_int_enable = cmd_q->reg_control +
					CMD5_Q_INT_ENABLE_BASE;
		cmd_q->reg_interrupt_status = cmd_q->reg_control +
					      CMD5_Q_INTERRUPT_STATUS_BASE;
		cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
		cmd_q->reg_int_status = cmd_q->reg_control +
					CMD5_Q_INT_STATUS_BASE;
		cmd_q->reg_dma_status = cmd_q->reg_control +
					CMD5_Q_DMA_STATUS_BASE;
		cmd_q->reg_dma_read_status = cmd_q->reg_control +
					     CMD5_Q_DMA_READ_STATUS_BASE;
		cmd_q->reg_dma_write_status = cmd_q->reg_control +
					      CMD5_Q_DMA_WRITE_STATUS_BASE;

		init_waitqueue_head(&cmd_q->int_queue);

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Turn off the queues and disable interrupts until ready */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		cmd_q->qcontrol = 0; /* Start with nothing */
		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

		/* Disable the interrupts */
		iowrite32(0x00, cmd_q->reg_int_enable);
		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);

		/* Clear the interrupts */
		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
	}

	dev_dbg(dev, "Requesting an IRQ...\n");
	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	dev_dbg(dev, "Loading LSB map...\n");
	/* Copy the private LSB mask to the public registers */
	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
	iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
	iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
	status = ((u64)status_hi<<30) | (u64)status_lo;

	dev_dbg(dev, "Configuring virtual queues...\n");
	/* Configure size of each virtual queue accessible to host */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		u32 dma_addr_lo;
		u32 dma_addr_hi;

		cmd_q = &ccp->cmd_q[i];

		cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

		cmd_q->qdma_tail = cmd_q->qbase_dma;
		dma_addr_lo = low_address(cmd_q->qdma_tail);
		iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
		iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

		dma_addr_hi = high_address(cmd_q->qdma_tail);
		cmd_q->qcontrol |= (dma_addr_hi << 16);
		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

		/* Find the LSB regions accessible to the queue */
		ccp_find_lsb_regions(cmd_q, status);
		cmd_q->lsb = -1; /* Unassigned value */
	}

	dev_dbg(dev, "Assigning LSBs...\n");
	ret = ccp_assign_lsbs(ccp);
	if (ret) {
		dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
		goto e_irq;
	}

	/* Optimization: pre-allocate LSB slots for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
		ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
	}

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	/* Enable interrupts */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
	}

	dev_dbg(dev, "Registering device...\n");
	/* Put this on the unit list to make it available */
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}
Example #17
int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end,
			unsigned long features)
{

	int	status = 0;
	int	i;
	int ret;
	u8 mask[4];

	static struct irq_chip	twl6030_irq_chip;

	if (features & TWL6032_SUBCLASS)
		twl6030_interrupt_mapping = twl6032_interrupt_mapping_table;

	mask[1] = 0xFF;
	mask[2] = 0xFF;
	mask[3] = 0xFF;
	ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
			REG_INT_MSK_LINE_A, 3); /* MASK ALL INT LINES */
	ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
			REG_INT_MSK_STS_A, 3); /* MASK ALL INT STS */
	ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
			REG_INT_STS_A, 3); /* clear INT_STS_A,B,C */

	twl6030_irq_base = irq_base;
	twl6030_irq_end = irq_end;

	/* install an irq handler for each of the modules;
	 * clone dummy irq_chip since PIH can't *do* anything
	 */
	twl6030_irq_chip = dummy_irq_chip;
	twl6030_irq_chip.name = "twl6030";
	twl6030_irq_chip.irq_set_type = NULL;
	twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;

	for (i = irq_base; i < irq_end; i++) {
		irq_set_chip_and_handler(i, &twl6030_irq_chip,
					 handle_simple_irq);
		irq_set_chip_data(i, (void *)irq_num);
		activate_irq(i);
	}

	twl6030_irq_next = i;
	pr_info("twl6030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
			irq_num, irq_base, twl6030_irq_next - 1);

	/* install an irq handler to demultiplex the TWL6030 interrupt */
	init_completion(&irq_event);
	task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
	if (IS_ERR(task)) {
		pr_err("twl6030: could not create irq %d thread!\n", irq_num);
		status = PTR_ERR(task);
		goto fail_kthread;
	}

	status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED,
				"TWL6030-PIH", &irq_event);
	if (status < 0) {
		pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
		goto fail_irq;
	}

	twl_irq = irq_num;
	register_pm_notifier(&twl6030_irq_pm_notifier_block);

	status = twl6030_vlow_init(twl6030_irq_base + TWL_VLOW_INTR_OFFSET);
	if (status < 0)
		goto fail_vlow;

	return status;

fail_vlow:
	free_irq(irq_num, &irq_event);

fail_irq:
	kthread_stop(task);

fail_kthread:
	for (i = irq_base; i < irq_end; i++)
		irq_set_chip_and_handler(i, NULL, NULL);
	return status;
}
Example #18
static void __exit trace_event_exit(void)
{
	kthread_stop(simple_tsk);
}
Example #19
static int
zpios_threads_run(run_args_t *run_args)
{
	struct task_struct *tsk, **tsks;
	thread_data_t *thr = NULL;
	zpios_time_t *tt = &(run_args->stats.total_time);
	zpios_time_t *tw = &(run_args->stats.wr_time);
	zpios_time_t *tr = &(run_args->stats.rd_time);
	int i, rc = 0, tc = run_args->thread_count;

	tsks = kmem_zalloc(sizeof(struct task_struct *) * tc, KM_SLEEP);
	if (tsks == NULL) {
		rc = -ENOMEM;
		goto cleanup2;
	}

	run_args->threads = kmem_zalloc(sizeof(thread_data_t *) * tc, KM_SLEEP);
	if (run_args->threads == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	init_waitqueue_head(&run_args->waitq);
	run_args->threads_done = 0;

	/* Create all the needed threads which will sleep until awoken */
	for (i = 0; i < tc; i++) {
		thr = kmem_zalloc(sizeof(thread_data_t), KM_SLEEP);
		if (thr == NULL) {
			rc = -ENOMEM;
			goto taskerr;
		}

		thr->thread_no = i;
		thr->run_args = run_args;
		thr->rc = 0;
		mutex_init(&thr->lock, NULL, MUTEX_DEFAULT, NULL);
		run_args->threads[i] = thr;

		tsk = kthread_create(zpios_thread_main, (void *)thr,
		                     "%s/%d", "zpios_io", i);
		if (IS_ERR(tsk)) {
			rc = -EINVAL;
			goto taskerr;
		}

		tsks[i] = tsk;
	}

	tt->start = zpios_timespec_now();

	/* Wake up all threads for write phase */
	(void)zpios_upcall(run_args->pre, PHASE_PRE_WRITE, run_args, 0);
	for (i = 0; i < tc; i++)
		wake_up_process(tsks[i]);

	/* Wait for write phase to complete */
	tw->start = zpios_timespec_now();
	wait_event(run_args->waitq, zpios_thread_done(run_args));
	tw->stop = zpios_timespec_now();
	(void)zpios_upcall(run_args->post, PHASE_POST_WRITE, run_args, rc);

	for (i = 0; i < tc; i++) {
		thr = run_args->threads[i];

		mutex_enter(&thr->lock);

		if (!rc && thr->rc)
			rc = thr->rc;

		run_args->stats.wr_data += thr->stats.wr_data;
		run_args->stats.wr_chunks += thr->stats.wr_chunks;
		mutex_exit(&thr->lock);
	}

	if (rc) {
		/* Wake up all threads and tell them to exit */
		for (i = 0; i < tc; i++) {
			mutex_enter(&thr->lock);
			thr->rc = rc;
			mutex_exit(&thr->lock);

			wake_up_process(tsks[i]);
		}
		goto out;
	}

	mutex_enter(&run_args->lock_ctl);
	ASSERT(run_args->threads_done == run_args->thread_count);
	run_args->threads_done = 0;
	mutex_exit(&run_args->lock_ctl);

	/* Wake up all threads for read phase */
	(void)zpios_upcall(run_args->pre, PHASE_PRE_READ, run_args, 0);
	for (i = 0; i < tc; i++)
		wake_up_process(tsks[i]);

	/* Wait for read phase to complete */
	tr->start = zpios_timespec_now();
	wait_event(run_args->waitq, zpios_thread_done(run_args));
	tr->stop = zpios_timespec_now();
	(void)zpios_upcall(run_args->post, PHASE_POST_READ, run_args, rc);

	for (i = 0; i < tc; i++) {
		thr = run_args->threads[i];

		mutex_enter(&thr->lock);

		if (!rc && thr->rc)
			rc = thr->rc;

		run_args->stats.rd_data += thr->stats.rd_data;
		run_args->stats.rd_chunks += thr->stats.rd_chunks;
		mutex_exit(&thr->lock);
	}
out:
	tt->stop  = zpios_timespec_now();
	tt->delta = zpios_timespec_sub(tt->stop, tt->start);
	tw->delta = zpios_timespec_sub(tw->stop, tw->start);
	tr->delta = zpios_timespec_sub(tr->stop, tr->start);

cleanup:
	kmem_free(tsks, sizeof(struct task_struct *) * tc);
cleanup2:
	/* Returns first encountered thread error (if any) */
	return rc;

taskerr:
	/* Destroy all threads that were created successfully */
	for (i = 0; i < tc; i++)
		if (tsks[i] != NULL)
			(void) kthread_stop(tsks[i]);

	goto cleanup;
}
Example #20
static int wakeup_event_thread(void *param)
{
	struct wakeup_ctrl *ctrl = (struct wakeup_ctrl *)param;
	struct sched_param sch_param = {.sched_priority = 1};

	sched_setscheduler(current, SCHED_RR, &sch_param);
	while (1) {
		wait_for_completion(&ctrl->event);
		if (kthread_should_stop())
			break;
		wakeup_event_handler(ctrl);
		enable_irq(ctrl->wakeup_irq);
		if ((ctrl->usb_irq > 0) && (ctrl->wakeup_irq != ctrl->usb_irq))
			enable_irq(ctrl->usb_irq);
	}
	return 0;
}

static int  wakeup_dev_probe(struct platform_device *pdev)
{
	struct fsl_usb2_wakeup_platform_data *pdata;
	struct wakeup_ctrl *ctrl = NULL;
	int status;

	printk(KERN_INFO "IMX usb wakeup probe\n");

	if (!pdev || !pdev->dev.platform_data)
		return -ENODEV;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;
	pdata = pdev->dev.platform_data;
	ctrl->pdata = pdata;
	init_completion(&ctrl->event);
	ctrl->wakeup_irq = platform_get_irq(pdev, 0);
	status = request_irq(ctrl->wakeup_irq, usb_wakeup_handler, IRQF_SHARED, "usb_wakeup", (void *)ctrl);
	if (status)
		goto error1;
	ctrl->usb_irq = platform_get_irq(pdev, 1);

	ctrl->thread = kthread_run(wakeup_event_thread, (void *)ctrl, "usb_wakeup thread");
	status = IS_ERR(ctrl->thread) ? PTR_ERR(ctrl->thread) : 0;
	if (status)
		goto error2;
	g_ctrl = ctrl;

	return 0;
error2:
	free_irq(ctrl->wakeup_irq, (void *)ctrl);
error1:
	kfree(ctrl);
	return status;
}

static int  wakeup_dev_exit(struct platform_device *pdev)
{
	if (g_ctrl->thread) {
		complete(&g_ctrl->event);
		kthread_stop(g_ctrl->thread);
	}
	free_irq(g_ctrl->wakeup_irq, (void *)g_ctrl);
	kfree(g_ctrl);
	return 0;
}
static struct platform_driver wakeup_d = {
	.probe   = wakeup_dev_probe,
	.remove  = wakeup_dev_exit,
	.driver = {
		.name = "usb_wakeup",
	},
};

static int __init wakeup_dev_init(void)
{
	return platform_driver_register(&wakeup_d);
}
static void __exit wakeup_dev_uninit(void)
{
	platform_driver_unregister(&wakeup_d);
}
Example #21
void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}
Example #22
static int hsictty_port_probe(struct usb_serial_port *port)
{
	int i;
	struct urb *urb;
	struct hsictty_port_private *portdata;
	struct usb_serial *serial = port->serial;
#ifndef USE_READ_WORK
	char task_name[50] = { 0 };
#endif

	portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
	if (!portdata) {
		hsictty_error("%s: alloc mem failed\n", __func__);
		return -ENOMEM;
	}
	portdata->opened = 0;
	portdata->lch_opened = 0;
	portdata->channel = port->number;

	INIT_LIST_HEAD(&portdata->pool);
	spin_lock_init(&portdata->pool_lock);
	sema_init(&portdata->ch_sem_w, 1);
	sema_init(&portdata->ch_sem_r, 1);
	init_completion(&portdata->tx_notifier);
	init_completion(&portdata->rx_push_notifier);
	init_usb_anchor(&portdata->delayed_urb);
#ifdef USE_READ_WORK
	INIT_WORK(&portdata->hsictty_read_work, hsictty_read_work);
#else
	sprintf(task_name, "hsictty_rx_task%d", portdata->channel);
	portdata->thread_exit = 0;
	portdata->rx_task = kthread_create(rx_threadfn, portdata, task_name);
	init_completion(&portdata->rx_notifier);
	if (IS_ERR(portdata->rx_task))
		portdata->rx_task = NULL;
	else
		wake_up_process(portdata->rx_task);
#endif

	for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); ++i) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
			hsictty_dbg("%s: in urb alloc failed.\n", __func__);
			goto error;
		}
		portdata->in_urbs[i] = urb;

		portdata->in_buffer[i] =
		    usb_alloc_coherent(serial->dev, IN_BUFLEN, GFP_KERNEL,
				       &urb->transfer_dma);
		if (!portdata->in_buffer[i]) {
			hsictty_dbg
			    ("%s: in urb dma buffer alloc failed.\n", __func__);
			goto error;
		}

		/* Fill URB using supplied data. */
		usb_fill_bulk_urb(urb, serial->dev,
				  usb_sndbulkpipe(serial->dev,
						  port->bulk_in_endpointAddress)
				  | USB_DIR_IN, portdata->in_buffer[i],
				  IN_BUFLEN, hsictty_read_callback, port);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	}

	for (i = 0; i < ARRAY_SIZE(portdata->out_urbs); ++i) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
			hsictty_dbg("%s: in urb alloc failed.\n", __func__);
			goto error;
		}
		portdata->out_urbs[i] = urb;

		portdata->out_buffer[i] =
		    usb_alloc_coherent(serial->dev, OUT_BUFLEN, GFP_KERNEL,
				       &urb->transfer_dma);
		if (!portdata->out_buffer[i]) {
			hsictty_dbg
			    ("%s: in urb dma buffer alloc failed.\n", __func__);
			goto error;
		}
		/* Fill URB using supplied data. */
		usb_fill_bulk_urb(urb, serial->dev,
				  usb_sndbulkpipe(serial->dev,
						  port->
						  bulk_out_endpointAddress)
				  | USB_DIR_OUT, portdata->out_buffer[i],
				  OUT_BUFLEN, hsictty_write_callback, port);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	}

	usb_set_serial_port_data(port, portdata);

	return 0;

error:
	for (i = 0; i < ARRAY_SIZE(portdata->out_urbs); ++i) {
		urb = portdata->out_urbs[i];
		if (!urb)
			continue;
		if (portdata->out_buffer[i])
			usb_free_coherent(serial->dev, OUT_BUFLEN,
					  portdata->out_buffer[i],
					  urb->transfer_dma);
		usb_free_urb(urb);
	}

	for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); ++i) {
		urb = portdata->in_urbs[i];
		if (!urb)
			continue;
		if (portdata->in_buffer[i])
			usb_free_coherent(serial->dev, IN_BUFLEN,
					  portdata->in_buffer[i],
					  urb->transfer_dma);
		usb_free_urb(urb);
	}
#ifndef USE_READ_WORK
	if (portdata->rx_task) {
		portdata->thread_exit = 1;
		complete_all(&portdata->rx_notifier);
		kthread_stop(portdata->rx_task);
		portdata->rx_task = NULL;
	}
#endif


	kfree(portdata);

	return -EINVAL;
}
Example #23
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
Example #24
int pfq_setsockopt(struct socket *sock,
                int level, int optname,
                char __user * optval,
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
                unsigned
#endif
                int optlen)
{
        struct pfq_sock *so = pfq_sk(sock->sk);
        struct pfq_rx_opt * ro;
        struct pfq_tx_opt * to;

        bool found = true;

        if (so == NULL)
                return -EINVAL;

        ro = &so->rx_opt;
        to = &so->tx_opt;

        switch(optname)
        {
        case Q_SO_TOGGLE_QUEUE:
        {
                int active;
                if (optlen != sizeof(active))
                        return -EINVAL;
                if (copy_from_user(&active, optval, optlen))
                        return -EFAULT;

                if (active)
                {
                        if (!so->mem_addr)
                        {
                                struct pfq_queue_hdr * queue;

                                /* alloc queue memory */

                                if (pfq_shared_queue_alloc(so, pfq_queue_total_mem(so)) < 0)
                                {
                                        return -ENOMEM;
                                }

                                /* so->mem_addr and so->mem_size are correctly configured */

                                /* initialize queues headers */

                                queue = (struct pfq_queue_hdr *)so->mem_addr;

                                /* initialize rx queue header */

                                queue->rx.data              = (1L << 24);
                                queue->rx.poll_wait         = 0;
                                queue->rx.size              = so->rx_opt.size;
                                queue->rx.slot_size         = so->rx_opt.slot_size;

                                queue->tx.producer.index    = 0;
                                queue->tx.producer.cache    = 0;
                                queue->tx.consumer.index    = 0;
                                queue->tx.consumer.cache    = 0;

                                queue->tx.size_mask         = so->tx_opt.size - 1;
                                queue->tx.max_len           = so->tx_opt.maxlen;
                                queue->tx.size              = so->tx_opt.size;
                                queue->tx.slot_size         = so->tx_opt.slot_size;

                                /* update the queues base_addr */

                                so->rx_opt.base_addr = so->mem_addr + sizeof(struct pfq_queue_hdr);
                                so->tx_opt.base_addr = so->mem_addr + sizeof(struct pfq_queue_hdr) + pfq_queue_mpdb_mem(so);

                                /* commit both the queues */

                                smp_wmb();

                                so->rx_opt.queue_ptr = &queue->rx;
                                so->tx_opt.queue_ptr = &queue->tx;

                                pr_devel("[PFQ|%d] queue: rx_size:%d rx_slot_size:%d tx_size:%d tx_slot_size:%d\n", so->id, queue->rx.size,
                                                queue->rx.slot_size,
                                                queue->tx.size,
                                                queue->tx.slot_size);
                        }
                }
                else
                {
                        if (so->tx_opt.thread)
                        {
                                pr_devel("[PFQ|%d] stopping TX thread...\n", so->id);
                                kthread_stop(so->tx_opt.thread);
                                so->tx_opt.thread = NULL;
                        }

                        msleep(Q_GRACE_PERIOD);

                        pfq_shared_queue_free(so);
                }

        } break;

        case Q_SO_GROUP_BIND:
        {
                struct pfq_binding bind;
                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, bind.gid, "add binding");

                pfq_devmap_update(map_set, bind.if_index, bind.hw_queue, bind.gid);
        } break;

        case Q_SO_GROUP_UNBIND:
        {
                struct pfq_binding bind;
                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, bind.gid, "remove binding");

                pfq_devmap_update(map_reset, bind.if_index, bind.hw_queue, bind.gid);
        } break;

        case Q_SO_EGRESS_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index))
                {
                        rcu_read_unlock();
                        pr_devel("[PFQ|%d] TX bind: invalid if_index:%d\n", so->id, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1)
                {
                        pr_devel("[PFQ|%d] TX bind: invalid queue:%d\n", so->id, info.hw_queue);
                        return -EPERM;
                }

                so->egress_index = info.if_index;
                so->egress_queue = info.hw_queue;

                pr_devel("[PFQ|%d] egress bind: if_index:%d hw_queue:%d\n", so->id, so->egress_index, so->egress_queue);

        } break;

        case Q_SO_EGRESS_UNBIND:
        {
                so->egress_index = 0;
                so->egress_queue = 0;
                pr_devel("[PFQ|%d] egress unbind.\n", so->id);

        } break;

        case Q_SO_SET_RX_TSTAMP:
        {
                int tstamp;
                if (optlen != sizeof(so->rx_opt.tstamp))
                        return -EINVAL;

                if (copy_from_user(&tstamp, optval, optlen))
                        return -EFAULT;

                tstamp = tstamp ? 1 : 0;

                /* update the timestamp_enabled counter */

                atomic_add(tstamp - so->rx_opt.tstamp, &timestamp_enabled);
                so->rx_opt.tstamp = tstamp;

                pr_devel("[PFQ|%d] timestamp_enabled counter: %d\n", so->id, atomic_read(&timestamp_enabled));
        } break;

        case Q_SO_SET_RX_CAPLEN:
        {
                typeof(so->rx_opt.caplen) caplen;

                if (optlen != sizeof(caplen))
                        return -EINVAL;
                if (copy_from_user(&caplen, optval, optlen))
                        return -EFAULT;

                if (caplen > (size_t)cap_len) {
                        pr_devel("[PFQ|%d] invalid caplen:%zu (max: %d)\n", so->id, caplen, cap_len);
                        return -EPERM;
                }

                so->rx_opt.caplen = caplen;

                so->rx_opt.slot_size = MPDB_QUEUE_SLOT_SIZE(so->rx_opt.caplen);

                pr_devel("[PFQ|%d] caplen:%zu -> slot_size:%zu\n",
                                so->id, so->rx_opt.caplen, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_RX_SLOTS:
        {
                typeof(so->rx_opt.size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots > (size_t)rx_queue_slots) {
                        pr_devel("[PFQ|%d] invalid rx slots:%zu (max: %d)\n", so->id, slots, rx_queue_slots);
                        return -EPERM;
                }

                so->rx_opt.size = slots;

                pr_devel("[PFQ|%d] rx_queue_slots:%zu\n", so->id, so->rx_opt.size);
        } break;

        case Q_SO_SET_TX_MAXLEN:
        {
                typeof (so->tx_opt.maxlen) maxlen;
                if (optlen != sizeof(maxlen))
                        return -EINVAL;
                if (copy_from_user(&maxlen, optval, optlen))
                        return -EFAULT;

                if (maxlen > (size_t)max_len) {
                        pr_devel("[PFQ|%d] invalid maxlen:%zu (max: %d)\n", so->id, maxlen, max_len);
                        return -EPERM;
                }

                so->tx_opt.maxlen = maxlen;

                so->tx_opt.slot_size = SPSC_QUEUE_SLOT_SIZE(so->tx_opt.maxlen); /* slot size derived from the maximum frame length */

                pr_devel("[PFQ|%d] tx_slot_size:%zu\n", so->id, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_TX_SLOTS:
        {
                typeof (so->tx_opt.size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots & (slots-1))
                {
                        pr_devel("[PFQ|%d] tx slots must be a power of two.\n", so->id);
                        return -EINVAL;
                }

                if (slots > (size_t)tx_queue_slots) {
                        pr_devel("[PFQ|%d] invalid tx slots:%zu (max: %d)\n", so->id, slots, tx_queue_slots);
                        return -EPERM;
                }

                so->tx_opt.size = slots;

                pr_devel("[PFQ|%d] tx_queue_slots:%zu\n", so->id, so->tx_opt.size);
        } break;

        case Q_SO_GROUP_LEAVE:
        {
                int gid;
                if (optlen != sizeof(gid))
                        return -EINVAL;
                if (copy_from_user(&gid, optval, optlen))
                        return -EFAULT;

                if (pfq_leave_group(gid, so->id) < 0) {
                        return -EFAULT;
                }

                pr_devel("[PFQ|%d] leave: gid:%d\n", so->id, gid);
        } break;

        case Q_SO_GROUP_FPROG:
        {
                struct pfq_fprog fprog;
                if (optlen != sizeof(fprog))
                        return -EINVAL;

                if (copy_from_user(&fprog, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, fprog.gid, "group fprog");

                if (fprog.fcode.len > 0)  /* set the filter */
                {
                        struct sk_filter *filter = pfq_alloc_sk_filter(&fprog.fcode);
                        if (filter == NULL)
                        {
                                pr_devel("[PFQ|%d] fprog error: alloc_sk_filter for gid:%d\n", so->id, fprog.gid);
                                return -EINVAL;
                        }

                        __pfq_set_group_filter(fprog.gid, filter);

                        pr_devel("[PFQ|%d] fprog: gid:%d (fprog len %d bytes)\n", so->id, fprog.gid, fprog.fcode.len);
                }
                else 	/* reset the filter */
                {
                        __pfq_set_group_filter(fprog.gid, NULL);

                        pr_devel("[PFQ|%d] fprog: gid:%d (resetting filter)\n", so->id, fprog.gid);
                }

        } break;

        case Q_SO_GROUP_VLAN_FILT_TOGGLE:
        {
                struct pfq_vlan_toggle vlan;

                if (optlen != sizeof(vlan))
                        return -EINVAL;
                if (copy_from_user(&vlan, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, vlan.gid, "group vlan filt toggle");

                __pfq_toggle_group_vlan_filters(vlan.gid, vlan.toggle);

                pr_devel("[PFQ|%d] vlan filters %s for gid:%d\n", so->id, (vlan.toggle ? "enabled" : "disabled"), vlan.gid);
        } break;

        case Q_SO_GROUP_VLAN_FILT:
        {
                struct pfq_vlan_toggle filt;

                if (optlen != sizeof(filt))
                        return -EINVAL;

                if (copy_from_user(&filt, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, filt.gid, "group vlan filt");

                if (filt.vid < -1 || filt.vid > 4094) {
                        pr_devel("[PFQ|%d] vlan_set error: gid:%d invalid vid:%d!\n", so->id, filt.gid, filt.vid);
                        return -EINVAL;
                }

                if (!__pfq_vlan_filters_enabled(filt.gid)) {
                        pr_devel("[PFQ|%d] vlan_set error: vlan filters disabled for gid:%d!\n", so->id, filt.gid);
                        return -EPERM;
                }

                if (filt.vid  == -1) /* any */
                {
                        int i;
                        for(i = 1; i < 4095; i++)
                                __pfq_set_group_vlan_filter(filt.gid, filt.toggle, i);
                }
                else
                {
                        __pfq_set_group_vlan_filter(filt.gid, filt.toggle, filt.vid);
                }

                pr_devel("[PFQ|%d] vlan_set filter vid %d for gid:%d\n", so->id, filt.vid, filt.gid);
        } break;

        case Q_SO_TX_THREAD_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index))
                {
                        rcu_read_unlock();
                        pr_devel("[PFQ|%d] TX bind: invalid if_index:%d\n", so->id, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1)
                {
                        pr_devel("[PFQ|%d] TX bind: invalid queue:%d\n", so->id, info.hw_queue);
                        return -EPERM;
                }

                to->if_index = info.if_index;
                to->hw_queue = info.hw_queue;

                pr_devel("[PFQ|%d] TX bind: if_index:%d hw_queue:%d\n", so->id, to->if_index, to->hw_queue);

        } break;

        case Q_SO_TX_THREAD_START:
        {
                int cpu;

                if (to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread already created on cpu %d!\n", so->id, to->cpu);
                        return -EPERM;
                }
                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }
                if (to->queue_ptr == NULL)
                {
                        pr_devel("[PFQ|%d] socket not enabled!\n", so->id);
                        return -EPERM;
                }

                if (optlen != sizeof(cpu))
                        return -EINVAL;

                if (copy_from_user(&cpu, optval, optlen))
                        return -EFAULT;

                if (cpu < -1 || (cpu > -1  && !cpu_online(cpu)))
                {
                        pr_devel("[PFQ|%d] invalid cpu (%d)!\n", so->id, cpu);
                        return -EPERM;
                }

                to->cpu = cpu;

                pr_devel("[PFQ|%d] creating TX thread on cpu %d -> if_index:%d hw_queue:%d\n", so->id, to->cpu, to->if_index, to->hw_queue);

                to->thread = kthread_create_on_node(pfq_tx_thread,
                                so,
                                to->cpu == -1 ? -1 : cpu_to_node(to->cpu),
                                "pfq_tx_%d", so->id);

                if (IS_ERR(to->thread)) {
                        printk(KERN_INFO "[PFQ] kernel_thread() create failed on cpu %d!\n", to->cpu);
                        return PTR_ERR(to->thread);
                }

                if (to->cpu != -1)
                        kthread_bind(to->thread, to->cpu);

        } break;

        case Q_SO_TX_THREAD_STOP:
        {
                pr_devel("[PFQ|%d] stopping TX thread...\n", so->id);

                if (!to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread not running!\n", so->id);
                        return -EPERM;
                }

                kthread_stop(to->thread);
                to->thread = NULL;

                pr_devel("[PFQ|%d] stop TX thread: done.\n", so->id);

        } break;

        case Q_SO_TX_THREAD_WAKEUP:
        {
                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }
                if (!to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread not running!\n", so->id);
                        return -EPERM;
                }

                wake_up_process(to->thread);
        } break;

        case Q_SO_TX_QUEUE_FLUSH:
        {
                struct net_device *dev;

                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }

                if (to->thread && to->thread->state == TASK_RUNNING)
                {
                        pr_devel("[PFQ|%d] TX thread is running!\n", so->id);
                        return -EPERM;
                }

                if (to->queue_ptr == NULL)
                {
                        pr_devel("[PFQ|%d] socket not enabled!\n", so->id);
                        return -EPERM;
                }

                dev = dev_get_by_index(sock_net(&so->sk), to->if_index);
                if (!dev)
                {
                        pr_devel("[PFQ|%d] No such device (if_index = %d)\n", so->id, to->if_index);
                        return -EPERM;
                }

                pfq_tx_queue_flush(to, dev, get_cpu(), NUMA_NO_NODE);
                put_cpu();

                dev_put(dev);
        } break;

        case Q_SO_GROUP_FUNCTION:
        {
                struct pfq_group_computation tmp;
                struct pfq_computation_descr *descr;
                size_t psize, ucsize;

                struct pfq_computation_tree *comp;
                void *context;

                if (optlen != sizeof(tmp))
                        return -EINVAL;
                if (copy_from_user(&tmp, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, tmp.gid, "group computation");

                if (copy_from_user(&psize, tmp.prog, sizeof(size_t)))
                        return -EFAULT;

                pr_devel("[PFQ|%d] computation size: %zu\n", so->id, psize);

                ucsize = sizeof(size_t) * 2 + psize * sizeof(struct pfq_functional_descr);

                descr = kmalloc(ucsize, GFP_KERNEL);
                if (descr == NULL) {
                        pr_devel("[PFQ|%d] computation: out of memory!\n", so->id);
                        return -ENOMEM;
                }

                if (copy_from_user(descr, tmp.prog, ucsize)) {
                        pr_devel("[PFQ|%d] computation: copy_from_user error!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* print user computation */

                pr_devel_computation_descr(descr);

                /* ensure the correctness of the specified functional computation */

                if (pfq_validate_computation_descr(descr) < 0) {
                        pr_devel("[PFQ|%d] invalid expression!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* allocate context */

                context = pfq_context_alloc(descr);
                if (context == NULL) {
                        pr_devel("[PFQ|%d] context: alloc error!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* allocate struct pfq_computation_tree */

                comp = pfq_computation_alloc(descr);
                if (comp == NULL) {
                        pr_devel("[PFQ|%d] computation: alloc error!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        return -EFAULT;
                }

                /* link the functional computation */

                if (pfq_computation_rtlink(descr, comp, context) < 0) {
                        pr_devel("[PFQ|%d] computation aborted!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

                /* print the executable tree data structure */

                pr_devel_computation_tree(comp);

                /* run the init functions */

                if (pfq_computation_init(comp) < 0) {
                        pr_devel("[PFQ|%d] computation initialization aborted!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

                /* set the new program */

                if (pfq_set_group_prog(tmp.gid, comp, context) < 0) {
                        pr_devel("[PFQ|%d] set group program error!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

                kfree(descr);
                return 0;

        } break;

        default:
        {
                found = false;
        } break;

        }

        return found ? 0 : sock_setsockopt(sock, level, optname, optval, optlen);
}
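The Q_SO_TX_THREAD_START and Q_SO_TX_THREAD_STOP cases above follow a common kthread lifecycle: create the worker with kthread_create_on_node() on the NUMA node of the requested CPU, pin it with kthread_bind() before it first runs, wake it, and later tear it down with kthread_stop(). Below is a minimal sketch of that lifecycle with hypothetical names (my_tx_state, my_tx_fn); it is not PFQ's actual TX path.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/numa.h>
#include <linux/err.h>

struct my_tx_state {
	struct task_struct *thread;
	int cpu;				/* -1 means "any cpu" */
};

static int my_tx_fn(void *arg)
{
	/* worker body: loop until someone calls kthread_stop() */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int my_tx_start(struct my_tx_state *st, int cpu)
{
	if (st->thread)
		return -EPERM;			/* already running */
	if (cpu != -1 && !cpu_online(cpu))
		return -EINVAL;

	st->cpu = cpu;
	st->thread = kthread_create_on_node(my_tx_fn, st,
			cpu == -1 ? NUMA_NO_NODE : cpu_to_node(cpu),
			"my_tx_%d", cpu);
	if (IS_ERR(st->thread)) {
		int err = PTR_ERR(st->thread);
		st->thread = NULL;
		return err;
	}

	if (cpu != -1)
		kthread_bind(st->thread, cpu);	/* pin before the first wakeup */

	wake_up_process(st->thread);
	return 0;
}

static void my_tx_stop(struct my_tx_state *st)
{
	if (st->thread) {
		kthread_stop(st->thread);	/* blocks until my_tx_fn returns */
		st->thread = NULL;
	}
}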
Beispiel #25
0
static void __exit voyager_thread_stop(void)
{
	kthread_stop(voyager_thread);
}
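kthread_stop() as used in voyager_thread_stop() above blocks until the thread function returns, so the thread body must poll kthread_should_stop(); otherwise the stopping task hangs forever. A minimal sketch of a matching pair (hypothetical module, not the Voyager driver):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *my_thread;

static int my_thread_fn(void *unused)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then sleep a little */
		msleep_interruptible(100);
	}
	return 0;	/* this value is handed back by kthread_stop() */
}

static int __init my_init(void)
{
	my_thread = kthread_run(my_thread_fn, NULL, "my_thread");
	return PTR_ERR_OR_ZERO(my_thread);
}

static void __exit my_exit(void)
{
	kthread_stop(my_thread);	/* wakes the thread and waits for it to exit */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");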
Beispiel #26
0
/******************************************************************************
 *    function:   smt113j_spi_thread_Init
 *    brief   :   
 *    date    :   
 *    author  :   
 *
 *    return  :   none
 *    input   :   none
 *    output  :   none
 ******************************************************************************/
static int smt113j_spi_thread_Init ( void )
{
	int ret = 0;
	struct sched_param param = { .sched_priority = 99 };
	
	spi_work_thread = kmalloc ( sizeof ( smt113j_spi_thread_t ), GFP_KERNEL );
	
	if ( !spi_work_thread ) 
	{
		ERROR_PRINT ("smt113j_spi_thread_Init : Kmalloc Error");
		return ( -ENOMEM );
	}

	spi_work_thread->status = SMT113J_SPI_SYNC_STOP;

	spin_lock_init( &spi_work_thread->tmm_lock );

	init_waitqueue_head ( &spi_work_thread->thread_wait );

	/*** Thread generation and run ***/
	spi_work_thread->thread_task = kthread_run ( smt113j_spi_thread_loop, 
												 NULL, 
												 "SMT113J_SPI_Task" );
	
	if ( IS_ERR ( spi_work_thread->thread_task ))
	{
		ERROR_PRINT ("smt113j_spi_thread_Init : kthread_run error : %p", 
					 spi_work_thread->thread_task);
		ret = PTR_ERR ( spi_work_thread->thread_task );
		goto ERROR2;
	}
	
	ret = sched_setscheduler ( spi_work_thread->thread_task, 
							   SCHED_FIFO, 
							   &param );
	
	if ( ret < 0 ) 
	{
		ERROR_PRINT (
			"smt113j_spi_thread_Init : sched_setscheduler error ret[ %d ]", 
			ret );
		goto ERROR3;
	}

	printk("smt113j_spi_thread_Init: End\n");

	return ( ret );

ERROR3:
	spi_work_thread->status = SMT113J_SPI_SYNC_STOP;
	wake_up_interruptible ( &( spi_work_thread->thread_wait ));
	kthread_stop ( spi_work_thread->thread_task );

ERROR2:
	kfree ( spi_work_thread );

	ERROR_PRINT ("smt113j_spi_thread_Init : Error");
	
	return ( ret );
}
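
smt113j_spi_thread_Init() above combines kthread_run() with sched_setscheduler(SCHED_FIFO) and unwinds with goto labels when a step fails. Note that recent kernels no longer export sched_setscheduler() to modules; sched_set_fifo() (available since v5.9) is the usual replacement. A compact sketch of the same idea under that assumption, with hypothetical names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *my_rt_thread;

static int my_rt_fn(void *unused)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ / 10);
	return 0;
}

static int my_rt_thread_init(void)
{
	my_rt_thread = kthread_run(my_rt_fn, NULL, "my_rt_task");
	if (IS_ERR(my_rt_thread))
		return PTR_ERR(my_rt_thread);	/* report the real error code */

	/* give the worker FIFO priority; helper available since v5.9 */
	sched_set_fifo(my_rt_thread);
	return 0;
}

static void my_rt_thread_exit(void)
{
	kthread_stop(my_rt_thread);
}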


/******************************************************************************
 *    function:   smt113j_spi_thread_Start
 *    brief   :   
 *    date    :   
 *    author  :   
 *
 *    return  :   none
 *    input   :   none
 *    output  :   none
 ******************************************************************************/
static int smt113j_spi_thread_Start ( void )
{
	printk("-> spi_work_thread->status[%d]\n", spi_work_thread->status );

	/* multiple check */
	if ( SMT113J_SPI_SYNC_RUN == spi_work_thread->status ) 
	{
		DEBUG_PRINT("smt113j_spi_thread_Start( Double ) : End!");
		return ( 0 );
	}
	/* thread run status set */
	spi_work_thread->status = SMT113J_SPI_SYNC_RUN;
	
	pwrite = 0;
	pread  = 0;
	
	wake_lock(&smt113j_spi_wake_lock);
	
	/* wakeup event */
	wake_up_interruptible ( &(spi_work_thread->thread_wait ));

	DEBUG_PRINT("smt113j_spi_thread_Start : End!");
	DEBUG_PRINT("-> spi_work_thread->status[%d]", spi_work_thread->status );

	return ( 0 );
}
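smt113j_spi_thread_Start() above only flips a status flag and wakes the wait queue; the thread loop (not shown on this page) is expected to sleep on that queue until the flag is set or the thread is asked to stop. A minimal sketch of such a consumer loop, with hypothetical names mirroring the fields used above:

#include <linux/kthread.h>
#include <linux/wait.h>

enum { MY_SYNC_STOP, MY_SYNC_RUN };

struct my_spi_worker {
	int status;			/* set by the start/stop functions */
	wait_queue_head_t thread_wait;	/* initialised with init_waitqueue_head() */
};

static int my_spi_thread_loop(void *arg)
{
	struct my_spi_worker *w = arg;

	while (!kthread_should_stop()) {
		/* sleep until someone sets status to RUN or stops the thread */
		wait_event_interruptible(w->thread_wait,
					 w->status == MY_SYNC_RUN ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* ... transfer one chunk over SPI here ... */
	}
	return 0;
}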
Beispiel #27
0
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts.  One thing that non SDIO host cannot do is
	 * asynchronous notification of pending SDIO card interrupts
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on drivers behalf for a couple
		 * reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
#ifdef CONFIG_ARCH_EMXX
		ret = process_sdio_pending_irqs(host->card[0]);
#else
		ret = process_sdio_pending_irqs(host->card);
#endif
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;

	return ret;
}
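sdio_card_irq_get() and sdio_card_irq_put() above start the IRQ thread on the first claim and stop it on the last release, using a counter that is protected by the claimed host. A stripped-down sketch of that reference-counting idea (hypothetical names, a mutex instead of the MMC host lock):

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/err.h>

struct my_poller {
	struct mutex lock;		/* initialised with mutex_init() */
	int users;
	struct task_struct *thread;
};

static int my_poll_fn(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(msecs_to_jiffies(10));
	return 0;
}

static int my_poller_get(struct my_poller *p)
{
	int err = 0;

	mutex_lock(&p->lock);
	if (!p->users++) {		/* first user starts the thread */
		p->thread = kthread_run(my_poll_fn, p, "my_poller");
		if (IS_ERR(p->thread)) {
			err = PTR_ERR(p->thread);
			p->thread = NULL;
			p->users--;
		}
	}
	mutex_unlock(&p->lock);
	return err;
}

static void my_poller_put(struct my_poller *p)
{
	mutex_lock(&p->lock);
	if (!--p->users)		/* last user stops it */
		kthread_stop(p->thread);
	mutex_unlock(&p->lock);
}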
Beispiel #28
0
static void thread_exit(void)
{
	kthread_stop(thread1);
	printk(KERN_ALERT"Exiting Module\n");
}
Beispiel #29
0
/* Main entry */
static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    int error = -ENOMEM;
    struct memstick_host *host;
    struct r592_device *dev;

    /* Allocate memory */
    host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev);
    if (!host)
        goto error1;

    dev = memstick_priv(host);
    dev->host = host;
    dev->pci_dev = pdev;
    pci_set_drvdata(pdev, dev);

    /* pci initialization */
    error = pci_enable_device(pdev);
    if (error)
        goto error2;

    pci_set_master(pdev);
    error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
    if (error)
        goto error3;

    error = pci_request_regions(pdev, DRV_NAME);
    if (error)
        goto error3;

    dev->mmio = pci_ioremap_bar(pdev, 0);
    if (!dev->mmio)
        goto error4;

    dev->irq = pdev->irq;
    spin_lock_init(&dev->irq_lock);
    spin_lock_init(&dev->io_thread_lock);
    init_completion(&dev->dma_done);
    INIT_KFIFO(dev->pio_fifo);
    setup_timer(&dev->detect_timer,
                r592_detect_timer, (long unsigned int)dev);

    /* Host initialization */
    host->caps = MEMSTICK_CAP_PAR4;
    host->request = r592_submit_req;
    host->set_param = r592_set_param;
    r592_check_dma(dev);

    dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io");
    if (IS_ERR(dev->io_thread)) {
        error = PTR_ERR(dev->io_thread);
        goto error5;
    }

    /* This is just a precaution, so don't fail */
    dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE,
                          &dev->dummy_dma_page_physical_address);
    r592_stop_dma(dev , 0);

    if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
                    DRV_NAME, dev))
        goto error6;

    r592_update_card_detect(dev);
    if (memstick_add_host(host))
        goto error7;

    message("driver successfully loaded");
    return 0;
error7:
    free_irq(dev->irq, dev);
error6:
    if (dev->dummy_dma_page)
        pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
                            dev->dummy_dma_page_physical_address);

    kthread_stop(dev->io_thread);
error5:
    iounmap(dev->mmio);
error4:
    pci_release_regions(pdev);
error3:
    pci_disable_device(pdev);
error2:
    memstick_free_host(host);
error1:
    return error;
}
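r592_probe() above shows the usual probe-time unwinding: each goto label undoes exactly the steps that already succeeded, in reverse order, and the worker started with kthread_run() must be stopped on any later failure. A reduced sketch of that shape with hypothetical names (my_probe, my_io_fn), not the r592 driver itself:

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/err.h>

struct my_dev {
	struct task_struct *io_thread;
	int irq;
};

static int my_io_fn(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static irqreturn_t my_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_probe(int irq)
{
	struct my_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->irq = irq;

	dev->io_thread = kthread_run(my_io_fn, dev, "my_io");
	if (IS_ERR(dev->io_thread)) {
		err = PTR_ERR(dev->io_thread);
		goto err_free;
	}

	err = request_irq(dev->irq, my_irq, IRQF_SHARED, "my_dev", dev);
	if (err)
		goto err_thread;	/* the thread is already running, stop it */

	return 0;

err_thread:
	kthread_stop(dev->io_thread);
err_free:
	kfree(dev);
	return err;
}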
Beispiel #30
0
/*=============================================================================
	Function	: kfront_init
	Description	:
	Input		:
	Output		:
	Return		:
=============================================================================*/
int kfront_init(void)
{
	void *data;
	int rv = -1;
	int i;

	printk("FP driver for abip 55hd\nVersion %d.%02d (c) 2010 Sysifos\n", FP55_MODULE_VERSION / 100, FP55_MODULE_VERSION % 100);
	printk("Built %s %s\n", __DATE__, __TIME__);

	sema_init(&sem, 0);
	sema_init(&sem_lock, 1);

	/*	alloc key buffer	*/
	data = vmalloc(KEY_BUF_SIZE);
	if (!data)
		return -ENOMEM;

	dvb_ringbuffer_init(&ci_rbuffer, data, KEY_BUF_SIZE);

	kfront = vmalloc(sizeof(struct kfront_tag));
	if (!kfront)
	{
		vfree(data);
		return -ENOMEM;
	}

	/*	open pio	*/
	kfront->clk = stpio_request_pin(PIO_CLOCK_PORT, PIO_CLOCK_BIT, "clock", STPIO_OUT);
	kfront->data = stpio_request_pin(PIO_DATA_PORT, PIO_DATA_BIT, "data", STPIO_BIDIR/*STPIO_OUT*/);
	kfront->stb = stpio_request_pin(PIO_STB_PORT, PIO_STB_BIT, "stb", STPIO_OUT);

	if (!kfront->clk || !kfront->data || !kfront->stb)
	{
		dprintk("%s: kfront->clk=%p, kfront->data=%p kfront->stb=%p open error\n", __func__, kfront->clk, kfront->data, kfront->stb);
		goto error1;
	}

	/*	initialize	*/
	initial();

	/*	start task	*/
	kfront->bQuit = 0;

	kfront->th = kthread_create(kfront_thread, kfront, "kfront");

	if (IS_ERR(kfront->th))
	{
		dprintk("%s: unable to start task\n", __func__);
		goto error2;
	}
	else
	{
		wake_up_process(kfront->th);
	}

	/*	register device	*/
	if (register_chrdev(KFRONT_MAJOR, "kfront0", &kfront_fops))
	{
		dprintk("%s:: Unable to register driver\n", __func__);
		goto error3;
	}

	// input device init
	fp_button_dev = input_allocate_device();
	if (!fp_button_dev)
	{
		printk("FP: ERR: Not enough memory\n");

		rv = -ENOMEM;
		goto error3;
	}


	printk("FP: register key events:");
	set_bit(EV_KEY, fp_button_dev->evbit);
	memset(fp_button_dev->keybit, 0, sizeof(fp_button_dev->keybit));
	for (i = 0; i < FP_KEYS_MAX; i++)
		if (fp_keys[i].key)
		{
			set_bit(fp_keys[i].key, fp_button_dev->keybit);
			printk(" [%s]", fp_keys[i].name);
		}
	printk("\n");

	fp_button_dev->name = "Frontpanel";

	if (input_register_device(fp_button_dev))
	{
		printk("FP: ERR: Failed to register input device\n");

		rv = -ENOMEM;
		goto error3;
	}

	return 0; // all is ok


error3:
	if (kfront->th)
	{
		kfront->bQuit = 1;
		kthread_stop(kfront->th);
	}
error2:
	if (kfront->clk)
		stpio_free_pin(kfront->clk);
	if (kfront->data)
		stpio_free_pin(kfront->data);
	if (kfront->stb)
		stpio_free_pin(kfront->stb);
error1:
	if (kfront)
	{
		vfree(kfront);
		kfront = NULL;
	}
	if (data)
		vfree(data);

	return -1;
}
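kfront_init() above uses kthread_create() followed by wake_up_process(). The two-step form lets a driver configure the task (CPU affinity, priority) before it first runs; kthread_run() is simply the combination of the two. A tiny sketch of both forms with hypothetical names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int my_fn(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *start_deferred(void *data)
{
	struct task_struct *t;

	t = kthread_create(my_fn, data, "my_worker");
	if (IS_ERR(t))
		return t;

	/* ...optionally kthread_bind(t, cpu) or sched_set_fifo(t) here... */

	wake_up_process(t);		/* start it only once it is configured */
	return t;
}

static struct task_struct *start_immediate(void *data)
{
	/* kthread_run() == kthread_create() + wake_up_process() */
	return kthread_run(my_fn, data, "my_worker");
}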