Пример #1
0
/* USB disconnect handler: release the device's minor number, then
 * either free the device state immediately (not open) or mark the
 * interface gone and let the last close perform the cleanup.
 */
static void ld_usb_disconnect(struct usb_interface *intf)
{
	struct ld_usb *dev;
	int minor;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);

	/* grab the minor before the interface is deregistered */
	minor = intf->minor;

	/* give back our minor */
	usb_deregister_dev(intf, &ld_usb_class);

	mutex_lock(&dev->mutex);

	/* if the device is not opened, then we clean up right now */
	if (!dev->open_count) {
		mutex_unlock(&dev->mutex);
		ld_usb_delete(dev);
	} else {
		dev->intf = NULL;
		/* wake up pollers so they notice the device is gone */
		wake_up_interruptible_all(&dev->read_wait);
		wake_up_interruptible_all(&dev->write_wait);
		mutex_unlock(&dev->mutex);
	}

	dev_info(&intf->dev, "LD USB Device #%d now disconnected\n",
		 (minor - USB_LD_MINOR_BASE));
}
Пример #2
0
/**
 *	ld_usb_disconnect
 *
 *	Called by the usb core when the device is removed from the
 *	system. Releases the minor number and frees the device state,
 *	deferring cleanup to the final close while the file is open.
 */
static void ld_usb_disconnect(struct usb_interface *intf)
{
	struct ld_usb *dev = usb_get_intfdata(intf);
	int minor = intf->minor;

	usb_set_intfdata(intf, NULL);

	/* give back our minor */
	usb_deregister_dev(intf, &ld_usb_class);

	mutex_lock(&dev->mutex);

	if (dev->open_count) {
		/* still open: mark the interface gone and wake pollers */
		dev->intf = NULL;
		wake_up_interruptible_all(&dev->read_wait);
		wake_up_interruptible_all(&dev->write_wait);
		mutex_unlock(&dev->mutex);
	} else {
		/* nobody has the device open, clean up right away */
		mutex_unlock(&dev->mutex);
		ld_usb_delete(dev);
	}

	dev_info(&intf->dev, "LD USB Device #%d now disconnected\n",
		 (minor - USB_LD_MINOR_BASE));
}
Пример #3
0
/*
 * Recover an sblock channel (e.g. after a peer restart): drop the
 * channel to IDLE, wake any blocked getters/receivers, then rebuild
 * the tx and rx pools from the per-block state records, returning
 * every block marked SBLOCK_BLK_STATE_DONE to the free pool.
 *
 * Lock order: the ring lock (r_*lock) is taken outside the pool lock
 * (p_*lock) for each direction.
 */
static int sblock_recover(uint8_t dst, uint8_t channel)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	unsigned long pflags, qflags;
	int i, j;

	if (!sblock) {
		return -ENODEV;
	}

	ring = sblock->ring;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

	sblock->state = SBLOCK_STATE_IDLE;
	wake_up_interruptible_all(&ring->getwait);
	wake_up_interruptible_all(&ring->recvwait);

	spin_lock_irqsave(&ring->r_txlock, pflags);
	/* clean txblks ring */
	ringhd->txblk_wrptr = ringhd->txblk_rdptr;

	spin_lock_irqsave(&ring->p_txlock, qflags);
	/* recover txblks pool */
	poolhd->txblk_rdptr = poolhd->txblk_wrptr;
	for (i = 0, j = 0; i < poolhd->txblk_count; i++) {
		if (ring->txrecord[i] == SBLOCK_BLK_STATE_DONE) {
			ring->p_txblks[j].addr = i * sblock->txblksz + poolhd->txblk_addr;
			ring->p_txblks[j].length = sblock->txblksz;
			poolhd->txblk_wrptr = poolhd->txblk_wrptr + 1;
			j++;
		}
	}
	spin_unlock_irqrestore(&ring->p_txlock, qflags);
	spin_unlock_irqrestore(&ring->r_txlock, pflags);


	spin_lock_irqsave(&ring->r_rxlock, pflags);
	/* clean rxblks ring */
	ringhd->rxblk_rdptr = ringhd->rxblk_wrptr;

	spin_lock_irqsave(&ring->p_rxlock, qflags);
	/* recover rxblks pool */
	poolhd->rxblk_wrptr = poolhd->rxblk_rdptr;
	for (i = 0, j = 0; i < poolhd->rxblk_count; i++) {
		if (ring->rxrecord[i] == SBLOCK_BLK_STATE_DONE) {
			ring->p_rxblks[j].addr = i * sblock->rxblksz + poolhd->rxblk_addr;
			ring->p_rxblks[j].length = sblock->rxblksz;
			poolhd->rxblk_wrptr = poolhd->rxblk_wrptr + 1;
			j++;
		}
	}
	spin_unlock_irqrestore(&ring->p_rxlock, qflags);
	spin_unlock_irqrestore(&ring->r_rxlock, pflags);

	return 0;
}
/*
 * Signal a GENI/GENO handshake bit towards the peer.
 *
 * Powered-on path: busy-poll (bounded by fastpoll_timeout_*) until the
 * line is low, raise GENI until the peer mirrors it on GENO, then drop
 * GENI again. Powered-off path: latch the request as pending, raise
 * GENI, and wake waiters so the handshake completes after power-up.
 *
 * Returns 0 on success, -EINVAL if the bit is not in setter_mask,
 * -1 on poll timeout (protection_event is raised and waiters woken).
 */
static int c2c_set_bit(int bit)
{
	struct c2c_genio *genio = &c2c_dev->genio[bit];
	unsigned long long sclk_end;

	if (unlikely((c2c_dev->setter_mask & (1 << bit)) == 0)) {
		dev_err(c2c_dev->dev, "bit %d not alloc in setter_mask\n", bit);
		return -EINVAL;
	}

	if (likely(c2c_dev->pwr_is_on)) {
		fastpoll_timeout_init(&sclk_end);

		/* wait for the line to be idle (low) before raising it;
		 * re-check once after timeout to avoid a false positive */
		while ((READ_GENO(genio->prcm) & genio->mask) != 0) {
			if (unlikely(fastpoll_timeout_check(&sclk_end))) {
				if ((READ_GENO(genio->prcm) & genio->mask) != 0)
					goto poll_err;
				else
					break;
			}
		}
		SET_GENI(genio->mask, genio->prcm);
		/* wait for the peer to acknowledge by mirroring the bit */
		while ((READ_GENO(genio->prcm) & genio->mask) == 0) {
			if (unlikely(fastpoll_timeout_check(&sclk_end))) {
				if ((READ_GENO(genio->prcm) & genio->mask) == 0)
					goto poll_err;
				else
					break;
			}
		}
		CLEAR_GENI(genio->mask, genio->prcm);
#ifdef CONFIG_DEBUG_FS
		genio->hs_cnt++;
#endif
	} else {
		/* powered off: mask the output, mark the request pending,
		 * and raise GENI so it is seen once power returns */
		MASK_GENO(genio->mask, genio->prcm);
		genio->pending = 1;
		smp_wmb();
		SET_GENI(genio->mask, genio->prcm);
		UNMASK_GENO(genio->mask, genio->prcm);
		if (unlikely(!c2c_dev->pwr_last_req)) {
			dev_err(c2c_dev->dev, "set_bit: force power-on req\n");
			request_c2c_wakeup(true);
		}
		c2c_dev->protection_event = 1;
		wake_up_interruptible_all(&c2c_dev->waitq);
	}
	return 0;

poll_err:
	genio->poll_timeout = 1;
	c2c_dev->protection_event = 1;
	wake_up_interruptible_all(&c2c_dev->waitq);
	return -1;
}
Пример #5
0
/*
 * a2xx_cp_intrcallback() - handle a CP (command processor) interrupt.
 *
 * CP_INT_STATUS may lag the master interrupt signal, so it is re-read
 * up to VALID_STATUS_COUNT_MAX times. Any CP error conditions are
 * logged (and interrupt power is cut), handled bits are acked, and
 * waiters are woken on ringbuffer/IB completion.
 */
static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	int i;

	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
					&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);

	trace_kgsl_a2xx_irq_status(device, master_status, status);

	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* master says CP raised an interrupt but the status
			 * never latched; wake waiters so nothing stalls */
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			/* fixed typo: "interrput" -> "interrupt" */
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
		if (status & kgsl_cp_error_irqs[i].mask) {
			KGSL_CMD_CRIT(rb->device, "%s\n",
				 kgsl_cp_error_irqs[i].message);

			/* fatal CP error: turn interrupt power off */
			kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
		}
	}

	/* only ack the interrupt bits we actually handle */
	status &= CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		queue_work(device->work_queue, &device->ts_expired_ws);
		wake_up_interruptible_all(&device->wait_queue);
	}
}
/*
 * IRQ handler for the "fast getter" handshake: invoke the registered
 * event callback, then pulse GENI and busy-wait (bounded by a 1ms
 * sched_clock timeout) for the peer to drop GENO before clearing GENI.
 * On timeout, record a protection event and wake waiters; the IRQ is
 * always reported as handled.
 */
static irqreturn_t fast_getter_hdl(int irq, void *data)
{
	/* polling protection: here use sched_clock, with 1ms timeout */
	struct c2c_genio *genio = (struct c2c_genio *) data;
	unsigned long long sclk_end;

	if (likely(genio->event_cb))
		genio->event_cb(genio->event_data);

	SET_GENI(genio->mask, genio->prcm);
	fastpoll_timeout_init(&sclk_end);
	while ((READ_GENO(genio->prcm) & genio->mask) != 0) {
		if (unlikely(fastpoll_timeout_check(&sclk_end)))
			goto poll_err;
	}
	CLEAR_GENI(genio->mask, genio->prcm);
#ifdef CONFIG_DEBUG_FS
	genio->hs_cnt++;
#endif
	return IRQ_HANDLED;
poll_err:
	genio->poll_timeout = 1;
	c2c_dev->protection_event = 1;
	wake_up_interruptible_all(&c2c_dev->waitq);
	return IRQ_HANDLED;
}
/*
 * Tear down the C2C device: unregister the genio driver, cancel the
 * power-up timer, flag reset/protection and wake any sleepers before
 * resources go away, then release GPIO, IRQ, debugfs and register
 * mappings, and finally detach from the PRCMU and notifier chain.
 */
static void ap9540_c2c_clear(struct ap9540_c2c *c2c)
{
	genio_unregister_driver(&ap9540_c2c_genio_apis);
	del_timer(&c2c->powerup_timer);
	c2c->reset_flag = 1;
	c2c->protection_event = 1;
	/* unblock waiters before freeing the resources they may touch */
	wake_up_interruptible_all(&c2c->waitq);
	flush_scheduled_work();
	if (c2c->wumod_gpio >= 0)
		gpio_free(c2c->wumod_gpio);
	c2c->wumod_gpio = -1;
	if (c2c->irq1 >= 0)
		free_irq(c2c->irq1, c2c);
	c2c->irq1 = -1;
	if (c2c->dbgdir)
		debugfs_remove_recursive(c2c->dbgdir);
	if (c2c->c2c_base)
		iounmap(c2c->c2c_base);
	if (c2c->prcm_base)
		iounmap(c2c->prcm_base);
	c2c->c2c_base = NULL;
	c2c->prcm_base = NULL;

	prcmu_unregister_modem("c2c");
	upap_unregister_notifier(UPAP_NFYID_C2C_NOTIF, &c2c_powerup_nb);

	ap9540_c2c_deep_debug_exit();
}
/* Notify work: scan all registered DSM clients and wake the wait queue
 * of every valid client that has flagged a pending error report
 * (DSM_CLIENT_NOTIFY_BIT is consumed atomically per client). */
static void dsm_work_func(struct work_struct *work)
{
	int i;
	struct dsm_client *client;

	DSM_LOG_DEBUG("%s enter\n", __func__);
	mutex_lock(&g_dsm_server.mtx_lock);
	smp_rmb();
	for(i=0; i<CLIENT_SIZE; i++){
		/* whether it is a valid client */
		if(test_bit(DSM_CLIENT_VAILD_BIT, &g_dsm_server.client_flag[i])){
			DSM_LOG_DEBUG("No.%d client name %s flag 0x%lx\n", i,
				g_dsm_server.client_list[i]->client_name, g_dsm_server.client_flag[i]);
			/* whether the client report error msg, clear a bit and return its old value */
			if(!test_and_clear_bit(DSM_CLIENT_NOTIFY_BIT, &g_dsm_server.client_flag[i]))
				continue;

			client = g_dsm_server.client_list[i];
			if(client == NULL){
				DSM_LOG_INFO("%d client is null client.\n",i);
				continue;
			}
			/* wake up wait queue */
			wake_up_interruptible_all(&client->waitq);
			DSM_LOG_INFO("%s finish notify\n", client->client_name);
		}
	}
	mutex_unlock(&g_dsm_server.mtx_lock);
	DSM_LOG_DEBUG("%s exit\n", __func__);

	return;
}
Пример #9
0
/*
 * Kernel thread: poll the switches register every ~100ms; on a change,
 * push the new value into the event kfifo and wake any readers.
 *
 * NOTE(review): `value` is unsigned char while `lastValue` is compared
 * against -1 as a "no previous sample" sentinel — this only works if
 * lastValue is declared as a wider signed type (e.g. int); verify its
 * declaration, which is outside this view.
 */
static int pollSwitches(void *unused) {
	while (!kthread_should_stop()) {

		unsigned char value = ioread8(switchesIOBase);
		if (value != lastValue) {
			if (lastValue != -1) {
				// Add new event to queue
				if (!kfifo_in_locked(&events, &value, 1, &eventsLock)) {
					printk(KERN_WARNING MODULE_LABEL "Event buffer is full, new event was ignored!\n");
				}

				// notify the wait queue that an event is available
				wake_up_interruptible_all(&areEventsAvailableWaitQueue);
			}

			lastValue = value;
		}

		if (!kthread_should_stop()) {
			msleep_interruptible(100);
		}
	}

	return 0;
}
Пример #10
0
/*
 * Issue a single fsync covering all queued requests in tmp_list,
 * timing it against the fdsync latency threshold, then complete every
 * request on the list with the sync result. Waiters on fdsync_event
 * are woken once the sync finishes.
 */
static
void aio_sync_all(struct aio_output *output, struct list_head *tmp_list)
{
	unsigned long long latency;
	int err;

	output->fdsync_active = true;
	atomic_inc(&output->total_fdsync_count);
	
	/* time the sync call and record it in the stats slot */
	latency = TIME_STATS(
		&timings[2],
		err = aio_sync(output->mf->mf_filp)
		);
	
	threshold_check(&aio_sync_threshold, latency);

	output->fdsync_active = false;
	wake_up_interruptible_all(&output->fdsync_event);
	if (err < 0) {
		MARS_ERR("FDSYNC error %d\n", err);
	}
	
	/* Signal completion for the whole list.
	 * No locking needed, it's on the stack.
	 */
	_complete_all(tmp_list, output, err);
}
/*
 * Stop external-interrupt generation on the subdevice: gate the clock
 * enable bit (ME140C/D variants) or clear the control register
 * entirely, invalidate the cached `rised` state, and wake any threads
 * blocked waiting for an interrupt. Only channel 0 and zero flags are
 * accepted.
 */
static int me1400_ext_irq_io_irq_stop(struct me_subdevice *subdevice,
				      struct file *filep,
				      int channel, int flags)
{
	me1400_ext_irq_subdevice_t *instance;
	unsigned long cpu_flags;
	uint8_t tmp;
	int err = ME_ERRNO_SUCCESS;

	PDEBUG("executed.\n");

	instance = (me1400_ext_irq_subdevice_t *) subdevice;

	if (flags) {
		PERROR("Invalid flag specified.\n");
		return ME_ERRNO_INVALID_FLAGS;
	}

	if (channel) {
		PERROR("Invalid channel.\n");
		return ME_ERRNO_INVALID_CHANNEL;
	}

	ME_SUBDEVICE_ENTER;

	spin_lock_irqsave(&instance->subdevice_lock, cpu_flags);
	spin_lock(instance->clk_src_reg_lock);
//                      // Disable IRQ on PLX
//                      tmp = inb(instance->plx_intcs_reg) & ( ~(PLX_LOCAL_INT1_EN | PLX_LOCAL_INT1_POL | PLX_PCI_INT_EN));
//                      outb(tmp, instance->plx_intcs_reg);
//                      PDEBUG_REG("ctrl_reg outb(PLX:0x%lX)=0x%x\n", instance->plx_intcs_reg, tmp);

	switch (instance->device_id) {
	case PCI_DEVICE_ID_MEILHAUS_ME140C:
	case PCI_DEVICE_ID_MEILHAUS_ME140D:
		tmp = inb(instance->ctrl_reg);
		tmp &= ~ME1400CD_EXT_IRQ_CLK_EN;
		outb(tmp, instance->ctrl_reg);
		PDEBUG_REG("ctrl_reg outl(0x%lX+0x%lX)=0x%x\n",
			   instance->reg_base,
			   instance->ctrl_reg - instance->reg_base, tmp);

		break;

	default:
		outb(0x00, instance->ctrl_reg);
		PDEBUG_REG("ctrl_reg outl(0x%lX+0x%lX)=0x%x\n",
			   instance->reg_base,
			   instance->ctrl_reg - instance->reg_base, 0x00);
		break;
	}
	spin_unlock(instance->clk_src_reg_lock);
	instance->rised = -1;
	spin_unlock_irqrestore(&instance->subdevice_lock, cpu_flags);
	wake_up_interruptible_all(&instance->wait_queue);

	ME_SUBDEVICE_EXIT;

	return err;
}
Пример #12
0
/*
 * Queue an mref on the per-thread list at the given priority and wake
 * the worker thread. The priority is incremented first and clamped to
 * [0, MARS_PRIO_NR-1] (the #else branch collapses everything to one
 * priority when the multi-priority path is disabled).
 */
static inline
void _enqueue(struct aio_threadinfo *tinfo, struct aio_mref_aspect *mref_a, int prio, bool at_end)
{
	unsigned long flags;
#if 1
	prio++;
	if (unlikely(prio < 0)) {
		prio = 0;
	} else if (unlikely(prio >= MARS_PRIO_NR)) {
		prio = MARS_PRIO_NR - 1;
	}
#else
	prio = 0;
#endif

	/* timestamp for queue-latency accounting */
	mref_a->enqueue_stamp = cpu_clock(raw_smp_processor_id());

	traced_lock(&tinfo->lock, flags);

	if (at_end) {
		list_add_tail(&mref_a->io_head, &tinfo->mref_list[prio]);
	} else {
		list_add(&mref_a->io_head, &tinfo->mref_list[prio]);
	}
	tinfo->queued[prio]++;
	atomic_inc(&tinfo->queued_sum);

	traced_unlock(&tinfo->lock, flags);

	atomic_inc(&tinfo->total_enqueue_count);

	wake_up_interruptible_all(&tinfo->event);
}
/*
 * LCDC interrupt handler: on the "done" interrupt (bit 0), ack it,
 * close a started overlay (if overlay support is built in), reprogram
 * panel register timing when the controller provides a hook, then
 * signal vsync completion to a pending waiter.
 */
static irqreturn_t lcdc_isr(int irq, void *data)
{
	uint32_t val;
        struct sprd_lcd_controller *lcdc = (struct sprd_lcd_controller *)data;
	struct sprdfb_device *dev = lcdc->dev;

	val = lcdc_read(LCDC_IRQ_STATUS);

	if (val & 1) { /* lcdc done isr */
		lcdc_write(1, LCDC_IRQ_CLR);

#ifdef CONFIG_FB_LCD_OVERLAY_SUPPORT
	if(SPRD_OVERLAY_STATUS_STARTED == lcdc->overlay_state){
		overlay_close(dev);
	}
#endif


		if (dev->ctrl->set_timing) {
			dev->ctrl->set_timing(dev,LCD_REGISTER_TIMING);
		}

		/* mark vsync done and wake the (single) waiter, if any */
		lcdc->vsync_done = 1;
		if (dev->vsync_waiter) {
			wake_up_interruptible_all(&(lcdc->vsync_queue));
			dev->vsync_waiter = 0;
		}
		pr_debug(KERN_INFO "lcdc_done_isr !\n");

	}

	return IRQ_HANDLED;
}
Пример #14
0
/*
 * Mark CAPI controller `contr` as RUNNING (no-op if it already is),
 * re-register every known application with it, and wake any threads
 * waiting for the controller state change. All under
 * capi_controller_lock.
 */
static void notify_up(u32 contr)
{
	struct capi20_appl *ap;
	struct capi_ctr *ctr;
	u16 applid;

	mutex_lock(&capi_controller_lock);

	if (showcapimsgs & 1)
		printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr);

	ctr = get_capi_ctr_by_nr(contr);
	if (ctr) {
		/* already running: nothing to (re)register */
		if (ctr->state == CAPI_CTR_RUNNING)
			goto unlock_out;

		ctr->state = CAPI_CTR_RUNNING;

		for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
			ap = __get_capi_appl_by_nr(applid);
			if (ap)
				register_appl(ctr, applid, &ap->rparam);
		}

		wake_up_interruptible_all(&ctr->state_wait_queue);
	} else
		printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);

unlock_out:
	mutex_unlock(&capi_controller_lock);
}
Пример #15
0
/*
 * Reset the frequency-input (FI) subdevice: disable the FI line and
 * its interrupt in the control register, clear all cached measurement
 * state (divider, counters, period, duty values), then wake any
 * threads blocked on the subdevice wait queue.
 */
int me4700_fi_io_reset_subdevice(me_subdevice_t* subdevice, struct file* filep, int flags)
{
	me4700_fi_subdevice_t* instance;
	uint32_t tmp;

	instance = (me4700_fi_subdevice_t *) subdevice;

	PDEBUG("executed idx=%d.\n", instance->base.idx);

	if (flags)
	{
		PERROR("Invalid flags specified. Must be ME_IO_RESET_SUBDEVICE_NO_FLAGS.\n");
		return ME_ERRNO_INVALID_FLAGS;
	}

	ME_SUBDEVICE_ENTER;
		ME_LOCK_PROTECTOR;
			instance->status = fi_status_none;
			me_readl(instance->base.dev, &tmp, instance->ctrl_reg);
			// Disable FI line. Disable interrupt.
			tmp &= ~(ME4700_FI_START_STOP_MASK << (ME4700_FI_START_STOP_BIT_BASE + (instance->base.idx << 1)));
			me_writel(instance->base.dev, tmp, instance->ctrl_reg);
			instance->divider = 0x0;
			instance->first_counter = 0x0;
			instance->period = 0x0;
			instance->low = 0x0;
			instance->high = 0x0;
		ME_UNLOCK_PROTECTOR;
		wake_up_interruptible_all(&instance->wait_queue);
	ME_SUBDEVICE_EXIT;

	return ME_ERRNO_SUCCESS;
}
Пример #16
0
/*
 * Release a netlink socket: unlink it from the hash, tear down any
 * dump callback still in progress (dropping its socket reference),
 * orphan the socket, wake poll()ers, purge the write queue, and drop
 * the final reference.
 */
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	netlink_remove(sk);

	spin_lock(&sk->protinfo.af_netlink->cb_lock);
	if (sk->protinfo.af_netlink->cb) {
		/* a dump was in flight: finish and free its callback */
		sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
		netlink_destroy_callback(sk->protinfo.af_netlink->cb);
		sk->protinfo.af_netlink->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

	skb_queue_purge(&sk->write_queue);

	sock_put(sk);
	return 0;
}
Пример #17
0
/*
 * Return a tx block to the channel's free pool: push it back at the
 * (decremented) pool read pointer, mark its record DONE, and wake a
 * blocked sblock_get() when this put makes exactly one block
 * available again. Silently returns if the channel does not exist.
 */
void sblock_put(uint8_t dst, uint8_t channel, struct sblock *blk)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	unsigned long flags;
	int txpos;
	int index;

	if (!sblock) {
		return;
	}

	ring = sblock->ring;
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

	spin_lock_irqsave(&ring->p_txlock, flags);
	txpos = sblock_get_ringpos(poolhd->txblk_rdptr - 1, poolhd->txblk_count);
	/* translate the virtual block address back to the shared-mem address */
	ring->r_txblks[txpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
	ring->r_txblks[txpos].length = poolhd->txblk_size;
	poolhd->txblk_rdptr = poolhd->txblk_rdptr - 1;
	if ((int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr) == 1) {
		wake_up_interruptible_all(&(ring->getwait));
	}
	index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
	ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;

	spin_unlock_irqrestore(&ring->p_txlock, flags);
}
/*
 * Mixer interrupt handler: timestamp VSYNC and wake its waiters,
 * clear the VSYNC interrupt, then let the underrun handler adjust the
 * status word before writing it back. Register access is serialized
 * by reg_slock.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		mdev->vsync_timestamp = ktime_get();
		wake_up_interruptible_all(&mdev->vsync_wait);
	}

	/* clear interrupts.
	   vsync is updated after write MXR_CFG_LAYER_UPDATE bit */
	if (val & MXR_INT_CLEAR_VSYNC)
		mxr_write_mask(mdev, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);

	val = mxr_irq_underrun_handle(mdev, val);
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	return IRQ_HANDLED;
}
Пример #19
0
/*
 * Reset the digital-output subdevice: zero the output port, disable
 * the power-state interrupt for this subdevice index, clear the irq
 * bookkeeping (`rised`, `count`), and wake any waiting threads.
 */
int me8200_do_io_reset_subdevice(struct me_subdevice* subdevice, struct file* filep, int flags)
{
	me8200_do_subdevice_t* instance;
	uint8_t tmp;

	PDEBUG("executed.\n");

	if (flags)
	{
		PERROR("Invalid flags specified. Must be ME_IO_RESET_SUBDEVICE_NO_FLAGS.\n");
		return ME_ERRNO_INVALID_FLAGS;
	}

	instance = (me8200_do_subdevice_t *)subdevice;

	ME_SUBDEVICE_ENTER;
		ME_LOCK_PROTECTOR;
			me_writeb(instance->base.dev, 0x00, instance->port_reg);
			ME_SPIN_LOCK(instance->irq_mode_lock);
				me_readb(instance->base.dev, &tmp, instance->irq_mode_reg);
				tmp &= ~(ME8200_IRQ_MODE_BIT_ENABLE_POWER << (ME8200_IRQ_MODE_POWER_SHIFT * instance->base.idx));
				me_writeb(instance->base.dev, tmp, instance->irq_mode_reg);
			ME_SPIN_UNLOCK(instance->irq_mode_lock);
			instance->rised = -1;
			instance->count = 0;
		ME_UNLOCK_PROTECTOR;
	wake_up_interruptible_all(&instance->wait_queue);
	ME_SUBDEVICE_EXIT;

	return ME_ERRNO_SUCCESS;
}
Пример #20
0
/* Workaround for non-implemented aio_fsync():
 * dedicated thread that collects queued sync requests (highest
 * priority list first) and issues one fsync per batch via
 * aio_sync_all(). Runs until asked to stop AND the queue drains.
 */
static
int aio_sync_thread(void *data)
{
	struct aio_threadinfo *tinfo = data;
	struct aio_output *output = tinfo->output;
	
	MARS_DBG("sync thread has started on '%s'.\n", output->brick->brick_path);
	//set_user_nice(current, -20);

	while (!brick_thread_should_stop() || atomic_read(&tinfo->queued_sum) > 0) {
		LIST_HEAD(tmp_list);
		unsigned long flags;
		int i;

		/* no sync in flight: let waiters proceed */
		output->fdsync_active = false;
		wake_up_interruptible_all(&output->fdsync_event);

		wait_event_interruptible_timeout(
			tinfo->event,
			atomic_read(&tinfo->queued_sum) > 0,
			HZ / 4);

		traced_lock(&tinfo->lock, flags);
		for (i = 0; i < MARS_PRIO_NR; i++) {
			struct list_head *start = &tinfo->mref_list[i];
			if (!list_empty(start)) {
				// move over the whole list
				list_replace_init(start, &tmp_list);
				atomic_sub(tinfo->queued[i], &tinfo->queued_sum);
				tinfo->queued[i] = 0;
				break;
			}
		}
		traced_unlock(&tinfo->lock, flags);

		if (!list_empty(&tmp_list)) {
			aio_sync_all(output, &tmp_list);
		}
	}

	MARS_DBG("sync thread has stopped.\n");
	tinfo->terminated = true;
	wake_up_interruptible_all(&tinfo->terminate_event);
	return 0;
}
/* Notifier callback: C2C power-up completed — record it and wake all
 * threads waiting on the device wait queue. */
static int c2c_powerup_notif(struct notifier_block *nb,
		unsigned long event, void *data)
{
	if (!c2c_dev)
		return -ENODEV;

	c2c_dev->pwr_is_on = 1;
	wake_up_interruptible_all(&c2c_dev->waitq);

	return 0;
}
Пример #22
0
/* Notifier hook: enqueue the incoming event for this reader and, when
 * the enqueue succeeds, wake anyone blocked waiting for events. */
static int hone_event_handler(struct notifier_block *nb, unsigned long val, void *v)
{
	struct hone_reader *reader;

	reader = container_of(nb, struct hone_reader, nb);
	if (enqueue_event(reader, v) != 0)
		wake_up_interruptible_all(&reader->event_wait_queue);

	return 0;
}
Пример #23
0
/*
 * Reset the digital-input subdevice: clear both interrupt-enable bits
 * in the control register, zero the mask and pattern registers and
 * the cached irq state, re-arm the PLX interrupt router, and wake any
 * waiting threads.
 */
static int me8100_di_io_reset_subdevice(struct me_subdevice *subdevice,
					struct file *filep, int flags)
{
	me8100_di_subdevice_t *instance;
	unsigned short ctrl;
	unsigned long cpu_flags;

	PDEBUG("executed.\n");

	instance = (me8100_di_subdevice_t *) subdevice;

	if (flags) {
		PERROR("Invalid flag specified.\n");
		return ME_ERRNO_INVALID_FLAGS;
	}

	ME_SUBDEVICE_ENTER;

	spin_lock_irqsave(&instance->subdevice_lock, cpu_flags);
	spin_lock(instance->ctrl_reg_lock);
	ctrl = inw(instance->ctrl_reg);
	ctrl &= ~(ME8100_DIO_CTRL_BIT_INTB_1 | ME8100_DIO_CTRL_BIT_INTB_0);
	outw(ctrl, instance->ctrl_reg);
	PDEBUG_REG("ctrl_reg outl(0x%lX+0x%lX)=0x%x\n", instance->reg_base,
		   instance->ctrl_reg - instance->reg_base, ctrl);
	spin_unlock(instance->ctrl_reg_lock);

	outw(0, instance->mask_reg);
	PDEBUG_REG("mask_reg outw(0x%lX+0x%lX)=0x%x\n", instance->reg_base,
		   instance->mask_reg - instance->reg_base, 0);
	outw(0, instance->pattern_reg);
	PDEBUG_REG("pattern_reg outw(0x%lX+0x%lX)=0x%x\n", instance->reg_base,
		   instance->pattern_reg - instance->reg_base, 0);
	instance->rised = -1;
	instance->irq_count = 0;
	instance->filtering_flag = 0;
	spin_unlock_irqrestore(&instance->subdevice_lock, cpu_flags);

	/* re-enable the PLX interrupt routing for both local lines */
	outl(PLX_INTCSR_LOCAL_INT1_EN |
	     PLX_INTCSR_LOCAL_INT1_POL |
	     PLX_INTCSR_LOCAL_INT2_EN |
	     PLX_INTCSR_LOCAL_INT2_POL |
	     PLX_INTCSR_PCI_INT_EN, instance->irq_status_reg);
	PDEBUG_REG("plx:irq_status_reg outl(0x%lX)=0x%x\n",
		   instance->irq_status_reg,
		   PLX_INTCSR_LOCAL_INT1_EN | PLX_INTCSR_LOCAL_INT1_POL |
		   PLX_INTCSR_LOCAL_INT2_EN | PLX_INTCSR_LOCAL_INT2_POL |
		   PLX_INTCSR_PCI_INT_EN);

	wake_up_interruptible_all(&instance->wait_queue);
	ME_SUBDEVICE_EXIT;

	return ME_ERRNO_SUCCESS;
}
Пример #24
0
/*
 * DECON-ext DSI interrupt handler: acks and dispatches the frame
 * (VSYNC), FIFO-underrun and i80 frame-done interrupts, waking the
 * corresponding wait queues. The write-back frame-done path is
 * compiled out (#if 0).
 */
irqreturn_t decon_ext_dsi_irq_handler(int irq, void *dev_data)
{
	struct decon_device *decon = dev_data;
	ktime_t timestamp = ktime_get();
	u32 irq_sts_reg;
	u32 wb_irq_sts_reg;

	spin_lock(&decon->slock);

	irq_sts_reg = decon_read(decon->id, VIDINTCON1);
	wb_irq_sts_reg = decon_read(decon->id, VIDINTCON3);
	if (irq_sts_reg & VIDINTCON1_INT_FRAME) {
		/* VSYNC interrupt, accept it */
		decon_write_mask(decon->id, VIDINTCON1, ~0, VIDINTCON1_INT_FRAME);
		decon->vsync_info.timestamp = timestamp;
		wake_up_interruptible_all(&decon->vsync_info.wait);
	}
	if (irq_sts_reg & VIDINTCON1_INT_FIFO) {
		decon_err("DECON-ext FIFO underrun\n");
		decon_write_mask(decon->id, VIDINTCON1, ~0, VIDINTCON1_INT_FIFO);
	}
	if (irq_sts_reg & VIDINTCON1_INT_I80) {
		/* i80 frame done: ack and wake frame-done waiters */
		decon_write_mask(decon->id, VIDINTCON1, ~0, VIDINTCON1_INT_I80);
		wake_up_interruptible_all(&decon->wait_frmdone);
	}
#if 0 
	if (wb_irq_sts_reg & VIDINTCON3_WB_FRAME_DONE) {
		decon_dbg("write-back frame done\n");
		DISP_SS_EVENT_LOG(DISP_EVT_WB_FRAME_DONE, &decon->sd, ktime_set(0, 0));
		decon_write_mask(decon->id, VIDINTCON3, ~0, VIDINTCON3_WB_FRAME_DONE);
		atomic_set(&decon->wb_done, STATE_DONE);
		wake_up_interruptible_all(&decon->wait_frmdone);
		decon_reg_per_frame_off(decon->id);
		decon_reg_update_standalone(decon->id);
		decon_reg_wb_swtrigger(decon->id);
		decon_reg_wait_stop_status_timeout(decon->id, 20 * 1000);
	}
#endif
	spin_unlock(&decon->slock);
	return IRQ_HANDLED;
}
Пример #25
0
/*
 * Stop digital-input interrupts on the subdevice: write the enable
 * bit, then swap it for the clear bit in a second register write (the
 * device's disable sequence), reset the cached status/edge state, and
 * wake any waiting threads. Only channel 0 and zero flags are valid.
 */
static int me8200_di_io_irq_stop(me_subdevice_t * subdevice,
				 struct file *filep, int channel, int flags)
{
	me8200_di_subdevice_t *instance;
	uint8_t tmp;
	unsigned long status;

	PDEBUG("executed.\n");

	instance = (me8200_di_subdevice_t *) subdevice;

	if (flags) {
		PERROR("Invalid flag specified.\n");
		return ME_ERRNO_INVALID_FLAGS;
	}

	if (channel) {
		PERROR("Invalid channel specified.\n");
		return ME_ERRNO_INVALID_CHANNEL;
	}

	ME_SUBDEVICE_ENTER spin_lock_irqsave(&instance->subdevice_lock, status);
	spin_lock(instance->irq_ctrl_lock);
	tmp = inb(instance->irq_ctrl_reg);
	tmp |=
	    (ME8200_DI_IRQ_CTRL_BIT_ENABLE <<
	     (ME8200_DI_IRQ_CTRL_SHIFT * instance->di_idx));
	outb(tmp, instance->irq_ctrl_reg);
	PDEBUG_REG("irq_ctrl_reg outb(0x%lX+0x%lX)=0x%x\n", instance->reg_base,
		   instance->irq_ctrl_reg - instance->reg_base, tmp);
	tmp &=
	    ~(ME8200_DI_IRQ_CTRL_BIT_ENABLE <<
	      (ME8200_DI_IRQ_CTRL_SHIFT * instance->di_idx));
	tmp |=
	    (ME8200_DI_IRQ_CTRL_BIT_CLEAR <<
	     (ME8200_DI_IRQ_CTRL_SHIFT * instance->di_idx));
//                      tmp &= ~(ME8200_DI_IRQ_CTRL_BIT_CLEAR << (ME8200_DI_IRQ_CTRL_SHIFT * instance->di_idx));
	outb(tmp, instance->irq_ctrl_reg);
	PDEBUG_REG("irq_ctrl_reg outb(0x%lX+0x%lX)=0x%x\n", instance->reg_base,
		   instance->irq_ctrl_reg - instance->reg_base, tmp);
	spin_unlock(instance->irq_ctrl_lock);

	instance->rised = -1;
	instance->status_value = 0;
	instance->status_value_edges = 0;
	instance->filtering_flag = 0;
	spin_unlock_irqrestore(&instance->subdevice_lock, status);
	wake_up_interruptible_all(&instance->wait_queue);

	ME_SUBDEVICE_EXIT;

	return ME_ERRNO_SUCCESS;
}
Пример #26
0
/*
 * Shared-line ISR for the ME-0600 external interrupt: check the PLX
 * INTCSR for our local interrupt (selected by the subdevice line
 * number), latch the event, ack it by reading the reset register, and
 * wake waiters. Returns IRQ_NONE when the interrupt was not ours or
 * the subdevice index is out of range.
 */
static irqreturn_t me0600_isr(int irq, void *dev_id)
{
	me0600_ext_irq_subdevice_t *instance;
	uint32_t status;
	uint32_t mask = PLX_INTCSR_PCI_INT_EN;
	irqreturn_t ret = IRQ_HANDLED;

	instance = (me0600_ext_irq_subdevice_t *) dev_id;

	if (irq != instance->irq) {
		PERROR("Incorrect interrupt num: %d.\n", irq);
		return IRQ_NONE;
	}

	PDEBUG("executed.\n");

	if (instance->lintno > 1) {
		PERROR_CRITICAL
		    ("%s():Wrong subdevice index=%d plx:irq_status_reg=0x%04X.\n",
		     __func__, instance->lintno, inl(instance->intcsr));
		return IRQ_NONE;
	}

	spin_lock(&instance->subdevice_lock);
	spin_lock(instance->intcsr_lock);
	status = inl(instance->intcsr);
	/* select the state/enable bits for our local interrupt line */
	switch (instance->lintno) {
	case 0:
		mask |= PLX_INTCSR_LOCAL_INT1_STATE | PLX_INTCSR_LOCAL_INT1_EN;
		break;
	case 1:
		mask |= PLX_INTCSR_LOCAL_INT2_STATE | PLX_INTCSR_LOCAL_INT2_EN;
		break;
	}

	if ((status & mask) == mask) {
		instance->rised = 1;
		instance->n++;
		/* reading the reset register acks the interrupt */
		inb(instance->reset_reg);
		PDEBUG("Interrupt detected.\n");
	} else {
		PINFO
		    ("%ld Shared interrupt. %s(): idx=0 plx:irq_status_reg=0x%04X\n",
		     jiffies, __func__, status);
		ret = IRQ_NONE;
	}
	spin_unlock(instance->intcsr_lock);
	spin_unlock(&instance->subdevice_lock);

	wake_up_interruptible_all(&instance->wait_queue);

	return ret;
}
Пример #27
0
/*
 * Stop external interrupts on the subdevice: clear the matching local
 * interrupt enable bit in the PLX INTCSR (selected by the subdevice
 * line number), invalidate the cached `rised` state, and wake any
 * threads blocked waiting for an interrupt.
 */
static int me0600_ext_irq_io_irq_stop(struct me_subdevice *subdevice,
				      struct file *filep,
				      int channel, int flags)
{
	me0600_ext_irq_subdevice_t *instance;
	int err = ME_ERRNO_SUCCESS;
	uint32_t tmp;
	unsigned long cpu_flags;

	PDEBUG("executed.\n");

	instance = (me0600_ext_irq_subdevice_t *) subdevice;

	if (flags) {
		PERROR("Invalid flag specified.\n");
		return ME_ERRNO_INVALID_FLAGS;
	}

	if (instance->lintno > 1) {
		PERROR("Wrong idx=%d.\n", instance->lintno);
		return ME_ERRNO_INVALID_SUBDEVICE;
	}

	if (channel) {
		PERROR("Invalid channel specified.\n");
		return ME_ERRNO_INVALID_CHANNEL;
	}

	ME_SUBDEVICE_ENTER;

	spin_lock_irqsave(&instance->subdevice_lock, cpu_flags);
	spin_lock(instance->intcsr_lock);
	tmp = inl(instance->intcsr);
	switch (instance->lintno) {
	case 0:
		tmp &= ~PLX_INTCSR_LOCAL_INT1_EN;
		break;
	case 1:
		tmp &= ~PLX_INTCSR_LOCAL_INT2_EN;
		break;
	}
	outl(tmp, instance->intcsr);
	PDEBUG_REG("intcsr outl(plx:0x%X)=0x%x\n", instance->intcsr, tmp);
	spin_unlock(instance->intcsr_lock);
	instance->rised = -1;
	spin_unlock_irqrestore(&instance->subdevice_lock, cpu_flags);
	wake_up_interruptible_all(&instance->wait_queue);

	ME_SUBDEVICE_EXIT;

	return err;
}
/* IPC-ready handshake callback: record which init phase completed
 * (READY vs READY_ACK, chosen by the genio level) and wake waiters. */
static void ipc_ready_hdl(void *data)
{
	struct c2c_genio *genio = data;

	mutex_lock(&c2c_dev->lock);
	c2c_dev->init_flags |= genio->level ?
		GENIO_INIT_IPC_READY : GENIO_INIT_IPC_READY_ACK;
	mutex_unlock(&c2c_dev->lock);

	wake_up_interruptible_all(&c2c_dev->waitq);
}
Пример #29
0
int tegra_sema_signal(struct tegra_sema_info *info)
{
	unsigned long flags;

	if (!info)
		return -EINVAL;

	spin_lock_irqsave(&info->lock, flags);
	info->count++;
	wake_up_interruptible_all(&info->wq);
	spin_unlock_irqrestore(&info->lock, flags);
	return 0;
}
Пример #30
0
/* Abort pending speech: flag the flush, drop buffered output, restore
 * the pitch state when a live synth has a shift pending, then kick
 * both the event waiters and the synth worker thread. */
void do_flush(void)
{
	speakup_info.flushing = 1;
	synth_buffer_clear();
	if (synth->alive && pitch_shift) {
		synth_printf("%s", pitch_buff);
		pitch_shift = 0;
	}
	wake_up_interruptible_all(&speakup_event);
	wake_up_process(speakup_task);
}