void rtc_update(unsigned long num, unsigned long events)
{
	spin_lock(&rtc_lock);
	rtc_irq_data = (rtc_irq_data + (num << 8)) | events;
	spin_unlock(&rtc_lock);

	wake_up_interruptible(&rtc_wait);
	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
}
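The queue that rtc_update() signals is filled in by the driver's fasync file operation. A minimal sketch of that counterpart, assuming the same rtc_async_queue as above (the real rtc driver is structured this way, but treat the sketch as illustrative):

static int rtc_fasync(int fd, struct file *filp, int on)
{
	/* fasync_helper() adds or removes this opener from rtc_async_queue
	 * whenever userspace toggles the FASYNC flag with fcntl() */
	return fasync_helper(fd, filp, on, &rtc_async_queue);
}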
Example #2
static void n_tty_write_wakeup(struct tty_struct *tty)
{
	if (tty->fasync) {
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
	}
}
Example #3
/**
 * n_hdlc_tty_receive - Called by tty driver when receive data is available
 * @tty	- pointer to tty instance data
 * @data - pointer to received data
 * @flags - pointer to flags for data
 * @count - count of received data in bytes
 *
 * Called by tty low level driver when receive data is available. Data is
 * interpreted as one HDLC frame.
 */
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
			       char *flags, int count)
{
	register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	register struct n_hdlc_buf *buf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d) n_hdlc_tty_receive() called count=%d\n",
			__FILE__, __LINE__, count);
		
	/* This can happen if stuff comes in on the backup tty */
	if (!n_hdlc || tty != n_hdlc->tty)
		return;
		
	/* verify line is using HDLC discipline */
	if (n_hdlc->magic != HDLC_MAGIC) {
		printk("%s(%d) line not using HDLC discipline\n",
			__FILE__,__LINE__);
		return;
	}
	
	if (count > maxframe) {
		if (debuglevel >= DEBUG_LEVEL_INFO)	
			printk("%s(%d) rx count>maxframesize, data discarded\n",
			       __FILE__,__LINE__);
		return;
	}

	/* get a free HDLC buffer */	
	buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
	if (!buf) {
		/* no buffers in free list, attempt to allocate another rx buffer */
		/* unless the maximum count has been reached */
		if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
			buf = kmalloc(N_HDLC_BUF_SIZE, GFP_ATOMIC);
	}
	
	if (!buf) {
		if (debuglevel >= DEBUG_LEVEL_INFO)	
			printk("%s(%d) no more rx buffers, data discarded\n",
			       __FILE__,__LINE__);
		return;
	}
		
	/* copy received data to HDLC buffer */
	memcpy(buf->buf, data, count);
	buf->count = count;

	/* add HDLC buffer to list of received frames */
	n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);
	
	/* wake up any blocked reads and perform async signalling */
	wake_up_interruptible (&tty->read_wait);
	if (n_hdlc->tty->fasync != NULL)
		kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);

}	/* end of n_hdlc_tty_receive() */
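None of these kill_fasync() calls reach a process unless it has registered for asynchronous notification on the open file. A minimal userspace sketch using standard fcntl() calls (the device path is hypothetical):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void sigio_handler(int sig)
{
	/* only async-signal-safe work here; real code would just set a flag */
	write(STDOUT_FILENO, "SIGIO\n", 6);
}

int main(void)
{
	int fd = open("/dev/demo", O_RDWR);	/* hypothetical device node */
	if (fd < 0)
		return 1;

	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());		/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);	/* invokes the driver's ->fasync() */

	for (;;)
		pause();	/* each kill_fasync(..., SIGIO, POLL_IN) wakes us here */
	return 0;
}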
Example #5
static ssize_t write_flag(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	flag = buf[0] - '0';

	/* send SIGIO to every application that registered with the FASYNC flag */
	kill_fasync(&sigio_list, SIGIO, POLL_IN);

	return count;
}
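write_flag() signals whatever is on sigio_list; for a character device that list is normally maintained through the .fasync slot of file_operations. A sketch under that assumption (the demo_* names are hypothetical):

static int demo_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &sigio_list);
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.fasync	= demo_fasync,
	/* .open/.read/.release elided; release() should call
	 * demo_fasync(-1, filp, 0) to drop the entry */
};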
static ssize_t vs_sdlc_kernel_write(void *context, const void *buf, size_t count)
{
	struct vl_sync_port *port = (struct vl_sync_port *)context;
	struct vl_sync_port *paired_port = NULL;
	struct n_hdlc *n_hdlc = NULL;
	struct n_hdlc_buf *tbuf;
	/* unsigned long flags; */

	/* Validate the pointers */
	if (!port || !IS_OPEN(port))
		return -ENODEV;

	paired_port = port->paired_port;
	if (!paired_port || !IS_OPEN(paired_port))
		return -EINVAL;

	/* Write into the paired port's receive buffer, if available */
	n_hdlc = paired_port->n_hdlc;
	if (!n_hdlc)
		return -EIO;

	/* Verify frame size */
	if (count > MAX_HDLC_FRAME_SIZE) {
		pr_debug("vs_sdlc_kernel_write: truncating user packet from %lu to %d\n",
			 (unsigned long)count, MAX_HDLC_FRAME_SIZE);
		count = MAX_HDLC_FRAME_SIZE;
	}

	/* Lock paired_port */
	/* spin_lock_irqsave(&paired_port->ctrl_lock, flags); */

	/* Allocate transmit buffer */
	tbuf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
	if (!tbuf) {
		/* spin_unlock_irqrestore(&paired_port->ctrl_lock, flags); */
		return -EAGAIN;
	}

	/* Copy the caller's buffer */
	memcpy(tbuf->buf, buf, count);

	pr_debug("vs_sdlc_kernel_write: %zu bytes: %02x\n", count, tbuf->buf[0]);

	/* Send the data */
	tbuf->count = count;
	n_hdlc_buf_put(&n_hdlc->rx_buf_list, tbuf);

	/* spin_unlock_irqrestore(&paired_port->ctrl_lock, flags); */

	/* Wake up any blocked reads on the paired port and perform async signalling */
	wake_up_interruptible(&paired_port->read_wait);
	if (paired_port->fasync_queue != NULL)
		kill_fasync(&paired_port->fasync_queue, SIGIO, POLL_IN);

	return count;
}
void timer_callback(unsigned long data)
{
	/* stop the calling function from sleeping */
	if (async_queue)
		kill_fasync(&async_queue, SIGIO, POLL_IN);

	/* delete timer */
	del_timer(mytimer);
	active = 0;
}
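timer_callback() assumes a timer armed elsewhere; its (unsigned long data) signature implies the classic pre-4.15 timer API. A sketch of the matching setup, reusing mytimer and active from the example (illustrative, not the original module's code):

static int start_timer(void)
{
	mytimer = kmalloc(sizeof(*mytimer), GFP_KERNEL);
	if (!mytimer)
		return -ENOMEM;

	setup_timer(mytimer, timer_callback, 0);	/* old callback style: fn(unsigned long) */
	mod_timer(mytimer, jiffies + HZ);		/* fire in roughly one second */
	active = 1;
	return 0;
}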
static ssize_t globalfifo_write(struct file *filp, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct globalfifo_dev *dev = filp->private_data;
	int ret;
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&dev->mutex);
	add_wait_queue(&dev->w_wait, &wait);

	while (dev->current_len == GLOBALFIFO_SIZE) {
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&dev->mutex);

		schedule();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto out2;
		}

		mutex_lock(&dev->mutex);
	}

	if (count > GLOBALFIFO_SIZE - dev->current_len)
		count = GLOBALFIFO_SIZE - dev->current_len;
	
	if (copy_from_user(dev->mem + dev->current_len, buf, count)) {
		ret = -EFAULT;
		goto out;
	} else {
		dev->current_len += count;
		printk(KERN_INFO "written %d bytes(s), current_len:%d\n", count,
			dev->current_len);

		wake_up_interruptible(&dev->r_wait);

		if (dev->async_queue) {
			kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
			printk(KERN_DEBUG "%s kill SIGIO\n", __func__);
		}

		ret = count;
	}

out:
	mutex_unlock(&dev->mutex);
out2:
	remove_wait_queue(&dev->w_wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
Example #9
static void ttlio_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	ttlio_in = *ttlio_base;

	ttlio_irq_arrived = 1;
	teller++;
	
	/* wake up the waiting process */
	wake_up_interruptible(&ttlio_wait);
	kill_fasync(&ttlio_async_queue, SIGIO, POLL_IN);
}
Example #10
/* interrupt process Top half */
irqreturn_t xxx_interrupt(int irq, void *dev_id)
{
	g_count++;
	
	if (g_count % 100 == 0) {
		/* send SIGIO to the process recorded in the fasync_struct,
		 * triggering the application's SIGIO signal handler */
		kill_fasync(&button_async, SIGIO, POLL_IN);
	}

	return IRQ_HANDLED;
}
Example #11
static int
pipe_release(struct inode *inode, int decr, int decw)
{
    down(PIPE_SEM(*inode));
    PIPE_READERS(*inode) -= decr;
    PIPE_WRITERS(*inode) -= decw;
    if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
        struct pipe_inode_info *info = inode->i_pipe;
        inode->i_pipe = NULL;
        free_page((unsigned long) info->base);
        kfree(info);
    } else {
        wake_up_interruptible(PIPE_WAIT(*inode));
        kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
        kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
    }
    up(PIPE_SEM(*inode));

    return 0;
}
Example #12
static inline void rtc_wake_event(void)
{
#ifndef CONFIG_RTC_HISTOGRAM
	kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
#else
	if (!(rtc_status & RTC_IS_OPEN))
		return;

	switch (rtc_state) {
	/* Startup */
	case S_STARTUP:
		kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
		break;
	/* Waiting for an interrupt */
	case S_IDLE:
		kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
		last_interrupt_time = get_cycles();
		rtc_state = S_WAITING_FOR_READ;
		break;

	/* Signal has been delivered. waiting for rtc_read() */
	case S_WAITING_FOR_READ:
		/*
		 * Well foo.  The usermode application didn't
		 * schedule and read in time.
		 */
		last_interrupt_time = get_cycles();
		rtc_state = S_READ_MISSED;
		printk("Read missed before next interrupt\n");
		break;
	/* Signal has been delivered, read() deadline was missed */
	case S_READ_MISSED:
		/*
		 * Not much we can do here.  We're waiting for the usermode
		 * application to read the rtc
		 */
		last_interrupt_time = get_cycles();
		break;
	}
#endif
}
Example #13
static void mk712_output_packet(struct mk712_packet data)
{
        int head = queue->head;

        queue->buf[head] = data;
        head = (head + 1) & (MK712_BUF_SIZE-1);
        if (head != queue->tail) {
                queue->head = head;
                kill_fasync(&queue->fasync, SIGIO, POLL_IN);
                wake_up_interruptible(&queue->proc_list);
        }
}
Example #14
/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 *
 * This function is called when an AIE, UIE or PIE mode interrupt
 * has occurred (or been emulated).
 *
 * Triggers the registered irq_task function callback.
 */
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
	unsigned long flags;

	/* mark one irq of the appropriate mode */
	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
	spin_unlock_irqrestore(&rtc->irq_lock, flags);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
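The packing scheme matters on the read side: the low byte of irq_data carries RTC_IRQF plus the mode bits, and everything above bit 7 is a running event count. A sketch of how a consumer would split the word (illustrative, not the driver's exact read path):

static void rtc_decode_irq_data(unsigned long data)
{
	unsigned long count = data >> 8;	/* events accumulated since last read */
	unsigned int events = data & 0xff;	/* RTC_IRQF | RTC_AF/RTC_UF/RTC_PF */

	pr_debug("rtc: %lu events, mask 0x%02x\n", count, events);
}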
static inline void RxQueueWrite (unsigned char data)
{
	int head;
	head = rxq->head;
	rxq->buf[head] = data;
	head = (head + 1) & (SPI_RX_BUF_SIZE-1);
	if (head != rxq->tail) {
		rxq->head = head;
		if (rxq->fasync)
			kill_fasync(&rxq->fasync, SIGIO, POLL_IN);
	} /* else RxQ is full and not much we can do about it! */
}
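The & (SPI_RX_BUF_SIZE-1) masking only works when the buffer size is a power of two; the consumer advances tail with the same mask. A sketch of that read side (same rxq layout assumed; a real driver would also need locking against the interrupt-context producer):

static int RxQueueRead(unsigned char *dst, int max)
{
	int n = 0;

	while (n < max && rxq->tail != rxq->head) {
		dst[n++] = rxq->buf[rxq->tail];
		rxq->tail = (rxq->tail + 1) & (SPI_RX_BUF_SIZE - 1);
	}
	return n;	/* number of bytes drained */
}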
Example #16
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->nodeid = nodeid;
	forget->nlookup = nlookup;

	spin_lock(&fc->lock);
	fc->forget_list_tail->next = forget;
	fc->forget_list_tail = forget;
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	spin_unlock(&fc->lock);
}
Example #17
static inline void ev_check(struct ps2ev_data *p, int event, u32 evbit)
{
    if (p->intr_mask & evbit) {
	p->intr_flag |= evbit;
	p->intr_count[event]++;
	if (p->wq)
	    wake_up_interruptible(&p->wq);
	if (p->fa)
	    kill_fasync(p->fa, SIGIO);
	if (p->sig)
	    send_sig(p->sig, p->ts, 1);
    }
}
Example #18
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending[is_rt(fc)]);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq[is_rt(fc)]);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
Example #19
static void user_notify_callback(void *event)
{
	down(&event_mutex);
	if (CIRC_SPACE(pmic_events.head, pmic_events.tail, CIRC_BUF_MAX)) {
		pmic_events.buf[pmic_events.head] = (int)event;
		pmic_events.head = (pmic_events.head + 1) & (CIRC_BUF_MAX - 1);
	} else {
		pr_info("Failed to notify event to the user\n");
	}
	up(&event_mutex);

	kill_fasync(&pmic_dev_queue, SIGIO, POLL_IN);
}
Example #20
static void rtc_alm_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    spin_lock (&rtc_lock);
    /* Update IRQ data & counter */
    rtc_irq_data += 0x100;
    rtc_irq_data |= (RTC_AF|RTC_IRQF);
    spin_unlock (&rtc_lock);

    /* Now do the rest of the actions */
    wake_up_interruptible(&rtc_wait);

    kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
}
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
	/*
	 * Be careful: on some HP laptops the bios force DD when on battery and
	 * the lid is closed. This leads to interrupts as soon as a little move
	 * is done.
	 */
	atomic_inc(&lis3_dev.count);

	wake_up_interruptible(&lis3_dev.misc_wait);
	kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
	return IRQ_HANDLED;
}
Example #22
static void usbtmc_interrupt(struct urb *urb)
{
	struct usbtmc_device_data *data = urb->context;
	struct device *dev = &data->intf->dev;
	int status = urb->status;
	int rv;

	dev_dbg(&data->intf->dev, "int status: %d len %d\n",
		status, urb->actual_length);

	switch (status) {
	case 0: /* SUCCESS */
		/* check for valid STB notification */
		if (data->iin_buffer[0] > 0x81) {
			data->bNotify1 = data->iin_buffer[0];
			data->bNotify2 = data->iin_buffer[1];
			atomic_set(&data->iin_data_valid, 1);
			wake_up_interruptible(&data->waitq);
			goto exit;
		}
		/* check for SRQ notification */
		if (data->iin_buffer[0] == 0x81) {
			if (data->fasync)
				kill_fasync(&data->fasync,
					SIGIO, POLL_IN);

			atomic_set(&data->srq_asserted, 1);
			wake_up_interruptible(&data->waitq);
			goto exit;
		}
		dev_warn(dev, "invalid notification: %x\n", data->iin_buffer[0]);
		break;
	case -EOVERFLOW:
		dev_err(dev, "overflow with length %d, actual length is %d\n",
			data->iin_wMaxPacketSize, urb->actual_length);
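		/* fall through: treat overflow like the other terminated-URB cases */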
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -ETIME:
		/* urb terminated, clean up */
		dev_dbg(dev, "urb terminated, status: %d\n", status);
		return;
	default:
		dev_err(dev, "unknown status received: %d\n", status);
	}
exit:
	rv = usb_submit_urb(urb, GFP_ATOMIC);
	if (rv)
		dev_err(dev, "usb_submit_urb failed: %d\n", rv);
}
Example #23
static KAL_INT32 eemcs_ipc_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  node_id;
    IPC_MSGSVC_TASKMAP_T *id_map;

	DEBUG_LOG_FUNCTION_ENTRY;

	if (!skb)
		return KAL_SUCCESS;

	p_cccih = (CCCI_BUFF_T *)skb->data;
	DBGLOG(IPCD, TRA, "%s: CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X)", __FUNCTION__,
		p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);
    
#ifndef _EEMCS_IPCD_LB_UT_
    /* Check IPC task id and extq_id */
    if ((id_map=unify_AP_id_2_local_id(p_cccih->reserved))==NULL)
   	{
	   DBGLOG(IPCD,ERR,"Wrong AP Unify id (%#x)@RX.!!! PACKET DROP !!!\n",p_cccih->reserved);
       dev_kfree_skb(skb);
       return KAL_SUCCESS ;
   	}
    node_id = id_map->task_id;
#else
    node_id = 0;
#endif
    if(IPCD_KERNEL == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        ipc_ilm_t* p_ilm = NULL;
        skb_pull(skb, sizeof(CCCI_BUFF_T));
        p_ilm = (ipc_ilm_t*)(skb->data);
        p_ilm->dest_mod_id = p_cccih->reserved;
        p_ilm->local_para_ptr = (local_para_struct *)p_ilm+1;
        mtk_conn_md_bridge_send_msg((ipc_ilm_t*)(skb->data));
        dev_kfree_skb(skb);
        DBGLOG(IPCD, TRA, "IPC(%d) MT_CONN kernel rx callback", node_id);
    }
    else if(IPCD_OPEN == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        skb_queue_tail(&eemcs_ipc_inst.ipc_node[node_id].rx_skb_list, skb); /* spin_lock_irqsave inside; see skbuff.c */
        atomic_inc(&eemcs_ipc_inst.ipc_node[node_id].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        kill_fasync(&eemcs_ipc_inst.ipc_node[node_id].fasync, SIGIO, POLL_IN);
        wake_up_poll(&eemcs_ipc_inst.ipc_node[node_id].rx_waitq,POLLIN); /* wake up rx_waitq */
    }else{
		DBGLOG(IPCD,ERR,"PKT DROP while ipc dev(%d) closed", node_id);
		dev_kfree_skb(skb);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
	return KAL_SUCCESS ;
}
Example #24
static irqreturn_t gf_irq(int irq, void* handle)
{
    struct gf_dev *gf_dev = (struct gf_dev *)handle;
    u8 mode = 0x80;
    u8 status = 0;

	if (gf_dev->tz_enable == false) {
	    gf_spi_read_byte(gf_dev, GF_BUFFER_STATUS, &status);
	    GF_LOG_DEBUG("IRQ status = 0x%x\n", status);

	    if (!(status & GF_BUF_STA_MASK)) {
			GF_LOG_ERROR("Invalid IRQ = 0x%x\n", status);
			return IRQ_HANDLED;
	    }

	    gf_spi_read_byte(gf_dev, GF_MODE_STATUS, &mode);
	    GF_LOG_INFO("status = 0x%x, mode = %d\n", status, mode);

	    switch(mode) {
		case GF_FF_MODE:
		    if ((status & GF_HOME_KEY_MASK) && (status & GF_HOME_KEY_STA)) {
				GF_LOG_INFO("wake device\n");
				gf_spi_write_byte(gf_dev, GF_MODE_STATUS, 0x00);
				input_report_key(gf_dev->input, GF_FF_KEY, 1);
				input_sync(gf_dev->input);
				input_report_key(gf_dev->input, GF_FF_KEY, 0);
				input_sync(gf_dev->input);
		    } else {
				break;
		    }

		case GF_IMAGE_MODE:
#ifdef GF_FASYNC
		    if (gf_dev->async) {
				GF_LOG_INFO("async\n");
				kill_fasync(&gf_dev->async, SIGIO, POLL_IN);
		    }
#endif
		    break;

		case GF_KEY_MODE:
		    GF_LOG_INFO("Key mode: status = 0x%x\n", status);
		    if ((status & GF_KEY_MASK) && (status & GF_BUF_STA_MASK)) {
				if (status & GF_HOME_KEY_MASK) {
				    input_report_key(gf_dev->input, GF_INPUT_HOME_KEY, (status & GF_HOME_KEY_STA) >> 4);
				    input_sync(gf_dev->input);
				} else if (status & GF_MENU_KEY_MASK) {
				    input_report_key(gf_dev->input, GF_INPUT_MENU_KEY, (status & GF_MENU_KEY_STA) >> 2);
				    input_sync(gf_dev->input);
				} else if (status & GF_BACK_KEY_MASK) {
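				    /* ... remainder of this example is truncated in the original listing ... */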
Example #25
static void qp_interrupt(int cpl, void *dev_id, struct pt_regs * regs)
{
	int head = queue->head;
	int maxhead = (queue->tail-1) & (QP_BUF_SIZE-1);

	add_mouse_randomness(queue->buf[head] = inb(qp_data));
	if (head != maxhead) {
		head++;
		head &= QP_BUF_SIZE-1;
	}
	queue->head = head;
	kill_fasync(&queue->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&queue->proc_list);
}
Example #26
static ssize_t scull_p_write_sz(struct file *filp, const char __user *buf, size_t count,
	loff_t *pos)
{
	scull_pipe_sz *dev = filp->private_data;
	int result;

	if(down_interruptible(&dev->sem))
	{
		return -ERESTARTSYS;
	}
	result = scull_getwritespace_sz(dev, filp);
	if(result)
	{
		return result;
	}

	count = min(count, (size_t)spacefree_sz(dev));
	if(dev->wp >= dev->rp)
	{
		count = min(count, (size_t)(dev->end - dev->wp));
	}
	else
	{
		count = min(count, (size_t)(dev->rp - dev->wp - 1)); // the write pointer wrapped; fill up to rp - 1
	}
	printk(KERN_ALERT "Going to accept %ld bytes to %p from %p\n",
		(long)count, dev->wp, buf);
	if(copy_from_user(dev->wp, buf, count))
	{
		up(&dev->sem);
		return -EFAULT;
	}
	dev->wp += count;
	if(dev->wp == dev->end)
	{
		dev->wp = dev->begin;
	}
	up(&dev->sem);

	wake_up_interruptible(&dev->inq);

	if(dev->async_queue)
	{
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	}
	printk(KERN_ALERT "\"%s\" did write %ld bytes\n", 
		current->comm, (long)count);
	return count;
}
Example #27
static void
queue_request(struct gfskdev_conn *dc, struct gfskdev_req *req)
{
	req->in.h.unique = gfskdev_get_unique(dc);
	req->in.h.len = sizeof(struct gfskdev_in_header) +
		len_args(req->in.numargs, (struct gfskdev_arg *) req->in.args);
	list_add_tail(&req->list, &dc->pending);
	req->state = GFSKDEV_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&dc->num_waiting);
	}
	wake_up(&dc->waitq);
	kill_fasync(&dc->fasync, SIGIO, POLL_IN);
}
Example #28
static int gf_fb_notifier_callback(struct notifier_block *self,
	unsigned long event, void *data)
{
	struct gf_dev *gf_dev = container_of(self, struct gf_dev, fb_notifier);
	struct fb_event *evdata = data;
	int *blank;

	GF_LOG_DEBUG("event = %lu\n", event);

	if (evdata && evdata->data && event == FB_EVENT_BLANK) {
		blank = evdata->data;
		if (*blank == FB_BLANK_UNBLANK) {
			GF_LOG_INFO("FB_BLANK_UNBLANK\n");			
			if (gf_dev->device_available == 1) {
				gf_dev->fb_black = 0;
#ifdef GF_FASYNC
				if (gf_dev->async)
					kill_fasync(&gf_dev->async, SIGIO, POLL_IN);
#endif
				gf_dev->device_available = 1;
			}
		} else if (*blank == FB_BLANK_POWERDOWN) {
		    GF_LOG_INFO("FB_BLANK_POWERDOWN\n");
			if (gf_dev->device_available == 1) {
				gf_dev->fb_black = 1;
#ifdef GF_FASYNC
				if (gf_dev->async)
					kill_fasync(&gf_dev->async, SIGIO, POLL_IN);
#endif
				gf_dev->device_available = 1;
			}
		}
	}

	return 0;
}
Example #29
File: pipe.c Project: crond/dd
ssize_t scull_p_write(struct file *filp, const char *buf, size_t count,
                loff_t *f_pos)
{
    Scull_Pipe *dev = filp->private_data;
    
    if (f_pos != &filp->f_pos) return -ESPIPE;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;
    
    /* Make sure there's space to write */
    while (spacefree(dev) == 0) { /* full */
        up(&dev->sem);
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        PDEBUG("\"%s\" writing: going to sleep\n",current->comm);
        if (wait_event_interruptible(dev->outq, spacefree(dev) > 0))
            return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
        if (down_interruptible(&dev->sem))
            return -ERESTARTSYS;
    }
    /* ok, space is there, accept something */
    count = min(count, spacefree(dev));
    if (dev->wp >= dev->rp)
        count = min(count, dev->end - dev->wp); /* up to end-of-buffer */
    else /* the write pointer has wrapped, fill up to rp-1 */
        count = min(count, dev->rp - dev->wp - 1);
    PDEBUG("Going to accept %li bytes to %p from %p\n",
           (long)count, dev->wp, buf);
    if (copy_from_user(dev->wp, buf, count)) {
        up (&dev->sem);
        return -EFAULT;
    }
    dev->wp += count;
    if (dev->wp == dev->end)
        dev->wp = dev->buffer; /* wrapped */
    up(&dev->sem);

    /* finally, awake any reader */
    wake_up_interruptible(&dev->inq);  /* blocked in read() and select() */

    /* and signal asynchronous readers, explained late in chapter 5 */
    if (dev->async_queue)
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
    PDEBUG("\"%s\" did write %li bytes\n",current->comm, (long)count);
    return count;
}
Example #30
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->forget_list_tail->next = forget;
		fc->forget_list_tail = forget;
		wake_up(&fc->waitq[is_rt(fc)]);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fc->lock);
}