static void rs_close(struct tty_struct *tty, struct file * filp)
{
	struct cnxt_serial * info = (struct cnxt_serial *)tty->driver_data;
	unsigned long flags;

	if (!info || serial_paranoia_check(info, tty->device, "rs_close"))
		return;
	
	save_flags(flags); cli();
	
	if (tty_hung_up_p(filp)) {
		restore_flags(flags);
		return;
	}
	
	if ((tty->count == 1) && (info->count != 1)) {
		/*
		 * Uh, oh.  tty->count is 1, which means that the tty
		 * structure will be freed.  info->count should always
		 * be one in these conditions.  If it's greater than
		 * one, we've got real problems, since it means the
		 * serial port won't be shutdown.
		 */
		printk("rs_close: bad serial port count; tty->count is 1, "
		       "info->count is %d\n", info->count);
		info->count = 1;
	}
	if (--info->count < 0) {
		printk("rs_close: bad serial port count for ttyS%d: %d\n",
		       info->line, info->count);
		info->count = 0;
	}
	if (info->count) {
		restore_flags(flags);
		return;
	}
	// closing port so disable interrupts
	//set_ints_mode(0);
	info->use_ints = 0;

	info->flags |= S_CLOSING;
	/*
	 * Save the termios structure, since this port may have
	 * separate termios for callout and dialin.
	 */
	if (info->flags & S_NORMAL_ACTIVE)
		info->normal_termios = *tty->termios;
	if (info->flags & S_CALLOUT_ACTIVE)
		info->callout_termios = *tty->termios;
	/*
	 * Now we wait for the transmit buffer to clear; and we notify 
	 * the line discipline to only process XON/XOFF characters.
	 */
	tty->closing = 1;
	if (info->closing_wait != S_CLOSING_WAIT_NONE)
		tty_wait_until_sent(tty, info->closing_wait);
	/*
	 * At this point we stop accepting input.  To do this, we
	 * disable the receive line status interrupts, and tell the
	 * interrupt driver to stop checking the data ready bit in the
	 * line status register.
	 */

	shutdown(info);
	if (tty->driver.flush_buffer)
		tty->driver.flush_buffer(tty);
	if (tty->ldisc.flush_buffer)
		tty->ldisc.flush_buffer(tty);
	tty->closing = 0;
	info->event = 0;
	info->tty = 0;
	if (tty->ldisc.num != tty_ldiscs[N_TTY].num) {
		if (tty->ldisc.close)
			(tty->ldisc.close)(tty);
		tty->ldisc = tty_ldiscs[N_TTY];
		tty->termios->c_line = N_TTY;
		if (tty->ldisc.open)
			(tty->ldisc.open)(tty);
	}
	if (info->blocked_open) {
		if (info->close_delay) {
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(info->close_delay);
	}
		wake_up_interruptible(&info->open_wait);
	}
	info->flags &= ~(S_NORMAL_ACTIVE|S_CALLOUT_ACTIVE|
			 S_CLOSING);
	wake_up_interruptible(&info->close_wait);
	restore_flags(flags);
}
/*
 * s5p_hpd_irq_hdmi() - HDMI hot-plug-detect interrupt handler.
 *
 * Reads and clears the pending HPD plug/unplug interrupt, updates the
 * shared hot-plug state, wakes pollers/waiters and schedules the
 * deferred HPD work.
 *
 * Returns IRQ_HANDLED when an HPD interrupt was serviced, IRQ_NONE
 * when the flag register shows the interrupt was not ours.
 *
 * Fix: the original set ret = IRQ_NONE for a spurious interrupt but
 * then returned IRQ_HANDLED unconditionally at the 'out' label, so the
 * status never reached the IRQ core.  We now return ret.
 */
static int s5p_hpd_irq_hdmi(int irq)
{
	u8 flag;
	int ret = IRQ_HANDLED;
	HPDIFPRINTK("\n");

	/* read flag register */
	flag = s5p_hdmi_reg_intc_status();

	/* ack whichever edge matches the current cable state */
	if (s5p_hdmi_reg_get_hpd_status())
		s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_PLUG);
	else
		s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_UNPLUG);

	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 0);
	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 0);

	/* is this our interrupt? */
	if (!(flag & (1 << HDMI_IRQ_HPD_PLUG | 1 << HDMI_IRQ_HPD_UNPLUG))) {
		printk(KERN_WARNING "%s() flag is wrong : 0x%x\n",
		       __func__, flag);
		ret = IRQ_NONE;

		goto out;
	}

	/* Both edges latched: resolve against the live line level. */
	if (flag == (1 << HDMI_IRQ_HPD_PLUG | 1 << HDMI_IRQ_HPD_UNPLUG)) {
		HPDIFPRINTK("HPD_HI && HPD_LO\n");

		if (last_hpd_state == HPD_HI && s5p_hdmi_reg_get_hpd_status())
			flag = 1 << HDMI_IRQ_HPD_UNPLUG;
		else
			flag = 1 << HDMI_IRQ_HPD_PLUG;
	}

	if (flag & (1 << HDMI_IRQ_HPD_PLUG)) {
		HPDIFPRINTK("HPD_HI\n");

		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 1);
		/* already plugged: nothing changed, skip the work */
		if (atomic_read(&hpd_struct.state) == HPD_HI)
			return IRQ_HANDLED;

		atomic_set(&hpd_struct.state, HPD_HI);
		atomic_set(&poll_state, 1);

		last_hpd_state = HPD_HI;
		wake_up_interruptible(&hpd_struct.waitq);

	} else if (flag & (1 << HDMI_IRQ_HPD_UNPLUG)) {
		HPDIFPRINTK("HPD_LO\n");
#if defined(CONFIG_SAMSUNG_WORKAROUND_HPD_GLANCE) &&\
	!defined(CONFIG_SAMSUNG_MHL_9290)
		call_sched_mhl_hpd_handler();
#endif

		/* cable gone: stop HDCP before reporting the unplug */
		s5p_hdcp_stop();

		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 1);
		/* already unplugged: nothing changed, skip the work */
		if (atomic_read(&hpd_struct.state) == HPD_LO)
			return IRQ_HANDLED;

		atomic_set(&hpd_struct.state, HPD_LO);
		atomic_set(&poll_state, 1);

		last_hpd_state = HPD_LO;
#ifdef CONFIG_HDMI_CONTROLLED_BY_EXT_IC
		schedule_delayed_work(&ext_ic_control_dwork ,
				msecs_to_jiffies(1000));
#endif

		wake_up_interruptible(&hpd_struct.waitq);
	}

	schedule_work(&hpd_work);

 out:
	return ret;
}
/* ---- Example #3 ---- */
/*
 * smd_tty_notify() - SMD channel event callback for a tty port.
 *
 * @priv:  the smd_tty_info for the port that raised the event
 * @event: SMD_EVENT_DATA / SMD_EVENT_OPEN / SMD_EVENT_CLOSE
 *         (and SMD_EVENT_REOPEN_READY on LGE builds)
 *
 * Runs in SMD notification context; all state changes are made under
 * info->reset_lock and readers/writers are woken as appropriate.
 *
 * Fix: the CLOSE case dereferenced info->tty->index without checking
 * info->tty, unlike the DATA case which guards info->tty before use.
 */
static void smd_tty_notify(void *priv, unsigned event)
{
	struct smd_tty_info *info = priv;
	unsigned long flags;

	switch (event) {
	case SMD_EVENT_DATA:
		spin_lock_irqsave(&info->reset_lock, flags);
		if (!info->is_open) {
			/* port closed underneath us; drop the event */
			spin_unlock_irqrestore(&info->reset_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&info->reset_lock, flags);
		/* There may be clients (tty framework) that are blocked
		 * waiting for space to write data, so if a possible read
		 * interrupt came in wake anyone waiting and disable the
		 * interrupts
		 */
		if (smd_write_avail(info->ch)) {
			smd_disable_read_intr(info->ch);
			if (info->tty)
				wake_up_interruptible(&info->tty->write_wait);
		}
		tasklet_hi_schedule(&info->tty_tsklt);
		break;

	case SMD_EVENT_OPEN:
		spin_lock_irqsave(&info->reset_lock, flags);
		info->in_reset = 0;
		info->in_reset_updated = 1;
		info->is_open = 1;
		wake_up_interruptible(&info->ch_opened_wait_queue);
		spin_unlock_irqrestore(&info->reset_lock, flags);
		break;

	case SMD_EVENT_CLOSE:
		spin_lock_irqsave(&info->reset_lock, flags);
		info->in_reset = 1;
		info->in_reset_updated = 1;
		info->is_open = 0;
		wake_up_interruptible(&info->ch_opened_wait_queue);
		spin_unlock_irqrestore(&info->reset_lock, flags);
		/* schedule task to send TTY_BREAK */
		tasklet_hi_schedule(&info->tty_tsklt);

		/* guard info->tty: it may already be detached here */
		if (info->tty && info->tty->index == LOOPBACK_IDX)
			schedule_delayed_work(&loopback_work,
					msecs_to_jiffies(1000));
		break;
#ifdef CONFIG_LGE_USES_SMD_DS_TTY
	case SMD_EVENT_REOPEN_READY:
		/* smd channel is closed completely */
		spin_lock_irqsave(&info->reset_lock, flags);
		info->in_reset = 1;
		info->in_reset_updated = 1;
		info->is_open = 0;
		wake_up_interruptible(&info->ch_opened_wait_queue);
		spin_unlock_irqrestore(&info->reset_lock, flags);
		break;
#endif
	}
}
/* ---- Example #4 ---- */
/* Bulk-in completion callback.  Checks the URB status; a transient
 * -EPROTO error just causes the read to be resubmitted.  Otherwise the
 * saved line status is turned into a tty flag, received bytes are
 * pushed to the tty layer, a DCD change is reported, and the URB is
 * resubmitted while the port is still open.
 */
static void spcp8x5_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct spcp8x5_private *priv = usb_get_serial_port_data(port);
	struct tty_struct *tty;
	unsigned char *data = urb->transfer_buffer;
	unsigned long flags;
	int i;
	int result = urb->status;
	u8 status;
	char tty_flag;

	dev_dbg(&port->dev, "start, result = %d, urb->actual_length = %d\n,",
		result, urb->actual_length);

	/* check the urb status */
	if (result) {
		/* port already closed: let the URB die quietly */
		if (!port->port.count)
			return;
		if (result == -EPROTO) {
			/* spcp8x5 mysteriously fails with -EPROTO */
			/* reschedule the read */
			urb->dev = port->serial->dev;
			result = usb_submit_urb(urb , GFP_ATOMIC);
			if (result)
				dev_dbg(&port->dev,
					"failed submitting read urb %d\n",
					result);
			return;
		}
		dev_dbg(&port->dev, "unable to handle the error, exiting.\n");
		return;
	}

	/* get tty_flag from status */
	tty_flag = TTY_NORMAL;

	/* snapshot and clear the transient line-status bits atomically */
	spin_lock_irqsave(&priv->lock, flags);
	status = priv->line_status;
	priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
	spin_unlock_irqrestore(&priv->lock, flags);
	/* wake up the wait for termios */
	wake_up_interruptible(&priv->delta_msr_wait);

	/* break takes precedence over parity, which takes precedence over
	 * framing errors */
	if (status & UART_BREAK_ERROR)
		tty_flag = TTY_BREAK;
	else if (status & UART_PARITY_ERROR)
		tty_flag = TTY_PARITY;
	else if (status & UART_FRAME_ERROR)
		tty_flag = TTY_FRAME;
	dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);

	tty = tty_port_tty_get(&port->port);
	if (tty && urb->actual_length) {
		/* +1 leaves room for an overrun marker beside the data */
		tty_buffer_request_room(tty, urb->actual_length + 1);
		/* overrun is special, not associated with a char */
		if (status & UART_OVERRUN_ERROR)
			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		for (i = 0; i < urb->actual_length; ++i)
			tty_insert_flip_char(tty, data[i], tty_flag);
		tty_flip_buffer_push(tty);
	}

	/* NOTE(review): tty may be NULL here; assumes
	 * usb_serial_handle_dcd_change() tolerates that -- confirm. */
	if (status & UART_DCD)
                usb_serial_handle_dcd_change(port, tty,
                           priv->line_status & MSR_STATUS_LINE_DCD);

	tty_kref_put(tty);

	/* Schedule the next read _if_ we are still open */
	if (port->port.count) {
		urb->dev = port->serial->dev;
		result = usb_submit_urb(urb , GFP_ATOMIC);
		if (result)
			dev_dbg(&port->dev, "failed submitting read urb %d\n",
				result);
	}

	return;
}
/* ---- Example #5 ---- */
static int unix_read(struct socket *sock, char *ubuf, int size, int nonblock)
{
    struct unix_proto_data *upd;
    int todo, avail;

    if ((todo = size) <= 0)
	return 0;

    upd = UN_DATA(sock);

    while (!(avail = UN_BUF_AVAIL(upd))) {
	if (sock->state != SS_CONNECTED)
	    return ((sock->state == SS_DISCONNECTING) ? 0 : -EINVAL);

	if (nonblock)
	    return -EAGAIN;

	sock->flags |= SO_WAITDATA;
	interruptible_sleep_on(sock->wait);
	sock->flags &= ~SO_WAITDATA;

	if (current->signal /* & ~current->blocked */ )
	    return -ERESTARTSYS;
    }

/*
 *	Copy from the read buffer into the user's buffer,
 *	watching for wraparound. Then we wake up the writer.
 */

    down(&upd->sem);
    do {
	int part, cando;

	if (avail <= 0) {
	    printk("UNIX: read: avail is negative (%d)\n", avail);
	    send_sig(SIGKILL, current, 1);
	    return -EPIPE;
	}

	if ((cando = todo) > avail)
	    cando = avail;

	if (cando > (part = UN_BUF_SIZE - upd->bp_tail))
	    cando = part;

	memcpy_tofs(ubuf, upd->buf + upd->bp_tail, cando);
	upd->bp_tail = (upd->bp_tail + cando) & (UN_BUF_SIZE - 1);
	ubuf += cando;
	todo -= cando;

	if (sock->state == SS_CONNECTED) {
	    wake_up_interruptible(sock->conn->wait);
#if 0
	    sock_wake_async(sock->conn, 2);
#endif
	}
	avail = UN_BUF_AVAIL(upd);
    } while (todo && avail);

    up(&upd->sem);

    return (size - todo);
}
/* ---- Example #6 ---- */
/**
 * pcm3718_fifo_isr - interrupt service routine for FIFO-mode
 *                    data acquisition (the no-FIFO variant is
 *                    pcm3718_nofifo_isr)
 *
 * ptr: point to the private data of device object
 *
 * interrupt when fifo is half full, read the fifo data into
 * user buffer, until user buffer is half full or full, send
 * 'buffer change' or 'terminate' event to user.
 */
static void pcm3718_fifo_isr(private_data *ptr)
{
	private_data *privdata = ptr;
	INT16U tmp;
	INT16U i;
	adv_user_page *page = NULL;
	/* receive up to half a FIFO's worth of samples per interrupt */
	i = 0;
	do {
		page = privdata->user_pages + privdata->page_index;
		/* current page exhausted: advance to the next one */
		if (privdata->item >= page->length) {
			privdata->page_index++;
			privdata->item = 0;
		}
	
		/* wrap both indices so acquisition runs circularly */
		privdata->page_index %= privdata->page_num;
		privdata->cur_index %= privdata->conv_num;
	
		page = privdata->user_pages + privdata->page_index;

		
        	// read one 16-bit sample: low byte at reg 23, high at reg 24
        	tmp = advInp( privdata, 23 ) & 0x00ff;               
        	tmp = tmp | ( ( advInp( privdata, 24 ) << 8 ) & 0xff00 );
		memcpy((INT16U *) (page->page_addr + page->offset + privdata->item),
		       &tmp, sizeof(INT16U));
		
		/* item is a byte offset, hence += 2 per 16-bit sample */
		privdata->item += 2;
		privdata->cur_index++;
		i++;
	} while ((i < privdata->half_fifo_size)
		 && (privdata->cur_index < privdata->conv_num));
	

	/* set event */
	if (privdata->cur_index == privdata->conv_num / 2) { /* buffer change */
		privdata->half_ready = 1;

		adv_process_info_set_event_all(&privdata->ptr_process_info, 1, 1);
	}

	if (privdata->cur_index == privdata->conv_num) {
		privdata->half_ready = 2;

	     
		if (!privdata->cyclic) { /* terminate */
			adv_process_info_set_event_all(&privdata->ptr_process_info, 2, 1);
	           	advOutp( privdata, 9,  0 );           // disable interrupt
	           	advOutp( privdata, 6,  0 );           // disable interrupt
		} else {		/* buffer change */
			adv_process_info_set_event_all(&privdata->ptr_process_info, 1, 1);
		}
	}

	/* NOTE(review): overrun_flag is set on the first interrupt and an
	 * overrun event (3) is raised on every later one; presumably the
	 * flag is cleared elsewhere when the user consumes data -- confirm
	 * against the rest of the driver. */
	if (privdata->overrun_flag) { /* overrun */
		adv_process_info_set_event_all(&privdata->ptr_process_info,
					       3,
					       1);
		wake_up_interruptible(&privdata->event_wait);
	} else {
		privdata->overrun_flag = 1;
	}


	if (privdata->cur_index == privdata->int_cnt) { /* interrupt count */
		adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	}
	
       advOutp( privdata,  20,  0 );  // clear interrupt request

	wake_up_interruptible(&privdata->event_wait);
}
/* ---- Example #7 ---- */
/**
 * pcm3718_nofifo_isr - interrupt service routine for without-FIFO
 *                      data acquisition
 *
 * ptr: point to the private data of device object
 *
 * interrupt when an A/D conversion occurs, read the sample into
 * the user buffer; when the user buffer is half full or full,
 * send a 'buffer change' or 'terminate' event to user space.
 *
 * Fix: both overrun checks used to test half_ready *after*
 * overwriting it (if (half_ready == 2) right after half_ready = 1,
 * and vice versa), making them unreachable.  The previous value is
 * now examined before it is replaced.
 */
static void pcm3718_nofifo_isr(private_data *ptr)
{
	private_data *privdata = ptr;
	adv_user_page *page = NULL;
	INT16U tmp;

	page = privdata->user_pages + privdata->page_index;	
	/* current page exhausted: advance to the next one */
	if (privdata->item >= page->length) {
		privdata->page_index++;
		privdata->item = 0;
	}
	
	/* wrap both indices so acquisition runs circularly */
	privdata->page_index %= privdata->page_num;
	privdata->cur_index %= privdata->conv_num;
	
	page = privdata->user_pages + privdata->page_index;
	/* conversion not finished yet (busy bit set): bail out */
	if(advInp(privdata,8)&0x80) 
		return;
	/* read one 16-bit sample: low byte at reg 0, high at reg 1 */
    	tmp = advInp( privdata, 0 ) ;  
    	tmp = tmp | (  advInp( privdata, 1 ) << 8 ) ; 

	memcpy((INT16U *) (page->page_addr + page->offset + privdata->item),
	      &tmp, sizeof(INT16U));
	privdata->cur_index++;
	/* item is a byte offset, hence += 2 per 16-bit sample */
	privdata->item += 2;
		
	/* set event */
	if (privdata->cur_index == privdata->conv_num / 2) { /* buffer change */
		/* lower half refilled before user consumed it: overrun */
		if (privdata->half_ready == 2) {
			privdata->overrun_flag = 1;
			adv_process_info_set_event_all(&privdata->ptr_process_info,
						       3,
						       1);
		}
		privdata->half_ready = 1;
		privdata->trans += privdata->conv_num/2;
		adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	}

	if (privdata->cur_index == privdata->conv_num) {
		/* upper half refilled before user consumed it: overrun */
		if (privdata->half_ready == 1) {
			privdata->overrun_flag = 1;
			adv_process_info_set_event_all(&privdata->ptr_process_info,
						       3,
						       1);
		}
		privdata->half_ready = 2;
		privdata->trans += privdata->conv_num/2;
	     
		if (!privdata->cyclic) { /* terminate */
			adv_process_info_set_event_all(&privdata->ptr_process_info, 2, 1);
	           	advOutp( privdata, 9,  0 );           // disable interrupt
	           	advOutp( privdata, 6,  0 );           // disable interrupt
			advOutp(privdata,8,0);
		} 
		adv_process_info_set_event_all(&privdata->ptr_process_info, 1, 1);
	}

	wake_up_interruptible(&privdata->event_wait);
}
/*******************************************************************
*  FUNCTION: read_citty_buffer
*
*  DESCRIPTION: To read the data from the CINET buffer (either Rx or Tx)
*
*	Note: don't need to pass in count (size), since the buffer should know
*			the length of the data
*
*	Blocks (interruptibly) while the ring is empty; on wake-up the
*	buffer semaphore is re-acquired before any index is touched.
*
*  RETURNS: number of bytes copied, 0 when there is nothing to read,
*	or a negative errno on interruption/copy failure.
*	NOTE(review): the return type is size_t (unsigned), so the
*	-ERESTARTSYS/-EFAULT returns below wrap to huge positive
*	values at the caller -- ssize_t would be the correct type;
*	confirm against the callers before changing.
*
*******************************************************************/
size_t read_citty_buffer(char *buf, struct buf_struct * cittyBuf, short action )
{
	size_t count;
	unsigned char *pbuf;
	struct semaphore * lSem;
	size_t retval = 0;
	int curBufIndex;

	lSem = &(cittyBuf->gSem);
	F_ENTER();

	/* Enter critial section */
	if (down_interruptible(lSem) )
		return -ERESTARTSYS;


	/* iBufIn == iBufOut means the ring buffer is empty */
	while ( cittyBuf->iBufIn == cittyBuf->iBufOut )
	{
		up( lSem ); /* release the lock */

		//if (filp->f_flags & O_NONBLOCK)
		//	return -EAGAIN;

		PDEBUG("\"%s\" reading: going to sleep with action %d", current->comm, action);

		if (wait_event_interruptible(cittyBuf->gInq, (cittyBuf->iBufIn != cittyBuf->iBufOut)))
		{
			//printk("waiting_event_interruptible is interrupted.\n");

			/* now waken up by signal, get the lock and process it */
			if (down_interruptible(lSem) )
			{
				printk("Error down_interruptible.\n");
				return -ERESTARTSYS;
			}
			/* interrupted by a signal: fall through holding the
			 * lock and let the re-check below decide */
			break;
			/* coment out: to avoid crash, signal is waken up?*/
			//return -ERESTARTSYS; /* signal: tell the fs layer to handle it */

		}
		/* otherwise loop, but first reacquire the lock */
		if (down_interruptible( lSem ))
		{
			printk("Error down_interruptible.\n");
			return -ERESTARTSYS;
		}

	}

	/* Double check: still empty after a signal-interrupted wait */
	if (cittyBuf->iBufIn == cittyBuf->iBufOut )
	{
		up( lSem ); /* release the lock */
		//return -ERESTARTSYS;
		return 0;
	}
	PDEBUG( "There is something to read!" );

	/* consume one slot from the tail of the ring */
	curBufIndex = cittyBuf->iBufOut++;
	pbuf = cittyBuf->pBuf[ curBufIndex  ];

	/*
	 *  Check if it is flipped
	 */
	if ( cittyBuf->iBufOut >= NUM_CITTY_BUF)
	{
		cittyBuf->iBufOut = cittyBuf->iBufOut % NUM_CITTY_BUF;
	}

	/* just to make sure */
	if ( pbuf == NULL )
	{
		printk("Nothing to read.\n");
		retval = 0;
		up( lSem );
		//return EIO;
		return 0;
	}

	/* read only up to the size of data or the buffer */
	count = cittyBuf->iDatalen[curBufIndex];
	if ( count > CITTY_BUF_SIZE )
		count = CITTY_BUF_SIZE;


	//#if 0
	if ( action == COPY_TO_USER )
	{
		PDEBUG( "read_citty_buffer: copy to user with count: %d and buf index %d", count, curBufIndex );

		if (copy_to_user(buf, pbuf, count))
		{
			up( lSem );
			printk("read_citty_buffer: Copy to User failed.\n");
			return -EFAULT;
		}
	}
	else if (action == COPY_TO_CITTY )
	{

		PDEBUG("read_citty_buffer: This shouldn't be called in CI TTY");
	}

	retval = count;


	/* exit critical section */
	up( lSem );

	/* finally, awake any writers and return */
	wake_up_interruptible( &cittyBuf->gOutq );
	PDEBUG("\"%s\" did read %li bytes", current->comm, (long)count);

	//#endif

	F_LEAVE();

	return retval;

}
/*
 * write_citty_buffer() - store one chunk of data into a CINET ring slot.
 *
 * @cittyBuf: ring buffer to write into
 * @buf:      source data (user pointer for COPY_FROM_USER, kernel
 *            pointer for COPY_FROM_CITTY)
 * @count:    number of bytes to write (clamped to CITTY_BUF_SIZE)
 * @action:   COPY_FROM_USER or COPY_FROM_CITTY
 *
 * Sleeps (interruptibly) while the ring is full, then copies the data,
 * records its length and wakes any blocked reader.
 *
 * Returns the number of bytes stored, or a negative errno.
 * NOTE(review): the size_t return type makes negative errnos wrap to
 * large positive values at the caller; ssize_t would be correct.
 *
 * Fixes vs. the original: the buffer-overflow path returned positive
 * EIO, inconsistent with the -EIO a few lines above; PDEBUG used %d
 * for the size_t count (now %zu); log-message typo corrected.
 */
size_t write_citty_buffer(struct buf_struct * cittyBuf,
			  const char *buf, size_t count, short action )
{
	unsigned char *pbuf;
	struct semaphore * lSem;
	int curBufIndex;

	DEFINE_WAIT(wait);

	F_ENTER();

	/* make it a non-blocking write*/
	if (spacefree( cittyBuf ) == 0 )
	{
		printk("\"%s\" warning: Write Buffer overflow.\n", current->comm);
		return -EIO;
	}

	lSem = &(cittyBuf->gSem);

	if (down_interruptible( lSem ))
	{
		printk("\"%s\" Error: Unable to down SEM.\n", current->comm);
		return -ERESTARTSYS;
	}

	/* Make sure there's space to write */
	while (spacefree( cittyBuf ) == 0)   /* full */

	{
		PDEBUG("\"%s\" Going to define wait:", current->comm );


		up( lSem );     /* release the lock */

		//if (filp->f_flags & O_NONBLOCK)
		//	return -EAGAIN;

		PDEBUG("\"%s\" writing: going to sleep", current->comm);
		prepare_to_wait(&cittyBuf->gOutq, &wait, TASK_INTERRUPTIBLE);

		/* re-check after prepare_to_wait to avoid a lost wakeup */
		if (spacefree( cittyBuf ) == 0)
		{
			schedule(); /* seem like it is bad: scheduling while atomic */
		}
		finish_wait(&cittyBuf->gOutq, &wait);
		if (signal_pending(current))
		{
			printk("\"%s\" Error: Unable to signal_pending.\n", current->comm);
			return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
		}
		if (down_interruptible( lSem ))
		{
			printk("\"%s\" Error: Unable to down SEM.\n", current->comm);
			return -ERESTARTSYS;
		}
	}

	/* claim one slot at the head of the ring */
	curBufIndex = cittyBuf->iBufIn++;
	pbuf = cittyBuf->pBuf[ curBufIndex ];

	PDEBUG("\"%s\" Going to check flip", current->comm );
	/*
	 *  Check if it is flipped
	 */
	if ( cittyBuf->iBufIn >= NUM_CITTY_BUF)
	{
		cittyBuf->iBufIn = cittyBuf->iBufIn % NUM_CITTY_BUF;
	}

	/* Check space */
	if (pbuf == NULL)
	{
		printk("warning: Buffer overflowed.\n");
		up( lSem );
		return -EIO;	/* was positive EIO: now matches the path above */
	}

	/* ok, space is there, accept something */
	/* write only up to the size of the buffer */
	if (count > CITTY_BUF_SIZE)
	{
		count = CITTY_BUF_SIZE;
		printk("warning: Buffer too large to write.\n");
	}

	if ( action == COPY_FROM_USER )
	{
		PDEBUG("write_citty_buffer: going to copy_from_user at buf index %d and count %zu", curBufIndex, count);
		if (copy_from_user((pbuf), buf, count ))
		{
			up( lSem );
			return -EFAULT;
		}
	}
	else if ( action == COPY_FROM_CITTY)
	{
		/* it is from the cinet_hard_start_xmit */
		PDEBUG("write_citty_buffer: going to COPY_FROM_CITTY at buf index %d and count %zu", curBufIndex, count);
		memcpy(pbuf, buf, count);

	}
	else
	{
		printk("undefined action.\n");
	}

	/* saving datalen */
	cittyBuf->iDatalen[ curBufIndex ] = count;

	up( lSem );

	/* finally, awake any reader */
	wake_up_interruptible(&cittyBuf->gInq);  /* blocked in read() and select() */

	F_LEAVE();

	return count;
}
/*
 * gs_deinit_port() - forcibly wind down one USB-gadget serial port.
 *
 * Mirrors the close path: drops the open count, and when this is the
 * last opener, drains (or discards) the circular write buffer under a
 * GS_CLOSE_TIMEOUT limit, frees or scrubs it depending on whether the
 * USB side is still connected, and wakes anyone waiting in close().
 * Everything except the drain-wait runs under port->port_lock.
 */
static void gs_deinit_port(int port_num)
{
	struct gs_port *port;
	struct gserial	*gser;

    pr_info("Davis: enter gs_deinit_port port_num %d\n", port_num);
	/* bail out if the driver never initialized or index is bogus */
	if (!gs_tty_driver || port_num >= n_ports)
		return;

    tty_port_relay_unregister_gs(gs_tty_driver);

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;
	
	spin_lock_irq(&port->port_lock);

	/* not the last opener: just drop the count and leave */
	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_info("gs_deinit_port: ttyGS%d ...\n", port->port_num);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	//if (gser && gser->disconnect) Davis: temp
	//	gser->disconnect(gser); Davis: temp

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* re-read: the USB side may have gone away while we slept */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	//tty->driver_data = NULL;Davis: temp
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_deinit_port: ttyGS%d done!\n",
			port->port_num);

	wake_up_interruptible(&port->close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}
/* ---- Example #11 ---- */
/*{{{  MonitorRecordEvent*/
/*
 * MonitorRecordEvent() - format a monitor event and queue it for readers.
 *
 * Updates the per-code stored-event snapshot, then (unless the event is
 * report-on-request or masked out) appends a formatted record to the
 * context's event queue under the queue lock, dropping the oldest entry
 * when full, and finally wakes any reader blocked on EventReceived.
 *
 * Fix: the stored-parameter copy used sizeof(Parameters); since array
 * parameters decay to pointers, that copied only sizeof(unsigned int *)
 * bytes instead of all MONITOR_PARAMETER_COUNT values.
 */
void MonitorRecordEvent        (struct DeviceContext_s*         Context,
                                unsigned int                    SourceId,
                                monitor_event_code_t            EventCode,
                                unsigned long long              TimeStamp,
                                unsigned int                    Parameters[MONITOR_PARAMETER_COUNT],
                                const char*                     Description)
{
    struct EventQueue_s*                EventList;
    unsigned int                        Next;
    unsigned int                        EventReceived   = false;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
    unsigned int                        Flags;
#else
    unsigned long                       Flags;
#endif
    struct EventRecord_s*               EventRecord;
    struct EventValue_s*                StoredEvent;

    StoredEvent                 = &(Context->StoredEventValues[EventCode & MONITOR_EVENT_INDEX_MASK]);

    if (Parameters)
        /* copy the full parameter block, not just a pointer's worth */
        memcpy (StoredEvent->Parameters, Parameters,
                MONITOR_PARAMETER_COUNT * sizeof(Parameters[0]));
    StoredEvent->Count++;

    /* report-on-request events are only stored, never queued */
    if ((EventCode & MONITOR_EVENT_REPORT_ON_REQUEST) != 0)
        return;

    /* event class filtered out by the subsystem mask */
    if ((Context->Status.subsystem_mask & EventCode) == 0)
        return;

    EventList                   = &Context->EventQueue;

    spin_lock_irqsave (&EventList->Lock, Flags);

    Next                        = (EventList->Write + 1) % MAX_MONITOR_EVENT;
    if (Next == EventList->Read)
    {
        /* queue full: drop the oldest record to make room */
        EventList->LostCount++;
        EventList->Read         = (EventList->Read + 1) % MAX_MONITOR_EVENT;
    }

    EventRecord                 = &(EventList->Event[EventList->Write]);
    if (Parameters)
        EventRecord->RecordLength       = snprintf ((char*)&(EventRecord->EventString), MONITOR_EVENT_RECORD_SIZE,
                                          "%s at %lluus source %d (0x%x, 0x%x, 0x%x, 0x%x) \"%s\"\n",
                                          EventName(EventCode), TimeStamp, SourceId,
                                          Parameters[0], Parameters[1], Parameters[2], Parameters[3], Description);
    else
        EventRecord->RecordLength       = snprintf ((char*)&(EventRecord->EventString), MONITOR_EVENT_RECORD_SIZE,
                                          "%s at %lluus source %d \"%s\"\n",
                                          EventName(EventCode), TimeStamp, SourceId, Description);
    /* NOTE(review): this overwrites the last byte with '\n' even when
     * the record was not truncated -- presumably intentional so every
     * record ends with a newline; confirm. */
    EventRecord->EventString[MONITOR_EVENT_RECORD_SIZE-1]       = '\n';
    if (EventRecord->RecordLength > MONITOR_EVENT_RECORD_SIZE)
        EventRecord->RecordLength       = MONITOR_EVENT_RECORD_SIZE;

    EventList->Write                    = Next;
    EventReceived                       = true;

    spin_unlock_irqrestore (&EventList->Lock, Flags);

    /*MONITOR_DEBUG ("Write:%d, Read:%d: Message:%s\n", EventList->Write, EventList->Read, (char*)(EventRecord->EventString));*/

    if (EventReceived)
        wake_up_interruptible (&EventList->EventReceived);

}
/* ---- Example #12 ---- */
/*
 * diag_device_write() - route a diag buffer to the active logging sink.
 *
 * @buf:       payload to deliver
 * @proc_num:  origin of the data (APPS_DATA / MODEM_DATA / QDSP_DATA /
 *             SDIO_DATA)
 * @write_ptr: diag request descriptor used for USB writes (unused for
 *             memory-device mode)
 *
 * Behavior depends on driver->logging_mode:
 *  - MEMORY_DEVICE_MODE: enqueue in buf_tbl (APPS only) and wake the
 *    registered logging client.
 *  - NO_LOGGING_MODE: discard, clear busy flags and re-queue SMD reads.
 *  - USB_MODE: hand off to usb_diag_write on the appropriate channel.
 *
 * Returns 0 on success, -EINVAL if no logging client is registered,
 * -1 on drop/alloc failure, or the usb_diag_write() result.
 */
int diag_device_write(void *buf, int proc_num, struct diag_request *write_ptr)
{
	int i, err = 0;

	if (driver->logging_mode == MEMORY_DEVICE_MODE) {
		if (proc_num == APPS_DATA) {
			/* claim the first free slot in the write table */
			for (i = 0; i < driver->poolsize_write_struct; i++)
				if (driver->buf_tbl[i].length == 0) {
					driver->buf_tbl[i].buf = buf;
					driver->buf_tbl[i].length =
								 driver->used;
#ifdef DIAG_DEBUG
					printk(KERN_INFO "\n ENQUEUE buf ptr"
						   " and length is %x , %d\n",
						   (unsigned int)(driver->buf_
				tbl[i].buf), driver->buf_tbl[i].length);
#endif
					break;
				}
		}
		/* find the client registered as the logging process */
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid ==
						 driver->logging_process_id)
				break;
		if (i < driver->num_clients) {
			driver->data_ready[i] |= MEMORY_DEVICE_LOG_TYPE;
			wake_up_interruptible(&driver->wait_q);
		} else
			return -EINVAL;
	} else if (driver->logging_mode == NO_LOGGING_MODE) {
		/* data is dropped; just recycle the SMD read buffers */
		if (proc_num == MODEM_DATA) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			queue_work(driver->diag_wq, &(driver->
							diag_read_smd_work));
		} else if (proc_num == QDSP_DATA) {
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			queue_work(driver->diag_wq, &(driver->
						diag_read_smd_qdsp_work));
		}
		err = -1;
	}
#ifdef CONFIG_DIAG_OVER_USB
	else if (driver->logging_mode == USB_MODE) {
		if (proc_num == APPS_DATA) {
			/* APPS data needs a freshly allocated request */
			driver->write_ptr_svc = (struct diag_request *)
			(diagmem_alloc(driver, sizeof(struct diag_request),
				 POOL_TYPE_WRITE_STRUCT));
			if (driver->write_ptr_svc) {
				driver->write_ptr_svc->length = driver->used;
				driver->write_ptr_svc->buf = buf;
				err = usb_diag_write(driver->legacy_ch,
						driver->write_ptr_svc);
			} else
				err = -1;
		} else if (proc_num == MODEM_DATA) {
			write_ptr->buf = buf;
#ifdef DIAG_DEBUG
			printk(KERN_INFO "writing data to USB,"
				"pkt length %d\n", write_ptr->length);
			print_hex_dump(KERN_DEBUG, "Written Packet Data to"
					   " USB: ", 16, 1, DUMP_PREFIX_ADDRESS,
					    buf, write_ptr->length, 1);
#endif /* DIAG DEBUG */
			err = usb_diag_write(driver->legacy_ch, write_ptr);
		} else if (proc_num == QDSP_DATA) {
			write_ptr->buf = buf;
			err = usb_diag_write(driver->legacy_ch, write_ptr);
		}
#ifdef CONFIG_DIAG_SDIO_PIPE
		else if (proc_num == SDIO_DATA) {
			/* SDIO-sourced data only valid on fusion targets */
			if (machine_is_msm8x60_charm_surf() ||
					machine_is_msm8x60_charm_ffa() ||
					machine_is_p5_lte() ||
					machine_is_p8_lte() ||
					machine_is_p4_lte() ) {
				write_ptr->buf = buf;
				err = usb_diag_write(driver->mdm_ch, write_ptr);
			} else
				pr_err("diag: Incorrect data while USB write");
		}
#endif
		APPEND_DEBUG('d');
	}
#endif /* DIAG OVER USB */
    return err;
}
/*
 * iowarrior_write() - write one full report to an IOWarrior device.
 *
 * IOW24/40/PV1/PV2 use a synchronous control transfer; IOW56 uses an
 * asynchronous interrupt-out URB with a bounded number of in-flight
 * writes (MAX_WRITES_IN_FLIGHT).  Only whole reports (count ==
 * dev->report_size) are accepted.
 *
 * Returns the number of bytes written or a negative errno.
 *
 * Fix: removed three unreachable `break;` statements that followed
 * unconditional `goto exit;` in the switch arms.
 */
static ssize_t iowarrior_write(struct file *file,
			       const char __user *user_buffer,
			       size_t count, loff_t *ppos)
{
	struct iowarrior *dev;
	int retval = 0;
	char *buf = NULL;	/* for IOW24 and IOW56 we need a buffer */
	struct urb *int_out_urb = NULL;

	dev = file->private_data;

	mutex_lock(&dev->mutex);
	/* verify that the device wasn't unplugged */
	if (!dev->present) {
		retval = -ENODEV;
		goto exit;
	}
	dbg("%s - minor %d, count = %zd", __func__, dev->minor, count);
	/* if count is 0 we're already done */
	if (count == 0) {
		retval = 0;
		goto exit;
	}
	/* We only accept full reports */
	if (count != dev->report_size) {
		retval = -EINVAL;
		goto exit;
	}
	switch (dev->product_id) {
	case USB_DEVICE_ID_CODEMERCS_IOW24:
	case USB_DEVICE_ID_CODEMERCS_IOWPV1:
	case USB_DEVICE_ID_CODEMERCS_IOWPV2:
	case USB_DEVICE_ID_CODEMERCS_IOW40:
		/* IOW24 and IOW40 use a synchronous call */
		buf = kmalloc(count, GFP_KERNEL);
		if (!buf) {
			retval = -ENOMEM;
			goto exit;
		}
		if (copy_from_user(buf, user_buffer, count)) {
			retval = -EFAULT;
			kfree(buf);
			goto exit;
		}
		retval = usb_set_report(dev->interface, 2, 0, buf, count);
		kfree(buf);
		goto exit;
	case USB_DEVICE_ID_CODEMERCS_IOW56:
		/* The IOW56 uses asynchronous IO and more urbs */
		if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
			/* Wait until we are below the limit for submitted urbs */
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				goto exit;
			} else {
				retval = wait_event_interruptible(dev->write_wait,
								  (!dev->present || (atomic_read (&dev-> write_busy) < MAX_WRITES_IN_FLIGHT)));
				if (retval) {
					/* we were interrupted by a signal */
					/* NOTE(review): -ERESTART (not
					 * -ERESTARTSYS) is unusual here;
					 * confirm intent. */
					retval = -ERESTART;
					goto exit;
				}
				if (!dev->present) {
					/* The device was unplugged */
					retval = -ENODEV;
					goto exit;
				}
				if (!dev->opened) {
					/* We were closed while waiting for an URB */
					retval = -ENODEV;
					goto exit;
				}
			}
		}
		atomic_inc(&dev->write_busy);
		int_out_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!int_out_urb) {
			retval = -ENOMEM;
			dbg("%s Unable to allocate urb ", __func__);
			goto error_no_urb;
		}
		buf = usb_alloc_coherent(dev->udev, dev->report_size,
					 GFP_KERNEL, &int_out_urb->transfer_dma);
		if (!buf) {
			retval = -ENOMEM;
			dbg("%s Unable to allocate buffer ", __func__);
			goto error_no_buffer;
		}
		usb_fill_int_urb(int_out_urb, dev->udev,
				 usb_sndintpipe(dev->udev,
						dev->int_out_endpoint->bEndpointAddress),
				 buf, dev->report_size,
				 iowarrior_write_callback, dev,
				 dev->int_out_endpoint->bInterval);
		int_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		if (copy_from_user(buf, user_buffer, count)) {
			retval = -EFAULT;
			goto error;
		}
		retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
		if (retval) {
			dbg("%s submit error %d for urb nr.%d", __func__,
			    retval, atomic_read(&dev->write_busy));
			goto error;
		}
		/* submit was ok; the completion callback owns the buffer now */
		retval = count;
		usb_free_urb(int_out_urb);
		goto exit;
	default:
		/* what do we have here ? An unsupported Product-ID ? */
		dev_err(&dev->interface->dev, "%s - not supported for product=0x%x\n",
			__func__, dev->product_id);
		retval = -EFAULT;
		goto exit;
	}
error:
	usb_free_coherent(dev->udev, dev->report_size, buf,
			  int_out_urb->transfer_dma);
error_no_buffer:
	usb_free_urb(int_out_urb);
error_no_urb:
	atomic_dec(&dev->write_busy);
	wake_up_interruptible(&dev->write_wait);
exit:
	mutex_unlock(&dev->mutex);
	return retval;
}
/*
 * USB callback handler for reading data
 */
/*
 * iowarrior_callback - URB completion handler for interrupt-IN reports.
 *
 * Runs in interrupt context.  Stores the received report (plus a one-byte
 * serial number) into the device's circular read_queue and wakes blocked
 * readers, then resubmits the URB so reports keep flowing.
 */
static void iowarrior_callback(struct urb *urb)
{
	struct iowarrior *dev = urb->context;
	int intr_idx;
	int read_idx;
	int aux_idx;
	int offset;
	int status = urb->status;
	int retval;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was unlinked or device is gone: do not resubmit */
		return;
	default:
		/* transient error: drop this report but keep the URB running */
		goto exit;
	}

	spin_lock(&dev->intr_idx_lock);
	intr_idx = atomic_read(&dev->intr_idx);
	/* aux_idx become previous intr_idx */
	aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1);
	read_idx = atomic_read(&dev->read_idx);

	/* queue is not empty and it's interface 0 */
	if ((intr_idx != read_idx)
	    && (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0)) {
		/* + 1 for serial number */
		offset = aux_idx * (dev->report_size + 1);
		if (!memcmp
		    (dev->read_queue + offset, urb->transfer_buffer,
		     dev->report_size)) {
			/* equal values on interface 0 will be ignored */
			spin_unlock(&dev->intr_idx_lock);
			goto exit;
		}
	}

	/* aux_idx become next intr_idx */
	aux_idx = (intr_idx == (MAX_INTERRUPT_BUFFER - 1)) ? 0 : (intr_idx + 1);
	if (read_idx == aux_idx) {
		/*
		 * Queue full: drop the oldest report.  The original
		 * expression "read_idx = (++read_idx == ...) ? 0 : read_idx"
		 * modified read_idx twice without a sequence point, which is
		 * undefined behaviour in C; compute the wrapped successor in
		 * a single assignment instead.
		 */
		read_idx = (read_idx == MAX_INTERRUPT_BUFFER - 1) ? 0 : read_idx + 1;
		atomic_set(&dev->read_idx, read_idx);
		atomic_set(&dev->overflow_flag, 1);
	}

	/* +1 for serial number */
	offset = intr_idx * (dev->report_size + 1);
	memcpy(dev->read_queue + offset, urb->transfer_buffer,
	       dev->report_size);
	/* stamp the slot with a running serial number for readers */
	*(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++;

	atomic_set(&dev->intr_idx, aux_idx);
	spin_unlock(&dev->intr_idx_lock);
	/* tell the blocking read about the new data */
	wake_up_interruptible(&dev->read_wait);

exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);

}
Exemple #15
0
/*
 * filter_rcv - validate and enqueue an incoming TIPC message on a socket.
 *
 * Rejects messages that do not match the socket's current state, rejects
 * when the global or per-socket receive queues are overloaded, otherwise
 * appends the buffer to sk_receive_queue and wakes any sleeping reader.
 * An incoming error ('FIN') on a connected socket triggers disconnect.
 */
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	u32 queue_len;

	/* Reject message if it is wrong sort of message for socket */

	/*
	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
	 */

	if (sock->state == SS_READY) {
		/* connectionless socket: connection-oriented traffic invalid */
		if (msg_connected(msg)) {
			msg_dbg(msg, "dispatch filter 1\n");
			return TIPC_ERR_NO_PORT;
		}
	} else {
		/* connection-oriented socket: multicast traffic invalid */
		if (msg_mcast(msg)) {
			msg_dbg(msg, "dispatch filter 2\n");
			return TIPC_ERR_NO_PORT;
		}
		switch (sock->state) {
		case SS_CONNECTED:
			if (!msg_connected(msg)) {
				msg_dbg(msg, "dispatch filter 3\n");
				return TIPC_ERR_NO_PORT;
			}
			break;
		case SS_CONNECTING:
			if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
				msg_dbg(msg, "dispatch filter 4\n");
				return TIPC_ERR_NO_PORT;
			}
			break;
		case SS_LISTENING:
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 5\n");
				return TIPC_ERR_NO_PORT;
			}
			break;
		case SS_DISCONNECTING:
			msg_dbg(msg, "dispatch filter 6\n");
			return TIPC_ERR_NO_PORT;
		default:	/* SS_UNCONNECTED */
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 7\n");
				return TIPC_ERR_NO_PORT;
			}
			break;
		}
	}

	/* Reject message if there isn't room to queue it */

	queue_len = (u32)atomic_read(&tipc_queue_size);
	if (unlikely(queue_len >= OVERLOAD_LIMIT_BASE)) {
		if (rx_queue_full(msg, queue_len, OVERLOAD_LIMIT_BASE))
			return TIPC_ERR_OVERLOAD;
	}
	queue_len = skb_queue_len(&sk->sk_receive_queue);
	if (unlikely(queue_len >= (OVERLOAD_LIMIT_BASE / 2))) {
		if (rx_queue_full(msg, queue_len, OVERLOAD_LIMIT_BASE / 2))
			return TIPC_ERR_OVERLOAD;
	}

	/* Enqueue message (finally!) */

	msg_dbg(msg, "<DISP<: ");
	TIPC_SKB_CB(buf)->handle = msg_data(msg);
	atomic_inc(&tipc_queue_size);
	__skb_queue_tail(&sk->sk_receive_queue, buf);

	/* Initiate connection termination for an incoming 'FIN' */

	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		tipc_disconnect_port(tipc_sk_port(sk));
	}

	if (waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	return TIPC_OK;
}
/*****************************************************************************
* btwaln_em_ioctl
*****************************************************************************/
/*
 * btwlan_em_ioctl - engineering-mode ioctl dispatcher for BT/WLAN control.
 *
 * Supported commands copy an unsigned long flag from user space and, under
 * pbtwlan_em->sem, toggle BT power, WiFi power, or the BT external
 * interrupt line.  Returns 0 on success, -EFAULT on bad pointer/state,
 * -EPERM on unknown command.
 */
static int btwlan_em_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    BTWLAN_EM_DEBUG("btwlan_em_ioctl ++\n");
    
    if(!pbtwlan_em)
    {
        BTWLAN_EM_ALERT("btwlan_em_ioctl failed get valid struct\n");
        return -EFAULT;
    }
    
    switch(cmd)
    {
        case BTWLAN_EM_IOCTL_SET_BTPWR:
        {
            unsigned long btpwr = 0;
            if (copy_from_user(&btpwr, (void*)arg, sizeof(unsigned long)))
                return -EFAULT;
                
            BTWLAN_EM_DEBUG("BTWLAN_EM_IOCTL_SET_BTPWR:%d\n", (int)btpwr);
            
            /* nonzero -> power on, zero -> power off */
            mutex_lock(&pbtwlan_em->sem);
            if (btpwr){
                mt_bt_power_on();
            }
            else{
                mt_bt_power_off();
            }
            mutex_unlock(&pbtwlan_em->sem);
            
            break;
        }
        case BTWLAN_EM_IOCTL_SET_WIFIPWR:
        {
            unsigned long wifipwr = 0;
            if (copy_from_user(&wifipwr, (void*)arg, sizeof(unsigned long)))
                return -EFAULT;
                
            BTWLAN_EM_DEBUG("BTWLAN_EM_IOCTL_SET_WIFIPWR:%d\n", (int)wifipwr);
            
            mutex_lock(&pbtwlan_em->sem);
            if (wifipwr){
                mt_wifi_power_on();
            }
            else{
                mt_wifi_power_off();
            }
            mutex_unlock(&pbtwlan_em->sem);
            
            break;
        }
        case BT_IOCTL_SET_EINT:
        {
            unsigned long bt_eint = 0;
            if (copy_from_user(&bt_eint, (void*)arg, sizeof(unsigned long)))
                return -EFAULT;
                
            /* fix: %d expects int, bt_eint is unsigned long (undefined
             * behaviour on 64-bit); cast like the other cases do */
            BTWLAN_EM_DEBUG("BT_IOCTL_SET_EINT:%d\n", (int)bt_eint);
            
            mutex_lock(&pbtwlan_em->sem);
            if (bt_eint){
                mt65xx_eint_unmask(CUST_EINT_BT_NUM);
                BTWLAN_EM_DEBUG("Set enable BT EINT\n");
            }
            else{
                mt65xx_eint_mask(CUST_EINT_BT_NUM);
                BTWLAN_EM_DEBUG("Set disable BT EINT\n");
                /* release any sleeper waiting for an EINT event */
                eint_mask = 1;
                wake_up_interruptible(&eint_wait);
            }
            mutex_unlock(&pbtwlan_em->sem);
            break;
        }
        
        default:
            BTWLAN_EM_ALERT("btwlan_em_ioctl not support\n");
            return -EPERM;
    }
    
    BTWLAN_EM_DEBUG("btwlan_em_ioctl --\n");
    return 0;
}
Exemple #17
0
/*
 * dispatch - TIPC port-level receive handler.
 *
 * Validates an incoming message against the owning socket's state,
 * applies global and per-socket overload limits, handles an incoming
 * 'FIN' (error code on a connected socket), and finally queues the
 * buffer on the socket's receive queue, waking any sleeping reader.
 * Returns TIPC_OK on acceptance or a TIPC error code for rejection.
 *
 * NOTE(review): unlike the sibling filter_rcv(), the overload checks here
 * use '>' rather than '>=', and the FIN handling runs before (not after)
 * the enqueue — confirm whether this asymmetry is intentional.
 */
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
	struct socket *sock;
	u32 recv_q_len;

	/* Reject message if socket is closing */

	if (!tsock)
		return TIPC_ERR_NO_PORT;

	/* Reject message if it is wrong sort of message for socket */

	/*
	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
	 */
	sock = tsock->sk.sk_socket;
	if (sock->state == SS_READY) {
		/* connectionless socket: connection-oriented traffic invalid */
		if (msg_connected(msg)) {
			msg_dbg(msg, "dispatch filter 1\n");
			return TIPC_ERR_NO_PORT;
		}
	} else {
		/* connection-oriented socket: multicast traffic invalid */
		if (msg_mcast(msg)) {
			msg_dbg(msg, "dispatch filter 2\n");
			return TIPC_ERR_NO_PORT;
		}
		if (sock->state == SS_CONNECTED) {
			if (!msg_connected(msg)) {
				msg_dbg(msg, "dispatch filter 3\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_CONNECTING) {
			/* only a connect response or an error is acceptable */
			if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
				msg_dbg(msg, "dispatch filter 4\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_LISTENING) {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 5\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_DISCONNECTING) {
			msg_dbg(msg, "dispatch filter 6\n");
			return TIPC_ERR_NO_PORT;
		}
		else /* (sock->state == SS_UNCONNECTED) */ {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 7\n");
				return TIPC_ERR_NO_PORT;
			}
		}
	}

	/* Reject message if there isn't room to queue it */

	if (unlikely((u32)atomic_read(&tipc_queue_size) >
		     OVERLOAD_LIMIT_BASE)) {
		if (queue_overloaded(atomic_read(&tipc_queue_size),
				     OVERLOAD_LIMIT_BASE, msg))
			return TIPC_ERR_OVERLOAD;
	}
	recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
	if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
		if (queue_overloaded(recv_q_len,
				     OVERLOAD_LIMIT_BASE / 2, msg))
			return TIPC_ERR_OVERLOAD;
	}

	/* Initiate connection termination for an incoming 'FIN' */

	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		/* Note: Use signal since port lock is already taken! */
		tipc_k_signal((Handler)async_disconnect, tport->ref);
	}

	/* Enqueue message (finally!) */

	msg_dbg(msg,"<DISP<: ");
	TIPC_SKB_CB(buf)->handle = msg_data(msg);
	atomic_inc(&tipc_queue_size);
	skb_queue_tail(&sock->sk->sk_receive_queue, buf);

	if (waitqueue_active(sock->sk->sk_sleep))
		wake_up_interruptible(sock->sk->sk_sleep);
	return TIPC_OK;
}
Exemple #18
0
/*
 * gs_close - generic-serial close handler for a tty.
 *
 * Decrements the port open count and, on the last close, drains the
 * transmitter, shuts the port down, and wakes processes blocked in open
 * or close.  The whole sequence runs with interrupts disabled
 * (save_flags/cli) except where noted; statement order is significant.
 */
void gs_close(struct tty_struct * tty, struct file * filp)
{
	unsigned long flags;
	struct gs_port *port;
	
	func_enter ();

	if (!tty) return;

	port = (struct gs_port *) tty->driver_data;

	if (!port) return;

	if (!port->tty) {
		/* This seems to happen when this is called from vhangup. */
		gs_dprintk (GS_DEBUG_CLOSE, "gs: Odd: port->tty is NULL\n");
		port->tty = tty;
	}

	save_flags(flags); cli();

	if (tty_hung_up_p(filp)) {
		/* hangup already did the teardown; just notify the driver */
		restore_flags(flags);
		port->rd->hungup (port);
		func_exit ();
		return;
	}

	/* sanity-check the reference counts before decrementing */
	if ((tty->count == 1) && (port->count != 1)) {
		printk(KERN_ERR "gs: gs_close: bad port count;"
		       " tty->count is 1, port count is %d\n", port->count);
		port->count = 1;
	}
	if (--port->count < 0) {
		printk(KERN_ERR "gs: gs_close: bad port count: %d\n", port->count);
		port->count = 0;
	}
	if (port->count) {
		/* not the last close: nothing more to do */
		gs_dprintk(GS_DEBUG_CLOSE, "gs_close: count: %d\n", port->count);
		restore_flags(flags);
		func_exit ();
		return;
	}
	port->flags |= ASYNC_CLOSING;

	/*
	 * Now we wait for the transmit buffer to clear; and we notify 
	 * the line discipline to only process XON/XOFF characters.
	 */
	tty->closing = 1;
	/* if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
	   tty_wait_until_sent(tty, port->closing_wait); */

	/*
	 * At this point we stop accepting input.  To do this, we
	 * disable the receive line status interrupts, and tell the
	 * interrupt driver to stop checking the data ready bit in the
	 * line status register.
	 */

	port->rd->disable_rx_interrupts (port);

	/* close has no way of returning "EINTR", so discard return value */
	if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
		gs_wait_tx_flushed (port, port->closing_wait); 

	port->flags &= ~GS_ACTIVE;

	/* discard anything still buffered in driver and line discipline */
	if (tty->driver->flush_buffer)
		tty->driver->flush_buffer(tty);
		
	tty_ldisc_flush(tty);
	tty->closing = 0;

	port->event = 0;
	port->rd->close (port);
	port->rd->shutdown_port (port);
	port->tty = NULL;

	if (port->blocked_open) {
		/* give the hardware a settle delay before re-open is allowed */
		if (port->close_delay) {
			set_current_state (TASK_INTERRUPTIBLE);
			schedule_timeout(port->close_delay);
		}
		wake_up_interruptible(&port->open_wait);
	}
	port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING | ASYNC_INITIALIZED);
	wake_up_interruptible(&port->close_wait);

	restore_flags(flags);
	func_exit ();
}
Exemple #19
0
/**
 * pcm3718_dma_isr - interrupt service routine for DMA
 *                   data acquisition
 *
 * ptr: point to the private data of device object
 */
static void pcm3718_dma_isr(private_data *ptr)
{
	unsigned long ret,flags,i;
	private_data *privdata = ptr;
	adv_user_page *page = NULL;
	INT16U tmp;
	/* recieve data */
//while(advInp(privdata,8)&0x80) ;
//while(!advInp(privdata,8)&0x10) ;
	i = 0;
			privdata->item = 0;
			privdata->page_index=0;
//memset(privdata->user_buf,0,privdata->hwdmasize[0]);	

	//printk("----------%x convert %x\n",advInp(privdata,8)&0x10,privdata->hwdmasize[0]); 
   	advOutp( privdata,  0x08,  0 );   // clear interrupt request
   	advOutp( privdata,  0x19,  0 );   // clear interrupt request

	/* copy the DMA buffer, sample by sample, into the mapped user pages,
	 * advancing to the next page when the current one is exhausted */
	do {
		page = privdata->user_pages + privdata->page_index;
		if (privdata->item >= page->length) {
			privdata->page_index++;
			privdata->item = 0;
		}
	
		privdata->page_index %= privdata->page_num;
		privdata->cur_index %= privdata->conv_num;
		i++;
	
		page = privdata->user_pages + privdata->page_index;

		
        	// read data
        	tmp = privdata->dmabuf[privdata->cur_index];               
       
		memcpy((INT16U *) (page->page_addr + page->offset + privdata->item),
		       &tmp, sizeof(INT16U));
		
		//if(tmp&0x01!=1 ) {
		if(i<10 ) {
	//		printk("i :%x advin %x\n",i,tmp);
	//		printk("i :%x advin %x\n",i,advInp(privdata,1));
	//		printk("userbuf : %x\n",privdata->user_buf[i]);
	//		printk("hwuserbuf :%x\n",privdata->hwdmaptr[i]);

		}
		/* item counts bytes (2 per INT16U sample), cur_index counts samples */
		privdata->item += 2;
		privdata->cur_index++;
	} while (privdata->cur_index < privdata->conv_num);

//	memcpy(privdata->user_buf,privdata->dmabuf,privdata->hwdmasize[0]);
//memset(privdata->dmabuf,0,privdata->hwdmasize[0]);	
	///	printk("user buf 0 :%x\n",privdata->user_buf[0]);
	///	printk("user-1 buf %d :%x\n",i-1,privdata->user_buf[i-1]);
	///	printk("user buf %d :%x\n",i,privdata->user_buf[i]);
	//	printk("user buf 20 :%x\n",privdata->user_buf[20]);
	///	printk("cur_index:%d conv_num/2:%d tmp:%x\n",privdata->cur_index,privdata->conv_num/2,tmp);
		//printk("page index:%x page num:%x\n",privdata->page_index,privdata->page_num);

	/* buf_stat selects which half-buffer just completed; signal the
	 * matching event so user space can consume it */
	if (!privdata->buf_stat) {
		privdata->cur_index = privdata->conv_num / 2;
		privdata->half_ready = 1;

		adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	} else {
		privdata->cur_index = privdata->conv_num;
		privdata->half_ready = 2;
		privdata->trans += privdata->conv_num;
			adv_process_info_set_event_all(&privdata->ptr_process_info, 1, 1);

		if (!privdata->cyclic) { /* terminate */
			adv_process_info_set_event_all(&privdata->ptr_process_info, 2, 1);
	           	advOutp( privdata, 9,  0 );           // disable interrupt
	           	advOutp( privdata, 6,  0 );           // disable interrupt
			advOutp(privdata,8,0);
		//	privdata->ai_stop = 1;
		} else {	/* buffer change */

			/* second completion without a consumer read -> overrun */
			if (privdata->overrun_flag==1) { /* overrun */
				adv_process_info_set_event_all(&privdata->ptr_process_info,
							       3,
							       1);
			} 
			privdata->overrun_flag = 1;
			
		}
	}

	if (privdata->cur_index == privdata->int_cnt) { /* interrupt count */
		//adv_process_info_set_event_all(&privdata->ptr_process_info, 0, 1);
	}
//	for(i=0;i<privdata->conv_num*sizeof(INT16U);i++){
//		privdata->user_buf[i] = privdata->dmabuf[i];
//	}

	/* re-arm the DMA channel for the next acquisition cycle */
	if(privdata->cyclic){

        flags=claim_dma_lock();
        disable_dma(privdata->ioDMAbase);
	clear_dma_ff(privdata->ioDMAbase);
        set_dma_mode(privdata->ioDMAbase, DMA_MODE_READ);
        //set_dma_mode(privdata->ioDMAbase, DMA_MODE_READ|DMA_AUTOINIT);
        set_dma_addr(privdata->ioDMAbase, privdata->hwdmaptr);
        set_dma_count(privdata->ioDMAbase, privdata->hwdmasize[0]);
	ret = get_dma_residue(privdata->ioDMAbase);
        release_dma_lock(flags);
	}

	/* NOTE(review): enable_dma() runs outside both the DMA lock and the
	 * cyclic guard above, so it also re-enables DMA after a terminate —
	 * confirm whether it should be inside the if(cyclic) block. */
        enable_dma(privdata->ioDMAbase);
	wake_up_interruptible(&privdata->event_wait);
	
	//privdata->buf_stat = !privdata->buf_stat;
}
Exemple #20
0
/*
 * gs_set_termios - apply new termios settings to a generic-serial port.
 *
 * Skips the work entirely when nothing relevant changed, derives the
 * requested baud rate (honoring the ASYNC_SPD_* override flags for
 * B38400), updates the port's wakeup threshold, flushes the transmitter,
 * and pushes the settings to the low-level driver.  Restarts output if
 * CRTSCTS flow control was just turned off.
 */
void gs_set_termios (struct tty_struct * tty, 
                     struct termios * old_termios)
{
	struct gs_port *port;
	int baudrate, tmp, rv;
	struct termios *tiosp;

	func_enter();

	if (!tty) return;

	port = tty->driver_data;

	if (!port) return;

	tiosp = tty->termios;

	if (gs_debug & GS_DEBUG_TERMIOS) {
		gs_dprintk (GS_DEBUG_TERMIOS, "termios structure (%p):\n", tiosp);
	}

#if 0
	/* This is an optimization that is only allowed for dumb cards */
	/* Smart cards require knowledge of iflags and oflags too: that 
	   might change hardware cooking mode.... */
#endif
	if (old_termios) {
		/* nothing changed: skip the expensive reconfiguration */
		if(   (tiosp->c_iflag == old_termios->c_iflag)
		   && (tiosp->c_oflag == old_termios->c_oflag)
		   && (tiosp->c_cflag == old_termios->c_cflag)
		   && (tiosp->c_lflag == old_termios->c_lflag)
		   && (tiosp->c_line  == old_termios->c_line)
		   && (memcmp(tiosp->c_cc, old_termios->c_cc, NCC) == 0)) {
			gs_dprintk(GS_DEBUG_TERMIOS, "gs_set_termios: optimized away\n");
			return /* 0 */;
		}
	} else 
		gs_dprintk(GS_DEBUG_TERMIOS, "gs_set_termios: no old_termios: "
		           "no optimization\n");

	if(old_termios && (gs_debug & GS_DEBUG_TERMIOS)) {
		if(tiosp->c_iflag != old_termios->c_iflag)  printk("c_iflag changed\n");
		if(tiosp->c_oflag != old_termios->c_oflag)  printk("c_oflag changed\n");
		if(tiosp->c_cflag != old_termios->c_cflag)  printk("c_cflag changed\n");
		if(tiosp->c_lflag != old_termios->c_lflag)  printk("c_lflag changed\n");
		if(tiosp->c_line  != old_termios->c_line)   printk("c_line changed\n");
		/* fix: memcmp() returns 0 when equal, so the test was inverted
		 * and reported "changed" for identical c_cc arrays */
		if(memcmp(tiosp->c_cc, old_termios->c_cc, NCC)) printk("c_cc changed\n");
	}

	/* decode CBAUD, folding the CBAUDEX extension into the table index */
	baudrate = tiosp->c_cflag & CBAUD;
	if (baudrate & CBAUDEX) {
		baudrate &= ~CBAUDEX;
		if ((baudrate < 1) || (baudrate > 4))
			tiosp->c_cflag &= ~CBAUDEX;
		else
			baudrate += 15;
	}

	baudrate = gs_baudrates[baudrate];
	if ((tiosp->c_cflag & CBAUD) == B38400) {
		/* legacy ASYNC_SPD_* flags override B38400 */
		if (     (port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
			baudrate = 57600;
		else if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
			baudrate = 115200;
		else if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
			baudrate = 230400;
		else if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
			baudrate = 460800;
		else if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
			baudrate = (port->baud_base / port->custom_divisor);
	}

	/* I recommend using THIS instead of the mess in termios (and
	   duplicating the above code). Next we should create a clean
	   interface towards this variable. If your card supports arbitrary
	   baud rates, (e.g. CD1400 or 16550 based cards) then everything
	   will be very easy..... */
	port->baud = baudrate;

	/* Two timer ticks seems enough to wakeup something like SLIP driver */
	/* Baudrate/10 is cps. Divide by HZ to get chars per tick. */
	tmp = (baudrate / 10 / HZ) * 2;			 

	if (tmp <                 0) tmp = 0;
	if (tmp >= SERIAL_XMIT_SIZE) tmp = SERIAL_XMIT_SIZE-1;

	port->wakeup_chars = tmp;

	/* We should really wait for the characters to be all sent before
	   changing the settings. -- CAL */
	rv = gs_wait_tx_flushed (port, MAX_SCHEDULE_TIMEOUT);
	if (rv < 0) return /* rv */;

	rv = port->rd->set_real_termios(port);
	if (rv < 0) return /* rv */;

	/* CRTSCTS was just switched off: release a port stopped by flow control */
	if ((!old_termios || 
	     (old_termios->c_cflag & CRTSCTS)) &&
	    !(      tiosp->c_cflag & CRTSCTS)) {
		tty->stopped = 0;
		gs_start(tty);
	}

#ifdef tytso_patch_94Nov25_1726
	/* This "makes sense", Why is it commented out? */

	if (!(old_termios->c_cflag & CLOCAL) &&
	    (tty->termios->c_cflag & CLOCAL))
		wake_up_interruptible(&info->open_wait);
#endif

	func_exit();
	return /* 0 */;
}
/*
 * First write to nvram, if fatal error, that is the only
 * place we log the info.  The error will be picked up
 * on the next reboot by rtasd.  If not fatal, run the
 * method for the type of error.  Currently, only RTAS
 * errors have methods implemented, but in the future
 * there might be a need to store data in nvram before a
 * call to panic().
 *
 * XXX We write to nvram periodically, to indicate error has
 * been written and sync'd, but there is a possibility
 * that if we don't shutdown correctly, a duplicate error
 * record will be created on next reboot.
 */
/*
 * pSeries_log_error - record an RTAS error event.
 *
 * Writes the event to NVRAM (unless it is a boot-time event or logging
 * is disabled), mirrors it to the printk log, and appends it to the
 * in-memory ring buffer consumed by rtasd, waking any reader.  All ring
 * state is protected by rtasd_log_lock; every exit path must release it.
 */
void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
{
	unsigned long offset;
	unsigned long s;
	int len = 0;

	pr_debug("rtasd: logging event\n");
	if (buf == NULL)
		return;

	spin_lock_irqsave(&rtasd_log_lock, s);

	/* get length and increase count */
	switch (err_type & ERR_TYPE_MASK) {
	case ERR_TYPE_RTAS_LOG:
		len = log_rtas_len(buf);
		/* boot-time events do not advance the sequence counter */
		if (!(err_type & ERR_FLAG_BOOT))
			error_log_cnt++;
		break;
	case ERR_TYPE_KERNEL_PANIC:
	default:
		WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
		spin_unlock_irqrestore(&rtasd_log_lock, s);
		return;
	}

	/* Write error to NVRAM */
	if (logging_enabled && !(err_type & ERR_FLAG_BOOT))
		nvram_write_error_log(buf, len, err_type, error_log_cnt);

	/*
	 * rtas errors can occur during boot, and we do want to capture
	 * those somewhere, even if nvram isn't ready (why not?), and even
	 * if rtasd isn't ready. Put them into the boot log, at least.
	 */
	if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG)
		printk_log_rtas(buf, len);

	/* Check to see if we need to or have stopped logging */
	if (fatal || !logging_enabled) {
		/* a fatal error latches logging off for good */
		logging_enabled = 0;
		WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
		spin_unlock_irqrestore(&rtasd_log_lock, s);
		return;
	}

	/* call type specific method for error */
	switch (err_type & ERR_TYPE_MASK) {
	case ERR_TYPE_RTAS_LOG:
		/* ring-buffer slot index wraps via LOG_NUMBER_MASK */
		offset = rtas_error_log_buffer_max *
			((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK);

		/* First copy over sequence number */
		memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int));

		/* Second copy over error log data */
		offset += sizeof(int);
		memcpy(&rtas_log_buf[offset], buf, len);

		/* grow until full, then overwrite the oldest entry */
		if (rtas_log_size < LOG_NUMBER)
			rtas_log_size += 1;
		else
			rtas_log_start += 1;

		WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
		spin_unlock_irqrestore(&rtasd_log_lock, s);
		wake_up_interruptible(&rtas_log_wait);
		break;
	case ERR_TYPE_KERNEL_PANIC:
	default:
		WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
		spin_unlock_irqrestore(&rtasd_log_lock, s);
		return;
	}

}
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
    struct list_head	*pool = &port->write_pool;
    struct usb_ep		*in = port->port_usb->in;
    int			status = 0;
    /* NOTE(review): prev_len is static, so it is shared by every port
     * using this function — confirm this is safe with multiple ports. */
    static long 		prev_len;
    bool			do_tty_wake = false;

    while (!list_empty(pool)) {
        struct usb_request	*req;
        int			len;

        req = list_entry(pool->next, struct usb_request, list);
        len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
        if (len == 0) {
            /* Queue zero length packet */
            /* Bug fix : change & operator to && operator */
            /* If we want't check zlp-condition, have to use && operator */
            /* a ZLP terminates a transfer that ended on a packet boundary */
            if (prev_len && (prev_len % in->maxpacket == 0)) {
                req->length = 0;
                list_del(&req->list);

                /* drop the lock around usb_ep_queue: completion callbacks
                 * may run and need the lock themselves */
                spin_unlock(&port->port_lock);
                status = usb_ep_queue(in, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);
                if (!port->port_usb) {
                    gs_free_req(in, req);
                    break;
                }
                if (status) {
                    printk(KERN_ERR "%s: %s err %d\n",
                           __func__, "queue", status);
                    list_add(&req->list, pool);
                }
                prev_len = 0;
            }
            /* no more data: let a blocked drain-waiter proceed */
            wake_up_interruptible(&port->drain_wait);
            break;
        }
        do_tty_wake = true;

        req->length = len;
        list_del(&req->list);

        pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
                  port->port_num, len, *((u8 *)req->buf),
                  *((u8 *)req->buf+1), *((u8 *)req->buf+2));

        /* Drop lock while we call out of driver; completions
         * could be issued while we do so.  Disconnection may
         * happen too; maybe immediately before we queue this!
         *
         * NOTE that we may keep sending data for a while after
         * the TTY closed (dev->ioport->port_tty is NULL).
         */
        spin_unlock(&port->port_lock);
        status = usb_ep_queue(in, req, GFP_ATOMIC);
        spin_lock(&port->port_lock);
        /*
         * If port_usb is NULL, gserial disconnect is called
         * while the spinlock is dropped and all requests are
         * freed. Free the current request here.
         */
        if (!port->port_usb) {
            do_tty_wake = false;
            gs_free_req(in, req);
            break;
        }
        if (status) {
            pr_debug("%s: %s %s err %d\n",
                     __func__, "queue", in->name, status);
            list_add(&req->list, pool);
            break;
        }
        /* remember last payload size for the ZLP decision above */
        prev_len = req->length;

    }

    if (do_tty_wake && port->port_tty)
        tty_wakeup(port->port_tty);
    return status;
}
Exemple #23
0
/*
 * on_receive_block - complete reception of an R3964 data block.
 *
 * Verifies the block checksum and error flags; on failure sends NAK and
 * either retries (up to R3964_MAX_RETRIES) or gives up.  On success it
 * acknowledges with DLE, copies the received bytes into a freshly
 * allocated r3964_block_header, queues it, and notifies clients that
 * registered for R3964_SIG_DATA.
 */
static void on_receive_block(struct r3964_info *pInfo)
{
   unsigned int length;
   struct r3964_client_info *pClient;
   struct r3964_block_header *pBlock;
   
   length=pInfo->rx_position;

   /* compare byte checksum characters: */
   if(pInfo->flags & R3964_BCC)
   {
      if(pInfo->bcc!=pInfo->last_rx)
      {
         TRACE_PE("checksum error - got %x but expected %x",
                pInfo->last_rx, pInfo->bcc);
         pInfo->flags |= R3964_CHECKSUM;
      }
   }

   /* check for errors (parity, overrun,...): */
   if(pInfo->flags & R3964_ERROR)
   {
      TRACE_PE("on_receive_block - transmission failed error %x",
             pInfo->flags & R3964_ERROR);
      
      /* reject the block and ask the peer to repeat it */
      put_char(pInfo, NAK);
      flush(pInfo);
      if(pInfo->nRetry<R3964_MAX_RETRIES)
      {
         pInfo->state=R3964_WAIT_FOR_RX_REPEAT;
         pInfo->nRetry++;
	 mod_timer(&pInfo->tmr, jiffies + R3964_TO_RX_PANIC);
      }
      else
      {
         TRACE_PE("on_receive_block - failed after max retries");
         pInfo->state=R3964_IDLE;
      }
      return;
   }

   
   /* received block; submit DLE: */
   put_char(pInfo, DLE);
   flush(pInfo);
   /* NOTE(review): del_timer_sync and the GFP_KERNEL kmalloc below may
    * sleep — confirm this path never runs in atomic context. */
   del_timer_sync(&pInfo->tmr);
   TRACE_PS(" rx success: got %d chars", length);

   /* prepare struct r3964_block_header: */
   pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL);
   /* NOTE(review): casting a pointer to (int) truncates on 64-bit */
   TRACE_M("on_receive_block - kmalloc %x",(int)pBlock);

   if(pBlock==NULL)
      return;

   /* payload lives immediately after the header in the same allocation */
   pBlock->length = length;
   pBlock->data   = ((unsigned char*)pBlock)+sizeof(struct r3964_block_header);
   pBlock->locks  = 0;
   pBlock->next   = NULL;
   pBlock->owner  = NULL;

   memcpy(pBlock->data, pInfo->rx_buf, length);

   /* queue block into rx_queue: */
   add_rx_queue(pInfo, pBlock);

   /* notify attached client processes: */
   for(pClient=pInfo->firstClient; pClient; pClient=pClient->next)
   {
      if(pClient->sig_flags & R3964_SIG_DATA)
      {
         add_msg(pClient, R3964_MSG_DATA, length, R3964_OK, pBlock);
      }
   }
   wake_up_interruptible (&pInfo->read_wait);
   
   pInfo->state = R3964_IDLE;

   trigger_transmit(pInfo);
}
/*
 * RX work queue takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
    struct gs_port		*port = container_of(w, struct gs_port, push);
    struct tty_struct	*tty;
    struct list_head	*queue = &port->read_queue;
    bool			disconnect = false;
    bool			do_push = false;

    /* hand any queued data to the tty */
    spin_lock_irq(&port->port_lock);
    tty = port->port_tty;
    while (!list_empty(queue)) {
        struct usb_request	*req;

        req = list_first_entry(queue, struct usb_request, list);

        /* discard data if tty was closed */
        if (!tty)
            goto recycle;

        /* leave data queued if tty was rx throttled */
        if (test_bit(TTY_THROTTLED, &tty->flags))
            break;

        switch (req->status) {
        case -ESHUTDOWN:
            disconnect = true;
            pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
            break;

        default:
            /* presumably a transient fault */
            pr_warning(PREFIX "%d: unexpected RX status %d\n",
                       port->port_num, req->status);
        /* FALLTHROUGH */
        case 0:
            /* normal completion */
            break;
        }

        /* push data to (open) tty */
        if (req->actual) {
            char		*packet = req->buf;
            unsigned	size = req->actual;
            unsigned	n;
            int		count;

            /* we may have pushed part of this packet already... */
            n = port->n_read;
            if (n) {
                packet += n;
                size -= n;
            }

            count = tty_insert_flip_string(tty, packet, size);
            if (count)
                do_push = true;
            if (count != size) {
                /* stop pushing; TTY layer can't handle more */
                /* remember how far we got so the next pass resumes here */
                port->n_read += count;
                pr_vdebug(PREFIX "%d: rx block %d/%d\n",
                          port->port_num,
                          count, req->actual);
                break;
            }
            port->n_read = 0;
        }
recycle:
        /* request fully consumed (or discarded): return it to the pool */
        list_move(&req->list, &port->read_pool);
    }

    /* Push from tty to ldisc; this is immediate with low_latency, and
     * may trigger callbacks to this driver ... so drop the spinlock.
     */
    if (tty && do_push) {
        spin_unlock_irq(&port->port_lock);
        tty_flip_buffer_push(tty);
        wake_up_interruptible(&tty->read_wait);
        spin_lock_irq(&port->port_lock);

        /* tty may have been closed */
        tty = port->port_tty;
    }


    /* We want our data queue to become empty ASAP, keeping data
     * in the tty and ldisc (not here).  If we couldn't push any
     * this time around, there may be trouble unless there's an
     * implicit tty_unthrottle() call on its way...
     *
     * REVISIT we should probably add a timer to keep the work queue
     * from starving ... but it's not clear that case ever happens.
     */
    if (!list_empty(queue) && tty) {
        if (!test_bit(TTY_THROTTLED, &tty->flags)) {
            if (do_push)
                queue_work(gserial_wq, &port->push);
            else
                pr_warning(PREFIX "%d: RX not scheduled?\n",
                           port->port_num);
        }
    }

    /* If we're still connected, refill the USB RX queue. */
    if (!disconnect && port->port_usb)
        gs_start_rx(port);

    spin_unlock_irq(&port->port_lock);
}
Exemple #25
0
/*
 * unix_write - copy data from a user buffer into the peer's ring buffer.
 *
 * @sock:     the unix-domain socket to write on (must be SS_CONNECTED)
 * @ubuf:     user-space buffer to copy from
 * @size:     number of bytes the caller wants written
 * @nonblock: non-zero for non-blocking behaviour (-EAGAIN when full)
 *
 * Returns the number of bytes actually written, 0 for a zero/negative
 * size request, or a negative errno.  Sends SIGPIPE (and returns
 * -EPIPE) when the peer has disconnected, matching pipe semantics.
 */
static int unix_write(struct socket *sock, char *ubuf, int size, int nonblock)
{
    struct unix_proto_data *pupd;
    int todo, space;

    if ((todo = size) <= 0)
	return 0;

    if (sock->state != SS_CONNECTED) {
	if (sock->state == SS_DISCONNECTING) {
	    send_sig(SIGPIPE, current, 1);
	    return -EPIPE;
	}
	return -EINVAL;
    }

    pupd = UN_DATA(sock)->peerupd;	/* safer than sock->conn */

    /* Wait (unless non-blocking) until the peer's buffer has room. */
    while (!(space = UN_BUF_SPACE(pupd))) {
	sock->flags |= SO_NOSPACE;

	if (nonblock)
	    return -EAGAIN;

	sock->flags &= ~SO_NOSPACE;
	interruptible_sleep_on(sock->wait);

	if (current->signal /* & ~current->blocked */ )
	    return -ERESTARTSYS;

	if (sock->state == SS_DISCONNECTING) {
	    send_sig(SIGPIPE, current, 1);
	    return -EPIPE;
	}
    }

/*
 *	Copy from the user's buffer to the write buffer,
 *	watching for wraparound. Then we wake up the reader.
 */

    down(&pupd->sem);
    do {
	int part, cando;

	if (space <= 0) {
	    printk("UNIX: write: space is negative (%d)\n", space);
	    send_sig(SIGKILL, current, 1);
	    /* FIX: release the semaphore before bailing out; the
	     * original leaked pupd->sem on this error path (compare
	     * the SS_DISCONNECTING path below, which releases it). */
	    up(&pupd->sem);
	    return -EPIPE;
	}

	/*
	 *      We may become disconnected inside this loop, so watch
	 *      for it (peerupd is safe until we close).
	 */

	if (sock->state == SS_DISCONNECTING) {
	    send_sig(SIGPIPE, current, 1);
	    up(&pupd->sem);
	    return -EPIPE;
	}

	/* Clamp this pass to the free space ... */
	if ((cando = todo) > space)
	    cando = space;

	/* ... and to the contiguous run before the ring buffer wraps. */
	if (cando > (part = UN_BUF_SIZE - pupd->bp_head))
	    cando = part;

	memcpy_fromfs(pupd->buf + pupd->bp_head, ubuf, cando);
	/* UN_BUF_SIZE is a power of two, so masking performs the wrap. */
	pupd->bp_head = (pupd->bp_head + cando) & (UN_BUF_SIZE - 1);

	ubuf += cando;
	todo -= cando;

	/* Wake the reader sleeping on the peer's wait queue. */
	if (sock->state == SS_CONNECTED) {
	    wake_up_interruptible(sock->conn->wait);
#if 0
	    sock_wake_async(sock->conn, 1);
#endif
	}
	space = UN_BUF_SPACE(pupd);
    } while (todo && space);

    up(&pupd->sem);

    return (size - todo);
}
Exemple #26
0
/*
 * dtlk_timer_tick - kernel timer callback for the DoubleTalk driver.
 *
 * Emits a trace marker and wakes any tasks sleeping on
 * dtlk_process_list.  @data is unused here.
 */
static void dtlk_timer_tick(unsigned long data)
{
    TRACE_TEXT(" dtlk_timer_tick");
    wake_up_interruptible(&dtlk_process_list);
}
Exemple #27
0
/*
 * masq_remove_proc - detach @tsk from its bproc masquerade master.
 *
 * @tsk:            task being removed from the masqueraded process space
 * @update_nlchild: non-zero when this is a silent exit (tsk == current),
 *                  in which case the parent's non-local child count is
 *                  bumped and any waiters on wait_chldexit are woken.
 *
 * Unlinks @tsk from the master's process list, drops the master
 * reference (freeing the master structure on the last reference), and
 * reparents a still-live task to init so it is reaped there.
 */
void masq_remove_proc(struct task_struct *tsk, int update_nlchild) {
    struct bproc_masq_master_t *m;

    /* update the child counts on the parent process(es) but only if
     * we're going away in a silent exit.  (i.e. tsk == current and
     * we're running, not from wait->release->unmasq in which case tsk
     * is some other process and a zombie.) */
    if (update_nlchild) {
	if (BPROC_ISMASQ(tsk->real_parent)) {
	    tsk->real_parent->bproc.nlchild++;
	    wake_up_interruptible(&tsk->real_parent->wait_chldexit);
	}
	/* ptrace may have given us a second, distinct parent; count
	 * it separately when it is also masqueraded. */
	if (tsk->parent != tsk->real_parent && BPROC_ISMASQ(tsk->parent)) {
	    tsk->parent->bproc.nlchild++;
	    wake_up_interruptible(&tsk->parent->wait_chldexit);
	}
    }

    /* Remove this process from the list of processes for this master.
     * If this was the last reference to it, free the master structure
     * as well. */
    m = tsk->bproc.master;
    list_del(&tsk->bproc.list);
    tsk->bproc.master = 0;
    if (atomic_dec_and_test(&m->count))
	kfree(m);

    /*ptrace_disable ? */
    /* TASK_ZOMBIE states and above are already on their way out;
     * only reparent tasks that are still live. */
    if (tsk->state < TASK_ZOMBIE) {
	/* Since we're trying to disappear silently, we should
	 * reparent ourselves to init which will do the wait() on
	 * us. */
	ptrace_unlink(tsk);
	tsk->exit_signal = SIGCHLD;
	set_parents(tsk, child_reaper, child_reaper);
    }

#if 0
    /* Shed child processes - we just have them re-select parents.  If
     * this is being called from release() we shouldn't have any
     * children...  */
    while (!list_empty(&tsk->children)) {
	struct task_struct *child;
	child = list_entry(tsk->children.next, struct task_struct, sibling);

	if (!BPROC_ISMASQ(child) || child->bproc.master != m)
	    printk(KERN_ERR "bproc: masq_remove_proc: child isn't in my"
		   " process space!\n");

	masq_select_parents(child->bproc.master, child);

	if (child->parent == tsk || child->real_parent == tsk) {
	    printk(KERN_CRIT "bproc: masq: child is still mine! me=%d child=%d\n",
		   tsk->pid, child->pid);
	}
    }
    while (!list_empty(&tsk->ptrace_children)) {
	struct task_struct *child;
	child = list_entry(tsk->ptrace_children.next, struct task_struct,
			   ptrace_list);

	if (!BPROC_ISMASQ(child) || child->bproc.master != m)
	    printk(KERN_ERR "bproc: masq_remove_proc: child isn't in my"
		   " process space!\n");

	masq_select_parents(child->bproc.master, child);

	if (child->parent == tsk || child->real_parent == tsk) {
	    printk(KERN_CRIT "bproc: masq: child is still mine! me=%d child=%d\n",
		   tsk->pid, child->pid);
	}
    }
#endif
}
Exemple #28
0
/*
 * usb_tranzport_interrupt_in_callback - interrupt-IN URB completion handler.
 *
 * On a good 8-byte transfer, copies the report into the device ring
 * buffer (dropping it with a warning if the ring is full), then
 * resubmits the URB while the device is still running.  Fatal URB
 * statuses (-ENOENT/-ECONNRESET/-ESHUTDOWN) skip the resubmit; other
 * errors attempt recovery by resubmitting.  Always wakes readers.
 */
static void usb_tranzport_interrupt_in_callback(struct urb *urb)
{
	struct usb_tranzport *dev = urb->context;
	unsigned int next_ring_head;
	int retval = -1;

	if (urb->status) {
		/* These statuses mean the URB was killed/unlinked: stop. */
		if (urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN) {
			goto exit;
		} else {
			dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n",
				 __FUNCTION__, urb->status);
			goto resubmit; /* maybe we can recover */
		}
	}

	/* The Tranzport always reports in 8-byte packets. */
	if (urb->actual_length != 8) {
		dev_warn(&dev->intf->dev,
			 "Urb length was %d bytes!! Do something intelligent \n", urb->actual_length);
	} else {
		dbg_info(&dev->intf->dev, "%s: received: %02x%02x%02x%02x%02x%02x%02x%02x\n",
			 __FUNCTION__, dev->interrupt_in_buffer[0],dev->interrupt_in_buffer[1],dev->interrupt_in_buffer[2],dev->interrupt_in_buffer[3],dev->interrupt_in_buffer[4],dev->interrupt_in_buffer[5],dev->interrupt_in_buffer[6],dev->interrupt_in_buffer[7]);
#if SUPPRESS_EXTRA_OFFLINE_EVENTS
		/* byte 1 == 0xff marks an "offline" report; dev->offline
		 * tracks how many we have already passed up (0/1/2). */
		if(dev->offline == 2 && dev->interrupt_in_buffer[1] == 0xff) { goto resubmit; }
		if(dev->offline == 1 && dev->interrupt_in_buffer[1] == 0xff) { dev->offline = 2; goto resubmit; }

/* Always pass one offline event up the stack */
		if(dev->offline > 0 && dev->interrupt_in_buffer[1] != 0xff) { dev->offline = 0; }
		if(dev->offline == 0 && dev->interrupt_in_buffer[1] == 0xff) { dev->offline = 1; }

#endif
		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __FUNCTION__,dev->ring_head,dev->ring_tail);

		next_ring_head = (dev->ring_head+1) % ring_buffer_size;

		/* Store the report unless advancing head would collide
		 * with tail (ring full). */
		if (next_ring_head != dev->ring_tail) {
			memcpy(&((*dev->ring_buffer)[dev->ring_head]), dev->interrupt_in_buffer, urb->actual_length);
			dev->ring_head = next_ring_head;
			retval = 0;
			memset(dev->interrupt_in_buffer, 0, urb->actual_length);
		} else {
			dev_warn(&dev->intf->dev,
				 "Ring buffer overflow, %d bytes dropped\n",
				 urb->actual_length);
			memset(dev->interrupt_in_buffer, 0, urb->actual_length);
		}
	}

resubmit:
	/* resubmit if we're still running */
	if (dev->interrupt_in_running && dev->intf) {
		retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
		if (retval)
			dev_err(&dev->intf->dev,
				"usb_submit_urb failed (%d)\n", retval);
	}

exit:
	/* Signal readers blocked in read() that new data (or EOF) may
	 * be available. */
	dev->interrupt_in_done = 1;
	wake_up_interruptible(&dev->read_wait);
}
Exemple #29
0
/*
 * jsm_carrier - track virtual/physical carrier transitions on a channel.
 *
 * Reads the cached modem status (ch_mistat) and termios CLOCAL flag,
 * wakes any tasks waiting on ch_flags_wait when carrier rises (virtual
 * or physical) or when physical carrier drops while not ignored, and
 * finally refreshes the CH_FCAR/CH_CD cached flags to match reality.
 */
static void jsm_carrier(struct jsm_channel *ch)
{
	struct jsm_board *bd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	/* FIX: validate ch before touching it.  The original logged via
	 * ch->ch_bd->pci_dev *before* the NULL checks, dereferencing a
	 * possibly-NULL pointer. */
	if (!ch)
		return;

	bd = ch->ch_bd;

	if (!bd)
		return;

	jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev, "start\n");

	if (ch->ch_mistat & UART_MSR_DCD) {
		jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
			"mistat: %x D_CD: %x\n", ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
		phys_carrier = 1;
	}

	/* CLOCAL means "ignore modem control lines": force carrier on. */
	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
		"DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier);

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */

		jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
			"carrier: virt DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */

		jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
			"carrier: physical DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 *  Test for a PHYSICAL transition to low, so long as we aren't
	 *  currently ignoring physical transitions (which is what "virtual
	 *  carrier" indicates).
	 *
	 *  The transition of the virtual carrier to low really doesn't
	 *  matter... it really only means "ignore carrier state", not
	 *  "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0)
			&& (phys_carrier == 0)) {
		/*
		 *	When carrier drops:
		 *
		 *	Drop carrier on all open units.
		 *
		 *	Flush queues, waking up any task waiting in the
		 *	line discipline.
		 *
		 *	Send a hangup to the control terminal.
		 *
		 *	Enable all select calls.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 *  Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}
/*
 * vpif_channel_isr - per-channel VPIF display interrupt handler.
 *
 * Marks the currently displayed buffer VIDEOBUF_DONE, wakes its
 * waiter, takes the next buffer from the DMA queue and programs its
 * address into the VPIF registers (via the process_* helpers).
 * @dev_id points at the channel index.  Always returns IRQ_HANDLED.
 */
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
	struct vpif_device *dev = &vpif_obj;
	struct channel_obj *ch;
	struct common_obj *common;
	enum v4l2_field field;
	int fid = -1, i;
	int channel_id = 0;

	channel_id = *(int *)(dev_id);
	ch = dev->dev[channel_id];
	field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
	for (i = 0; i < VPIF_NUMOBJECTS; i++) {
		common = &ch->common[i];
		/* If streaming is started in this channel */
		if (0 == common->started)
			continue;

		/* frm_fmt == 1 selects progressive scan for this standard. */
		if (1 == ch->vpifparams.std_info.frm_fmt) {
			if (list_empty(&common->dma_queue))
				continue;

			/* Progressive mode */
			if (!channel_first_int[i][channel_id]) {
				/* Mark status of the cur_frm to
				 * done and unlock semaphore on it */
				do_gettimeofday(&common->cur_frm->ts);
				common->cur_frm->state = VIDEOBUF_DONE;
				wake_up_interruptible(&common->cur_frm->done);
				/* Make cur_frm pointing to next_frm */
				common->cur_frm = common->next_frm;
			}

			channel_first_int[i][channel_id] = 0;
			process_progressive_mode(common);
		} else {
			/* Interlaced mode */
			/* If it is first interrupt, ignore it */

			if (channel_first_int[i][channel_id]) {
				channel_first_int[i][channel_id] = 0;
				continue;
			}

			/* Field tracking is done only on the first object. */
			if (0 == i) {
				ch->field_id ^= 1;
				/* Get field id from VPIF registers */
				fid = vpif_channel_getfid(ch->channel_id + 2);
				/* If fid does not match with stored field id */
				if (fid != ch->field_id) {
					/* Make them in sync */
					if (0 == fid)
						ch->field_id = fid;

					return IRQ_HANDLED;
				}
			}
			process_interlaced_mode(fid, common);
		}
	}

	return IRQ_HANDLED;
}