Example #1
static void
serial8250_set_termios(struct uart_port *port, struct termios *termios,
		       struct termios *old)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned char cval, fcr = 0;
	unsigned long flags;
	unsigned int baud, quot;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		cval = 0x00;
		break;
	case CS6:
		cval = 0x01;
		break;
	case CS7:
		cval = 0x02;
		break;
	default:
	case CS8:
		cval = 0x03;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		cval |= 0x04;
	if (termios->c_cflag & PARENB)
		cval |= UART_LCR_PARITY;
	if (!(termios->c_cflag & PARODD))
		cval |= UART_LCR_EPAR;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR)
		cval |= UART_LCR_SPAR;
#endif

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = serial8250_get_divisor(port, baud);

	/*
	 * Work around a bug in the Oxford Semiconductor 952 rev B
	 * chip which causes it to seriously miscalculate baud rates
	 * when DLL is 0.
	 */
	if ((quot & 0xff) == 0 && up->port.type == PORT_16C950 &&
	    up->rev == 0x5201)
		quot++;

	if (uart_config[up->port.type].flags & UART_USE_FIFO) {
		if (baud < 2400)
			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIGGER_1;
		else
			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIGGER_8;
	}

	/*
	 * Ok, we're now changing the port state.  Do it with
	 * interrupts disabled.
	 */
	spin_lock_irqsave(&up->port.lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
	if (termios->c_iflag & INPCK)
		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		up->port.read_status_mask |= UART_LSR_BI;

	/*
	 * Characters to ignore
	 */
	up->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
	if (termios->c_iflag & IGNBRK) {
		up->port.ignore_status_mask |= UART_LSR_BI;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			up->port.ignore_status_mask |= UART_LSR_OE;
	}

	/*
	 * ignore all characters if CREAD is not set
	 */
	if ((termios->c_cflag & CREAD) == 0)
		up->port.ignore_status_mask |= UART_LSR_DR;

	/*
	 * CTS flow control flag and modem status interrupts
	 */
	up->ier &= ~UART_IER_MSI;
	if (UART_ENABLE_MS(&up->port, termios->c_cflag))
		up->ier |= UART_IER_MSI;

	serial_out(up, UART_IER, up->ier);
	serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
	serial_outp(up, UART_DLL, quot & 0xff);		/* LS of divisor */
	serial_outp(up, UART_DLM, quot >> 8);		/* MS of divisor */
	serial_outp(up, UART_LCR, cval);		/* reset DLAB */
	up->lcr = cval;					/* Save LCR */
	if (up->port.type != PORT_16750) {
		if (fcr & UART_FCR_ENABLE_FIFO) {
			/* emulated UARTs (Lucent Venus 167x) need two steps */
			serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
		}
		serial_outp(up, UART_FCR, fcr);		/* set fcr */
	}
	spin_unlock_irqrestore(&up->port.lock, flags);
}
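
Note the shape shared by most of the examples in this list: everything that can
take time or sleep (baud and divisor computation, FIFO trigger selection) is
done before the lock, and only the register writes and shared-state updates sit
inside the irqsave critical section. A minimal sketch of that idiom, with
hypothetical my_* names that are not part of the 8250 driver:

#include <linux/spinlock.h>

struct my_port {
	spinlock_t lock;
	unsigned int divisor;		/* shared with the IRQ handler */
};

static void my_port_update(struct my_port *p, unsigned int quot)
{
	unsigned long flags;

	/* Disables local IRQs and saves the previous IRQ state, so this
	 * is safe even if an interrupt handler takes p->lock on the
	 * same CPU. */
	spin_lock_irqsave(&p->lock, flags);
	p->divisor = quot;		/* keep the critical section short */
	spin_unlock_irqrestore(&p->lock, flags);
}
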
Example #2
static int serial8250_startup(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned long flags;
	int retval;

	/*
	 * Clear the FIFO buffers and disable them.
	 * (they will be re-enabled in set_termios())
	 */
	if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) {
		serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
		serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
				UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		serial_outp(up, UART_FCR, 0);
	}

	/*
	 * Clear the interrupt registers.
	 */
	(void) serial_inp(up, UART_LSR);
	(void) serial_inp(up, UART_RX);
	(void) serial_inp(up, UART_IIR);
	(void) serial_inp(up, UART_MSR);

	/*
	 * At this point, there's no way the LSR could still be 0xff;
	 * if it is, then bail out, because there's likely no UART
	 * here.
	 */
	if (!(up->port.flags & UPF_BUGGY_UART) &&
	    (serial_inp(up, UART_LSR) == 0xff)) {
		printk("ttyS%d: LSR safety check engaged!\n", up->port.line);
		return -ENODEV;
	}

	retval = serial_link_irq_chain(up);
	if (retval)
		return retval;

	/*
	 * Now, initialize the UART
	 */
	serial_outp(up, UART_LCR, UART_LCR_WLEN8);

	spin_lock_irqsave(&up->port.lock, flags);
	if (up->port.flags & UPF_FOURPORT) {
		if (!is_real_interrupt(up->port.irq))
			up->port.mctrl |= TIOCM_OUT1;
	} else
		/*
		 * Most PC uarts need OUT2 raised to enable interrupts.
		 */
		if (is_real_interrupt(up->port.irq))
			up->port.mctrl |= TIOCM_OUT2;

	serial8250_set_mctrl(&up->port, up->port.mctrl);
	spin_unlock_irqrestore(&up->port.lock, flags);

	/*
	 * Finally, enable interrupts.  Note: Modem status interrupts
	 * are set via set_termios(), which will be occurring imminently
	 * anyway, so we don't enable them here.
	 */
	up->ier = UART_IER_RLSI | UART_IER_RDI;
	serial_outp(up, UART_IER, up->ier);

	if (up->port.flags & UPF_FOURPORT) {
		unsigned int icp;
		/*
		 * Enable interrupts on the AST Fourport board
		 */
		icp = (up->port.iobase & 0xfe0) | 0x01f;
		outb_p(0x80, icp);
		(void) inb_p(icp);
	}

	/*
	 * And clear the interrupt registers again for luck.
	 */
	(void) serial_inp(up, UART_LSR);
	(void) serial_inp(up, UART_RX);
	(void) serial_inp(up, UART_IIR);
	(void) serial_inp(up, UART_MSR);

	return 0;
}
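
The four (void) serial_inp() reads, done both before and after setup, are not
dead code: on a 16550-class UART, reading LSR, RX, IIR and MSR clears latched
line-status, receive, interrupt and modem-delta state. A hedged, generic sketch
of the same read-to-clear idiom (the offsets are the standard 8250 register
map; the helper name is made up):

#include <linux/io.h>

static void my_clear_latched_status(void __iomem *base)
{
	(void)readb(base + 5);	/* LSR: clears line-status error bits */
	(void)readb(base + 0);	/* RBR: drains one stale receive byte */
	(void)readb(base + 2);	/* IIR: acknowledges a pending interrupt */
	(void)readb(base + 6);	/* MSR: clears the delta/change bits */
}
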
Example #3
int __msm_adsp_write(struct msm_adsp_module *module, unsigned dsp_queue_addr,
		   void *cmd_buf, size_t cmd_size)
{
	uint32_t ctrl_word;
	uint32_t dsp_q_addr;
	uint32_t dsp_addr;
	uint32_t cmd_id = 0;
	int cnt = 0;
	int ret_status = 0;
	unsigned long flags;
	struct adsp_info *info;

	if (!module || !cmd_buf) {
		MM_ERR("Called with NULL parameters\n");
		return -EINVAL;
	}
	info = module->info;
	spin_lock_irqsave(&adsp_write_lock, flags);

	if (module->state != ADSP_STATE_ENABLED) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		MM_ERR("module %s not enabled before write\n", module->name);
		return -ENODEV;
	}
	if (adsp_validate_module(module->id)) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		MM_ERR("module id validation failed %s  %d\n",
				module->name, module->id);
		return -ENXIO;
	}
	if (dsp_queue_addr >= QDSP_MAX_NUM_QUEUES) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		MM_ERR("Invalid Queue Index: %d\n", dsp_queue_addr);
		return -ENXIO;
	}
	if (adsp_validate_queue(module->id, dsp_queue_addr, cmd_size)) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		return -EINVAL;
	}
	dsp_q_addr = adsp_get_queue_offset(info, dsp_queue_addr);
	dsp_q_addr &= ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M;

	/* Poll until the ADSP is ready to accept a command.
	 * Wait up to 100us and return an error if it's not responding.
	 * If this returns an error, we need to disable ALL modules and
	 * then retry.
	 */
	while (((ctrl_word = readl(info->write_ctrl)) &
		ADSP_RTOS_WRITE_CTRL_WORD_READY_M) !=
		ADSP_RTOS_WRITE_CTRL_WORD_READY_V) {
		if (cnt > 50) {
			MM_ERR("timeout waiting for DSP write ready\n");
			ret_status = -EIO;
			goto fail;
		}
		MM_DBG("waiting for DSP write ready\n");
		udelay(2);
		cnt++;
	}

	/* Set the mutex bits */
	ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M);
	ctrl_word |=  ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V;

	/* Clear the command bits */
	ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_CMD_M);

	/* Set the queue address bits */
	ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M);
	ctrl_word |= dsp_q_addr;

	writel(ctrl_word, info->write_ctrl);

	/* Generate an interrupt to the DSP.  This notifies the DSP that
	 * we are about to send a command on this particular queue.  The
	 * DSP will in response change its state.
	 */
	writel(1, info->send_irq);

	/* Poll until the adsp responds to the interrupt; this does not
	 * generate an interrupt from the adsp.  This should happen within
	 * 5ms.
	 */
	cnt = 0;
	while ((readl(info->write_ctrl) &
		ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M) ==
		ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V) {
		if (cnt > 2500) {
			MM_ERR("timeout waiting for adsp ack\n");
			ret_status = -EIO;
			goto fail;
		}
		udelay(2);
		cnt++;
	}

	/* Read the ctrl word */
	ctrl_word = readl(info->write_ctrl);

	if ((ctrl_word & ADSP_RTOS_WRITE_CTRL_WORD_STATUS_M) !=
	    ADSP_RTOS_WRITE_CTRL_WORD_NO_ERR_V) {
		ret_status = -EAGAIN;
		goto fail;
	} else {
		/* No error */
		/* Get the DSP buffer address */
		dsp_addr = (ctrl_word & ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M) +
			   (uint32_t)MSM_AD5_BASE;

		if (dsp_addr < (uint32_t)(MSM_AD5_BASE + QDSP_RAMC_OFFSET)) {
			uint16_t *buf_ptr = (uint16_t *) cmd_buf;
			uint16_t *dsp_addr16 = (uint16_t *)dsp_addr;
			cmd_size /= sizeof(uint16_t);

			/* Save the command ID */
			cmd_id = (uint32_t) buf_ptr[0];

			/* Copy the command to DSP memory */
			cmd_size++;
			while (--cmd_size)
				*dsp_addr16++ = *buf_ptr++;
		} else {
			uint32_t *buf_ptr = (uint32_t *) cmd_buf;
			uint32_t *dsp_addr32 = (uint32_t *)dsp_addr;
			cmd_size /= sizeof(uint32_t);

			/* Save the command ID */
			cmd_id = buf_ptr[0];

			cmd_size++;
			while (--cmd_size)
				*dsp_addr32++ = *buf_ptr++;
		}

		/* Set the mutex bits */
		ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M);
		ctrl_word |=  ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V;

		/* Set the command bits to write done */
		ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_CMD_M);
		ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_CMD_WRITE_DONE_V;

		/* Set the queue address bits */
		ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M);
		ctrl_word |= dsp_q_addr;

		writel(ctrl_word, info->write_ctrl);

		/* Generate an interrupt to the DSP.  It does not respond with
		 * an interrupt, and we do not need to wait for it to
		 * acknowledge, because it will hold the mutex lock until it's
		 * ready to receive more commands again.
		 */
		writel(1, info->send_irq);

		module->num_commands++;
	} /* Ctrl word status bits were 00, no error in the ctrl word */

fail:
	spin_unlock_irqrestore(&adsp_write_lock, flags);
	return ret_status;
}
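
Both waits above follow the same bounded-poll shape: spin on a status word with
udelay() between reads, and fail with -EIO once a fixed budget is spent
(roughly 100us for the ready poll, 5ms for the ack poll). A free-standing
sketch of that helper, assuming made-up names and budgets rather than the real
msm_adsp ones:

#include <linux/delay.h>
#include <linux/io.h>

static int my_poll_status(void __iomem *reg, u32 mask, u32 want, int max_iters)
{
	int cnt;

	for (cnt = 0; (readl(reg) & mask) != want; cnt++) {
		if (cnt > max_iters)
			return -EIO;	/* caller decides how to recover */
		udelay(2);
	}
	return 0;
}
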
Example #4
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}
	//skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
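
The error handling is symmetric with the setup: the function owns both the skb
and the urb until a successful submit hands them to the USB core, so every
failure path frees exactly what it still owns. A hedged sketch of that
ownership rule in isolation (my_submit_rx and its parameters are invented for
illustration):

#include <linux/usb.h>

static int my_submit_rx(struct usb_device *udev, unsigned int pipe, void *buf,
			int len, usb_complete_t complete, void *context)
{
	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
	int ret;

	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete, context);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_free_urb(urb);	/* submit failed: the urb is still ours */
	return ret;
}
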
Example #5
/**
 * mempool_alloc_MikeT - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 * @count:     when true, count a sleeping allocation in waitCounter.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc_MikeT(mempool_t *pool, gfp_t gfp_mask, bool count)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}
	if (count)
		waitCounter++;
	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
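
The ordering at the end is the load-bearing part: the task registers on
pool->wait *before* dropping the spinlock, so a mempool_free() that runs in the
window between the unlock and the schedule still sees the waiter and wakes it.
A stripped-down sketch of that handshake under the old wait_queue_t API used
above (the pool logic itself is omitted; the caller re-takes the lock
afterwards if it needs it):

#include <linux/wait.h>
#include <linux/sched.h>

static void my_wait_for_refill(wait_queue_head_t *wq, spinlock_t *lock,
			       unsigned long flags)
{
	DEFINE_WAIT(wait);

	/* Queue ourselves while still holding the lock... */
	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irqrestore(lock, flags);

	/* ...so a concurrent wake_up() cannot be lost here. */
	io_schedule_timeout(5 * HZ);
	finish_wait(wq, &wait);
}
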
Example #6
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct sgiseeq_private *sp = netdev_priv(dev);
    struct hpc3_ethregs *hregs = sp->hregs;
    unsigned long flags;
    struct sgiseeq_tx_desc *td;
    int len, entry;

    spin_lock_irqsave(&sp->tx_lock, flags);

    /* Setup... */
    len = skb->len;
    if (len < ETH_ZLEN) {
        if (skb_padto(skb, ETH_ZLEN)) {
            spin_unlock_irqrestore(&sp->tx_lock, flags);
            return NETDEV_TX_OK;
        }
        len = ETH_ZLEN;
    }

    dev->stats.tx_bytes += len;
    entry = sp->tx_new;
    td = &sp->tx_desc[entry];
    dma_sync_desc_cpu(dev, td);

    /* Create entry.  There are so many races with adding a new
     * descriptor to the chain:
     * 1) Assume that the HPC is off processing a DMA chain while
     *    we are changing all of the following.
     * 2) Do not allow the HPC to look at a new descriptor until
     *    we have completely set up its state.  This means, do
     *    not clear HPCDMA_EOX in the current last descriptor
     *    until the one we are adding looks consistent and could
     *    be processed right now.
     * 3) The tx interrupt code must notice when we've added a new
     *    entry and the HPC got to the end of the chain before we
     *    added this new entry and restarted it.
     */
    td->skb = skb;
    td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
                                   len, DMA_TO_DEVICE);
    td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                       HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
    dma_sync_desc_dev(dev, td);
    if (sp->tx_old != sp->tx_new) {
        struct sgiseeq_tx_desc *backend;

        backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
        dma_sync_desc_cpu(dev, backend);
        backend->tdma.cntinfo &= ~HPCDMA_EOX;
        dma_sync_desc_dev(dev, backend);
    }
    sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

    /* Maybe kick the HPC back into motion. */
    if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
        kick_tx(dev, sp, hregs);

    if (!TX_BUFFS_AVAIL(sp))
        netif_stop_queue(dev);
    spin_unlock_irqrestore(&sp->tx_lock, flags);

    return NETDEV_TX_OK;
}
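
NEXT_TX()/PREV_TX() are the usual power-of-two ring-index helpers; the EOX
handoff above works only because the driver can name the previous descriptor in
O(1). A hedged sketch of what such helpers typically look like (the real
definitions live in sgiseeq.h, and the ring size here is a placeholder):

#define MY_TX_RING_SIZE	16	/* must be a power of two */
#define MY_NEXT_TX(i)	(((i) + 1) & (MY_TX_RING_SIZE - 1))
#define MY_PREV_TX(i)	(((i) - 1) & (MY_TX_RING_SIZE - 1))
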
Example #7
static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
				struct zfcp_adapter *adapter,
				struct scsi_cmnd *scsi_cmnd,
				struct zfcp_fsf_req *fsf_req,
				unsigned long old_req_id)
{
	struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
	unsigned long flags;
	struct fcp_rsp_iu *fcp_rsp;
	char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
	int offset = 0, buflen = 0;

	spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
	do {
		memset(rec, 0, sizeof(*rec));
		if (offset == 0) {
			strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
			strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
			if (scsi_cmnd != NULL) {
				if (scsi_cmnd->device) {
					rec->scsi_id = scsi_cmnd->device->id;
					rec->scsi_lun = scsi_cmnd->device->lun;
				}
				rec->scsi_result = scsi_cmnd->result;
				rec->scsi_cmnd = (unsigned long)scsi_cmnd;
				rec->scsi_serial = scsi_cmnd->serial_number;
				memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
					min((int)scsi_cmnd->cmd_len,
						ZFCP_DBF_SCSI_OPCODE));
				rec->scsi_retries = scsi_cmnd->retries;
				rec->scsi_allowed = scsi_cmnd->allowed;
			}
			if (fsf_req != NULL) {
				fcp_rsp = (struct fcp_rsp_iu *)
				    &(fsf_req->qtcb->bottom.io.fcp_rsp);
				fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
				fcp_sns_info =
				    zfcp_get_fcp_sns_info_ptr(fcp_rsp);

				rec->rsp_validity = fcp_rsp->validity.value;
				rec->rsp_scsi_status = fcp_rsp->scsi_status;
				rec->rsp_resid = fcp_rsp->fcp_resid;
				if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
					rec->rsp_code = *(fcp_rsp_info + 3);
				if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
					buflen = min((int)fcp_rsp->fcp_sns_len,
						     ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
					rec->sns_info_len = buflen;
					memcpy(rec->sns_info, fcp_sns_info,
					       min(buflen,
						   ZFCP_DBF_SCSI_FCP_SNS_INFO));
					offset += min(buflen,
						      ZFCP_DBF_SCSI_FCP_SNS_INFO);
				}

				rec->fsf_reqid = (unsigned long)fsf_req;
				rec->fsf_seqno = fsf_req->seq_no;
				rec->fsf_issued = fsf_req->issued;
			}
			rec->old_fsf_reqid = old_req_id;
		} else {
			strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
			dump->total_size = buflen;
			dump->offset = offset;
			dump->size = min(buflen - offset,
					 (int)sizeof(struct
						     zfcp_scsi_dbf_record) -
					 (int)sizeof(struct zfcp_dbf_dump));
			memcpy(dump->data, fcp_sns_info + offset, dump->size);
			offset += dump->size;
		}
		debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec));
	} while (offset < buflen);
	spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
}
Example #8
static void musb_id_pin_work(struct work_struct *data)
{
	u8 devctl = 0;
	unsigned long flags;

	spin_lock_irqsave(&mtk_musb->lock, flags);
	musb_generic_disable(mtk_musb);
	spin_unlock_irqrestore(&mtk_musb->lock, flags);

	down(&mtk_musb->musb_lock);
	DBG(0, "work start, is_host=%d, boot mode(%d)\n", mtk_musb->is_host, get_boot_mode());
#ifdef CONFIG_MTK_KERNEL_POWER_OFF_CHARGING
	if (get_boot_mode() == KERNEL_POWER_OFF_CHARGING_BOOT || get_boot_mode() == LOW_POWER_OFF_CHARGING_BOOT) {
		DBG(0, "do nothing due to in power off charging\n");
		goto out;
	}
#endif
	if (mtk_musb->in_ipo_off) {
		DBG(0, "do nothing due to in_ipo_off\n");
		goto out;
	}

	mtk_musb->is_host = musb_is_host();
	DBG(0, "musb is as %s\n", mtk_musb->is_host?"host":"device");
	switch_set_state((struct switch_dev *)&otg_state, mtk_musb->is_host);

	if (mtk_musb->is_host) {
		/* setup fifo for host mode */
		ep_config_from_table_for_host(mtk_musb);
		wake_lock(&mtk_musb->usb_lock);
		musb_platform_set_vbus(mtk_musb, 1);

	/* for no VBUS sensing IP*/
	#if 1
		/* wait VBUS ready */
		msleep(100);
		/* clear session*/
		devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
		musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, (devctl&(~MUSB_DEVCTL_SESSION)));
		/* USB MAC OFF*/
		/* VBUSVALID=0, AVALID=0, BVALID=0, SESSEND=1, IDDIG=X, IDPULLUP=1 */
		USBPHY_SET8(0x6c, 0x11);
		USBPHY_CLR8(0x6c, 0x2e);
		USBPHY_SET8(0x6d, 0x3f);
		DBG(0, "force PHY to idle, 0x6d=%x, 0x6c=%x\n", USBPHY_READ8(0x6d), USBPHY_READ8(0x6c));
		/* wait */
		mdelay(5);
		/* restart session */
		devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
		musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, (devctl | MUSB_DEVCTL_SESSION));
		/* USB MAC ON and Host Mode */
		/* VBUSVALID=1, AVALID=1, BVALID=1, SESSEND=0, IDDIG=0, IDPULLUP=1 */
		USBPHY_CLR8(0x6c, 0x10);
		USBPHY_SET8(0x6c, 0x2d);
		USBPHY_SET8(0x6d, 0x3f);
		DBG(0, "force PHY to host mode, 0x6d=%x, 0x6c=%x\n", USBPHY_READ8(0x6d), USBPHY_READ8(0x6c));
	#endif

		musb_start(mtk_musb);
		MUSB_HST_MODE(mtk_musb);
		switch_int_to_device(mtk_musb);
	} else {
		DBG(0, "devctl is %x\n", musb_readb(mtk_musb->mregs, MUSB_DEVCTL));
		musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, 0);
		if (wake_lock_active(&mtk_musb->usb_lock))
			wake_unlock(&mtk_musb->usb_lock);
		musb_platform_set_vbus(mtk_musb, 0);

	/* for no VBUS sensing IP */
	#if 1
	/* USB MAC OFF*/
		/* VBUSVALID=0, AVALID=0, BVALID=0, SESSEND=1, IDDIG=X, IDPULLUP=1 */
		USBPHY_SET8(0x6c, 0x11);
		USBPHY_CLR8(0x6c, 0x2e);
		USBPHY_SET8(0x6d, 0x3f);
		DBG(0, "force PHY to idle, 0x6d=%x, 0x6c=%x\n", USBPHY_READ8(0x6d), USBPHY_READ8(0x6c));
	#endif

		musb_stop(mtk_musb);
		mtk_musb->xceiv->state = OTG_STATE_B_IDLE;
		MUSB_DEV_MODE(mtk_musb);
		switch_int_to_host(mtk_musb);
	}
out:
	DBG(0, "work end, is_host=%d\n", mtk_musb->is_host);
	up(&mtk_musb->musb_lock);

}
Example #9
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * the signal was not sent because of SIG_IGN;
		 * we will not get a call back to restart it AND
		 * it should be restarted.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want, is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which solves also another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
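
hrtimer_forward() is what makes the restart correct: it pushes the timer's
expiry forward past "now" in whole intervals and returns how many intervals
were skipped, which the handler above accumulates as the POSIX overrun count.
A minimal sketch of that contract (the 10ms period and function name are
invented; older kernels return unsigned long rather than u64):

#include <linux/hrtimer.h>

static enum hrtimer_restart my_periodic_fn(struct hrtimer *t)
{
	ktime_t now = hrtimer_cb_get_time(t);
	ktime_t period = ktime_set(0, 10 * NSEC_PER_MSEC);

	/* After this call the expiry is strictly after "now". */
	u64 missed = hrtimer_forward(t, now, period);

	(void)missed;	/* a real handler would add this to an overrun count */
	return HRTIMER_RESTART;
}
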
Example #10
int mpc5200_psc_ac97_gpio_reset(int psc_number)
{
	unsigned long flags;
	u32 gpio;
	u32 mux;
	int out;
	int reset;
	int sync;

	if ((!simple_gpio) || (!wkup_gpio))
		return -ENODEV;

	switch (psc_number) {
	case 0:
		reset   = PSC1_RESET;           
		sync    = PSC1_SYNC;            
		out     = PSC1_SDATA_OUT;       
		gpio    = MPC52xx_GPIO_PSC1_MASK;
		break;
	case 1:
		reset   = PSC2_RESET;           
		sync    = PSC2_SYNC;            
		out     = PSC2_SDATA_OUT;       
		gpio    = MPC52xx_GPIO_PSC2_MASK;
		break;
	default:
		pr_err(__FILE__ ": Unable to determine PSC, no ac97 "
		       "cold-reset will be performed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	/* Reconfigure the pin-muxing to GPIO mode */
	mux = in_be32(&simple_gpio->port_config);
	out_be32(&simple_gpio->port_config, mux & (~gpio));

	/* Enable the GPIO pins for output */
	setbits8(&wkup_gpio->wkup_gpioe, reset);
	setbits32(&simple_gpio->simple_gpioe, sync | out);

	setbits8(&wkup_gpio->wkup_ddr, reset);
	setbits32(&simple_gpio->simple_ddr, sync | out);

	/* Assert cold reset */
	clrbits32(&simple_gpio->simple_dvo, sync | out);
	clrbits8(&wkup_gpio->wkup_dvo, reset);

	/* Wait at least 1us */
	udelay(1);

	/* Deassert the reset line */
	setbits8(&wkup_gpio->wkup_dvo, reset);

	/* Wait at least 200ns */
	/* 7 ~= (200ns * timebase) / ns2sec */
	__delay(7);

	/* Restore the pin-muxing */
	out_be32(&simple_gpio->port_config, mux);

	spin_unlock_irqrestore(&gpio_lock, flags);

	return 0;
}
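
setbits8/32() and clrbits8/32() are powerpc I/O helpers that wrap a
read-modify-write of a big-endian register; each step above is therefore a
non-atomic RMW, which is exactly why the whole sequence sits under gpio_lock.
A hedged sketch of what the 32-bit pair amounts to (the real helpers are in
the powerpc asm/io.h):

static inline void my_setbits32(u32 __iomem *addr, u32 set)
{
	out_be32(addr, in_be32(addr) | set);	/* read, OR in bits, write back */
}

static inline void my_clrbits32(u32 __iomem *addr, u32 clear)
{
	out_be32(addr, in_be32(addr) & ~clear);	/* read, mask out bits, write back */
}
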
Example #11
static void h2p2_dbg_leds_event(led_event_t evt)
{
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);

	if (!(led_state & LED_STATE_ENABLED) && evt != led_start)
		goto done;

	switch (evt) {
	case led_start:
		if (fpga)
			led_state |= LED_STATE_ENABLED;
		break;

	case led_stop:
	case led_halted:
		/* all leds off during suspend or shutdown */

		if (!(machine_is_omap_perseus2() || machine_is_omap_h4())) {
			gpio_set_value(GPIO_TIMER, 0);
			gpio_set_value(GPIO_IDLE, 0);
		}

		__raw_writew(~0, &fpga->leds);
		led_state &= ~LED_STATE_ENABLED;
		goto done;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = 0;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		led_state ^= LED_TIMER_ON;

		if (machine_is_omap_perseus2() || machine_is_omap_h4())
			hw_led_state ^= H2P2_DBG_FPGA_P2_LED_TIMER;
		else {
			gpio_set_value(GPIO_TIMER,
					led_state & LED_TIMER_ON);
			goto done;
		}

		break;
#endif

#ifdef CONFIG_LEDS_CPU
	/* LED lit iff busy */
	case led_idle_start:
		if (machine_is_omap_perseus2() || machine_is_omap_h4())
			hw_led_state &= ~H2P2_DBG_FPGA_P2_LED_IDLE;
		else {
			gpio_set_value(GPIO_IDLE, 1);
			goto done;
		}

		break;

	case led_idle_end:
		if (machine_is_omap_perseus2() || machine_is_omap_h4())
			hw_led_state |= H2P2_DBG_FPGA_P2_LED_IDLE;
		else {
			gpio_set_value(GPIO_IDLE, 0);
			goto done;
		}

		break;
#endif

	case led_green_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_GREEN;
		break;
	case led_green_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_GREEN;
		break;

	case led_amber_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_AMBER;
		break;
	case led_amber_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_AMBER;
		break;

	case led_red_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_RED;
		break;
	case led_red_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_RED;
		break;

	case led_blue_on:
		hw_led_state |= H2P2_DBG_FPGA_LED_BLUE;
		break;
	case led_blue_off:
		hw_led_state &= ~H2P2_DBG_FPGA_LED_BLUE;
		break;

	default:
		break;
	}


	if (led_state & LED_STATE_ENABLED)
		__raw_writew(~hw_led_state, &fpga->leds);

done:
	spin_unlock_irqrestore(&lock, flags);
}
Example #12
/* size of returned buffer is part of USB spec */
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			u16 wIndex, char *buf, u16 wLength)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int status, lstatus, retval = 0, len = 0;
	unsigned int port = wIndex - 1;
	unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
	u16 wPortChange, wPortStatus;
	unsigned long flags;

	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
		return -ETIMEDOUT;

	spin_lock_irqsave(&uhci->lock, flags);
	switch (typeReq) {

	case GetHubStatus:
		*(__le32 *)buf = cpu_to_le32(0);
		OK(4);		/* hub power */
	case GetPortStatus:
		if (port >= uhci->rh_numports)
			goto err;

		uhci_check_ports(uhci);
		status = inw(port_addr);

		/* Intel controllers report the OverCurrent bit active on.
		 * VIA controllers report it active off, so we'll adjust the
		 * bit value.  (It's not standardized in the UHCI spec.)
		 */
		if (to_pci_dev(hcd->self.controller)->vendor ==
				PCI_VENDOR_ID_VIA)
			status ^= USBPORTSC_OC;

		/* UHCI doesn't support C_RESET (always false) */
		wPortChange = lstatus = 0;
		if (status & USBPORTSC_CSC)
			wPortChange |= USB_PORT_STAT_C_CONNECTION;
		if (status & USBPORTSC_PEC)
			wPortChange |= USB_PORT_STAT_C_ENABLE;
		if ((status & USBPORTSC_OCC) && !ignore_oc)
			wPortChange |= USB_PORT_STAT_C_OVERCURRENT;

		if (test_bit(port, &uhci->port_c_suspend)) {
			wPortChange |= USB_PORT_STAT_C_SUSPEND;
			lstatus |= 1;
		}
		if (test_bit(port, &uhci->resuming_ports))
			lstatus |= 4;

		/* UHCI has no power switching (always on) */
		wPortStatus = USB_PORT_STAT_POWER;
		if (status & USBPORTSC_CCS)
			wPortStatus |= USB_PORT_STAT_CONNECTION;
		if (status & USBPORTSC_PE) {
			wPortStatus |= USB_PORT_STAT_ENABLE;
			if (status & SUSPEND_BITS)
				wPortStatus |= USB_PORT_STAT_SUSPEND;
		}
		if (status & USBPORTSC_OC)
			wPortStatus |= USB_PORT_STAT_OVERCURRENT;
		if (status & USBPORTSC_PR)
			wPortStatus |= USB_PORT_STAT_RESET;
		if (status & USBPORTSC_LSDA)
			wPortStatus |= USB_PORT_STAT_LOW_SPEED;

		if (wPortChange)
			dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
					wIndex, status, lstatus);

		*(__le16 *)buf = cpu_to_le16(wPortStatus);
		*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
		OK(4);
	case SetHubFeature:		/* We don't implement these */
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			OK(0);
		default:
			goto err;
		}
		break;
	case SetPortFeature:
		if (port >= uhci->rh_numports)
			goto err;

		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			SET_RH_PORTSTAT(USBPORTSC_SUSP);
			OK(0);
		case USB_PORT_FEAT_RESET:
			SET_RH_PORTSTAT(USBPORTSC_PR);

			/* Reset terminates Resume signalling */
			uhci_finish_suspend(uhci, port, port_addr);

			/* USB v2.0 7.1.7.5 */
			uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
			OK(0);
		case USB_PORT_FEAT_POWER:
			/* UHCI has no power switching */
			OK(0);
		default:
			goto err;
		}
		break;
	case ClearPortFeature:
		if (port >= uhci->rh_numports)
			goto err;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PE);

			/* Disable terminates Resume signalling */
			uhci_finish_suspend(uhci, port, port_addr);
			OK(0);
		case USB_PORT_FEAT_C_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PEC);
			OK(0);
		case USB_PORT_FEAT_SUSPEND:
			if (!(inw(port_addr) & USBPORTSC_SUSP)) {

				/* Make certain the port isn't suspended */
				uhci_finish_suspend(uhci, port, port_addr);
			} else if (!test_and_set_bit(port,
						&uhci->resuming_ports)) {
				SET_RH_PORTSTAT(USBPORTSC_RD);

				/* The controller won't allow RD to be set
				 * if the port is disabled.  When this happens
				 * just skip the Resume signalling.
				 */
				if (!(inw(port_addr) & USBPORTSC_RD))
					uhci_finish_suspend(uhci, port,
							port_addr);
				else
					/* USB v2.0 7.1.7.7 */
					uhci->ports_timeout = jiffies +
						msecs_to_jiffies(20);
			}
			OK(0);
		case USB_PORT_FEAT_C_SUSPEND:
			clear_bit(port, &uhci->port_c_suspend);
			OK(0);
		case USB_PORT_FEAT_POWER:
			/* UHCI has no power switching */
			goto err;
		case USB_PORT_FEAT_C_CONNECTION:
			CLR_RH_PORTSTAT(USBPORTSC_CSC);
			OK(0);
		case USB_PORT_FEAT_C_OVER_CURRENT:
			CLR_RH_PORTSTAT(USBPORTSC_OCC);
			OK(0);
		case USB_PORT_FEAT_C_RESET:
			/* this driver won't report these */
			OK(0);
		default:
			goto err;
		}
		break;
	case GetHubDescriptor:
		len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
		memcpy(buf, root_hub_hub_des, len);
		if (len > 2)
			buf[2] = uhci->rh_numports;
		OK(len);
	default:
err:
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&uhci->lock, flags);

	return retval;
}
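
The case bodies end in OK(n) instead of break because uhci-hub.c defines a
small helper macro for exactly this switch; it is equivalent to something like
the following (a hedged reconstruction, not copied from the header):

#define OK(x)	len = (x); break

That is, OK() records the reply length and leaves the switch in one step,
which is why the success cases above never carry their own break.
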
Example #13
static void capture_urb_complete(struct urb *urb)
{
	struct ua101 *ua = urb->context;
	struct ua101_stream *stream = &ua->capture;
	unsigned long flags;
	unsigned int frames, write_ptr;
	bool do_period_elapsed;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN))	/* device disabled */
		goto stream_stopped;

	if (urb->status >= 0 && urb->iso_frame_desc[0].status >= 0)
		frames = urb->iso_frame_desc[0].actual_length /
			stream->frame_bytes;
	else
		frames = 0;

	spin_lock_irqsave(&ua->lock, flags);

	if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
		do_period_elapsed = copy_capture_data(stream, urb, frames);
	else
		do_period_elapsed = false;

	if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (unlikely(err < 0)) {
			spin_unlock_irqrestore(&ua->lock, flags);
			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
				err, usb_error_string(err));
			goto stream_stopped;
		}

		/* append packet size to FIFO */
		write_ptr = ua->rate_feedback_start;
		add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count);
		ua->rate_feedback[write_ptr] = frames;
		if (ua->rate_feedback_count < ua->playback.queue_length) {
			ua->rate_feedback_count++;
			if (ua->rate_feedback_count ==
						ua->playback.queue_length)
				wake_up(&ua->rate_feedback_wait);
		} else {
			/*
			 * Ring buffer overflow; this happens when the playback
			 * stream is not running.  Throw away the oldest entry,
			 * so that the playback stream, when it starts, sees
			 * the most recent packet sizes.
			 */
			add_with_wraparound(ua, &ua->rate_feedback_start, 1);
		}
		if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) &&
		    !list_empty(&ua->ready_playback_urbs))
			tasklet_schedule(&ua->playback_tasklet);
	}

	spin_unlock_irqrestore(&ua->lock, flags);

	if (do_period_elapsed)
		snd_pcm_period_elapsed(stream->substream);

	return;

stream_stopped:
	abort_usb_playback(ua);
	abort_usb_capture(ua);
	abort_alsa_playback(ua);
	abort_alsa_capture(ua);
}
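
The rate-feedback FIFO is a plain array driven by a wrapping index, and
add_with_wraparound() appears to do nothing more than advance that index
modulo the queue length. A hedged sketch consistent with the call sites above
(the real helper lives elsewhere in ua101.c and takes the ua context instead
of an explicit length):

static void my_add_with_wraparound(unsigned int *value, unsigned int add,
				   unsigned int queue_length)
{
	*value += add;
	if (*value >= queue_length)
		*value -= queue_length;	/* valid because add < queue_length */
}
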
Example #14
int mdp4_mddi_pipe_commit(int cndx, int wait)
{
	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int need_dmap_wait = 0;
	int need_ov_wait = 0;
	int cnt = 0, clk_set_on = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return cnt;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);


	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->clk_control = 0;
	vctrl->pan_display++;
	if (!vctrl->clk_enabled) {
		clk_set_on = 1;
		vctrl->clk_enabled = 1;
		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (clk_set_on) {
		pr_debug("%s: SET_CLK_ON\n", __func__);
		mdp_clk_ctrl(1);
		vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
	}

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(MDP4_MIXER0);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		/* Blt */
		if (vctrl->blt_wait) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_dmap_wait = 1;
		}
		if (vctrl->ov_koff != vctrl->ov_done) {
			INIT_COMPLETION(vctrl->ov_comp);
			need_ov_wait = 1;
		}
	} else {
		/* direct out */
		if (vctrl->dmap_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n",
			 __func__, vctrl->ov_koff, vctrl->ov_done,
			vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
			need_dmap_wait = 1;
		}
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (need_dmap_wait) {
		pr_debug("%s: wait4dmap\n", __func__);
		mdp4_mddi_wait4dmap(0);
	}

	if (need_ov_wait) {
		pr_debug("%s: wait4ov\n", __func__);
		mdp4_mddi_wait4ov(0);
	}

	if (pipe->ov_blt_addr) {
		if (vctrl->blt_end) {
			vctrl->blt_end = 0;
			pipe->ov_blt_addr = 0;
			pipe->dma_blt_addr =  0;
		}
	}

	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		vctrl->blt_change = 0;
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/*
			 * free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(MDP4_MIXER0);

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_mddi_blt_ov_update(pipe);
		pipe->ov_cnt++;
		vctrl->ov_koff++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	} else {
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		vctrl->dmap_koff++;
	}
	pr_debug("%s: kickoff, pid=%d\n", __func__, current->pid);
	/* kickoff overlay engine */
	mdp4_stat.kickoff_ov0++;
	outpdw(MDP_BASE + 0x0004, 0);
	mb(); /* make sure kickoff executed */
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_mddi_wait4ov(0);
		else
			mdp4_mddi_wait4dmap(0);
	}

	return cnt;
}
Example #15
int xhci_bus_resume(struct usb_hcd *hcd)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int max_ports, port_index;
	__le32 __iomem **port_array;
	struct xhci_bus_state *bus_state;
	u32 temp;
	unsigned long flags;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	if (time_before(jiffies, bus_state->next_statechange))
		msleep(5);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ESHUTDOWN;
	}

	/* delay the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp &= ~CMD_EIE;
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	port_index = max_ports;
	while (port_index--) {
		/* Check whether this port needs to be resumed.  If so,
		 * resume it and disable remote wakeup. */
		u32 temp;
		int slot_id;

		temp = xhci_readl(xhci, port_array[port_index]);
		if (DEV_SUPERSPEED(temp))
			temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
		else
			temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
		if (test_bit(port_index, &bus_state->bus_suspended) &&
		    (temp & PORT_PLS_MASK)) {
			if (DEV_SUPERSPEED(temp)) {
				xhci_set_link_state(xhci, port_array,
							port_index, XDEV_U0);
			} else {
				xhci_set_link_state(xhci, port_array,
						port_index, XDEV_RESUME);

				spin_unlock_irqrestore(&xhci->lock, flags);
				msleep(20);
				spin_lock_irqsave(&xhci->lock, flags);

				xhci_set_link_state(xhci, port_array,
							port_index, XDEV_U0);
			}
			/* wait for the port to enter U0 and report port link
			 * state change.
			 */
			spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(20);
			spin_lock_irqsave(&xhci->lock, flags);

			/* Clear PLC */
			xhci_test_and_clear_bit(xhci, port_array, port_index,
						PORT_PLC);

			slot_id = xhci_find_slot_id_by_port(hcd,
					xhci, port_index + 1);
			if (slot_id)
				xhci_ring_device(xhci, slot_id);
		} else
			xhci_writel(xhci, temp, port_array[port_index]);

		if (hcd->speed != HCD_USB3) {
			/* disable remote wake up for USB 2.0 */
			__le32 __iomem *addr;
			u32 tmp;

			/* Add one to the port status register address to get
			 * the port power control register address.
			 */
			addr = port_array[port_index] + 1;
			tmp = xhci_readl(xhci, addr);
			tmp &= ~PORT_RWE;
			xhci_writel(xhci, tmp, addr);
		}
	}

	(void) xhci_readl(xhci, &xhci->op_regs->command);

	bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
	/* re-enable irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= CMD_EIE;
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	temp = xhci_readl(xhci, &xhci->op_regs->command);

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
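
Note the repeated unlock/msleep/lock triplets: msleep() may schedule, and
xhci->lock is held with IRQs off, so the lock has to be dropped around every
wait for a port transition and all port state re-read afterwards. A hedged
sketch of that idiom as a helper (not part of xhci; flags is passed by pointer
because spin_lock_irqsave() writes it):

#include <linux/spinlock.h>
#include <linux/delay.h>

static void my_sleep_unlocked(spinlock_t *lock, unsigned long *flags,
			      unsigned int ms)
{
	spin_unlock_irqrestore(lock, *flags);
	msleep(ms);			/* may sleep: the lock must not be held */
	spin_lock_irqsave(lock, *flags);
}
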
Example #16
int mdp4_mddi_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	atomic_set(&vctrl->suspend, 1);

	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
			vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(MDP4_MIXER0);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
		mdp4_mddi_pipe_clean(vp);
	}

	pr_debug("%s-:\n", __func__);
	return ret;
}
Example #17
static void mdp4_mddi_do_blt(struct msm_fb_data_type *mfd, int enable)
{
	unsigned long flags;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int need_wait = 0;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);

	if (mfd->ov0_wb_buf->write_addr == 0) {
		pr_err("%s: no blt_base assigned\n", __func__);
		return;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (enable && pipe->ov_blt_addr == 0) {
		vctrl->blt_change++;
		if (vctrl->dmap_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_wait = 1;
		}
	} else if (enable == 0 && pipe->ov_blt_addr) {
		vctrl->blt_change++;
		if (vctrl->ov_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_wait = 1;
		}
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (need_wait)
		mdp4_mddi_wait4dmap(0);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (enable && pipe->ov_blt_addr == 0) {
		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
		pipe->ov_cnt = 0;
		pipe->dmap_cnt = 0;
		vctrl->ov_koff = vctrl->dmap_koff;
		vctrl->ov_done = vctrl->dmap_done;
		vctrl->blt_free = 0;
		vctrl->blt_wait = 0;
		vctrl->blt_end = 0;
		mdp4_stat.blt_mddi++;
	} else if (enable == 0 && pipe->ov_blt_addr) {
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr =  0;
		vctrl->blt_end = 1;
		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
	}

	pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);

	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
}
Example #18
File: slob.c  Project: 020gzh/linux
/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
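
All of the size bookkeeping above is in slob units rather than bytes:
SLOB_UNITS() rounds a byte count up to whole slob_t-sized blocks. A hedged
sketch of that arithmetic (the real macros sit at the top of slob.c, where
slob_t is defined):

#define MY_SLOB_UNIT		sizeof(slob_t)
#define MY_SLOB_UNITS(size)	(((size) + MY_SLOB_UNIT - 1) / MY_SLOB_UNIT)
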
Example #19
File: slob.c  Project: 020gzh/linux
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
Example #20
static void bfin_mac_adjust_link(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
#if defined(CONFIG_BFIN_MAC_RMII)
			u32 opmode = bfin_read_EMAC_OPMODE();
			switch (phydev->speed) {
			case 10:
				opmode |= RMII_10;
				break;
			case 100:
				opmode &= ~(RMII_10);
				break;
			default:
				printk(KERN_WARNING
					"%s: Ack!  Speed (%d) is not 10/100!\n",
					DRV_NAME, phydev->speed);
				break;
			}
			bfin_write_EMAC_OPMODE(opmode);
#endif

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			new_state = 1;
			lp->old_link = 1;
		}
	} else if (lp->old_link) {
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
Example #21
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
		u16 wIndex, char *buf, u16 wLength)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int max_ports;
	unsigned long flags;
	u32 temp, status;
	int retval = 0;
	__le32 __iomem **port_array;
	int slot_id;
	struct xhci_bus_state *bus_state;
	u16 link_state = 0;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	spin_lock_irqsave(&xhci->lock, flags);
	switch (typeReq) {
	case GetHubStatus:
		/* No power source, over-current reported per port */
		memset(buf, 0, 4);
		break;
	case GetHubDescriptor:
		/* Check to make sure userspace is asking for the USB 3.0 hub
		 * descriptor for the USB 3.0 roothub.  If not, we stall the
		 * endpoint, like external hubs do.
		 */
		if (hcd->speed == HCD_USB3 &&
				(wLength < USB_DT_SS_HUB_SIZE ||
				 wValue != (USB_DT_SS_HUB << 8))) {
			xhci_dbg(xhci, "Wrong hub descriptor type for "
					"USB 3.0 roothub.\n");
			goto error;
		}
		xhci_hub_descriptor(hcd, xhci,
				(struct usb_hub_descriptor *) buf);
		break;
	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
		if ((wValue & 0xff00) != (USB_DT_BOS << 8))
			goto error;

		if (hcd->speed != HCD_USB3)
			goto error;

		memcpy(buf, &usb_bos_descriptor,
				USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
		buf[12] = HCS_U1_LATENCY(temp);
		put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);

		spin_unlock_irqrestore(&xhci->lock, flags);
		return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
	case GetPortStatus:
		if (!wIndex || wIndex > max_ports)
			goto error;
		wIndex--;
		status = 0;
		temp = xhci_readl(xhci, port_array[wIndex]);
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		xhci_dbg(xhci, "get port status, actual port %d status  = 0x%x\n", wIndex, temp);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC))
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
		if ((temp & PORT_RC))
			status |= USB_PORT_STAT_C_RESET << 16;
		/* USB3.0 only */
		if (hcd->speed == HCD_USB3) {
			if ((temp & PORT_PLC))
				status |= USB_PORT_STAT_C_LINK_STATE << 16;
			if ((temp & PORT_WRC))
				status |= USB_PORT_STAT_C_BH_RESET << 16;
		}

		if (hcd->speed != HCD_USB3) {
			if ((temp & PORT_PLS_MASK) == XDEV_U3
					&& (temp & PORT_POWER))
				status |= USB_PORT_STAT_SUSPEND;
		}
		if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
				!DEV_SUPERSPEED(temp)) {
			if ((temp & PORT_RESET) || !(temp & PORT_PE))
				goto error;
			if (time_after_eq(jiffies,
					bus_state->resume_done[wIndex])) {
				xhci_dbg(xhci, "Resume USB2 port %d\n",
					wIndex + 1);
				bus_state->resume_done[wIndex] = 0;
				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_U0);
				xhci_dbg(xhci, "set port %d resume\n",
					wIndex + 1);
				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
								 wIndex + 1);
				if (!slot_id) {
					xhci_dbg(xhci, "slot_id is zero\n");
					goto error;
				}
				xhci_ring_device(xhci, slot_id);
				bus_state->port_c_suspend |= 1 << wIndex;
				bus_state->suspended_ports &= ~(1 << wIndex);
			} else {
				/*
				 * The resume has been signaling for less than
				 * 20ms. Report the port status as SUSPEND,
				 * let the usbcore check port status again
				 * and clear resume signaling later.
				 */
				status |= USB_PORT_STAT_SUSPEND;
			}
		}
		if ((temp & PORT_PLS_MASK) == XDEV_U0
			&& (temp & PORT_POWER)
			&& (bus_state->suspended_ports & (1 << wIndex))) {
			bus_state->suspended_ports &= ~(1 << wIndex);
			if (hcd->speed != HCD_USB3)
				bus_state->port_c_suspend |= 1 << wIndex;
		}
		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			status |= xhci_port_speed(temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER) {
			if (hcd->speed == HCD_USB3)
				status |= USB_SS_PORT_STAT_POWER;
			else
				status |= USB_PORT_STAT_POWER;
		}
		/* Port Link State */
		if (hcd->speed == HCD_USB3) {
			/* resume state is a xHCI internal state.
			 * Do not report it to usb core.
			 */
			if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
				status |= (temp & PORT_PLS_MASK);
		}
		if (bus_state->port_c_suspend & (1 << wIndex))
			status |= 1 << USB_PORT_FEAT_C_SUSPEND;
		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetPortFeature:
		if (wValue == USB_PORT_FEAT_LINK_STATE)
			link_state = (wIndex & 0xff00) >> 3;
		wIndex &= 0xff;
		if (!wIndex || wIndex > max_ports)
			goto error;
		wIndex--;
		temp = xhci_readl(xhci, port_array[wIndex]);
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		temp = xhci_port_state_to_neutral(temp);
		/* FIXME: What new port features do we need to support? */
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			temp = xhci_readl(xhci, port_array[wIndex]);
			if ((temp & PORT_PLS_MASK) != XDEV_U0) {
				/* Resume the port to U0 first */
				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_U0);
				spin_unlock_irqrestore(&xhci->lock, flags);
				msleep(10);
				spin_lock_irqsave(&xhci->lock, flags);
			}
			/* Per the spec, software should not attempt to
			 * suspend a port unless the port reports that it is
			 * in the enabled (PED = ‘1’, PLS < ‘3’) state.
			 */
			temp = xhci_readl(xhci, port_array[wIndex]);
			if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
				|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
				xhci_warn(xhci, "USB core suspending device "
					  "not in U0/U1/U2.\n");
				goto error;
			}

			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					wIndex + 1);
			if (!slot_id) {
				xhci_warn(xhci, "slot_id is zero\n");
				goto error;
			}
			/* unlock to execute stop endpoint commands */
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_stop_device(xhci, slot_id, 1);
			spin_lock_irqsave(&xhci->lock, flags);

			xhci_set_link_state(xhci, port_array, wIndex, XDEV_U3);

			spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(10); /* wait device to enter */
			spin_lock_irqsave(&xhci->lock, flags);

			temp = xhci_readl(xhci, port_array[wIndex]);
			bus_state->suspended_ports |= 1 << wIndex;
			break;
		case USB_PORT_FEAT_LINK_STATE:
			temp = xhci_readl(xhci, port_array[wIndex]);
			/* Software should not attempt to set
			 * port link state above '5' (Rx.Detect) and the port
			 * must be enabled.
			 */
			if ((temp & PORT_PE) == 0 ||
				(link_state > USB_SS_PORT_LS_RX_DETECT)) {
				xhci_warn(xhci, "Cannot set link state.\n");
				goto error;
			}

			if (link_state == USB_SS_PORT_LS_U3) {
				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
						wIndex + 1);
				if (slot_id) {
					/* unlock to execute stop endpoint
					 * commands */
					spin_unlock_irqrestore(&xhci->lock,
								flags);
					xhci_stop_device(xhci, slot_id, 1);
					spin_lock_irqsave(&xhci->lock, flags);
				}
			}

			xhci_set_link_state(xhci, port_array, wIndex,
						link_state);

			spin_unlock_irqrestore(&xhci->lock, flags);
			msleep(20); /* wait device to enter */
			spin_lock_irqsave(&xhci->lock, flags);

			temp = xhci_readl(xhci, port_array[wIndex]);
			if (link_state == USB_SS_PORT_LS_U3)
				bus_state->suspended_ports |= 1 << wIndex;
			break;
		case USB_PORT_FEAT_POWER:
			/*
			 * Turn on ports, even if there isn't per-port switching.
			 * HC will report connect events even before this is set.
			 * However, khubd will ignore the roothub events until
			 * the roothub is registered.
			 */
			xhci_writel(xhci, temp | PORT_POWER,
					port_array[wIndex]);

			temp = xhci_readl(xhci, port_array[wIndex]);
			xhci_dbg(xhci, "set port power, actual port %d status  = 0x%x\n", wIndex, temp);
			break;
		case USB_PORT_FEAT_RESET:
			temp = (temp | PORT_RESET);
			xhci_writel(xhci, temp, port_array[wIndex]);

			temp = xhci_readl(xhci, port_array[wIndex]);
			xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
			break;
		case USB_PORT_FEAT_BH_PORT_RESET:
			temp |= PORT_WR;
			xhci_writel(xhci, temp, port_array[wIndex]);

			temp = xhci_readl(xhci, port_array[wIndex]);
			break;
		default:
			goto error;
		}
		/* unblock any posted writes */
		temp = xhci_readl(xhci, port_array[wIndex]);
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > max_ports)
			goto error;
		wIndex--;
		temp = xhci_readl(xhci, port_array[wIndex]);
		if (temp == 0xffffffff) {
			retval = -ENODEV;
			break;
		}
		/* FIXME: What new port features do we need to support? */
		temp = xhci_port_state_to_neutral(temp);
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			temp = xhci_readl(xhci, port_array[wIndex]);
			xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
			xhci_dbg(xhci, "PORTSC %04x\n", temp);
			if (temp & PORT_RESET)
				goto error;
			if ((temp & PORT_PLS_MASK) == XDEV_U3) {
				if ((temp & PORT_PE) == 0)
					goto error;

				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_RESUME);
				spin_unlock_irqrestore(&xhci->lock, flags);
				msleep(20);
				spin_lock_irqsave(&xhci->lock, flags);
				xhci_set_link_state(xhci, port_array, wIndex,
							XDEV_U0);
			}
			bus_state->port_c_suspend |= 1 << wIndex;

			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					wIndex + 1);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto error;
			}
			xhci_ring_device(xhci, slot_id);
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			bus_state->port_c_suspend &= ~(1 << wIndex);
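			/* fall through: clear the matching change bit in PORTSC too */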
		case USB_PORT_FEAT_C_RESET:
		case USB_PORT_FEAT_C_BH_PORT_RESET:
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_PORT_LINK_STATE:
			xhci_clear_port_change_bit(xhci, wValue, wIndex,
					port_array[wIndex], temp);
			break;
		case USB_PORT_FEAT_ENABLE:
			xhci_disable_port(hcd, xhci, wIndex,
					port_array[wIndex], temp);
			break;
		default:
			goto error;
		}
		break;
	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return retval;
}
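
A pattern worth noting across the SetPortFeature and ClearPortFeature paths above: xhci->lock is held with interrupts disabled, so every sleeping call (msleep(), and xhci_stop_device(), which waits for command completion) is bracketed by an unlock/relock pair, and the port registers are re-read afterwards because the hardware may have moved on in the meantime. A minimal sketch of the idiom, with hypothetical names (my_dev, my_suspend_port):

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/types.h>

/* hypothetical device with a lock-protected, cached port state */
struct my_dev {
	spinlock_t lock;
	u32 portsc;
};

static void my_suspend_port(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	/* ... program the port while holding the lock ... */

	/* msleep() schedules, which is forbidden while holding a
	 * spinlock with interrupts off, so drop the lock around it */
	spin_unlock_irqrestore(&dev->lock, flags);
	msleep(10);
	spin_lock_irqsave(&dev->lock, flags);

	/* any state cached before the sleep is stale now; re-read
	 * the hardware before acting on it */

	spin_unlock_irqrestore(&dev->lock, flags);
}
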
Example #22
0
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (f.wait) {
			if (down_interruptible(&fibctx->wait_sem) < 0) {
				status = -EINTR;
			} else {
				/* lock again and retry once a fib arrives */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}
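
The hand-back path above is also careful about ordering: the fib is unlinked and the count adjusted while dev->fib_lock is held, but the lock is dropped before copy_to_user(), which may fault in user pages and sleep. A minimal sketch of that pop-then-copy pattern, with hypothetical names (my_queue, my_item):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct my_item {			/* hypothetical queued record */
	struct list_head link;
	char payload[64];
};

struct my_queue {
	spinlock_t lock;
	struct list_head items;
};

/* pop one item and hand it to user space */
static int my_pop_to_user(struct my_queue *q, void __user *buf)
{
	struct my_item *item;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&q->items)) {
		spin_unlock_irqrestore(&q->lock, flags);
		return -EAGAIN;
	}
	item = list_first_entry(&q->items, struct my_item, link);
	list_del(&item->link);
	/* unlock before copy_to_user(): it may fault and sleep */
	spin_unlock_irqrestore(&q->lock, flags);

	if (copy_to_user(buf, item->payload, sizeof(item->payload)))
		ret = -EFAULT;
	kfree(item);
	return ret;
}
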
Example #23
0
int xhci_bus_suspend(struct usb_hcd *hcd)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int max_ports, port_index;
	__le32 __iomem **port_array;
	struct xhci_bus_state *bus_state;
	unsigned long flags;

	max_ports = xhci_get_ports(hcd, &port_array);
	bus_state = &xhci->bus_state[hcd_index(hcd)];

	spin_lock_irqsave(&xhci->lock, flags);

	if (hcd->self.root_hub->do_remote_wakeup) {
		port_index = max_ports;
		while (port_index--) {
			if (bus_state->resume_done[port_index] != 0) {
				spin_unlock_irqrestore(&xhci->lock, flags);
				xhci_dbg(xhci, "suspend failed because "
						"port %d is resuming\n",
						port_index + 1);
				return -EBUSY;
			}
		}
	}

	port_index = max_ports;
	bus_state->bus_suspended = 0;
	while (port_index--) {
		/* suspend the port if the port is not suspended */
		u32 t1, t2;
		int slot_id;

		t1 = xhci_readl(xhci, port_array[port_index]);
		t2 = xhci_port_state_to_neutral(t1);

		if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
			xhci_dbg(xhci, "port %d not suspended\n", port_index);
			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					port_index + 1);
			if (slot_id) {
				spin_unlock_irqrestore(&xhci->lock, flags);
				xhci_stop_device(xhci, slot_id, 1);
				spin_lock_irqsave(&xhci->lock, flags);
			}
			t2 &= ~PORT_PLS_MASK;
			t2 |= PORT_LINK_STROBE | XDEV_U3;
			set_bit(port_index, &bus_state->bus_suspended);
		}
		if (hcd->self.root_hub->do_remote_wakeup) {
			if (t1 & PORT_CONNECT) {
				t2 |= PORT_WKOC_E | PORT_WKDISC_E;
				t2 &= ~PORT_WKCONN_E;
			} else {
				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
				t2 &= ~PORT_WKDISC_E;
			}
		} else
			t2 &= ~PORT_WAKE_BITS;

		t1 = xhci_port_state_to_neutral(t1);
		if (t1 != t2)
			xhci_writel(xhci, t2, port_array[port_index]);

		if (hcd->speed != HCD_USB3) {
			/* enable remote wake up for USB 2.0 */
			__le32 __iomem *addr;
			u32 tmp;

			/* Add one to the port status register address to get
			 * the port power control register address.
			 */
			addr = port_array[port_index] + 1;
			tmp = xhci_readl(xhci, addr);
			tmp |= PORT_RWE;
			xhci_writel(xhci, tmp, addr);
		}
	}
	hcd->state = HC_STATE_SUSPENDED;
	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
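
Note how the loop computes the desired state in t2 but writes it back only when it differs from the neutral form of t1; xhci_port_state_to_neutral() masks out the write-1-to-clear change bits so a plain write-back cannot accidentally acknowledge a pending port change. A minimal sketch of that compare-before-write idiom (register layout and mask are hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/* hypothetical RW1C change bits: writing 1 would clear them */
#define MY_PORT_CHANGE_BITS	0x00fe0000

static void my_port_rmw(void __iomem *reg, u32 set, u32 clear)
{
	u32 old = readl(reg);
	/* neutral form: drop RW1C bits so a write-back is harmless */
	u32 neutral = old & ~MY_PORT_CHANGE_BITS;
	u32 new = (neutral & ~clear) | set;

	if (new != neutral)	/* write only when something changes */
		writel(new, reg);
}
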
Example #24
0
/*
 * make sure the MIPI_DSI_WRITEBACK_SIZE defined in the board file
 * provides enough space: h * w * 3 * 2
 */
static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
{
    unsigned long flag;
    int cndx = 0;
    struct vsycn_ctrl *vctrl;
    struct mdp4_overlay_pipe *pipe;

    vctrl = &vsync_ctrl_db[cndx];
    pipe = vctrl->base_pipe;

    mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);

    if (mfd->ov0_wb_buf->write_addr == 0) {
        pr_info("%s: no blt_base assigned\n", __func__);
        return;
    }

    spin_lock_irqsave(&vctrl->spin_lock, flag);
    if (enable && pipe->ov_blt_addr == 0) {
        pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
        pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
        pipe->ov_cnt = 0;
        pipe->dmap_cnt = 0;
        vctrl->ov_koff = 0;
        vctrl->ov_done = 0;
        vctrl->blt_free = 0;
        mdp4_stat.blt_dsi_video++;
        vctrl->blt_change++;
    } else if (enable == 0 && pipe->ov_blt_addr) {
        pipe->ov_blt_addr = 0;
        pipe->dma_blt_addr = 0;
        vctrl->blt_free = 4;	/* 4 commits to free wb buf */
        vctrl->blt_change++;
    }

    pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
            vctrl->blt_change, enable, (int)pipe->ov_blt_addr);

    if (!vctrl->blt_change) {
        spin_unlock_irqrestore(&vctrl->spin_lock, flag);
        return;
    }
    spin_unlock_irqrestore(&vctrl->spin_lock, flag);

    if (vctrl->blt_ctrl == OVERLAY_BLT_SWITCH_TG_OFF) {
        int tg_enabled;
        long long  vtime;

        pr_info("%s: blt enabled by switching TG off\n", __func__);
        vctrl->blt_change = 0;
        tg_enabled = inpdw(MDP_BASE + DSI_VIDEO_BASE) & 0x01;
        if (tg_enabled) {
            mdp4_dsi_video_wait4vsync(0, &vtime);
            MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
            mdp4_dsi_video_wait4dmap_done(0);
        }
        mdp4_overlayproc_cfg(pipe);
        mdp4_overlay_dmap_xy(pipe);
        if (tg_enabled) {
            /*
             * wait for more than 1 ms to make sure the lanes'
             * FIFOs are empty and the lanes are in stop state
             * before resetting the controller
             */
            usleep(2000);
            mipi_dsi_sw_reset();
            MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
        }
    }
}
Example #25
0
void r8712_createbss_cmd_callback(struct _adapter *padapter,
				  struct cmd_obj *pcmd)
{
	unsigned long irqL;
	u8 timer_cancelled;
	struct sta_info *psta = NULL;
	struct wlan_network *pwlan = NULL;
	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ndis_wlan_bssid_ex *pnetwork = (struct ndis_wlan_bssid_ex *)
					      pcmd->parmbuf;
	struct wlan_network *tgt_network = &(pmlmepriv->cur_network);

	if (pcmd->res != H2C_SUCCESS)
		_set_timer(&pmlmepriv->assoc_timer, 1);
	_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
#ifdef __BIG_ENDIAN
	/* endian_convert */
	pnetwork->Length = le32_to_cpu(pnetwork->Length);
	pnetwork->Ssid.SsidLength = le32_to_cpu(pnetwork->Ssid.SsidLength);
	pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy);
	pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi);
	pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse);
	pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork->
					Configuration.ATIMWindow);
	pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork->
					Configuration.DSConfig);
	pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->
					Configuration.FHConfig.DwellTime);
	pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->
					Configuration.FHConfig.HopPattern);
	pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->
					Configuration.FHConfig.HopSet);
	pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork->
					Configuration.FHConfig.Length);
	pnetwork->Configuration.Length = le32_to_cpu(pnetwork->
					Configuration.Length);
	pnetwork->InfrastructureMode = le32_to_cpu(pnetwork->
					   InfrastructureMode);
	pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
#endif
	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	if ((pmlmepriv->fw_state) & WIFI_AP_STATE) {
		psta = r8712_get_stainfo(&padapter->stapriv,
					 pnetwork->MacAddress);
		if (!psta) {
			psta = r8712_alloc_stainfo(&padapter->stapriv,
						   pnetwork->MacAddress);
			if (psta == NULL)
				goto createbss_cmd_fail;
		}
		r8712_indicate_connect(padapter);
	} else {
		pwlan = _r8712_alloc_network(pmlmepriv);
		if (pwlan == NULL) {
			pwlan = r8712_get_oldest_wlan_network(
				&pmlmepriv->scanned_queue);
			if (pwlan == NULL)
				goto createbss_cmd_fail;
			pwlan->last_scanned = jiffies;
		} else
			list_insert_tail(&(pwlan->list),
					 &pmlmepriv->scanned_queue.queue);
		pnetwork->Length = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
		memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
		pwlan->fixed = true;
		memcpy(&tgt_network->network, pnetwork,
			(r8712_get_ndis_wlan_bssid_ex_sz(pnetwork)));
		if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
			pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
		/* _FW_LINKED will be set when one more sta joins us
		 * (see stassoc_event_callback) */
	}
createbss_cmd_fail:
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
	r8712_free_cmd_obj(pcmd);
}
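
Since le32_to_cpu() compiles to an identity on little-endian builds, the #ifdef __BIG_ENDIAN guard above saves nothing at runtime; converting unconditionally is the more common kernel idiom. A minimal sketch (struct and field names hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_bss {				/* hypothetical on-the-wire record */
	__le32 length;
	__le32 privacy;
};

/* on little-endian hosts le32_to_cpu() is a no-op, so converting
 * unconditionally costs nothing and needs no #ifdef __BIG_ENDIAN */
static void my_fixup(const struct my_bss *raw, u32 *length, u32 *privacy)
{
	*length = le32_to_cpu(raw->length);
	*privacy = le32_to_cpu(raw->privacy);
}
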
Example #26
0
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{
    int i, undx;
    int mixer = 0;
    struct vsycn_ctrl *vctrl;
    struct vsync_update *vp;
    struct mdp4_overlay_pipe *pipe;
    struct mdp4_overlay_pipe *real_pipe;
    unsigned long flags;
    int cnt = 0;

    vctrl = &vsync_ctrl_db[cndx];

    mutex_lock(&vctrl->update_lock);
    undx = vctrl->update_ndx;
    vp = &vctrl->vlist[undx];
    pipe = vctrl->base_pipe;
    mixer = pipe->mixer_num;

    if (vp->update_cnt == 0) {
        mutex_unlock(&vctrl->update_lock);
        return cnt;
    }

    vctrl->update_ndx++;
    vctrl->update_ndx &= 0x01;
    vp->update_cnt = 0;     /* reset */
    if (vctrl->blt_free) {
        vctrl->blt_free--;
        if (vctrl->blt_free == 0)
            mdp4_free_writeback_buf(vctrl->mfd, mixer);
    }
    mutex_unlock(&vctrl->update_lock);

    /* free previous committed iommu back to pool */
    mdp4_overlay_iommu_unmap_freelist(mixer);

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    if (vctrl->ov_koff != vctrl->ov_done) {
        spin_unlock_irqrestore(&vctrl->spin_lock, flags);
        pr_err("%s: Error, frame dropped %d %d\n", __func__,
               vctrl->ov_koff, vctrl->ov_done);
        return 0;
    }
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    if (vctrl->blt_change) {
        pipe = vctrl->base_pipe;
        spin_lock_irqsave(&vctrl->spin_lock, flags);
        INIT_COMPLETION(vctrl->dmap_comp);
        INIT_COMPLETION(vctrl->ov_comp);
        vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
        spin_unlock_irqrestore(&vctrl->spin_lock, flags);
        mdp4_dsi_video_wait4dmap(0);
        if (pipe->ov_blt_addr)
            mdp4_dsi_video_wait4ov(0);
    }

    pipe = vp->plist;

    for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
        if (pipe->pipe_used) {
            cnt++;
            real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
            if (real_pipe && real_pipe->pipe_used) {
                /* pipe not unset */
                mdp4_overlay_vsync_commit(pipe);
            }
            /*
             * move this pipe's previous iommu mapping to the
             * freelist; it will be freed at the next pipe_commit
             */
            mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
            pipe->pipe_used = 0; /* clear */
        }
    }

    mdp4_mixer_stage_commit(mixer);

    /* start timing generator & mmu if they are not started yet */
    mdp4_overlay_dsi_video_start();

    pipe = vctrl->base_pipe;
    spin_lock_irqsave(&vctrl->spin_lock, flags);
    if (pipe->ov_blt_addr) {
        mdp4_dsi_video_blt_ov_update(pipe);
        pipe->ov_cnt++;
        INIT_COMPLETION(vctrl->ov_comp);
        vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
        mb();
        vctrl->ov_koff++;
        /* kickoff overlay engine */
        mdp4_stat.kickoff_ov0++;
        outpdw(MDP_BASE + 0x0004, 0);
    } else {
        /* schedule the second-phase update at dmap */
        INIT_COMPLETION(vctrl->dmap_comp);
        vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
    }
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    mdp4_stat.overlay_commit[pipe->mixer_num]++;

    if (wait) {
        if (pipe->ov_blt_addr)
            mdp4_dsi_video_wait4ov(0);
        else
            mdp4_dsi_video_wait4dmap(0);
    }

    return cnt;
}
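
The update_ndx handling at the top is a two-slot ping-pong: new overlay requests queue into vlist[update_ndx] under update_lock while the commit path consumes the other slot, so producers never block on a commit in progress. A minimal sketch of that arrangement, with hypothetical types (my_ctrl, my_update):

#include <linux/mutex.h>

struct my_update {			/* hypothetical pending-work slot */
	int count;			/* entries queued in this slot */
	/* ... payload ... */
};

struct my_ctrl {
	struct mutex lock;
	int ndx;			/* slot currently accepting work */
	struct my_update slots[2];
};

/* take the filled slot for committing and flip producers to the other */
static struct my_update *my_grab_for_commit(struct my_ctrl *c)
{
	struct my_update *vp;

	mutex_lock(&c->lock);
	vp = &c->slots[c->ndx];
	if (vp->count == 0) {		/* nothing queued */
		mutex_unlock(&c->lock);
		return NULL;
	}
	c->ndx ^= 1;			/* same effect as ndx++; ndx &= 0x01 */
	vp->count = 0;			/* mark the grabbed slot consumed */
	mutex_unlock(&c->lock);
	return vp;
}
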
Example #27
0
static void playback_tasklet(unsigned long data)
{
	struct ua101 *ua = (void *)data;
	unsigned long flags;
	unsigned int frames;
	struct ua101_urb *urb;
	bool do_period_elapsed = false;
	int err;

	if (unlikely(!test_bit(USB_PLAYBACK_RUNNING, &ua->states)))
		return;

	/*
	 * Synchronizing the playback rate to the capture rate is done by using
	 * the same sequence of packet sizes for both streams.
	 * Submitting a playback URB therefore requires both a ready URB and
	 * the size of the corresponding capture packet, i.e., both playback
	 * and capture URBs must have been completed.  Since the USB core does
	 * not guarantee that playback and capture complete callbacks are
	 * called alternately, we use two FIFOs for packet sizes and read URBs;
	 * submitting playback URBs is possible as long as both FIFOs are
	 * nonempty.
	 */
	spin_lock_irqsave(&ua->lock, flags);
	while (ua->rate_feedback_count > 0 &&
	       !list_empty(&ua->ready_playback_urbs)) {
		/* take packet size out of FIFO */
		frames = ua->rate_feedback[ua->rate_feedback_start];
		add_with_wraparound(ua, &ua->rate_feedback_start, 1);
		ua->rate_feedback_count--;

		/* take URB out of FIFO */
		urb = list_first_entry(&ua->ready_playback_urbs,
				       struct ua101_urb, ready_list);
		list_del(&urb->ready_list);

		/* fill packet with data or silence */
		urb->urb.iso_frame_desc[0].length =
			frames * ua->playback.frame_bytes;
		if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
			do_period_elapsed |= copy_playback_data(&ua->playback,
								&urb->urb,
								frames);
		else
			memset(urb->urb.transfer_buffer, 0,
			       urb->urb.iso_frame_desc[0].length);

		/* and off you go ... */
		err = usb_submit_urb(&urb->urb, GFP_ATOMIC);
		if (unlikely(err < 0)) {
			spin_unlock_irqrestore(&ua->lock, flags);
			abort_usb_playback(ua);
			abort_alsa_playback(ua);
			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
				err, usb_error_string(err));
			return;
		}
		ua->playback.substream->runtime->delay += frames;
	}
	spin_unlock_irqrestore(&ua->lock, flags);
	if (do_period_elapsed)
		snd_pcm_period_elapsed(ua->playback.substream);
}
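
add_with_wraparound() is not part of this excerpt; judging from the call site, it advances a ring-buffer index modulo the FIFO length, presumably along these lines (the ring-size field name is an assumption):

/* presumed shape of the helper used above: advance a ring-buffer
 * index with wraparound (the ring-size field name is an assumption) */
static inline void add_with_wraparound(struct ua101 *ua,
				       unsigned int *value, unsigned int add)
{
	*value += add;
	if (*value >= ua->rate_feedback_size)
		*value -= ua->rate_feedback_size;
}
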
Example #28
0
int mdp4_dsi_video_off(struct platform_device *pdev)
{
    int ret = 0;
    int cndx = 0;
    struct msm_fb_data_type *mfd;
    struct vsycn_ctrl *vctrl;
    struct mdp4_overlay_pipe *pipe;
    struct vsync_update *vp;
    unsigned long flags;
    int undx, need_wait = 0;

    mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
    vctrl = &vsync_ctrl_db[cndx];
    pipe = vctrl->base_pipe;

    atomic_set(&vctrl->suspend, 1);
    atomic_set(&vctrl->vsync_resume, 0);

    msleep(20);	/* >= 17 ms */

    complete_all(&vctrl->vsync_comp);

    if (pipe->ov_blt_addr) {
        spin_lock_irqsave(&vctrl->spin_lock, flags);
        if (vctrl->ov_koff != vctrl->ov_done)
            need_wait = 1;
        spin_unlock_irqrestore(&vctrl->spin_lock, flags);
        if (need_wait)
            mdp4_dsi_video_wait4ov(0);
    }

    MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);

    dsi_video_enabled = 0;

    mdp_histogram_ctrl_all(FALSE);

    undx = vctrl->update_ndx;
    vp = &vctrl->vlist[undx];
    if (vp->update_cnt) {
        /*
         * pipe's iommu will be freed at next overlay play
         * and iommu_drop statistic will be increased by one
         */
        vp->update_cnt = 0;     /* empty queue */
    }

    if (pipe) {
        /* sanity check: free pipes other than the base layer */
        mdp4_overlay_unset_mixer(pipe->mixer_num);
        if (mfd->ref_cnt == 0) {
            /* adb stop */
            if (pipe->pipe_type == OVERLAY_TYPE_BF)
                mdp4_overlay_borderfill_stage_down(pipe);

            /* base pipe may change after borderfill_stage_down */
            pipe = vctrl->base_pipe;
            mdp4_mixer_stage_down(pipe, 1);
            mdp4_overlay_pipe_free(pipe);
            vctrl->base_pipe = NULL;
            msleep(20);
        } else {
            /* system suspending */
            mdp4_mixer_stage_down(vctrl->base_pipe, 1);
            mdp4_overlay_iommu_pipe_free(
                vctrl->base_pipe->pipe_ndx, 1);
        }
    }

    if (vctrl->vsync_irq_enabled) {
        vctrl->vsync_irq_enabled = 0;
        vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
    }

    /* mdp clock off */
    mdp_clk_ctrl(0);
    mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

    return ret;
}
Example #29
0
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
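
One portability note: init_MUTEX_LOCKED() is the legacy semaphore initializer and was removed from later mainline kernels; the equivalent there is sema_init() with an initial count of zero:

	sema_init(&fibctx->wait_sem, 0);	/* starts locked: the first down() blocks */
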
Example #30
-1
static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
					    size_t length, loff_t *offset)
{
	struct rc_pid_events_file_info *file_info = file->private_data;
	struct rc_pid_event_buffer *events = file_info->events;
	struct rc_pid_event *ev;
	char pb[RC_PID_PRINT_BUF_SIZE];
	int ret;
	int p;
	unsigned long status;

	/* Check if there is something to read. */
	if (events->next_entry == file_info->next_entry) {
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait */
		ret = wait_event_interruptible(events->waitqueue,
				events->next_entry != file_info->next_entry);

		if (ret)
			return ret;
	}

	/* Write out one event per call. I don't care whether it's a little
	 * inefficient, this is debugging code anyway. */
	spin_lock_irqsave(&events->lock, status);

	/* Get an event */
	ev = &(events->ring[file_info->next_entry]);
	file_info->next_entry = (file_info->next_entry + 1) %
				RC_PID_EVENT_RING_SIZE;

	/* Print information about the event. Note that userspace needs to
	 * provide large enough buffers. */
	length = length < RC_PID_PRINT_BUF_SIZE ?
		 length : RC_PID_PRINT_BUF_SIZE;
	p = snprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
	switch (ev->type) {
	case RC_PID_EVENT_TYPE_TX_STATUS:
		p += snprintf(pb + p, length - p, "tx_status %u %u",
			      !(ev->data.flags & IEEE80211_TX_STAT_ACK),
			      ev->data.tx_status.status.rates[0].idx);
		break;
	case RC_PID_EVENT_TYPE_RATE_CHANGE:
		p += snprintf(pb + p, length - p, "rate_change %d %d",
			      ev->data.index, ev->data.rate);
		break;
	case RC_PID_EVENT_TYPE_TX_RATE:
		p += snprintf(pb + p, length - p, "tx_rate %d %d",
			      ev->data.index, ev->data.rate);
		break;
	case RC_PID_EVENT_TYPE_PF_SAMPLE:
		p += snprintf(pb + p, length - p,
			      "pf_sample %d %d %d %d",
			      ev->data.pf_sample, ev->data.prop_err,
			      ev->data.int_err, ev->data.der_err);
		break;
	}
	p += snprintf(pb + p, length - p, "\n");

	spin_unlock_irqrestore(&events->lock, status);

	if (copy_to_user(buf, pb, p))
		return -EFAULT;

	return p;
}
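
Because of the O_NONBLOCK branch above, a reader can either poll the file or simply block until the next event is logged. A minimal user-space sketch, assuming a hypothetical debugfs path for the events file:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical debugfs location; the real path depends on the
	 * per-station rate-control debugfs layout */
	const char *path = "/sys/kernel/debug/ieee80211/phy0/rc_pid_events";
	char buf[256];
	ssize_t n;
	int fd = open(path, O_RDONLY);	/* no O_NONBLOCK: reads block */

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}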