Example #1
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = false;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = true;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = true;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
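rx_complete only defers the heavy lifting: on success it parks the skb on dev->rx_frames and schedules dev->rx_work. The handler behind that work item is not shown here; the following is a hedged sketch of what it typically looks like in this driver family (rx_fill is assumed from context, and the length checks of the real handler are abbreviated):

static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff	*skb;

	if (!dev->port_usb)
		return;

	/* drain the frames parked by rx_complete() and push them up the stack */
	while ((skb = skb_dequeue(&dev->rx_frames))) {
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;
		netif_rx_ni(skb);
	}

	/* re-arm the OUT endpoint with fresh usb_requests */
	if (dev->port_usb)
		rx_fill(dev, GFP_KERNEL);
}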
static void enable_stm_feature(struct diag_smd_info *smd_info)
{
	driver->peripheral_supports_stm[smd_info->peripheral] = ENABLE_STM;
	smd_info->general_context = UPDATE_PERIPHERAL_STM_STATE;
	queue_work(driver->diag_cntl_wq, &(smd_info->diag_general_smd_work));
}
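Both snippets above share the same deferral idiom: initialize a work item once, create a dedicated workqueue, and queue the work from contexts that must not sleep. A minimal, self-contained sketch of that idiom for reference (all names here are hypothetical, not taken from the drivers above):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* hypothetical dedicated workqueue */
static struct work_struct demo_work;

/* runs later in process context, where sleeping is allowed */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: deferred work running\n");
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);	/* safe even from IRQ context */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);	/* wait for pending work to finish */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");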
Example #3
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_pgs_per_blk)) < nr_pgs_per_blk) {

		/* Lock laddr */
		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with the highest number of invalid pages;
 * requires lun->lock to be held
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC we don't care about the round-robin order; instead we
	 * want to maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent the GC-ing lun from devouring pages of a lun with few
	 * free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
Example #4
static irqreturn_t pn544_dev_clk_req_irq_handler(int irq, void *dev_id)
{
	struct pn544_dev *pn544_dev = dev_id;
	queue_work(pn544_dev->wq_clock, &pn544_dev->work_nfc_clock);
	return IRQ_HANDLED;
}
Example #5
static unsigned int ifx_spi_sync_read_write(struct ifx_spi_data *spi_data, unsigned int len)
{
	bool spi_suspend_failed;

	int status;
	int ipc_check;
	struct spi_message	m;
	struct spi_transfer	t = {
		.tx_buf	= spi_data->ifx_tx_buffer,
		.rx_buf	= spi_data->ifx_rx_buffer,
		.len	= len,
	};

#ifdef IFX_SPI_TEGRA_TRANSFER_DURATION	
	static struct timeval transfer_start_time;
	static struct timeval transfer_end_time;
	unsigned long duration_time;
#endif

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	IFX_SPI_DEBUG("");	
	if (spi_data->spi == NULL)
	{
		IFX_SPI_PRINTK("spi_data->spi = NULL");			
		status = -ESHUTDOWN;
	}
	else
	{
#ifdef IFX_SPI_TEGRA_TRANSFER_DURATION
		IFX_SPI_PRINTK("!!! spi_transfer start !!!");
		do_gettimeofday(&transfer_start_time);
		status = spi_sync(spi_data->spi, &m);
		do_gettimeofday(&transfer_end_time);
		duration_time = (transfer_end_time.tv_sec - transfer_start_time.tv_sec) * 1000000
				+ (transfer_end_time.tv_usec - transfer_start_time.tv_usec); /* usec */
		IFX_SPI_PRINTK("!!! spi_transfer end is %06lu ms !!!", (duration_time / 1000)); /* millisec */
#else
		status = spi_sync(spi_data->spi, &m);
#endif
	}

	if (status == 0)
	{
		status = m.status;
		if (status == 0)
		{
			status = m.actual_length;
		}
		/* reset 'count_transfer_failed' whenever an SPI transfer succeeds */
		count_transfer_failed = 0;
	}
	else
	{
		ipc_check = ifx_modem_communicating();
		if (ipc_check == 0)
		{
			IFX_SPI_PRINTK("transmission unsuccessful, [spi_sync] status:%d, count_transfer_failed:%d\n", status, count_transfer_failed);

			spi_suspend_failed = spi_tegra_suspend_failed(spi_data->spi);
			if (spi_suspend_failed)
			{
				IFX_SPI_PRINTK("kernel_restart!!!, spi_suspend_failed=%d \n", spi_suspend_failed);
				kernel_restart(NULL);
			}
		}
		/* increase 'count_transfer_failed' when an SPI transfer fails */
		count_transfer_failed++;
	}
#ifdef IFX_SPI_TX_RX_BUF
	{
		int i;

		IFX_SPI_PRINTK("SPI TX BUFFER: ");
		for (i = 0; i < 16; i++)
			printk("%02x ", spi_data->ifx_tx_buffer[i]);
		IFX_SPI_PRINTK("\n");

		IFX_SPI_PRINTK("SPI RX BUFFER: ");
		for (i = 0; i < 16; i++)
			printk("%02x ", spi_data->ifx_rx_buffer[i]);
	}
#endif
	return status;
}

/*
 * Interrupt service routine, called when the SRDY signal goes HIGH. It sets
 * up transmission and reception if this is a slave-initiated data transfer;
 * for both master-initiated and slave-initiated transfers it then starts the
 * data transfer.
 */
static irqreturn_t ifx_spi_handle_srdy_irq(int irq, void *handle)
{
	struct ifx_spi_data *spi_data = (struct ifx_spi_data *)handle;
	int pin_val;	
	IFX_SPI_DEBUG("");

#ifdef IFX_TEGRA_EDGE_TRIGGER
	pin_val = gpio_get_value(IFX_SRDY);
	IFX_SPI_DEBUG("pin_val = %d", pin_val);

	if (pin_val == 0)
	{
		printk("[SPI][SRDY_IRQ] pin value is ZERO.. Return!!\n");
		IFX_SPI_DEBUG("IRQF_TRIGGER_FALLING in the srdy irq is ignored!!!\n");
		return IRQ_HANDLED;
	}
#endif	   

#if 0
	if (spi_data && spi_data->ifx_tty)	/* prevent the SRDY irq until the spi device is opened */
	{
		IFX_SPI_DEBUG("queue_work is done!");
		queue_work(spi_data->ifx_wq, &spi_data->ifx_work);
	}
	else
	{
		IFX_SPI_PRINTK("Unexpected interrupt happened!");
		IFX_SPI_PRINTK("spi_data = 0x%p, spi_data->ifx_tty = 0x%p", spi_data, spi_data->ifx_tty);
	}
#else

#ifdef WAKE_LOCK_RESUME
	/* hold the wake lock briefly to cover an unexpected interrupt or extra
	 * power consumption (HZ is 1 sec) */
	IFX_SPI_DEBUG("[IFX_SRDY] wake lock : 0x%p", &ifx_gspi_data->wake_lock);
	wake_lock_timeout(&ifx_gspi_data->wake_lock, msecs_to_jiffies(500));
#endif
	IFX_SPI_DEBUG("queue_work is done!");
	queue_work(spi_data->ifx_wq, &spi_data->ifx_work);
#endif
	return IRQ_HANDLED;
}


// ifx_master_initiated_transfer = 1 --> set by ifx_spi_write()
// ifx_master_initiated_transfer = 0 --> default
static void ifx_spi_handle_work(struct work_struct *work)
{
	bool spi_tegra_suspended;
	struct ifx_spi_data *spi_data = container_of(work, struct ifx_spi_data, ifx_work);
	int pm_off_count;

#ifdef IFX_SPI_SPEED_MEASUREMENT
	int id = 0;
	unsigned long diff;
#endif

	IFX_SPI_DEBUG(" start");

	if (1 == spi_data->is_suspended)
	{
		pm_off_count = 1;
		printk("[SPI][handle_work] ifx_spi_handle_work INFO spi_data->is_suspended is (0x%x)\n", spi_data->is_suspended);

		/* wait for the AP to return to the resume state (up to ~1 sec) */
		do
		{
			mdelay(1);
			pm_off_count++;
		} while ((1 == spi_data->is_suspended) && (pm_off_count < (5 * 200)));

		printk("[EBS] ifx_spi_handle_work INFO EXIT is_suspend = 0x%x pm_off_count=%d\n", spi_data->is_suspended, pm_off_count);

		if (1 == spi_data->is_suspended)
		{
			/* TODO: decide how to handle the PM OFF state during this window */
			printk("[SPI][handle_work] ifx_spi_handle_work error is_suspended is (0x%x)\n", spi_data->is_suspended);
		}
	}

	/* wait for tx/rx data when the tegra spi controller is suspended */
	spi_tegra_suspended = spi_tegra_is_suspend(spi_data->spi);
	if (spi_tegra_suspended)
	{
		IFX_SPI_PRINTK("spi_tegra is not resumed!, spi_tegra_suspended = %d\n", spi_tegra_suspended);
		return;
	}

	if (!spi_data->ifx_master_initiated_transfer)
	{
		IFX_SPI_TX_DEBUG("CP Start =================> \n");
#ifdef IFX_SPI_SPEED_MEASUREMENT
		do_gettimeofday(&ulStart[id]);
#endif		
		ifx_spi_setup_transmission(spi_data);
		ifx_spi_set_mrdy_signal(1);
		ifx_spi_send_and_receive_data(spi_data);

#ifdef IFX_SPI_SPEED_MEASUREMENT
		do_gettimeofday(&ulEnd[id]);
		diff = (ulEnd[id].tv_sec - ulStart[id].tv_sec) * 1000 * 1000;
		diff = diff + (ulEnd[id].tv_usec - ulStart[id].tv_usec);
		ulRxThroughtput[id] = ((uiRxlen[id] * 8 * 1000) / diff);
		IFX_SPI_PRINTK("[SPI %d] : RX time = %09lu usec; %04d bytes; %06lu Kbps",
						id, diff, IFX_SPI_MAX_BUF_SIZE + IFX_SPI_HEADER_SIZE,
						((IFX_SPI_MAX_BUF_SIZE + IFX_SPI_HEADER_SIZE) * 8 * 1000) / diff);
#endif

		
		/* Once data transmission is completed, the MRDY signal is lowered */
		if((spi_data->ifx_sender_buf_size == 0)  && (spi_data->ifx_receiver_buf_size == 0))
		{
			ifx_spi_set_mrdy_signal(0);
			ifx_spi_buffer_initialization(spi_data);
		}
		/* We may be processing a slave-initiated transfer while the MUX has
		 * requested a master-initiated one. Once the slave-initiated transfer
		 * is complete, start the master-initiated transfer. */
		if (spi_data->ifx_master_initiated_transfer == 1)
		{
			/* The slave initiated the transfer, both SRDY and MRDY are high,
			 * and at the end of it the MUX has data to send. MRDY raised
			 * during the transfer is not detected at the slave modem, so MRDY
			 * must be raised again. */
			udelay(MRDY_DELAY_TIME);
			ifx_spi_set_mrdy_signal(1);
		}
		IFX_SPI_TX_DEBUG("CP End =================> \n");			
	}
	else
	{
		IFX_SPI_TX_DEBUG("Interrupt by AP25 ===========> \n");		
		
		ifx_spi_setup_transmission(spi_data);     
			
#if defined(LGE_DUMP_SPI_BUFFER)
	dump_spi_buffer("SPI TX", &spi_data->ifx_tx_buffer[4], COL_SIZE);
#endif		
		
		ifx_spi_send_and_receive_data(spi_data);

		/* Once data transmission is completed, the MRDY signal is lowered */
		if(spi_data->ifx_sender_buf_size == 0)
		{
			if(spi_data->ifx_receiver_buf_size == 0)
			{		
				ifx_spi_set_mrdy_signal(0);
				udelay(MRDY_DELAY_TIME) ;				
				ifx_spi_buffer_initialization(spi_data);
			}

			IFX_SPI_TX_DEBUG("ifx_master_initiated_transfer set =  0 <============== \n");		
			spi_data->ifx_master_initiated_transfer = 0;
			complete(&spi_data->ifx_read_write_completion);
		}
	}

#ifdef IFX_SPI_TX_RX_THROUGHTPUT
	if(uiTxlen[spi_data->mdm_tty->index] || uiRxlen[spi_data->mdm_tty->index]) 
	{
		 //ulEnd = getuSecTime() - ulStart;
		 do_gettimeofday(&ulEnd[spi_data->ifx_tty->index]);
		 
		 uidiff[spi_data->ifx_tty->index] = (ulEnd[spi_data->ifx_tty->index].tv_sec - ulStart[spi_data->ifx_tty->index].tv_sec) * 1000 * 1000 ;
		 uidiff[spi_data->ifx_tty->index] = uidiff[spi_data->ifx_tty->index] + (ulEnd[spi_data->ifx_tty->index].tv_usec - ulStart[spi_data->ifx_tty->index].tv_usec);
		 ulTxThroughtput[spi_data->ifx_tty->index] = ((uiTxlen[spi_data->ifx_tty->index]*8*1000)/uidiff[spi_data->ifx_tty->index]);
		 ulRxThroughtput[spi_data->ifx_tty->index] = ((uiRxlen[spi_data->ifx_tty->index]*8*1000)/uidiff[spi_data->ifx_tty->index]);

	 	IFX_SPI_PRINTK("[SPI %d] time	= %d us, Tx(%dbytes) = %luKbps, Rx(%dbytes) = %luKbps, Max(%dbytes) = %luKbps\n", 
						spi_data->ifx_tty->index, uidiff[spi_data->mdm_tty->index], 
						uiTxlen[spi_data->ifx_tty->index], ulTxThroughtput[spi_data->ifx_tty->index], 
						uiRxlen[spi_data->ifx_tty->index], ulRxThroughtput[spi_data->ifx_tty->index],
						IFX_SPI_MAX_BUF_SIZE+IFX_SPI_HEADER_SIZE, 
						((IFX_SPI_MAX_BUF_SIZE+IFX_SPI_HEADER_SIZE)*8*1000)/uidiff[spi_data->ifx_tty->index]);

		uiTxlen[spi_data->ifx_tty->index] = uiRxlen[spi_data->ifx_tty->index] = 0;
		fWrite[spi_data->ifx_tty->index] = 0;
	}
#endif

	IFX_SPI_DEBUG( " end");
}
Example #6
static int hsi_ch_net_write(int chno, void *data, int len)
{
	/* Non blocking write */
	void *buf = NULL;
	static struct x_data *d = NULL;
	int n = 0;
	int flag = 1;
	int ret = 0;

	if (!data) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: data is NULL.\n");
#endif
		return -EINVAL;
	}

#ifdef XMD_TX_MULTI_PACKET
	if (d && hsi_channels[chno].write_queued == HSI_TRUE) {
		if (d->being_used == HSI_FALSE && (d->size + len) < HSI_MEM_LARGE_BLOCK_SIZE) {
#if MCM_DBG_LOG
			printk("\nmcm: Adding in the queued buffer for ch %d\n",chno);
#endif
			buf = d->buf + d->size;
			d->size += len;
			flag = 0;
		} else {
			flag = 1;
		}
	}
#endif
	if (flag) {
#ifdef XMD_TX_MULTI_PACKET
		buf = hsi_mem_alloc(HSI_MEM_LARGE_BLOCK_SIZE);
#else
		buf = hsi_mem_alloc(len);
#endif
	}

	if (!buf) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Failed to alloc memory So Cannot transfer packet.\n");
#endif
#if 1
		hsi_channels[chno].tx_blocked = 1;
#endif
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	if (flag) {
		d = NULL;
		n = write_q(&hsi_channels[chno].tx_q, buf, len, &d);
		if (n != 0) {
			hsi_channels[chno].pending_tx_msgs++;
		}
#if MCM_DBG_LOG
		printk("\nmcm: n = %d\n",n);
#endif
		if (n == 0) {
#if MCM_DBG_LOG
			printk("\nmcm: rmnet TX queue is full for channel %d, So cannot transfer this packet.\n",chno);
#endif
			hsi_channels[chno].tx_blocked = 1;
			hsi_mem_free(buf);

#if 1
			if (hsi_channels[chno].write_queued == HSI_TRUE) {
#if MCM_DBG_LOG
				printk("\nmcm: hsi_ch_net_write wq already in progress\n");
#endif
			}
			else {
				PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
				queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			}
#endif
			ret = -EBUSY;
		} else if (n == 1) {
			PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
			queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			ret = 0;
		}
	}

	return ret;
}
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB)
 * fill up.
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
Example #8
irqreturn_t md32_irq_handler(int irq, void *dev_id)
{
    struct reg_md32_to_host_ipc *md32_irq;
    int reboot = 0;

    md32_irq = (struct reg_md32_to_host_ipc *)MD32_TO_HOST_ADDR;

    if(md32_irq->wdt_int)
    {
        md32_wdt_handler();
        md32_aee_stop();
#if 0
        md32_prepare_aed("md32 wdt", &work_md32_reboot.aed);
        mt_reg_sync_writel(0x0, MD32_BASE);
#endif
        md32_aee_status.m2h_irq = MD32_TO_HOST_REG;
        md32_irq->wdt_int = 0;
        reboot = 1;
    }

    if(md32_irq->pmem_disp_int)
    {
        md32_pmem_abort_handler();
        md32_aee_stop();
#if 0
        md32_prepare_aed("md32 pmem abort", &work_md32_reboot.aed);
        mt_reg_sync_writel(0x0, MD32_BASE);
#endif
        md32_aee_status.m2h_irq = MD32_TO_HOST_REG;
        md32_irq->pmem_disp_int = 0;
        reboot = 1;
    }

    if(md32_irq->dmem_disp_int)
    {
        md32_dmem_abort_handler();
        md32_aee_stop();
#if 0
        md32_prepare_aed("md32 dmem abort", &work_md32_reboot.aed);
        mt_reg_sync_writel(0x0, MD32_BASE);
#endif
        md32_aee_status.m2h_irq = MD32_TO_HOST_REG;
        md32_irq->dmem_disp_int = 0;
        reboot = 1;
    }

    if(md32_irq->md32_ipc_int)
    {
        md32_ipi_handler();
        md32_irq->ipc_md2host = 0;
        md32_irq->md32_ipc_int = 0;
    }

    MD32_TO_HOST_REG = 0x0;

    if(reboot)
    {
        queue_work(wq_md32_reboot, (struct work_struct *)&work_md32_reboot);
    }

    return IRQ_HANDLED;
}
Example #9
static void baseband_xmm_power2_work_func(struct work_struct *work)
{
	struct baseband_xmm_power_work_t *bbxmm_work
		= (struct baseband_xmm_power_work_t *) work;
	int err;

	pr_debug("%s bbxmm_work->state=%d\n", __func__, bbxmm_work->state);

	switch (bbxmm_work->state) {
	case BBXMM_WORK_UNINIT:
		pr_debug("BBXMM_WORK_UNINIT\n");
		/* free baseband irq(s) */
		if (free_ipc_ap_wake_irq) {
			free_irq(gpio_to_irq(baseband_power2_driver_data
				->modem.xmm.ipc_ap_wake), NULL);
			free_ipc_ap_wake_irq = 0;
		}
		break;
	case BBXMM_WORK_INIT:
		pr_debug("BBXMM_WORK_INIT\n");
		/* request baseband irq(s) */
		ipc_ap_wake_state = IPC_AP_WAKE_UNINIT;
		err = request_threaded_irq(
			gpio_to_irq(baseband_power2_driver_data->
			    modem.xmm.ipc_ap_wake),
			NULL,
			(modem_ver < XMM_MODEM_VER_1130)
			? baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
			: baseband_xmm_power2_ver_ge_1130_ipc_ap_wake_irq2,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			"BBXMM_POWER2_IPC_AP_WAKE_IRQ",
			NULL);
		if (err < 0) {
			pr_err("%s - request irq IPC_AP_WAKE_IRQ failed\n",
				__func__);
			return;
		}
		free_ipc_ap_wake_irq = 1;
		ipc_ap_wake_state = IPC_AP_WAKE_IRQ_READY;
		/* go to next state */
		bbxmm_work->state = (modem_flash && !modem_pm)
			? BBXMM_WORK_INIT_FLASH_STEP1
			: (modem_flash && modem_pm)
			? BBXMM_WORK_INIT_FLASH_PM_STEP1
			: (!modem_flash && modem_pm)
			? BBXMM_WORK_INIT_FLASHLESS_PM_STEP1
			: BBXMM_WORK_UNINIT;
		queue_work(workqueue, work);
		break;
	case BBXMM_WORK_INIT_FLASH_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASH_STEP1\n");
		break;
	case BBXMM_WORK_INIT_FLASH_PM_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASH_PM_STEP1\n");
		/* go to next state */
		bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
			? BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1
			: BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1;
		queue_work(workqueue, work);
		break;
	case BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1\n");
		break;
	case BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1\n");
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_STEP1\n");
		/* go to next state */
		bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
			? BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ
			: BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1;
		queue_work(workqueue, work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ"
			" - waiting for IPC_AP_WAKE_IRQ to trigger step1\n");
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1\n");
		baseband_xmm_power2_flashless_pm_ver_lt_1130_step1(work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2\n");
		baseband_xmm_power2_flashless_pm_ver_lt_1130_step2(work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1\n");
		baseband_xmm_power2_flashless_pm_ver_ge_1130_step1(work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP2\n");
		baseband_xmm_power2_flashless_pm_ver_ge_1130_step2(work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP3\n");
		baseband_xmm_power2_flashless_pm_ver_ge_1130_step3(work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4:
		pr_debug("BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP4\n");
		baseband_xmm_power2_flashless_pm_ver_ge_1130_step4(work);
		break;
	}

}
Example #10
void flash_timer_callback(unsigned long data)
{
	/* the work item to queue arrives via the timer's 'data' argument */
	queue_work(flash_wq, (struct work_struct *)data);
	mod_timer(&flash_timer, jiffies + msecs_to_jiffies(10000));
}
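For the cast above to be valid, the timer has to be armed with the address of the work item as its data argument. A sketch of that setup under this assumption, using the legacy timer API that matches the unsigned long callback signature (flash_work_fn and the flash_setup site are hypothetical):

static struct timer_list flash_timer;
static struct work_struct flash_work;	/* hypothetical work item */
static void flash_work_fn(struct work_struct *work);	/* hypothetical handler */

static void flash_setup(void)
{
	INIT_WORK(&flash_work, flash_work_fn);
	setup_timer(&flash_timer, flash_timer_callback,
		    (unsigned long)&flash_work);
	mod_timer(&flash_timer, jiffies + msecs_to_jiffies(10000));
}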
Example #11
static int lge_hsd_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct max1462x_platform_data *pdata = pdev->dev.platform_data;


	struct hsd_info *hi;

	HSD_DBG("lge_hsd_probe\n");

	hi = kzalloc(sizeof(struct hsd_info), GFP_KERNEL);

	if (hi == NULL) {
		HSD_ERR("Failed to allocate headset per device info\n");
		return -ENOMEM;
	}

	if (pdev->dev.of_node) {
		pdata = devm_kzalloc(&pdev->dev, sizeof(struct max1462x_platform_data), GFP_KERNEL);
		if (!pdata) {
			HSD_ERR("Failed to allocate memory\n");
			kfree(hi);
			return -ENOMEM;
		}
		pdev->dev.platform_data = pdata;

		max1462x_parse_dt(&pdev->dev, pdata);
	} else {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			HSD_ERR("No platform data supplied\n");
			kfree(hi);
			return -ENOMEM;
		}
	}
	hi->key_code = pdata->key_code;

	platform_set_drvdata(pdev, hi);

	atomic_set(&hi->btn_state, 0);
	atomic_set(&hi->is_3_pole_or_not, 1);
	atomic_set(&hi->irq_key_enabled, FALSE);

	hi->gpio_mic_en = pdata->gpio_mic_en;
	hi->gpio_detect = pdata->gpio_detect;
	hi->gpio_key = pdata->gpio_key;
	hi->gpio_set_value_func = pdata->gpio_set_value_func;
	hi->gpio_get_value_func = pdata->gpio_get_value_func;
#ifdef CONFIG_SWITCH_MAX1462X_WA
	hi->latency_for_key = msecs_to_jiffies(80);
#else
	hi->latency_for_key = msecs_to_jiffies(50); /* convert milli to jiffies */
#endif
	mutex_init(&hi->mutex_lock);
	INIT_WORK(&hi->work, detect_work);
	INIT_DELAYED_WORK(&hi->work_for_key_pressed, button_pressed);
	INIT_DELAYED_WORK(&hi->work_for_key_released, button_released);

	ret = gpio_request(hi->gpio_mic_en, "gpio_mic_en");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_mic_en) gpio_request\n", hi->gpio_mic_en);
		goto error_02;
	}

	ret = gpio_direction_output(hi->gpio_mic_en, 0);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_mic_en) gpio_direction_input\n", hi->gpio_mic_en);
		goto error_02;
	}
	HSD_DBG("gpio_get_value_cansleep(hi->gpio_mic_en) = %d\n", gpio_get_value_cansleep(hi->gpio_mic_en));

	/* init gpio_detect */
	ret = gpio_request(hi->gpio_detect, "gpio_detect");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_det) gpio_request\n", hi->gpio_detect);
		goto error_03;
	}

	ret = gpio_direction_input(hi->gpio_detect);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_det) gpio_direction_input\n", hi->gpio_detect);
		goto error_03;
	}

	/*init gpio_key */
	ret = gpio_request(hi->gpio_key, "gpio_key");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_key) gpio_request\n", hi->gpio_key);
		goto error_04;
	}

	ret = gpio_direction_input(hi->gpio_key);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_key) gpio_direction_input\n", hi->gpio_key);
		goto error_04;
	}


	/* initialize irq of gpio_key */
	hi->irq_key = gpio_to_irq(hi->gpio_key);

	HSD_DBG("hi->irq_key = %d\n", hi->irq_key);

	if (hi->irq_key < 0) {
		HSD_ERR("Failed to get interrupt number\n");
		ret = hi->irq_key;
		goto error_06;
	}
	ret = request_threaded_irq(hi->irq_key, NULL, button_irq_handler,
			IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, pdev->name, hi);
	if (ret) {
		HSD_ERR("failed to request button irq\n");
		goto error_06;
	}

	ret = irq_set_irq_wake(hi->irq_key, 1);
	if (ret < 0) {
		HSD_ERR("Failed to set irq_key interrupt wake\n");
		goto error_06;
	}
	enable_irq(hi->irq_key);

	hi->irq_detect = gpio_to_irq(hi->gpio_detect);
	HSD_DBG("hi->irq_detect = %d\n", hi->irq_detect);

	if (hi->irq_detect < 0) {
		HSD_ERR("Failed to get interrupt number\n");
		ret = hi->irq_detect;
		goto error_07;
	}
	ret = request_threaded_irq(hi->irq_detect, NULL, earjack_det_irq_handler,
			IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, pdev->name, hi);

	if (ret) {
		HSD_ERR("failed to request button irq\n");
		goto error_07;
	}

	ret = irq_set_irq_wake(hi->irq_detect, 1);
	if (ret < 0) {
		HSD_ERR("Failed to set gpio_detect interrupt wake\n");
		goto error_07;
	}
	/* initialize switch device */
	hi->sdev.name = pdata->switch_name;
	hi->sdev.print_state = lge_hsd_print_state;
	hi->sdev.print_name = lge_hsd_print_name;

	ret = switch_dev_register(&hi->sdev);
	if (ret < 0) {
		HSD_ERR("Failed to register switch device\n");
		goto error_08;
	}

	/* initialize input device */
	hi->input = input_allocate_device();
	if (!hi->input) {
		HSD_ERR("Failed to allocate input device\n");
		ret = -ENOMEM;
		goto error_09;
	}

	hi->input->name = pdata->keypad_name;

	hi->input->id.vendor    = 0x0001;
	hi->input->id.product   = 1;
	hi->input->id.version   = 1;

	/* headset tx noise */
	{
		struct qpnp_vadc_result result;
		int acc_read_value = 0;
		int i, rc;
		int count = 3;

		for (i = 0; i < count; i++)
		{
			rc = qpnp_vadc_read(P_MUX6_1_1,&result);

			if (rc < 0)
			{
				if (rc == -ETIMEDOUT) {
					pr_err("[DEBUG]adc read timeout \n");
				} else {
					pr_err("[DEBUG]adc read error - %d\n", rc);
				}
			}
			else
			{
				acc_read_value = (int)result.physical;
				pr_info("%s: acc_read_value - %d\n", __func__, (int)result.physical);
				break;
			}
		}
	}

	set_bit(EV_SYN, hi->input->evbit);
	set_bit(EV_KEY, hi->input->evbit);
	set_bit(EV_SW, hi->input->evbit);
	set_bit(hi->key_code, hi->input->keybit);
	set_bit(SW_HEADPHONE_INSERT, hi->input->swbit);
	set_bit(SW_MICROPHONE_INSERT, hi->input->swbit);
	input_set_capability(hi->input, EV_KEY, KEY_MEDIA);
	input_set_capability(hi->input, EV_KEY, KEY_VOLUMEUP);
	input_set_capability(hi->input, EV_KEY, KEY_VOLUMEDOWN);
	ret = input_register_device(hi->input);
	if (ret) {
		HSD_ERR("Failed to register input device\n");
		goto error_09;
	}

	/* detect an earjack already inserted at initialization time */
	if (!(hi->gpio_get_value_func(hi->gpio_detect)))
#ifdef CONFIG_MAX1462X_USE_LOCAL_WORK_QUEUE
		queue_work(local_max1462x_workqueue, &(hi->work));
#else
		schedule_work(&(hi->work));
#endif
	return ret;

error_09:
	input_free_device(hi->input);
error_08:
	switch_dev_unregister(&hi->sdev);
error_07:
	free_irq(hi->irq_detect, 0);
error_06:
	free_irq(hi->irq_key, 0);
error_04:
	gpio_free(hi->gpio_key);
error_03:
	gpio_free(hi->gpio_detect);
error_02:
	gpio_free(hi->gpio_mic_en);
	kfree(hi);
	return ret;
}
Example #12
/*
 * wake up an asynchronous call
 */
static void afs_wake_up_async_call(struct afs_call *call)
{
	_enter("");
	queue_work(afs_async_calls, &call->async_work);
}
void touch_led_timedout(unsigned long ptr)
{
    queue_work(tkey_i2c_local->wq, &tkey_i2c_local->work);
}
Example #14
void queue_up_suspend_work(void)
{
	if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON)
		queue_work(autosleep_wq, &suspend_work);
}
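queue_up_suspend_work assumes suspend_work and autosleep_wq already exist. In the mainline autosleep code they are set up roughly as below (abbreviated sketch of kernel/power/autosleep.c):

static suspend_state_t autosleep_state;
static struct workqueue_struct *autosleep_wq;

static void try_to_suspend(struct work_struct *work)
{
	/* ... try pm_suspend(autosleep_state) and re-queue on failure ... */
}
static DECLARE_WORK(suspend_work, try_to_suspend);

int __init pm_autosleep_init(void)
{
	/* ordered workqueue: at most one suspend attempt in flight */
	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
	return autosleep_wq ? 0 : -ENOMEM;
}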
Example #15
static ssize_t debug_flag_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
    unsigned long state = 0;

    HS_DBG();

    if (strncmp(buf, "enable", count - 1) == 0) {
        if (hi->debug_flag & DEBUG_FLAG_ADC) {
            HS_LOG("Debug work is already running");
            return count;
        }
        if (!debug_wq) {
            debug_wq = create_workqueue("debug");
            if (!debug_wq) {
                HS_LOG("Failed to create debug workqueue");
                return count;
            }
        }
        HS_LOG("Enable headset debug");
        mutex_lock(&hi->mutex_lock);
        hi->debug_flag |= DEBUG_FLAG_ADC;
        mutex_unlock(&hi->mutex_lock);
        queue_work(debug_wq, &debug_work);
    } else if (strncmp(buf, "disable", count - 1) == 0) {
        if (!(hi->debug_flag & DEBUG_FLAG_ADC)) {
            HS_LOG("Debug work has been stopped");
            return count;
        }
        HS_LOG("Disable headset debug");
        mutex_lock(&hi->mutex_lock);
        hi->debug_flag &= ~DEBUG_FLAG_ADC;
        mutex_unlock(&hi->mutex_lock);
        if (debug_wq) {
            flush_workqueue(debug_wq);
            destroy_workqueue(debug_wq);
            debug_wq = NULL;
        }
    } else if (strncmp(buf, "debug_log_enable", count - 1) == 0) {
        HS_LOG("Enable headset debug log");
        hi->debug_flag |= DEBUG_FLAG_LOG;
    } else if (strncmp(buf, "debug_log_disable", count - 1) == 0) {
        HS_LOG("Disable headset debug log");
        hi->debug_flag &= ~DEBUG_FLAG_LOG;
    } else if (strncmp(buf, "no_headset", count - 1) == 0) {
        HS_LOG("Headset simulation: no_headset");
        state = BIT_HEADSET | BIT_HEADSET_NO_MIC | BIT_35MM_HEADSET |
                BIT_USB_AUDIO_OUT;
        switch_send_event(state, 0);
    } else if (strncmp(buf, "35mm_mic", count - 1) == 0) {
        HS_LOG("Headset simulation: 35mm_mic");
        state = BIT_HEADSET | BIT_35MM_HEADSET;
        switch_send_event(state, 1);
    } else if (strncmp(buf, "35mm_no_mic", count - 1) == 0) {
        HS_LOG("Headset simulation: 35mm_no_mic");
        state = BIT_HEADSET_NO_MIC | BIT_35MM_HEADSET;
        switch_send_event(state, 1);
    } else if (strncmp(buf, "usb_audio", count - 1) == 0) {
        HS_LOG("Headset simulation: usb_audio");
        state = BIT_USB_AUDIO_OUT;
        switch_send_event(state, 1);
    } else {
        HS_LOG("Invalid parameter");
        return count;
    }

    return count;
}
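Note the lifecycle above: "enable" creates debug_wq and queues debug_work once, while "disable" clears the flag and then flushes and destroys the queue. That only works if the work function re-queues itself while DEBUG_FLAG_ADC stays set. A hedged sketch of such a handler (the polling body and interval are hypothetical; hi, debug_wq, and debug_work come from the code above):

static void debug_work_func(struct work_struct *work);
static DECLARE_WORK(debug_work, debug_work_func);

static void debug_work_func(struct work_struct *work)
{
    /* ... sample and log the headset ADC value here ... */

    msleep(1000);    /* hypothetical polling interval */

    /* re-queue ourselves until "disable" clears the flag and then
     * flushes/destroys the workqueue */
    if (hi->debug_flag & DEBUG_FLAG_ADC)
        queue_work(debug_wq, &debug_work);
}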
Example #16
static irqreturn_t baseband_xmm_power2_ver_lt_1130_ipc_ap_wake_irq2
	(int irq, void *dev_id)
{
	int value;

	pr_debug("%s\n", __func__);

	/* check for platform data */
	if (!baseband_power2_driver_data)
		return IRQ_HANDLED;

	value = gpio_get_value(baseband_power2_driver_data->
		    modem.xmm.ipc_ap_wake);

	/* IPC_AP_WAKE state machine */
	if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
		pr_err("%s - spurious irq\n", __func__);
	} else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
		if (!value) {
			pr_debug("%s - IPC_AP_WAKE_INIT1"
				" - got falling edge\n",
				__func__);
			/* go to IPC_AP_WAKE_INIT1 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
			/* queue work */
			baseband_xmm_power2_work->state =
				BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1;
			queue_work(workqueue, (struct work_struct *)
				baseband_xmm_power2_work);
		} else {
			pr_debug("%s - IPC_AP_WAKE_INIT1"
				" - wait for falling edge\n",
				__func__);
		}
	} else if (ipc_ap_wake_state == IPC_AP_WAKE_INIT1) {
		if (!value) {
			pr_debug("%s - IPC_AP_WAKE_INIT2"
				" - wait for rising edge\n",
				__func__);
		} else {
			pr_debug("%s - IPC_AP_WAKE_INIT2"
				" - got rising edge\n",
				__func__);
			/* go to IPC_AP_WAKE_INIT2 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
			/* queue work */
			baseband_xmm_power2_work->state =
				BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP2;
			queue_work(workqueue, (struct work_struct *)
				baseband_xmm_power2_work);
		}
	} else {
		if (!value) {
			pr_debug("%s - falling\n", __func__);
			ipc_ap_wake_state = IPC_AP_WAKE_L;
		} else {
			pr_debug("%s - rising\n", __func__);
			ipc_ap_wake_state = IPC_AP_WAKE_H;
		}
		return baseband_xmm_power_ipc_ap_wake_irq(irq, dev_id);
	}

	return IRQ_HANDLED;
}
mali_error kbase_instr_hwcnt_enable_internal_sec(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup, bool firstcall)
{
	unsigned long flags, pm_flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	u32 irq_mask;
	int ret;
	u64 shader_cores_needed;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	KBASE_DEBUG_ASSERT(NULL != kbdev);
	KBASE_DEBUG_ASSERT(NULL != setup);
	KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);

	if (firstcall) {
		shader_cores_needed = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);

		/* Override core availability policy to ensure all cores are available */
		kbase_pm_ca_instr_enable(kbdev);

		/* Mark the context as active so the GPU is kept turned on */
		kbase_pm_context_active(kbdev);

		/* Request the cores early on synchronously - we'll release them on any errors
		 * (e.g. instrumentation already active) */
		kbase_pm_request_cores_sync(kbdev, MALI_TRUE, shader_cores_needed);
	}

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

		wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);

		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
		/* Instrumentation is already enabled */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

		if (firstcall)
			goto out_unrequest_cores;
		else
			goto out_err;
	}

	/* Enable interrupt */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | PRFCNT_SAMPLE_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

	/* In use, this context is the owner */
	kbdev->hwcnt.kctx = kctx;
	/* Remember the dump address so we can reprogram it later */
	kbdev->hwcnt.addr = setup->dump_buffer;

	if (firstcall) {
		/* Remember all the settings for suspend/resume */
		if (&kbdev->hwcnt.suspended_state != setup)
			memcpy(&kbdev->hwcnt.suspended_state, setup, sizeof(kbdev->hwcnt.suspended_state));

		/* Request the clean */
		kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
		kbdev->hwcnt.triggered = 0;
		/* Clean&invalidate the caches so we're sure the mmu tables for the dump buffer is valid */
		ret = queue_work(kbdev->hwcnt.cache_clean_wq, &kbdev->hwcnt.cache_clean_work);
		KBASE_DEBUG_ASSERT(ret);
	}
	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	if (firstcall) {
		/* Wait for cacheclean to complete */
		wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
	}

	KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);

	if (firstcall) {
		/* Schedule the context in */
		kbasep_js_schedule_privileged_ctx(kbdev, kctx);
		kbase_pm_context_idle(kbdev);
	} else {
		kbase_mmu_update(kctx);
	}

	/* Configure */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),     setup->dump_buffer & 0xFFFFFFFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),     setup->dump_buffer >> 32,        kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),       setup->jm_bm,                    kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),   setup->shader_bm,                kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), setup->l3_cache_bm,              kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),   setup->mmu_l2_bm,                kctx);
	/* Due to PRLAM-8186 we need to disable the Tiler before we enable the HW counter dump. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
	else
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);

	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, kctx);

	/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

		wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);

		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
	kbdev->hwcnt.triggered = 1;
	wake_up(&kbdev->hwcnt.wait);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	err = MALI_ERROR_NONE;

	kbdev->hwcnt.trig_exception = 0;

	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);

	if (firstcall) {
		kbase_pm_context_idle(kbdev);
	}

	return err;
out_unrequest_cores:
	if (firstcall) {
		kbase_pm_unrequest_cores(kbdev, MALI_TRUE, shader_cores_needed);
		kbase_pm_context_idle(kbdev);
	}
out_err:
	return err;
}
int tdmb_fc8080_spi_write_read(uint8* tx_data, int tx_length, uint8 *rx_data, int rx_length)
{
    int rc;

    struct spi_transfer    t = {
        .tx_buf = tx_data,
        .rx_buf = rx_data,
        .len    = tx_length + rx_length,
    };

    struct spi_message    m;

    if (fc8080_ctrl_info.spi_ptr == NULL)
    {
        printk("tdmb_fc8080_spi_write_read error txdata=%p, length=%d\n", tx_data, tx_length + rx_length);
        return FALSE;
    }

    mutex_lock(&fc8080_ctrl_info.mutex);

    spi_message_init(&m);
    spi_message_add_tail(&t, &m);
    rc = spi_sync(fc8080_ctrl_info.spi_ptr, &m);

    if (rc < 0)
    {
        printk("tdmb_fc8080_spi_write_read result(%d), actual_len=%d\n", rc, m.actual_length);
    }

    mutex_unlock(&fc8080_ctrl_info.mutex);

    return TRUE;
}

#ifdef FEATURE_DMB_USE_WORKQUEUE
static irqreturn_t broadcast_tdmb_spi_isr(int irq, void *handle)
{
    struct tdmb_fc8080_ctrl_blk* fc8080_info_p;

    fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
    if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
    {
        unsigned long flag;
        if (fc8080_info_p->spi_irq_status)
        {
            printk("######### spi read function is so late skip #########\n");
            return IRQ_HANDLED;
        }
//        printk("***** broadcast_tdmb_spi_isr coming *******\n");
        spin_lock_irqsave(&fc8080_info_p->spin_lock, flag);
        queue_work(fc8080_info_p->spi_wq, &fc8080_info_p->spi_work);
        spin_unlock_irqrestore(&fc8080_info_p->spin_lock, flag);
    }
    else
    {
        printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
    }

    return IRQ_HANDLED;
}

static void broacast_tdmb_spi_work(struct work_struct *tdmb_work)
{
    struct tdmb_fc8080_ctrl_blk *pTdmbWorkData;

    pTdmbWorkData = container_of(tdmb_work, struct tdmb_fc8080_ctrl_blk, spi_work);
    if ( pTdmbWorkData )
    {
        tunerbb_drv_fc8080_isr_control(0);
        pTdmbWorkData->spi_irq_status = TRUE;
        broadcast_fc8080_drv_if_isr();
        pTdmbWorkData->spi_irq_status = FALSE;
        tunerbb_drv_fc8080_isr_control(1);
    }
    else
    {
        printk("broadcast_tdmb_spi_work called, but pTdmbWorkData is NULL\n");
    }
}
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
    struct tdmb_fc8080_ctrl_blk* fc8080_info_p;

    fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
    if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
    {
        if (fc8080_info_p->spi_irq_status)
        {
            printk("######### spi read function is so late skip ignore #########\n");
            return IRQ_HANDLED;
        }

        tunerbb_drv_fc8080_isr_control(0);
        fc8080_info_p->spi_irq_status = TRUE;
        broadcast_fc8080_drv_if_isr();
        fc8080_info_p->spi_irq_status = FALSE;
        tunerbb_drv_fc8080_isr_control(1);
    }
    else
    {
        printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
    }

    return IRQ_HANDLED;
}
#endif

#ifdef FEATURE_DMB_USE_PINCTRL
static int tdmb_pinctrl_init(void)
{
    struct pinctrl *tdmb_pinctrl;
    struct pinctrl_state *gpio_state_suspend;

    tdmb_pinctrl = devm_pinctrl_get(&(fc8080_ctrl_info.pdev->dev));

    if (IS_ERR_OR_NULL(tdmb_pinctrl)) {
        pr_err("%s: Getting pinctrl handle failed\n", __func__);
        return -EINVAL;
    }

    gpio_state_suspend = pinctrl_lookup_state(tdmb_pinctrl, "gpio_tdmb_suspend");
    if (IS_ERR_OR_NULL(gpio_state_suspend)) {
        pr_err("%s: Failed to get the suspend state pinctrl handle\n", __func__);
        return -EINVAL;
    }

    if (pinctrl_select_state(tdmb_pinctrl, gpio_state_suspend)) {
        pr_err("%s: error on pinctrl_select_state for tdmb enable and irq pin\n", __func__);
        return -EINVAL;
    } else {
        printk("%s: success to set pinctrl_select_state for tdmb enable and irq pin\n", __func__);
    }

    return 0;
}
#endif
static int tdmb_configure_gpios(void)
{
    int rc = OK;
    int err_count = 0;

    fc8080_ctrl_info.dmb_en = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,en-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_en, "DMB_EN");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_EN request!!!\n",__func__);
    }

    fc8080_ctrl_info.dmb_irq = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,irq-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_irq, "DMB_INT_N");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_INT_N request!!!\n",__func__);
    }

#if defined (CONFIG_MACH_MSM8926_VFP_KR )||defined(CONFIG_MACH_MSM8916_YG_SKT_KR)
    fc8080_ctrl_info.dmb_ant = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,ant-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_ant, "DMB_ANT");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_ANT request!!!\n",__func__);
    }
    gpio_direction_output(fc8080_ctrl_info.dmb_ant,0);
#endif

    gpio_direction_output(fc8080_ctrl_info.dmb_en, 0);
    gpio_direction_input(fc8080_ctrl_info.dmb_irq);

    if(err_count > 0) rc = -EINVAL;

    return rc;
}
Example #19
static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len)
{
	int pkt_len = 0;
	int err = 0;
	int total_recd_partial = 0;
	int total_recd = 0;
	uint8_t buf_full = 0;
	unsigned char *temp_buf = NULL;
	uint32_t read_len = 0;
	struct diag_smd_info *smd_info = NULL;

	if (!ctxt || !buf || buf_len <= 0)
		return -EIO;

	smd_info = (struct diag_smd_info *)ctxt;
	if (!smd_info->hdl || !smd_info->inited ||
	    !atomic_read(&smd_info->opened))
		return -EIO;

	/*
	 * Always try to read the data when a notification is received from
	 * SMD. If the packet size is 0, release the wake source held earlier.
	 */
	err = wait_event_interruptible(smd_info->read_wait_q,
				       (smd_info->hdl != NULL) &&
				       (atomic_read(&smd_info->opened) == 1));
	if (err) {
		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
		return -ERESTARTSYS;
	}

	/*
	 * Reset the buffers. Also release the wake source held earlier.
	 */
	if (atomic_read(&smd_info->diag_state) == 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "%s closing read thread. diag state is closed\n",
			 smd_info->name);
		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
		return 0;
	}

	if (!smd_info->hdl || !atomic_read(&smd_info->opened)) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "%s stopping read, hdl: %pK, opened: %d\n",
			 smd_info->name, smd_info->hdl,
			 atomic_read(&smd_info->opened));
		goto fail_return;
	}

	do {
		total_recd_partial = 0;
		temp_buf = buf + total_recd;
		pkt_len = smd_cur_packet_size(smd_info->hdl);
		if (pkt_len <= 0)
			break;

		if (total_recd + pkt_len > buf_len) {
			buf_full = 1;
			break;
		}

		while (total_recd_partial < pkt_len) {
			read_len = smd_read_avail(smd_info->hdl);
			if (!read_len) {
				wait_event_interruptible(smd_info->read_wait_q,
					   ((atomic_read(&smd_info->opened)) &&
					    smd_read_avail(smd_info->hdl)));

				if (!smd_info->hdl ||
				    !atomic_read(&smd_info->opened)) {
					DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
						"%s exiting from wait",
						smd_info->name);
					goto fail_return;
				}
			}

			if (pkt_len < read_len)
				goto fail_return;

			smd_read(smd_info->hdl, temp_buf, read_len);
			total_recd_partial += read_len;
			total_recd += read_len;
			temp_buf += read_len;
		}
	} while (pkt_len > 0);

	if ((smd_info->type == TYPE_DATA && pkt_len) || buf_full)
		err = queue_work(smd_info->wq, &(smd_info->read_work));

	if (total_recd > 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
			 smd_info->name, total_recd);
		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, total_recd);
	} else {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
			 smd_info->name, total_recd);
		goto fail_return;
	}
	return 0;

fail_return:
	diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
	return -EINVAL;
}
Example #20
static void _fan5403_chg_work_timer_func(unsigned long param)
{
	queue_work(chg_wqueue, &chg_work);
}
Example #21
static __devinit int sec_battery_probe(struct platform_device *pdev)
{
    struct sec_battery_platform_data *pdata = pdev->dev.platform_data;
    struct chg_data *chg;
    int ret = 0;

    pr_info("%s : Samsung Battery Driver Loading\n", __func__);

    chg = kzalloc(sizeof(*chg), GFP_KERNEL);
    if (!chg)
        return -ENOMEM;

    chg->pdata = pdata;

    if (!chg->pdata || !chg->pdata->adc_table) {
        pr_err("%s : No platform data & adc_table supplied\n", __func__);
        ret = -EINVAL;
        goto err_bat_table;
    }

    chg->psy_bat.name = "battery",
                 chg->psy_bat.type = POWER_SUPPLY_TYPE_BATTERY,
                              chg->psy_bat.properties = sec_battery_props,
                                           chg->psy_bat.num_properties = ARRAY_SIZE(sec_battery_props),
                                                        chg->psy_bat.get_property = sec_bat_get_property,

                                                                     chg->psy_usb.name = "usb",
                                                                                  chg->psy_usb.type = POWER_SUPPLY_TYPE_USB,
                                                                                               chg->psy_usb.supplied_to = supply_list,
                                                                                                            chg->psy_usb.num_supplicants = ARRAY_SIZE(supply_list),
                                                                                                                         chg->psy_usb.properties = sec_power_properties,
                                                                                                                                      chg->psy_usb.num_properties = ARRAY_SIZE(sec_power_properties),
                                                                                                                                                   chg->psy_usb.get_property = sec_usb_get_property,

                                                                                                                                                                chg->psy_ac.name = "ac",
                                                                                                                                                                            chg->psy_ac.type = POWER_SUPPLY_TYPE_MAINS,
                                                                                                                                                                                        chg->psy_ac.supplied_to = supply_list,
                                                                                                                                                                                                    chg->psy_ac.num_supplicants = ARRAY_SIZE(supply_list),
                                                                                                                                                                                                                chg->psy_ac.properties = sec_power_properties,
                                                                                                                                                                                                                            chg->psy_ac.num_properties = ARRAY_SIZE(sec_power_properties),
                                                                                                                                                                                                                                        chg->psy_ac.get_property = sec_ac_get_property,

                                                                                                                                                                                                                                                    chg->present = 1;
    chg->polling_interval = POLLING_INTERVAL;
    chg->bat_info.batt_health = POWER_SUPPLY_HEALTH_GOOD;
    chg->bat_info.batt_is_full = false;
    chg->set_charge_timeout = false;
    chg->bat_info.batt_improper_ta = false;
    chg->is_recharging = false;
#ifdef CONFIG_BATTERY_MAX17042
    /* Get battery type from the fuel-gauge driver. */
    if (chg->pdata && chg->pdata->fuelgauge_cb)
        chg->battery_type = (battery_type_t)chg->pdata->fuelgauge_cb(
                                REQ_TEST_MODE_INTERFACE, TEST_MODE_BATTERY_TYPE_CHECK, 0);

    /* Check for the under-voltage charging case. */
    if (chg->pdata && chg->pdata->pmic_charger &&
            chg->pdata->pmic_charger->get_connection_status) {
        if (chg->pdata->pmic_charger->get_connection_status() &&
                check_UV_charging_case(chg))
            chg->low_batt_boot_flag = true;
    } else {
        chg->low_batt_boot_flag = false;
    }

    /* Init delayed work for full-charge completion. */
    INIT_DELAYED_WORK(&chg->full_chg_work, full_comp_work_handler);

    /* Init low-battery check threshold values. */
    if (chg->battery_type == SDI_BATTERY_TYPE)
        chg->check_start_vol = 3550;  /* under 3.55 V */
    else if (chg->battery_type == ATL_BATTERY_TYPE)
        chg->check_start_vol = 3450;  /* under 3.45 V */
#endif

    chg->cable_status = CABLE_TYPE_NONE;
    chg->charging_status = CHARGING_STATUS_NONE;

    mutex_init(&chg->mutex);

    platform_set_drvdata(pdev, chg);

    wake_lock_init(&chg->vbus_wake_lock, WAKE_LOCK_SUSPEND,
                   "vbus_present");
    wake_lock_init(&chg->work_wake_lock, WAKE_LOCK_SUSPEND,
                   "sec_battery_work");

    INIT_WORK(&chg->bat_work, sec_bat_work);

    chg->monitor_wqueue =
        create_freezeable_workqueue(dev_name(&pdev->dev));
    if (!chg->monitor_wqueue) {
        pr_err("Failed to create freezeable workqueue\n");
        ret = -ENOMEM;
        goto err_wake_lock;
    }

    chg->last_poll = alarm_get_elapsed_realtime();
    alarm_init(&chg->alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
               sec_battery_alarm);

    /* init power supplier framework */
    ret = power_supply_register(&pdev->dev, &chg->psy_bat);
    if (ret) {
        pr_err("Failed to register power supply psy_bat\n");
        goto err_wqueue;
    }

    ret = power_supply_register(&pdev->dev, &chg->psy_usb);
    if (ret) {
        pr_err("Failed to register power supply psy_usb\n");
        goto err_supply_unreg_bat;
    }

    ret = power_supply_register(&pdev->dev, &chg->psy_ac);
    if (ret) {
        pr_err("Failed to register power supply psy_ac\n");
        goto err_supply_unreg_usb;
    }

    sec_bat_create_attrs(chg->psy_bat.dev);

    chg->callbacks.set_cable = sec_bat_set_cable;
    chg->callbacks.set_status = sec_bat_set_status;
    chg->callbacks.force_update = sec_bat_force_update;
    if (chg->pdata->register_callbacks)
        chg->pdata->register_callbacks(&chg->callbacks);

    wake_lock(&chg->work_wake_lock);
    queue_work(chg->monitor_wqueue, &chg->bat_work);

    p1_lpm_mode_check(chg);

    return 0;

err_supply_unreg_usb:
    power_supply_unregister(&chg->psy_usb);
err_supply_unreg_bat:
    power_supply_unregister(&chg->psy_bat);
err_wqueue:
    destroy_workqueue(chg->monitor_wqueue);
    cancel_work_sync(&chg->bat_work);
    alarm_cancel(&chg->alarm);
err_wake_lock:
    wake_lock_destroy(&chg->work_wake_lock);
    wake_lock_destroy(&chg->vbus_wake_lock);
    mutex_destroy(&chg->mutex);
err_bat_table:
    kfree(chg);
    return ret;
}
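
The error labels above unwind the probe in reverse order of setup. For reference, a matching remove() path would perform the same teardown; the sketch below assumes the chg_data layout used in the probe, and the name sec_bat_remove is illustrative, not taken from this source.

static int sec_bat_remove(struct platform_device *pdev)
{
    struct chg_data *chg = platform_get_drvdata(pdev);

    alarm_cancel(&chg->alarm);                /* stop the polling alarm */
    cancel_work_sync(&chg->bat_work);         /* drain pending monitor work */
    destroy_workqueue(chg->monitor_wqueue);

    power_supply_unregister(&chg->psy_ac);    /* reverse of registration order */
    power_supply_unregister(&chg->psy_usb);
    power_supply_unregister(&chg->psy_bat);

    wake_lock_destroy(&chg->work_wake_lock);
    wake_lock_destroy(&chg->vbus_wake_lock);
    mutex_destroy(&chg->mutex);
    kfree(chg);

    return 0;
}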
Exemplo n.º 22
0
static enum hrtimer_restart aps_timer_func(struct hrtimer *timer)
{
	struct aps_data *aps = container_of(timer, struct aps_data, timer);		
	queue_work(aps_wq, &aps->work);
	return HRTIMER_NORESTART;
}
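
Since aps_timer_func returns HRTIMER_NORESTART, each expiry fires only once and the timer must be re-armed (typically by the queued work handler). A minimal sketch of the arming side, assuming the aps_data fields used above; APS_POLL_NS is a hypothetical polling period, not taken from this source.

#define APS_POLL_NS	(200 * NSEC_PER_MSEC)	/* hypothetical polling period */

static void aps_setup_timer(struct aps_data *aps)
{
	/* bind aps_timer_func as the expiry callback, then arm the timer */
	hrtimer_init(&aps->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aps->timer.function = aps_timer_func;
	hrtimer_start(&aps->timer, ktime_set(0, APS_POLL_NS), HRTIMER_MODE_REL);
}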
Exemplo n.º 23
0
/**
 * @brief Process a replay job
 *
 * Called from kbase_process_soft_job.
 *
 * On exit, if the job has completed, katom->event_code will have been updated.
 * If the job has not completed, and is replaying jobs, then the atom status
 * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
 *
 * @param[in] katom  The atom to be processed
 * @return           false if the atom has completed
 *                   true if the atom is replaying jobs
 */
bool kbase_replay_process(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct kbase_jd_context *jctx = &kctx->jctx;
	struct kbase_device *kbdev = kctx->kbdev;

	/* Don't replay this atom if these issues are not present in the
	 * hardware */
	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) &&
			!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) {
		dev_dbg(kbdev->dev, "Hardware does not need replay workaround");

		/* Signal failure to userspace */
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;

		return false;
	}

	if (katom->event_code == BASE_JD_EVENT_DONE) {
		dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n");

		if (katom->retry_count)
			kbase_disjoint_state_down(kbdev);

		return false;
	}

	if (jctx->sched_info.ctx.is_dying) {
		dev_dbg(kbdev->dev, "Not replaying; context is dying\n");

		if (katom->retry_count)
			kbase_disjoint_state_down(kbdev);

		return false;
	}

	/* Check job exception type and source before replaying. */
	if (!kbase_replay_fault_check(katom)) {
		dev_dbg(kbdev->dev,
			"Replay cancelled on event %x\n", katom->event_code);
		/* katom->event_code is already set to the failure code of the
		 * previous job.
		 */
		return false;
	}

	dev_warn(kbdev->dev, "Replaying jobs retry=%d\n",
			katom->retry_count);

	katom->retry_count++;

	if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
		dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n");

		kbase_disjoint_state_down(kbdev);

		/* katom->event_code is already set to the failure code of the
		   previous job */
		return false;
	}

	/* only enter the disjoint state once for the whole time while the replay is ongoing */
	if (katom->retry_count == 1)
		kbase_disjoint_state_up(kbdev);

	INIT_WORK(&katom->work, kbase_replay_process_worker);
	queue_work(kctx->event_workq, &katom->work);

	return true;
}
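
Per the comment block above, the boolean return tells the soft-job path whether the atom was re-queued for replay. A hedged sketch of a caller honoring that contract; handle_replay_soft_job is illustrative, not the actual kbase_process_soft_job.

static int handle_replay_soft_job(struct kbase_jd_atom *katom)
{
	if (kbase_replay_process(katom))
		return 1;	/* re-queued as KBASE_JD_ATOM_STATE_QUEUED; do not complete yet */

	/* katom->event_code now carries the final completion status */
	return 0;
}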
static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
{
    struct gpio_key_state *ks = dev_id;
    struct gpio_input_state *ds = ks->ds;
    int keymap_index = ks - ds->key_state;
    const struct gpio_event_direct_entry *key_entry;
    unsigned long irqflags;
#ifndef CONFIG_MFD_MAX8957
    int pressed;
#endif
    KEY_LOGD("%s, irq=%d, use_irq=%d\n", __func__, irq, ds->use_irq);

    if (!ds->use_irq)
        return IRQ_HANDLED;

    key_entry = &ds->info->keymap[keymap_index];

    if (key_entry->code == KEY_POWER && power_key_intr_flag == 0) {
        irq_set_irq_type(irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
        power_key_intr_flag = 1;
        KEY_LOGD("%s, keycode = %d, first intr", __func__, key_entry->code);
    }
    if (ds->info->debounce_time.tv64) {
        spin_lock_irqsave(&ds->irq_lock, irqflags);
        if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
            ks->debounce = DEBOUNCE_UNKNOWN;
            if (ds->debounce_count++ == 0) {
                wake_lock(&ds->wake_lock);
#ifndef CONFIG_MFD_MAX8957
                hrtimer_start(
                    &ds->timer, ds->info->debounce_time,
                    HRTIMER_MODE_REL);
#endif
            }
            if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
                KEY_LOGD("gpio_event_input_irq_handler: "
                         "key %x-%x, %d (%d) start debounce\n",
                         ds->info->type, key_entry->code,
                         keymap_index, key_entry->gpio);
        } else {
            disable_irq_nosync(irq);
            ks->debounce = DEBOUNCE_UNSTABLE;
        }
        spin_unlock_irqrestore(&ds->irq_lock, irqflags);
    } else {
#ifdef CONFIG_MFD_MAX8957
        queue_work(ki_queue, &ks->work);
#else
        pressed = gpio_get_value(key_entry->gpio) ^
                  !(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
        if (ds->info->flags & GPIOEDF_PRINT_KEYS)
            KEY_LOGD("gpio_event_input_irq_handler: key %x-%x, %d "
                     "(%d) changed to %d\n",
                     ds->info->type, key_entry->code, keymap_index,
                     key_entry->gpio, pressed);
        input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
                    key_entry->code, pressed);
        input_sync(ds->input_devs->dev[key_entry->dev]);
#endif
    }
    return IRQ_HANDLED;
}
int tdmb_fc8050_spi_write_read(uint8* tx_data, int tx_length, uint8 *rx_data, int rx_length)
{
	int rc;

	struct spi_transfer	t = {
			.tx_buf		= tx_data,
			.rx_buf		= rx_data,
			.len		= tx_length+rx_length,
		};

	struct spi_message	m;	

	if (fc8050_ctrl_info.spi_ptr == NULL)
	{
		printk("tdmb_fc8050_spi_write_read error txdata=0x%x, length=%d\n", (unsigned int)tx_data, tx_length+rx_length);
		return FALSE;
	}

	mutex_lock(&fc8050_ctrl_info.mutex);

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	rc = spi_sync(fc8050_ctrl_info.spi_ptr, &m);

	if (rc < 0)
	{
		printk("tdmb_fc8050_spi_write_read result(%d), actual_len=%d\n", rc, m.actual_length);
	}

	mutex_unlock(&fc8050_ctrl_info.mutex);

	return (rc < 0) ? FALSE : TRUE;
}

#ifdef FEATURE_DMB_USE_WORKQUEUE
static irqreturn_t broadcast_tdmb_spi_isr(int irq, void *handle)
{
	struct tdmb_fc8050_ctrl_blk* fc8050_info_p;
	unsigned long flag;

	fc8050_info_p = (struct tdmb_fc8050_ctrl_blk *)handle;	
	if ( fc8050_info_p && fc8050_info_p->TdmbPowerOnState )
	{
		if (fc8050_info_p->spi_irq_status)
		{			
			printk("######### spi read function is so late skip #########\n");			
			return IRQ_HANDLED;
		}		
//		printk("***** broadcast_tdmb_spi_isr coming *******\n");
		spin_lock_irqsave(&fc8050_info_p->spin_lock, flag);
		queue_work(fc8050_info_p->spi_wq, &fc8050_info_p->spi_work);
		spin_unlock_irqrestore(&fc8050_info_p->spin_lock, flag);    
	}
	else
	{
		printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
	}

	return IRQ_HANDLED; 
}

static void broacast_tdmb_spi_work(struct work_struct *tdmb_work)
{
	struct tdmb_fc8050_ctrl_blk *pTdmbWorkData;

	pTdmbWorkData = container_of(tdmb_work, struct tdmb_fc8050_ctrl_blk, spi_work);
	if ( pTdmbWorkData )
	{
		fc8050_isr_control(0);
		pTdmbWorkData->spi_irq_status = TRUE;
		broadcast_drv_if_isr();
		pTdmbWorkData->spi_irq_status = FALSE;
		fc8050_isr_control(1);
	}
	else
	{
		printk("~~~~~~~broadcast_tdmb_spi_work call but pTdmbworkData is NULL ~~~~~~~\n");
	}
}
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
	struct tdmb_fc8050_ctrl_blk* fc8050_info_p;

	fc8050_info_p = (struct tdmb_fc8050_ctrl_blk *)handle;
	if ( fc8050_info_p && fc8050_info_p->TdmbPowerOnState )
	{
		if (fc8050_info_p->spi_irq_status)
		{
			printk("######### spi read function is so late skip ignore #########\n");
			return IRQ_HANDLED;
		}

		fc8050_isr_control(0);
		fc8050_info_p->spi_irq_status = TRUE;
		broadcast_drv_if_isr();
		fc8050_info_p->spi_irq_status = FALSE;
		fc8050_isr_control(1);
	}
	else
	{
		printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
	}

	return IRQ_HANDLED; 
}
#endif

static int tdmb_configure_gpios(void)
{
	int rc = OK;
	int err_count = 0;

	rc = gpio_request(DMB_RESET_N, "DMB_RESET_N");
	if (rc < 0) {
		err_count++;
		printk("%s:Failed GPIO DMB_RESET_N request!!!\n",__func__);
	}

	rc = gpio_request(DMB_EN, "DMB_EN");
	if (rc < 0) {
		err_count++;
		printk("%s:Failed GPIO DMB_EN request!!!\n",__func__);
	}

	rc = gpio_request(DMB_INT_N, "DMB_INT_N");
	if (rc < 0) {
		err_count++;
		printk("%s:Failed GPIO DMB_INT_N request!!!\n",__func__);
	}


	rc = gpio_request(DMB_ANT_SEL_P_EAR, "DMB_ANT_SEL_P");
	if (rc < 0) {
		err_count++;
		printk("%s:Failed GPIO DMB_ANT_SEL_P request!!!\n",__func__);
	}

	rc = gpio_request(DMB_ANT_SEL_N_INNER, "DMB_ANT_SEL_N");
	if (rc < 0) {
		err_count++;
		printk("%s:Failed GPIO DMB_ANT_SEL_N request!!!\n",__func__);
	}

	gpio_direction_output(DMB_RESET_N, 0);
	gpio_direction_output(DMB_EN, 0);
	gpio_direction_input(DMB_INT_N);

	gpio_set_value_cansleep(DMB_ANT_SEL_P_EAR, 1); /*PMIC Extended GPIO */
	gpio_set_value_cansleep(DMB_ANT_SEL_N_INNER, 0); /* PMIC Extended GPIO */

	if (err_count > 0)
		rc = -EINVAL;

	return rc;
}
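
A teardown counterpart would release the lines in reverse order of request. The sketch below assumes the same GPIO defines; the name tdmb_release_gpios is illustrative, not taken from this source.

static void tdmb_release_gpios(void)
{
	gpio_free(DMB_ANT_SEL_N_INNER);
	gpio_free(DMB_ANT_SEL_P_EAR);
	gpio_free(DMB_INT_N);
	gpio_free(DMB_EN);
	gpio_free(DMB_RESET_N);
}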
Exemplo n.º 26
0
static void mwifiex_usb_rx_complete(struct urb *urb)
{
	struct urb_context *context = (struct urb_context *)urb->context;
	struct mwifiex_adapter *adapter = context->adapter;
	struct sk_buff *skb = context->skb;
	struct usb_card_rec *card;
	int recv_length = urb->actual_length;
	int size, status;

	if (!adapter || !adapter->card) {
		pr_err("mwifiex adapter or card structure is not valid\n");
		return;
	}

	card = (struct usb_card_rec *)adapter->card;
	if (card->rx_cmd_ep == context->ep)
		atomic_dec(&card->rx_cmd_urb_pending);
	else
		atomic_dec(&card->rx_data_urb_pending);

	if (recv_length) {
		if (urb->status || (adapter->surprise_removed)) {
			dev_err(adapter->dev,
				"URB status is failed: %d\n", urb->status);
			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
			goto setup_for_next;
		}
		if (skb->len > recv_length)
			skb_trim(skb, recv_length);
		else
			skb_put(skb, recv_length - skb->len);

		atomic_inc(&adapter->rx_pending);
		status = mwifiex_usb_recv(adapter, skb, context->ep);

		dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
			recv_length, status);
		if (status == -EINPROGRESS) {
			queue_work(adapter->workqueue, &adapter->main_work);

			/* urb for data_ep is re-submitted now;
			 * urb for cmd_ep will be re-submitted in callback
			 * mwifiex_usb_recv_complete
			 */
			if (card->rx_cmd_ep == context->ep)
				return;
		} else {
			atomic_dec(&adapter->rx_pending);
			if (status == -1)
				dev_err(adapter->dev,
					"received data processing failed!\n");

			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
		}
	} else if (urb->status) {
		if (!adapter->is_suspended) {
			dev_warn(adapter->dev,
				 "Card is removed: %d\n", urb->status);
			adapter->surprise_removed = true;
		}
		dev_kfree_skb_any(skb);
		return;
	} else {
		/* Do not free skb in case of command ep */
		if (card->rx_cmd_ep != context->ep)
			dev_kfree_skb_any(skb);

		/* fall through setup_for_next */
	}

setup_for_next:
	if (card->rx_cmd_ep == context->ep)
		size = MWIFIEX_RX_CMD_BUF_SIZE;
	else
		size = MWIFIEX_RX_DATA_BUF_SIZE;

	mwifiex_usb_submit_rx_urb(context, size);

	return;
}
Exemplo n.º 27
0
static void key_garbage_collector(struct work_struct *work)
{
	static u8 gc_state;		/* Internal persistent state */
#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */

	struct rb_node *cursor;
	struct key *key;
	time_t new_timer, limit;

	kenter("[%lx,%x]", key_gc_flags, gc_state);

	limit = current_kernel_time().tv_sec;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	/* Work out what we're going to be doing in this pass */
	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
	gc_state <<= 1;
	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;

	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_DEAD_1;
	kdebug("new pass %x", gc_state);

	new_timer = LONG_MAX;

	spin_lock(&key_serial_lock);
	cursor = rb_first(&key_serial_tree);

continue_scanning:
	while (cursor) {
		key = rb_entry(cursor, struct key, serial_node);
		cursor = rb_next(cursor);

		if (atomic_read(&key->usage) == 0)
			goto found_unreferenced_key;

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
			if (key->type == key_gc_dead_keytype) {
				gc_state |= KEY_GC_FOUND_DEAD_KEY;
				set_bit(KEY_FLAG_DEAD, &key->flags);
				key->perm = 0;
				goto skip_dead_key;
			}
		}

		if (gc_state & KEY_GC_SET_TIMER) {
			if (key->expiry > limit && key->expiry < new_timer) {
				kdebug("will expire %x in %ld",
				       key_serial(key), key->expiry - limit);
				new_timer = key->expiry;
			}
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
			if (key->type == key_gc_dead_keytype)
				gc_state |= KEY_GC_FOUND_DEAD_KEY;

		if ((gc_state & KEY_GC_REAPING_LINKS) ||
		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
			if (key->type == &key_type_keyring)
				goto found_keyring;
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
			if (key->type == key_gc_dead_keytype)
				goto destroy_dead_key;

	skip_dead_key:
		if (spin_is_contended(&key_serial_lock) || need_resched())
			goto contended;
	}

contended:
	spin_unlock(&key_serial_lock);

maybe_resched:
	if (cursor) {
		cond_resched();
		spin_lock(&key_serial_lock);
		goto continue_scanning;
	}

	kdebug("pass complete");

	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
		kdebug("dead sync");
		synchronize_rcu();
	}

	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
				 KEY_GC_REAPING_DEAD_2))) {
		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
			kdebug("dead short");
			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
			gc_state |= KEY_GC_REAPING_DEAD_3;
		} else {
			gc_state |= KEY_GC_REAP_AGAIN;
		}
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
		kdebug("dead wake");
		smp_mb();
		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
	}

	if (gc_state & KEY_GC_REAP_AGAIN)
		queue_work(system_nrt_wq, &key_gc_work);
	kleave(" [end %x]", gc_state);
	return;

found_unreferenced_key:
	kdebug("unrefd key %d", key->serial);
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_gc_unused_key(key);
	gc_state |= KEY_GC_REAP_AGAIN;
	goto maybe_resched;

found_keyring:
	spin_unlock(&key_serial_lock);
	kdebug("scan keyring %d", key->serial);
	key_gc_keyring(key, limit);
	goto maybe_resched;

destroy_dead_key:
	spin_unlock(&key_serial_lock);
	kdebug("destroy key %d", key->serial);
	down_write(&key->sem);
	key->type = &key_type_dead;
	if (key_gc_dead_keytype->destroy)
		key_gc_dead_keytype->destroy(key);
	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
	up_write(&key->sem);
	goto maybe_resched;
}
Exemplo n.º 28
0
int iwl_scan_cancel(struct iwl_priv *priv)
{
	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
	queue_work(priv->workqueue, &priv->abort_scan);
	return 0;
}
Exemplo n.º 29
0
int diag_device_write(void *buf, int proc_num)
{
	int i, err = 0;

	if (driver->logging_mode == USB_MODE) {
		if (proc_num == APPS_DATA) {
			driver->usb_write_ptr_svc = (struct diag_request *)
			(diagmem_alloc(driver, sizeof(struct diag_request),
				 POOL_TYPE_USB_STRUCT));

			if (!driver->usb_write_ptr_svc)
				return -ENOMEM;
			
			driver->usb_write_ptr_svc->length = driver->used;
			driver->usb_write_ptr_svc->buf = buf;
			err = diag_write(driver->usb_write_ptr_svc);
		} else if (proc_num == MODEM_DATA) {
				driver->usb_write_ptr->buf = buf;
#ifdef DIAG_DEBUG
				printk(KERN_INFO "writing data to USB,"
						 " pkt length %d\n",
				       driver->usb_write_ptr->length);
				print_hex_dump(KERN_DEBUG, "Written Packet Data"
					       " to USB: ", 16, 1,
					       DUMP_PREFIX_ADDRESS, buf,
					       driver->usb_write_ptr->length, 1);
#endif
			err = diag_write(driver->usb_write_ptr);
		} else if (proc_num == QDSP_DATA) {
			driver->usb_write_ptr_qdsp->buf = buf;
			err = diag_write(driver->usb_write_ptr_qdsp);
		}
		APPEND_DEBUG('k');
	} else if (driver->logging_mode == MEMORY_DEVICE_MODE) {
		if (proc_num == APPS_DATA) {
			for (i = 0; i < driver->poolsize_usb_struct; i++)
				if (driver->buf_tbl[i].length == 0) {
					driver->buf_tbl[i].buf = buf;
					driver->buf_tbl[i].length =
								 driver->used;
#ifdef DIAG_DEBUG
					printk(KERN_INFO "\n ENQUEUE buf ptr"
					       " and length is %x , %d\n",
					       (unsigned int)(driver->buf_tbl[i].buf),
					       driver->buf_tbl[i].length);
#endif
					break;
				}
		}
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i] == driver->logging_process_id)
				break;
		if (i < driver->num_clients) {
			driver->data_ready[i] |= MEMORY_DEVICE_LOG_TYPE;
			wake_up_interruptible(&driver->wait_q);
		} else
			return -EINVAL;
	} else if (driver->logging_mode == NO_LOGGING_MODE) {
		if (proc_num == MODEM_DATA) {
			driver->in_busy = 0;
			queue_work(driver->diag_wq, &(driver->
							diag_read_smd_work));
		} else if (proc_num == QDSP_DATA) {
			driver->in_busy_qdsp = 0;
			queue_work(driver->diag_wq, &(driver->
						diag_read_smd_qdsp_work));
		}
		err = -1;
	}
	return err;
}
Exemplo n.º 30
0
static int lge_dm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
	unsigned long arg)
{
	short modem_number, result;
	struct dm_tty *lge_dm_tty_drv = NULL;

	int is_all_closed, i;

	lge_dm_tty_drv = lge_dm_tty;
	tty->driver_data = lge_dm_tty_drv;
	lge_dm_tty_drv->tty_str = tty;

	result = 0;

	if (_IOC_TYPE(cmd) != DM_TTY_IOCTL_MAGIC)
		return -EINVAL;

	switch (cmd) {
	case DM_TTY_MODEM_OPEN:

		if (copy_from_user((void *)&modem_number, (const void *)arg,
			sizeof(modem_number)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_OPEN modem_number = %d\n",
					__func__, modem_number);

		if (lge_dm_tty_drv->is_modem_open[modem_number] == FALSE)
			lge_dm_tty_drv->is_modem_open[modem_number] = TRUE;
		else
			pr_err(DM_TTY_MODULE_NAME ": %s: already open "
				"modem_number = %d\n", __func__, modem_number);

		/* change path to DM APP */
		mutex_lock(&driver->diagchar_mutex);
		driver->logging_mode = DM_APP_MODE;
		mutex_unlock(&driver->diagchar_mutex);

		if (modem_number == Primary_modem_chip) {
			driver->in_busy_1 = 0;
			driver->in_busy_2 = 0;
			driver->in_busy_qdsp_1 = 0;
			driver->in_busy_qdsp_2 = 0;
			driver->in_busy_wcnss_1 = 0;
			driver->in_busy_wcnss_2 = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));

			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));

			if (driver->ch_wcnss)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_wcnss_work));
		} else if (modem_number == Secondary_modem_chip) {
#ifdef CONFIG_DIAG_SDIO_PIPE
			driver->in_busy_sdio = 0;
			/* Poll SDIO channel to check for data */
			if (driver->sdio_ch)
				queue_work(driver->diag_sdio_wq,
					&(driver->diag_read_sdio_work));
#endif
		} else {
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_OPEN "
				"error modem_number = %d\n",
					__func__, modem_number);
		}

		result = lge_dm_tty_drv->is_modem_open[modem_number];

		if (copy_to_user((void *)arg, (const void *)&result,
			sizeof(result)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_OPEN "
				"result = %d\n", __func__, result);

		break;

	case DM_TTY_MODEM_CLOSE:
		if (copy_from_user((void *)&modem_number, (const void *)arg,
			sizeof(modem_number)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_CLOSE modem_number = %d\n",
					__func__, modem_number);

		if (modem_number == 0) {

			/* close all modem chip */
			for (i = 0; i < NUM_MODEM_CHIP + 1; i++)
				lge_dm_tty_drv->is_modem_open[i] = FALSE;

			result = TRUE;

			pr_err(DM_TTY_MODULE_NAME ": %s: close all modem chip\n"
					, __func__);

		} else {

			if (lge_dm_tty_drv->is_modem_open[modem_number] == TRUE)
				lge_dm_tty_drv->is_modem_open[modem_number] =
					FALSE;
			else
				pr_err(DM_TTY_MODULE_NAME ": %s: "
					"already closed "
					"modem_number = %d", __func__,
					modem_number);

			/* check all modem chip closed */
			is_all_closed = TRUE;

			for (i = 0; i < NUM_MODEM_CHIP + 1; i++) {
				if (lge_dm_tty_drv->is_modem_open[i] == TRUE)
					is_all_closed = FALSE;
			}

			result = is_all_closed;

		}

		if (result == TRUE) {

			lge_dm_tty->set_logging = 0;

			/* change path to USB driver */
			mutex_lock(&driver->diagchar_mutex);
			driver->logging_mode = USB_MODE;
			mutex_unlock(&driver->diagchar_mutex);

			if (driver->usb_connected == 0)
				diagfwd_disconnect();
			else
				diagfwd_connect();

		}

		if (copy_to_user((void *)arg, (const void *)&result,
			sizeof(result)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_CLOSE "
				"result = %d\n", __func__, result);

		break;

	case DM_TTY_MODEM_STATUS:
		if (copy_from_user((void *)&modem_number, (const void *)arg,
			sizeof(modem_number)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_STATUS modem_number = %d\n",
					__func__, modem_number);

		result = lge_dm_tty_drv->is_modem_open[modem_number];

		if (copy_to_user((void *)arg, (const void *)&result,
			sizeof(result)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_MODEM_STATUS "
				"result = %d\n", __func__, result);
		break;

	case DM_TTY_DATA_TO_APP:
		if (copy_from_user((void *)&modem_number, (const void *)arg,
			sizeof(modem_number)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_DATA_TO_APP modem_number = %d\n",
					__func__, modem_number);

		if (copy_to_user((void *)arg, (const void *)&result,
			sizeof(result)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_DATA_TO_APP "
				"result = %d\n", __func__, result);
		break;

	case DM_TTY_DATA_TO_USB:
		if (copy_from_user((void *)&modem_number, (const void *)arg,
			sizeof(modem_number)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_DATA_TO_USB "
				"modem_number = %d\n", __func__, modem_number);

		if (copy_to_user((void *)arg, (const void *)&result,
			sizeof(result)) == 0)
			pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_ioctl "
				"DM_TTY_IOCTL_DATA_TO_USB "
				"result = %d\n", __func__, result);
		break;

	default:
		pr_info(DM_TTY_MODULE_NAME ": %s: "
			"lge_dm_tty_ioctl error\n", __func__);
		break;

	}

	return 0;
}