Example no. 1
0
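/*
 * Reset the ES parser's audio stream pointers to the start of the AIU
 * audio FIFO, pulse the FIFO buffer-control init bit, and mark the
 * stream buffer as parser-managed.
 */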
void esparser_audio_reset(struct stream_buf_s *buf)
{
    ulong flags;
    DEFINE_SPINLOCK(lock);

    spin_lock_irqsave(&lock, flags);

    WRITE_MPEG_REG(PARSER_AUDIO_WP,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));
    WRITE_MPEG_REG(PARSER_AUDIO_RP,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));

    WRITE_MPEG_REG(PARSER_AUDIO_START_PTR,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));
    WRITE_MPEG_REG(PARSER_AUDIO_END_PTR,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_END_PTR));
    CLEAR_MPEG_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR);

    WRITE_MPEG_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
    CLEAR_MPEG_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);

    buf->flag |= BUF_FLAG_PARSER;

    audio_data_parsed = 0;
    spin_unlock_irqrestore(&lock, flags);

    return;
}
Example no. 2
0
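/*
 * Toggle one data bit on the debug parallel port: successive calls for
 * the same bit alternate its level, producing a square wave on that pin.
 */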
void flip_parport_bit(unsigned char bitnum)
{
	static unsigned char last_value;
	DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned char mask;
	unsigned char value;

	if (!debug_sync_parport) {
		if (printk_ratelimit()) {
			printk(KERN_NOTICE "%s: no debug parallel port\n",
			       THIS_MODULE->name);
		}
		return;
	}
	BUG_ON(bitnum > 7);
	mask = 1 << bitnum;
	spin_lock_irqsave(&lock, flags);
	value = last_value & ~mask;
	if (parport_toggles[bitnum] % 2)	/* square wave */
		value |= mask;
	last_value = value;
	parport_toggles[bitnum]++;
	spin_unlock_irqrestore(&lock, flags);
	parport_write_data(debug_sync_parport, value);
}
Example no. 3
0
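/*
 * Reset the parser audio pointers to the start of the AIU audio FIFO
 * and pulse the FIFO buffer-control init bit.
 */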
void rm_audio_reset(void)
{
    ulong flags;
    DEFINE_SPINLOCK(lock);

    spin_lock_irqsave(&lock, flags);

    WRITE_MPEG_REG(PARSER_AUDIO_WP,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));
    WRITE_MPEG_REG(PARSER_AUDIO_RP,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));

    WRITE_MPEG_REG(PARSER_AUDIO_START_PTR,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_START_PTR));
    WRITE_MPEG_REG(PARSER_AUDIO_END_PTR,
                   READ_MPEG_REG(AIU_MEM_AIFIFO_END_PTR));
    CLEAR_MPEG_REG_MASK(PARSER_ES_CONTROL, ES_AUD_MAN_RD_PTR);

    WRITE_MPEG_REG(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);
    CLEAR_MPEG_REG_MASK(AIU_MEM_AIFIFO_BUF_CNTL, MEM_BUFCTRL_INIT);

    spin_unlock_irqrestore(&lock, flags);

    return;
}
Example no. 4
0
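/*
 * Debug write handler that deliberately breaks the kernel: the command
 * written ("nullptr", "bug", "hungtask", "panic", "deadlock",
 * "softlockup", "irqlockup", "nmiwatchdog", "need_recovery") selects
 * which failure mode to trigger.
 */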
static ssize_t write_breakme(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	char kbuf[MAX_BREAKME_WRITE + 1];
	DEFINE_SPINLOCK(lock_me_up);

	if (count) {
		if (count > MAX_BREAKME_WRITE)
			return -EINVAL;
		if (copy_from_user(&kbuf, buf, count))
			return -EFAULT;
		kbuf[min(count, sizeof(kbuf))-1] = '\0';

		/* Null pointer dereference */
		if (!strcmp(kbuf, "nullptr"))
			*(unsigned long *)0 = 0;
		/* BUG() */
		else if (!strcmp(kbuf, "bug"))
			BUG();
		/* hung_task stuck in unkillable D state */
		else if (!strcmp(kbuf, "hungtask"))
			schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
		/* Panic */
		else if (!strcmp(kbuf, "panic"))
			panic("Testing panic");
		/* Set up a deadlock (call this twice) */
		else if (!strcmp(kbuf, "deadlock"))
			spin_lock(&lock_me_up);
		/* lockup */
		else if (!strcmp(kbuf, "softlockup")) {
			while (1)
				;
		}
		/* lockup with interrupts enabled */
		else if (!strcmp(kbuf, "irqlockup")) {
			spin_lock(&lock_me_up);
			while (1)
				;
		}
		/* lockup with interrupts disabled */
		else if (!strcmp(kbuf, "nmiwatchdog")) {
			spin_lock_irq(&lock_me_up);
			while (1)
				;
		} else if (!strcmp(kbuf, "need_recovery"))
			chromeos_set_need_recovery();
	}
	return count;
}
Example no. 5
0
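/*
 * Open handler for the V3D device: allocate a per-file context, point it
 * at the V3D memory pool, register the graphics interrupt handler and
 * hand out a client id under v3d_id_lock.
 */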
static int v3d_open(struct inode *inode, struct file *filp)
{
	int ret;
	unsigned long flags;

	v3d_t *dev = kmalloc(sizeof(v3d_t), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

/* #define V3D_MEMPOOL_SIZE	(56*1024*1024) */
	filp->private_data = dev;
	uint32_t size = (1024*1024*2);
	size += V3D_MEMPOOL_SIZE;
	dev->mempool.ptr = v3d_mempool_base;
	dev->mempool.addr = virt_to_phys(dev->mempool.ptr);

	dev->mempool.size = V3D_MEMPOOL_SIZE;

	sema_init(&dev->irq_sem, 0);
	DEFINE_SPINLOCK(lockz);	/* = SPIN_LOCK_UNLOCKED */
	dev->lock = lockz;
	dev->irq_flags.v3d_irq_flags = 0;
	dev->irq_flags.qpu_irq_flags = 0;
	dev->id = 0;

	ret = request_irq(IRQ_GRAPHICS, v3d_isr,
			IRQF_DISABLED | IRQF_SHARED, V3D_DEV_NAME, dev);
	if (ret)
		goto err;

	spin_lock_irqsave(&v3d_id_lock, flags);
	if (v3d_id == 0)
		v3d_id = 1;
	dev->id = v3d_id++;
	if (dev->id == V3D_SERVER_ID)
		v3d_in_use = 1;
	spin_unlock_irqrestore(&v3d_id_lock, flags);

	return ret;

err:
	kfree(dev);
	return ret;
}
Example no. 6
0
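/*
 * Reset the subtitle parser: rewrite the current start/end pointers,
 * rewind the read/write pointers to the buffer start, and enable manual
 * read-pointer mode with the subtitle write-endian bits set.
 */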
void esparser_sub_reset(void)
{
    ulong flags;
    DEFINE_SPINLOCK(lock);
    u32 parser_sub_start_ptr;
    u32 parser_sub_end_ptr;

    spin_lock_irqsave(&lock, flags);

    parser_sub_start_ptr = READ_MPEG_REG(PARSER_SUB_START_PTR);
    parser_sub_end_ptr = READ_MPEG_REG(PARSER_SUB_END_PTR);

    WRITE_MPEG_REG(PARSER_SUB_START_PTR, parser_sub_start_ptr);
    WRITE_MPEG_REG(PARSER_SUB_END_PTR, parser_sub_end_ptr);
    WRITE_MPEG_REG(PARSER_SUB_RP, parser_sub_start_ptr);
    WRITE_MPEG_REG(PARSER_SUB_WP, parser_sub_start_ptr);
    SET_MPEG_REG_MASK(PARSER_ES_CONTROL, (7 << ES_SUB_WR_ENDIAN_BIT) | ES_SUB_MAN_RD_PTR);

    spin_unlock_irqrestore(&lock, flags);

    return;
}
Example no. 7
0
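/*
 * Module notifier callback: when a module is being loaded
 * (MODULE_STATE_COMING), replace its init and exit routines with stub
 * handlers so the module is effectively neutralized.
 */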
int module_handler(struct notifier_block *nblock, unsigned long code, void *_param)
{
    unsigned long flags;
    struct module *param = _param;
    DEFINE_SPINLOCK(module_event_spinlock);

    spin_lock_irqsave(&module_event_spinlock, flags);

    switch (param->state) {
        case MODULE_STATE_COMING:
            DEBUG("Detected insertion of module '%s', neutralizing init and exit routines\n", param->name);
            param->init = success_int;
            param->exit = success_void;
            break;

        default:
            break;
    }

    spin_unlock_irqrestore(&module_event_spinlock, flags);

    return NOTIFY_DONE;
}
Example no. 8
0
/* ****************************************************************************** */
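/*
 * Process a DMA interrupt: snapshot and clear the pending error and
 * transfer-complete status under the lock, invoke the registered
 * per-channel completion callbacks, release channels flagged for
 * freeing, and re-enable the per-channel AHB clock request once no
 * channel is left busy.
 */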
void csl_dma_process_callback(DMA_Interrupt_t * intStatus)
{
	UInt32 channel, mask, chnl_count;
	DMA_Interrupt_t localIntStatus;
	UInt32 flags;

	DEFINE_SPINLOCK(lock);
	spin_lock_irqsave(&lock, flags);
	localIntStatus.errInt = intStatus->errInt;
	localIntStatus.tcInt  = intStatus->tcInt;
	intStatus->errInt = 0;
	intStatus->tcInt = 0;
	spin_unlock_irqrestore(&lock, flags);

	/* dprintf(1, "csl_dma: csl_dma_process_callback\n"); */

	for (channel = 0; channel < TOTAL_DMA_CHANNELS; channel++) {
		mask = (0x1 << channel);
		if (chanArray[channel].bUsed == FALSE)
			continue;

#if (defined (_HERA_) || defined (_RHEA_) || defined (_SAMOA_))
		if (chanArray[channel].bUsed
		    && chanArray[channel].chanInfo.xferCompleteCb) {
			chanArray[channel].chanInfo.
			    xferCompleteCb((DMADRV_CALLBACK_STATUS_t)
					   DMADRV_CALLBACK_OK);
		}
#else
		
		if (localIntStatus.errInt & mask) {
			/* dprintf(1, "Eirlys errInt, channel: %d, %d\n", channel, TIMER_GetValue()); */
			if (chanArray[channel].bUsed
			    && chanArray[channel].chanInfo.xferCompleteCb) {
				chanArray[channel].chanInfo.
				    xferCompleteCb((DMADRV_CALLBACK_STATUS_t)
						   DMADRV_CALLBACK_FAIL);
				spin_lock_irqsave(&lock, flags);
				if (chanArray[channel].chanInfo.freeChan) {
					csl_dma_release_channel((DMA_CHANNEL)
								channel);
				}
				spin_unlock_irqrestore(&lock, flags);
			}
		} else if (localIntStatus.tcInt & mask) {
			if (chanArray[channel].bUsed
			    && chanArray[channel].chanInfo.xferCompleteCb) {
				/* dprintf(1, "Eirlys tcInt, %d, %d, 0x%x\n", channel, */
				/*        TIMER_GetValue(), chanArray[channel].chanInfo.xferCompleteCb); */

				chanArray[channel].chanInfo.
				    xferCompleteCb((DMADRV_CALLBACK_STATUS_t)
						   DMADRV_CALLBACK_OK);
				spin_lock_irqsave(&lock, flags);
				chanArray[channel].busy = FALSE;
				if (chanArray[channel].chanInfo.freeChan) {
					csl_dma_release_channel((DMA_CHANNEL)
								channel);
				}
				spin_unlock_irqrestore(&lock, flags);
			}
		}
#endif
	}
	for (chnl_count = 0; chnl_count < TOTAL_DMA_CHANNELS;
	     chnl_count++) {
		if (chanArray[chnl_count].busy == TRUE) {
			return;
		}
	}
	/* Set AHB clock request generated on a per-channel basis.
	 * This is to ensure Athena enters deep sleep/pedestal mode. */
	board_sysconfig(SYSCFG_CSL_DMA, SYSCFG_ENABLE);
	return;
}
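/*
 * Mailbox callback: if the DSP is waking from hibernation, re-enable its
 * peripheral clocks, re-lock the DPLL and restore the CM clock settings,
 * restore the mailbox context and, if the IVA2 domain is neither ON nor
 * in retention, flush the DSP MMU TLBs to force a short wakeup before
 * marking the board running.  From retention only the peripheral clocks
 * need to be restarted.
 */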
int send_mbox_callback(void *arg)
{
	struct WMD_DEV_CONTEXT *dev_ctxt = (struct WMD_DEV_CONTEXT *)arg;
	u32 temp;
	unsigned long flags;
	DEFINE_SPINLOCK(irq_lock);

	if (!dev_ctxt)
		return -EFAULT;

	if (dev_ctxt->dwBrdState == BRD_DSP_HIBERNATION ||
	    dev_ctxt->dwBrdState == BRD_HIBERNATION) {
		/* Restart the peripheral clocks */
		DSP_PeripheralClocks_Enable(dev_ctxt, NULL);

#ifdef CONFIG_BRIDGE_WDT3
		dsp_wdt_enable(true);
#endif

		spin_lock_irqsave(&irq_lock, flags);
		/* Enabling Dpll in lock mode*/
		temp = (u32) *((REG_UWORD32 *)
				((u32) (dev_ctxt->cmbase) + 0x34));
		temp = (temp & 0xFFFFFFFE) | 0x1;
		*((REG_UWORD32 *) ((u32) (dev_ctxt->cmbase) + 0x34)) =
			(u32) temp;
		temp = (u32) *((REG_UWORD32 *)
				((u32) (dev_ctxt->cmbase) + 0x4));
		temp = (temp & 0xFFFFF08) | 0x37;

		*((REG_UWORD32 *) ((u32) (dev_ctxt->cmbase) + 0x4)) =
			(u32) temp;
		/* Restore mailbox settings */
		omap_mbox_restore_ctx(dev_ctxt->mbox);

		/*
		 * The short-wakeup source is any read access to the MMU
		 * registers. An MMU flush or any other MMU register access
		 * before this point will already have put the IVA in the ON
		 * state (or Retention, if the DSP has moved to that state),
		 * in which case forcing the short wakeup again is redundant
		 * and causes problems when the MMU is accessed after the DSP
		 * has started its restore sequence. So only access the MMU
		 * if the DSP is not in RET or ON.
		 */
		HW_PWRST_IVA2RegGet(dev_ctxt->prmbase, &temp);
		if (!(temp & HW_PWR_STATE_RET) && !(temp & HW_PWR_STATE_ON))
			/*
			 * Flush the TLBs; this generates the short wakeup.
			 * Also wait for the DSP to restore.
			 */
			tlb_flush_all(dev_ctxt->dwDSPMmuBase);

		udelay(10);
		spin_unlock_irqrestore(&irq_lock, flags);
		dev_ctxt->dwBrdState = BRD_RUNNING;
	} else if (dev_ctxt->dwBrdState == BRD_RETENTION) {
		/* Restart the peripheral clocks */
		DSP_PeripheralClocks_Enable(dev_ctxt, NULL);
		dev_ctxt->dwBrdState = BRD_RUNNING;
	}

	return 0;
}