Example #1
static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n",j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_rip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
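The three ICR polling loops above are identical; a minimal helper sketch that factors them out (the helper name is illustrative, not from the kernel source):

/* Illustrative helper: poll the ICR busy bit until the IPI has been
 * sent or ~100 ms (1000 * 100 us) have elapsed. */
static unsigned long wait_for_icr_idle(void)
{
	unsigned long send_status;
	int timeout = 0;

	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	return send_status;	/* non-zero: send never completed */
}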
static int EplLinIoctl(struct inode *pDeviceFile_p,	// information about the device to open
		       struct file *pInstance_p,	// information about driver instance
		       unsigned int uiIoctlCmd_p,	// Ioctl command to execute
		       unsigned long ulArg_p)	// Ioctl command specific argument/parameter
{

	tEplKernel EplRet;
	int iErr;
	int iRet;

//    TRACE1("EPL: + EplLinIoctl (uiIoctlCmd_p=%d)...\n", uiIoctlCmd_p);

	iRet = -EINVAL;

	switch (uiIoctlCmd_p) {
		// ----------------------------------------------------------
	case EPLLIN_CMD_INITIALIZE:
		{
			tEplApiInitParam EplApiInitParam;

			iErr =
			    copy_from_user(&EplApiInitParam,
					   (const void *)ulArg_p,
					   sizeof(EplApiInitParam));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			EplApiInitParam.m_pfnCbEvent = EplLinCbEvent;
			EplApiInitParam.m_pfnCbSync = EplLinCbSync;

			EplRet = EplApiInitialize(&EplApiInitParam);

			uiEplState_g = EPL_STATE_RUNNING;

			iRet = (int)EplRet;
			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_SHUTDOWN:
		{		// shutdown the threads

			// pass control to sync kernel thread, but signal termination
			atomic_set(&AtomicSyncState_g, EVENT_STATE_TERM);
			wake_up_interruptible(&WaitQueueCbSync_g);
			wake_up_interruptible(&WaitQueuePI_In_g);

			// pass control to event queue kernel thread
			atomic_set(&AtomicEventState_g, EVENT_STATE_TERM);
			wake_up_interruptible(&WaitQueueCbEvent_g);

			if (uiEplState_g == EPL_STATE_RUNNING) {	// post NmtEventSwitchOff
				EplRet =
				    EplApiExecNmtCommand(kEplNmtEventSwitchOff);

			}

			iRet = 0;
			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_READ_LOCAL_OBJECT:
		{
			tEplLinLocalObject LocalObject;
			void *pData;

			iErr =
			    copy_from_user(&LocalObject, (const void *)ulArg_p,
					   sizeof(LocalObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if ((LocalObject.m_pData == NULL)
			    || (LocalObject.m_uiSize == 0)) {
				iRet = (int)kEplApiInvalidParam;
				goto Exit;
			}

			pData = vmalloc(LocalObject.m_uiSize);
			if (pData == NULL) {	// no memory available
				iRet = -ENOMEM;
				goto Exit;
			}

			EplRet =
			    EplApiReadLocalObject(LocalObject.m_uiIndex,
						  LocalObject.m_uiSubindex,
						  pData, &LocalObject.m_uiSize);

			if (EplRet == kEplSuccessful) {
				iErr =
				    copy_to_user(LocalObject.m_pData, pData,
						 LocalObject.m_uiSize);

				vfree(pData);

				if (iErr != 0) {
					iRet = -EIO;
					goto Exit;
				}
				// return actual size (LocalObject.m_uiSize)
				iErr = put_user(LocalObject.m_uiSize,
						(unsigned int *)(ulArg_p +
								 (unsigned long)
								 &LocalObject.
								 m_uiSize -
								 (unsigned long)
								 &LocalObject));
				if (iErr != 0) {
					iRet = -EIO;
					goto Exit;
				}

			} else {
				vfree(pData);
			}

			iRet = (int)EplRet;
			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_WRITE_LOCAL_OBJECT:
		{
			tEplLinLocalObject LocalObject;
			void *pData;

			iErr =
			    copy_from_user(&LocalObject, (const void *)ulArg_p,
					   sizeof(LocalObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if ((LocalObject.m_pData == NULL)
			    || (LocalObject.m_uiSize == 0)) {
				iRet = (int)kEplApiInvalidParam;
				goto Exit;
			}

			pData = vmalloc(LocalObject.m_uiSize);
			if (pData == NULL) {	// no memory available
				iRet = -ENOMEM;
				goto Exit;
			}
			iErr =
			    copy_from_user(pData, LocalObject.m_pData,
					   LocalObject.m_uiSize);
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			EplRet =
			    EplApiWriteLocalObject(LocalObject.m_uiIndex,
						   LocalObject.m_uiSubindex,
						   pData, LocalObject.m_uiSize);

			vfree(pData);

			iRet = (int)EplRet;
			break;
		}

	case EPLLIN_CMD_READ_OBJECT:
		{
			tEplLinSdoObject SdoObject;
			void *pData;
			tEplLinSdoBufHeader *pBufHeader;
			tEplSdoComConHdl *pSdoComConHdl;

			iErr =
			    copy_from_user(&SdoObject, (const void *)ulArg_p,
					   sizeof(SdoObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if ((SdoObject.m_le_pData == NULL)
			    || (SdoObject.m_uiSize == 0)) {
				iRet = (int)kEplApiInvalidParam;
				goto Exit;
			}

			pBufHeader =
			    (tEplLinSdoBufHeader *)
			    vmalloc(sizeof(tEplLinSdoBufHeader) +
				    SdoObject.m_uiSize);
			if (pBufHeader == NULL) {	// no memory available
				iRet = -ENOMEM;
				goto Exit;
			}
			// initialize temporary buffer
			pBufHeader->m_pUserArg = SdoObject.m_pUserArg;	// original user argument pointer
			pBufHeader->m_pData = SdoObject.m_le_pData;	// original data pointer from app
			pData = pBufHeader + 1;	// payload starts right after the header

			if (SdoObject.m_fValidSdoComConHdl != FALSE) {
				pSdoComConHdl = &SdoObject.m_SdoComConHdl;
			} else {
				pSdoComConHdl = NULL;
			}

			EplRet =
			    EplApiReadObject(pSdoComConHdl,
					     SdoObject.m_uiNodeId,
					     SdoObject.m_uiIndex,
					     SdoObject.m_uiSubindex, pData,
					     &SdoObject.m_uiSize,
					     SdoObject.m_SdoType, pBufHeader);

			// return actual SDO handle (SdoObject.m_SdoComConHdl)
			iErr = put_user(SdoObject.m_SdoComConHdl,
					(unsigned int *)(ulArg_p +
							 (unsigned long)
							 &SdoObject.
							 m_SdoComConHdl -
							 (unsigned long)
							 &SdoObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if (EplRet == kEplSuccessful) {
				iErr =
				    copy_to_user(SdoObject.m_le_pData, pData,
						 SdoObject.m_uiSize);

				vfree(pBufHeader);

				if (iErr != 0) {
					iRet = -EIO;
					goto Exit;
				}
				// return actual size (SdoObject.m_uiSize)
				iErr = put_user(SdoObject.m_uiSize,
						(unsigned int *)(ulArg_p +
								 (unsigned long)
								 &SdoObject.
								 m_uiSize -
								 (unsigned long)
								 &SdoObject));
				if (iErr != 0) {
					iRet = -EIO;
					goto Exit;
				}
			} else if (EplRet != kEplApiTaskDeferred) {	// error occurred
				vfree(pBufHeader);
			}

			iRet = (int)EplRet;
			break;
		}

	case EPLLIN_CMD_WRITE_OBJECT:
		{
			tEplLinSdoObject SdoObject;
			void *pData;
			tEplLinSdoBufHeader *pBufHeader;
			tEplSdoComConHdl *pSdoComConHdl;

			iErr =
			    copy_from_user(&SdoObject, (const void *)ulArg_p,
					   sizeof(SdoObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if ((SdoObject.m_le_pData == NULL)
			    || (SdoObject.m_uiSize == 0)) {
				iRet = (int)kEplApiInvalidParam;
				goto Exit;
			}

			pBufHeader =
			    (tEplLinSdoBufHeader *)
			    vmalloc(sizeof(tEplLinSdoBufHeader) +
				    SdoObject.m_uiSize);
			if (pBufHeader == NULL) {	// no memory available
				iRet = -ENOMEM;
				goto Exit;
			}
			// initialize temporary buffer
			pBufHeader->m_pUserArg = SdoObject.m_pUserArg;	// original user argument pointer
			pBufHeader->m_pData = SdoObject.m_le_pData;	// original data pointer from app
			pData = pBufHeader + 1;	// payload starts right after the header

			iErr =
			    copy_from_user(pData, SdoObject.m_le_pData,
					   SdoObject.m_uiSize);

			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if (SdoObject.m_fValidSdoComConHdl != FALSE) {
				pSdoComConHdl = &SdoObject.m_SdoComConHdl;
			} else {
				pSdoComConHdl = NULL;
			}

			EplRet =
			    EplApiWriteObject(pSdoComConHdl,
					      SdoObject.m_uiNodeId,
					      SdoObject.m_uiIndex,
					      SdoObject.m_uiSubindex, pData,
					      SdoObject.m_uiSize,
					      SdoObject.m_SdoType, pBufHeader);

			// return actual SDO handle (SdoObject.m_SdoComConHdl)
			iErr = put_user(SdoObject.m_SdoComConHdl,
					(unsigned int *)(ulArg_p +
							 (unsigned long)
							 &SdoObject.
							 m_SdoComConHdl -
							 (unsigned long)
							 &SdoObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if (EplRet != kEplApiTaskDeferred) {	// succeeded or error occurred, but task not deferred
				vfree(pBufHeader);
			}

			iRet = (int)EplRet;
			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_FREE_SDO_CHANNEL:
		{
			// forward SDO handle to EPL stack
			EplRet =
			    EplApiFreeSdoChannel((tEplSdoComConHdl) ulArg_p);

			iRet = (int)EplRet;
			break;
		}

#if (((EPL_MODULE_INTEGRATION) & (EPL_MODULE_NMT_MN)) != 0)
		// ----------------------------------------------------------
	case EPLLIN_CMD_MN_TRIGGER_STATE_CHANGE:
		{
			tEplLinNodeCmdObject NodeCmdObject;

			iErr =
			    copy_from_user(&NodeCmdObject,
					   (const void *)ulArg_p,
					   sizeof(NodeCmdObject));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			EplRet =
			    EplApiMnTriggerStateChange(NodeCmdObject.m_uiNodeId,
						       NodeCmdObject.
						       m_NodeCommand);
			iRet = (int)EplRet;
			break;
		}
#endif

		// ----------------------------------------------------------
	case EPLLIN_CMD_GET_EVENT:
		{
			tEplLinEvent Event;

			// save event structure
			iErr =
			    copy_from_user(&Event, (const void *)ulArg_p,
					   sizeof(Event));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}
			// save return code from application's event callback function
			RetCbEvent_g = Event.m_RetCbEvent;

			if (RetCbEvent_g == kEplShutdown) {
				// pass control to event queue kernel thread, but signal termination
				atomic_set(&AtomicEventState_g,
					   EVENT_STATE_TERM);
				wake_up_interruptible(&WaitQueueCbEvent_g);
				// exit with error -> EplApiProcess() will leave the infinite loop
				iRet = 1;
				goto Exit;
			}
			// pass control to event queue kernel thread
			atomic_set(&AtomicEventState_g, EVENT_STATE_IOCTL);
			wake_up_interruptible(&WaitQueueCbEvent_g);

			// sleep on our own wait queue
			iErr = wait_event_interruptible(WaitQueueProcess_g,
							(atomic_read
							 (&AtomicEventState_g)
							 == EVENT_STATE_READY)
							||
							(atomic_read
							 (&AtomicEventState_g)
							 == EVENT_STATE_TERM));
			if (iErr != 0) {	// waiting was interrupted by signal
				// pass control to event queue kernel thread, but signal termination
				atomic_set(&AtomicEventState_g,
					   EVENT_STATE_TERM);
				wake_up_interruptible(&WaitQueueCbEvent_g);
				// exit with this error -> EplApiProcess() will leave the infinite loop
				iRet = iErr;
				goto Exit;
			} else if (atomic_read(&AtomicEventState_g) == EVENT_STATE_TERM) {	// termination in progress
				// pass control to event queue kernel thread, but signal termination
				wake_up_interruptible(&WaitQueueCbEvent_g);
				// exit with this error -> EplApiProcess() will leave the infinite loop
				iRet = 1;
				goto Exit;
			}
			// copy event to user space
			iErr =
			    copy_to_user(Event.m_pEventType, &EventType_g,
					 sizeof(EventType_g));
			if (iErr != 0) {	// not all data could be copied
				iRet = -EIO;
				goto Exit;
			}
			// $$$ d.k. perform SDO event processing
			if (EventType_g == kEplApiEventSdo) {
				void *pData;
				tEplLinSdoBufHeader *pBufHeader;

				pBufHeader =
				    (tEplLinSdoBufHeader *) pEventArg_g->m_Sdo.
				    m_pUserArg;
				pData = pBufHeader + 1;	// payload follows the header

				if (pEventArg_g->m_Sdo.m_SdoAccessType ==
				    kEplSdoAccessTypeRead) {
					// copy read data to user space
					iErr =
					    copy_to_user(pBufHeader->m_pData,
							 pData,
							 pEventArg_g->m_Sdo.
							 m_uiTransferredByte);
					if (iErr != 0) {	// not all data could be copied
						iRet = -EIO;
						goto Exit;
					}
				}
				pEventArg_g->m_Sdo.m_pUserArg =
				    pBufHeader->m_pUserArg;
				vfree(pBufHeader);
			}

			iErr =
			    copy_to_user(Event.m_pEventArg, pEventArg_g,
					 min(sizeof(tEplApiEventArg),
					     Event.m_uiEventArgSize));
			if (iErr != 0) {	// not all data could be copied
				iRet = -EIO;
				goto Exit;
			}
			// return to EplApiProcess(), which will call the application's event callback function
			iRet = 0;

			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_PI_SETUP:
		{
			EplRet = EplApiProcessImageSetup();
			iRet = (int)EplRet;

			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_PI_IN:
		{
			tEplApiProcessImage ProcessImageIn;

			// save process image structure
			iErr =
			    copy_from_user(&ProcessImageIn,
					   (const void *)ulArg_p,
					   sizeof(ProcessImageIn));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}
			// pass control to event queue kernel thread
			atomic_set(&AtomicSyncState_g, EVENT_STATE_IOCTL);

			// sleep on our own wait queue
			iErr = wait_event_interruptible(WaitQueuePI_In_g,
							(atomic_read
							 (&AtomicSyncState_g) ==
							 EVENT_STATE_READY)
							||
							(atomic_read
							 (&AtomicSyncState_g) ==
							 EVENT_STATE_TERM));
			if (iErr != 0) {	// waiting was interrupted by signal
				// pass control to sync kernel thread, but signal termination
				atomic_set(&AtomicSyncState_g,
					   EVENT_STATE_TERM);
				wake_up_interruptible(&WaitQueueCbSync_g);
				// exit with this error -> application will leave the infinite loop
				iRet = iErr;
				goto Exit;
			} else if (atomic_read(&AtomicSyncState_g) == EVENT_STATE_TERM) {	// termination in progress
				// pass control to sync kernel thread, but signal termination
				wake_up_interruptible(&WaitQueueCbSync_g);
				// exit with this error -> application will leave the infinite loop
				iRet = 1;
				goto Exit;
			}
			// exchange process image
			EplRet = EplApiProcessImageExchangeIn(&ProcessImageIn);

			// return to EplApiProcessImageExchangeIn()
			iRet = (int)EplRet;

			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_PI_OUT:
		{
			tEplApiProcessImage ProcessImageOut;

			// save process image structure
			iErr =
			    copy_from_user(&ProcessImageOut,
					   (const void *)ulArg_p,
					   sizeof(ProcessImageOut));
			if (iErr != 0) {
				iRet = -EIO;
				goto Exit;
			}

			if (atomic_read(&AtomicSyncState_g) !=
			    EVENT_STATE_READY) {
				iRet = (int)kEplInvalidOperation;
				goto Exit;
			}
			// exchange process image
			EplRet =
			    EplApiProcessImageExchangeOut(&ProcessImageOut);

			// pass control to sync kernel thread
			atomic_set(&AtomicSyncState_g, EVENT_STATE_TERM);
			wake_up_interruptible(&WaitQueueCbSync_g);

			// return to EplApiProcessImageExchangeOut()
			iRet = (int)EplRet;

			break;
		}

		// ----------------------------------------------------------
	case EPLLIN_CMD_NMT_COMMAND:
		{
			// forward NMT command to EPL stack
			EplRet = EplApiExecNmtCommand((tEplNmtEvent) ulArg_p);

			iRet = (int)EplRet;

			break;
		}

		// ----------------------------------------------------------
	default:
		{
			break;
		}
	}

Exit:

//    TRACE1("EPL: - EplLinIoctl (iRet=%d)\n", iRet);
	return (iRet);

}
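For reference, the SDO transfer buffer used by EPLLIN_CMD_READ_OBJECT / EPLLIN_CMD_WRITE_OBJECT is a single vmalloc() block holding the bookkeeping header followed by the payload; a sketch of the layout (the helper name is illustrative):

/* One vmalloc() block: [tEplLinSdoBufHeader][payload of m_uiSize bytes].
 * pBufHeader_p + 1 advances by exactly one header, i.e. to the payload. */
static void *EplLinSdoBufPayload(tEplLinSdoBufHeader *pBufHeader_p)
{
	return (void *)(pBufHeader_p + 1);
}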
/**
 * shm_write_msg() - write message to shared memory
 * @shrm:	pointer to the shrm device information structure
 * @l2_header:	L2 header
 * @addr:	pointer to the message
 * @length:	length of the message to be written
 *
 * This function is called from the net or char interface driver's write
 * operation. Prior to calling this function the message is copied from the
 * user-space buffer to a kernel buffer. Based on the L2 header, this function
 * routes the message to the respective channel and FIFO, then calls the FIFO
 * write function, which writes the message to the physical device.
 */
int shm_write_msg(struct shrm_dev *shrm, u8 l2_header,
					void *addr, u32 length)
{
	u8 channel = 0;
	int ret;

	dev_dbg(shrm->dev, "%s IN\n", __func__);

	if (boot_state != BOOT_DONE) {
		dev_err(shrm->dev,
				"%s called before boot is done\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	if ((l2_header == L2_HEADER_ISI) ||
			(l2_header == L2_HEADER_RPC) ||
			(l2_header == L2_HEADER_SECURITY) ||
			(l2_header == L2_HEADER_COMMON_SIMPLE_LOOPBACK) ||
			(l2_header == L2_HEADER_COMMON_ADVANCED_LOOPBACK) ||
			(l2_header == L2_HEADER_IPCCTRL) ||
			(l2_header == L2_HEADER_IPCDATA)) {
		channel = 0;
		if (shrm_common_tx_state == SHRM_SLEEP_STATE)
			shrm_common_tx_state = SHRM_PTR_FREE;
		else if (shrm_common_tx_state == SHRM_IDLE)
			shrm_common_tx_state = SHRM_PTR_FREE;

	} else if ((l2_header == L2_HEADER_AUDIO) ||
			(l2_header == L2_HEADER_AUDIO_SIMPLE_LOOPBACK) ||
			(l2_header == L2_HEADER_AUDIO_ADVANCED_LOOPBACK)) {
		if (shrm_audio_tx_state == SHRM_SLEEP_STATE)
			shrm_audio_tx_state = SHRM_PTR_FREE;
		else if (shrm_audio_tx_state == SHRM_IDLE)
			shrm_audio_tx_state = SHRM_PTR_FREE;

		channel = 1;
	} else {
		ret = -ENODEV;
		goto out;
	}
	ret = shm_write_msg_to_fifo(shrm, channel, l2_header, addr, length);
	if (ret < 0) {
		dev_err(shrm->dev, "write message to fifo failed\n");
		if (ret == -EAGAIN) {
			/* Start a timer so as to handle this gently */
			if (!atomic_read(&fifo_full)) {
				atomic_set(&fifo_full, 1);
				hrtimer_start(&fifo_full_timer, ktime_set(
						FIFO_FULL_TIMEOUT, 0),
						HRTIMER_MODE_REL);
			}
		}
		return ret;
	}
	/*
	 * notify only if new msg copied is the only unread one
	 * otherwise it means that reading process is ongoing
	 */
	if (is_the_only_one_unread_message(shrm, channel, length)) {

		/* Send Message Pending Notification to CMT */
		if (channel == 0)
			queue_work(shrm->shm_common_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_0);
		else
			queue_work(shrm->shm_audio_ch_wr_wq,
					&shrm->send_ac_msg_pend_notify_1);

	}

	dev_dbg(shrm->dev, "%s OUT\n", __func__);
	return 0;

out:
	return ret;
}
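A hypothetical caller illustrating the contract stated in the kernel-doc above (the message is copied into kernel memory before shm_write_msg() is invoked; the function name and buffer handling are assumptions, not the actual char-interface code):

static ssize_t shrm_char_write_sketch(struct shrm_dev *shrm, u8 l2_header,
				      const char __user *buf, size_t len)
{
	void *kbuf;
	int ret;

	kbuf = kmalloc(len, GFP_KERNEL);	/* kernel copy of the message */
	if (!kbuf)
		return -ENOMEM;
	if (copy_from_user(kbuf, buf, len)) {
		kfree(kbuf);
		return -EFAULT;
	}
	ret = shm_write_msg(shrm, l2_header, kbuf, len);
	kfree(kbuf);
	return ret < 0 ? ret : len;
}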
Example #4
/**
 *	iowarrior_probe
 *
 *	Called by the usb core when a new device is connected that it thinks
 *	this driver might be interested in.
 */
static int iowarrior_probe(struct usb_interface *interface,
			   const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct iowarrior *dev = NULL;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&interface->dev, "Out of memory\n");
		return retval;
	}

	mutex_init(&dev->mutex);

	atomic_set(&dev->intr_idx, 0);
	atomic_set(&dev->read_idx, 0);
	spin_lock_init(&dev->intr_idx_lock);
	atomic_set(&dev->overflow_flag, 0);
	init_waitqueue_head(&dev->read_wait);
	atomic_set(&dev->write_busy, 0);
	init_waitqueue_head(&dev->write_wait);

	dev->udev = udev;
	dev->interface = interface;

	iface_desc = interface->cur_altsetting;
	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);

	/* set up the endpoint information */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (usb_endpoint_is_int_in(endpoint))
			dev->int_in_endpoint = endpoint;
		if (usb_endpoint_is_int_out(endpoint))
			/* this one will match for the IOWarrior56 only */
			dev->int_out_endpoint = endpoint;
	}
	if (!dev->int_in_endpoint) {
		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
		retval = -ENODEV;
		goto error;
	}
	/* we check report_size often, so cache it in our machine's endianness */
	dev->report_size = le16_to_cpu(dev->int_in_endpoint->wMaxPacketSize);
	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
		/* IOWarrior56 has wMaxPacketSize different from report size */
		dev->report_size = 7;

	/* create the urb and buffer for reading */
	dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->int_in_urb) {
		dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n");
		goto error;
	}
	dev->int_in_buffer = kmalloc(dev->report_size, GFP_KERNEL);
	if (!dev->int_in_buffer) {
		dev_err(&interface->dev, "Couldn't allocate int_in_buffer\n");
		goto error;
	}
	usb_fill_int_urb(dev->int_in_urb, dev->udev,
			 usb_rcvintpipe(dev->udev,
					dev->int_in_endpoint->bEndpointAddress),
			 dev->int_in_buffer, dev->report_size,
			 iowarrior_callback, dev,
			 dev->int_in_endpoint->bInterval);
	/* create an internal buffer for interrupt data from the device */
	dev->read_queue =
	    kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER),
		    GFP_KERNEL);
	if (!dev->read_queue) {
		dev_err(&interface->dev, "Couldn't allocate read_queue\n");
		goto error;
	}
	/* Get the serial-number of the chip */
	memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial));
	usb_string(udev, udev->descriptor.iSerialNumber, dev->chip_serial,
		   sizeof(dev->chip_serial));
	if (strlen(dev->chip_serial) != 8)
		memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial));

	/* Set the idle timeout to 0, if this is interface 0 */
	if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
		usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				0x0A,
				USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
				0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	}
	/* allow device read and ioctl */
	dev->present = 1;

	/* we can register the device now, as it is ready */
	usb_set_intfdata(interface, dev);

	retval = usb_register_dev(interface, &iowarrior_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev, "Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	dev->minor = interface->minor;

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev, "IOWarrior product=0x%x, serial=%s interface=%d "
		 "now attached to iowarrior%d\n", dev->product_id, dev->chip_serial,
		 iface_desc->desc.bInterfaceNumber, dev->minor - IOWARRIOR_MINOR_BASE);
	return retval;

error:
	iowarrior_delete(dev);
	return retval;
}
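Newer kernels provide helpers for the endpoint scan done in the probe loop above; a sketch of the equivalent lookup (assuming usb_find_last_int_in_endpoint() and usb_find_last_int_out_endpoint() are available in the target tree):

/* Equivalent endpoint lookup: the interrupt-in endpoint is mandatory,
 * interrupt-out exists only on the IOWarrior56. */
static int iowarrior_find_endpoints(struct usb_host_interface *iface_desc,
				    struct iowarrior *dev)
{
	int res;

	res = usb_find_last_int_in_endpoint(iface_desc,
					    &dev->int_in_endpoint);
	if (res)
		return res;	/* no interrupt-in endpoint: -ENXIO */

	/* optional on non-IOW56 devices, so ignore the result */
	usb_find_last_int_out_endpoint(iface_desc, &dev->int_out_endpoint);
	return 0;
}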
Example #5
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}
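The counterpart (a sketch modelled on kernel/stop_machine.c): each finished work item decrements nr_todo, and the last one fires the completion that the initiator waits on.

static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	/* the last stopper to finish wakes the initiator */
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}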
Example #6
void mdp3_ctrl_reset_countdown(struct mdp3_session_data *session,
		struct msm_fb_data_type *mfd)
{
	if (mdp3_ctrl_get_intf_type(mfd) == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
		atomic_set(&session->vsync_countdown, VSYNC_EXPIRE_TICK);
}
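A sketch of how such a countdown is typically consumed (the vsync-tick handler and the clk_off_work field are assumptions based on this driver's naming, not verified code):

static void mdp3_vsync_tick_sketch(struct mdp3_session_data *session)
{
	/* one tick per vsync; gate the clocks once the grace period
	 * armed by mdp3_ctrl_reset_countdown() has expired */
	if (atomic_read(&session->vsync_countdown) > 0 &&
	    atomic_dec_return(&session->vsync_countdown) == 0)
		schedule_work(&session->clk_off_work);	/* assumed field */
}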
Example #7
static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
{
	int rc = 0;
	struct mdp3_session_data *mdp3_session;
	struct mdss_panel_data *panel;

	pr_debug("mdp3_ctrl_off\n");
	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
		!mdp3_session->intf) {
		pr_err("mdp3_ctrl_on no device");
		return -ENODEV;
	}

	panel = mdp3_session->panel;
	mutex_lock(&mdp3_session->lock);

	if (panel && panel->set_backlight)
		panel->set_backlight(panel, 0);

	if (!mdp3_session->status) {
		pr_debug("fb%d is off already", mfd->index);
		goto off_error;
	}

	mdp3_ctrl_clk_enable(mfd, 1);

	mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);

	if (panel->event_handler)
		rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
	if (rc)
		pr_err("fail to turn off the panel\n");

	rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
	if (rc)
		pr_debug("fail to stop the MDP3 dma\n");
	msleep(20);

	mfd->panel_info->cont_splash_enabled = 0;

	mdp3_irq_deregister();

	pr_debug("mdp3_ctrl_off stop clock\n");
	if (mdp3_session->clk_on) {
		rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
		if (rc)
			pr_err("mdp clock resource release failed\n");

		pr_debug("mdp3_ctrl_off stop dsi controller\n");
		if (panel->event_handler)
			rc = panel->event_handler(panel,
				MDSS_EVENT_BLANK, NULL);
		if (rc)
			pr_err("fail to turn off the panel\n");
	}

	mdp3_ctrl_notifier_unregister(mdp3_session,
		&mdp3_session->mfd->mdp_sync_pt_data.notifier);
	mdp3_enable_regulator(false);
	mdp3_session->vsync_enabled = 0;
	atomic_set(&mdp3_session->vsync_countdown, 0);
	atomic_set(&mdp3_session->dma_done_cnt, 0);
	mdp3_session->clk_on = 0;
	mdp3_session->in_splash_screen = 0;
off_error:
	mdp3_session->status = 0;
	if (!panel->panel_info.dynamic_switch_pending) {
		mdp3_bufq_deinit(&mdp3_session->bufq_out);
		if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
			mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
			mdp3_bufq_deinit(&mdp3_session->bufq_in);
		}
	}
	mutex_unlock(&mdp3_session->lock);
	return 0;
}
static int sdhci_pltfm_remove(struct platform_device *pdev)
{
	struct sdio_dev *dev = platform_get_drvdata(pdev);
	struct sdhci_host *host = dev->host;
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int dead;
	u32 scratch;
	int ret = 0;

	atomic_set(&dev->initialized, 0);
	gDevs[dev->devtype] = NULL;

	if (dev->devtype == SDIO_DEV_TYPE_SDMMC && dev->cd_gpio >= 0) {
		free_irq(gpio_to_irq(dev->cd_gpio), dev);
		gpio_free(dev->cd_gpio);
	}

	if (dev->vdd_sdxc_regulator && dev->devtype == SDIO_DEV_TYPE_SDMMC) {
		/* Playing safe- if regulator is enabled, disable it first */
		if (regulator_is_enabled(dev->vdd_sdxc_regulator) > 0)
			regulator_disable(dev->vdd_sdxc_regulator);

		regulator_put(dev->vdd_sdxc_regulator);
	}

	device_remove_file(&pdev->dev, &dev_attr_card_ctrl);

	if (sdhci_pltfm_rpm_enabled(dev)) {
		pm_runtime_get_sync(dev->dev);
	} else {
		ret = sdhci_pltfm_clk_enable(dev, 1);
		if (ret)
			dev_err(dev->dev,
				"enable clock during pltfm remove failed\n");
	}

	dead = 0;
	scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;
	sdhci_remove_host(host, dead);

	if (sdhci_pltfm_rpm_enabled(dev)) {
		pm_runtime_put_sync_suspend(dev->dev);
	} else {
		ret = sdhci_pltfm_clk_enable(dev, 0);
		if (ret)
			dev_err(dev->dev,
				"disable clock during pltfm remove failed\n");
	}

#if !defined(CONFIG_MACH_BCM2850_FPGA) && !defined(CONFIG_MACH_BCM_FPGA)
	clk_disable(dev->sleep_clk);
	clk_put(dev->sleep_clk);
	clk_put(dev->peri_clk);
#endif

	sdhci_pltfm_runtime_pm_forbid(dev->dev);
	kfree(dev->cd_int_wake_lock_name);
	wake_lock_destroy(&dev->cd_int_wake_lock);
	platform_set_drvdata(pdev, NULL);
	if (dev->devtype == SDIO_DEV_TYPE_EMMC ||
		dev->devtype == SDIO_DEV_TYPE_SDMMC) {
		kfree(pdev->dev.platform_data);
		pdev->dev.platform_data = NULL;
	}
	iounmap(host->ioaddr);
	release_mem_region(iomem->start, resource_size(iomem));
	sdhci_free_host(host);	/* also frees dev, embedded in the host allocation */

	return 0;
}
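Why the remove path above must not kfree() the sdio_dev separately: sdhci_alloc_host() carves the private area out of the host allocation, so sdhci_free_host() releases both (a sketch of that ownership):

static void sdio_dev_free_sketch(struct sdhci_host *host)
{
	struct sdio_dev *dev = sdhci_priv(host);	/* points inside host */

	(void)dev;		/* never kfree(dev) on its own */
	sdhci_free_host(host);	/* frees host and the embedded sdio_dev */
}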
static int sdhci_pltfm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdio_dev *dev;
	struct resource *iomem;
	struct sdio_platform_cfg *hw_cfg = NULL;
	char devname[MAX_DEV_NAME_SIZE];
	int ret = 0;
	char *emmc_regulator = NULL;

	pr_debug("%s: ENTRY\n", __func__);

	BUG_ON(pdev == NULL);

	hw_cfg = (struct sdio_platform_cfg *)pdev->dev.platform_data;
	if (pdev->dev.of_node) {
		u32 val;
		const char *prop;
		if (!pdev->dev.platform_data)
			hw_cfg = kzalloc(sizeof(struct sdio_platform_cfg),
				GFP_KERNEL);

		if (!hw_cfg) {
			dev_err(&pdev->dev,
				"unable to allocate mem for private data\n");
			ret = -ENOMEM;
			goto err;
		}
		if (of_property_read_u32(pdev->dev.of_node, "id", &val)) {
			dev_err(&pdev->dev, "id read failed in %s\n", __func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->id = val;
		pdev->id = val;

		if (of_property_read_u32(pdev->dev.of_node, "data-pullup",
			&val)) {
			dev_err(&pdev->dev, "data-pullup read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->data_pullup = val;

		if (of_property_read_u32(pdev->dev.of_node, "devtype", &val)) {
			dev_err(&pdev->dev, "devtype read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->devtype = val;

		if (of_property_read_u32(pdev->dev.of_node, "flags", &val)) {
			dev_err(&pdev->dev, "flags read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->flags = val;

		if (of_property_read_u32(pdev->dev.of_node, "quirks", &val)) {
			dev_warn(&pdev->dev, "quirks not available in %s\n",
			__func__);
			val = 0;
		}

		hw_cfg->quirks = val;

		if (of_property_read_u32(pdev->dev.of_node, "quirks2", &val)) {
			dev_warn(&pdev->dev, "quirks2 not available in %s\n",
			__func__);
			val = 0;
		}

		hw_cfg->quirks2 = val;

		if (of_property_read_u32(pdev->dev.of_node, "pm_caps", &val)) {
			dev_warn(&pdev->dev, "pm_caps not available in %s\n",
			__func__);
			val = 0;
		}

		hw_cfg->pm_caps = val;

		if (of_property_read_string(pdev->dev.of_node, "peri-clk-name",
			&prop)) {
			dev_err(&pdev->dev, "peri-clk-name read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->peri_clk_name = (char *)prop;

		if (of_property_read_string(pdev->dev.of_node, "ahb-clk-name",
			&prop)) {
			dev_err(&pdev->dev, "ahb-clk-name read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->ahb_clk_name = (char *)prop;

		if (of_property_read_string(pdev->dev.of_node, "sleep-clk-name",
			&prop)) {
			dev_err(&pdev->dev, "sleep-clk-name read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->sleep_clk_name = (char *)prop;

		if (of_property_read_u32(pdev->dev.of_node, "peri-clk-rate",
			&val)) {
			dev_err(&pdev->dev, "peri-clk-rate read failed in %s\n",
			__func__);
			goto err_free_priv_data_mem;
		}

		hw_cfg->peri_clk_rate = val;

		if (hw_cfg->devtype == SDIO_DEV_TYPE_SDMMC) {
			if (of_property_read_string(pdev->dev.of_node,
				"vddo-regulator-name", &prop)) {
				dev_err(&pdev->dev, "vddo-regulator-name read "\
				"failed in %s\n", __func__);
				goto err_free_priv_data_mem;
			}

			hw_cfg->vddo_regulator_name = (char *)prop;

			if (of_property_read_string(pdev->dev.of_node,
				"vddsdxc-regulator-name", &prop)) {
				dev_err(&pdev->dev, "vddsdxc-regulator-name"\
				"read failed in %s\n", __func__);
				goto err_free_priv_data_mem;
			}

			hw_cfg->vddsdxc_regulator_name = (char *)prop;


			if (of_property_read_u32(pdev->dev.of_node,
				"cd-gpio", &val)) {
				dev_err(&pdev->dev, "cd-gpio read failed in %s\n",
				__func__);
				goto err_free_priv_data_mem;
			}

			hw_cfg->cd_gpio = val;
		}

		else if (hw_cfg->devtype == SDIO_DEV_TYPE_EMMC) {

			if (of_property_read_u32(pdev->dev.of_node,
				"is-8bit", &val)) {
				dev_err(&pdev->dev, "is-8bit read failed in %s\n",
				__func__);
				goto err_free_priv_data_mem;
			}

			hw_cfg->is_8bit = val;
			if (!(of_property_read_string(pdev->dev.of_node,
					"vddsdmmc-regulator-name", &prop)))
				emmc_regulator = (char *)prop;
		}

		pdev->dev.platform_data = hw_cfg;
	}
	if (!hw_cfg) {
		dev_err(&pdev->dev, "hw_cfg is NULL\n");
		ret = -ENOMEM;
		goto err;
	}

	if (hw_cfg->devtype >= SDIO_DEV_TYPE_MAX) {
		dev_err(&pdev->dev, "unknown device type\n");
		ret = -EFAULT;
		goto err;
	}
	pr_debug("%s: GET PLATFORM RESOURCES\n", __func__);

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		ret = -ENOMEM;
		goto err;
	}

	/* Some PCI-based MFD need the parent here */
	if (pdev->dev.parent != &platform_bus)
		host =
		    sdhci_alloc_host(pdev->dev.parent, sizeof(struct sdio_dev));
	else
		host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdio_dev));
	if (IS_ERR(host)) {
		ret = PTR_ERR(host);
		goto err;
	}

	pr_debug("%s: ALLOC HOST\n", __func__);

	host->hw_name = "bcm_kona_sd";
	host->ops = &sdhci_pltfm_ops;
	host->irq = platform_get_irq(pdev, 0);
	host->quirks = SDHCI_QUIRK_NO_CARD_NO_RESET
	    | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
	    | SDHCI_QUIRK_32BIT_DMA_ADDR
	    | SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE;


#ifndef CONFIG_SUPPORT_UHS_CARD
	if (hw_cfg->devtype == SDIO_DEV_TYPE_SDMMC)
		hw_cfg->quirks2 = SDHCI_QUIRK2_HOST_DISABLE_UHS;
	if (hw_cfg->devtype == SDIO_DEV_TYPE_WIFI)
		hw_cfg->quirks2 = SDHCI_QUIRK2_HOST_DISABLE_UHS;
#endif

#ifdef CONFIG_MACH_RHEA_DALTON2_EB30
	host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
#endif
	host->quirks |= hw_cfg->quirks;
	host->quirks2 |= hw_cfg->quirks2;

        pr_debug("%s: GET IRQ\n", __func__);

	if (hw_cfg->flags & KONA_SDIO_FLAGS_DEVICE_NON_REMOVABLE)
		host->mmc->caps |= MMC_CAP_NONREMOVABLE;

	if (!request_mem_region(iomem->start, resource_size(iomem),
				mmc_hostname(host->mmc))) {
		dev_err(&pdev->dev, "cannot request region\n");
		ret = -EBUSY;
		goto err_free_host;
	}

	host->ioaddr = ioremap(iomem->start, resource_size(iomem));
	if (!host->ioaddr) {
		dev_err(&pdev->dev, "failed to remap registers\n");
		ret = -ENOMEM;
		goto err_free_mem_region;
	}

	pr_debug("%s: MEM and IO REGION OKAY\n", __func__);

	dev = sdhci_priv(host);
	dev->dev = &pdev->dev;
	dev->host = host;
	dev->devtype = hw_cfg->devtype;
	dev->cd_gpio = hw_cfg->cd_gpio;
	host->mmc->parent = dev->dev;
	if (dev->devtype == SDIO_DEV_TYPE_WIFI)
		dev->wifi_gpio = &hw_cfg->wifi_gpio;
	if (dev->devtype == SDIO_DEV_TYPE_EMMC && emmc_regulator) {
		dev->vdd_sdxc_regulator = regulator_get(NULL, emmc_regulator);
		if (IS_ERR(dev->vdd_sdxc_regulator)) {
			dev->vdd_sdxc_regulator = NULL;
		}
	}

	if (dev->devtype == SDIO_DEV_TYPE_EMMC)
		host->detect_delay = 0;
	else
		host->detect_delay = 200;

	pr_debug("%s: DEV TYPE %x\n", __func__, dev->devtype);

	gDevs[dev->devtype] = dev;

	platform_set_drvdata(pdev, dev);

	snprintf(devname, sizeof(devname), "%s%d", DEV_NAME, pdev->id);

	/* enable clocks */
#ifdef CONFIG_MACH_BCM2850_FPGA
	if (clock) {		/* clock override */
		dev->clk_hz = clock;
	} else {
		dev->clk_hz = gClock[dev->devtype];
	}
#elif defined(CONFIG_MACH_BCM_FPGA)
	dev->clk_hz = hw_cfg->peri_clk_rate;
#else
	/* peripheral clock */
	dev->peri_clk = clk_get(&pdev->dev, hw_cfg->peri_clk_name);
	if (IS_ERR_OR_NULL(dev->peri_clk)) {
		ret = -EINVAL;
		goto err_unset_pltfm;
	}
	ret = clk_set_rate(dev->peri_clk, hw_cfg->peri_clk_rate);
	if (ret)
		goto err_peri_clk_put;

	/* sleep clock */
	dev->sleep_clk = clk_get(&pdev->dev, hw_cfg->sleep_clk_name);
	if (IS_ERR_OR_NULL(dev->sleep_clk)) {
		ret = -EINVAL;
		goto err_peri_clk_put;
	}

	ret = clk_enable(dev->sleep_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable sleep clock for %s\n",
			devname);
		goto err_sleep_clk_put;
	}

	ret = sdhci_pltfm_clk_enable(dev, 1);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize core clock for %s\n",
			devname);
		goto err_sleep_clk_disable;
	}

	dev->clk_hz = clk_get_rate(dev->peri_clk);
#endif

	dev->suspended = 0;

	if (hw_cfg->vddo_regulator_name) {
		ret =
		    sdhci_pltfm_regulator_init(dev,
					       hw_cfg->vddo_regulator_name);
#ifndef BCM_REGULATOR_SKIP_QUIRK
		if (ret < 0)
			goto err_term_clk;
#endif
	}

	if (hw_cfg->vddsdxc_regulator_name &&
			dev->devtype == SDIO_DEV_TYPE_SDMMC) {
		ret =
		    sdhci_pltfm_regulator_sdxc_init(dev,
					       hw_cfg->vddsdxc_regulator_name);
#ifndef BCM_REGULATOR_SKIP_QUIRK
		if (ret < 0)
			goto err_term_clk;
#endif
	}

	if (sd_detection_cmd_dev == NULL) {
		sd_detection_cmd_dev = device_create(sec_class, NULL, 0,
						     NULL, "sdcard");
		if (IS_ERR(sd_detection_cmd_dev))
			pr_err("Fail to create sysfs dev\n");

		if (device_create_file(sd_detection_cmd_dev,
				       &dev_attr_status) < 0)
			pr_err("Fail to create sysfs file\n");
	}

	mutex_init(&dev->regulator_lock);
	kona_sdio_regulator_power(dev, 1);

	ret = bcm_kona_sd_reset(dev);
	if (ret)
		goto err_term_clk;

	ret = bcm_kona_sd_init(dev);
	if (ret)
		goto err_reset;

	if (hw_cfg->is_8bit)
		host->mmc->caps |= MMC_CAP_8_BIT_DATA;

	/* Note that sdhci_add_host calls --> mmc_add_host, which in turn
	 * checks for the flag MMC_PM_IGNORE_PM_NOTIFY before registering a PM
	 * notifier for the specific instance of SDIO host controller. For
	 * WiFi case, we don't want to get notified, because then from there
	 * mmc_power_off is called which will reset the Host registers that
	 * need to be re-programmed by starting the SDIO handshake again. We want
	 * to prevent this in case of WiFi. So enable MMC_PM_IGNORE_PM_NOTIFY
	 * flag, so that notifier never gets registered.
	 */
	if (dev->devtype == SDIO_DEV_TYPE_WIFI) {
		/* The Wireless LAN drivers call the API sdio_get_host_pm_caps
		 * to know the PM capabilities of the driver, which would
		 * return pm_caps. While the internal code decides based on
		 * pm_flags, the pm_caps also should reflect the same.
		 */
		host->mmc->pm_caps =
		    MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
		host->mmc->pm_flags =
		    MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
	}

	host->mmc->pm_caps |= hw_cfg->pm_caps;

#if !defined(CONFIG_MACH_BCM_FPGA_E)
	/* Enable 1.8V DDR operation for e.MMC */
	if (dev->devtype == SDIO_DEV_TYPE_EMMC)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;
#endif

	/* Don't issue SLEEP command to e.MMC device */
	if (dev->devtype == SDIO_DEV_TYPE_EMMC)
		host->mmc->caps2 |= MMC_CAP2_NO_SLEEP_CMD;

	/*
	 * This has to be done before sdhci_add_host.
	 * As soon as we add the host, request
	 * starts. If we don't enable this here, the
	 * runtime get and put of sdhci will fallback to
	 * clk_enable and clk_disable which will conflict
	 * with the PM runtime when it gets enabled just
	 * after sdhci_add_host. Now with this, the RPM
	 * calls will fail until RPM is enabled, but things
	 * will work well, as we have clocks enabled till the
	 * probe ends.
	 */

	dev->runtime_pm_enabled = 1;

	ret = sdhci_add_host(host);
	if (ret)
		goto err_reset;

	ret = device_create_file(&pdev->dev, &dev_attr_card_ctrl);
	if (ret)
		goto err_rm_host;

	/* Should be done only after sdhci_add_host */
	sdhci_pltfm_runtime_pm_init(dev->dev);

	if (dev->devtype == SDIO_DEV_TYPE_SDMMC) {
		/* support SD card detect interrupts for insert/removal */
		host->mmc->card_detect_cap = true;
	}

	/* if device is eMMC, emulate card insert right here */
	if (dev->devtype == SDIO_DEV_TYPE_EMMC) {
		ret = bcm_kona_sd_card_emulate(dev, 1);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to emulate card insertion\n");
			goto err_rm_sysfs;
		}
		pr_info("%s: card insert emulated!\n", devname);
	} else if (dev->devtype == SDIO_DEV_TYPE_SDMMC && dev->cd_gpio >= 0) {

		dev->cd_int_wake_lock_name = kasprintf(GFP_KERNEL,
				"%s_cd_int", devname);

		if (!dev->cd_int_wake_lock_name) {
			dev_err(&pdev->dev,
				"error allocating mem for wake_lock_name\n");
			goto err_rm_sysfs;
		}

		wake_lock_init(&dev->cd_int_wake_lock, WAKE_LOCK_SUSPEND,
				dev->cd_int_wake_lock_name);


		ret = gpio_request(dev->cd_gpio, "sdio cd");

		if (ret < 0) {
			dev_err(&pdev->dev, "Unable to request GPIO pin %d\n",
				dev->cd_gpio);
			goto err_rm_sysfs;
		}
		gpio_direction_input(dev->cd_gpio);

		ret = request_threaded_irq(gpio_to_irq(dev->cd_gpio),
					   cd_irq_handler,
					   sdhci_pltfm_cd_interrupt,
					   IRQF_TRIGGER_FALLING |
					   IRQF_TRIGGER_RISING  |
					   IRQF_NO_SUSPEND, "sdio cd", dev);
		if (ret) {
			dev_err(&pdev->dev,
				"Unable to request card detection irq=%d"
				" for gpio=%d\n",
				gpio_to_irq(dev->cd_gpio), dev->cd_gpio);
			goto err_free_cd_gpio;
		}

		/* Set debounce for SD Card detect to maximum value (128ms)
		 *
		 * NOTE-1: If gpio_set_debounce() returns error we still
		 * continue with the default debounce value set. Another reason
		 * for doing this is that on rhea-ray boards the SD Detect GPIO
		 * is on GPIO Expander and gpio_set_debounce() will return error
		 * and if we return error from here, then probe() would fail and
		 * SD detection would always fail.
		 *
		 * NOTE-2: We also give a msleep() of the "debounce" time here
		 * so that we give enough time for the debounce to stabilize
		 * before we read the gpio value in gpio_get_value_cansleep().
		 */
		ret =
		    gpio_set_debounce(dev->cd_gpio,
				      (SD_DETECT_GPIO_DEBOUNCE_128MS * 1000));
		if (ret < 0) {
			dev_err(&pdev->dev, "%s: gpio set debounce failed."
				"default debounce value assumed\n", __func__);
		}

		/* Sleep for 128ms to allow debounce to stabilize */
		msleep(SD_DETECT_GPIO_DEBOUNCE_128MS);

		/*
		 * Since the card detection GPIO interrupt is configured to be
		 * edge sensitive, check the initial GPIO value here, emulate
		 * only if the card is present
		 */
		if (gpio_get_value_cansleep(dev->cd_gpio) == 0)
			bcm_kona_sd_card_emulate(dev, 1);
		else
			/* If card is not present disable the regulator */
			kona_sdio_regulator_power(dev, 0);
	}

	/* Force insertion interrupt, in case of no card detect registered.
	 */
	if (dev->cd_gpio < 0)
		bcm_kona_sd_card_emulate(dev, 1);
#ifdef CONFIG_BRCM_UNIFIED_DHD_SUPPORT
	if ((dev->devtype == SDIO_DEV_TYPE_WIFI) &&
	    (hw_cfg->register_status_notify != NULL)) {
		hw_cfg->register_status_notify(kona_sdio_status_notify_cb,
					       host, host->mmc);
	}
	pr_debug("%s: CALL BACK IS REGISTERED\n", __func__);

#endif

	atomic_set(&dev->initialized, 1);
	sdhci_pltfm_clk_enable(dev, 0);

	pr_info("%s: initialized properly\n", devname);

	return 0;

err_free_cd_gpio:
	if (dev->devtype == SDIO_DEV_TYPE_SDMMC && dev->cd_gpio >= 0)
		gpio_free(dev->cd_gpio);

err_rm_sysfs:
	device_remove_file(&pdev->dev, &dev_attr_card_ctrl);

err_rm_host:
	sdhci_remove_host(host, 0);

err_reset:
	bcm_kona_sd_reset(dev);

err_term_clk:
	sdhci_pltfm_clk_enable(dev, 0);

#if !defined(CONFIG_MACH_BCM2850_FPGA) && !defined(CONFIG_MACH_BCM_FPGA)
err_sleep_clk_disable:
	clk_disable(dev->sleep_clk);

err_sleep_clk_put:
	clk_put(dev->sleep_clk);

err_peri_clk_put:
	clk_put(dev->peri_clk);

err_unset_pltfm:
	platform_set_drvdata(pdev, NULL);
	iounmap(host->ioaddr);
#endif

err_free_mem_region:
	release_mem_region(iomem->start, resource_size(iomem));

err_free_host:
	sdhci_free_host(host);

err_free_priv_data_mem:
	if (pdev->dev.of_node) {
		ret = -EFAULT;
		kfree(hw_cfg);
	}
err:
	pr_err("Probing of sdhci-pltfm %d failed: %d\n", pdev->id,
	       ret);
	return ret;
}
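The DT parsing above treats quirks/quirks2/pm_caps as optional properties with a zero default; a small helper sketch that captures that pattern (the name is illustrative):

static u32 of_read_u32_or_default(const struct device_node *np,
				  const char *name, u32 def)
{
	u32 val;

	/* optional property: fall back to the default instead of failing */
	if (of_property_read_u32(np, name, &val))
		return def;
	return val;
}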
Example #10
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
				"Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs*/
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
		fnic->ctlr.state = FIP_ST_NON_FIP;
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
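The 64-then-32-bit DMA mask dance above can be expressed with the newer dma_set_mask_and_coherent() helper; a sketch (assuming a kernel where that API is available):

static int fnic_set_dma_mask_sketch(struct pci_dev *pdev)
{
	int err;

	/* try 64-bit DMA first, fall back to 32-bit */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}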
/*
 * Init a fresh posix_acl
 */
void
posix_acl_init(struct posix_acl *acl, int count)
{
	atomic_set(&acl->a_refcount, 1);
	acl->a_count = count;
}
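Typical pairing (a sketch modelled on fs/posix_acl.c): allocate an acl with room for count entries, then initialize it.

static struct posix_acl *posix_acl_alloc_sketch(int count, gfp_t flags)
{
	size_t size = sizeof(struct posix_acl) +
		      count * sizeof(struct posix_acl_entry);
	struct posix_acl *acl = kmalloc(size, flags);

	if (acl)
		posix_acl_init(acl, count);
	return acl;
}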
static int debugfs_atomic_t_set(void *data, u64 val)
{
	atomic_set((atomic_t *)data, val);
	return 0;
}
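The matching getter and the file-operations pairing (modelled on fs/debugfs/file.c):

static int debugfs_atomic_t_get(void *data, u64 *val)
{
	*val = atomic_read((atomic_t *)data);
	return 0;
}
/* wire both accessors into a debugfs fops, printed as a signed value */
DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
			debugfs_atomic_t_set, "%lld\n");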
Example #13
int mdp4_lcdc_on(struct platform_device *pdev)
{
	int lcdc_width;
	int lcdc_height;
	int lcdc_bpp;
	int lcdc_border_clr;
	int lcdc_underflow_clr;
	int lcdc_hsync_skew;

	int hsync_period;
	int hsync_ctrl;
	int vsync_period;
	int display_hctl;
	int display_v_start;
	int display_v_end;
	int active_hctl;
	int active_h_start;
	int active_h_end;
	int active_v_start;
	int active_v_end;
	int ctrl_polarity;
	int h_back_porch;
	int h_front_porch;
	int v_back_porch;
	int v_front_porch;
	int hsync_pulse_width;
	int vsync_pulse_width;
	int hsync_polarity;
	int vsync_polarity;
	int data_en_polarity;
	int hsync_start_x;
	int hsync_end_x;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp, ptype;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *pipe;
	int ret = 0;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;

	vctrl = &vsync_ctrl_db[cndx];
	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	vctrl->mfd = mfd;
	vctrl->dev = mfd->fbi->dev;

	
	mdp_clk_ctrl(1);

	fbi = mfd->fbi;
	var = &fbi->var;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	if (vctrl->base_pipe == NULL) {
		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
		if (ptype < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);
		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
		if (pipe == NULL) {
			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
			mdp_clk_ctrl(0);	/* balance the enable above */
			return -EBUSY;	/* the code below dereferences pipe */
		}
		pipe->pipe_used++;
		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
		pipe->mixer_num  = MDP4_MIXER0;
		pipe->src_format = mfd->fb_imgType;
		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_LCDC);
		ret = mdp4_overlay_format2pipe(pipe);
		if (ret < 0)
			printk(KERN_INFO "%s: format2pipe failed\n", __func__);

		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;

		vctrl->base_pipe = pipe; 
	} else {
		pipe = vctrl->base_pipe;
	}


	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;

	if (mfd->display_iova)
		pipe->srcp0_addr = mfd->display_iova + buf_offset;
	else
		pipe->srcp0_addr = (uint32)(buf + buf_offset);

	pipe->srcp0_ystride = fbi->fix.line_length;
	pipe->bpp = bpp;

	mdp4_overlay_mdp_pipe_req(pipe, mfd);

	atomic_set(&vctrl->suspend, 0);

	mdp4_overlay_dmap_xy(pipe);
	mdp4_overlay_dmap_cfg(mfd, 1);
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_up(pipe, 0);


	h_back_porch = var->left_margin;
	h_front_porch = var->right_margin;
	v_back_porch = var->upper_margin;
	v_front_porch = var->lower_margin;
	hsync_pulse_width = var->hsync_len;
	vsync_pulse_width = var->vsync_len;
	lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
	lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
	lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;

	lcdc_width = var->xres + mfd->panel_info.lcdc.xres_pad;
	lcdc_height = var->yres + mfd->panel_info.lcdc.yres_pad;
	lcdc_bpp = mfd->panel_info.bpp;

	hsync_period =
	    hsync_pulse_width + h_back_porch + h_front_porch;
	if ((mfd->panel_info.type == LVDS_PANEL) &&
		(mfd->panel_info.lvds.channel_mode == LVDS_DUAL_CHANNEL_MODE))
		hsync_period += lcdc_width / 2;
	else
		hsync_period += lcdc_width;
	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
	hsync_start_x = hsync_pulse_width + h_back_porch;
	hsync_end_x = hsync_period - h_front_porch - 1;
	display_hctl = (hsync_end_x << 16) | hsync_start_x;

	vsync_period =
	    (vsync_pulse_width + v_back_porch + lcdc_height +
	     v_front_porch) * hsync_period;
	display_v_start =
	    (vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
	display_v_end =
	    vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;

	if (lcdc_width != var->xres) {
		active_h_start = hsync_start_x + first_pixel_start_x;
		active_h_end = active_h_start + var->xres - 1;
		active_hctl =
		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
	} else {
		active_hctl = 0;
	}

	if (lcdc_height != var->yres) {
		active_v_start =
		    display_v_start + first_pixel_start_y * hsync_period;
		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
		active_v_start |= ACTIVE_START_Y_EN;
	} else {
		active_v_start = 0;
		active_v_end = 0;
	}


#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel_info.lcdc.is_sync_active_high) {
		hsync_polarity = 0;
		vsync_polarity = 0;
	} else {
		hsync_polarity = 1;
		vsync_polarity = 1;
	}
	lcdc_underflow_clr |= 0x80000000;	
#else
	hsync_polarity = 0;
	vsync_polarity = 0;
#endif
	data_en_polarity = 0;

	ctrl_polarity =
	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x10, display_hctl);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x14, display_v_start);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x18, display_v_end);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x28, lcdc_border_clr);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x2c, lcdc_underflow_clr);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x30, lcdc_hsync_skew);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x38, ctrl_polarity);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mdp_histogram_ctrl_all(TRUE);
	return ret;
}
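The register programming above is plain raster arithmetic: one line takes hsync_pulse_width + h_back_porch + active width + h_front_porch pixel clocks, and every vertical quantity is then expressed in multiples of that line period. A standalone sketch of the same math (field names from struct fb_var_screeninfo; the hsync skew and LVDS dual-channel halving are omitted):

static void lcdc_compute_timing_sketch(const struct fb_var_screeninfo *var,
				       int width, int height)
{
	int hsync_period = var->hsync_len + var->left_margin +
			   width + var->right_margin;
	int vsync_period = (var->vsync_len + var->upper_margin +
			    height + var->lower_margin) * hsync_period;
	int display_v_start = (var->vsync_len + var->upper_margin) *
			      hsync_period;
	int display_v_end = vsync_period -
			    var->lower_margin * hsync_period - 1;

	(void)display_v_start;	/* these feed the LCDC_BASE registers */
	(void)display_v_end;
}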
Example #14
static void __init do_boot_cpu (int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_rip;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	x86_cpu_to_apicid[cpu] = apicid;

	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

	init_rsp = idle->thread.rsp; 
	per_cpu(init_tss,cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid, 
	       start_rip, init_rsp);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

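	/*
	 * Background for the two writes below: CMOS offset 0x0f holds the
	 * BIOS shutdown-status byte, and 0x0a means "warm start with far
	 * jump", so after the INIT IPI the BIOS vectors through the word
	 * pair in the BIOS data area: segment at phys 0x469, offset at
	 * phys 0x467 (loaded from start_rip a few lines down).
	 */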
	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	Dprintk("1.\n");
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	Dprintk("2.\n");
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
	Dprintk("3.\n");

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip); 

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#if APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpucount--;
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
	}
}
Example #15
static int lge_hsd_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct fsa8008_platform_data *pdata = pdev->dev.platform_data;

	struct hsd_info *hi;

	HSD_DBG("lge_hsd_probe");

	hi = kzalloc(sizeof(struct hsd_info), GFP_KERNEL);

	if (NULL == hi) {
		HSD_ERR("Failed to allocate headset per device info\n");
		return -ENOMEM;
	}

	hi->key_code = pdata->key_code;

	platform_set_drvdata(pdev, hi);

	atomic_set(&hi->btn_state, 0);
	atomic_set(&hi->is_3_pole_or_not, 1);

	hi->gpio_detect = pdata->gpio_detect;
	hi->gpio_mic_en = pdata->gpio_mic_en;
	hi->gpio_jpole = pdata->gpio_jpole;
	hi->gpio_key = pdata->gpio_key;
	hi->set_headset_mic_bias = pdata->set_headset_mic_bias;

	hi->latency_for_detection = pdata->latency_for_detection;
#ifdef CONFIG_LGE_AUDIO_FSA8008_MODIFY
	hi->latency_for_key = FSA8008_KEY_PRESS_DLY_MS;
	hi->gpio_key_cnt = 0;
	INIT_DELAYED_WORK(&hi->work_for_insert, insert_headset);
	INIT_DELAYED_WORK(&hi->work_for_remove, remove_headset);
	INIT_DELAYED_WORK(&hi->work_for_key_det_enable, button_enable);
#else
	hi->latency_for_key = 200 /* milli */ * HZ / 1000; /* convert milli to jiffies */
	INIT_DELAYED_WORK(&hi->work, detect_work);
#endif
	mutex_init(&hi->mutex_lock);
	INIT_DELAYED_WORK(&hi->work_for_key_pressed, button_pressed);
	INIT_DELAYED_WORK(&hi->work_for_key_released, button_released);

	/* initialize gpio_detect */
	ret = gpio_request(hi->gpio_detect, "gpio_detect");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_detect) gpio_request\n", hi->gpio_detect);
		goto error_01;
	}

	ret = gpio_direction_input(hi->gpio_detect);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_detect) gpio_direction_input\n", hi->gpio_detect);
		goto error_02;
	}

	/* initialize gpio_jpole */
	ret = gpio_request(hi->gpio_jpole, "gpio_jpole");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_jpole) gpio_request\n", hi->gpio_jpole);
		goto error_02;
	}

	ret = gpio_direction_input(hi->gpio_jpole);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_jpole) gpio_direction_input\n", hi->gpio_jpole);
		goto error_03;
	}
	
	/* initialize gpio_key */
	ret = gpio_request(hi->gpio_key, "gpio_key");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_key) gpio_request\n", hi->gpio_key);
		goto error_03;
	}

	ret = gpio_direction_input(hi->gpio_key);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_key) gpio_direction_input\n", hi->gpio_key);
		goto error_04;
	}

	/* initialize gpio_mic_en */
	ret = gpio_request(hi->gpio_mic_en, "gpio_mic_en");
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_mic_en) gpio_request\n", hi->gpio_mic_en);
		goto error_04;
	}

	ret = gpio_direction_output(hi->gpio_mic_en, 0);
	if (ret < 0) {
		HSD_ERR("Failed to configure gpio%d (gpio_mic_en) gpio_direction_output\n", hi->gpio_mic_en);
		goto error_05;
	}

	/* initialize irq of gpio_jpole */
	hi->irq_detect = gpio_to_irq(hi->gpio_detect);

	HSD_DBG("hi->irq_detect = %d\n", hi->irq_detect);

	if (hi->irq_detect < 0) {
		HSD_ERR("Failed to get interrupt number\n");
		ret = hi->irq_detect;
		goto error_05;
	}

//LGE_START, MYUNGWON.KIM, When Sleep IRQ Doesn't work
	ret = request_threaded_irq(hi->irq_detect, NULL, gpio_irq_handler,
					IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING|IRQF_NO_SUSPEND, pdev->name, hi);
//LGE_END, MYUNGWON.KIM

	if (ret) {
		HSD_ERR("failed to request button irq");
		goto error_05;
	}

	ret = irq_set_irq_wake(hi->irq_detect, 1);
	if (ret < 0) {
		HSD_ERR("Failed to set irq_detect interrupt wake\n");
		goto error_06;
	}

	/* initialize irq of gpio_key */
	hi->irq_key = gpio_to_irq(hi->gpio_key);

	HSD_DBG("hi->irq_key = %d\n", hi->irq_key);

	if (hi->irq_key < 0) {
		HSD_ERR("Failed to get interrupt number\n");
		ret = hi->irq_key;
		goto error_06;
	}

//LGE_START, MYUNGWON.KIM, When Sleep IRQ Doesn't work
	ret = request_threaded_irq(hi->irq_key, NULL, button_irq_handler,
					IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING|IRQF_NO_SUSPEND, pdev->name, hi);
//LGE_END, MYUNGWON.KIM

	if (ret) {
		HSD_ERR("failed to request button irq");
		goto error_06;
	}

	disable_irq(hi->irq_key);

	ret = irq_set_irq_wake(hi->irq_key, 1);
	if (ret < 0) {
		HSD_ERR("Failed to set irq_key interrupt wake\n");
		goto error_07;
	}

	/* initialize switch device */
	hi->sdev.name = pdata->switch_name;
	hi->sdev.print_state = lge_hsd_print_state;
	hi->sdev.print_name = lge_hsd_print_name;

	ret = switch_dev_register(&hi->sdev);
	if (ret < 0) {
		HSD_ERR("Failed to register switch device\n");
		goto error_07;
	}

	/* initialize input device */
	hi->input = input_allocate_device();
	if (!hi->input) {
		HSD_ERR("Failed to allocate input device\n");
		ret = -ENOMEM;
		goto error_08;
	}

	hi->input->name = pdata->keypad_name;

	hi->input->id.vendor    = 0x0001;
	hi->input->id.product   = 1;
	hi->input->id.version   = 1;

	/*input_set_capability(hi->input, EV_SW, SW_HEADPHONE_INSERT);*/
	set_bit(EV_SYN, hi->input->evbit);
	set_bit(EV_KEY, hi->input->evbit);
	set_bit(EV_SW, hi->input->evbit);
	set_bit(hi->key_code, hi->input->keybit);
	set_bit(SW_HEADPHONE_INSERT, hi->input->swbit);
	set_bit(SW_MICROPHONE_INSERT, hi->input->swbit);

	ret = input_register_device(hi->input);
	if (ret) {
		HSD_ERR("Failed to register input device\n");
		goto error_09;
	}
#ifdef CONFIG_LGE_AUDIO_FSA8008_MODIFY
	if (gpio_get_value_cansleep(hi->gpio_detect) == EARJACK_INSERTED) {
		queue_delayed_work(local_fsa8008_workqueue, &(hi->work_for_insert), 0); /* detect an earjack already inserted at initialization */
	}
#else
	if (!gpio_get_value_cansleep(hi->gpio_detect))
#ifdef CONFIG_FSA8008_USE_LOCAL_WORK_QUEUE
		queue_delayed_work(local_fsa8008_workqueue, &(hi->work), 0); /* detect an earjack already inserted at initialization */
#else
		schedule_delayed_work(&(hi->work), 0); /* detect an earjack already inserted at initialization */
#endif
#endif

#ifdef AT_TEST_GPKD
	ret = device_create_file(&pdev->dev, &dev_attr_hookkeylog);
#endif

	return ret;

error_09:
	input_free_device(hi->input);
error_08:
	switch_dev_unregister(&hi->sdev);

error_07:
	free_irq(hi->irq_key, hi);
error_06:
	free_irq(hi->irq_detect, hi);

error_05:
	gpio_free(hi->gpio_mic_en);
error_04:
	gpio_free(hi->gpio_key);
error_03:
	gpio_free(hi->gpio_jpole);
error_02:
	gpio_free(hi->gpio_detect);

error_01:
	mutex_destroy(&hi->mutex_lock);
	kfree(hi);

	return ret;
}
Example #16
static int __init tspdrv_init(void)
{
    int nRet, i;   /* initialized below */

    atomic_set(&g_nDebugLevel, DBL_INFO);
#ifdef VIBE_RUNTIME_RECORD
    atomic_set(&g_bRuntimeRecord, 0);
    DbgOut((DBL_ERROR, "*** tspdrv: runtime recorder feature is ON for debugging which should be OFF in release version.\n"
                        "*** tspdrv: please turn off the feature by removing VIBE_RUNTIME_RECODE macro.\n"));
#endif
    DbgOut((DBL_INFO, "tspdrv: init_module.\n"));

#ifdef IMPLEMENT_AS_CHAR_DRIVER
    g_nMajor = register_chrdev(0, MODULE_NAME, &fops);
    if (g_nMajor < 0) 
    {
        DbgOut((DBL_ERROR, "tspdrv: can't get major number.\n"));
        return g_nMajor;
    }
#else
    nRet = misc_register(&miscdev);
	if (nRet) 
    {
        DbgOut((DBL_ERROR, "tspdrv: misc_register failed.\n"));
		return nRet;
	}
#endif

	nRet = platform_device_register(&platdev);
	if (nRet) 
    {
        DbgOut((DBL_ERROR, "tspdrv: platform_device_register failed.\n"));
    }

	nRet = platform_driver_register(&platdrv);
	if (nRet) 
    {
        DbgOut((DBL_ERROR, "tspdrv: platform_driver_register failed.\n"));
    }

    DbgRecorderInit(());

    ImmVibeSPI_ForceOut_Initialize();
    VibeOSKernelLinuxInitTimer();
    ResetOutputData();

    /* Get and concatenate device name and initialize data buffer */
    g_cchDeviceName = 0;
    for (i=0; i<NUM_ACTUATORS; i++)
    {
        char *szName = g_szDeviceName + g_cchDeviceName;
        ImmVibeSPI_Device_GetName(i, szName, VIBE_MAX_DEVICE_NAME_LENGTH);

        /* Append version information and get buffer length */
        strcat(szName, VERSION_STR);
        g_cchDeviceName += strlen(szName);

    }

    wake_lock_init(&tspdrv_wakelock, WAKE_LOCK_SUSPEND, MODULE_NAME);

    return 0;
}
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts.  One thing that non SDIO host cannot do is
	 * asynchronous notification of pending SDIO card interrupts
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on drivers behalf for a couple
		 * reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ) {
			mmc_host_clk_hold(host);
			host->sdio_irq_pending = false;
			host->ops->enable_sdio_irq(host, 1);
			mmc_host_clk_release(host);
		}
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ) {
		mmc_host_clk_hold(host);
		host->ops->enable_sdio_irq(host, 0);
		mmc_host_clk_release(host);
	}

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/* If there is only 1 function registered set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1)
		for (i = 0; i < card->sdio_funcs; i++) {
			func = card->sdio_func[i];
			if (func && func->irq_handler) {
				card->sdio_single_irq = func;
				break;
			}
		}
}
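Caching sdio_single_irq lets the IRQ thread skip the CCCR pending-register read when exactly one function has a handler. A hedged sketch of how a consumer such as process_sdio_pending_irqs() can exploit it (simplified, not the verbatim core code):

static int process_pending_irqs_sketch(struct mmc_host *host)
{
	struct sdio_func *func = host->card->sdio_single_irq;

	if (func) {
		/* only one handler registered: dispatch directly instead
		 * of asking the card which function raised the interrupt */
		func->irq_handler(func);
		return 1;
	}

	/* otherwise read SDIO_CCCR_INTx and walk the pending bits */
	return 0;
}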

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
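As the kerneldoc above notes, the host is claimed around the handler, and claiming the IRQ itself follows the same discipline. A typical call site in a function driver's probe, with a hypothetical handler name:

static void my_sdio_irq(struct sdio_func *func)
{
	/* host is already claimed here: read and acknowledge the
	 * function's interrupt source with plain sdio_readb() etc. */
}

static int my_probe_fragment(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_claim_irq(func, my_sdio_irq);
	sdio_release_host(func);
	return ret;
}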
Example #18
/*
 * In the full driver this definition is one branch of a preprocessor
 * conditional, presumably selecting between the legacy ioctl and the
 * unlocked_ioctl prototypes; only this branch survives in the excerpt.
 */
static int ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
    switch (cmd)
    {
        case TSPDRV_SET_MAGIC_NUMBER:
            file->private_data = (void*)TSPDRV_MAGIC_NUMBER;
            break;

        case TSPDRV_ENABLE_AMP:
	    if (wake_lock_active(&tspdrv_wakelock))
		wake_unlock(&tspdrv_wakelock);
	    wake_lock(&tspdrv_wakelock);

            ImmVibeSPI_ForceOut_AmpEnable(arg);
#ifdef VIBE_RUNTIME_RECORD
            if (atomic_read(&g_bRuntimeRecord)) {
                DbgRecord((arg,";------- TSPDRV_ENABLE_AMP ---------\n"));
            }
#else
            DbgRecorderReset((arg));
            DbgRecord((arg,";------- TSPDRV_ENABLE_AMP ---------\n"));
#endif
            break;

        case TSPDRV_DISABLE_AMP:
            ImmVibeSPI_ForceOut_AmpDisable(arg);
#ifdef VIBE_RUNTIME_RECORD
            if (atomic_read(&g_bRuntimeRecord)) {
                DbgRecord((arg,";------- TSPDRV_DISABLE_AMP ---------\n"));
            }
#endif
	    if (wake_lock_active(&tspdrv_wakelock))
		wake_unlock(&tspdrv_wakelock);

            break;

        case TSPDRV_GET_NUM_ACTUATORS:
            return NUM_ACTUATORS;

        case TSPDRV_SET_DBG_LEVEL:
            {
                long nDbgLevel;
                if (0 != copy_from_user((void *)&nDbgLevel, (const void __user *)arg, sizeof(long))) {
                    /* Error copying the data */
                    DbgOut((DBL_ERROR, "copy_from_user failed to copy debug level data.\n"));
                    return -1;
                }

                if (DBL_TEMP <= nDbgLevel &&  nDbgLevel <= DBL_OVERKILL) {
                    atomic_set(&g_nDebugLevel, nDbgLevel);
                } else {
                    DbgOut((DBL_ERROR, "Invalid debug level requested, ignored."));
                }

                break;
            }

        case TSPDRV_GET_DBG_LEVEL:
            return atomic_read(&g_nDebugLevel);

#ifdef VIBE_RUNTIME_RECORD
        case TSPDRV_SET_RUNTIME_RECORD_FLAG:
            {
                long nRecordFlag;
                if (0 != copy_from_user((void *)&nRecordFlag, (const void __user *)arg, sizeof(long))) {
                    /* Error copying the data */
                    DbgOut((DBL_ERROR, "copy_from_user failed to copy runtime record flag.\n"));
                    return -1;
                }

                atomic_set(&g_bRuntimeRecord, nRecordFlag);
                if (nRecordFlag) {
                    int i;
                    for (i=0; i<NUM_ACTUATORS; i++) { 
                        DbgRecorderReset((i));
                    }
                }
                break;
            }
        case TSPDRV_GET_RUNTIME_RECORD_FLAG:
            return atomic_read(&g_bRuntimeRecord);
        case TSPDRV_SET_RUNTIME_RECORD_BUF_SIZE:
            {
                long nRecorderBufSize;
                if (0 != copy_from_user((void *)&nRecorderBufSize, (const void __user *)arg, sizeof(long))) {
                    /* Error copying the data */
                    DbgOut((DBL_ERROR, "copy_from_user failed to copy recorder buffer size.\n"));
                    return -1;
                }

                if (0 == DbgSetRecordBufferSize(nRecorderBufSize)) {
                    DbgOut((DBL_ERROR, "DbgSetRecordBufferSize failed.\n"));
                    return -1;
                }
                break;
            }
        case TSPDRV_GET_RUNTIME_RECORD_BUF_SIZE:
            return DbgGetRecordBufferSize();
#endif

        case TSPDRV_SET_DEVICE_PARAMETER:
            {
                device_parameter deviceParam;

                if (0 != copy_from_user((void *)&deviceParam, (const void __user *)arg, sizeof(deviceParam)))
                {
                    /* Error copying the data */
                    DbgOut((DBL_ERROR, "tspdrv: copy_from_user failed to copy kernel parameter data.\n"));
                    return -1;
                }

                switch (deviceParam.nDeviceParamID)
                {
                    case VIBE_KP_CFG_UPDATE_RATE_MS:
                        /* Update the timer period */
                        g_nTimerPeriodMs = deviceParam.nDeviceParamValue;



#ifdef CONFIG_HIGH_RES_TIMERS
                        /* For devices using high resolution timer we need to update the ktime period value */
                        g_ktTimerPeriod = ktime_set(0, g_nTimerPeriodMs * 1000000);
#endif
                        break;

                    case VIBE_KP_CFG_FREQUENCY_PARAM1:
                    case VIBE_KP_CFG_FREQUENCY_PARAM2:
                    case VIBE_KP_CFG_FREQUENCY_PARAM3:
                    case VIBE_KP_CFG_FREQUENCY_PARAM4:
                    case VIBE_KP_CFG_FREQUENCY_PARAM5:
                    case VIBE_KP_CFG_FREQUENCY_PARAM6:
#if 0
                        if (0 > ImmVibeSPI_ForceOut_SetFrequency(deviceParam.nDeviceIndex, deviceParam.nDeviceParamID, deviceParam.nDeviceParamValue))
                        {
                            DbgOut((DBL_ERROR, "tspdrv: cannot set device frequency parameter.\n"));
                            return -1;
                        }
#endif
                        break;
                }
            }
    }
    return 0;
}
Example #19
int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
{
	struct device *dev = mfd->fbi->dev;
	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
	struct mdp3_session_data *mdp3_session = NULL;
	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
	int rc;
	int splash_mismatch = 0;

	pr_debug("mdp3_ctrl_init\n");
	rc = mdp3_parse_dt_splash(mfd);
	if (rc)
		splash_mismatch = 1;

	mdp3_interface->on_fnc = mdp3_ctrl_on;
	mdp3_interface->off_fnc = mdp3_ctrl_off;
	mdp3_interface->do_histogram = NULL;
	mdp3_interface->cursor_update = NULL;
	mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
	mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
	mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff;
	mdp3_interface->lut_update = mdp3_ctrl_lut_update;
	mdp3_interface->configure_panel = mdp3_update_panel_info;

	mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
	if (!mdp3_session) {
		pr_err("fail to allocate mdp3 private data structure\n");
		return -ENOMEM;
	}
	mutex_init(&mdp3_session->lock);
	INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off);
	INIT_WORK(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done);
	atomic_set(&mdp3_session->vsync_countdown, 0);
	mutex_init(&mdp3_session->histo_lock);
	mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
	if (!mdp3_session->dma) {
		rc = -ENODEV;
		goto init_done;
	}

	rc = mdp3_dma_init(mdp3_session->dma);
	if (rc) {
		pr_err("fail to init dma\n");
		goto init_done;
	}

	intf_type = mdp3_ctrl_get_intf_type(mfd);
	mdp3_session->intf = mdp3_get_display_intf(intf_type);
	if (!mdp3_session->intf) {
		rc = -ENODEV;
		goto init_done;
	}
	rc = mdp3_intf_init(mdp3_session->intf);
	if (rc) {
		pr_err("fail to init interface\n");
		goto init_done;
	}

	mdp3_session->dma->output_config.out_sel = intf_type;
	mdp3_session->mfd = mfd;
	mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
	mdp3_session->status = mdp3_session->intf->active;
	mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
	mdp3_bufq_init(&mdp3_session->bufq_in);
	mdp3_bufq_init(&mdp3_session->bufq_out);
	mdp3_session->histo_status = 0;
	mdp3_session->lut_sel = 0;
	BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head);

	init_timer(&mdp3_session->vsync_timer);
	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
	mdp3_session->vsync_timer.data = (u32)mdp3_session;
	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
	mfd->mdp.private1 = mdp3_session;
	init_completion(&mdp3_session->dma_completion);
	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;

	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
	if (rc) {
		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
		goto init_done;
	}

	mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd, NULL,
							"vsync_event");
	if (!mdp3_session->vsync_event_sd) {
		pr_err("vsync_event sysfs lookup failed\n");
		rc = -ENODEV;
		goto init_done;
	}

	rc = mdp3_create_sysfs_link(dev);
	if (rc)
		pr_warn("problem creating link to mdp sysfs\n");

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");

	if (mdp3_get_cont_spash_en()) {
		mdp3_session->clk_on = 1;
		mdp3_session->in_splash_screen = 1;
		mdp3_ctrl_notifier_register(mdp3_session,
			&mdp3_session->mfd->mdp_sync_pt_data.notifier);
	}

	if (splash_mismatch) {
		pr_err("splash memory mismatch, stop splash\n");
		mdp3_ctrl_off(mfd);
	}

	mdp3_session->vsync_before_commit = true;
init_done:
	if (IS_ERR_VALUE(rc))
		kfree(mdp3_session);

	return rc;
}
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	atomic_set(&vctrl->suspend, 1);

	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
			vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;     /* empty queue */
	}

	pr_debug("%s-:\n", __func__);
	return ret;
}
Example #21
/*
 * USB callback handler for reading data
 */
static void iowarrior_callback(struct urb *urb)
{
	struct iowarrior *dev = urb->context;
	int intr_idx;
	int read_idx;
	int aux_idx;
	int offset;
	int status = urb->status;
	int retval;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:
		goto exit;
	}

	spin_lock(&dev->intr_idx_lock);
	intr_idx = atomic_read(&dev->intr_idx);
	/* aux_idx become previous intr_idx */
	aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1);
	read_idx = atomic_read(&dev->read_idx);

	/* queue is not empty and it's interface 0 */
	if ((intr_idx != read_idx)
	    && (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0)) {
		/* + 1 for serial number */
		offset = aux_idx * (dev->report_size + 1);
		if (!memcmp
		    (dev->read_queue + offset, urb->transfer_buffer,
		     dev->report_size)) {
			/* equal values on interface 0 will be ignored */
			spin_unlock(&dev->intr_idx_lock);
			goto exit;
		}
	}

	/* aux_idx become next intr_idx */
	aux_idx = (intr_idx == (MAX_INTERRUPT_BUFFER - 1)) ? 0 : (intr_idx + 1);
	if (read_idx == aux_idx) {
		/* queue full, dropping oldest input */
		read_idx = (read_idx + 1 == MAX_INTERRUPT_BUFFER) ? 0 : read_idx + 1;
		atomic_set(&dev->read_idx, read_idx);
		atomic_set(&dev->overflow_flag, 1);
	}

	/* +1 for serial number */
	offset = intr_idx * (dev->report_size + 1);
	memcpy(dev->read_queue + offset, urb->transfer_buffer,
	       dev->report_size);
	*(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++;

	atomic_set(&dev->intr_idx, aux_idx);
	spin_unlock(&dev->intr_idx_lock);
	/* tell the blocking read about the new data */
	wake_up_interruptible(&dev->read_wait);

exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);

}
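All three index updates in the callback are the same circular step, now written without the double mutation of the original (the old "++read_idx" both modified and re-read the variable in one expression, which is undefined behavior in C). A tiny hypothetical helper makes the invariant explicit:

static inline int iowarrior_ring_next(int idx)
{
	/* wrap to slot 0 after the last of MAX_INTERRUPT_BUFFER slots */
	return (idx + 1 == MAX_INTERRUPT_BUFFER) ? 0 : idx + 1;
}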
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;

	
	if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
	{
		return PVRSRV_OK;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));

#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
#if defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	{
		struct gpu_platform_data *pdata;
		IMG_UINT32 max_freq_index;
		int res;

		pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;
		max_freq_index = psSysSpecData->ui32SGXFreqListSize - 2;

		
		if (psSysSpecData->ui32SGXFreqListIndex != max_freq_index)
		{
			PVR_ASSERT(pdata->device_scale != IMG_NULL);
			res = pdata->device_scale(&gpsPVRLDMDev->dev,
									  &gpsPVRLDMDev->dev,
									  psSysSpecData->pui32SGXFreqList[max_freq_index]);
			if (res == 0)
			{
				psSysSpecData->ui32SGXFreqListIndex = max_freq_index;
			}
			else if (res == -EBUSY)
			{
				PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Unable to scale SGX frequency (EBUSY)"));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
			else if (res < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Unable to scale SGX frequency (%d)", res));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
		}
	}
#endif 
	{
		
		int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
			return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
		}
	}
#endif 

	SysEnableSGXInterrupts(psSysData);

	
	atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);

#else	
	PVR_UNREFERENCED_PARAMETER(psSysData);
#endif	
	return PVRSRV_OK;
}
struct net_device *alloc_ieee80211(int sizeof_priv)
{
	struct ieee80211_device *ieee;
	struct net_device *dev;
	int i,err;

	IEEE80211_DEBUG_INFO("Initializing...\n");

	dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
	if (!dev) {
		IEEE80211_ERROR("Unable to network device.\n");
		goto failed;
	}

	ieee = netdev_priv(dev);
	memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv);
	ieee->dev = dev;

	err = ieee80211_networks_allocate(ieee);
	if (err) {
		IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
				err);
		goto failed;
	}
	ieee80211_networks_initialize(ieee);


	/* Default fragmentation threshold is maximum payload size */
	ieee->fts = DEFAULT_FTS;
	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
	ieee->open_wep = 1;

	/* Default to enabling full open WEP with host based encrypt/decrypt */
	ieee->host_encrypt = 1;
	ieee->host_decrypt = 1;
	ieee->ieee802_1x = 1; /* Default to supporting 802.1x */

	INIT_LIST_HEAD(&ieee->crypt_deinit_list);
	init_timer(&ieee->crypt_deinit_timer);
	ieee->crypt_deinit_timer.data = (unsigned long)ieee;
	ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;

	spin_lock_init(&ieee->lock);
	spin_lock_init(&ieee->wpax_suitlist_lock);
	spin_lock_init(&ieee->bw_spinlock);
	spin_lock_init(&ieee->reorder_spinlock);
	//added by WB
	atomic_set(&(ieee->atm_chnlop), 0);
	atomic_set(&(ieee->atm_swbw), 0);

	ieee->wpax_type_set = 0;
 	ieee->wpa_enabled = 0;
 	ieee->tkip_countermeasures = 0;
 	ieee->drop_unencrypted = 0;
 	ieee->privacy_invoked = 0;
 	ieee->ieee802_1x = 1;
	ieee->raw_tx = 0;
	//ieee->hwsec_support = 1; //default: support hw security (use module_param instead).
	ieee->hwsec_active = 0; //disable hwsec, switch it on when necessary.

	ieee80211_softmac_init(ieee);

	ieee->pHTInfo = (RT_HIGH_THROUGHPUT *)kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
	if (ieee->pHTInfo == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
		goto failed;	/* free the net_device instead of leaking it */
	}
	HTUpdateDefaultSetting(ieee);
	HTInitializeHTInfo(ieee); //may move to other place.
	TSInitialize(ieee);

	for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);

	for (i = 0; i < 17; i++) {
	  ieee->last_rxseq_num[i] = -1;
	  ieee->last_rxfrag_num[i] = -1;
	  ieee->last_packet_time[i] = 0;
	}

//These function were added to load crypte module autoly
	ieee80211_tkip_null();
	ieee80211_wep_null();
	ieee80211_ccmp_null();

	return dev;

 failed:
	if (dev)
		free_netdev(dev);

	return NULL;
}
IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;

	
	if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
	{
		return;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));

	SysDisableSGXInterrupts(psSysData);

#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
	{
		int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
		}
	}
#if defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	{
		struct gpu_platform_data *pdata;
		int res;

		pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;

		
		if (psSysSpecData->ui32SGXFreqListIndex != 0)
		{
			PVR_ASSERT(pdata->device_scale != IMG_NULL);
			res = pdata->device_scale(&gpsPVRLDMDev->dev,
									  &gpsPVRLDMDev->dev,
									  psSysSpecData->pui32SGXFreqList[0]);
			if (res == 0)
			{
				psSysSpecData->ui32SGXFreqListIndex = 0;
			}
			else if (res == -EBUSY)
			{
				PVR_DPF((PVR_DBG_WARNING, "DisableSGXClocks: Unable to scale SGX frequency (EBUSY)"));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
			else if (res < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: Unable to scale SGX frequency (%d)", res));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
		}
	}
#endif 
#endif 

	
	atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);

#else	
	PVR_UNREFERENCED_PARAMETER(psSysData);
#endif	
}
tEplKernel EplLinCbEvent(tEplApiEventType EventType_p,	// IN: event type (enum)
			 tEplApiEventArg *pEventArg_p,	// IN: event argument (union)
			 void *pUserArg_p)
{
	tEplKernel EplRet = kEplSuccessful;
	int iErr;

	// block any further call to this function, i.e. enter critical section
	iErr = down_interruptible(&SemaphoreCbEvent_g);
	if (iErr != 0) {	// waiting was interrupted by signal
		EplRet = kEplShutdown;
		goto Exit;
	}
	// wait for EplApiProcess() to call ioctl
	// normally it should be waiting already for us to pass a new event
	iErr = wait_event_interruptible(WaitQueueCbEvent_g,
					(atomic_read(&AtomicEventState_g) ==
					 EVENT_STATE_IOCTL)
					|| (atomic_read(&AtomicEventState_g) ==
					    EVENT_STATE_TERM));
	if ((iErr != 0) || (atomic_read(&AtomicEventState_g) == EVENT_STATE_TERM)) {	// waiting was interrupted by signal
		EplRet = kEplShutdown;
		goto LeaveCriticalSection;
	}
	// save event information for ioctl
	EventType_g = EventType_p;
	pEventArg_g = pEventArg_p;

	// pass control to application's event callback function, i.e. EplApiProcess()
	atomic_set(&AtomicEventState_g, EVENT_STATE_READY);
	wake_up_interruptible(&WaitQueueProcess_g);

	// now, the application's event callback function processes the event

	// wait for completion of application's event callback function, i.e. EplApiProcess() calls ioctl again
	iErr = wait_event_interruptible(WaitQueueCbEvent_g,
					(atomic_read(&AtomicEventState_g) ==
					 EVENT_STATE_IOCTL)
					|| (atomic_read(&AtomicEventState_g) ==
					    EVENT_STATE_TERM));
	if ((iErr != 0) || (atomic_read(&AtomicEventState_g) == EVENT_STATE_TERM)) {	// waiting was interrupted by signal
		EplRet = kEplShutdown;
		goto LeaveCriticalSection;
	}
	// read return code from application's event callback function
	EplRet = RetCbEvent_g;

      LeaveCriticalSection:
	up(&SemaphoreCbEvent_g);

      Exit:
	// check if NMT_GS_OFF is reached
	if (EventType_p == kEplApiEventNmtStateChange) {
		if (pEventArg_p->m_NmtStateChange.m_NewNmtState == kEplNmtGsOff) {	// NMT state machine was shut down
			TRACE0("EPL:   EplLinCbEvent(NMT_GS_OFF)\n");
			uiEplState_g = EPL_STATE_SHUTDOWN;
			atomic_set(&AtomicEventState_g, EVENT_STATE_TERM);
			wake_up(&WaitQueueRelease_g);
		} else {	// NMT state machine is running
			uiEplState_g = EPL_STATE_RUNNING;
		}
	}

	return EplRet;
}
Example #26
static void insert_headset(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct hsd_info *hi = container_of(dwork, struct hsd_info, work_for_insert);

	int earjack_type;

	int value = gpio_get_value_cansleep(hi->gpio_detect);

	if (value != EARJACK_INSERTED) {
		HSD_ERR("insert_headset called, but the jack is not actually inserted\n");
		return;
	}
	else {
#ifdef HEADSET_REMOVE_ERROR
		insert_state_check = 1;
#endif
//		mutex_lock(&hi->mutex_lock);
//		switch_set_state(&hi->sdev, LGE_HEADSET);
//		mutex_unlock(&hi->mutex_lock);
	}

	HSD_DBG("insert_headset");

	if (hi->set_headset_mic_bias)
		hi->set_headset_mic_bias(TRUE);

	gpio_set_value_cansleep(hi->gpio_mic_en, 1);

	msleep(hi->latency_for_detection); // 75 -> 10 ms

	earjack_type = gpio_get_value_cansleep(hi->gpio_jpole);

	if (earjack_type == EARJACK_TYPE_3_POLE) {
		HSD_DBG("3 polarity earjack");

		if (hi->set_headset_mic_bias)
			hi->set_headset_mic_bias(FALSE);

		atomic_set(&hi->is_3_pole_or_not, 1);

		mutex_lock(&hi->mutex_lock);
		switch_set_state(&hi->sdev, LGE_HEADSET_NO_MIC);
		mutex_unlock(&hi->mutex_lock);

		input_report_switch(hi->input, SW_HEADPHONE_INSERT, 1);
		input_sync(hi->input);
	} else {
		HSD_DBG("4 polarity earjack");

		atomic_set(&hi->is_3_pole_or_not, 0);

		cancel_delayed_work_sync(&(hi->work_for_key_det_enable));
		queue_delayed_work(local_fsa8008_workqueue, &(hi->work_for_key_det_enable), FSA8008_KEY_EN_DELAY_MS);

		mutex_lock(&hi->mutex_lock);
		switch_set_state(&hi->sdev, LGE_HEADSET);
		mutex_unlock(&hi->mutex_lock);

		input_report_switch(hi->input, SW_HEADPHONE_INSERT, 1);
		input_sync(hi->input); // 2012-07-01, [email protected] - to prevent a lost uevent of earjack inserted
		input_report_switch(hi->input, SW_MICROPHONE_INSERT, 1);
		input_sync(hi->input);
	}
#ifdef HEADSET_REMOVE_ERROR
	insert_state_check = 0;
#endif

}
Example #27
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
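The resplen choice above is the usual ABI-growth trick: the response struct only ever gains fields at the tail, so an old userspace, detected by a small udata->outlen, simply receives the original prefix. Schematically, with hypothetical struct layouts (the real iwch structs differ):

struct create_cq_resp_v0 { __u32 cqid; __u32 size_log2; __u64 key; };
struct create_cq_resp    { __u32 cqid; __u32 size_log2; __u64 key;
			   __u64 memsize; __u32 reserved; };

static size_t pick_resplen_sketch(size_t outlen)
{
	/* v0 must remain a binary prefix of the current layout */
	return (outlen < sizeof(struct create_cq_resp)) ?
		sizeof(struct create_cq_resp_v0) :
		sizeof(struct create_cq_resp);
}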
Example #28
static void insert_headset(struct hsd_info *hi)
{
	int earjack_type;
	static int insert_first=1;

	HSD_DBG("insert_headset");

	if (hi->set_headset_mic_bias) {
		if (insert_first == 0) {
			HSD_DBG("Execute the workaround codes...\n");
			/* Workaround code for increasing sleep current */
			hi->set_headset_mic_bias(TRUE);
			msleep(5);
			hi->set_headset_mic_bias(FALSE);
			msleep(5);
			hi->set_headset_mic_bias(TRUE);
		} else {
			/* first insertion: just enable the bias and clear the
			 * flag so later insertions take the workaround path */
			hi->set_headset_mic_bias(TRUE);
			insert_first = 0;
		}
	}

	gpio_set_value_cansleep(hi->gpio_mic_en, 1);

	msleep(hi->latency_for_detection);

	earjack_type = gpio_get_value_cansleep(hi->gpio_jpole);

	if (earjack_type == 1) {
		HSD_DBG("3 polarity earjack");

		if (hi->set_headset_mic_bias)
			hi->set_headset_mic_bias(FALSE);

		atomic_set(&hi->is_3_pole_or_not, 1);

		mutex_lock(&hi->mutex_lock);
		switch_set_state(&hi->sdev, LGE_HEADSET_NO_MIC);
		mutex_unlock(&hi->mutex_lock);

		gpio_set_value_cansleep(hi->gpio_mic_en, 0);

		input_report_switch(hi->input, SW_HEADPHONE_INSERT, 1);
		input_sync(hi->input);
	} else {
		HSD_DBG("4 polarity earjack");

		atomic_set(&hi->is_3_pole_or_not, 0);

		mutex_lock(&hi->mutex_lock);
		switch_set_state(&hi->sdev, LGE_HEADSET);
		mutex_unlock(&hi->mutex_lock);

		if (!atomic_read(&hi->irq_key_enabled)) {
			unsigned long irq_flags;

			local_irq_save(irq_flags);
			enable_irq(hi->irq_key);
			local_irq_restore(irq_flags);

			atomic_set(&hi->irq_key_enabled, TRUE);
		}
		input_report_switch(hi->input, SW_HEADPHONE_INSERT, 1);
		input_report_switch(hi->input, SW_MICROPHONE_INSERT, 1);
		input_sync(hi->input);
	}

}
int shrm_protocol_init(struct shrm_dev *shrm,
			received_msg_handler common_rx_handler,
			received_msg_handler audio_rx_handler)
{
	int err;

	shm_dev = shrm;
	boot_state = BOOT_INIT;
	dev_info(shrm->dev, "IPC_ISA BOOT_INIT\n");
	rx_common_handler = common_rx_handler;
	rx_audio_handler = audio_rx_handler;
	atomic_set(&ac_sleep_disable_count, 0);

	is_earlydrop = cpu_is_u8500ed();
	if (is_earlydrop != 0x01) {
		hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		timer.function = callback;
	}

	hrtimer_init(&mod_stuck_timer_0, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	mod_stuck_timer_0.function = shm_mod_stuck_timeout;
	hrtimer_init(&mod_stuck_timer_1, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	mod_stuck_timer_1.function = shm_mod_stuck_timeout;
	hrtimer_init(&fifo_full_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	fifo_full_timer.function = shm_fifo_full_timeout;

	shrm->shm_common_ch_wr_wq = create_singlethread_workqueue
		("shm_common_channel_irq");
	if (!shrm->shm_common_ch_wr_wq) {
		dev_err(shrm->dev, "failed to create work queue\n");
		return -ENOMEM;
	}
	shrm->shm_audio_ch_wr_wq = create_rt_workqueue
	  ("shm_audio_channel_irq");
	if (!shrm->shm_audio_ch_wr_wq) {
		dev_err(shrm->dev, "failed to create work queue\n");
		err = -ENOMEM;
		goto free_wq1;
	}
	shrm->shm_ac_wake_wq = create_rt_workqueue("shm_ac_wake_req");
	if (!shrm->shm_ac_wake_wq) {
		dev_err(shrm->dev, "failed to create work queue\n");
		err = -ENOMEM;
		goto free_wq2;
	}
	shrm->shm_ca_wake_wq = create_rt_workqueue("shm_ca_wake_req");
	if (!shrm->shm_ca_wake_wq) {
		dev_err(shrm->dev, "failed to create work queue\n");
		err = -ENOMEM;
		goto free_wq3;
	}
	shrm->shm_ac_sleep_wq = create_singlethread_workqueue
						("shm_ac_sleep_req");
	if (!shrm->shm_ac_sleep_wq) {
		dev_err(shrm->dev, "failed to create work queue\n");
		err = -ENOMEM;
		goto free_wq4;
	}
	shrm->shm_mod_stuck_wq = create_rt_workqueue("shm_mod_reset_req");
	if (!shrm->shm_mod_stuck_wq) {
		dev_err(shrm->dev, "failed to create work queue\n");
		err = -ENOMEM;
		goto free_wq5;
	}
	INIT_WORK(&shrm->send_ac_msg_pend_notify_0,
			send_ac_msg_pend_notify_0_work);
	INIT_WORK(&shrm->send_ac_msg_pend_notify_1,
			send_ac_msg_pend_notify_1_work);
	INIT_WORK(&shrm->shm_ca_wake_req, shm_ca_wake_req_work);
	INIT_WORK(&shrm->shm_ca_sleep_req, shm_ca_sleep_req_work);
	INIT_WORK(&shrm->shm_ac_sleep_req, shm_ac_sleep_req_work);
	INIT_WORK(&shrm->shm_ac_wake_req, shm_ac_wake_req_work);
	INIT_WORK(&shrm->shm_mod_reset_req, shm_mod_reset_work);

	/* set tasklet data */
	shm_ca_0_tasklet.data = (unsigned long)shrm;
	shm_ca_1_tasklet.data = (unsigned long)shrm;

	err = request_irq(IRQ_PRCMU_CA_SLEEP, shrm_prcmu_irq_handler,
			IRQF_NO_SUSPEND, "ca-sleep", shrm);
	if (err < 0) {
		dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_SLEEP.\n");
		goto free_wq6;
	}

	err = request_irq(IRQ_PRCMU_CA_WAKE, shrm_prcmu_irq_handler,
		IRQF_NO_SUSPEND, "ca-wake", shrm);
	if (err < 0) {
		dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_WAKE.\n");
		goto drop2;
	}

	err = request_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, shrm_prcmu_irq_handler,
			IRQF_NO_SUSPEND, "modem-sw-reset-req", shrm);
	if (err < 0) {
		dev_err(shm_dev->dev,
				"Failed alloc IRQ_PRCMU_MODEM_SW_RESET_REQ.\n");
		goto drop1;
	}

#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
	/* init netlink socket for user-space communication */
	shrm_nl_sk = netlink_kernel_create(NULL, NETLINK_SHRM, 1,
			shm_nl_receive, NULL, THIS_MODULE);

	if (!shrm_nl_sk) {
		dev_err(shm_dev->dev, "netlink socket creation failed\n");
		err = -ENOMEM;	/* err still holds 0 from the last request_irq */
		goto drop;
	}
#endif

	return 0;

#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
drop:
	free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, shrm);
#endif
drop1:
	free_irq(IRQ_PRCMU_CA_WAKE, shrm);
drop2:
	free_irq(IRQ_PRCMU_CA_SLEEP, shrm);
free_wq6:
	destroy_workqueue(shrm->shm_mod_stuck_wq);
free_wq5:
	destroy_workqueue(shrm->shm_ac_sleep_wq);
free_wq4:
	destroy_workqueue(shrm->shm_ca_wake_wq);
free_wq3:
	destroy_workqueue(shrm->shm_ac_wake_wq);
free_wq2:
	destroy_workqueue(shrm->shm_audio_ch_wr_wq);
free_wq1:
	destroy_workqueue(shrm->shm_common_ch_wr_wq);
	return err;
}
Example #30
static void __init synchronize_tsc_bp (void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	long one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ",num_booting_cpus());

	one_usec = cpu_khz / 1000;	/* TSC cycles per microsecond */

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * all APs synchronize but they loop on '== num_cpus'
		 */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) mb();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc_count_start);

		sync_core();
		rdtscll(tsc_values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) mb();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = sum / num_booting_cpus();

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;

		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * We report bigger than 2 microseconds clock differences.
		 */
		if (delta > 2*one_usec) {
			long realdelta;
			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta / one_usec;
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
				i, realdelta);
		}

		sum += delta;
	}
	if (!buggy)
		printk("passed.\n");
}
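For completeness, the AP side of this handshake mirrors the loop above: each secondary checks in on tsc_count_start, samples (and on the last pass zeroes) its TSC at the same point, then checks out on tsc_count_stop. A hedged sketch reconstructed from the BP logic, not verbatim kernel code:

static void __init synchronize_tsc_ap_sketch(void)
{
	int i;

	/* wait for the BP to open the synchronization window */
	while (!atomic_read(&tsc_start_flag))
		mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			mb();

		sync_core();
		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS - 1)
			write_tsc(0, 0);	/* last pass: zero in lockstep */

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus())
			mb();
	}
}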