static int kwdt_thread(void *arg)
{

	struct sched_param param = {.sched_priority = RTPM_PRIO_WDT };
	struct rtc_time tm;
	struct timeval tv = { 0 };
	/* android time */
	struct rtc_time tm_android;
	struct timeval tv_android = { 0 };
	int cpu = 0;
	int local_bit = 0, loc_need_config = 0, loc_timeout = 0;
	struct wd_api *loc_wk_wdt = NULL;

	sched_setscheduler(current, SCHED_FIFO, &param);
	set_current_state(TASK_INTERRUPTIBLE);

	for (;;) {

		if (kthread_should_stop())
			break;
		spin_lock(&lock);
		cpu = smp_processor_id();
		loc_wk_wdt = g_wd_api;
		loc_need_config = g_need_config;
		loc_timeout = g_timeout;
		spin_unlock(&lock);
		/* printk("fwq loc_wk_wdt(%x),loc_wk_wdt->ready(%d)\n",loc_wk_wdt ,loc_wk_wdt->ready); */
		if (loc_wk_wdt && loc_wk_wdt->ready && g_enable) {
			if (loc_need_config) {
				/* dual mode */
				loc_wk_wdt->wd_config(WDT_DUAL_MODE, loc_timeout);
				spin_lock(&lock);
				g_need_config = 0;
				spin_unlock(&lock);
			}
			/* printk("[WDK]  cpu-task=%d, current_pid=%d\n",  wk_tsk[cpu]->pid,  current->pid); */
			if (wk_tsk[cpu]->pid == current->pid) {
				/* only process WDT info if thread-x is on cpu-x */
				spin_lock(&lock);
				local_bit = kick_bit;
				printk_deferred("[WDK], local_bit:0x%x, cpu:%d,RT[%lld]\n", local_bit,
					     cpu, sched_clock());
				if ((local_bit & (1 << cpu)) == 0) {
					/* printk("[WDK]: set  WDT kick_bit\n"); */
					local_bit |= (1 << cpu);
					/* aee_rr_rec_wdk_kick_jiffies(jiffies); */
				}
				printk_deferred("[WDK], local_bit:0x%x, cpu:%d, check bit0x:%x,RT[%lld]\n",
				     local_bit, cpu, wk_check_kick_bit(), sched_clock());
				if (local_bit == wk_check_kick_bit()) {
					printk_deferred("[WDK]: kick Ex WDT,RT[%lld]\n",
						     sched_clock());
					mtk_wdt_restart(WD_TYPE_NORMAL);	/* for KICK external wdt */
					local_bit = 0;
				}
				kick_bit = local_bit;
				spin_unlock(&lock);

#ifdef CONFIG_LOCAL_WDT
				printk_deferred("[WDK]: cpu:%d, kick local wdt,RT[%lld]\n", cpu,
					     sched_clock());
				/* kick local wdt */
				mpcore_wdt_restart(WD_TYPE_NORMAL);
#endif
			}
		} else if (0 == g_enable) {
			printk("WDK stop to kick\n");
		} else {
			errmsg("No watch dog driver is hooked\n");
			BUG();
		}

		if (wk_tsk[cpu]->pid == current->pid) {
#if (DEBUG_WDK == 1)
			msleep(debug_sleep * 1000);
			dbgmsg("WD kicker woke up %d\n", debug_sleep);
#endif
			do_gettimeofday(&tv);
			tv_android = tv;
			rtc_time_to_tm(tv.tv_sec, &tm);
			tv_android.tv_sec -= sys_tz.tz_minuteswest * 60;
			rtc_time_to_tm(tv_android.tv_sec, &tm_android);
			printk_deferred("[thread:%d][RT:%lld] %d-%02d-%02d %02d:%02d:%02d.%u UTC; android time %d-%02d-%02d %02d:%02d:%02d.%03d\n",
			     current->pid, sched_clock(), tm.tm_year + 1900, tm.tm_mon + 1,
			     tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, (unsigned int)tv.tv_usec,
			     tm_android.tm_year + 1900, tm_android.tm_mon + 1, tm_android.tm_mday,
			     tm_android.tm_hour, tm_android.tm_min, tm_android.tm_sec,
			     (unsigned int)tv_android.tv_usec);
		}

		msleep((g_kinterval) * 1000);

#ifdef CONFIG_MTK_AEE_POWERKEY_HANG_DETECT
		/* only takes effect on cpu0 */
		if ((cpu == 0) && (wk_tsk[cpu]->pid == current->pid)) {
			if (aee_kernel_wdt_kick_api(g_kinterval) == WDT_PWK_HANG_FORCE_HWT) {
				printk_deferred("power key trigger HWT\n");
				cpus_kick_bit = 0xFFFF;	/* Try to force to HWT */
			}
		}
#endif
	}
	printk("[WDK] WDT kicker thread stop, cpu:%d, pid:%d\n", cpu, current->pid);
	return 0;
}
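
The loop above only kicks the external watchdog once every CPU's kicker thread has checked in, which relies on thread-x being pinned to cpu-x. A minimal creation sketch under that assumption (everything except wk_tsk[] and kwdt_thread() is hypothetical):

static int __init wk_start_kick_threads(void)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++) {
		wk_tsk[i] = kthread_create(kwdt_thread, NULL, "wdtk-%d", i);
		if (IS_ERR(wk_tsk[i]))
			return PTR_ERR(wk_tsk[i]);
		/* pin thread-i to cpu-i so the pid check above holds */
		kthread_bind(wk_tsk[i], i);
		wake_up_process(wk_tsk[i]);
	}
	return 0;
}
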
Example 2
static int stheno_request_thread( void *arg )
{
#if defined( USE_KMALLOC )
    struct euryale_block *euryale_block;
#else
    /* the kernel stack is too small (1024 bytes?) to hold this array */
    /*struct euryale_block euryale_block[MAX_SECTORS+1];*/
#endif
    unsigned long sector;
    unsigned long nr;
    struct request *req;
    struct bio_vec *bvec;
    struct req_iterator iter;
    int ret;
    int i;

#if defined( USE_KMALLOC )
    euryale_block = (struct euryale_block*)kmalloc( sizeof( struct euryale_block ) * (MAX_SECTORS + 1), GFP_KERNEL );
    if( euryale_block == NULL ){
        print_error( "stheno kmalloc failed.\n" );
    }
#endif
    while( 1 ){
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        wake_lock( &stheno_wakelock );

        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( req == NULL ) break;

#if 0 /* S Mod 2011.12.27 For ICS */
            if( !blk_fs_request( req ) ){
#else
            if( !(req->cmd_type == REQ_TYPE_FS) ){
#endif /* S Mod 2011.12.27 For ICS */
                ret = -EIO;
                goto skip;
            }
            if( stheno_read_sector0() != 0 ){
                ret = -EIO;
                goto skip;
            }
            sector = blk_rq_pos( req );
            nr = blk_rq_sectors( req );

/* S Add 2012.02.09 For ICS */
            if( nr == 0 ){
                ret = -EIO;
                goto skip;
            }
/* E Add 2012.02.09 For ICS */

#if defined( USE_KMALLOC )
            if( euryale_block == NULL ){ /* the kmalloc above may have failed */
                ret = -EIO;
                goto skip;
            }
#endif
            i = 0;
            rq_for_each_segment( bvec, req, iter ){
                if( i >= MAX_SECTORS ){
                    print_error( "stheno euryale_block overrun error.\n" );
                    ret = -EIO;
                    goto skip;
                }
                euryale_block[i].buffer = page_address(bvec->bv_page) + bvec->bv_offset;
                euryale_block[i].length = bvec->bv_len;
                ++i;
            }
            euryale_block[i].buffer = 0; /* end of buffer */

            if( rq_data_dir( req ) == 0 ){
                ret = euryale_api_blockread( stheno_lbaoffset + sector, nr, euryale_block );
                print_debug( "stheno euryale_api_blockread sec=%ld nr=%ld ret=%d.\n", stheno_lbaoffset + sector, nr, ret );
            }else{
                ret = euryale_api_blockwrite( stheno_lbaoffset + sector, nr, euryale_block );
                print_debug( "stheno euryale_api_blockwrite sec=%ld nr=%ld ret=%d.\n", stheno_lbaoffset + sector, nr, ret );
            }
        skip:
            spin_lock_irq( stheno_queue->queue_lock );
            __blk_end_request_all( req, ret == 0 ? 0 : -EIO );
            /*__blk_end_request( req, ret == 0 ? 0 : -EIO, blk_rq_bytes( req ) );*/
            spin_unlock_irq( stheno_queue->queue_lock );
            /*print_debug( "stheno blk_end_request called.(ret=%d)\n", ret );*/
        }
        wake_unlock( &stheno_wakelock );
        /*print_debug( "stheno end of request.\n" );*/
    }
    print_debug("stheno_request_thread was terminated.\n");
#if defined( USE_KMALLOC )
    if( euryale_block != NULL ) kfree( euryale_block );
#endif
    return 0;
}
#endif

static void stheno_request( struct request_queue *q )
{
    /* caution : should be atomic procedure */
    stheno_wakeup = 1;
    wake_up_interruptible( &stheno_wait_q );
}
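
stheno_request() is called in atomic context (see the caution comment), so it only raises a flag and wakes the wait queue; the thread does the blocking work. A hedged setup/teardown sketch tying the two halves together (the helper names and the stheno_thread variable are assumptions):

static struct task_struct *stheno_thread;

static int stheno_start_thread(void)
{
	init_waitqueue_head(&stheno_wait_q);
	stheno_thread = kthread_run(stheno_request_thread, NULL, "stheno_d");
	return IS_ERR(stheno_thread) ? PTR_ERR(stheno_thread) : 0;
}

static void stheno_stop_thread(void)
{
	/* kthread_stop() wakes the task; the wait_event condition above
	 * also tests kthread_should_stop(), so the loop exits cleanly. */
	kthread_stop(stheno_thread);
}
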
static int ps3_probe_thread(void *data)
{
	struct ps3_notification_device dev;
	int res;
	unsigned int irq;
	u64 lpar;
	void *buf;
	struct ps3_notify_cmd *notify_cmd;
	struct ps3_notify_event *notify_event;

	pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__);

	buf = kzalloc(512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	lpar = ps3_mm_phys_to_lpar(__pa(buf));
	notify_cmd = buf;
	notify_event = buf;

	/* dummy system bus device */
	dev.sbd.bus_id = (u64)data;
	dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID;
	dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID;

	res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0);
	if (res) {
		pr_err("%s:%u: lv1_open_device failed %s\n", __func__,
		       __LINE__, ps3_result(res));
		goto fail_free;
	}

	res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY,
					      &irq);
	if (res) {
		pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
		       __func__, __LINE__, res);
	       goto fail_close_device;
	}

	spin_lock_init(&dev.lock);

	res = request_irq(irq, ps3_notification_interrupt, IRQF_DISABLED,
			  "ps3_notification", &dev);
	if (res) {
		pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
		       res);
		goto fail_sb_event_receive_port_destroy;
	}

	/* Setup and write the request for device notification. */
	notify_cmd->operation_code = 0; /* must be zero */
	notify_cmd->event_mask = 1UL << notify_region_probe;

	res = ps3_notification_read_write(&dev, lpar, 1);
	if (res)
		goto fail_free_irq;

	/* Loop here processing the requested notification events. */
	do {
		try_to_freeze();

		memset(notify_event, 0, sizeof(*notify_event));

		res = ps3_notification_read_write(&dev, lpar, 0);
		if (res)
			break;

		pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu"
			 " type %llu port %llu\n", __func__, __LINE__,
			 notify_event->event_type, notify_event->bus_id,
			 notify_event->dev_id, notify_event->dev_type,
			 notify_event->dev_port);

		if (notify_event->event_type != notify_region_probe ||
		    notify_event->bus_id != dev.sbd.bus_id) {
			pr_warning("%s:%u: bad notify_event: event %llu, "
				   "dev_id %llu, dev_type %llu\n",
				   __func__, __LINE__, notify_event->event_type,
				   notify_event->dev_id,
				   notify_event->dev_type);
			continue;
		}

		ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id);

	} while (!kthread_should_stop());

fail_free_irq:
	free_irq(irq, &dev);
fail_sb_event_receive_port_destroy:
	ps3_sb_event_receive_port_destroy(&dev.sbd, irq);
fail_close_device:
	lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id);
fail_free:
	kfree(buf);

	probe_task = NULL;

	pr_debug(" <- %s:%u: kthread finished\n", __func__, __LINE__);

	return 0;
}
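
The bus id reaches the thread through the opaque void * argument, matching the (u64)data cast above. A launcher sketch under that assumption (error handling in the real driver may differ):

static int ps3_start_probe_thread_sketch(u64 bus_id)
{
	struct task_struct *p;

	p = kthread_run(ps3_probe_thread, (void *)bus_id,
			"ps3-probe-%llu", bus_id);
	if (IS_ERR(p))
		return PTR_ERR(p);
	probe_task = p;	/* cleared by the thread itself on exit */
	return 0;
}
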
Example 4
static int lge_dm_tty_read_thread(void *data)
{
	int i = 0;
	struct dm_tty *lge_dm_tty_drv = NULL;
	int copy_data = 0;
	unsigned long flags;

	lge_dm_tty_drv = lge_dm_tty;

	while (1) {

		wait_event_interruptible(lge_dm_tty->waitq,
			lge_dm_tty->set_logging);

		mutex_lock(&driver->diagchar_mutex);
		copy_data = 0;	/* reset for this pass */

		if ((lge_dm_tty->set_logging == 1)
				&& (driver->logging_mode == DM_APP_MODE)) {

			/* copy modem data */	
			for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
				struct diag_smd_info *data = &driver->smd_data[i];

				if (data->in_busy_1 == 1) {
					lge_dm_tty_modem_response(
					lge_dm_tty_drv,
					data->buf_in_1,
					data->write_ptr_1->length);

					diag_ws_on_copy();
					copy_data = 1;

					spin_lock_irqsave(&data->in_busy_lock, flags);
					data->in_busy_1 = 0;
					spin_unlock_irqrestore(&data->in_busy_lock, flags);
				}

				if (data->in_busy_2 == 1) {
					lge_dm_tty_modem_response(
					lge_dm_tty_drv,
					data->buf_in_2,
					data->write_ptr_2->length);

					diag_ws_on_copy();
					copy_data = 1;

					spin_lock_irqsave(&data->in_busy_lock, flags);
					data->in_busy_2 = 0;
					spin_unlock_irqrestore(&data->in_busy_lock, flags);
				}
			}

			if (lge_dm_tty_drv->logging_mode == DM_APP_SDM) {
				for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) {
					struct diag_smd_info *cmd = &driver->smd_cmd[i];

					if (cmd->in_busy_1 == 1) {
						if (cmd->write_ptr_1->length > 0 &&
						    cmd->buf_in_1 != NULL) {
							lge_dm_tty_modem_response(
								lge_dm_tty_drv,
								cmd->buf_in_1,
								cmd->write_ptr_1->length);
						}
						cmd->in_busy_1 = 0;
					}
				}
			}

			lge_dm_tty->set_logging = 0;

			for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
				if (driver->smd_data[i].ch)
					queue_work(driver->diag_wq,
					&(driver->smd_data[i].diag_read_smd_work));
			}

		}

		mutex_unlock(&driver->diagchar_mutex);
		if (copy_data) {
			/*
			 * Flush any work that is currently pending on the data
			 * channels. This will ensure that the next read is not
			 * missed.
			 */
			for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++)
				flush_workqueue(driver->smd_data[i].wq);
			wake_up(&driver->smd_wait_q);
			diag_ws_on_copy_complete();
		}

		if (kthread_should_stop())
			break;
		mdelay(1);

	}

	return 0;

}
static int VibeOSKernelTimerProc(void* data)
{
	int nActuatorNotPlaying;
	int i;
	int bReachEndBuffer = 0;

	while (!kthread_should_stop()) {
		if (g_bTimerThreadStarted) {
			/* Block until we get woken up by timer tick */
			/* . only do this if we're not exiting entirely */
			wait_for_completion(&g_tspCompletion);

			/* Reinitialize the completion so it isn't already signaled by default */
			init_completion(&g_tspCompletion);
		}

		nActuatorNotPlaying = 0;

		/* Return right away if timer is not supposed to run */
		if (g_bTimerStarted) {
			for (i = 0; i < NUM_ACTUATORS; i++) {
				actuator_samples_buffer *pCurrentActuatorSample = &(g_SamplesBuffer[i]);

				if (-1 == pCurrentActuatorSample->nIndexPlayingBuffer) {
					nActuatorNotPlaying++;
					if ((NUM_ACTUATORS == nActuatorNotPlaying) && ((++g_nWatchdogCounter) > WATCHDOG_TIMEOUT)) {
						/*
						Nothing to play for all actuators,
						turn off the timer when we reach the watchdog tick count limit
						*/
						ImmVibeSPI_ForceOut_Set(i, 0);
						ImmVibeSPI_ForceOut_AmpDisable(i);
						VibeOSKernelLinuxStopTimer();
						/* Reset watchdog counter */
						g_nWatchdogCounter = 0;
					}
				} else {
					/* Play the current buffer */
					ImmVibeSPI_ForceOut_Set(i, pCurrentActuatorSample->actuatorSamples[(int)pCurrentActuatorSample->nIndexPlayingBuffer].dataBuffer[(int)(pCurrentActuatorSample->nIndexOutputValue++)]);

					if (pCurrentActuatorSample->nIndexOutputValue >= pCurrentActuatorSample->actuatorSamples[(int)pCurrentActuatorSample->nIndexPlayingBuffer].nBufferSize) {
						/* We were playing in the last tick */
						/* Reach the end of the current buffer */
						pCurrentActuatorSample->actuatorSamples[(int)pCurrentActuatorSample->nIndexPlayingBuffer].nBufferSize = 0;

						bReachEndBuffer = 1;

						/* Check stop request and empty buffer */
						if ((g_bStopRequested) || (0 == (pCurrentActuatorSample->actuatorSamples[(int)((pCurrentActuatorSample->nIndexPlayingBuffer) ^ 1)].nBufferSize))) {
							pCurrentActuatorSample->nIndexPlayingBuffer = -1;

							if (g_bStopRequested) {
								/* g_bStopReqested is set, so turn off all actuators */
								ImmVibeSPI_ForceOut_Set(i, 0);
								ImmVibeSPI_ForceOut_AmpDisable(i);

								/* If it's the last actuator, stop the timer */
								if (i == (NUM_ACTUATORS-1)) {
									VibeOSKernelLinuxStopTimer();

									/* Reset watchdog counter */
									g_nWatchdogCounter = 0;
								}
							}
						} else { /* The other buffer has data in it */
							/* Switch buffer */
							(pCurrentActuatorSample->nIndexPlayingBuffer) ^= 1;
							pCurrentActuatorSample->nIndexOutputValue = 0;
						}
					}
				}
			}
			/* Release the mutex if locked */
			if (bReachEndBuffer && VibeSemIsLocked(&g_hMutex)) {
				up(&g_hMutex);
			}
		}
	}
	return 0;
}
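
The loop blocks on g_tspCompletion, so some timer context must call complete() once per output tick. A sketch of that assumed producer, an hrtimer callback with a hypothetical 5 ms period (g_tspTimer would be armed with hrtimer_start() elsewhere):

static struct hrtimer g_tspTimer;

static enum hrtimer_restart vibe_timer_tick(struct hrtimer *timer)
{
	/* wake VibeOSKernelTimerProc() for one more sample period */
	complete(&g_tspCompletion);
	hrtimer_forward_now(timer, ktime_set(0, 5 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
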
Example 6
File: cd_srv.c  Project: GoodOkk/tfs
static int csrv_thread_routine(void *data)
{
	struct socket *lsock = NULL;
	struct socket *con_sock = NULL;
	struct csrv_con *con = NULL;
	int error = 0;

	while (!kthread_should_stop()) {
		if (!csrv_sock) {
			error = ksock_listen(&lsock, INADDR_ANY, 9111, 5);
			if (error) {
				klog(KL_ERR, "csock_listen err=%d", error);
				msleep_interruptible(LISTEN_RESTART_TIMEOUT_MS);
				continue;
			} else {
				mutex_lock(&csrv_lock);
				csrv_sock = lsock;
				mutex_unlock(&csrv_lock);
			}
		}

		if (csrv_sock && !csrv_stopping) {
			klog(KL_DEBUG, "accepting");
			error = ksock_accept(&con_sock, csrv_sock);
			if (error) {
				if (error == -EAGAIN)
					klog(KL_WARN, "csock_accept err=%d", error);
				else
					klog(KL_ERR, "csock_accept err=%d", error);
				continue;
			}
			klog(KL_DEBUG, "accepted con_sock=%p", con_sock);

			if (!csrv_con_start(con_sock)) {
				klog(KL_ERR, "csrv_con_start failed");
				ksock_release(con_sock);
				continue;
			}
		}
	}

	error = 0;
	klog(KL_INFO, "releasing listen socket");
	
	mutex_lock(&csrv_lock);
	lsock = csrv_sock;
	csrv_sock = NULL;
	mutex_unlock(&csrv_lock);

	if (lsock)
		ksock_release(lsock);
	
	klog(KL_INFO, "releasing cons");

	for (;;) {
		con = NULL;
		mutex_lock(&con_list_lock);
		if (!list_empty(&con_list)) {
			con = list_first_entry(&con_list, struct csrv_con, con_list);
			list_del_init(&con->con_list);		
		}
		mutex_unlock(&con_list_lock);
		if (!con)
			break;

		csrv_con_wait(con);
		csrv_con_free(con);
	}

	return 0;
}
Example 7
/* Main loop of the progression thread: keep eating from the buffer
 * and push to the serial port, waiting as needed
 *
 * For devices that have a "full" notification mechanism, the driver can
 * adapt the loop the way they prefer.
 */
void spk_do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	unsigned long flags;
	unsigned long jiff_max;
	struct var_t *delay_time;
	struct var_t *full_time;
	struct var_t *jiffy_delta;
	int jiffy_delta_val;
	int delay_time_val;
	int full_time_val;

	jiffy_delta = get_var(JIFFY);
	full_time = get_var(FULL);
	delay_time = get_var(DELAY);

	spk_lock(flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spk_unlock(flags);

	jiff_max = jiffies + jiffy_delta_val;
	while (!kthread_should_stop()) {
		spk_lock(flags);
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spk_unlock(flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spk_unlock(flags);
			break;
		}
		ch = synth_buffer_peek();
		set_current_state(TASK_INTERRUPTIBLE);
		full_time_val = full_time->u.n.value;
		spk_unlock(flags);
		if (ch == '\n')
			ch = synth->procspeech;
		if (!spk_serial_out(ch)) {
			schedule_timeout(msecs_to_jiffies(full_time_val));
			continue;
		}
		if ((jiffies >= jiff_max) && (ch == SPACE)) {
			spk_lock(flags);
			jiffy_delta_val = jiffy_delta->u.n.value;
			delay_time_val = delay_time->u.n.value;
			full_time_val = full_time->u.n.value;
			spk_unlock(flags);
			if (spk_serial_out(synth->procspeech))
				schedule_timeout(
					msecs_to_jiffies(delay_time_val));
			else
				schedule_timeout(
					msecs_to_jiffies(full_time_val));
			jiff_max = jiffies + jiffy_delta_val;
		}
		set_current_state(TASK_RUNNING);
		spk_lock(flags);
		synth_buffer_getc();
		spk_unlock(flags);
	}
	spk_serial_out(synth->procspeech);
}
static int lge_dm_tty_read_thread(void *data)
{
	int i = 0;
	struct dm_tty *lge_dm_tty_drv = NULL;
	int clear_read_wakelock;

	lge_dm_tty_drv = lge_dm_tty;

	/* make common header */
	dm_modem_response_header->dm_router_cmd = DM_APP_MODEM_RESPONSE;
	dm_modem_response_header->dm_router_type = DM_APP_NOTICE;

	while (1) {

		wait_event_interruptible(lge_dm_tty->waitq,
			lge_dm_tty->set_logging);

		mutex_lock(&driver->diagchar_mutex);

		clear_read_wakelock = 0;
		if ((lge_dm_tty->set_logging == 1)
				&& (driver->logging_mode == DM_APP_MODE)) {

			/* copy android data */
			/*
			for (i = 0; i < driver->poolsize_write_struct; i++) {
				if (driver->buf_tbl[i].length > 0 && (void *)driver->buf_tbl[i].buf != NULL) {
					lge_dm_tty_modem_response(
					lge_dm_tty_drv,
					Primary_modem_chip,
					(void *)driver->buf_tbl[i].buf,
					driver->buf_tbl[i].length);

					diagmem_free(driver, (unsigned char *)
					(driver->buf_tbl[i].buf), POOL_TYPE_HDLC);
					driver->buf_tbl[i].length = 0;
					driver->buf_tbl[i].buf = 0;
				}
			}
			*/

			/* copy modem data */							
			for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
				struct diag_smd_info *data = &driver->smd_data[i];

				if (data->in_busy_1 == 1) {
					if (data->write_ptr_1->length > 0 &&
					    data->buf_in_1 != NULL)
						lge_dm_tty_modem_response(
							lge_dm_tty_drv,
							Primary_modem_chip,
							data->buf_in_1,
							data->write_ptr_1->length);

					if (!driver->real_time_mode) {
						process_lock_on_copy(&data->nrt_lock);
						clear_read_wakelock++;
					}

					data->in_busy_1 = 0;
				}

				if (data->in_busy_2 == 1) {
					if (data->write_ptr_2->length > 0 &&
					    data->buf_in_2 != NULL)
						lge_dm_tty_modem_response(
							lge_dm_tty_drv,
							Primary_modem_chip,
							data->buf_in_2,
							data->write_ptr_2->length);

					if (!driver->real_time_mode) {
						process_lock_on_copy(&data->nrt_lock);
						clear_read_wakelock++;
					}
					data->in_busy_2 = 0;
				}
			}
			

			lge_dm_tty->set_logging = 0;

			if (lge_dm_tty_drv->
				is_modem_open[Primary_modem_chip] == TRUE) {
				for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
					if (driver->smd_data[i].ch)
						queue_work(driver->diag_wq,
						&(driver->smd_data[i].diag_read_smd_work));
				}			
			}

		}

		if (clear_read_wakelock) {
			for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++)
				process_lock_on_copy_complete(
					&driver->smd_data[i].nrt_lock);
		}

		mutex_unlock(&driver->diagchar_mutex);

		if (kthread_should_stop())
			break;
		mdelay(1);

	}

	return 0;

}
Example 9
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
static int touch_event_handler(void *unused)
{
    struct sched_param param = { .sched_priority = RTPM_PRIO_TPD }; 
    static int x1, y1, x2, y2, raw_x1, raw_y1, raw_x2, raw_y2;
    int temp_x1 = x1, temp_y1 = y1, temp_raw_x1 = raw_x1, temp_raw_y1 = raw_y1;
    int lastUp_x = 0, lastUp_y = 0;
    char buffer[10];
    int ret = -1, touching, oldtouching;//int pending = 0
    unsigned char Wrbuf[1] = {0};
    
    sched_setscheduler(current, SCHED_RR, &param); 
    do{
        set_current_state(TASK_INTERRUPTIBLE);
        while (tpd_halt) {tpd_flag = 0; msleep(20);}
        wait_event_interruptible(waiter, tpd_flag != 0);
        tpd_flag = 0;
        TPD_DEBUG_SET_TIME;
        set_current_state(TASK_RUNNING); 

        i2c_client->addr = ( i2c_client->addr & I2C_MASK_FLAG ) | I2C_ENEXT_FLAG;
        ret = i2c_master_send(i2c_client, Wrbuf, 1);
        if(ret != sizeof(Wrbuf))
        {
            TPD_DEBUG("[mtk-tpd] i2c write communcate error: 0x%x\n", ret);
            continue;
        }
        i2c_client->addr = ( ( i2c_client->addr & I2C_MASK_FLAG ) | I2C_DMA_FLAG ) | I2C_ENEXT_FLAG;
        ret = tpd_i2c_read(i2c_client, buffer, 7);
        buffer[7] = buffer[8] = buffer[9] = 0;
        if (ret != 7)//sizeof(buffer)
        {
            TPD_DEBUG("[mtk-tpd] i2c read communcate error: 0x%x\n", ret);
            continue;
        }
        i2c_client->addr = i2c_client->addr & I2C_MASK_FLAG;

        touching = buffer[0];
        if(touching > 0) 
        {
            raw_x1 = x1 = ((buffer[3] << 8) | buffer[2]);
            raw_y1 = y1 = ((buffer[5] << 8) | buffer[4]);
        }
        if(touching > 1)
        {
            raw_x2 = x2 = ((buffer[7] << 8) | buffer[6]);
            raw_y2 = y2 = ((buffer[9] << 8) | buffer[8]);
        }
        oldtouching = buffer[1]; 
        TPD_DEBUG("[mtk-tpd]:raw_x1:%d, raw_y1:%d, raw_x2:%d, raw_y2:%d\n", raw_x1, raw_y1, raw_x2, raw_y2);
        TPD_DEBUG("[mtk-tpd]:touch:%d, old_touch:%d\n", touching, oldtouching);
        switch(touching)
        {
        case 0:
            /* touching=0, oldtouching 0 is invalid */
            if(oldtouching > 0)
            {
                //tpd_up(raw_y1, raw_x1, y1, x1, 0);	
                //tpd_up(raw_x1, raw_y1, x1, y1, 0);
                lastUp_x = x1;
                lastUp_y = y1;
            }
            if(oldtouching > 1)
            {
                //tpd_up(raw_y2, raw_x2, y2, x2, 0);	
                //tpd_up(raw_x2, raw_y2, x2, y2, 0);
                //lastUp_x = x1;
                //lastUp_y = y1;
            }
            tpd_up(lastUp_x, lastUp_y, lastUp_x, lastUp_y, 0);
            break;
        case 1:
            tpd_calibrate(&x1, &y1);
            //tpd_down(raw_y1, raw_x1, y1, x1, 1);
            tpd_down(raw_x1, raw_y1, x1, y1, 1);
            if(oldtouching == 2)
            {
                if(abs(x1 - x2) < 2 && abs(y1 - y2) < 2) // need to adjust.
                {
                    //tpd_up(temp_raw_y1, temp_raw_x1, temp_y1, temp_x1, 0);
                    
                    //For ICS
                    //tpd_up(temp_raw_x1, temp_raw_y1, temp_x1, temp_y1, 0);
                }
                else
                {
                    //tpd_up(raw_y2, raw_x2, y2, x2, 0);
                    
                    //For ICS
                    //tpd_up(raw_x2, raw_y2, x2, y2, 0);
                }
            }
            break;
        case 2:
            tpd_calibrate(&x1, &y1);
            //tpd_down(raw_y1, raw_x1, y1, x1, 1);
            tpd_down(raw_x1, raw_y1, x1, y1, 1);
            //tpd_calibrate(&x2, &y2);
            //tpd_down(raw_y2, raw_x2, y2, x2, 1);
            //tpd_down(raw_x2, raw_y2, x2, y2, 1);
            break;
        default:
            TPD_DEBUG("[mtk-tpd] invalid touch num: 0x%x\n", touching);
            continue;
        }
        temp_x1 = x1;
        temp_y1 = y1;      
        temp_raw_x1 = raw_x1;
        temp_raw_y1 = raw_y1;
        input_sync(tpd->dev);
        
    } while (!kthread_should_stop()); 
    return 0;
}
Example 11
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			spin_unlock(&c->erase_completion_lock);
			jffs2_dbg(1, "%s(): sleeping...\n", __func__);
			schedule();
		} else
			spin_unlock(&c->erase_completion_lock);

		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing.  Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down.   Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		schedule_timeout_interruptible(msecs_to_jiffies(50));

		if (kthread_should_stop()) {
			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
			goto die;
		}

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				jffs2_dbg(1, "%s(): SIGSTOP received\n",
					  __func__);
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				jffs2_dbg(1, "%s(): SIGKILL received\n",
					  __func__);
				goto die;

			case SIGHUP:
				jffs2_dbg(1, "%s(): SIGHUP received\n",
					  __func__);
				break;
			default:
				jffs2_dbg(1, "%s(): signal %ld received\n",
					  __func__, signr);
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		jffs2_dbg(1, "%s(): pass\n", __func__);
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			pr_notice("No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
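
The thread advertises itself through c->gc_task and the gc_thread_start completion, which suggests a starter like this sketch (a hedged reconstruction, not the mainline helper verbatim):

static int jffs2_start_gc_thread_sketch(struct jffs2_sb_info *c)
{
	struct task_struct *tsk;

	init_completion(&c->gc_thread_start);
	init_completion(&c->gc_thread_exit);
	tsk = kthread_run(jffs2_garbage_collect_thread, c,
			  "jffs2_gcd_mtd%d", c->mtd->index);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	/* wait until the thread has published c->gc_task */
	wait_for_completion(&c->gc_thread_start);
	return 0;
}
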
Example 12
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	unsigned long flags;
	unsigned long jiff_max;
	int timeout;
	int delay_time_val;
	int jiffy_delta_val;
	int full_time_val;
	struct var_t *delay_time;
	struct var_t *full_time;
	struct var_t *jiffy_delta;

	jiffy_delta = get_var(JIFFY);
	delay_time = get_var(DELAY);
	full_time = get_var(FULL);

	spk_lock(flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spk_unlock(flags);

	jiff_max = jiffies + jiffy_delta_val;
	while (!kthread_should_stop()) {
		spk_lock(flags);
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spk_unlock(flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spk_unlock(flags);
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		full_time_val = full_time->u.n.value;
		spk_unlock(flags);
		if (synth_full()) {
			schedule_timeout(msecs_to_jiffies(full_time_val));
			continue;
		}
		set_current_state(TASK_RUNNING);
		timeout = SPK_XMITR_TIMEOUT;
		while (synth_writable()) {
			if (!--timeout)
				break;
			udelay(1);
		}
		spk_lock(flags);
		ch = synth_buffer_getc();
		spk_unlock(flags);
		if (ch == '\n')
			ch = PROCSPEECH;
		outb_p(ch, speakup_info.port_tts);
		if (jiffies >= jiff_max && ch == SPACE) {
			timeout = SPK_XMITR_TIMEOUT;
			while (synth_writable()) {
				if (!--timeout)
					break;
				udelay(1);
			}
			outb_p(PROCSPEECH, speakup_info.port_tts);
			spk_lock(flags);
			jiffy_delta_val = jiffy_delta->u.n.value;
			delay_time_val = delay_time->u.n.value;
			spk_unlock(flags);
			schedule_timeout(msecs_to_jiffies(delay_time_val));
			jiff_max = jiffies+jiffy_delta_val;
		}
	}
	timeout = SPK_XMITR_TIMEOUT;
	while (synth_writable()) {
		if (!--timeout)
			break;
		udelay(1);
	}
	outb_p(PROCSPEECH, speakup_info.port_tts);
}
static int mpq_dmx_tsif_thread(void *arg)
{
	struct mpq_demux *mpq_demux;
	struct tsif_driver_info *tsif_driver;
	size_t packets = 0;
	int tsif = (int)arg;
	int ret;

	do {
		ret = wait_event_interruptible(
			mpq_dmx_tsif_info.tsif[tsif].wait_queue,
			(atomic_read(
				&mpq_dmx_tsif_info.tsif[tsif].data_cnt) != 0) ||
			kthread_should_stop());

		if ((ret < 0) || kthread_should_stop()) {
			MPQ_DVB_DBG_PRINT("%s: exit\n", __func__);
			break;
		}

		if (mutex_lock_interruptible(
			&mpq_dmx_tsif_info.tsif[tsif].mutex))
			return -ERESTARTSYS;

		tsif_driver = &(mpq_dmx_tsif_info.tsif[tsif].tsif_driver);
		mpq_demux = mpq_dmx_tsif_info.tsif[tsif].mpq_demux;

		
		if (tsif_driver->tsif_handler == NULL) {
			mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
			MPQ_DVB_DBG_PRINT(
				"%s: tsif was detached\n",
				__func__);
			continue;
		}

		tsif_get_state(
			tsif_driver->tsif_handler, &(tsif_driver->ri),
			&(tsif_driver->wi), &(tsif_driver->state));

		if ((tsif_driver->wi == tsif_driver->ri) ||
			(tsif_driver->state == tsif_state_stopped) ||
			(tsif_driver->state == tsif_state_error)) {

			mpq_demux->hw_notification_size = 0;

			mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);

			MPQ_DVB_DBG_PRINT(
				"%s: TSIF invalid state %d, %d, %d\n",
				__func__,
				tsif_driver->state,
				tsif_driver->wi,
				tsif_driver->ri);
			continue;
		}

		atomic_dec(&mpq_dmx_tsif_info.tsif[tsif].data_cnt);

		if (tsif_driver->wi > tsif_driver->ri) {
			packets = (tsif_driver->wi - tsif_driver->ri);
			mpq_demux->hw_notification_size = packets;

			dvb_dmx_swfilter_format(
				&mpq_demux->demux,
				(tsif_driver->data_buffer +
				(tsif_driver->ri * TSIF_PKT_SIZE)),
				(packets * TSIF_PKT_SIZE),
				DMX_TSP_FORMAT_192_TAIL);

			tsif_driver->ri =
				(tsif_driver->ri + packets) %
				tsif_driver->buffer_size;

			tsif_reclaim_packets(
				tsif_driver->tsif_handler,
					tsif_driver->ri);
		} else {
			packets = (tsif_driver->buffer_size - tsif_driver->ri);
			mpq_demux->hw_notification_size = packets;

			dvb_dmx_swfilter_format(
				&mpq_demux->demux,
				(tsif_driver->data_buffer +
				(tsif_driver->ri * TSIF_PKT_SIZE)),
				(packets * TSIF_PKT_SIZE),
				DMX_TSP_FORMAT_192_TAIL);

			
			tsif_driver->ri =
				(tsif_driver->ri + packets) %
				tsif_driver->buffer_size;

			packets = tsif_driver->wi;
			if (packets > 0) {
				mpq_demux->hw_notification_size += packets;

				dvb_dmx_swfilter_format(
					&mpq_demux->demux,
					(tsif_driver->data_buffer +
					(tsif_driver->ri * TSIF_PKT_SIZE)),
					(packets * TSIF_PKT_SIZE),
					DMX_TSP_FORMAT_192_TAIL);

				tsif_driver->ri =
					(tsif_driver->ri + packets) %
					tsif_driver->buffer_size;
			}

			tsif_reclaim_packets(
				tsif_driver->tsif_handler,
				tsif_driver->ri);
		}

		mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
	} while (1);

	return 0;
}
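
The wait_event condition pairs the per-TSIF atomic data counter with kthread_should_stop(), so the producer side reduces to an increment plus a wake-up. A sketch of the assumed TSIF notification callback (name and signature are guesses):

static void mpq_tsif_data_notify(void *user)
{
	int tsif = (int)user;	/* same cast convention as the thread above */

	atomic_inc(&mpq_dmx_tsif_info.tsif[tsif].data_cnt);
	wake_up(&mpq_dmx_tsif_info.tsif[tsif].wait_queue);
}
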
Example 14
/* ent thread function */
static int entd(void *arg)
{
	struct super_block *super;
	entd_context *ent;
	int done = 0;

	super = arg;
	/* do_fork() just copies task_struct into the new
	   thread. ->fs_context shouldn't be copied of course. This shouldn't
	   be a problem for the rest of the code though.
	 */
	current->journal_info = NULL;

	ent = get_entd_context(super);

	while (!done) {
		try_to_freeze();

		spin_lock(&ent->guard);
		while (ent->nr_todo_reqs != 0) {
			struct wbq *rq;

			assert("", list_empty(&ent->done_list));

			/* take request from the queue head */
			rq = __get_wbq(ent);
			assert("", rq != NULL);
			ent->cur_request = rq;
			spin_unlock(&ent->guard);

			entd_set_comm("!");
			entd_flush(super, rq);

			put_wbq(rq);

			/*
			 * wakeup all requestors and iput their inodes
			 */
			spin_lock(&ent->guard);
			while (!list_empty(&ent->done_list)) {
				rq = list_entry(ent->done_list.next, struct wbq, link);
				list_del_init(&rq->link);
				ent->nr_done_reqs--;
				spin_unlock(&ent->guard);
				assert("", rq->written == 1);
				put_wbq(rq);
				spin_lock(&ent->guard);
			}
		}
		spin_unlock(&ent->guard);

		entd_set_comm(".");

		{
			DEFINE_WAIT(__wait);

			do {
				prepare_to_wait(&ent->wait, &__wait, TASK_INTERRUPTIBLE);
				if (kthread_should_stop()) {
					done = 1;
					break;
				}
				if (ent->nr_todo_reqs != 0)
					break;
				schedule();
			} while (0);
			finish_wait(&ent->wait, &__wait);
		}
	}
	BUG_ON(ent->nr_todo_reqs != 0);
	return 0;
}
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(s->sync_wq,
					s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (src_policy.cur == src_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Source CPU%d@%dKHz at min freq\n",
				 src_cpu, src_policy.cur);
			continue;
		}

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
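
The thread consumes s->pending and s->src_cpu under s->lock, so the producer must publish them the same way. A sketch of the migration-notifier side (signature simplified from the real notifier_block callback):

static void boost_note_migration(unsigned int src_cpu, unsigned int dest_cpu)
{
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);
	wake_up(&s->sync_wq);
}
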
Example 16
/*
 * This is a RT kernel thread that handles the ADC accesses
 * (mainly so we can use semaphores in the UCB1200 core code
 * to serialise accesses to the ADC).
 */
static int ucb1x00_thread(void *_ts)
{
	struct ucb1x00_ts *ts = _ts;
	DECLARE_WAITQUEUE(wait, current);
	int valid = 0;

	set_freezable();
	add_wait_queue(&ts->irq_wait, &wait);
	while (!kthread_should_stop()) {
		unsigned int x, y, p;
		signed long timeout;

		ts->restart = 0;

		ucb1x00_adc_enable(ts->ucb);

		x = ucb1x00_ts_read_xpos(ts);
		y = ucb1x00_ts_read_ypos(ts);
		p = ucb1x00_ts_read_pressure(ts);

		/*
		 * Switch back to interrupt mode.
		 */
		ucb1x00_ts_mode_int(ts);
		ucb1x00_adc_disable(ts->ucb);

		msleep(10);

		ucb1x00_enable(ts->ucb);


		if (ucb1x00_ts_pen_down(ts)) {
			set_current_state(TASK_INTERRUPTIBLE);

			ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING);
			ucb1x00_disable(ts->ucb);

			/*
			 * If we spat out a valid sample set last time,
			 * spit out a "pen off" sample here.
			 */
			if (valid) {
				ucb1x00_ts_event_release(ts);
				valid = 0;
			}

			timeout = MAX_SCHEDULE_TIMEOUT;
		} else {
			ucb1x00_disable(ts->ucb);

			/*
			 * Filtering is policy.  Policy belongs in user
			 * space.  We therefore leave it to user space
			 * to do any filtering they please.
			 */
			if (!ts->restart) {
				ucb1x00_ts_evt_add(ts, p, x, y);
				valid = 1;
			}

			set_current_state(TASK_INTERRUPTIBLE);
			timeout = HZ / 100;
		}

		try_to_freeze();

		schedule_timeout(timeout);
	}

	remove_wait_queue(&ts->irq_wait, &wait);

	ts->rtask = NULL;
	return 0;
}
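
The loop parks on ts->irq_wait via schedule_timeout(), so a pen-down interrupt only needs to wake that queue. A minimal sketch of the assumed IRQ handler:

static void ucb1x00_ts_irq_sketch(int idx, void *id)
{
	struct ucb1x00_ts *ts = id;

	/* assumption: the TSPX interrupt is masked/acked before this wake */
	wake_up(&ts->irq_wait);
}
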
Example 17
static int draw_thread(void *arg)
{
	struct vfd_ioctl_data *data = (struct vfd_ioctl_data *) arg;
	char buf[sizeof(data->data) + 2 * DISPLAYWIDTH_MAX];
	char buf2[sizeof(data->data) + 2 * DISPLAYWIDTH_MAX];
	int len = data->length;
	int off = 0;
	int saved = 0;

	if (panel_version.DisplayInfo == YWPANEL_FP_DISPTYPE_LED && len > 2 && data->data[2] == '.')
		saved = 1;

	if (len - saved > YWPANEL_width) {
		memset(buf, ' ', sizeof(buf));
		off = YWPANEL_width - 1;
		memcpy(buf + off, data->data, len);
		len += off;
		buf[len + YWPANEL_width] = 0;
	} else {
		memcpy(buf, data->data, len);
		buf[len] = 0;
	}

	draw_thread_stop = 0;

	if (saved) {
		int i;
		for (i = 0; i < len; i++)
			buf2[i] = (buf[i] == '.') ? ' ' : buf[i];
		buf2[i] = 0;
	}

	if(len - saved > YWPANEL_width) {
		char *b = saved ? buf2 : buf;
		int pos;
		for(pos = 0; pos < len; pos++) {
			int i;
			if(kthread_should_stop()) {
				draw_thread_stop = 1;
				return 0;
			}

			YWPANEL_VFD_ShowString(b + pos);
			// sleep 200 ms
			for (i = 0; i < 5; i++) {
				if(kthread_should_stop()) {
					draw_thread_stop = 1;
					return 0;
				}
				msleep(40);
			}
		}
	}

	clear_display();
	if(len > 0)
		YWPANEL_VFD_ShowString(buf + off);

	draw_thread_stop = 1;
	return 0;
}
Example 18
__s32 i2sirx_i2so_mix_engine
(
    void *param
)
{
    __u32                        mix_len;
    __u32                        i2so_len;    
    __u32                        i2sirx_len;  
    __u32                        mix_free;  
    uint32_t                     drop_ms;
    uint32_t                     align_ms;
    struct i2sirx_i2so_mix_info *mix;

    mix = &i2sirx_i2so_mix_info;
    
    for (;;)
    {
        if (kthread_should_stop())
        {
            printk("%s,%d,now exit.\n", __FUNCTION__, __LINE__);

            return(-__LINE__);
        }
        
        if (I2SIRX_I2SO_MIX_STATE_RUN != mix->state)
        {
            msleep_interruptible(40);

            continue;
        }
     
        mix_free = snd_pcm_capture_hw_avail(mix->capture_subs->runtime);
        mix_free = frames_to_bytes(mix->capture_subs->runtime, mix_free);

        if (mix_free < mix->period_bytes)
        {
            msleep_interruptible(40);
            continue;
        }

        i2so_len = i2so_see2main_ring_avail();
        if (i2so_len < mix->period_bytes)
        {
            msleep_interruptible(40);
            continue;
        }   

        i2sirx_len = i2sirx_see2main_ring_avail();
        if (i2sirx_len < mix->period_bytes / 4)
        {
            msleep_interruptible(40);
            continue;
        }

        /* Drop the first 500 ms of data to match up with the karaoke video.
         * The drop duration (in ms) can be customized from user space.
        */
        drop_ms = ali_alsa_i2sirx_i2so_mix_drop_ms_get();
        if (time_before(jiffies, mix->open_jiffies + drop_ms))
        {
            i2so_see2main_ring_read(i2so_see2main_read_buf, mix->period_bytes);
            i2sirx_see2main_ring_read(i2sirx_see2main_read_buf,
                mix->period_bytes / 4);
            continue;
        }   

        /* Compensate for the SPO PCM delay caused by the SEE audio buffer.
         * The align duration (in ms) can be customized from user space.
        */
        if (0 == mix->i2so_see2main_1st_jiffies)
        {
            mix->i2so_see2main_1st_jiffies = jiffies;         
        }

        align_ms = ali_alsa_i2sirx_i2so_mix_align_ms_get();
        
        if (time_after(jiffies, mix->i2so_see2main_1st_jiffies + align_ms))
        {
            i2so_see2main_ring_read(i2so_see2main_read_buf, mix->period_bytes);
    
            /* For debug.
            */
            if (1 == ali_alsa_i2so_see2main_dump_en_get())
            {
                ali_alsa_dump_data("/data/data/ali_alsa_i2so_see2main_dump.pcm",
                    i2so_see2main_read_buf, mix->period_bytes);     
            }
        }
        else
        {
            memset(i2so_see2main_read_buf, 0, mix->period_bytes);
        }

        /* Read data from MIC, prepare for mixing.
        */
        i2sirx_len = i2sirx_see2main_ring_read(i2sirx_see2main_read_buf,
            mix->period_bytes / 4);

        /* For debug.
        */
        if (1 == ali_alsa_i2sirx_see2main_dump_en_get())
        {
            ali_alsa_dump_data("/data/data/ali_alsa_i2sirx_see2main_dump.pcm",
                i2sirx_see2main_read_buf, i2sirx_len);    
        }

		/* Mix it.
		 * SPO: 44.1K, 32 bit, LE;
		 * MIC: 44.1K, 16 bit, LE.
		*/
        i2sirx_i2so_mix(i2so_see2main_read_buf, mix->period_bytes,
            i2sirx_see2main_read_buf, i2sirx_len, i2sirx_i2so_mix_buf);  

        /* For debug.
        */
        if (1 == ali_alsa_i2sirx_i2so_mix_dump_en_get())
        {
            ali_alsa_dump_data("/data/data/ali_alsa_i2sirx_i2so_mix_dump.pcm",
                i2sirx_i2so_mix_buf, mix->period_bytes);          
        }
        
        i2sirx_i2so_mix_ring_write(i2sirx_i2so_mix_buf, mix->period_bytes);
        
        snd_pcm_period_elapsed(mix->capture_subs);
    }

    return(0);
}
Example 19
/**
 *  lbs_thread - handles the major jobs in the LBS driver.
 *  It handles all events generated by firmware, RX data received
 *  from firmware and TX data sent from kernel.
 *
 *  @data:	A pointer to &lbs_thread structure
 *  returns:	0
 */
static int lbs_thread(void *data)
{
	struct net_device *dev = data;
	struct lbs_private *priv = dev->ml_priv;
	wait_queue_t wait;

	lbs_deb_enter(LBS_DEB_THREAD);

	init_waitqueue_entry(&wait, current);

	for (;;) {
		int shouldsleep;
		u8 resp_idx;

		lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n",
				priv->currenttxskb, priv->dnld_sent);

		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&priv->driver_lock);

		if (kthread_should_stop())
			shouldsleep = 0;	/* Bye */
		else if (priv->surpriseremoved)
			shouldsleep = 1;	/* We need to wait until we're _told_ to die */
		else if (priv->psstate == PS_STATE_SLEEP)
			shouldsleep = 1;	/* Sleep mode. Nothing we can do till it wakes */
		else if (priv->cmd_timed_out)
			shouldsleep = 0;	/* Command timed out. Recover */
		else if (!priv->fw_ready)
			shouldsleep = 1;	/* Firmware not ready. We're waiting for it */
		else if (priv->dnld_sent)
			shouldsleep = 1;	/* Something is en route to the device already */
		else if (priv->tx_pending_len > 0)
			shouldsleep = 0;	/* We've a packet to send */
		else if (priv->resp_len[priv->resp_idx])
			shouldsleep = 0;	/* We have a command response */
		else if (priv->cur_cmd)
			shouldsleep = 1;	/* Can't send a command; one already running */
		else if (!list_empty(&priv->cmdpendingq) &&
					!(priv->wakeup_dev_required))
			shouldsleep = 0;	/* We have a command to send */
		else if (kfifo_len(&priv->event_fifo))
			shouldsleep = 0;	/* We have an event to process */
		else
			shouldsleep = 1;	/* No command */

		if (shouldsleep) {
			lbs_deb_thread("sleeping, connect_status %d, "
				"psmode %d, psstate %d\n",
				priv->connect_status,
				priv->psmode, priv->psstate);
			spin_unlock_irq(&priv->driver_lock);
			schedule();
		} else
			spin_unlock_irq(&priv->driver_lock);

		lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		if (kthread_should_stop()) {
			lbs_deb_thread("break from main thread\n");
			break;
		}

		if (priv->surpriseremoved) {
			lbs_deb_thread("adapter removed; waiting to die...\n");
			continue;
		}

		lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
		       priv->currenttxskb, priv->dnld_sent);

		/* Process any pending command response */
		spin_lock_irq(&priv->driver_lock);
		resp_idx = priv->resp_idx;
		if (priv->resp_len[resp_idx]) {
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_command_response(priv,
				priv->resp_buf[resp_idx],
				priv->resp_len[resp_idx]);
			spin_lock_irq(&priv->driver_lock);
			priv->resp_len[resp_idx] = 0;
		}
		spin_unlock_irq(&priv->driver_lock);

		/* Process hardware events, e.g. card removed, link lost */
		spin_lock_irq(&priv->driver_lock);
		while (kfifo_len(&priv->event_fifo)) {
			u32 event;

			if (kfifo_out(&priv->event_fifo,
				(unsigned char *) &event, sizeof(event)) !=
				sizeof(event))
					break;
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_event(priv, event);
			spin_lock_irq(&priv->driver_lock);
		}
		spin_unlock_irq(&priv->driver_lock);

		if (priv->wakeup_dev_required) {
			lbs_deb_thread("Waking up device...\n");
			/* Wake up device */
			if (priv->exit_deep_sleep(priv))
				lbs_deb_thread("Wakeup device failed\n");
			continue;
		}

		/* command timeout stuff */
		if (priv->cmd_timed_out && priv->cur_cmd) {
			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

			netdev_info(dev, "Timeout submitting command 0x%04x\n",
				    le16_to_cpu(cmdnode->cmdbuf->command));
			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);

#if 0 /* Not in RHEL */
			/* Reset card, but only when it isn't in the process
			 * of being shutdown anyway. */
			if (!dev->dismantle && priv->reset_card)
#else
			if (priv->reset_card)
#endif
				priv->reset_card(priv);
		}
		priv->cmd_timed_out = 0;

		if (!priv->fw_ready)
			continue;

		/* Check if we need to confirm Sleep Request received previously */
		if (priv->psstate == PS_STATE_PRE_SLEEP &&
		    !priv->dnld_sent && !priv->cur_cmd) {
			if (priv->connect_status == LBS_CONNECTED) {
				lbs_deb_thread("pre-sleep, currenttxskb %p, "
					"dnld_sent %d, cur_cmd %p\n",
					priv->currenttxskb, priv->dnld_sent,
					priv->cur_cmd);

				lbs_ps_confirm_sleep(priv);
			} else {
				/* workaround for firmware sending
				 * deauth/linkloss event immediately
				 * after sleep request; remove this
				 * after firmware fixes it
				 */
				priv->psstate = PS_STATE_AWAKE;
				netdev_alert(dev,
					     "ignore PS_SleepConfirm in non-connected state\n");
			}
		}

		/* The PS state is changed during processing of Sleep Request
		 * event above
		 */
		if ((priv->psstate == PS_STATE_SLEEP) ||
		    (priv->psstate == PS_STATE_PRE_SLEEP))
			continue;

		if (priv->is_deep_sleep)
			continue;

		/* Execute the next command */
		if (!priv->dnld_sent && !priv->cur_cmd)
			lbs_execute_next_command(priv);

		spin_lock_irq(&priv->driver_lock);
		if (!priv->dnld_sent && priv->tx_pending_len > 0) {
			int ret = priv->hw_host_to_card(priv, MVMS_DAT,
							priv->tx_pending_buf,
							priv->tx_pending_len);
			if (ret) {
				lbs_deb_tx("host_to_card failed %d\n", ret);
				priv->dnld_sent = DNLD_RES_RECEIVED;
			} else {
				mod_timer(&priv->tx_lockup_timer,
					  jiffies + (HZ * 5));
			}
			priv->tx_pending_len = 0;
			if (!priv->currenttxskb) {
				/* We can wake the queues immediately if we aren't
				   waiting for TX feedback */
				if (priv->connect_status == LBS_CONNECTED)
					netif_wake_queue(priv->dev);
				if (priv->mesh_dev &&
				    netif_running(priv->mesh_dev))
					netif_wake_queue(priv->mesh_dev);
			}
		}
		spin_unlock_irq(&priv->driver_lock);
	}

	del_timer(&priv->command_timer);
	del_timer(&priv->tx_lockup_timer);
	del_timer(&priv->auto_deepsleep_timer);

	lbs_deb_leave(LBS_DEB_THREAD);
	return 0;
}
Example 20
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too much frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

#ifdef CONFIG_F2FS_STAT_FS
		sbi->bg_gc++;
#endif

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;
	} while (!kthread_should_stop());
	return 0;
}
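
Start and stop are the usual kthread_run()/kthread_stop() pair around the wait queue the loop sleeps on. A hedged sketch (the f2fs_gc_task field and thread name are assumptions for this kernel version):

static int start_gc_thread_sketch(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	init_waitqueue_head(&gc_th->gc_wait_queue_head);
	gc_th->f2fs_gc_task = kthread_run(gc_thread_func, sbi, "f2fs_gc");
	if (IS_ERR(gc_th->f2fs_gc_task))
		return PTR_ERR(gc_th->f2fs_gc_task);
	return 0;
}
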
Example 21
/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify if
 * the last 100 messages received were smaller than half of the
 * current RX buffer size. In that case, the RX buffer size is
 * halved. This helps lower the pressure on the memory
 * allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}
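
The thread sleeps until rx_pending_count becomes non-zero, so the kick from the notification endpoint is correspondingly tiny. A sketch of the assumed producer:

static void i2400mu_rx_kick_sketch(struct i2400mu *i2400mu)
{
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_interruptible(&i2400mu->rx_wq);
}
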
Example 22
/*
 * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
 */
static int kmmpd(void *data)
{
	struct super_block *sb = ((struct mmpd_data *) data)->sb;
	struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct mmp_struct *mmp;
	ext4_fsblk_t mmp_block;
	u32 seq = 0;
	unsigned long failed_writes = 0;
	int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
	unsigned mmp_check_interval;
	unsigned long last_update_time;
	unsigned long diff;
	int retval;

	mmp_block = le64_to_cpu(es->s_mmp_block);
	mmp = (struct mmp_struct *)(bh->b_data);
	mmp->mmp_time = cpu_to_le64(get_seconds());
	/*
	 * Start with the higher mmp_check_interval and reduce it if
	 * the MMP block is being updated on time.
	 */
	mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
				 EXT4_MMP_MIN_CHECK_INTERVAL);
	mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
	bdevname(bh->b_bdev, mmp->mmp_bdevname);

	memcpy(mmp->mmp_nodename, init_utsname()->sysname,
	       sizeof(mmp->mmp_nodename));

	while (!kthread_should_stop()) {
		if (++seq > EXT4_MMP_SEQ_MAX)
			seq = 1;

		mmp->mmp_seq = cpu_to_le32(seq);
		mmp->mmp_time = cpu_to_le64(get_seconds());
		last_update_time = jiffies;

		retval = write_mmp_block(bh);
		/*
		 * Don't spew too many error messages. Print one every
		 * (s_mmp_update_interval * 60) seconds.
		 */
		if (retval && (failed_writes % 60) == 0) {
			ext4_error(sb, "Error writing to MMP block");
			failed_writes++;
		}

		if (!(le32_to_cpu(es->s_feature_incompat) &
		    EXT4_FEATURE_INCOMPAT_MMP)) {
			ext4_warning(sb, "kmmpd being stopped since MMP feature"
				     " has been disabled.");
			EXT4_SB(sb)->s_mmp_tsk = NULL;
			goto failed;
		}

		if (sb->s_flags & MS_RDONLY) {
			ext4_warning(sb, "kmmpd being stopped since filesystem "
				     "has been remounted as readonly.");
			EXT4_SB(sb)->s_mmp_tsk = NULL;
			goto failed;
		}

		diff = jiffies - last_update_time;
		if (diff < mmp_update_interval * HZ)
			schedule_timeout_interruptible(mmp_update_interval *
						       HZ - diff);

		/*
		 * We need to make sure that more than mmp_check_interval
		 * seconds have not passed since writing. If that has happened
		 * we need to check if the MMP block is as we left it.
		 */
		diff = jiffies - last_update_time;
		if (diff > mmp_check_interval * HZ) {
			struct buffer_head *bh_check = NULL;
			struct mmp_struct *mmp_check;

			retval = read_mmp_block(sb, &bh_check, mmp_block);
			if (retval) {
				ext4_error(sb, "error reading MMP data: %d",
					   retval);

				EXT4_SB(sb)->s_mmp_tsk = NULL;
				goto failed;
			}

			mmp_check = (struct mmp_struct *)(bh_check->b_data);
			if (mmp->mmp_seq != mmp_check->mmp_seq ||
			    memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
				   sizeof(mmp->mmp_nodename))) {
				dump_mmp_msg(sb, mmp_check,
					     "Error while updating MMP info. "
					     "The filesystem seems to have been"
					     " multiply mounted.");
				ext4_error(sb, "abort");
				put_bh(bh_check);	/* don't leak the verification buffer */
				goto failed;
			}
			put_bh(bh_check);
		}

		/*
		 * Adjust the mmp_check_interval depending on how much time
		 * it took for the MMP block to be written.
		 */
		mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
					     EXT4_MMP_MAX_CHECK_INTERVAL),
					 EXT4_MMP_MIN_CHECK_INTERVAL);
		mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
	}

	/*
	 * Unmount seems to be clean.
	 */
	mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
	mmp->mmp_time = cpu_to_le64(get_seconds());

	retval = write_mmp_block(bh);

failed:
	kfree(data);
	brelse(bh);
	return retval;
}
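
Note the pacing idiom above: kmmpd timestamps the write with jiffies and then sleeps only for the remainder of the update interval. The same pattern as a tiny self-contained sketch (sleep_remainder is a hypothetical helper, not an ext4 function):

#include <linux/jiffies.h>
#include <linux/sched.h>

/* Sleep out whatever is left of an interval_sec-second period that
 * began at 'start' (a jiffies timestamp). */
static void sleep_remainder(unsigned long start, unsigned int interval_sec)
{
	unsigned long diff = jiffies - start;

	if (diff < interval_sec * HZ)
		schedule_timeout_interruptible(interval_sec * HZ - diff);
}
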
Example #23
static int
qcaspi_spi_thread(void *data)
{
	struct qcaspi *qca = data;
	u16 intr_cause = 0;

	netdev_info(qca->net_dev, "SPI thread created\n");
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if ((qca->intr_req == qca->intr_svc) &&
		    (qca->txr.skb[qca->txr.head] == NULL) &&
		    (qca->sync == QCASPI_SYNC_READY))
			schedule();

		set_current_state(TASK_RUNNING);

		netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
			   qca->intr_req - qca->intr_svc,
			   qca->txr.skb[qca->txr.head]);

		qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);

		if (qca->sync != QCASPI_SYNC_READY) {
			netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n",
				   (unsigned int)qca->sync);
			netif_stop_queue(qca->net_dev);
			netif_carrier_off(qca->net_dev);
			qcaspi_flush_tx_ring(qca);
			msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
		}

		if (qca->intr_svc != qca->intr_req) {
			qca->intr_svc = qca->intr_req;
			start_spi_intr_handling(qca, &intr_cause);

			if (intr_cause & SPI_INT_CPU_ON) {
				qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);

				/* not synced. */
				if (qca->sync != QCASPI_SYNC_READY)
					continue;

				qca->stats.device_reset++;
				netif_wake_queue(qca->net_dev);
				netif_carrier_on(qca->net_dev);
			}

			if (intr_cause & SPI_INT_RDBUF_ERR) {
				/* restart sync */
				netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
				qca->stats.read_buf_err++;
				qca->sync = QCASPI_SYNC_UNKNOWN;
				continue;
			}

			if (intr_cause & SPI_INT_WRBUF_ERR) {
				/* restart sync */
				netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
				qca->stats.write_buf_err++;
				qca->sync = QCASPI_SYNC_UNKNOWN;
				continue;
			}

			/* can only handle other interrupts
			 * if sync has occurred
			 */
			if (qca->sync == QCASPI_SYNC_READY) {
				if (intr_cause & SPI_INT_PKT_AVLBL)
					qcaspi_receive(qca);
			}

			end_spi_intr_handling(qca, intr_cause);
		}

		if (qca->sync == QCASPI_SYNC_READY)
			qcaspi_transmit(qca);
	}
	set_current_state(TASK_RUNNING);
	netdev_info(qca->net_dev, "SPI thread exit\n");

	return 0;
}
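
The loop above parks in schedule() whenever intr_req == intr_svc and there is nothing queued for transmit, so the interrupt handler only needs to bump intr_req and wake the task. A hedged reconstruction of that interrupt side, assuming the driver keeps its task_struct in qca->spi_thread (illustrative, not verbatim driver code):

#include <linux/interrupt.h>
#include <linux/sched.h>

static irqreturn_t my_qcaspi_intr_handler(int irq, void *data)
{
	struct qcaspi *qca = data;

	qca->intr_req++;	/* makes intr_req != intr_svc in the thread */
	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);

	return IRQ_HANDLED;
}
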
Example #24
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	int timeout;
	unsigned long flags;
	unsigned long jiff_max;
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	struct var_t *full_time;
	int delay_time_val;
	int full_time_val;
	int jiffy_delta_val;

	jiffy_delta = spk_get_var(JIFFY);
	delay_time = spk_get_var(DELAY);
	full_time = spk_get_var(FULL);
	spin_lock_irqsave(&speakup_info.spinlock, flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spin_unlock_irqrestore(&speakup_info.spinlock, flags);

	jiff_max = jiffies + jiffy_delta_val;
	while (!kthread_should_stop()) {
		spin_lock_irqsave(&speakup_info.spinlock, flags);
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		full_time_val = full_time->u.n.value;
		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
		if (synth_full()) {
			schedule_timeout(msecs_to_jiffies(full_time_val));
			continue;
		}
		set_current_state(TASK_RUNNING);
		timeout = 1000;
		while (synth_writable())
			if (--timeout <= 0)
				break;
		if (timeout <= 0) {
			oops();
			break;
		}
		spin_lock_irqsave(&speakup_info.spinlock, flags);
		ch = synth_buffer_getc();
		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
		if (ch == '\n')
			ch = PROCSPEECH;
		outb_p(ch, synth_port);
		SWAIT;
		if ((jiffies >= jiff_max) && (ch == SPACE)) {
			timeout = 1000;
			while (synth_writable())
				if (--timeout <= 0)
					break;
			if (timeout <= 0) {
				oops();
				break;
			}
			outb_p(PROCSPEECH, synth_port);
			spin_lock_irqsave(&speakup_info.spinlock, flags);
			jiffy_delta_val = jiffy_delta->u.n.value;
			delay_time_val = delay_time->u.n.value;
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			schedule_timeout(msecs_to_jiffies(delay_time_val));
			jiff_max = jiffies+jiffy_delta_val;
		}
	}
	timeout = 1000;
	while (synth_writable())
		if (--timeout <= 0)
			break;
	if (timeout <= 0)
		oops();
	else
		outb_p(PROCSPEECH, synth_port);
}
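
The "spin until the synth stops reporting writable, give up after 1000 iterations" idiom appears three times in do_catch_up. A hypothetical helper showing the pattern in isolation, assuming the same synth_writable() used above:

#include <linux/errno.h>

static int wait_until_drained(int spins)
{
	while (synth_writable())
		if (--spins <= 0)
			return -ETIMEDOUT;	/* caller should oops() */
	return 0;
}
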
Example #25
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts.  One thing that non SDIO host cannot do is
	 * asynchronous notification of pending SDIO card interrupts
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on drivers behalf for a couple
		 * reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host->card);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}
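
On hosts without native SDIO interrupt support the loop adapts its polling period: halve it after a productive poll, creep back toward idle_period otherwise. The same policy as a self-contained sketch:

static unsigned long adapt_poll_period(unsigned long period,
				       unsigned long idle_period,
				       int handled)
{
	if (handled > 0)
		return period / 2;	/* more IRQs likely soon: poll faster */
	if (++period > idle_period)
		period = idle_period;	/* relax back to the idle rate */
	return period;
}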

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		goto err_irq_get;

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		goto err_irq_en;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		goto err_irq_en;

	return ret;

err_irq_en:
	sdio_card_irq_put(func->card);
err_irq_get:
	func->irq_handler = NULL;	/* undo the registration on failure */
	return ret;
}
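
For context, this is roughly how an SDIO function driver consumes the API from its probe routine. my_func_irq and my_probe are illustrative names, but sdio_claim_host(), sdio_enable_func() and sdio_claim_irq() are the real core calls:

#include <linux/mmc/sdio_func.h>
#include <linux/mod_devicetable.h>

static void my_func_irq(struct sdio_func *func)
{
	/* The host is already claimed when this runs, so it must not
	 * call sdio_claim_host()/sdio_release_host(). */
}

static int my_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (!ret)
		ret = sdio_claim_irq(func, my_func_irq);
	sdio_release_host(func);

	return ret;
}
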
Example #26
static int hkey_poll_kthread(void *data)
{
	unsigned long t = 0;
	int offset, level;
	unsigned int keycode;
	u8 scancode;

	mutex_lock(&hkey_poll_mutex);

	offset = hkey_ec_get_offset();
	if (offset < 0) {
		vdbg_printk(LENSL_WARNING,
			"Failed to read hotkey register offset from EC\n");
		hkey_ec_prev_offset = 0;
	} else
		hkey_ec_prev_offset = offset;

	while (!kthread_should_stop() && hkey_poll_hz) {
		if (t == 0)
			t = 1000/hkey_poll_hz;
		t = msleep_interruptible(t);
		if (unlikely(kthread_should_stop()))
			break;
		try_to_freeze();
		if (t > 0)
			continue;
		offset = hkey_ec_get_offset();
		if (offset < 0) {
			vdbg_printk(LENSL_WARNING,
			   "Failed to read hotkey register offset from EC\n");
			continue;
		}
		if (offset == hkey_ec_prev_offset)
			continue;

		if (ec_read(0x0A + offset, &scancode)) {
			vdbg_printk(LENSL_WARNING,
				"Failed to read hotkey code from EC\n");
			continue;
		}
		keycode = ec_scancode_to_keycode(scancode);
		vdbg_printk(LENSL_DEBUG,
		   "Got hotkey keycode %d (scancode %d)\n", keycode, scancode);

		/* Special handling for brightness keys. We do it here and not
		   via an ACPI notifier in order to prevent possible conflicts
		   with video.c */
		if (keycode == KEY_BRIGHTNESSDOWN) {
			if (control_backlight && backlight) {
				level = lensl_bd_get_brightness(backlight);
				if (0 <= --level)
					lensl_bd_set_brightness_int(level);
			} else
				keycode = KEY_RESERVED;
		} else if (keycode == KEY_BRIGHTNESSUP) {
			if (control_backlight && backlight) {
				level = lensl_bd_get_brightness(backlight);
				if (backlight_levels.count > ++level)
					lensl_bd_set_brightness_int(level);
			} else
				keycode = KEY_RESERVED;
		}

		if (keycode != KEY_RESERVED) {
			input_report_key(hkey_inputdev, keycode, 1);
			input_sync(hkey_inputdev);
			input_report_key(hkey_inputdev, keycode, 0);
			input_sync(hkey_inputdev);
		}
		hkey_ec_prev_offset = offset;
	}

	mutex_unlock(&hkey_poll_mutex);
	return 0;
}
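
A sketch of how such a poll thread is typically started and torn down; hkey_poll_task and the wrapper names are hypothetical, not from the driver:

#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *hkey_poll_task;

static int hkey_poll_start(void)
{
	hkey_poll_task = kthread_run(hkey_poll_kthread, NULL, "hkey_poll");
	if (IS_ERR(hkey_poll_task)) {
		int err = PTR_ERR(hkey_poll_task);

		hkey_poll_task = NULL;
		return err;
	}
	return 0;
}

static void hkey_poll_stop(void)
{
	if (hkey_poll_task) {
		/* kthread_stop() wakes the msleep_interruptible() sleep,
		 * and the loop re-checks kthread_should_stop(). */
		kthread_stop(hkey_poll_task);
		hkey_poll_task = NULL;
	}
}
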
Example #27
static int
one_udp_thread( void *arg )
{
     OneUDP        *udp = arg;
     int            size;
     OneUDPMessage *message;

     struct sched_param param;

     param.sched_priority = 50;

     sched_setscheduler( current, SCHED_FIFO, &param );

     /* kernel thread initialization */
     udp->running = 1;

     printk(KERN_DEBUG "One/UDP: listening on port %d\n", ONE_PORT);

     message = kmalloc( sizeof(OneUDPMessage), GFP_KERNEL );
     if (!message) {
          printk( KERN_ERR "One/UDP: could not allocate %zu bytes for receiving messages\n", sizeof(OneUDPMessage) );
          return -ENOMEM;
     }

     /* main loop */
     while (!kthread_should_stop()) {
          size = ksocket_receive( udp->sock, &udp->addr, message, sizeof(OneUDPMessage) );

          if (signal_pending(current))
               break;

          if (size < 0)
               printk(KERN_DEBUG "One/UDP: error getting datagram, sock_recvmsg error = %d\n", size);
          else {
               int              ret;
               OneQueueDispatch dispatch;
               struct iovec     iov;

               ONE_DEBUG( "UDP: Received %d bytes\n", size);

               switch (message->type) {
                    case OUMT_DISPATCH:
                         ONE_DEBUG( "  -> DISPATCH\n" );
                         ONE_DEBUG( "     queue_id:     0x%08x\n", message->dispatch.header.queue_id );
                         ONE_DEBUG( "     flags:        0x%08x\n", message->dispatch.header.flags );
                         ONE_DEBUG( "     size:         %u\n", message->dispatch.header.size );
                         ONE_DEBUG( "     uncompressed: %u\n", message->dispatch.header.uncompressed );

                         if (message->dispatch.header.flags) {
                              printk(KERN_ERR "One/UDP: unsupported flags!\n" );
                              break;
                         }

                         if (message->dispatch.header.uncompressed > ONE_MAX_PACKET_SIZE) {
                              printk(KERN_ERR "One/UDP: uncompressed > ONE_MAX_PACKET_SIZE!\n" );
                              break;
                         }

                         if (message->dispatch.header.size > message->dispatch.header.uncompressed) {
                              printk(KERN_ERR "One/UDP: size (%u) > uncompressed (%u)!\n",
                                     message->dispatch.header.size, message->dispatch.header.uncompressed );
                              break;
                         }

                         one_core_lock( one_core );

                         dispatch.header    = message->dispatch.header;
                         dispatch.iov       = &iov;
                         dispatch.iov_count = 1;

                         iov.iov_base = message->dispatch.buf;
                         iov.iov_len  = message->dispatch.header.size;

                         ret = one_queue_dispatch( NULL, &dispatch );
                         if (ret)
                              printk(KERN_ERR "One/UDP: dispatch error %d!\n", ret );

                         one_core_unlock( one_core );
                         break;

                    case OUMT_ATTACH:
                         ONE_DEBUG( "  -> ATTACH\n" );
                         break;

                    case OUMT_DETACH:
                         ONE_DEBUG( "  -> DETACH\n" );
                         break;

                    default:
                         printk(KERN_ERR "One/UDP: unknown message type 0x%08x!\n", message->type );
                         break;
               }
          }
     }

     kfree( message );

     return 0;
}
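
Stopping a thread that blocks in a socket receive takes more than kthread_stop(): the recvmsg sleep must be interrupted, which is why the loop checks signal_pending(). A hedged teardown sketch, assuming the task_struct lives in udp->thread (the real field name may differ); strictly speaking the thread should keep running until kthread_should_stop() returns true for this ordering to be fully race-free:

#include <linux/kthread.h>
#include <linux/sched.h>

static void one_udp_stop( OneUDP *udp )
{
     if (udp->thread) {
          send_sig( SIGTERM, udp->thread, 1 );    /* unblock ksocket_receive() */
          kthread_stop( udp->thread );
          udp->thread = NULL;
     }
}
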
Example #28
static int ucb1400_ts_thread(void *_ucb)
{
    struct ucb1400_ts *ucb = _ucb;
    struct task_struct *tsk = current;
    int valid = 0;
    struct sched_param param = { .sched_priority = 1 };

    sched_setscheduler(tsk, SCHED_FIFO, &param);

    set_freezable();
    while (!kthread_should_stop()) {
        unsigned int x, y, p;
        long timeout;

        ucb->ts_restart = 0;

        if (ucb->irq_pending) {
            ucb->irq_pending = 0;
            ucb1400_handle_pending_irq(ucb);
        }

        ucb1400_adc_enable(ucb->ac97);
        x = ucb1400_ts_read_xpos(ucb);
        y = ucb1400_ts_read_ypos(ucb);
        p = ucb1400_ts_read_pressure(ucb);
        ucb1400_adc_disable(ucb->ac97);

        /* Switch back to interrupt mode. */
        ucb1400_ts_mode_int(ucb->ac97);

        msleep(10);

        if (ucb1400_ts_pen_down(ucb->ac97)) { /* despite the name, true means the pen has lifted */
            ucb1400_ts_irq_enable(ucb->ac97);

            /*
             * If we spat out a valid sample set last time,
             * spit out a "pen off" sample here.
             */
            if (valid) {
                ucb1400_ts_event_release(ucb->ts_idev);
                valid = 0;
            }

            timeout = MAX_SCHEDULE_TIMEOUT;
        } else {
            valid = 1;
            ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
            timeout = msecs_to_jiffies(10);
        }

        wait_event_freezable_timeout(ucb->ts_wait,
                                     ucb->irq_pending || ucb->ts_restart ||
                                     kthread_should_stop(), timeout);
    }

    /* Send the "pen off" if we are stopping with the pen still active */
    if (valid)
        ucb1400_ts_event_release(ucb->ts_idev);

    ucb->ts_task = NULL;
    return 0;
}

/*
 * A restriction with interrupts exists when using the ucb1400, as
 * the codec read/write routines may sleep while waiting for codec
 * access completion and uses semaphores for access control to the
 * AC97 bus.  A complete codec read cycle could take  anywhere from
 * 60 to 100uSec so we *definitely* don't want to spin inside the
 * interrupt handler waiting for codec access.  So, we handle the
 * interrupt by scheduling a RT kernel thread to run in process
 * context instead of interrupt context.
 */
static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
{
    struct ucb1400_ts *ucb = devid;

    if (irqnr == ucb->irq) {
        disable_irq_nosync(ucb->irq);	/* disable_irq() would deadlock inside its own handler */
        ucb->irq_pending = 1;
        wake_up(&ucb->ts_wait);
        return IRQ_HANDLED;
    }
    return IRQ_NONE;
}

static int ucb1400_ts_open(struct input_dev *idev)
{
    struct ucb1400_ts *ucb = input_get_drvdata(idev);
    int ret = 0;

    BUG_ON(ucb->ts_task);

    ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
    if (IS_ERR(ucb->ts_task)) {
        ret = PTR_ERR(ucb->ts_task);
        ucb->ts_task = NULL;
    }

    return ret;
}

static void ucb1400_ts_close(struct input_dev *idev)
{
    struct ucb1400_ts *ucb = input_get_drvdata(idev);

    if (ucb->ts_task)
        kthread_stop(ucb->ts_task);

    ucb1400_ts_irq_disable(ucb->ac97);
    ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
}

#ifndef NO_IRQ
#define NO_IRQ	0
#endif

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.
 */
static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
{
    unsigned long mask, timeout;

    mask = probe_irq_on();

    /* Enable the ADC interrupt. */
    ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, UCB_IE_ADC);
    ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_ADC);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

    /* Cause an ADC interrupt. */
    ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA);
    ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

    /* Wait for the conversion to complete. */
    timeout = jiffies + HZ/2;
    while (!(ucb1400_reg_read(ucb->ac97, UCB_ADC_DATA) &
             UCB_ADC_DAT_VALID)) {
        cpu_relax();
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
            probe_irq_off(mask);
            return -ENODEV;
        }
    }
    ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, 0);

    /* Disable and clear interrupt. */
    ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, 0);
    ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
    ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);

    /* Read triggered interrupt. */
    ucb->irq = probe_irq_off(mask);
    if (ucb->irq < 0 || ucb->irq == NO_IRQ)
        return -ENODEV;

    return 0;
}
Example #29
/*****************************************************************************
*Name         :
*Description  :
*Parameter    :
*Return       :
*Note         :
*****************************************************************************/
int nftl_test_thread(void *arg)
{
    struct _nftl_blk *nftl_blk = arg;
    unsigned long time;

    /* Originally NFTL_FLUSH_DATA_TIME * HZ; reduced to one second here. */
    nftl_blk->time_flush = HZ;

    while (!kthread_should_stop()) {

        mutex_lock(nftl_blk->blk_lock);

        if (nftl_get_zone_write_cache_nums(nftl_blk->nftl_zone) > 32) {
            nftl_blk->flush_write_cache(nftl_blk, 8);
        } else {
            time = jiffies;
            if (time_after(time, nftl_blk->time + nftl_blk->time_flush + HZ))
                nftl_blk->flush_write_cache(nftl_blk, 2);
        }

        if(garbage_collect(nftl_blk->nftl_zone) != 0){
            printk("nftl_thread garbage_collect error!\n");
        }

        if(do_prio_gc(nftl_blk->nftl_zone) != 0){
            printk("nftl_thread do_prio_gc error!\n");
        }

        mutex_unlock(nftl_blk->blk_lock);
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(NFTL_SCHEDULE_TIMEOUT);
    }

    nftl_blk->nftl_thread = (void*)NULL;
    return 0;
}
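
An illustrative start helper matching the exit convention above (the thread clears nftl_blk->nftl_thread itself when it returns); the helper name is hypothetical:

#include <linux/kthread.h>
#include <linux/err.h>

static int nftl_test_thread_start(struct _nftl_blk *nftl_blk)
{
    nftl_blk->nftl_thread = kthread_run(nftl_test_thread, nftl_blk,
                                        "nftl_test");
    if (IS_ERR(nftl_blk->nftl_thread)) {
        int err = PTR_ERR(nftl_blk->nftl_thread);
        nftl_blk->nftl_thread = NULL;
        return err;
    }
    return 0;
}
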
Example #30
/******************************************************************************
 * The kernel thread that does the management of HDMI hotplugged devices
 */
int stmhdmi_manager(void *data)
{
  struct stm_hdmi *hdmi = (struct stm_hdmi *)data;
  unsigned long saveFlags;
  int res;

  DPRINTK("Starting HDMI Thread for info = %p\n",hdmi);

  if((res = i2c_add_driver(&stmhdmi_driver)))
  {
    DPRINTK("Registering I2C driver failed\n");
    return 0;
  }

  /*
   * This does not probe the required I2C addresses to avoid confusing devices
   * with the SMBUS QUICK style transaction. Instead it creates I2C clients
   * assuming they exist, which as we only use them when a hotplug is detected
   * should be the case. Therefore we can do this once instead of connecting
   * and disconnecting on hotplug events.
   */
  stmhdmi_i2c_connect(hdmi);

  DPRINTK("Entering hotplug event loop\n");

  set_freezable();

  while(1)
  {
    stm_display_status_t current_status;

    if(wait_event_interruptible_timeout(hdmi->status_wait_queue,
                                        ((hdmi->status_changed != 0) || kthread_should_stop()),HZ/2))
    {
      if (try_to_freeze())
      {
        /*
         * Back around the loop, any pending work or kthread stop will get
         * picked up again immediately.
         */
        continue;
      }
    }

    mutex_lock(&(hdmi->lock));

    if(kthread_should_stop())
    {
      DPRINTK("HDMI Thread terminating for info = %p\n",hdmi);

      stm_display_output_stop(hdmi->hdmi_output);
      stmhdmi_i2c_disconnect(hdmi);
      i2c_del_driver(&stmhdmi_driver);

      mutex_unlock(&(hdmi->lock));

      return 0;
    }

    spin_lock_irqsave(&(hdmi->spinlock), saveFlags);

    /*
     * Handle the 1/2 second timeout to re-send the SPD info frame and
     * handle deferred disconnection after a HPD de-assert.
     */
    if(likely(hdmi->status_changed == 0))
    {
      spin_unlock_irqrestore(&(hdmi->spinlock), saveFlags);
      stmhdmi_handle_wait_queue_timeout(hdmi);
      mutex_unlock(&(hdmi->lock));
      continue;
    }

    /*
     * Handle a real HDMI state change
     */
    current_status = hdmi->status;
    hdmi->status_changed = 0;
    spin_unlock_irqrestore(&(hdmi->spinlock), saveFlags);

    DPRINTK("Handling HDMI State Change current_status = %d\n",current_status);

    switch(current_status)
    {
      case STM_DISPLAY_NEEDS_RESTART:
        stmhdmi_restart_display(hdmi);
        break;
      case STM_DISPLAY_DISCONNECTED:
        stmhdmi_disconnect_display(hdmi);
        break;
      case STM_DISPLAY_CONNECTED:
        stmhdmi_connect_display(hdmi);
        break;
    }

    DPRINTK("HDMI management loop finished\n");
    mutex_unlock(&(hdmi->lock));
  }
}
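
Finally, a sketch of the producer side this loop expects: whoever detects a hotplug or needs a restart sets status/status_changed under the spinlock and wakes the wait queue. stmhdmi_post_status is an illustrative name, not the actual interrupt code:

#include <linux/spinlock.h>
#include <linux/wait.h>

static void stmhdmi_post_status(struct stm_hdmi *hdmi,
                                stm_display_status_t status)
{
  unsigned long flags;

  spin_lock_irqsave(&(hdmi->spinlock), flags);
  hdmi->status         = status;
  hdmi->status_changed = 1;
  spin_unlock_irqrestore(&(hdmi->spinlock), flags);

  wake_up_interruptible(&(hdmi->status_wait_queue));
}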