Example no. 1
1
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();

		add_wait_queue(&vb->config_change, &wait);
		for (;;) {
			if ((diff = towards_target(vb)) != 0 ||
			    vb->need_stats_update ||
			    kthread_should_stop() ||
			    freezing(current))
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
		}
		remove_wait_queue(&vb->config_change, &wait);

		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);

		/*
		 * For large balloon changes, we could spend a lot of time
		 * and always have work to do.  Be nice if preempt disabled.
		 */
		cond_resched();
	}
	return 0;
}
Example no. 2
0
static int
tfw_bmb_worker(void *data)
{
	int tn = (int)(long)data;
	TfwBmbTask *task = &bmb_task[tn];
	int attempt, send, k, i;
	unsigned long time_max;

	fuzz_init(&task->ctx, true);

	for (k = 0; k < niters; k++) {
		task->conn_attempt = 0;
		atomic_set(&task->conn_compl, 0);
		atomic_set(&task->conn_error, 0);
		atomic_set(&task->conn_rd_tail, 0);
		init_waitqueue_head(&task->conn_wq);

		for (i = 0; i < nconns; i++)
			tfw_bmb_connect(tn, i);

		set_freezable();
		time_max = jiffies + 60 * HZ;
		attempt = task->conn_attempt;
		do {
#define COND()	(atomic_read(&task->conn_compl) > 0 || \
		 atomic_read(&task->conn_error) == attempt)
			wait_event_freezable_timeout(task->conn_wq, COND(), HZ);
#undef COND
			if (atomic_read(&task->conn_compl) > 0)
				break;
			if (atomic_read(&task->conn_error) == attempt)
				goto release_sockets;
			if (time_after(jiffies, time_max)) {
				TFW_ERR("worker exceeded maximum wait time\n");
				goto release_sockets;
			}
		} while (!kthread_should_stop());

		for (send = 0; send < nconns * nmessages; ) {
			int tail = atomic_read(&task->conn_rd_tail);
		for (i = 0; i < tail; i++) {
				tfw_bmb_msg_send(tn, task->conn_rd[i]);
				send++;
			}
		}

release_sockets:
		atomic_add(attempt, &bmb_conn_attempt);
		atomic_add(atomic_read(&task->conn_compl), &bmb_conn_compl);
		atomic_add(atomic_read(&task->conn_error), &bmb_conn_error);

		tfw_bmb_release_sockets(tn);
	}

	task->task_struct = NULL;
	atomic_dec(&bmb_threads);
	wake_up(&bmb_task_wq);

	return 0;
}
Example no. 3
/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
	set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
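For context, here is a minimal usage sketch of the API the comment above describes, assuming the pre-4.9 helper names (init_kthread_worker, init_kthread_work, queue_kthread_work; later kernels rename these to kthread_init_worker, kthread_init_work and kthread_queue_work). The my_* identifiers are hypothetical:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_worker my_worker;
static struct kthread_work my_work;
static struct task_struct *my_task;

/* Runs in the worker's kthread each time my_work is queued. */
static void my_work_fn(struct kthread_work *work)
{
	pr_info("kthread_work executed\n");
}

static int my_start(void)
{
	init_kthread_worker(&my_worker);
	init_kthread_work(&my_work, my_work_fn);

	/* Attach exactly one kthread to the worker; it runs kthread_worker_fn. */
	my_task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);

	/* Hand work to the worker; my_work_fn executes in my_task. */
	queue_kthread_work(&my_worker, &my_work);
	return 0;
}

To shut down, flush_kthread_worker(&my_worker) followed by kthread_stop(my_task) makes kthread_should_stop() return true inside kthread_worker_fn, which then detaches and exits.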
Example no. 4
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0
					 || vb->need_stats_update
					 || kthread_should_stop()
					 || freezing(current));
		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);

		/*
		 * For large balloon changes, we could spend a lot of time
		 * and always have work to do.  Be nice if preempt disabled.
		 */
		cond_resched();
	}
	return 0;
}
Example no. 5
static int rts51x_scan_thread(void *__chip)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)__chip;

	printk(KERN_DEBUG
	       "rts51x: device found at %d\n", chip->usb->pusb_dev->devnum);

	set_freezable();

	/* Wait for the timeout to expire or for a disconnect */
	if (delay_use > 0) {
		printk(KERN_DEBUG "rts51x: waiting for device "
		       "to settle before scanning\n");
		wait_event_freezable_timeout(chip->usb->delay_wait,
					     test_bit(FLIDX_DONT_SCAN,
						      &chip->usb->dflags),
					     delay_use * HZ);
	}

	/* If the device is still connected, perform the scan */
	if (!test_bit(FLIDX_DONT_SCAN, &chip->usb->dflags)) {
		scsi_scan_host(rts51x_to_host(chip));
		printk(KERN_DEBUG "rts51x: device scan complete\n");

		
	}

	complete_and_exit(&chip->usb->scanning_done, 0);
}
Example no. 6
0
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
    int err, preverr = 0;
    struct svc_rqst *rqstp = vrqstp;

    set_freezable();

    while (!kthread_should_stop()) {
        /*
         * Listen for a request on the socket
         */
        err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
        if (err == -EAGAIN || err == -EINTR) {
            preverr = err;
            continue;
        }
        if (err < 0) {
            if (err != preverr) {
                printk(KERN_WARNING "NFS: %s: unexpected error "
                       "from svc_recv (%d)\n", __func__, err);
                preverr = err;
            }
            schedule_timeout_uninterruptible(HZ);
            continue;
        }
        preverr = err;
        svc_process(rqstp);
    }
    return 0;
}
Example no. 7
0
static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	current->flags |= PF_MUTEX_TESTER;
	set_freezable();
	allow_signal(SIGHUP);

	for(;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
		try_to_freeze();

		if (signal_pending(current))
			flush_signals(current);

		if(kthread_should_stop())
			break;
	}
	return 0;
}
Example no. 8
0
/*
 * The callback service for NFSv4.1 callbacks
 */
static int
nfs41_callback_svc(void *vrqstp)
{
	struct svc_rqst *rqstp = vrqstp;
	struct svc_serv *serv = rqstp->rq_server;
	struct rpc_rqst *req;
	int error;
	DEFINE_WAIT(wq);

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {

		if (signal_pending(current))
			flush_signals(current);

		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
		spin_lock_bh(&serv->sv_cb_lock);
		if (!list_empty(&serv->sv_cb_list)) {
			req = list_first_entry(&serv->sv_cb_list,
					struct rpc_rqst, rq_bc_list);
			list_del(&req->rq_bc_list);
			spin_unlock_bh(&serv->sv_cb_lock);
			finish_wait(&serv->sv_cb_waitq, &wq);
			dprintk("Invoking bc_svc_process()\n");
			error = bc_svc_process(serv, req, rqstp);
			dprintk("bc_svc_process() returned w/ error code= %d\n",
				error);
		} else {
			spin_unlock_bh(&serv->sv_cb_lock);
			schedule();
			finish_wait(&serv->sv_cb_waitq, &wq);
		}
	}
	return 0;
}
Example no. 9
0
static int
kclient_thread_finish(void *data)
{
	int nattempt = atomic_read(&kclient_connect_nattempt);
	uint64_t time_max = (uint64_t)get_seconds() + KCLIENT_WAIT_MAX;

	set_freezable();
	do {
		long timeout = KCLIENT_WAIT_INTVL;
		int nerror = atomic_read(&kclient_connect_nerror);
		int ncomplete = atomic_read(&kclient_connect_ncomplete);

		if (ncomplete + nerror == nattempt) {
			break;
		}
		wait_event_freezable_timeout(kclient_finish_wq,
					     kthread_should_stop(),
					     timeout);
		if ((uint64_t)get_seconds() > time_max) {
			SS_ERR("%s exceeded maximum wait time of %d seconds\n",
				"kclient_thread_finish", KCLIENT_WAIT_MAX);
			break;
		}
	} while (!kthread_should_stop());

	kclient_release_sockets();
	kclient_finish_task = NULL;
	return 0;
}
Example no. 10
0
static int flush_delta_work(void *data)
{
	struct sb *sb = data;
	int err;

	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	while (!kthread_freezable_should_stop(NULL)) {
		if (test_bit(TUX3_COMMIT_PENDING_BIT, &sb->backend_state)) {
			clear_bit(TUX3_COMMIT_PENDING_BIT, &sb->backend_state);

			err = flush_delta(sb);
			/* FIXME: error handling */
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(TUX3_COMMIT_PENDING_BIT, &sb->backend_state) &&
		    !kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}
Example no. 11
0
static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
{
    int ret;
    unsigned int count;
    struct pvr2_buffer *bp;
    struct pvr2_stream *stream;

    pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread started");
    set_freezable();

    stream = adap->channel.stream->stream;

    for (;;) {
        if (kthread_should_stop()) break;

        /* Not sure about this... */
        try_to_freeze();

        bp = pvr2_stream_get_ready_buffer(stream);
        if (bp != NULL) {
            count = pvr2_buffer_get_count(bp);
            if (count) {
                dvb_dmx_swfilter(
                    &adap->demux,
                    adap->buffer_storage[
                        pvr2_buffer_get_id(bp)],
                    count);
            } else {
                ret = pvr2_buffer_get_status(bp);
                if (ret < 0) break;
            }
            ret = pvr2_buffer_queue(bp);
            if (ret < 0) break;

            /* Since we know we did something to a buffer,
               just go back and try again.  No point in
               blocking unless we really ran out of
               buffers to process. */
            continue;
        }


        /* Wait until more buffers become available or we're
           told not to wait any longer. */
        ret = wait_event_interruptible(
            adap->buffer_wait_data,
            (pvr2_stream_get_ready_count(stream) > 0) ||
            kthread_should_stop());
        if (ret < 0) break;
    }

    /* If we get here and ret is < 0, then an error has occurred.
       Probably would be a good idea to communicate that to DVB core... */

    pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread stopped");

    return 0;
}
Example no. 12
static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
{
	int ret;
	unsigned int count;
	struct pvr2_buffer *bp;
	struct pvr2_stream *stream;

	pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread started");
	set_freezable();

	stream = adap->channel.stream->stream;

	for (;;) {
		if (kthread_should_stop()) break;

		/* Not sure about this... */
		try_to_freeze();

		bp = pvr2_stream_get_ready_buffer(stream);
		if (bp != NULL) {
			count = pvr2_buffer_get_count(bp);
			if (count) {
				dvb_dmx_swfilter(
					&adap->demux,
					adap->buffer_storage[
					    pvr2_buffer_get_id(bp)],
					count);
			} else {
				ret = pvr2_buffer_get_status(bp);
				if (ret < 0) break;
			}
			ret = pvr2_buffer_queue(bp);
			if (ret < 0) break;

			/* Since we know we did something to a buffer,
			   just go back and try again.  No point in
			   blocking unless we really ran out of
			   buffers to process. */
			continue;
		}


		/* Wait until more buffers become available or we're
		   told not to wait any longer. */
		ret = wait_event_interruptible(
		    adap->buffer_wait_data,
		    (pvr2_stream_get_ready_count(stream) > 0) ||
		    kthread_should_stop());
		if (ret < 0) break;
	}

	/* If we get here and ret is < 0, then an error has occurred.
	   Probably would be a good idea to communicate that to DVB core... */

	pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread stopped");

	return 0;
}
Example no. 13
0
/*
 * This is the lockd kernel thread
 */
static int
lockd(void *vrqstp)
{
	int		err = 0;
	struct svc_rqst *rqstp = vrqstp;
	struct net *net = &init_net;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}
	flush_signals(current);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	cancel_delayed_work_sync(&ln->grace_period_end);
	locks_end_grace(&ln->lockd_manager);
	return 0;
}
Example no. 14
0
static int kopald(void *unused)
{
	set_freezable();
	do {
		try_to_freeze();
		opal_poll_events(NULL);
		msleep_interruptible(opal_heartbeat);
	} while (!kthread_should_stop());

	return 0;
}
Example no. 15
0
static int tps65090_charger_poll_task(void *data)
{
	set_freezable();

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(POLL_INTERVAL);
		try_to_freeze();
		tps65090_charger_isr(-1, data);
	}
	return 0;
}
Example no. 16
int update_counter_thread(void *data)
{
	TZ_RESULT ret;
	KREE_SESSION_HANDLE icnt_session;
	uint32_t result;
	uint32_t a, b, rate;
	uint32_t nsec = THREAD_COUNT_FREQ;

	ret = KREE_CreateSession(TZ_TA_ICNT_UUID, &icnt_session);
	if (ret != TZ_RESULT_SUCCESS) {
		printk("CreateSession error %d\n", ret);
		return 1;
	}

	result = TEECK_Icnt_Rate(icnt_session, &rate);
	if (result == TZ_RESULT_SUCCESS) {
		//printk("(yjdbg) rate: %d\n", rate);
		nsec = (0xffffffff / rate);
		nsec -= 600;
		//printk("(yjdbg) rate: %d\n", nsec);
	}

	set_freezable();

	for (;;) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		result = TEECK_Icnt_Counter(icnt_session, &a, &b);
		if (result == TZ_RESULT_SUCCESS) {
			//printk("(yjdbg) tz_test TZCMD_ICNT_COUNT: 0x%x, 0x%x\n", a, b);
		}

		schedule_timeout_interruptible(HZ * nsec);
	}

	ret = KREE_CloseSession(icnt_session);
	if (ret != TZ_RESULT_SUCCESS) {
		printk("CloseSession error %d\n", ret);
		return 1;
	}

	return 0;
}
Example no. 17
static int videobuf_dvb_thread(void *data)
{
	struct videobuf_dvb *dvb = data;
	struct videobuf_buffer *buf;
	unsigned long flags;
	int err;
	void *outp;

	dprintk("dvb thread started\n");
	set_freezable();
	videobuf_read_start(&dvb->dvbq);

	for (;;) {
		/* fetch next buffer */
		buf = list_entry(dvb->dvbq.stream.next,
				 struct videobuf_buffer, stream);
		list_del(&buf->stream);
		err = videobuf_waiton(buf,0,1);

		/* no more feeds left or stop_feed() asked us to quit */
		if (0 == dvb->nfeeds)
			break;
		if (kthread_should_stop())
			break;
		try_to_freeze();

		/* feed buffer data to demux */
		outp = videobuf_queue_to_vmalloc (&dvb->dvbq, buf);

		if (buf->state == VIDEOBUF_DONE)
			dvb_dmx_swfilter(&dvb->demux, outp,
					 buf->size);

		/* requeue buffer */
		list_add_tail(&buf->stream,&dvb->dvbq.stream);
		spin_lock_irqsave(dvb->dvbq.irqlock,flags);
		dvb->dvbq.ops->buf_queue(&dvb->dvbq,buf);
		spin_unlock_irqrestore(dvb->dvbq.irqlock,flags);
	}

	videobuf_read_stop(&dvb->dvbq);
	dprintk("dvb thread stopped\n");

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
Example no. 18
0
/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements various file-system background activities:
 * o when a write-buffer timer expires it synchronizes the appropriate
 *   write-buffer;
 * o when the journal is about to be full, it starts in-advance commit.
 *
 * Note, other stuff like background garbage collection may be added here in
 * future.
 */
int ubifs_bg_thread(void *info)
{
    int err;
    struct ubifs_info *c = info;

    ubifs_msg("background thread \"%s\" started, PID %d", c->vi.ubi_num,
              c->bgt_name, current->pid);
    set_freezable();

    while (1) {
        if (kthread_should_stop())
            break;

        if (try_to_freeze())
            continue;

        set_current_state(TASK_INTERRUPTIBLE);
        /* Check if there is something to do */
        if (!c->need_bgt) {
            /*
             * Nothing prevents us from going sleep now and
             * be never woken up and block the task which
             * could wait in 'kthread_stop()' forever.
             */
            if (kthread_should_stop())
                break;
            schedule();
            continue;
        } else
            __set_current_state(TASK_RUNNING);

        c->need_bgt = 0;
        err = ubifs_bg_wbufs_sync(c);
        if (err)
            ubifs_ro_mode(c, err);

        run_bg_commit(c);
        cond_resched();
    }

    ubifs_msg("background thread \"%s\" stops", c->vi.ubi_num,
              c->bgt_name);
    return 0;
}
Example no. 19
0
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_should_stop()) {
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
	return 0;
}
Example no. 20
/*
 * Thread to send no-op commands to EC
 */
static void
NvEcPrivPingThread(void *args)
{
	NvError NvStatus = NvError_Success;
	NvEcRequest req;
	NvEcResponse resp;
	NvEcPrivState *ec = (NvEcPrivState *)args;
	//Nvidia_patch_ for_ deviceLockup_and_audio_lost_issue[START]
	//set_freezable_with_signal();
	set_freezable();
	//Nvidia_patch_ for_ deviceLockup_and_audio_lost_issue[END]

	for (;;) {
		NvOsSemaphoreWait(ec->hPingSema);
		if (ec->exitPingThread)
			break;

		// send no-op commands
		DISP_MESSAGE(("NvEcPrivPingThread: Sending no-op command\n"));
		req.PacketType = NvEcPacketType_Request;
		req.RequestType = NvEcRequestResponseType_Control;
		req.RequestSubtype = (NvEcRequestResponseSubtype)
			NvEcControlSubtype_NoOperation;
		req.NumPayloadBytes = 0;

		NvStatus = NvEcSendRequest(
				ec->hEc,
				&req,
				&resp,
				sizeof(req),
				sizeof(resp));
		if (NvStatus != NvError_Success)
			DISP_MESSAGE(("NvEcPrivPingThread: no-op command send fail\n"));

		if (resp.Status != NvEcStatus_Success)
			DISP_MESSAGE(("NvEcPrivPingThread: no-op command fail\n"));

		DISP_MESSAGE(("NvEcPrivPingThread: no-op command sent\n"));
		ec->IsEcActive = NV_FALSE;
	}
}
Example no. 21
0
static int solo_thread(void *data)
{
	struct solo_dev *solo_dev = data;
	DECLARE_WAITQUEUE(wait, current);

	set_freezable();
	add_wait_queue(&solo_dev->disp_thread_wait, &wait);

	for (;;) {
		long timeout = schedule_timeout_interruptible(HZ);
		if (timeout == -ERESTARTSYS || kthread_should_stop())
			break;
		solo_thread_try(solo_dev);
		try_to_freeze();
	}

	remove_wait_queue(&solo_dev->disp_thread_wait, &wait);

	return 0;
}
Example no. 22
0
static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}
Example no. 23
0
static int hba_house_keeper(void *data)
{
	set_user_nice(current, -15);

	set_current_state(TASK_INTERRUPTIBLE);
#if LINUX_VERSION_CODE >  KERNEL_VERSION(2, 6, 22)
	set_freezable();
#endif
	while (!kthread_should_stop()) {
		try_to_freeze();
		if (!hba_msg_queue_empty() &&
		    MSG_QUEUE_IDLE == queue_state_get()) {
			set_current_state(TASK_RUNNING);
			mv_proc_queue();
		} else {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
	}

	return 0;
}
Example no. 24
0
/**
 * Slab mover thread.
 * Sits waiting for a condition to jump off and shovel some memory about
 */
static int mc_slab_rebalance(void *ignore)
{
	int was_busy = 0;

	set_freezable();
	while (1) {
		wait_event_freezable(slab_rebal.wq,
				     slab_rebal.signal ||
				     kthread_should_stop());

		if (kthread_should_stop())
			break;
		mutex_lock(&slab_rebal.lock);
		if (slab_rebal.signal == 1) {
			if (mc_slab_rebalance_start() < 0) {
				/* Handle errors with more specificity as required. */
				slab_rebal.signal = 0;
			}
			was_busy = 0;
		} else if (slab_rebal.signal &&
			   slab_rebal.slab_start) {
			was_busy = mc_slab_rebalance_move();
		}

		if (slab_rebal.done) {
			mc_slab_rebalance_finish();
		} else if (was_busy) {
			/*
			 * Stuck waiting for some items to unlock, so slow down
			 * a bit to give them a chance to free up.
			 */
			msleep(1);
		}
		mutex_unlock(&slab_rebal.lock);
	}

	return 0;
}
Example no. 25
0
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;
	set_freezable();

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		try_to_freeze();

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
Example no. 26
static int do_compcache(void *nothing)
{
	int ret;
	set_freezable();

	for ( ; ; ) {
		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		if (atomic_read(&s_reclaim.kcompcached_running) == 1) {
			if (rtcc_reclaim_pages(number_of_reclaim_pages) < minimum_reclaim_pages)
				cancel_soft_reclaim();

			atomic_set(&s_reclaim.kcompcached_running, 0);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}

	return 0;
}
Example no. 27
0
static int tegra_cpufreq_dfsd(void *arg)
{
	unsigned long rate, last_rate;
	NvRmPmRequest req = 0;

	BUG_ON(!clk_cpu);

	preset_lpj = loops_per_jiffy;
	rate = clk_get_rate(clk_cpu);
	last_rate = rate;

	NvRmDfsSetState(rm_cpufreq, NvRmDfsRunState_ClosedLoop);
	set_freezable();

	while (!kthread_should_stop() && !(req & NvRmPmRequest_ExitFlag)) {

		req = NvRmPrivPmThread();

		if (try_to_freeze())
			continue;

		tegra_cpufreq_hotplug(req);

#ifdef CONFIG_USE_ARM_TWD_PRESCALER
		rate = clk_get_rate(clk_cpu);
		if (rate != last_rate) {
			local_timer_rescale(rate / 1000);
			smp_wmb();
			on_each_cpu(twd_set_prescaler, NULL, true);
			last_rate = rate;
		}
#endif
	}
	pr_info("dvfs thead shutdown\n");

	return 0;
}
Example no. 28
0
/*
 * This is the callback kernel thread.
 */
static int
nfs_callback_svc(void *vrqstp)
{
	int err, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	/*
	 * FIXME: do we really need to run this under the BKL? If so, please
	 * add a comment about what it's intended to protect.
	 */
	lock_kernel();
	while (!kthread_should_stop()) {
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_uninterruptible(HZ);
			continue;
		}
		preverr = err;
		svc_process(rqstp);
	}
	unlock_kernel();
	return 0;
}
Example no. 29
0
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {

		if (signal_pending(current))
			flush_signals(current);
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
	svc_exit_thread(rqstp);
	module_put_and_exit(0);
	return 0;
}
Example no. 30
0
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0
					 || vb->need_stats_update
					 || kthread_should_stop()
					 || freezing(current));
		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);
	}
	return 0;
}