Example No. 1
/*
 * ---------------------------------------------------------------------------
 *  uf_ta_sample_ind_wq
 *
 *      Deferred work queue function to send Traffic Analysis sample
 *      indications to the SME.
 *      These are done in a deferred work queue for two reasons:
 *       - the CsrWifiRouterCtrl...Send() functions are not safe for atomic context
 *       - we want to load the main driver data path as lightly as possible
 *
 *      The TA classifications already come from a workqueue.
 *
 *  Arguments:
 *      work    Pointer to work queue item.
 *
 *  Returns:
 *      None.
 * ---------------------------------------------------------------------------
 */
void
uf_ta_sample_ind_wq(struct work_struct *work)
{
    struct ta_sample_ind *ind = container_of(work, struct ta_sample_ind, task);
    unifi_priv_t *priv = container_of(ind, unifi_priv_t, ta_sample_ind_work);
    u16 interfaceTag = 0;

    unifi_trace(priv, UDBG5, "rxtcp %d txtcp %d rxudp %d txudp %d prio %d\n",
        priv->rxTcpThroughput,
        priv->txTcpThroughput,
        priv->rxUdpThroughput,
        priv->txUdpThroughput,
        priv->bh_thread.prio);

    if (priv->rxTcpThroughput > 1000)
    {
        if (bh_priority == -1 && priv->bh_thread.prio != 1)
        {
            struct sched_param param;
            priv->bh_thread.prio = 1;
            unifi_trace(priv, UDBG1, "%s new thread (RT) priority = %d\n",
                        priv->bh_thread.name, priv->bh_thread.prio);
            param.sched_priority = priv->bh_thread.prio;
            sched_setscheduler(priv->bh_thread.thread_task, SCHED_FIFO, &param);
        }
    } else
    {
        if (bh_priority == -1 && priv->bh_thread.prio != DEFAULT_PRIO)
        {
            struct sched_param param;
            param.sched_priority = 0;
            sched_setscheduler(priv->bh_thread.thread_task, SCHED_NORMAL, &param);
            priv->bh_thread.prio = DEFAULT_PRIO;
            unifi_trace(priv, UDBG1, "%s new thread priority = %d\n",
                        priv->bh_thread.name, priv->bh_thread.prio);
            set_user_nice(priv->bh_thread.thread_task, PRIO_TO_NICE(priv->bh_thread.prio));
        }
    }

    CsrWifiRouterCtrlTrafficSampleIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, interfaceTag, ind->stats);

    ind->in_use = 0;

} /* uf_ta_sample_ind_wq() */
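A minimal sketch of how such a deferred work item is typically initialised and
kicked from the data path (the init/trigger helpers below are illustrative,
not from the original driver; the ta_sample_ind struct is assumed to embed the
work_struct named "task" and the in_use flag, as the code above implies):

#include <linux/workqueue.h>

/* once, at driver initialisation */
static void ta_sample_work_init(unifi_priv_t *priv)
{
    INIT_WORK(&priv->ta_sample_ind_work.task, uf_ta_sample_ind_wq);
}

/* from the (possibly atomic) data path: defer the indication */
static void ta_sample_ready(unifi_priv_t *priv)
{
    if (!priv->ta_sample_ind_work.in_use) {
        priv->ta_sample_ind_work.in_use = 1;
        schedule_work(&priv->ta_sample_ind_work.task);
    }
}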
Example No. 2
VCOS_STATUS_T vcos_thread_create(VCOS_THREAD_T *thread,
                                 const char *name,
                                 VCOS_THREAD_ATTR_T *attrs,
                                 VCOS_THREAD_ENTRY_FN_T entry,
                                 void *arg)
{
   VCOS_STATUS_T st;
   struct task_struct *kthread;

   if (!name)
   {
      vcos_assert(0);
      return VCOS_EINVAL;
   }

   memset(thread, 0, sizeof(*thread));
   thread->magic     = VCOS_THREAD_MAGIC;
   strlcpy(thread->name, name, sizeof(thread->name));
   thread->legacy    = attrs ? attrs->legacy : 0;
   thread->entry     = entry;
   thread->arg       = arg;

   st = vcos_semaphore_create(&thread->wait, NULL, 0);
   if (st != VCOS_SUCCESS)
   {
      return st;
   }

   st = vcos_semaphore_create(&thread->suspend, NULL, 0);
   if (st != VCOS_SUCCESS)
   {
      return st;
   }

   /* required for event groups */
   vcos_timer_create(&thread->_timer.timer, thread->name, NULL, NULL);

   kthread = kthread_create((int (*)(void *))vcos_thread_wrapper, (void *)thread, "%s", name);
   if (IS_ERR_OR_NULL(kthread))
   {
      vcos_assert(0);
      return VCOS_ENOMEM;
   }
   /* attrs may be NULL; fall back to the default (nice 0) in that case */
   set_user_nice(kthread, attrs ? attrs->ta_priority : 0);
   thread->thread.thread = kthread;
   wake_up_process(kthread);
   return VCOS_SUCCESS;
}
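A hypothetical call site (my_worker and its void *(*)(void *) entry signature
follow the usual VCOS convention; attrs may be NULL now that the function
falls back to a default priority):

static void *my_worker(void *arg)
{
   /* ... thread body ... */
   return NULL;
}

static void start_worker(void)
{
   static VCOS_THREAD_T worker;
   VCOS_STATUS_T st;

   st = vcos_thread_create(&worker, "my_worker", NULL, my_worker, NULL);
   vcos_assert(st == VCOS_SUCCESS);
}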
Example No. 3
static int kaio_fsync_thread(void * data)
{
    struct ploop_io * io = data;
    struct ploop_device * plo = io->plo;

    set_user_nice(current, -20);

    spin_lock_irq(&plo->lock);
    while (!kthread_should_stop() || !list_empty(&io->fsync_queue)) {
        int err;
        struct ploop_request * preq;

        DEFINE_WAIT(_wait);
        for (;;) {
            prepare_to_wait(&io->fsync_waitq, &_wait, TASK_INTERRUPTIBLE);
            if (!list_empty(&io->fsync_queue) ||
                    kthread_should_stop())
                break;

            spin_unlock_irq(&plo->lock);
            schedule();
            spin_lock_irq(&plo->lock);
        }
        finish_wait(&io->fsync_waitq, &_wait);

        if (list_empty(&io->fsync_queue) && kthread_should_stop())
            break;

        preq = list_entry(io->fsync_queue.next, struct ploop_request, list);
        list_del(&preq->list);
        io->fsync_qlen--;
        if (!preq->prealloc_size)
            plo->st.bio_fsync++;
        spin_unlock_irq(&plo->lock);

        /* trick: preq->prealloc_size is actually new pos of eof */
        if (preq->prealloc_size) {
            err = kaio_truncate(io, io->files.file,
                                preq->prealloc_size >> (plo->cluster_log + 9));
            if (err)
                PLOOP_REQ_SET_ERROR(preq, -EIO);
        } else {
Example No. 4
static int
thread_generic_wrapper(void *arg)
{
	thread_priv_t *tp = (thread_priv_t *)arg;
	void (*func)(void *);
	void *args;

	ASSERT(tp->tp_magic == TP_MAGIC);
	func = tp->tp_func;
	args = tp->tp_args;
	set_current_state(tp->tp_state);
	set_user_nice((kthread_t *)current, PRIO_TO_NICE(tp->tp_pri));
	kmem_free(tp->tp_name, tp->tp_name_size);
	kmem_free(tp, sizeof(thread_priv_t));

	if (func)
		func(args);

	return 0;
}
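For reference, tp_pri here is a kernel priority in [0, 139]; the conversion
macros in <linux/sched.h> of this era were essentially:

#define MAX_RT_PRIO		100
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)	/* nice -20..19 -> prio 100..139 */
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)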
Example No. 5
static int __init rtcc_init(void)
{
	krtccd = kthread_run(rtcc_thread, NULL, "krtccd");
	if (IS_ERR(krtccd)) {
		/* Failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		/* otherwise bail out rather than renice an ERR_PTR */
		return PTR_ERR(krtccd);
	}

	set_user_nice(krtccd, 5);

	atomic_set(&krtccd_enabled, 0);
	atomic_set(&need_to_reclaim, 1);
	atomic_set(&krtccd_running, 0);

#ifndef CONFIG_KSM_ANDROID
	idle_notifier_register(&rtcc_idle_nb);
#endif

	return 0;
}
Example No. 6
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (!local_softirq_pending())
			schedule();

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			preempt_disable();
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable();
			cond_resched();
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
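Example No. 7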
/**
 * wlan_logging_thread() - The WLAN Logger thread
 * @Arg: pointer to the HDD context
 *
 * This thread forwards log messages to the app registered for the logs.
 */
static int wlan_logging_thread(void *Arg)
{
	int ret_wait_status = 0;

	set_user_nice(current, -2);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	daemonize("wlan_logging_thread");
#endif

	while (!gwlan_logging.exit) {
		ret_wait_status = wait_event_interruptible(
		    gwlan_logging.wait_queue,
		    (!list_empty(&gwlan_logging.filled_list)
		  || gwlan_logging.exit));

		if (ret_wait_status == -ERESTARTSYS) {
			pr_err("%s: wait_event_interruptible returned -ERESTARTSYS",
				__func__);
			break;
		}

		if (gwlan_logging.exit) {
			pr_err("%s: Exiting the thread\n", __func__);
			break;
		}

		if (INVALID_PID == gapp_pid) {
			pr_err("%s: Invalid PID\n", __func__);
			continue;
		}

		send_filled_buffers_to_user();
	}

	pr_info("%s: Terminating\n", __func__);

	complete_and_exit(&gwlan_logging.shutdown_comp, 0);

	return 0;
}
Example No. 8
static int ksoftirqd(void * __bind_cpu)
{
	daemonize("ksoftirqd/%d", (int) (long) __bind_cpu);
	set_user_nice(current, 19);

	__set_current_state(TASK_INTERRUPTIBLE);
	mb();
	__get_cpu_var(ksoftirqd) = current;

	for (;;) {
		if (!local_softirq_pending())
			schedule();
		__set_current_state(TASK_RUNNING);
		while (local_softirq_pending()) {
			do_softirq();
			if(need_resched())
				schedule();
		}
		__set_current_state(TASK_INTERRUPTIBLE);
	}
}
Example No. 9
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	//TODO:RAWLINSON
	struct cpumask cpu_padrao = cpumask_of_cpu(CPUID_PADRAO);

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	//TODO:RAWLINSON
	//set_cpus_allowed_ptr(current, cpu_all_mask); //TODO:RAWLINSON - original code...
	current->cpus_allowed = cpu_padrao;
	set_cpus_allowed_ptr(current, &cpu_padrao);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	if (sub_info->init) {
		retval = sub_info->init(sub_info);
		if (retval)
			goto fail;
	}

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	do_exit(0);
}
Example No. 10
/*
 * Processing thread.
 * Currently one thread per tpg.
 */
int ft_thread(void *arg)
{
	struct ft_tpg *tpg = arg;
	struct se_queue_obj *qobj = &tpg->qobj;
	struct ft_cmd *cmd;
	int ret;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(qobj->thread_wq,
			atomic_read(&qobj->queue_cnt) || kthread_should_stop());
		if (ret < 0 || kthread_should_stop())
			goto out;
		cmd = ft_dequeue_cmd(qobj);
		if (cmd)
			ft_exec_req(cmd);
	}

out:
	return 0;
}
Example No. 11
static int hba_house_keeper(void *data)
{
	set_user_nice(current, -15);

	set_current_state(TASK_INTERRUPTIBLE);
#if LINUX_VERSION_CODE >  KERNEL_VERSION(2, 6, 22)
	set_freezable();
#endif
	while (!kthread_should_stop()) {
		try_to_freeze();
		if (!hba_msg_queue_empty() &&
		    MSG_QUEUE_IDLE == queue_state_get()) {
			set_current_state(TASK_RUNNING);
			mv_proc_queue();
		} else {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
	}

	return 0;
}
Example No. 12
static void _mali_osk_wq_work_func(struct work_struct *work)
{
	mali_osk_wq_work_object_t *work_object;

	work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);

#if MALI_LICENSE_IS_GPL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
	/* We want highest Dynamic priority of the thread so that the Jobs depending
	** on this thread could be scheduled in time. Without this, this thread might
	** sometimes need to wait for some threads in user mode to finish its round-robin
	** time, causing *bubble* in the Mali pipeline. Thanks to the new implementation
	** of high-priority workqueue in new kernel, this only happens in older kernel.
	*/
	if (MALI_TRUE == work_object->high_pri) {
		set_user_nice(current, -19);
	}
#endif
#endif /* MALI_LICENSE_IS_GPL */

	work_object->handler(work_object->data);
}
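On kernels from 2.6.36 onwards (where the renice above is compiled out), the
same effect is obtained by queueing the work on a dedicated high-priority
workqueue instead; a sketch under that assumption (the workqueue and helper
names are illustrative):

#include <linux/workqueue.h>

static struct workqueue_struct *mali_high_wq;

static int mali_wq_init(void)
{
	/* work items on a WQ_HIGHPRI queue run at elevated priority */
	mali_high_wq = alloc_workqueue("mali_high", WQ_HIGHPRI, 0);
	return mali_high_wq ? 0 : -ENOMEM;
}

static void mali_queue_work(mali_osk_wq_work_object_t *work_object)
{
	if (MALI_TRUE == work_object->high_pri)
		queue_work(mali_high_wq, &work_object->work_handle);
}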
Example No. 13
/* Create a batch task with the specified callback and 
 * nice value of niceval, which determines its priority.
 */
void linsched_create_batch_task(void (*callback)(void), int niceval)
{
	struct sched_param params;

	/* If we cannot support any more tasks, return. */
	if (curr_task_id >= LINSCHED_MAX_TASKS)
		return;

	/* Create "batch" task and set its nice value. */
	__linsched_tasks[curr_task_id] = __linsched_create_task(callback);
	params.sched_priority = 0;
	sys_sched_setscheduler(__linsched_tasks[curr_task_id], SCHED_BATCH,
			       &params);
	set_user_nice(__linsched_tasks[curr_task_id], niceval);

	/* Print message. */
	printf("Created batch task 0x%x with nice value %d.\n",
	       (unsigned int)__linsched_tasks[curr_task_id], niceval);

	/* Increment task id. */
	curr_task_id++;
}
Example No. 14
/* NEW VERSION
 */
void linsched_create_normal_task_binary(struct elf_binary *binary, int niceval)
{
	struct sched_param params;
	unsigned long long time_slice;
	int query[3];

	query[0] = binary->size;
	query[1] = binary->bss;
	query[2] = binary->input_size;

	/* If we cannot support any more tasks, return. */
	if (curr_task_id >= LINSCHED_MAX_TASKS)
		return;

	time_start = sched_clock();

	sim_param = (query[0] + query[1] + query[2]) / (30 * 2);
	time_slice = knn(&query[0], 3);
	time_slice *= 1000000;
	/* time_slice = 500000000; */

	/* Create "normal" task and set its nice value. */
	__linsched_tasks[curr_task_id] = __linsched_create_task_binary(binary->callback, time_slice);
	params.sched_priority = 0;

	sys_sched_setscheduler(__linsched_tasks[curr_task_id], SCHED_NORMAL,
			       &params);
	set_user_nice(__linsched_tasks[curr_task_id], niceval);

	/* Print message. */
	printf("Created normal task 0x%x with nice value %d.\n",
	       (unsigned int)__linsched_tasks[curr_task_id], niceval);

	/* Increment task id. */
	curr_task_id++;
}
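Example No. 15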
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//AudioCtrlWorkThread
//	Worker thread: queries the KFIFO for operation messages and calls HAL_AudioProcess.
//----------------------------------------------------------------
static void AudioCtrlWorkThread(struct work_struct *work)
{
	TMsgAudioCtrl msgAudioCtrl;
	unsigned int len = 0;

	set_user_nice(current, -16);

	while (1)
	{
		//get operation code from fifo
		len = kfifo_out_locked(&sgThreadData.m_pkfifo,
				       (unsigned char *)&msgAudioCtrl,
				       sizeof(TMsgAudioCtrl),
				       &sgThreadData.m_lock);
		if ((len != sizeof(TMsgAudioCtrl)) && (len != 0))
			DEBUG("Error AUDIO_Ctrl len=%d expected %d in=%d, out=%d\n",
			      len, (int)sizeof(TMsgAudioCtrl),
			      sgThreadData.m_pkfifo.in, sgThreadData.m_pkfifo.out);
		if (len == 0)	//FIFO empty: all messages drained, stop until rescheduled
			return;

		//process the operation
		AUDIO_Ctrl_Process(msgAudioCtrl.action_code, &msgAudioCtrl.param,
				   msgAudioCtrl.pCallBack, msgAudioCtrl.block);
	}
}
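A sketch of the producer side of this pattern (the work item name m_work and
the helper AUDIO_Ctrl_Trigger are illustrative assumptions, not from the
original driver; only m_pkfifo and m_lock appear above):

static int AUDIO_Ctrl_Trigger(int action_code)
{
	TMsgAudioCtrl msg;
	unsigned int len;

	memset(&msg, 0, sizeof(msg));
	msg.action_code = action_code;
	/* msg.param, msg.pCallBack and msg.block would be filled in as needed */

	len = kfifo_in_locked(&sgThreadData.m_pkfifo, (unsigned char *)&msg,
			      sizeof(msg), &sgThreadData.m_lock);
	if (len != sizeof(msg))
		return -ENOMEM;	/* FIFO full */

	/* re-arm the worker; it drains the FIFO until kfifo_out_locked() returns 0 */
	schedule_work(&sgThreadData.m_work);
	return 0;
}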
Example No. 16
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *p;
	int error;

	if (which > 2 || which < 0)
		return -EINVAL;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!proc_sel(p, which, who))
			continue;
		if (p->uid != current->euid &&
			p->uid != current->uid && !capable(CAP_SYS_NICE)) {
			error = -EPERM;
			continue;
		}
		if (error == -ESRCH)
			error = 0;
		if (niceval < task_nice(p) && !capable(CAP_SYS_NICE))
			error = -EACCES;
		else
			set_user_nice(p, niceval);
	}
	read_unlock(&tasklist_lock);

	return error;
}
Example No. 17
void ipcs_intr_workqueue_process(struct work_struct *work)
{
	static int first = 1;

	if (first) {
		first = 0;
		set_user_nice(current, -16);
	}

	if (IpcCPCrashCheck()) {
		cp_crashed = 1;
		if (BCMLOG_CPCRASH_MTD == BCMLOG_GetCpCrashDumpDevice()) {
			/* we kill AP when CP crashes */
			IPC_DEBUG(DBG_INFO, "Crashing AP now...\n\n");
#if defined(CONFIG_SEC_DEBUG)
			cp_abort();
#else
			abort();
#endif
		} else {
			schedule_work(&g_ipc_info.cp_crash_dump_wq);
		}

		IPC_ProcessEvents();
	} else {
		IPC_ProcessEvents();
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&ipc_wake_lock);
#endif /* CONFIG_HAS_WAKELOCK */
	}
}
Example No. 18
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
Example No. 19
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_user_nice(tsk, KTHREAD_NICE_LEVEL);
	set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
Example No. 20
static int privateRecvRawThread(void *pPtr)
{
  raw_socket *pRaw = (raw_socket *)pPtr;
  struct sk_buff *pSkb = NULL;
  KLEM_RAW_HEADER *pHdr = NULL;
  unsigned int uHdrLen = sizeof(KLEM_RAW_HEADER);

  set_user_nice(current, -20);

  while ((false == kthread_should_stop()) &&
         (false != pRaw->bConnected)) {
    /* Wait until we have a packet */
    privRecvWait(pRaw);

    if (true == pRaw->bConnected) {
      /* Get the packet. */
      pSkb = skb_dequeue(&pRaw->pSocket->sk->sk_receive_queue);

      if (NULL != pSkb) {
        if (LEMU == pRaw->pData->eMode) {
          if (pSkb->len >= uHdrLen) {
            /* Let's examine the incoming header */
            pHdr = (KLEM_RAW_HEADER *)pSkb->data;

            if (pRaw->uProtocol == ntohs(pHdr->uProtocol)) {
              if (pRaw->hdr.ui == ntohl(pHdr->uHeader)) {
                if (pRaw->uVersion == ntohl(pHdr->uVersion)) {

                  /* remove the header */
                  skb_pull(pSkb, uHdrLen);

                  /* call the klem80211 side, to recv packet. */
                  klem80211Recv(pRaw->pData, pSkb);

                  /* I know nothing */
                  pSkb = NULL;
                }
              }
            }
          }
        } else {
          /* call the klem80211 side, to recv packet. */
          klem80211Recv(pRaw->pData, pSkb);

          /* I know nothing */
          pSkb = NULL;
        }

        /* If we are here, free that buffer; it's not ours. */
        if (NULL != pSkb) {
          dev_kfree_skb(pSkb);
          pSkb = NULL;
        }
      }
    }
  }

  /* Wait to die */
  while ((false == kthread_should_stop())) {
    msleep(1000);
    KLEM_MSG("Waiting to die\n");
  }

  /* We are dead. */

  return 0;
}
Example No. 21
/*
 * Thread for srb
 */
static int srb_thread(void *data)
{
	struct srb_device_s *dev;
	struct request *req;
	unsigned long flags;
	int th_id;
	int th_ret = 0;
	char buff[256];
	struct req_iterator iter;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
	struct bio_vec *bvec;
#else
	struct bio_vec bvec;
#endif
	struct srb_cdmi_desc_s *cdmi_desc;

	SRBDEV_LOG_DEBUG(((struct srb_device_s *)data), "Thread started with device %p", data);

	dev = data;

	/* Init thread specific values */
	spin_lock(&devtab_lock);
	th_id = dev->nb_threads;
	dev->nb_threads++;
	spin_unlock(&devtab_lock);

	set_user_nice(current, -20);
	while (!kthread_should_stop() || !list_empty(&dev->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(dev->waiting_wq,
					kthread_should_stop() ||
					!list_empty(&dev->waiting_queue));

		/* TODO: improve kthread termination; as written we cannot
		   terminate this kthread by calling kthread_stop() */
		/* if (kthread_should_stop()) {
			printk(KERN_INFO "srb_thread: immediate kthread exit\n");
			do_exit(0);
		} */

		spin_lock_irqsave(&dev->waiting_lock, flags);
		/* extract request */
		if (list_empty(&dev->waiting_queue)) {
			spin_unlock_irqrestore(&dev->waiting_lock, flags);
			continue;
		}
		req = list_entry(dev->waiting_queue.next, struct request,
				queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irqrestore(&dev->waiting_lock, flags);
		
		if (blk_rq_sectors(req) == 0) {
			blk_end_request_all(req, 0);
			continue;
		}

		req_flags_to_str(req->cmd_flags, buff);
		SRBDEV_LOG_DEBUG(dev, "thread %d: New REQ of type %s (%d) flags: %s (%llu)",
				 th_id, req_code_to_str(rq_data_dir(req)), rq_data_dir(req), buff,
                                 (unsigned long long)req->cmd_flags);
		if (req->cmd_flags & REQ_FLUSH) {
			SRBDEV_LOG_DEBUG(dev, "DEBUG CMD REQ_FLUSH\n");
		}
		/* XXX: Use iterator instead of internal function (cf linux/blkdev.h)
		 *  __rq_for_each_bio(bio, req) {
		 */
		rq_for_each_segment(bvec, req, iter) {
			if (iter.bio->bi_rw & REQ_FLUSH) {
				SRBDEV_LOG_DEBUG(dev, "DEBUG VR BIO REQ_FLUSH\n");
			}
		}

		/* Create scatterlist */
		cdmi_desc = dev->thread_cdmi_desc[th_id];
		sg_init_table(dev->thread_cdmi_desc[th_id]->sgl, DEV_NB_PHYS_SEGS);
		dev->thread_cdmi_desc[th_id]->sgl_size = blk_rq_map_sg(dev->q, req, dev->thread_cdmi_desc[th_id]->sgl);

		SRBDEV_LOG_DEBUG(dev, "scatter_list size %d [nb_seg = %d,"
		                 " sector = %lu, nr_sectors=%u w=%d]",
		                 DEV_NB_PHYS_SEGS,
		                 dev->thread_cdmi_desc[th_id]->sgl_size,
		                 blk_rq_pos(req), blk_rq_sectors(req),
		                 rq_data_dir(req) == WRITE);

		/* Call scatter function */
		th_ret = srb_xfer_scl(dev, dev->thread_cdmi_desc[th_id], req);

		SRBDEV_LOG_DEBUG(dev, "thread %d: REQ done with returned code %d",
		                 th_id, th_ret);
	
		/* No IO error testing for the moment */
		blk_end_request_all(req, 0);
	}

	return 0;
}
Example No. 22
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	for (;;) {
		allow_signal(SIGHUP);

		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
			   matter - We don't care if we miss a wakeup, because the GC thread
			   is only an optimisation anyway. */
			schedule();
		}

		if (try_to_freeze())
			continue;

		cond_resched();

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current)) {
			siginfo_t info;
			unsigned long signr;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
				goto die;

			case SIGHUP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
Example No. 23
static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		struct sched_param param = { .sched_priority = 0 };
		wait_for_completion(&create->started);
		read_lock(&tasklist_lock);
		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
		read_unlock(&tasklist_lock);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler(create->result, SCHED_NORMAL, &param);
		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
	}
	complete(&create->done);
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
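A minimal usage sketch of the API documented above (names are illustrative;
the thread function polls kthread_should_stop(), as required for
kthread_stop() to work):

static int my_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* returned to kthread_stop() */
}

static void my_start_stop_example(void)
{
	struct task_struct *task;

	task = kthread_create(my_threadfn, NULL, "my_thread");
	if (IS_ERR(task))
		return;
	kthread_bind(task, 0);	/* optional: pin to CPU 0 while still stopped */
	wake_up_process(task);	/* start it */
	/* ... later ... */
	kthread_stop(task);	/* wakes the thread and waits for it to exit */
}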

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	if (k->state != TASK_UNINTERRUPTIBLE) {
		WARN_ON(1);
		return;
	}
	/* Must have done schedule() in kthread() before we set_task_cpu */
	wait_task_inactive(k, 0);
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	put_task_struct(k);
	mutex_unlock(&kthread_stop_lock);

	return ret;
}
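Example No. 24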
static int mipi_dsi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_panel_info *pinfo;
	struct mipi_panel_info *mipi;
	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
	u32 ystride, bpp, data;
	u32 dummy_xres, dummy_yres;
	u32 tmp;
	int target_type = 0;
	int old_nice;

	pr_debug("%s+:\n", __func__);

	mfd = platform_get_drvdata(pdev);
	fbi = mfd->fbi;
	var = &fbi->var;
	pinfo = &mfd->panel_info;
	/*
	 * Now first priority is to turn on LCD quickly for better
	 * user experience. We set current task to higher priority
	 * and restore it after panel is on.
	 */
	old_nice = task_nice(current);
	if (old_nice > -20)
		set_user_nice(current, -20);

	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
		mipi_dsi_pdata->dsi_power_save(1);
#if defined(CONFIG_FB_MSM_MIPI_PANEL_POWERON_LP11)
	/*
	 * Fix for floating state of VDD line in toshiba chip
	 * */
	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_client_power_save)
		mipi_dsi_pdata->dsi_client_power_save(1);
#endif /* CONFIG_FB_MSM_MIPI_PANEL_POWERON_LP11 */

	cont_splash_clk_ctrl(0);
	mipi_dsi_prepare_ahb_clocks();

	mipi_dsi_ahb_ctrl(1);

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

	mipi_dsi_phy_ctrl(1);

	if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
		target_type = mipi_dsi_pdata->target_type;

	mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);

	mipi_dsi_clk_enable();

	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 1);
	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 0);

	hbp = var->left_margin;
	hfp = var->right_margin;
	vbp = var->upper_margin;
	vfp = var->lower_margin;
	hspw = var->hsync_len;
	vspw = var->vsync_len;
	width = mfd->panel_info.xres;
	height = mfd->panel_info.yres;

	mipi  = &mfd->panel_info.mipi;
	if (mfd->panel_info.type == MIPI_VIDEO_PANEL) {
		dummy_xres = mfd->panel_info.lcdc.xres_pad;
		dummy_yres = mfd->panel_info.lcdc.yres_pad;

		if (mdp_rev >= MDP_REV_41) {
			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hspw + hbp + width + dummy_xres) << 16 |
				(hspw + hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vspw + vbp + height + dummy_yres) << 16 |
				(vspw + vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vspw + vbp + height + dummy_yres +
					vfp - 1) << 16 | (hspw + hbp +
					width + dummy_xres + hfp - 1));
		} else {
			/* DSI_LAN_SWAP_CTRL */
			MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, mipi->dlane_swap);

			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hbp + width + dummy_xres) << 16 | (hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vbp + height + dummy_yres) << 16 | (vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vbp + height + dummy_yres + vfp) << 16 |
					(hbp + width + dummy_xres + hfp));
		}

		MIPI_OUTP(MIPI_DSI_BASE + 0x2c, (hspw << 16));
		MIPI_OUTP(MIPI_DSI_BASE + 0x30, 0);
		MIPI_OUTP(MIPI_DSI_BASE + 0x34, (vspw << 16));

	} else {		/* command mode */
		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
			bpp = 2;
		else
			bpp = 3;	/* Default format set to RGB888 */

		ystride = width * bpp + 1;

		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
		MIPI_OUTP(MIPI_DSI_BASE + 0x5c, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x54, data);

		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
		data = height << 16 | width;
		MIPI_OUTP(MIPI_DSI_BASE + 0x60, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x58, data);
	}

	mipi_dsi_host_init(mipi);
#if defined(CONFIG_FB_MSM_MIPI_PANEL_POWERON_LP11)
	/*
	 * For TC358764 D2L IC, one of the requirement for power on
	 * is to maintain an LP11 state in data and clock lanes during
	 * power enabling and reset assertion. This change is to
	 * achieve that.
	 * */
	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_client_power_save) {
		u32 tmp_reg0c, tmp_rega8;
		mipi_dsi_pdata->dsi_client_reset();
		udelay(200);
		/* backup register values */
		tmp_reg0c = MIPI_INP(MIPI_DSI_BASE + 0x000c);
		tmp_rega8 = MIPI_INP(MIPI_DSI_BASE + 0xA8);
		/* Clear HS  mode assertion and related flags */
		MIPI_OUTP(MIPI_DSI_BASE + 0x0c, 0x8000);
		MIPI_OUTP(MIPI_DSI_BASE + 0xA8, 0x0);
		wmb();
		mdelay(5);
		/* restore previous values */
		MIPI_OUTP(MIPI_DSI_BASE + 0x0c, tmp_reg0c);
		MIPI_OUTP(MIPI_DSI_BASE + 0xa8, tmp_rega8);
		wmb();
	}
#endif /* CONFIG_FB_MSM_MIPI_PANEL_POWERON_LP11 */

#if defined(CONFIG_FB_MSM_MIPI_MAGNA_OLED_VIDEO_WVGA_PT)
	/* LP11 */
	tmp = MIPI_INP(MIPI_DSI_BASE + 0xA8);
	tmp &= ~(1<<28);
	MIPI_OUTP(MIPI_DSI_BASE + 0xA8, tmp);
	wmb();
	/* LP11 */

	usleep(5000);
	if (mipi_dsi_pdata && mipi_dsi_pdata->active_reset)
			mipi_dsi_pdata->active_reset(); /* high */
	usleep(10000);

#endif

	if (mipi->force_clk_lane_hs) {
		tmp = MIPI_INP(MIPI_DSI_BASE + 0xA8);
		tmp |= (1<<28);
		MIPI_OUTP(MIPI_DSI_BASE + 0xA8, tmp);
		wmb();
	}

	if (mdp_rev >= MDP_REV_41)
		mutex_lock(&mfd->dma->ov_mutex);
	else
		down(&mfd->dma->mutex);

	ret = panel_next_on(pdev);

	mipi_dsi_op_mode_config(mipi->mode);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (mdp_rev >= MDP_REV_41) {
					if (gpio_request(vsync_gpio,
						"MDP_VSYNC") == 0)
						gpio_direction_input(
							vsync_gpio);
					else
						pr_err("%s: unable to \
							request gpio=%d\n",
							__func__, vsync_gpio);
				} else if (mdp_rev == MDP_REV_303) {
					if (!tlmm_settings && gpio_request(
						vsync_gpio, "MDP_VSYNC") == 0) {
						ret = gpio_tlmm_config(
							GPIO_CFG(
							vsync_gpio, 1,
							GPIO_CFG_INPUT,
							GPIO_CFG_PULL_DOWN,
							GPIO_CFG_2MA),
							GPIO_CFG_ENABLE);

						if (ret) {
							pr_err("%s: unable to config tlmm = %d\n",
							       __func__, vsync_gpio);
						}
						tlmm_settings = TRUE;

						gpio_direction_input(
							vsync_gpio);
					} else {
						if (!tlmm_settings) {
							pr_err("%s: unable to request gpio=%d\n",
							       __func__, vsync_gpio);
						}
					}
				}
Example No. 25
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			spin_unlock(&c->erase_completion_lock);
			jffs2_dbg(1, "%s(): sleeping...\n", __func__);
			schedule();
		} else
			spin_unlock(&c->erase_completion_lock);

		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing.  Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down.   Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		schedule_timeout_interruptible(msecs_to_jiffies(50));

		if (kthread_should_stop()) {
			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
			goto die;
		}

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				jffs2_dbg(1, "%s(): SIGSTOP received\n",
					  __func__);
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				jffs2_dbg(1, "%s(): SIGKILL received\n",
					  __func__);
				goto die;

			case SIGHUP:
				jffs2_dbg(1, "%s(): SIGHUP received\n",
					  __func__);
				break;
			default:
				jffs2_dbg(1, "%s(): signal %ld received\n",
					  __func__, signr);
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		jffs2_dbg(1, "%s(): pass\n", __func__);
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			pr_notice("No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
Example No. 26
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	p->cpus_allowed = cpumask_of_cpu(cpu);
	p->rt.nr_cpus_allowed = 1;
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_user_nice(tsk, KTHREAD_NICE_LEVEL);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
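Example No. 27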
/* # echo @ut_type @ut_prio @ut_tid > utest */
static ssize_t pts_utest_write(struct file *flip, const char *ubuf,
			       size_t cnt, loff_t *data)
{
	char buf[32];
	size_t copy_size = cnt;
	unsigned long val;
	int ut_type, ut_tid, ut_prio;
	int ret, i = 0, j;
	struct task_struct *p;


	if (cnt >= sizeof(buf))
		copy_size = sizeof(buf) - 1;
	buf[copy_size] = '\0';

	if (copy_from_user(&buf, ubuf, copy_size))
		return -EFAULT;

	do { } while (buf[i++] != ' ');
	buf[(i - 1)] = '\0';
	ret = strict_strtoul(buf, 10, &val);
	ut_type = (int)val;

	j = i;
	do { } while (buf[i++] != ' ');
	buf[(i - 1)] = '\0';
	ret = strict_strtoul((const char *)(&buf[j]), 10, &val);
	ut_prio = (int)val;

	ret = strict_strtoul((const char *)(&buf[i]), 10, &val);
	ut_tid = (int)val;

	printk("%s: unit test %s tid %d prio %d j %d i %d", __func__,
	       (ut_type == PTS_USER) ? "user" :
		 ((ut_type == PTS_KRNL) ? "kernel" :
		   ((ut_type == PTS_BNDR) ? "binder" : "unknown")),
	       ut_tid, ut_prio, j, i);


	/* start to test api */
	p = find_task_by_vpid(ut_tid);
	if (!p)
		goto utest_out;

	if ((ut_prio >= 0) && (ut_prio < MAX_RT_PRIO)) {
		struct sched_param param;

		/* sched_priority is rt priority rather than effective one */
		ut_prio = MAX_RT_PRIO-1 - ut_prio;
		param.sched_priority = ut_prio | MT_ALLOW_RT_PRIO_BIT;

		switch (ut_type) {
		case PTS_USER:
			sched_setscheduler_syscall(p, SCHED_RR, &param);
			break;
		case PTS_KRNL:
			sched_setscheduler_nocheck(p, SCHED_RR, &param);
			break;
		case PTS_BNDR:
			sched_setscheduler_nocheck_binder(p, SCHED_RR, &param);
			break;
		default:
			break;
		}	
	} else { /* assume normal */
		switch (ut_type) {
		case PTS_USER:
			set_user_nice_syscall(p, PRIO_TO_NICE(ut_prio));
			break;
		case PTS_KRNL:
			set_user_nice(p, PRIO_TO_NICE(ut_prio));
			break;
		case PTS_BNDR:
			set_user_nice_binder(p, PRIO_TO_NICE(ut_prio));
			break;
		default:
			break;
		}	
	}

utest_out:
	return cnt;
}
Example No. 28
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->rt.nr_cpus_allowed = 1;
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	trace_sched_kthread_stop(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	mutex_unlock(&kthread_stop_lock);

	trace_sched_kthread_stop_ret(ret);

	return ret;
}
Example No. 29
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;


	daemonize("%sd", tr->name);


	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call 
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

#ifdef CONFIG_MOT_WFN473
	set_user_nice(current, -20);
#endif

	spin_lock_irq(rq->queue_lock);
		
	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
Example No. 30
/*
 * One of these threads is started per cpu.  Each thread is responsible
 * for loading that cpu's bte interface and then writing to the
 * test buffer.  The transfers are set in a round-robin fashion.
 * The end result is that each test buffer is being written into
 * by the previous node and both cpu's at the same time as the
 * local bte is transferring it to the next node.
 */
static int
brt_notify_thrd(void *__bind_cpu)
{
	int bind_cpu = (long int)__bind_cpu;
	int cpu = cpu_logical_map(bind_cpu);
	nodepda_t *nxt_node;
	long tmout_itc_intvls;
	long tmout;
	long passes;
	long good_xfer_cnt;
	u64 src_phys, dst_phys;
	int i;
	volatile char *src_buf;
	u64 *notify;

	atomic_inc(&brt_thread_cnt);
	daemonize();
	set_user_nice(current, 19);
	sigfillset(&current->blocked);

	/* Migrate to the right CPU */
	set_cpus_allowed(current, 1UL << cpu);

	/* Calculate the uSec timeout itc offset. */
	tmout_itc_intvls = local_cpu_data->cyc_per_usec * hang_usec;

	if (local_cnodeid() == (numnodes - 1)) {
		nxt_node = NODEPDA(0);
	} else {
		nxt_node = NODEPDA(local_cnodeid() + 1);
	}

	src_buf = nodepda->bte_if[0].bte_test_buf;
	src_phys = __pa(src_buf);
	dst_phys = __pa(nxt_node->bte_if[0].bte_test_buf);

	notify = kmalloc(L1_CACHE_BYTES, GFP_KERNEL);
	ASSERT(!((u64) notify & L1_CACHE_MASK));

	printk("BTE Hang %d xfer 0x%lx -> 0x%lx, Notify=0x%lx\n",
	       smp_processor_id(), src_phys, dst_phys, (u64) notify);

	passes = 0;
	good_xfer_cnt = 0;

	/* Loop until signalled to exit. */
	while (!brt_exit_flag) {
		/*
		 * A hang will prevent further transfers.
		 * NOTE: Sometimes, it appears like a hang occurred and
		 * then transfers begin again.  This just means that
		 * there is NUMA congestion and the hang_usec param
		 * should be increased.
		 */
		if (!(*notify & IBLS_BUSY)) {
			if ((bte_copy(src_phys,
				      dst_phys,
				      4UL * L1_CACHE_BYTES,
				      BTE_NOTIFY,
				      (void *)notify)) != BTE_SUCCESS) {
				printk("<0>Cpu %d Could not "
				       "allocate a bte.\n",
				       smp_processor_id());
				continue;
			}

			tmout = ia64_get_itc() + tmout_itc_intvls;

			while ((*notify & IBLS_BUSY) &&
			       (ia64_get_itc() < tmout)) {


				/* Push data out with the processor. */
				for (i = 0; i < (4 * L1_CACHE_BYTES);
				     i += L1_CACHE_BYTES) {
					src_buf[i] = (passes % 128);
				}
			}

			if (*notify & IBLS_BUSY) {
				printk("<0>Cpu %d BTE appears to have "
				       "hung.\n", smp_processor_id());
			} else {
				good_xfer_cnt++;
			}
		}

		/* Every x passes, take a little break. */
		if (!(++passes % 40)) {
			passes = 0;
			schedule_timeout(HZ / 100);
		}
	}

	kfree(notify);

	printk("Cpu %d had %ld good passes\n",
	       smp_processor_id(), good_xfer_cnt);

	atomic_dec(&brt_thread_cnt);
	return (0);
}