static void ShowStatus(void)
{
	struct task_struct *t, *p;
	struct pid *pid;
	int count = 0;

	InDumpAllStack = 1;

	/* show all kbt in init */
	LOGE("[Hang_Detect] dump init all thread bt \n");
	if (init_pid) {
		pid = find_get_pid(init_pid);
		t = p = get_pid_task(pid, PIDTYPE_PID);
		do {
			sched_show_task_local(t);
		} while_each_thread(p, t);
	}

	/* show all kbt in surfaceflinger */
	LOGE("[Hang_Detect] dump surfaceflinger all thread bt \n");
	if (surfaceflinger_pid) {
		pid = find_get_pid(surfaceflinger_pid);
		t = p = get_pid_task(pid, PIDTYPE_PID);
		count = 0;
		do {
			sched_show_task_local(t);
			if ((++count) % 5 == 4)
				msleep(20);
		} while_each_thread(p, t);
	}
	msleep(100);

	/* show all kbt in system_server */
	LOGE("[Hang_Detect] dump system_server all thread bt \n");
	if (system_server_pid) {
		pid = find_get_pid(system_server_pid);
		t = p = get_pid_task(pid, PIDTYPE_PID);
		count = 0;
		do {
			sched_show_task_local(t);
			if ((++count) % 5 == 4)
				msleep(20);
		} while_each_thread(p, t);
	}
	msleep(100);

	/* show all D state thread kbt */
	LOGE("[Hang_Detect] dump all D thread bt \n");
	show_state_filter_local(TASK_UNINTERRUPTIBLE);
	debug_show_all_locks();

	system_server_pid = 0;
	surfaceflinger_pid = 0;
	init_pid = 0;
	InDumpAllStack = 0;
	msleep(10);
}
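ShowStatus() takes references through find_get_pid() and get_pid_task() but never releases them, and it walks each thread group without any locking. Below is a minimal sketch of the same per-process dump with balanced reference counting; dump_process_threads() is a hypothetical helper, sched_show_task() stands in for the out-of-tree sched_show_task_local(), and the sketch assumes a kernel that still provides the while_each_thread() iterator.

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>

/* Hypothetical helper: dump a kernel backtrace for every thread of one pid. */
static void dump_process_threads(pid_t nr)
{
	struct pid *pid;
	struct task_struct *p, *t;

	pid = find_get_pid(nr);			/* takes a reference on struct pid */
	if (!pid)
		return;

	p = get_pid_task(pid, PIDTYPE_PID);	/* takes a task reference */
	if (p) {
		rcu_read_lock();		/* keep the thread list stable */
		t = p;
		do {
			sched_show_task(t);
		} while_each_thread(p, t);
		rcu_read_unlock();
		put_task_struct(p);		/* drop the task reference */
	}
	put_pid(pid);				/* drop the pid reference */
}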
static int wmt_send_signal(int level)
{
	int ret = 0;
	int thro = level;

	if (tm_input_pid == 0) {
		mtk_cooler_amdtxctrl_dprintk("[%s] pid is empty\n", __func__);
		ret = -1;
	}

	mtk_cooler_amdtxctrl_dprintk_always("[%s] pid is %d, %d, %d\n", __func__, tm_pid, tm_input_pid, thro);

	if (ret == 0 && tm_input_pid != tm_pid) {
		tm_pid = tm_input_pid;
		pg_task = get_pid_task(find_vpid(tm_pid), PIDTYPE_PID);
	}

	if (ret == 0 && pg_task) {
		siginfo_t info;

		info.si_signo = SIGIO;
		info.si_errno = 1;	/* for md ul throttling */
		info.si_code = thro;
		info.si_addr = NULL;
		ret = send_sig_info(SIGIO, &info, pg_task);
	}

	if (ret != 0)
		mtk_cooler_amdtxctrl_dprintk("[%s] ret=%d\n", __func__, ret);

	return ret;
}
static int _mtk_cl_sd_send_signal(void)
{
	int ret = 0;

	if (tm_input_pid == 0) {
		mtk_cooler_shutdown_dprintk("%s pid is empty\n", __func__);
		ret = -1;
	}

	mtk_cooler_shutdown_dprintk("%s pid is %d, %d\n", __func__, tm_pid, tm_input_pid);

	if (ret == 0 && tm_input_pid != tm_pid) {
		tm_pid = tm_input_pid;
		pg_task = get_pid_task(find_vpid(tm_pid), PIDTYPE_PID);
	}

	if (ret == 0 && pg_task) {
		siginfo_t info;

		info.si_signo = SIGIO;
		info.si_errno = 0;
		info.si_code = 1;
		info.si_addr = NULL;
		ret = send_sig_info(SIGIO, &info, pg_task);
	}

	if (ret != 0)
		mtk_cooler_shutdown_dprintk("%s ret=%d\n", __func__, ret);

	return ret;
}
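Both wmt_send_signal() and _mtk_cl_sd_send_signal() deliver the event to a registered user-space pid as a SIGIO whose si_code carries the payload (the throttle level in the first case, a fixed code in the second). A minimal, hedged sketch of what the receiving daemon could look like, using only standard POSIX calls, is shown below; how the daemon hands its pid to the driver (typically a procfs write) is not part of this excerpt and is assumed.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Hedged user-space sketch: the kernel code above packs the throttle level
 * into si_code (and a driver-specific flag into si_errno) of a SIGIO. */
static void sigio_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* printf() is not async-signal-safe; kept here only for brevity */
	printf("SIGIO: si_code=%d si_errno=%d\n", info->si_code, info->si_errno);
}

int main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = sigio_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGIO, &sa, NULL);

	/* Registering this process's pid with the cooler driver (e.g. via its
	 * procfs entry) is driver-specific and omitted here. */
	for (;;)
		pause();
}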
Example #4
File: eventfd_link.c  Project: goby/dpdk
static long
eventfd_link_ioctl_copy2(unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct task_struct *task_target = NULL;
	struct file *file;
	struct files_struct *files;
	struct eventfd_copy2 eventfd_copy2;
	long ret = -EFAULT;

	if (copy_from_user(&eventfd_copy2, argp, sizeof(struct eventfd_copy2)))
		goto out;

	/*
	 * Find the task struct for the target pid
	 */
	ret = -ESRCH;

	task_target =
		get_pid_task(find_vpid(eventfd_copy2.pid), PIDTYPE_PID);
	if (task_target == NULL) {
		pr_info("Unable to find pid %d\n", eventfd_copy2.pid);
		goto out;
	}

	ret = -ESTALE;
	files = get_files_struct(task_target);
	if (files == NULL) {
		pr_info("Failed to get target files struct\n");
		goto out_task;
	}

	ret = -EBADF;
	file = fget_from_files(files, eventfd_copy2.fd);
	put_files_struct(files);

	if (file == NULL) {
		pr_info("Failed to get fd %d from target\n", eventfd_copy2.fd);
		goto out_task;
	}

	/*
	 * Install the file struct from the target process into the
	 * newly allocated file descriptor of the source process.
	 */
	ret = get_unused_fd_flags(eventfd_copy2.flags);
	if (ret < 0) {
		fput(file);
		goto out_task;
	}
	fd_install(ret, file);

out_task:
	put_task_struct(task_target);
out:
	return ret;
}
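From user space, this copy2 variant is driven through a plain ioctl() on the eventfd_link character device. A hedged sketch follows; the device path /dev/eventfd-link and the EVENTFD_COPY2 request macro are taken to be whatever the driver's eventfd_link.h header defines, and are assumptions here.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "eventfd_link.h"	/* assumed driver header providing struct
				 * eventfd_copy2 and the EVENTFD_COPY2 request */

/* Sketch: duplicate fd 'target_fd' of process 'target_pid' into this process.
 * On success the driver installs the file and returns the new local fd. */
static int copy_remote_fd(int link_fd, pid_t target_pid, int target_fd)
{
	struct eventfd_copy2 req = {
		.pid   = target_pid,
		.fd    = target_fd,
		.flags = 0,	/* flags passed to get_unused_fd_flags() */
	};

	return ioctl(link_fd, EVENTFD_COPY2, &req);
}

/*
 * Example use (device path and macro name are assumptions):
 *	int link_fd = open("/dev/eventfd-link", O_RDWR);
 *	int fd = copy_remote_fd(link_fd, pid, remote_fd);
 */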
Example #5
int monitor_fn(void *unused)
{
	tm->target_task = get_pid_task(tm->target_pid, PIDTYPE_PID);

	while (tm->target_task && pid_alive(tm->target_task)
	       && !kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		pr_info("taskmonitor: pid %d\tusr %d\tsys %d\n", target,
			(int)tm->target_task->utime,
			(int)tm->target_task->stime);
	}

	if (tm->target_task)
		put_task_struct(tm->target_task);

	pr_warn("monitor_fn: target task is no longer alive!\n");

	return 0;
}
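monitor_fn() dereferences module-level state (tm and target) that this excerpt does not define. Below is a minimal sketch of the surrounding module, assuming the field names the function uses and a recent kernel (4.11+ header layout); it shows how the kthread would typically be started with kthread_run() and stopped with kthread_stop(), and how target_pid could be resolved with find_get_pid().

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>

/* Assumed shape of the state dereferenced by monitor_fn() above. */
struct task_monitor {
	struct pid *target_pid;
	struct task_struct *target_task;
};

static struct task_monitor *tm;
static struct task_struct *monitor_thread;
static int target = 1;			/* hypothetical module parameter: pid to watch */
module_param(target, int, 0444);

static int __init taskmonitor_init(void)
{
	tm = kzalloc(sizeof(*tm), GFP_KERNEL);
	if (!tm)
		return -ENOMEM;

	tm->target_pid = find_get_pid(target);	/* pid reference dropped on exit */
	if (!tm->target_pid) {
		kfree(tm);
		return -ESRCH;
	}

	monitor_thread = kthread_run(monitor_fn, NULL, "taskmonitor");
	if (IS_ERR(monitor_thread)) {
		put_pid(tm->target_pid);
		kfree(tm);
		return PTR_ERR(monitor_thread);
	}
	/* pin the kthread's task_struct since monitor_fn() may exit on its own */
	get_task_struct(monitor_thread);
	return 0;
}

static void __exit taskmonitor_exit(void)
{
	kthread_stop(monitor_thread);	/* monitor_fn() checks kthread_should_stop() */
	put_task_struct(monitor_thread);
	put_pid(tm->target_pid);
	kfree(tm);
}

module_init(taskmonitor_init);
module_exit(taskmonitor_exit);
MODULE_LICENSE("GPL");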
Example #6
File: umem_odp.c  Project: Cai900205/test
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 * @flags: IB_ODP_DMA_MAP_FOR_PREFETCH is used to indicate that the function
 *	   was called from the prefetch verb. IB_ODP_DMA_MAP_FOR_PAGEFAULT is
 *	   used to indicate that the function was called from a pagefault
 *	   handler.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq,
			      enum ib_odp_dma_map_flags flags)
{
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;
	struct page       **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages(owning_process, owning_mm,
				     user_virt, min_t(size_t,
						      (bcnt - 1 + PAGE_SIZE) /
						      PAGE_SIZE,
						      PAGE_SIZE /
						      sizeof(struct page *)),
				     access_mask & ODP_WRITE_ALLOWED_BIT, 0,
				     local_page_list, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(umem, k,
				local_page_list[j], access_mask,
				current_seq, flags);
			if (ret < 0)
				break;
			k++;
		}

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long) local_page_list);
	return ret;
}
Example #7
File: umem_odp.c  Project: Cai900205/test
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_mutex);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_mutex);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm        = NULL;
		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead; the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead; the notifiers
			 * were already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_mutex);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}
Example #8
File: eventfd_link.c  Project: goby/dpdk
static long
eventfd_link_ioctl_copy(unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct task_struct *task_target = NULL;
	struct file *file;
	struct files_struct *files;
	struct fdtable *fdt;
	struct eventfd_copy eventfd_copy;
	long ret = -EFAULT;

	if (copy_from_user(&eventfd_copy, argp, sizeof(struct eventfd_copy)))
		goto out;

	/*
	 * Find the task struct for the target pid
	 */
	ret = -ESRCH;

	task_target =
		get_pid_task(find_vpid(eventfd_copy.target_pid), PIDTYPE_PID);
	if (task_target == NULL) {
		pr_info("Unable to find pid %d\n", eventfd_copy.target_pid);
		goto out;
	}

	ret = -ESTALE;
	files = get_files_struct(current);
	if (files == NULL) {
		pr_info("Failed to get current files struct\n");
		goto out_task;
	}

	ret = -EBADF;
	file = fget_from_files(files, eventfd_copy.source_fd);

	if (file == NULL) {
		pr_info("Failed to get fd %d from source\n",
			eventfd_copy.source_fd);
		put_files_struct(files);
		goto out_task;
	}

	/*
	 * Release the existing eventfd in the source process
	 */
	spin_lock(&files->file_lock);
	fput(file);
	filp_close(file, files);
	fdt = files_fdtable(files);
	fdt->fd[eventfd_copy.source_fd] = NULL;
	spin_unlock(&files->file_lock);

	put_files_struct(files);

	/*
	 * Find the file struct associated with the target fd.
	 */

	ret = -ESTALE;
	files = get_files_struct(task_target);
	if (files == NULL) {
		pr_info("Failed to get target files struct\n");
		goto out_task;
	}

	ret = -EBADF;
	file = fget_from_files(files, eventfd_copy.target_fd);
	put_files_struct(files);

	if (file == NULL) {
		pr_info("Failed to get fd %d from target\n",
			eventfd_copy.target_fd);
		goto out_task;
	}

	/*
	 * Install the file struct from the target process into the
	 * file descriptor of the source process.
	 */

	fd_install(eventfd_copy.source_fd, file);
	ret = 0;

out_task:
	put_task_struct(task_target);
out:
	return ret;
}