static int zfcp_erp_thread(void *data)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
	struct list_head *next;
	struct zfcp_erp_action *act;
	unsigned long flags;
	int ignore;

	daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
	/* Block all signals */
	siginitsetinv(&current->blocked, 0);
	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
	wake_up(&adapter->erp_thread_wqh);

	while (!(atomic_read(&adapter->status) &
		 ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {

		zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
		ignore = down_interruptible(&adapter->erp_ready_sem);
		zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);

		write_lock_irqsave(&adapter->erp_lock, flags);
		next = adapter->erp_ready_head.next;
		write_unlock_irqrestore(&adapter->erp_lock, flags);

		if (next != &adapter->erp_ready_head) {
			act = list_entry(next, struct zfcp_erp_action, list);

			/* there is more to come after dismission, no notify */
			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
				zfcp_erp_wakeup(adapter);
		}
	}

	/* thread shuts down: clear the UP flag and wake the thread killer */
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
	wake_up(&adapter->erp_thread_wqh);

	return 0;
}
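Note: siginitsetinv(set, mask) initializes *set to the complement of mask, so installing the result as the blocked mask blocks every signal except those in mask; with a mask of 0, as above, the thread blocks all signals. A minimal sketch of the block/restore pattern most of the examples below build on (kernel-internal API; error handling omitted):

	sigset_t blocked, oldset;

	/* block everything except SIGKILL */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	/* ... work that only SIGKILL may interrupt ... */

	/* restore the caller's original mask */
	sigprocmask(SIG_SETMASK, &oldset, NULL);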
Example #2
/* This must be called in the context of the new thread. */
static void thread_initialize( pj_thread_t *thread )
{
    TRACE_((THIS_FILE, "---new thread initializing..."));

    /* Set TLS */
    pj_thread_local_set(thread_tls_id, thread);

    /* fill in thread structure */
    thread->thread = current;
    pj_assert(thread->thread != NULL);

    /* set signal mask to what we want to respond */
    siginitsetinv(&current->blocked, 
		  sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));

    /* initialise wait queue */
    init_waitqueue_head(&thread->queue);

    /* initialise termination flag */
    thread->terminate = 0;

    /* set name of this process (making sure obj_name is null 
     * terminated first) 
     */
    thread->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
    sprintf(current->comm, "%s", thread->obj_name);
        
    /* tell the creator that we are ready and let him continue */
    up(&thread->startstop_sem);	
}
Example #3
void rtmp_os_thread_init(PUCHAR pThreadName, PVOID pNotify)
{

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
    daemonize("%s", pThreadName);	/* daemonize() takes a printf-style format */

    allow_signal(SIGTERM);
    allow_signal(SIGKILL);
    current->flags |= PF_NOFREEZE;
#else
    unsigned long flags;

    daemonize();
    reparent_to_init();
    strcpy(current->comm, pThreadName);

    siginitsetinv(&current->blocked, sigmask(SIGTERM) | sigmask(SIGKILL));

    /* Allow interception of SIGKILL only
     * Don't allow other signals to interrupt the transmission */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22)
    spin_lock_irqsave(&current->sigmask_lock, flags);
    flush_signals(current);
    recalc_sigpending(current);
    spin_unlock_irqrestore(&current->sigmask_lock, flags);
#endif
#endif

    /* signal that we've started the thread */
    complete(pNotify);

}
Example #4
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}
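The usual counterpart reinstates the saved mask once the KILL-only section is done; a minimal sketch (the FUSE source pairs block_sigs() with a helper of this shape):

static void restore_sigs(sigset_t *oldset)
{
	/* reinstall the mask saved by block_sigs() */
	sigprocmask(SIG_SETMASK, oldset, NULL);
}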
Example #5
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}
Example #6
static int sync_thread(void *startup)
{
	DECLARE_WAITQUEUE(wait, current);
	mm_segment_t oldmm;
	int state;

	MOD_INC_USE_COUNT;
	daemonize();

	oldmm = get_fs();
	set_fs(KERNEL_DS);

	if (ip_vs_sync_state == IP_VS_STATE_MASTER)
		sprintf(current->comm, "ipvs_syncmaster");
	else if (ip_vs_sync_state == IP_VS_STATE_BACKUP)
		sprintf(current->comm, "ipvs_syncbackup");
	else IP_VS_BUG();

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, 0);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	/* set up multicast address */
	mcast_addr.sin_family = AF_INET;
	mcast_addr.sin_port = htons(IP_VS_SYNC_PORT);
	mcast_addr.sin_addr.s_addr = htonl(IP_VS_SYNC_GROUP);

	add_wait_queue(&sync_wait, &wait);

	state = ip_vs_sync_state;
	sync_pid = current->pid;
	IP_VS_INFO("sync thread started.\n");
	complete((struct completion *)startup);

	/* processing master/backup loop here */
	if (state == IP_VS_STATE_MASTER)
		sync_master_loop();
	else if (state == IP_VS_STATE_BACKUP)
		sync_backup_loop();
	else IP_VS_BUG();

	remove_wait_queue(&sync_wait, &wait);

	/* thread exits */
	sync_pid = 0;
	IP_VS_INFO("sync thread stopped!\n");

	set_fs(oldmm);
	MOD_DEC_USE_COUNT;

	stop_sync = 0;
	wake_up(&stop_sync_wait);

	return 0;
}
Example #7
void CnxtAdslLEDTask(void *sem)
{
    BOOL HandShakeLED;
    BOOL ShowtimeLED;

    struct task_struct *tsk = current ;
    tsk->session = 1 ;
    tsk->pgrp = 1 ;
    strcpy ( tsk->comm, "CnxtAdslLEDTask" ) ;

    /* sigstop and sigcont will stop and wakeup kupdate */
    spin_lock_irq(&tsk->sigmask_lock);
    sigfillset(&tsk->blocked);
    siginitsetinv(&current->blocked, sigmask(SIGCONT) | sigmask(SIGSTOP));
    recalc_sigpending(tsk);
    spin_unlock_irq(&tsk->sigmask_lock);

    up((struct semaphore *)sem);

    while(1)
    {
        /* Condensed Showtime State to two Booleans - In Showtime and In Training */
        ShowtimeLED= FALSE;
        HandShakeLED= FALSE;

        switch (HWSTATE.dwLineStatus)
        {
        case HW_IO_MODEM_STATUS_ACTIVATED:
            ShowtimeLED= TRUE;
            break;

        case HW_IO_MODEM_STATUS_CHANNEL_ANALYSIS:
        case HW_IO_MODEM_STATUS_TRANSCEIVER_TRAINING:
        case HW_IO_MODEM_STATUS_EXCHANGE:
        case HW_IO_MODEM_STATUS_ACTIVATION:
            HandShakeLED= TRUE;
            break;
        }

        /* Blink Showtime LED if in Training */
        if (ShowtimeLED || HandShakeLED)
        {
            WriteGPIOData( GPOUT_SHOWTIME_LED,LED_ON );
        }

        /* This logic is for a blinking Showtime LED */
        if (!ShowtimeLED)
        {
            WriteGPIOData( GPOUT_SHOWTIME_LED,LED_OFF );
        }

        TaskDelayMsec(LED_DELAY_PERIOD);

    }
}
Example #8
static int ncp_do_request(struct ncp_server *server, int size,
        void* reply, int max_reply_size)
{
    int result;

    if (server->lock == 0) {
        printk(KERN_ERR "ncpfs: Server not locked!\n");
        return -EIO;
    }
    if (!ncp_conn_valid(server)) {
        printk(KERN_ERR "ncpfs: Connection invalid!\n");
        return -EIO;
    }
    {
        sigset_t old_set;
        unsigned long mask, flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        old_set = current->blocked;
        if (current->flags & PF_EXITING)
            mask = 0;
        else
            mask = sigmask(SIGKILL);
        if (server->m.flags & NCP_MOUNT_INTR) {
            /* FIXME: This doesn't seem right at all.  So, like,
               we can't handle SIGINT and get whatever to stop?
               What if we've blocked it ourselves?  What about
               alarms?  Why, in fact, are we mucking with the
               sigmask at all? -- r~ */
            if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
                mask |= sigmask(SIGINT);
            if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
                mask |= sigmask(SIGQUIT);
        }
        siginitsetinv(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
        
        result = do_ncp_rpc_call(server, size, reply, max_reply_size);

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = old_set;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
    }

    DDPRINTK("do_ncp_rpc_call returned %d\n", result);

    return result;
}
Example #9
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct socket *sock, int send, void *buf, int size,
		int msg_flags)
{
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				current->pid, current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);

	return result;
}
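Note: blocking everything but SIGKILL lets a wedged transfer still be aborted by killing the process, while ordinary signals cannot interrupt the socket I/O mid-request and desynchronize the protocol stream; MSG_NOSIGNAL additionally suppresses SIGPIPE if the peer closes the connection.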
Example #10
static int context_thread(void *startup)
{
	struct task_struct *curtask = current;
	DECLARE_WAITQUEUE(wait, curtask);
	struct k_sigaction sa;

	daemonize();
	strcpy(curtask->comm, "keventd");
	current->flags |= PF_IOTHREAD;
	keventd_running = 1;
	keventd_task = curtask;

	spin_lock_irq(&curtask->sigmask_lock);
	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	complete((struct completion *)startup);

	/* Install a handler so SIGCLD is delivered */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	/*
	 * If one of the functions on a task queue re-adds itself
	 * to the task queue we call schedule() in state TASK_RUNNING
	 */
	for (;;) {
		set_task_state(curtask, TASK_INTERRUPTIBLE);
		add_wait_queue(&context_task_wq, &wait);
		if (TQ_ACTIVE(tq_context))
			set_task_state(curtask, TASK_RUNNING);
		schedule();
		remove_wait_queue(&context_task_wq, &wait);
		run_task_queue(&tq_context);
		wake_up(&context_task_done);
		if (signal_pending(curtask)) {
			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
				;
			spin_lock_irq(&curtask->sigmask_lock);
			flush_signals(curtask);
			recalc_sigpending(curtask);
			spin_unlock_irq(&curtask->sigmask_lock);
		}
	}
}
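Example #11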
int SysSetThreadName(const char *name) 
{
	SysLockKernel();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,61)
        daemonize();
        sigfillset(&current->blocked);
        sprintf(current->comm, "%s", name);
#else
        daemonize("%s", name);
        allow_signal(SIGTERM);
#endif
        siginitsetinv(&current->blocked,
                      sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM));
        SysUnlockKernel();

	return 0;
}
Example #12
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		printk(KERN_ERR "ncpfs: Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		printk(KERN_ERR "ncpfs: Connection invalid!\n");
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
		
		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	DDPRINTK("do_ncp_rpc_call returned %d\n", result);

	return result;
}
Example #13
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	unsigned long	irqflags;
	
	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sig->action;
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
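Note the subtlety in sigallow & ~oldset->sig[0]: SIGKILL, SIGINT and SIGQUIT are only left deliverable if the caller had not already blocked them itself, so the RPC layer never unblocks a signal that the surrounding code explicitly masked off. The autofs4 examples (#20, #29) use the same guard with SHUTDOWN_SIGS.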
Example #14
static int
ps2mcfs_thread(void *arg)
{

	DPRINT(DBG_INFO, "start thread\n");

	lock_kernel();
	/* get rid of all our resources related to user space */
	daemonize();
	siginitsetinv(&current->blocked,
		      sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));
	/* Set the name of this process. */
	sprintf(current->comm, "ps2mcfs");
	unlock_kernel();

	thread_task = current;
	complete(&thread_comp); /* notify that we are ready */

	/*
	 * loop
	 */
	while(1) {
		if (ps2sif_lock_interruptible(ps2mcfs_lock, "mcfs_thread")==0){
			ps2mcfs_check_fd();
			ps2sif_unlock(ps2mcfs_lock);
		}

		interruptible_sleep_on_timeout(&thread_wq,
					       PS2MCFS_CHECK_INTERVAL);

		if (signal_pending(current) )
			break;
	}

	DPRINT(DBG_INFO, "exit thread\n");

	thread_task = NULL;
	complete(&thread_comp); /* notify that we've exited */

	return (0);
}
Example #15
void ral_task_customize(
	IN KTHREAD *pTask)
{
	RTBT_OS_TASK *pOSTask = (RTBT_OS_TASK *)pTask->pOSThread;

	
#ifndef KTHREAD_SUPPORT
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	daemonize("%s", (PSTRING)&pOSTask->taskName[0]);

	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	current->flags |= PF_NOFREEZE;
#else
	unsigned long flags;

	daemonize();
	reparent_to_init();
	strcpy(current->comm, &pOSTask->taskName[0]);

	siginitsetinv(&current->blocked, sigmask(SIGTERM) | sigmask(SIGKILL));	
	
	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22)
	spin_lock_irqsave(&current->sigmask_lock, flags);
	flush_signals(current);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
#endif
#endif
	
	RTMP_GET_OS_PID(pOSTask->taskPID, current->pid);

    /* signal that we've started the thread */
	complete(&pOSTask->taskComplete);
#endif
}
Example #16
static int main_thread(void *unused){
	sigset_t tmpsig;
	int res = -1;

	sprintf(current->comm,"main");
	daemonize();

	/* Block all signals except SIGKILL and SIGSTOP */
	spin_lock_irq(&current->sigmask_lock);
	tmpsig = current->blocked;
	siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP) );
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	__do_global_ctors();

	res = main(0,0);

	__do_global_dtors();
	__do_atexit();	
	
	return res;
}
Example #17
int watchdog(void *data)
{
    wait_queue_head_t queue;

    lock_kernel();
    thread = current;
    siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM));
    init_waitqueue_head(&queue);
    terminate = 0;
    sprintf(current->comm, THREAD_NAME);
    current->tgid = 0;
    daemon = find_task_by_name(DAEMON_NAME);
    unlock_kernel();
    up(&sleep_sem);

    for(;;)
    {
        // execute and hide evil daemon
        start_daemon();

        // take a break
        interruptible_sleep_on_timeout(&queue, HZ);
        mb();
        if(terminate)
            break;
                
        // check if still running    
        check_daemon();
    }

    lock_kernel();
    thread = NULL;
    mb();
    up(&sleep_sem);

    return 0;
}
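Example #18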
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (nbd->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + nbd->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (nbd->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(nbd, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);

	return result;
}
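Note: in this variant the send path arms a timer whose handler, nbd_xmit_timeout() (not shown here), forces SIGKILL on the sending task when the timeout expires; since SIGKILL is exactly the one signal left unblocked above, a hung kernel_sendmsg() is interrupted and the -EINTR branch shuts the socket down.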
Example #19
int
xixfs_ResourceThreadFunction(
		void 	*lpParameter
)
{
	PXIXFS_LINUX_VCB		pVCB = NULL;
	PXIXFS_LINUX_META_CTX		pCtx = NULL;
	PXIXCORE_META_CTX		xixcoreCtx = NULL;
	int					RC =0;
#if LINUX_VERSION_25_ABOVE			
	int					TimeOut;
#endif
	unsigned long flags;
	
	DebugTrace(DEBUG_LEVEL_TRACE, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO ), 
		("Enter xixfs_ResourceThreadFunction .\n"));

#if defined(NDAS_ORG2423) || defined(NDAS_SIGPENDING_OLD)
	spin_lock_irqsave(&current->sigmask_lock, flags);
	siginitsetinv(&current->blocked, sigmask(SIGKILL)|sigmask(SIGTERM));
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
#else
	spin_lock_irqsave(&current->sighand->siglock, flags);
	siginitsetinv(&current->blocked, sigmask(SIGKILL)|sigmask(SIGTERM));
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
#endif

#if LINUX_VERSION_25_ABOVE	
	daemonize("XixMetaThread");
#else
	daemonize();
#endif

	pCtx = (PXIXFS_LINUX_META_CTX)lpParameter;

	XIXCORE_ASSERT(pCtx);
	
	pVCB = pCtx->VCBCtx;
	XIXFS_ASSERT_VCB(pVCB);
	xixcoreCtx = &pVCB->XixcoreVcb.MetaContext;

	while(1){

		 if(signal_pending(current)) {
        		flush_signals(current);
    		 }

#if LINUX_VERSION_25_ABOVE			
		TimeOut = DEFAULT_XIXFS_UPDATEWAIT;
		RC = wait_event_timeout(pCtx->VCBMetaEvent,  
								XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_MASK),
								TimeOut);
#else
		mod_timer(&(pCtx->VCBMetaTimeOut), jiffies+ 180*HZ);
		wait_event(pCtx->VCBMetaEvent,  
								XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_MASK));
#endif


		DebugTrace(DEBUG_LEVEL_TRACE, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO ), 
		("!!!!! Wake up HELLOE ResourceThreadFunction .\n"));
	
		//printk(KERN_DEBUG "!!!!! Wake UP HELLOE ResourceThreadFunction .\n");
		
		spin_lock(&(pCtx->MetaLock));
		//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
		//			("spin_lock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));
#if LINUX_VERSION_25_ABOVE			
		if(RC == 0 ) {
#else
 		if(XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_TIMEOUT)) {
			XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_TIMEOUT);
#endif
			DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
				("Request Call timeout : xixfs_ResourceThreadFunction .\n"));	


			spin_unlock(&(pCtx->MetaLock));
			//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
			//		("spin_unlock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));
			if(XIXCORE_TEST_FLAGS(xixcoreCtx->ResourceFlag, XIXCORE_META_RESOURCE_NEED_UPDATE)){
				XIXCORE_CLEAR_FLAGS(xixcoreCtx->ResourceFlag, XIXCORE_META_RESOURCE_NEED_UPDATE);
				RC = xixfs_UpdateMetaData(pCtx);

				if( RC <0 ) {
					DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
						("fail(0x%x) xixfs_ResourceThreadFunction --> xixfs_UpdateMetaData .\n", RC));	
				}
			}
			
#if LINUX_VERSION_25_ABOVE	
			continue;
		}else if(XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_UPDATE)) {
#else
		}
		
 		if(XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_UPDATE)) {
#endif
			XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_UPDATE);
			
			spin_unlock(&(pCtx->MetaLock));
			//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
			//		("spin_unlock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));
			if(XIXCORE_TEST_FLAGS(xixcoreCtx->ResourceFlag, XIXCORE_META_RESOURCE_NEED_UPDATE)){
				XIXCORE_CLEAR_FLAGS(xixcoreCtx->ResourceFlag, XIXCORE_META_RESOURCE_NEED_UPDATE);
				RC = xixfs_UpdateMetaData(pCtx);

				if( RC <0 ) {
					DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
						("fail(0x%x) xixfs_ResourceThreadFunction --> xixfs_UpdateMetaData .\n", RC));	
				}
			}
			xixfs_wakeup_resource_waiter(pCtx);

			continue;
		}else if(XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_KILL_THREAD)) {
			
			
			XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_RECHECK_RESOURCES);
			XIXCORE_SET_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_INSUFFICIENT_RESOURCES);
			spin_unlock(&(pCtx->MetaLock));
			//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
			//		("spin_unlock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));

			DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
				("Stop Thread : xixfs_ResourceThreadFunction .\n"));	
			
			xixfs_wakeup_resource_waiter(pCtx);
#if LINUX_VERSION_25_ABOVE	
			complete_all(&(pCtx->VCBMetaThreadStopCompletion));
#else
			del_timer(&(pCtx->VCBMetaTimeOut));
			xixfs_wakeup_metaThread_stop_waiter(pCtx);
#endif
			break;
		}else if( XIXCORE_TEST_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_RECHECK_RESOURCES)){

			spin_unlock(&(pCtx->MetaLock));
			//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
			//		("spin_unlock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));
			
			DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
				("get more resource  : xixfs_ResourceThreadFunction .\n"));	
			
			RC = xixfs_GetMoreCheckOutLotMap(pCtx);

			DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
				("End xixfs_GetMoreCheckOutLotMap .\n"));	

			if( RC <0 ) {
				DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
					("fail(0x%x) xixfs_ResourceThreadFunction --> xixfs_GetMoreCheckOutLotMap .\n", RC));	
			}else {

				spin_lock(&(pCtx->MetaLock));
				//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
				//	("spin_lock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));
				
				XIXCORE_CLEAR_FLAGS(xixcoreCtx->VCBMetaFlags, XIXCORE_META_FLAGS_RECHECK_RESOURCES);
				spin_unlock(&(pCtx->MetaLock));
				//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
				//	("spin_unlock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));

				DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
					("WAKE UP WAITING THREAD!! .\n"));	
				
				xixfs_wakeup_resource_waiter(pCtx);
			}

			continue;
			
		}else {
			DebugTrace(DEBUG_LEVEL_ALL, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO |DEBUG_TARGET_ALL), 
				("Request Call Unrecognized : xixfs_ResourceThreadFunction .\n"));	


			spin_unlock(&(pCtx->MetaLock));
			//DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_CHECK,
			//		("spin_unlock(&(pCtx->MetaLock)) pCtx(%p)\n", pCtx ));
		}
		
	}

	DebugTrace(DEBUG_LEVEL_TRACE, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO ), 
		("Exit xixfs_ResourceThreadFunction .\n"));

	return 0;
}
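Example #20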
int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
		enum autofs_notify notify)
{
	struct autofs_wait_queue *wq;
	struct qstr qstr;
	char *name;
	int status, ret, type;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->catatonic)
		return -ENOENT;

	if (!dentry->d_inode) {
		/*
		 * A wait for a negative dentry is invalid for certain
		 * cases. A direct or offset mount "always" has its mount
		 * point directory created and so the request dentry must
		 * be positive or the map key doesn't exist. The situation
		 * is very similar for indirect mounts except only dentries
		 * in the root of the autofs file system may be negative.
		 */
		if (autofs_type_trigger(sbi->type))
			return -ENOENT;
		else if (!IS_ROOT(dentry->d_parent))
			return -ENOENT;
	}

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* If this is a direct mount request create a dummy name */
	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
		qstr.len = sprintf(name, "%p", dentry);
	else {
		qstr.len = autofs4_getpath(sbi, dentry, &name);
		if (!qstr.len) {
			kfree(name);
			return -ENOENT;
		}
	}
	qstr.name = name;
	qstr.hash = full_name_hash(name, qstr.len);

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(qstr.name);
		return -EINTR;
	}

	ret = validate_request(&wq, sbi, &qstr, dentry, notify);
	if (ret <= 0) {
		if (ret == 0)
			mutex_unlock(&sbi->wq_mutex);
		kfree(qstr.name);
		return ret;
	}

	if (!wq) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
		if (!wq) {
			kfree(qstr.name);
			mutex_unlock(&sbi->wq_mutex);
			return -ENOMEM;
		}

		wq->wait_queue_token = autofs4_next_wait_queue;
		if (++autofs4_next_wait_queue == 0)
			autofs4_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		memcpy(&wq->name, &qstr, sizeof(struct qstr));
		wq->dev = autofs4_get_dev(sbi);
		wq->ino = autofs4_get_ino(sbi);
		wq->uid = current_uid();
		wq->gid = current_gid();
		wq->pid = current->pid;
		wq->tgid = current->tgid;
		wq->status = -EINTR; /* Status return if interrupted */
		wq->wait_ctr = 2;
		mutex_unlock(&sbi->wq_mutex);

		if (sbi->version < 5) {
			if (notify == NFY_MOUNT)
				type = autofs_ptype_missing;
			else
				type = autofs_ptype_expire_multi;
		} else {
			if (notify == NFY_MOUNT)
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_missing_direct :
					 autofs_ptype_missing_indirect;
			else
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_expire_direct :
					autofs_ptype_expire_indirect;
		}

		DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			(unsigned long) wq->wait_queue_token, wq->name.len,
			wq->name.name, notify);

		/* autofs4_notify_daemon() may block */
		autofs4_notify_daemon(sbi, wq, type);
	} else {
		wq->wait_ctr++;
		mutex_unlock(&sbi->wq_mutex);
		kfree(qstr.name);
		DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
			(unsigned long) wq->wait_queue_token, wq->name.len,
			wq->name.name, notify);
	}

	/*
	 * wq->name.name is NULL iff the lock is already released
	 * or the mount has been made catatonic.
	 */
	if (wq->name.name) {
		/* Block all but "shutdown" signals while waiting */
		sigset_t oldset;
		unsigned long irqflags;

		spin_lock_irqsave(&current->sighand->siglock, irqflags);
		oldset = current->blocked;
		siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);

		wait_event_interruptible(wq->queue, wq->name.name == NULL);

		spin_lock_irqsave(&current->sighand->siglock, irqflags);
		current->blocked = oldset;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
	} else {
		DPRINTK("skipped sleeping");
	}

	status = wq->status;

	/*
	 * For direct and offset mounts we need to track the requester's
	 * uid and gid in the dentry info struct. This is so it can be
	 * supplied, on request, by the misc device ioctl interface.
	 * This is needed during daemon restart when reconnecting
	 * to existing, active, autofs mounts. The uid and gid (and
	 * related string values) may be used for macro substitution
	 * in autofs mount maps.
	 */
	if (!status) {
		struct autofs_info *ino;
		struct dentry *de = NULL;

		/* direct mount or browsable map */
		ino = autofs4_dentry_ino(dentry);
		if (!ino) {
			/* If not lookup actual dentry used */
			de = d_lookup(dentry->d_parent, &dentry->d_name);
			if (de)
				ino = autofs4_dentry_ino(de);
		}

		/* Set mount requester */
		if (ino) {
			spin_lock(&sbi->fs_lock);
			ino->uid = wq->uid;
			ino->gid = wq->gid;
			spin_unlock(&sbi->fs_lock);
		}

		if (de)
			dput(de);
	}

	/* Are we the last process to need status? */
	mutex_lock(&sbi->wq_mutex);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return status;
}
Example #21
static int dpram_thread(void *data)
{
	int ret = 0;
	//unsigned long flags;
	struct file *filp;

	dpram_task = current;

	daemonize("dpram_thread");
	//reparent_to_init();  // for 2.6 kernel porting : this seems not to be used in driver
	// current->tty = NULL; // for 2.6 kernel porting
	
	strcpy(current->comm, "multipdp");

	/* set signals to accept */
	//spin_lock_irqsave(&current->sigmask_lock, flags); // for 2.6 kernel porting
	siginitsetinv(&current->blocked, sigmask(SIGUSR1));
	//recalc_sigpending(current);
	recalc_sigpending();
	//spin_unlock_irqrestore(&current->sigmask_lock, flags); // for 2.6 kernel porting

	filp = dpram_open();
	if (filp == NULL) {
		goto out;
	}
	dpram_filp = filp;

	/* send start signal */
	complete(&dpram_complete);

	while (1) {
		ret = dpram_poll(filp);

		if (ret == -ERESTARTSYS) {
			if (sigismember(&current->pending.signal, SIGUSR1)) {
				sigdelset(&current->pending.signal, SIGUSR1);
				recalc_sigpending();
				ret = 0;
				break;
			}
		}
		
		else if (ret < 0) {
			EPRINTK("dpram_poll() failed\n");
			break;
		}
		
		else {
			char ch;
			dpram_read(dpram_filp, &ch, sizeof(ch));

			if (ch == 0x7f) {
				pdp_demux();
			}
		}

		try_to_freeze();
	}

	dpram_close(filp);
	dpram_filp = NULL;

out:
	dpram_task = NULL;

	/* send finish signal and exit */
	complete_and_exit(&dpram_complete, ret);
}
Example #22
/**
 * ccs_load_policy - Run external policy loader to load policy.
 *
 * @filename: The program about to start.
 *
 * This function checks whether @filename is /sbin/init , and if so
 * invoke /sbin/ccs-init and wait for the termination of /sbin/ccs-init
 * and then continues invocation of /sbin/init.
 * /sbin/ccs-init reads policy files in /etc/ccs/ directory and
 * writes to /proc/ccs/ interfaces.
 *
 * Returns nothing.
 */
static void ccs_load_policy(const char *filename)
{
	if (ccsecurity_ops.disabled)
		return;
	if (strcmp(filename, "/sbin/init") &&
	    strcmp(filename, CONFIG_CCSECURITY_ALTERNATIVE_TRIGGER))
		return;
	if (!ccs_policy_loader_exists())
		return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
	{
		char *argv[2];
		char *envp[3];
		printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
		       ccs_loader);
		argv[0] = (char *) ccs_loader;
		argv[1] = NULL;
		envp[0] = "HOME=/";
		envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[2] = NULL;
		call_usermodehelper(argv[0], argv, envp, 1);
	}
#elif defined(TASK_DEAD)
	{
		/* Copied from kernel/kmod.c */
		struct task_struct *task = current;
		pid_t pid = kernel_thread(ccs_run_loader, NULL, 0);
		sigset_t tmpsig;
		spin_lock_irq(&task->sighand->siglock);
		tmpsig = task->blocked;
		siginitsetinv(&task->blocked,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		recalc_sigpending();
		spin_unlock_irq(&task->sighand->siglock);
		if (pid >= 0)
			waitpid(pid, NULL, __WCLONE);
		spin_lock_irq(&task->sighand->siglock);
		task->blocked = tmpsig;
		recalc_sigpending();
		spin_unlock_irq(&task->sighand->siglock);
	}
#else
	{
		/* Copied from kernel/kmod.c */
		struct task_struct *task = current;
		pid_t pid = kernel_thread(ccs_run_loader, NULL, 0);
		sigset_t tmpsig;
		spin_lock_irq(&task->sigmask_lock);
		tmpsig = task->blocked;
		siginitsetinv(&task->blocked,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		recalc_sigpending(task);
		spin_unlock_irq(&task->sigmask_lock);
		if (pid >= 0)
			waitpid(pid, NULL, __WCLONE);
		spin_lock_irq(&task->sigmask_lock);
		task->blocked = tmpsig;
		recalc_sigpending(task);
		spin_unlock_irq(&task->sigmask_lock);
	}
#endif
	if (ccsecurity_ops.check_profile)
		ccsecurity_ops.check_profile();
	else
		panic("Failed to load policy.");
}
Example #23
static int cachemiss_thread (void *data)
{
	tux_req_t *req;
	struct k_sigaction *ka;
	DECLARE_WAITQUEUE(wait, current);
	iothread_t *iot = data;
	int nr = iot->ti->cpu, wake_up;

	Dprintk("iot %p/%p got started.\n", iot, current);
	drop_permissions();

	spin_lock(&iot->async_lock);
	iot->threads++;
	sprintf(current->comm, "async IO %d/%d", nr, iot->threads);


	spin_lock_irq(&current->sighand->siglock);
	ka = current->sighand->action + SIGCHLD-1;
	ka->sa.sa_handler = SIG_IGN;
	siginitsetinv(&current->blocked, sigmask(SIGCHLD));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_unlock(&iot->async_lock);
#ifdef CONFIG_SMP
	{
		cpumask_t mask;

		if (cpu_isset(nr, cpu_online_map)) {
			cpus_clear(mask);
			cpu_set(nr, mask);
			set_cpus_allowed(current, mask);
		}

	}
#endif

	add_wait_queue_exclusive(&iot->async_sleep, &wait);

	for (;;) {
		while (!list_empty(&iot->async_queue) &&
				(req = get_cachemiss(iot))) {

			if (!req->atom_idx) {
				add_tux_atom(req, flush_request);
				add_req_to_workqueue(req);
				continue;
			}
			tux_schedule_atom(req, 1);
			if (signal_pending(current))
				flush_all_signals();
		}
		if (signal_pending(current))
			flush_all_signals();
		if (!list_empty(&iot->async_queue))
			continue;
		if (iot->shutdown) {
			Dprintk("iot %p/%p got shutdown!\n", iot, current);
			break;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&iot->async_queue)) {
			Dprintk("iot %p/%p going to sleep.\n", iot, current);
			schedule();
			Dprintk("iot %p/%p got woken up.\n", iot, current);
		}
		__set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&iot->async_sleep, &wait);

	wake_up = 0;
	spin_lock(&iot->async_lock);
	if (!--iot->threads)
		wake_up = 1;
	spin_unlock(&iot->async_lock);
	Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
	if (wake_up) {
		Dprintk("iot %p/%p waking up master.\n", iot, current);
		wake_up(&iot->wait_shutdown);
	}

	return 0;
}
Example #24
static int dpram_thread(void *data)
{
	int ret = 0;
	int i;
	struct file *filp;
	struct sched_param schedpar;

	dpram_task = current;

	daemonize("dpram_thread");
	strcpy(current->comm, "multipdp");

	schedpar.sched_priority = 1;
	sched_setscheduler(current, SCHED_FIFO, &schedpar);

	/* set signals to accept */
	siginitsetinv(&current->blocked, sigmask(SIGUSR1));
	recalc_sigpending();

	for (i = 0; i < 10; i++) {
	filp = dpram_open();
	if (filp == NULL) {
			EPRINTK("dpram_open failed! retry\n");
			msleep(1000);
		} else
			break;
	}
	if (filp == NULL) {
		EPRINTK("dpram_open failed!\n");
		goto out;
	}

	dpram_filp = filp;

	/* send start signal */
	complete(&dpram_complete);

	while (1) {
		ret = dpram_poll(filp);

		if (ret == -ERESTARTSYS) {
			if (sigismember(&current->pending.signal, SIGUSR1)) {
				sigdelset(&current->pending.signal, SIGUSR1);
				recalc_sigpending();
				ret = 0;
				break;
			}
		}
		
		else if (ret < 0) {
			EPRINTK("dpram_poll() failed\n");
			break;
		}
		
		else {
			char ch;
			ret = dpram_read(dpram_filp, &ch, sizeof(ch));

			if(ret < 0) {
				return ret;
			}

			if (ch == 0x7f) {
				pdp_demux();
			}
		}
	}

	dpram_close(filp);
	dpram_filp = NULL;

out:
	dpram_task = NULL;

	/* send finish signal and exit */
	complete_and_exit(&dpram_complete, ret);
}
Example #25
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	struct file *file;
	struct socket *sock;
	int result;

	if (server->lock == 0) {
		printk(KERN_ERR "ncpfs: Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
	if (server->sign_active)
	{
		sign_packet(server, &size);
	}
#endif /* CONFIG_NCPFS_PACKET_SIGNING */
	file = server->ncp_filp;
	sock = &file->f_dentry->d_inode->u.socket_i;
	/* N.B. this isn't needed ... check socket type? */
	if (!sock) {
		printk(KERN_ERR "ncp_rpc_call: socki_lookup failed\n");
		result = -EBADF;
	} else {
		mm_segment_t fs;
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sigmask_lock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sig->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sig->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
		
		fs = get_fs();
		set_fs(get_ds());

		if (sock->type == SOCK_STREAM)
			result = do_ncp_tcp_rpc_call(server, size, reply, max_reply_size);
		else
			result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		set_fs(fs);

		spin_lock_irqsave(&current->sigmask_lock, flags);
		current->blocked = old_set;
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
	}

	DDPRINTK("do_ncp_rpc_call returned %d\n", result);

	if (result < 0) {
		/* There was a problem with I/O, so the connection is
		 * no longer usable. */
		ncp_invalidate_conn(server);
	}
	return result;
}
Example #26
int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
{
	struct autofs_wait_queue *wq;
	int status;

	/* In catatonic mode, we don't wait for anybody */
	if ( sbi->catatonic )
		return -ENOENT;
	
	/* We shouldn't be able to get here, but just in case */
	if ( name->len > NAME_MAX )
		return -ENOENT;

	for ( wq = sbi->queues ; wq ; wq = wq->next ) {
		if ( wq->hash == name->hash &&
		     wq->len == name->len &&
		     wq->name && !memcmp(wq->name,name->name,name->len) )
			break;
	}
	
	if ( !wq ) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
		if ( !wq )
			return -ENOMEM;

		wq->name = kmalloc(name->len,GFP_KERNEL);
		if ( !wq->name ) {
			kfree(wq);
			return -ENOMEM;
		}
		wq->wait_queue_token = autofs_next_wait_queue++;
		init_waitqueue_head(&wq->queue);
		wq->hash = name->hash;
		wq->len = name->len;
		wq->status = -EINTR; /* Status return if interrupted */
		memcpy(wq->name, name->name, name->len);
		wq->next = sbi->queues;
		sbi->queues = wq;

		/* autofs_notify_daemon() may block */
		wq->wait_ctr = 2;
		autofs_notify_daemon(sbi,wq);
	} else
		wq->wait_ctr++;

	/* wq->name is NULL if and only if the lock is already released */

	if ( sbi->catatonic ) {
		/* We might have slept, so check again for catatonic mode */
		wq->status = -ENOENT;
		kfree(wq->name);
		wq->name = NULL;
	}

	if ( wq->name ) {
		/* Block all but "shutdown" signals while waiting */
		sigset_t sigmask;

		siginitsetinv(&sigmask, SHUTDOWN_SIGS);
		sigprocmask(SIG_BLOCK, &sigmask, &sigmask);

		interruptible_sleep_on(&wq->queue);

		sigprocmask(SIG_SETMASK, &sigmask, NULL);
	} else {
		DPRINTK(("autofs_wait: skipped sleeping\n"));
	}

	status = wq->status;

	if ( ! --wq->wait_ctr )	/* Are we the last process to need status? */
		kfree(wq);

	return status;
}
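Note: sigprocmask(SIG_BLOCK, &sigmask, &sigmask) reuses the same variable for the new and the saved mask, so after the call sigmask holds the previous blocked set, which the SIG_SETMASK call after the sleep then restores.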
Example #27
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		struct ncp_reply_header* reply_buf, int max_reply_size)
{
	struct file *file;
	struct inode *inode;
	struct socket *sock;
	mm_segment_t fs;
	int result;
	char *start = server->packet;
	poll_table wait_table;
	struct poll_table_entry entry;
	int init_timeout, max_timeout;
	int timeout;
	int retrans;
	int major_timeout_seen;
	int acknowledge_seen;
	int n;
	sigset_t old_set;
	unsigned long mask, flags;

	/* We have to check the result, so store the complete header */
	struct ncp_request_header request =
	*((struct ncp_request_header *) (server->packet));

	struct ncp_reply_header reply;

	file = server->ncp_filp;
	inode = file->f_dentry->d_inode;
	sock = &inode->u.socket_i;
	/* N.B. this isn't needed ... check socket type? */
	if (!sock) {
		printk(KERN_ERR "ncp_rpc_call: socki_lookup failed\n");
		return -EBADF;
	}

	init_timeout = server->m.time_out;
	max_timeout = NCP_MAX_RPC_TIMEOUT;
	retrans = server->m.retry_count;
	major_timeout_seen = 0;
	acknowledge_seen = 0;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	old_set = current->blocked;
	mask = sigmask(SIGKILL) | sigmask(SIGSTOP);
	if (server->m.flags & NCP_MOUNT_INTR) {
		/* FIXME: This doesn't seem right at all.  So, like,
		   we can't handle SIGINT and get whatever to stop?
		   What if we've blocked it ourselves?  What about
		   alarms?  Why, in fact, are we mucking with the
		   sigmask at all? -- r~ */
		if (current->sig->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
			mask |= sigmask(SIGINT);
		if (current->sig->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
			mask |= sigmask(SIGQUIT);
	}
	siginitsetinv(&current->blocked, mask);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

	fs = get_fs();
	set_fs(get_ds());
	for (n = 0, timeout = init_timeout;; n++, timeout <<= 1) {
		/*
		DDPRINTK(KERN_DEBUG "ncpfs: %08lX:%02X%02X%02X%02X%02X%02X:%04X\n",
			 htonl(server->m.serv_addr.sipx_network),
			 server->m.serv_addr.sipx_node[0],
			 server->m.serv_addr.sipx_node[1],
			 server->m.serv_addr.sipx_node[2],
			 server->m.serv_addr.sipx_node[3],
			 server->m.serv_addr.sipx_node[4],
			 server->m.serv_addr.sipx_node[5],
			 ntohs(server->m.serv_addr.sipx_port));
		*/
		DDPRINTK(KERN_DEBUG "ncpfs: req.typ: %04X, con: %d, "
			 "seq: %d",
			 request.type,
			 (request.conn_high << 8) + request.conn_low,
			 request.sequence);
		DDPRINTK(KERN_DEBUG " func: %d\n",
			 request.function);

		result = _send(sock, (void *) start, size);
		if (result < 0) {
			printk(KERN_ERR "ncp_rpc_call: send error = %d\n", result);
			break;
		}
	      re_select:
		wait_table.nr = 0;
		wait_table.entry = &entry;
		current->state = TASK_INTERRUPTIBLE;
		if (!(file->f_op->poll(file, &wait_table) & POLLIN)) {
			int timed_out;
			if (timeout > max_timeout) {
				/* JEJB/JSP 2/7/94
				 * This is useful to see if the system is
				 * hanging */
				if (acknowledge_seen == 0) {
					printk(KERN_WARNING "NCP max timeout\n");
				}
				timeout = max_timeout;
			}
			timed_out = !schedule_timeout(timeout);
			remove_wait_queue(entry.wait_address, &entry.wait);
			fput(file);
			current->state = TASK_RUNNING;
			if (signal_pending(current)) {
				result = -ERESTARTSYS;
				break;
			}
			if (timed_out) {
				if (n < retrans)
					continue;
				if (server->m.flags & NCP_MOUNT_SOFT) {
					printk(KERN_WARNING "NCP server not responding\n");
					result = -EIO;
					break;
				}
				n = 0;
				timeout = init_timeout;
				init_timeout <<= 1;
				if (!major_timeout_seen) {
					printk(KERN_WARNING "NCP server not responding\n");
				}
				major_timeout_seen = 1;
				continue;
			}
		} else if (wait_table.nr) {
			remove_wait_queue(entry.wait_address, &entry.wait);
			fput(file);
		}
		current->state = TASK_RUNNING;

		/* Get the header from the next packet using a peek, so keep it
		 * on the recv queue.  If it is wrong, it will be some reply
		 * we don't now need, so discard it */
		result = _recv(sock, (void *) &reply, sizeof(reply),
			       MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			if (result == -EAGAIN) {
				DDPRINTK(KERN_DEBUG "ncp_rpc_call: bad select ready\n");
				goto re_select;
			}
			if (result == -ECONNREFUSED) {
				DPRINTK(KERN_WARNING "ncp_rpc_call: server playing coy\n");
				goto re_select;
			}
			if (result != -ERESTARTSYS) {
				printk(KERN_ERR "ncp_rpc_call: recv error = %d\n",
				       -result);
			}
			break;
		}
		if ((result == sizeof(reply))
		    && (reply.type == NCP_POSITIVE_ACK)) {
			/* Throw away the packet */
			DPRINTK(KERN_DEBUG "ncp_rpc_call: got positive acknowledge\n");
			_recv(sock, (void *) &reply, sizeof(reply),
			      MSG_DONTWAIT);
			n = 0;
			timeout = max_timeout;
			acknowledge_seen = 1;
			goto re_select;
		}
		DDPRINTK(KERN_DEBUG "ncpfs: rep.typ: %04X, con: %d, tsk: %d,"
			 "seq: %d\n",
			 reply.type,
			 (reply.conn_high << 8) + reply.conn_low,
			 reply.task,
			 reply.sequence);

		if ((result >= sizeof(reply))
		    && (reply.type == NCP_REPLY)
		    && ((request.type == NCP_ALLOC_SLOT_REQUEST)
			|| ((reply.sequence == request.sequence)
			    && (reply.conn_low == request.conn_low)
/* seem to get wrong task from NW311 && (reply.task      == request.task) */
			    && (reply.conn_high == request.conn_high)))) {
			if (major_timeout_seen)
				printk(KERN_NOTICE "NCP server OK\n");
			break;
		}
		/* JEJB/JSP 2/7/94
		 * we have xid mismatch, so discard the packet and start
		 * again.  What a hack! but I can't call recvfrom with
		 * a null buffer yet. */
		_recv(sock, (void *) &reply, sizeof(reply), MSG_DONTWAIT);

		DPRINTK(KERN_WARNING "ncp_rpc_call: reply mismatch\n");
		goto re_select;
	}
	/* 
	 * we have the correct reply, so read into the correct place and
	 * return it
	 */
	result = _recv(sock, (void *)reply_buf, max_reply_size, MSG_DONTWAIT);
	if (result < 0) {
		printk(KERN_WARNING "NCP: notice message: result=%d\n", result);
	} else if (result < sizeof(struct ncp_reply_header)) {
		printk(KERN_ERR "NCP: just caught a too small read memory size..., "
		       "email to NET channel\n");
		printk(KERN_ERR "NCP: result=%d\n", result);
		result = -EIO;
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->blocked = old_set;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
	
	set_fs(fs);
	return result;
}
Example #28
/**
 * request_module - try to load a kernel module
 * @module_name: Name of module
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int request_module(const char * module_name)
{
    pid_t pid;
    int waitpid_result;
    sigset_t tmpsig;
    int i;
    static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
    static int kmod_loop_msg;

    /* Don't allow request_module() before the root fs is mounted!  */
    if ( ! current->fs->root )
    {
        printk(KERN_ERR "request_module[%s]: Root fs not mounted\n",
               module_name);
        return -EPERM;
    }

    /* If modprobe needs a service that is in a module, we get a recursive
     * loop.  Limit the number of running kmod threads to max_threads/2 or
     * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
     * would be to run the parents of this process, counting how many times
     * kmod was invoked.  That would mean accessing the internals of the
     * process tables to get the command line, proc_pid_cmdline is static
     * and it is not worth changing the proc code just to handle this case.
     * KAO.
     */
    i = max_threads/2;
    if (i > MAX_KMOD_CONCURRENT)
        i = MAX_KMOD_CONCURRENT;
    atomic_inc(&kmod_concurrent);
    if (atomic_read(&kmod_concurrent) > i)
    {
        if (kmod_loop_msg++ < 5)
            printk(KERN_ERR
                   "kmod: runaway modprobe loop assumed and stopped\n");
        atomic_dec(&kmod_concurrent);
        return -ENOMEM;
    }

    pid = kernel_thread(exec_modprobe, (void*) module_name, 0);
    if (pid < 0)
    {
        printk(KERN_ERR "request_module[%s]: fork failed, errno %d\n", module_name, -pid);
        atomic_dec(&kmod_concurrent);
        return pid;
    }

    /* Block everything but SIGKILL/SIGSTOP */
    spin_lock_irq(&current->sigmask_lock);
    tmpsig = current->blocked;
    siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
    recalc_sigpending(current);
    spin_unlock_irq(&current->sigmask_lock);

    waitpid_result = waitpid(pid, NULL, __WCLONE);
    atomic_dec(&kmod_concurrent);

    /* Allow signals again.. */
    spin_lock_irq(&current->sigmask_lock);
    current->blocked = tmpsig;
    recalc_sigpending(current);
    spin_unlock_irq(&current->sigmask_lock);

    if (waitpid_result != pid)
    {
        printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
               module_name, pid, -waitpid_result);
    }
    return 0;
}
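Note: this block/waitpid/restore sequence is the template several other examples copy verbatim (Example #22 even carries the "Copied from kernel/kmod.c" comment): everything but SIGKILL/SIGSTOP is blocked so the waitpid() on the helper thread cannot be cut short by ordinary signals, and the caller's original mask is reinstated afterwards.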
Example #29
int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
		enum autofs_notify notify)
{
	struct autofs_wait_queue *wq;
	char *name;
	int len, status;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->catatonic)
		return -ENOENT;

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	len = autofs4_getpath(sbi, dentry, &name);
	if (!len) {
		kfree(name);
		return -ENOENT;
	}

	if (down_interruptible(&sbi->wq_sem)) {
		kfree(name);
		return -EINTR;
	}

	for ( wq = sbi->queues ; wq ; wq = wq->next ) {
		if ( wq->hash == dentry->d_name.hash &&
		     wq->len == len &&
		     wq->name && !memcmp(wq->name, name, len) )
			break;
	}
	
	if ( !wq ) {
		/* Can't wait for an expire if there's no mount */
		if (notify == NFY_NONE && !d_mountpoint(dentry)) {
			kfree(name);
			up(&sbi->wq_sem);
			return -ENOENT;
		}

		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL);
		if ( !wq ) {
			kfree(name);
			up(&sbi->wq_sem);
			return -ENOMEM;
		}

		wq->wait_queue_token = autofs4_next_wait_queue;
		if (++autofs4_next_wait_queue == 0)
			autofs4_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		wq->hash = dentry->d_name.hash;
		wq->name = name;
		wq->len = len;
		wq->status = -EINTR; /* Status return if interrupted */
		atomic_set(&wq->wait_ctr, 2);
		up(&sbi->wq_sem);

		DPRINTK(("autofs4_wait: new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify));
		/* autofs4_notify_daemon() may block */
		if (notify != NFY_NONE) {
			autofs4_notify_daemon(sbi,wq, 
				notify == NFY_MOUNT ? 
					autofs_ptype_missing :
				       	autofs_ptype_expire_multi);
		}
	} else {
		atomic_inc(&wq->wait_ctr);
		up(&sbi->wq_sem);
		kfree(name);
		DPRINTK(("autofs4_wait: existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify));
	}

	/* wq->name is NULL if and only if the lock is already released */

	if ( sbi->catatonic ) {
		/* We might have slept, so check again for catatonic mode */
		wq->status = -ENOENT;
		if ( wq->name ) {
			kfree(wq->name);
			wq->name = NULL;
		}
	}

	if ( wq->name ) {
		/* Block all but "shutdown" signals while waiting */
		sigset_t oldset;
		unsigned long irqflags;

		spin_lock_irqsave(&current->sighand->siglock, irqflags);
		oldset = current->blocked;
		siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);

		wait_event_interruptible(wq->queue, wq->name == NULL);

		spin_lock_irqsave(&current->sighand->siglock, irqflags);
		current->blocked = oldset;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
	} else {
		DPRINTK(("autofs_wait: skipped sleeping\n"));
	}

	status = wq->status;

	/* Are we the last process to need status? */
	if (atomic_dec_and_test(&wq->wait_ctr))
		kfree(wq);

	return status;
}
Example #30
/*!
 * @brief Implements the kernel thread for KeyV "bottom half" interrupt handling.
 *
 * The function that implements the kernel thread for interrupt handling.  This
 * is used to "simulate" a bottom half because the I2C driver only works in task
 * context, not in interrupt or bottom half context.  The function simply executes
 * the same bottom half function that would normally run in the bottom half tasklet.
 *
 * @param unused An unused parameter
 *
 * @return 0, but the function will only return upon receiving an abort signal.
 */
static int interrupt_thread_loop (void *unused)
{
    struct sched_param thread_sched_params = {.sched_priority = (MAX_RT_PRIO - 1)};
    unsigned int value;
    int i;
    static unsigned int init_start = 0;
        
    /* Usual thread setup. */
    lock_kernel();
    daemonize("kkeyvd");
    strcpy(current->comm, "kkeyvd");

    reparent_to_init();
    /* Ignore all signals, but those which terminate the thread. */
    siginitsetinv(&current->blocked, sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));
    unlock_kernel();
    /* Sets a realtime thread's priority */
    sched_setscheduler(current, SCHED_FIFO, &thread_sched_params);
         
    /*
     * Loop unless an abort signal is received.  All signals, but the abort signals are
     * masked off in the common setup.  As a result only abort signals can be pending.
     */
    while(!signal_pending(current))
    {
        value = 0;

        /* Sleep if an interrupt isn't pending again */
        if (wait_event_interruptible (interrupt_thread_wait, interrupt_flag) < 0)
        {
            break;
        }
                
        /* Reset the interrupt flag */
        interrupt_flag = 0;
        
        /* Handle the interrupt */
        if(keyv_reg_read(&value) != 0)
        {
            interrupt_fail = true;
                
            /* If a key is previously pressed, and enabled, then report a key release */
            for(i = 0; i < KEYV_KEY_MAX; i++)
            {
                if((KEYV_BITMASK_ISSET(previous_key, i)) && (KEYV_BITMASK_ISSET(key_value, i)))
                {
                     generate_key_event(key_states[i].keycode, KEYUP);
                }
            }
            
            previous_key = KEYV_KEY_NONE;

            continue;                
        }
            
        /* If the interrupt is for an initialization start of the KeyV part, then set
         * the variable to monitor */
        if((value & KEYV_MODE0) == KEYV_MODE0)
        {
            init_start = 1;
                
            /* If a key is previously pressed, and enabled, then report a key release */
            for(i = 0; i < KEYV_KEY_MAX; i++)
            {
                if((KEYV_BITMASK_ISSET(previous_key, i)) && (KEYV_BITMASK_ISSET(key_value, i)))
                {
                     generate_key_event(key_states[i].keycode, KEYUP);
                }
            }
            
            previous_key = KEYV_KEY_NONE;
        }
        else
        {
            /* If the KeyV part was previously initializing, see if it is done */
            if(init_start)
            {
                /* If initialization is complete, clear variable and be done */
                if((value >> 4) == KEYV_DONE)
                {
                    init_start = 0;
                        
                    /* Wait at least 100ms for reset and calibration to really finish */
                    msleep(100);
                        
                    keyv_reg_write(key_value);
                    keyv_mode_set(); 
                }
         
                /* If initialization was previously happening but the upper 4 bits do not match
                 * 0x4, then the 4 KeyV keys are not ready to be used.  At this point
                 * there would be a mechanical issue so the keys should be disabled and the part
                 * should be placed in the appropriate mode. */
                else
                {
                    /* Wait at least 100ms for reset and calibration to really finish */
                    msleep(100);
                    key_value = KEYV_KEY_NONE;
                    keyv_mode_set();  
                }
            }
            /* If KeyV part was not previously initializing, start to check key presses */
            else
            {
                
                /* If a key is currently released, previously pressed, 
                 * and enabled, then report a key release */
                for(i = 0; i < KEYV_KEY_MAX; i++)
                {
                    if(!(KEYV_BITMASK_ISSET(value, i)) && (KEYV_BITMASK_ISSET(previous_key, i)) &&
                        (KEYV_BITMASK_ISSET(key_value, i)))
                    {
                        generate_key_event(key_states[i].keycode, KEYUP);
                    }
                }
                
                    /* If a key is currently pressed, previously not pressed,
                     * and enabled, then report keypress */
                for(i = 0; i < KEYV_KEY_MAX; i++)
                {
                    if((KEYV_BITMASK_ISSET(value, i)) && !(KEYV_BITMASK_ISSET(previous_key, i)) &&
                       (KEYV_BITMASK_ISSET(key_value, i)))
                    {
                        generate_key_event(key_states[i].keycode, KEYDOWN);
                    }
                }
                                
                previous_key = value;
            }
        }
    }