Example no. 1
0
static void
kerext_process_swap(void *data) {
	struct kerargs_process_swap		*kap = data;
	PROCESS							*parent = kap->parent;
	PROCESS							*child = kap->child;
	CONNECT							*proc_cop;
	pid_t							pid;
	int								i;
	DEBUG							*swap;

	lock_kernel();
	
	/* Swap process containers */
	process_vector.vector[PINDEX(parent->pid)] = child;
	process_vector.vector[PINDEX(child->pid)] = parent;

	if(child->parent != parent) crash();
	process_swap(child);

	pid = parent->pid;
	parent->pid = child->pid;
	child->pid = pid;

	if(parent->ocb_list || child->ocb_list) {
		void 		*ocb_list;

		ocb_list = parent->ocb_list;
		parent->ocb_list = child->ocb_list;
		child->ocb_list = ocb_list;
	}

	if(parent->alarm) {
		TIMER	*alarm = parent->alarm;

		parent->alarm = 0;
		alarm->thread = child->valid_thp;
		child->alarm = alarm;
	}

	/* Flags to inherit */
#define FLAGS	(_NTO_PF_WAITINFO | \
				_NTO_PF_WAITDONE | \
				_NTO_PF_SLEADER | \
				_NTO_PF_ORPHAN_PGRP | \
				_NTO_PF_NOCLDSTOP | \
				_NTO_PF_PTRACED | \
				_NTO_PF_NOZOMBIE)
	i = parent->flags;
    parent->flags = (parent->flags & ~FLAGS) | (child->flags & FLAGS);
    child->flags = (child->flags & ~FLAGS) | (i & FLAGS);
#undef FLAGS

	// Find the child's connection to proc (It's always the first chancon entry)
	proc_cop = vector_lookup(&child->chancons, SYSMGR_COID & ~_NTO_SIDE_CHANNEL);

#ifndef NDEBUG
	// There should be two links: one from the Send() and one from the ConnectAttach()
	if(!proc_cop || !proc_cop->channel || proc_cop->links != 2) crash();
	if(proc_cop->un.lcl.pid != SYSMGR_PID || proc_cop->un.lcl.chid != SYSMGR_CHID) crash();
	if(parent->chancons.nentries != parent->chancons.nfree) crash();
	if(child->chancons.nentries != child->chancons.nfree + 1) crash();
	if(child->fdcons.nentries != child->fdcons.nfree) crash();
#endif

	for(i = 0 ; i < parent->fdcons.nentries ; ++i) {
		CONNECT							*cop;

		if((cop = vector_rem(&parent->fdcons, i))) {
			if(vector_add(&child->fdcons, cop, i) != i) {
				crash();
			}

			parent->nfds--;
			child->nfds++;

			cop->process = child;

			// Check if this connects to proc's channel, if so, share it...
			if(proc_cop && cop->channel == proc_cop->channel) {
				vector_rem(&child->chancons, SYSMGR_COID & ~_NTO_SIDE_CHANNEL);
				vector_add(&child->chancons, cop, SYSMGR_COID & ~_NTO_SIDE_CHANNEL);
				cop->links++;
				proc_cop->links--;
				proc_cop = 0;
			}
		}
	}

	swap = child->debugger;
	if((swap != NULL) && (swap->process == child)) {
		swap->process = parent;
	}
	swap = parent->debugger;
	if((swap != NULL) && (swap->process == parent)) {
		swap->process = child;
	}
	parent->debugger = child->debugger;
	child->debugger = swap;

	SETKSTATUS(actives[KERNCPU], 0);
}
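
The FLAGS exchange above is the standard idiom for swapping only the masked bits of two words while leaving the rest untouched; a minimal stand-alone sketch (toy values, not part of the original source):

#include <assert.h>

/* Swap only the bits selected by `mask` between *a and *b,
 * leaving all other bits of each word unchanged. */
static void swap_masked_bits(unsigned *a, unsigned *b, unsigned mask)
{
	unsigned t = *a;

	*a = (*a & ~mask) | (*b & mask);
	*b = (*b & ~mask) | (t  & mask);
}

int main(void)
{
	unsigned x = 0xF0u, y = 0x0Fu;

	swap_masked_bits(&x, &y, 0x3Cu);	/* exchange bits 2..5 only */
	assert(x == 0xCCu && y == 0x33u);
	return 0;
}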
Example no. 2
0
static int swap_out(unsigned int priority, int gfp_mask, unsigned long idle_time)
{
	struct task_struct * p;
	int counter;
	int __ret = 0;

	lock_kernel();
	/* 
	 * We make one or two passes through the task list, indexed by 
	 * assign = {0, 1}:
	 *   Pass 1: select the swappable task with maximal RSS that has
	 *         not yet been swapped out. 
	 *   Pass 2: re-assign each task's swap_cnt from its rss, then select as above.
	 *
	 * With this approach, there's no need to remember the last task
	 * swapped out.  If the swap-out fails, we clear swap_cnt so the 
	 * task won't be selected again until all others have been tried.
	 *
	 * Think of swap_cnt as a "shadow rss" - it tells us which process
	 * we want to page out (always try largest first).
	 */
	counter = (nr_threads << SWAP_SHIFT) >> priority;
	if (counter < 1)
		counter = 1;

	for (; counter >= 0; counter--) {
		unsigned long max_cnt = 0;
		struct mm_struct *best = NULL;
		int pid = 0;
		int assign = 0;
		int found_task = 0;
	select:
		read_lock(&tasklist_lock);
		p = init_task.next_task;
		for (; p != &init_task; p = p->next_task) {
			struct mm_struct *mm = p->mm;
			if (!p->swappable || !mm)
				continue;
	 		if (mm->rss <= 0)
				continue;
			/* Skip tasks which haven't slept long enough yet when idle-swapping. */
			if (idle_time && !assign && (!(p->state & TASK_INTERRUPTIBLE) ||
					time_after(p->sleep_time + idle_time * HZ, jiffies)))
				continue;
			found_task++;
			/* Refresh swap_cnt? */
			if (assign == 1) {
				mm->swap_cnt = (mm->rss >> SWAP_SHIFT);
				if (mm->swap_cnt < SWAP_MIN)
					mm->swap_cnt = SWAP_MIN;
			}
			if (mm->swap_cnt > max_cnt) {
				max_cnt = mm->swap_cnt;
				best = mm;
				pid = p->pid;
			}
		}
		read_unlock(&tasklist_lock);
		if (!best) {
			if (!assign && found_task > 0) {
				assign = 1;
				goto select;
			}
			goto out;
		} else {
			int ret;

			atomic_inc(&best->mm_count);
			ret = swap_out_mm(best, gfp_mask);
			mmdrop(best);

			__ret = 1;
			goto out;
		}
	}
out:
	unlock_kernel();
	return __ret;
}
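
The assign = {0, 1} structure described in the comment can be shown in isolation; a toy sketch with a hypothetical task array (SHIFT, MIN_CNT and struct toy_task are illustrative stand-ins for SWAP_SHIFT, SWAP_MIN and struct mm_struct):

#include <stddef.h>

#define SHIFT   4	/* stand-in for SWAP_SHIFT */
#define MIN_CNT 2	/* stand-in for SWAP_MIN */

struct toy_task { long rss; long swap_cnt; };

/* Pass 1: pick the task with the largest swap_cnt; if every counter is
 * exhausted, pass 2 refreshes the "shadow rss" and selects again. */
static struct toy_task *pick_victim(struct toy_task *t, int n)
{
	long max_cnt = 0;
	struct toy_task *best = NULL;
	int assign = 0, i;

select:
	for (i = 0; i < n; i++) {
		if (t[i].rss <= 0)
			continue;
		if (assign == 1) {
			t[i].swap_cnt = t[i].rss >> SHIFT;
			if (t[i].swap_cnt < MIN_CNT)
				t[i].swap_cnt = MIN_CNT;
		}
		if (t[i].swap_cnt > max_cnt) {
			max_cnt = t[i].swap_cnt;
			best = &t[i];
		}
	}
	if (!best && !assign) {
		assign = 1;
		goto select;
	}
	return best;	/* NULL if there is no swappable task at all */
}

int main(void)
{
	struct toy_task t[3] = { { 64, 0 }, { 256, 0 }, { 0, 0 } };

	/* all counters start at 0, so pass 2 refreshes them from rss
	 * and the 256-page task is selected */
	return pick_victim(t, 3) == &t[1] ? 0 : 1;
}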
Example no. 3
0
/**
 *	iowarrior_ioctl
 */
static long iowarrior_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	struct iowarrior *dev = NULL;
	__u8 *buffer;
	__u8 __user *user_buffer;
	int retval;
	int io_res;		/* checks for bytes read/written and copy_to/from_user results */

	dev = (struct iowarrior *)file->private_data;
	if (dev == NULL) {
		return -ENODEV;
	}

	buffer = kzalloc(dev->report_size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* lock this object */
	lock_kernel();
	mutex_lock(&dev->mutex);

	/* verify that the device wasn't unplugged */
	if (!dev->present) {
		retval = -ENODEV;
		goto error_out;
	}

	dbg("%s - minor %d, cmd 0x%.4x, arg %ld", __func__, dev->minor, cmd,
	    arg);

	retval = 0;
	io_res = 0;
	switch (cmd) {
	case IOW_WRITE:
		if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
			user_buffer = (__u8 __user *)arg;
			io_res = copy_from_user(buffer, user_buffer,
						dev->report_size);
			if (io_res) {
				retval = -EFAULT;
			} else {
				io_res = usb_set_report(dev->interface, 2, 0,
							buffer,
							dev->report_size);
				if (io_res < 0)
					retval = io_res;
			}
		} else {
			retval = -EINVAL;
			dev_err(&dev->interface->dev,
				"ioctl 'IOW_WRITE' is not supported for product=0x%x.\n",
				dev->product_id);
		}
		break;
	case IOW_READ:
		user_buffer = (__u8 __user *)arg;
		io_res = usb_get_report(dev->udev,
					dev->interface->cur_altsetting, 1, 0,
					buffer, dev->report_size);
		if (io_res < 0)
			retval = io_res;
		else {
			io_res = copy_to_user(user_buffer, buffer, dev->report_size);
			/* copy_to_user() returns the number of bytes NOT copied */
			if (io_res)
				retval = -EFAULT;
		}
		break;
	case IOW_GETINFO:
		{
			/* Report available information for the device */
			struct iowarrior_info info;
			/* needed for power consumption */
			struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;

			memset(&info, 0, sizeof(info));
			/* directly from the descriptor */
			info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
			info.product = dev->product_id;
			info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);

			/* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
			info.speed = dev->udev->speed;
			info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
			info.report_size = dev->report_size;

			/* serial number string has been read earlier 8 chars or empty string */
			memcpy(info.serial, dev->chip_serial,
			       sizeof(dev->chip_serial));
			if (cfg_descriptor == NULL) {
				info.power = -1;	/* no information available */
			} else {
				/* the MaxPower is stored in units of 2mA to make it fit into a byte-value */
				info.power = cfg_descriptor->bMaxPower * 2;
			}
			io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
					 sizeof(struct iowarrior_info));
			/* copy_to_user() returns the number of bytes NOT copied */
			if (io_res)
				retval = -EFAULT;
			break;
		}
	default:
		/* return that we did not understand this ioctl call */
		retval = -ENOTTY;
		break;
	}
error_out:
	/* unlock the device */
	mutex_unlock(&dev->mutex);
	unlock_kernel();
	kfree(buffer);
	return retval;
}
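
For context, a minimal user-space sketch of driving these ioctls; it assumes IOW_WRITE/IOW_READ come from the kernel's iowarrior header, and the device node path and 8-byte report size are hypothetical stand-ins for the attached hardware:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/iowarrior.h>

int main(void)
{
	unsigned char report[8];	/* assumed report size */
	int fd = open("/dev/usb/iowarrior0", O_RDWR);	/* path may differ */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(report, 0xFF, sizeof(report));
	if (ioctl(fd, IOW_WRITE, report) < 0)	/* drive all pins high */
		perror("IOW_WRITE");
	if (ioctl(fd, IOW_READ, report) == 0)	/* read the current pin state */
		printf("port 0 = 0x%02x\n", report[0]);
	close(fd);
	return 0;
}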
Example no. 4
0
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
    int		err = 0;
    unsigned long grace_period_expire;

    /* Lock module and set up kernel thread */
    /* lockd_up is waiting for us to startup, so will
     * be holding a reference to this module, so it
     * is safe to just claim another reference
     */
    __module_get(THIS_MODULE);
    lock_kernel();

    /*
     * Let our maker know we're running.
     */
    nlmsvc_pid = current->pid;
    nlmsvc_serv = rqstp->rq_server;
    complete(&lockd_start_done);

    daemonize("lockd");

    /* Process request with signals blocked, but allow SIGKILL.  */
    allow_signal(SIGKILL);

    /* kick rpciod */
    rpciod_up();

    dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

    if (!nlm_timeout)
        nlm_timeout = LOCKD_DFLT_TIMEO;
    nlmsvc_timeout = nlm_timeout * HZ;

    grace_period_expire = set_grace_period();

    /*
     * The main request loop. We don't terminate until the last
     * NFS mount or NFS daemon has gone away, and we've been sent a
     * signal, or else another process has taken over our job.
     */
    while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
        long timeout = MAX_SCHEDULE_TIMEOUT;
        char buf[RPC_MAX_ADDRBUFLEN];

        if (signalled()) {
            flush_signals(current);
            if (nlmsvc_ops) {
                nlmsvc_invalidate_all();
                grace_period_expire = set_grace_period();
            }
        }

        /*
         * Retry any blocked locks that have been notified by
         * the VFS. Don't do this during grace period.
         * (Theoretically, there shouldn't even be blocked locks
         * during grace period).
         */
        if (!nlmsvc_grace_period) {
            timeout = nlmsvc_retry_blocked();
        } else if (time_before(grace_period_expire, jiffies))
            clear_grace_period();

        /*
         * Find a socket with data available and call its
         * recvfrom routine.
         */
        err = svc_recv(rqstp, timeout);
        if (err == -EAGAIN || err == -EINTR)
            continue;
        if (err < 0) {
            printk(KERN_WARNING
                   "lockd: terminating on error %d\n",
                   -err);
            break;
        }

        dprintk("lockd: request from %s\n",
                svc_print_addr(rqstp, buf, sizeof(buf)));

        svc_process(rqstp);
    }

    flush_signals(current);

    /*
     * Check whether there's a new lockd process before
     * shutting down the hosts and clearing the slot.
     */
    if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
        if (nlmsvc_ops)
            nlmsvc_invalidate_all();
        nlm_shutdown_hosts();
        nlmsvc_pid = 0;
        nlmsvc_serv = NULL;
    } else
        printk(KERN_DEBUG
               "lockd: new process, skipping host shutdown\n");
    wake_up(&lockd_exit);

    /* Exit the RPC thread */
    svc_exit_thread(rqstp);

    /* release rpciod */
    rpciod_down();

    /* Release module */
    unlock_kernel();
    module_put_and_exit(0);
}
Example no. 5
0
asmlinkage int
sys_sysmips(int cmd, int arg1, int arg2, int arg3)
{
	int	*p;
	char	*name;
	int	flags, tmp, len, retval;

	lock_kernel();
	switch(cmd)
	{
	case SETNAME:
		retval = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;

		name = (char *) arg1;
		len = strlen_user(name);

		retval = len;
		if (len < 0)
			goto out;

		retval = -EINVAL;
		if (len == 0 || len > __NEW_UTS_LEN)
			goto out;

		if (copy_from_user(system_utsname.nodename, name, len)) {
			retval = -EFAULT;
			goto out;
		}
		system_utsname.nodename[len] = '\0';
		retval = 0;
		goto out;

	case MIPS_ATOMIC_SET:
	/* This is safe against page faults */
	{ 
		static volatile int lock = 0;
		static struct wait_queue *wq = NULL;

		struct wait_queue wait = { current, NULL };
		int retry = 0;

		p = (int *) arg1;
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(&wq, &wait);
		while (lock) {
			if (retry > 20) break;
			retry ++;

			if (signal_pending(current)) {
				remove_wait_queue(&wq, &wait);
				current->state = TASK_RUNNING;
				retval =   -EINTR;
				goto out;
			}
			schedule();
			current->state = TASK_INTERRUPTIBLE;
		}
		remove_wait_queue (&wq, &wait);
		current->state = TASK_RUNNING;

		if (lock) {
			retval = -EAGAIN;
			goto out;
		}
		lock ++;

		retval = verify_area(VERIFY_WRITE, p, sizeof(*p));
		if (retval) {
			goto out_atomic_set;
		}

		/* swap *p and arg2; this may fault on the user page */
		if (__get_user(retval, p)) {
			retval = -EFAULT;
			goto out_atomic_set;
		}
		__put_user(arg2, p);

	out_atomic_set:
		lock --;
		wake_up_interruptible(&wq);
		goto out;
	}

	case MIPS_FIXADE:
		tmp = current->tss.mflags & ~3;
		current->tss.mflags = tmp | (arg1 & 3);
		retval = 0;
		goto out;

	case FLUSH_CACHE:
		flush_cache_all();
		retval = 0;
		goto out;

	case MIPS_RDNVRAM:
		retval = -EIO;
		goto out;
#ifdef CONFIG_CPU_R5900
	case MIPS_SYS_R5900:
		retval = mips_sys_r5900(arg1, arg2, arg3);
		goto out;
#endif
	default:
		retval = -EINVAL;
		goto out;
	}

out:
	unlock_kernel();
	return retval;
}
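
For context, MIPS_ATOMIC_SET is reachable from user space through the sysmips syscall; a hedged, MIPS-only sketch, assuming <asm/sysmips.h> provides the command constant:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/sysmips.h>	/* MIPS_ATOMIC_SET */

int main(void)
{
	int word = 1;
	/* atomically store 0 in `word`, returning the previous value */
	long old = syscall(SYS_sysmips, MIPS_ATOMIC_SET, &word, 0, 0);

	printf("old=%ld new=%d\n", old, word);
	return 0;
}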
Example no. 6
0
asmlinkage int
sunos_mount(char __user *type, char __user *dir, int flags, void __user *data)
{
    int linux_flags = 0;
    int ret = -EINVAL;
    char *dev_fname = NULL;
    char *dir_page, *type_page;

    if (!capable (CAP_SYS_ADMIN))
        return -EPERM;

    lock_kernel();
    /* We don't handle the integer fs type */
    if ((flags & SMNT_NEWTYPE) == 0)
        goto out;

    /* Do not allow for those flags we don't support */
    if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
        goto out;

    if (flags & SMNT_REMOUNT)
        linux_flags |= MS_REMOUNT;
    if (flags & SMNT_RDONLY)
        linux_flags |= MS_RDONLY;
    if (flags & SMNT_NOSUID)
        linux_flags |= MS_NOSUID;

    dir_page = getname(dir);
    ret = PTR_ERR(dir_page);
    if (IS_ERR(dir_page))
        goto out;

    type_page = getname(type);
    ret = PTR_ERR(type_page);
    if (IS_ERR(type_page))
        goto out1;

    if (strcmp(type_page, "ext2") == 0) {
        dev_fname = getname(data);
    } else if (strcmp(type_page, "iso9660") == 0) {
        dev_fname = getname(data);
    } else if (strcmp(type_page, "minix") == 0) {
        dev_fname = getname(data);
    } else if (strcmp(type_page, "nfs") == 0) {
        ret = sunos_nfs_mount (dir_page, flags, data);
        goto out2;
    } else if (strcmp(type_page, "ufs") == 0) {
        printk("Warning: UFS filesystem mounts unsupported.\n");
        ret = -ENODEV;
        goto out2;
    } else if (strcmp(type_page, "proc")) {
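        /* note: strcmp() != 0 here, so every type other than "proc" is
           rejected; a "proc" mount falls through with dev_fname == NULL */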
        ret = -ENODEV;
        goto out2;
    }
    ret = PTR_ERR(dev_fname);
    if (IS_ERR(dev_fname))
        goto out2;
    ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
    if (dev_fname)
        putname(dev_fname);
out2:
    putname(type_page);
out1:
    putname(dir_page);
out:
    unlock_kernel();
    return ret;
}
Example no. 7
0
int
osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
{
    register afs_int32 code;
    struct osi_stat tstat;
    struct iattr newattrs;
    struct inode *inode = OSIFILE_INODE(afile);
    AFS_STATCNT(osi_Truncate);

    /* This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
	return code;
    MObtainWriteLock(&afs_xosi, 321);
    AFS_GUNLOCK();
#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
    down_write(&inode->i_alloc_sem);
#endif
#ifdef STRUCT_INODE_HAS_I_MUTEX
    mutex_lock(&inode->i_mutex);
#else
    down(&inode->i_sem);
#endif
    newattrs.ia_size = asize;
    newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
#if defined(AFS_LINUX24_ENV)
    newattrs.ia_ctime = CURRENT_TIME;

    /* avoid notify_change() since it wants to update dentry->d_parent */
    lock_kernel();
    code = inode_change_ok(inode, &newattrs);
    if (!code)
#ifdef INODE_SETATTR_NOT_VOID
	code = inode_setattr(inode, &newattrs);
#else
	inode_setattr(inode, &newattrs);
#endif
    unlock_kernel();
    if (!code)
	truncate_inode_pages(&inode->i_data, asize);
#else
    i_size_write(inode, asize);
    if (inode->i_sb->s_op && inode->i_sb->s_op->notify_change) {
	code = inode->i_sb->s_op->notify_change(&afile->dentry, &newattrs);
    }
    if (!code) {
	truncate_inode_pages(inode, asize);
	if (inode->i_op && inode->i_op->truncate)
	    inode->i_op->truncate(inode);
    }
#endif
    code = -code;
#ifdef STRUCT_INODE_HAS_I_MUTEX
    mutex_unlock(&inode->i_mutex);
#else
    up(&inode->i_sem);
#endif
#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
    up_write(&inode->i_alloc_sem);
#endif
    AFS_GLOCK();
    MReleaseWriteLock(&afs_xosi);
    return code;
}
Example no. 8
0
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}

		if (!lockwakeup && td->bkl == 4) {
			unlock_kernel();
			td->bkl = 0;
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	case RTTEST_LOCKBKL:
		if (td->bkl)
			return 0;
		td->bkl = 1;
		lock_kernel();
		td->bkl = 4;
		return 0;

	case RTTEST_UNLOCKBKL:
		if (td->bkl != 4)
			break;
		unlock_kernel();
		td->bkl = 0;
		return 0;

	default:
		break;
	}
	return ret;
}
Example no. 9
0
static int  __init scx200_acb_create(int base, int index)
{
	struct scx200_acb_iface *iface;
	struct i2c_adapter *adapter;
	int rc = 0;
	char description[64];

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface) {
		printk(KERN_ERR NAME ": can't allocate memory\n");
		rc = -ENOMEM;
		goto errout;
	}

	memset(iface, 0, sizeof(*iface));
	adapter = &iface->adapter;
	i2c_set_adapdata(adapter, iface);
	snprintf(adapter->name, I2C_NAME_SIZE, "SCx200 ACB%d", index);
	adapter->owner = THIS_MODULE;
	adapter->id = I2C_ALGO_SMBUS;
	adapter->algo = &scx200_acb_algorithm;

	init_MUTEX(&iface->sem);

	snprintf(description, sizeof(description), "NatSemi SCx200 ACCESS.bus [%s]", adapter->name);
	if (request_region(base, 8, description) == 0) {
		dev_err(&adapter->dev, "can't allocate io 0x%x-0x%x\n",
			base, base + 8-1);
		rc = -EBUSY;
		goto errout;
	}
	iface->base = base;

	rc = scx200_acb_probe(iface);
	if (rc) {
		dev_warn(&adapter->dev, "probe failed\n");
		goto errout;
	}

	scx200_acb_reset(iface);

	if (i2c_add_adapter(adapter) < 0) {
		dev_err(&adapter->dev, "failed to register\n");
		rc = -ENODEV;
		goto errout;
	}

	lock_kernel();
	iface->next = scx200_acb_list;
	scx200_acb_list = iface;
	unlock_kernel();

	return 0;

 errout:
	if (iface) {
		if (iface->base)
			release_region(iface->base, 8);
		kfree(iface);
	}
	return rc;
}
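
The BKL-protected push onto scx200_acb_list pairs with an unlink on the teardown side; a sketch of the matching removal under the same lock (a hypothetical helper mirroring the driver's list convention, not part of the original source):

/* Hypothetical cleanup-side counterpart: unlink one iface from the
 * BKL-protected singly linked list built up in scx200_acb_create(). */
static void scx200_acb_remove(struct scx200_acb_iface *iface)
{
	struct scx200_acb_iface **pp;

	lock_kernel();
	for (pp = &scx200_acb_list; *pp; pp = &(*pp)->next) {
		if (*pp == iface) {
			*pp = iface->next;	/* splice it out */
			break;
		}
	}
	unlock_kernel();
}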
Example no. 10
0
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{	
	struct file * filp;
	unsigned int flag;
	int on, error = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	error = security_file_ioctl(filp, cmd, arg);
	if (error) {
		fput(filp);
		goto out;
	}

	lock_kernel();
	switch (cmd) {
		case FIOCLEX:
			set_close_on_exec(fd, 1);
			break;

		case FIONCLEX:
			set_close_on_exec(fd, 0);
			break;

		case FIONBIO:
			if ((error = get_user(on, (int __user *)arg)) != 0)
				break;
			flag = O_NONBLOCK;
#ifdef __sparc__
			/* SunOS compatibility item. */
			if(O_NONBLOCK != O_NDELAY)
				flag |= O_NDELAY;
#endif
			if (on)
				filp->f_flags |= flag;
			else
				filp->f_flags &= ~flag;
			break;

		case FIOASYNC:
			if ((error = get_user(on, (int __user *)arg)) != 0)
				break;
			flag = on ? FASYNC : 0;

			/* Did FASYNC state change ? */
			if ((flag ^ filp->f_flags) & FASYNC) {
				if (filp->f_op && filp->f_op->fasync)
					error = filp->f_op->fasync(fd, filp, on);
				else error = -ENOTTY;
			}
			if (error != 0)
				break;

			if (on)
				filp->f_flags |= FASYNC;
			else
				filp->f_flags &= ~FASYNC;
			break;

		case FIOQSIZE:
			if (S_ISDIR(filp->f_dentry->d_inode->i_mode) ||
			    S_ISREG(filp->f_dentry->d_inode->i_mode) ||
			    S_ISLNK(filp->f_dentry->d_inode->i_mode)) {
				loff_t res = inode_get_bytes(filp->f_dentry->d_inode);
				error = copy_to_user((loff_t __user *)arg, &res, sizeof(res)) ? -EFAULT : 0;
			}
			else
				error = -ENOTTY;
			break;
		default:
			error = -ENOTTY;
			if (S_ISREG(filp->f_dentry->d_inode->i_mode))
				error = file_ioctl(filp, cmd, arg);
			else if (filp->f_op && filp->f_op->ioctl)
				error = filp->f_op->ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
	}
	unlock_kernel();
	fput(filp);

out:
	return error;
}
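
The FIONBIO branch above is the kernel half of a common user-space idiom; a minimal sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Toggle non-blocking mode on fd via FIONBIO, the path handled above. */
static int set_nonblocking(int fd, int on)
{
	return ioctl(fd, FIONBIO, &on);
}

int main(void)
{
	if (set_nonblocking(STDIN_FILENO, 1))
		perror("FIONBIO");
	return 0;
}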
Example no. 11
0
/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int error = 0;
	unsigned long offset, lblk;
	int i, stored;
	struct buffer_head * bh;
	struct ufs_dir_entry * de;
	struct super_block * sb;
	int de_reclen;
	unsigned flags;
	u64     blk= 0L;

	lock_kernel();

	sb = inode->i_sb;
	flags = UFS_SB(sb)->s_flags;

	UFSD(("ENTER, ino %lu  f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))

	stored = 0;
	bh = NULL;
	offset = filp->f_pos & (sb->s_blocksize - 1);

	while (!error && !stored && filp->f_pos < inode->i_size) {
		lblk = (filp->f_pos) >> sb->s_blocksize_bits;
		blk = ufs_frag_map(inode, lblk);
		if (!blk || !(bh = sb_bread(sb, blk))) {
			/* XXX - error - skip to the next block */
			printk("ufs_readdir: "
			       "dir inode %lu has a hole at offset %lu\n",
			       inode->i_ino, (unsigned long int)filp->f_pos);
			filp->f_pos += sb->s_blocksize - offset;
			continue;
		}

revalidate:
		/* If the dir block has changed since the last call to
		 * readdir(2), then we might be pointing to an invalid
		 * dirent right now.  Scan from the start of the block
		 * to make sure. */
		if (filp->f_version != inode->i_version) {
			for (i = 0; i < sb->s_blocksize && i < offset; ) {
				de = (struct ufs_dir_entry *)(bh->b_data + i);
				/* It's too expensive to do a full
				 * dirent test each time round this
				 * loop, but we do have to test at
				 * least that it is non-zero.  A
				 * failure will be detected in the
				 * dirent test below. */
				de_reclen = fs16_to_cpu(sb, de->d_reclen);
				if (de_reclen < 1)
					break;
				i += de_reclen;
			}
			offset = i;
			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
				| offset;
			filp->f_version = inode->i_version;
		}

		while (!error && filp->f_pos < inode->i_size
		       && offset < sb->s_blocksize) {
			de = (struct ufs_dir_entry *) (bh->b_data + offset);
			/* XXX - put in a real ufs_check_dir_entry() */
			if ((de->d_reclen == 0) || (ufs_get_de_namlen(sb, de) == 0)) {
				filp->f_pos = (filp->f_pos &
				              ~(sb->s_blocksize - 1)) +
				               sb->s_blocksize;
				brelse(bh);
				unlock_kernel();
				return stored;
			}
			if (!ufs_check_dir_entry ("ufs_readdir", inode, de,
						   bh, offset)) {
				/* On error, skip the f_pos to the
				   next block. */
				filp->f_pos = (filp->f_pos |
				              (sb->s_blocksize - 1)) +
					       1;
				brelse (bh);
				unlock_kernel();
				return stored;
			}
			offset += fs16_to_cpu(sb, de->d_reclen);
			if (de->d_ino) {
				/* We might block in the next section
				 * if the data destination is
				 * currently swapped out.  So, use a
				 * version stamp to detect whether or
				 * not the directory has been modified
				 * during the copy operation. */
				unsigned long version = filp->f_version;
				unsigned char d_type = DT_UNKNOWN;

				UFSD(("filldir(%s,%u)\n", de->d_name,
							fs32_to_cpu(sb, de->d_ino)))
				UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de)))

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;
				error = filldir(dirent, de->d_name,
						ufs_get_de_namlen(sb, de), filp->f_pos,
						fs32_to_cpu(sb, de->d_ino), d_type);
				if (error)
					break;
				if (version != filp->f_version)
					goto revalidate;
				stored ++;
			}
			filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
		}
		offset = 0;
		brelse (bh);
	}

	unlock_kernel();
	return 0;
}
Example no. 12
0
int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret = -EPERM;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index;
		unsigned long tmp;

		ret = -EIO;
		/* convert to index and check */
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
			break;

		if (index < PT_FPR0) {
			tmp = get_reg(child, (int)index);
		} else {
			flush_fp_to_thread(child);
			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* If I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1)
				== sizeof(data))
			break;
		ret = -EIO;
		break;

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
			break;

		if (index == PT_ORIG_R3)
			break;
		if (index < PT_FPR0) {
			ret = put_reg(child, index, data);
		} else {
			flush_fp_to_thread(child);
			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
			ret = 0;
		}
		break;
	}

	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: { /* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		ret = 0;
		break;
	}

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL: {
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		break;
	}

	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		set_single_step(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;
	}

	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
		ret = put_user(child->thread.dabr,
			       (unsigned long __user *)data);
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

	case PTRACE_DETACH:
		ret = ptrace_detach(child, data);
		break;

	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		for (i = 0; i < 32; i++) {
			ret = put_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		for (i = 0; i < 32; i++) {
			ret = get_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		flush_fp_to_thread(child);

		for (i = 0; i < 32; i++) {
			ret = put_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
		int i;
		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
		unsigned long __user *tmp = (unsigned long __user *)addr;

		flush_fp_to_thread(child);

		for (i = 0; i < 32; i++) {
			ret = get_user(*reg, tmp);
			if (ret)
				break;
			reg++;
			tmp++;
		}
		break;
	}

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		/* Get the child altivec register state. */
		flush_altivec_to_thread(child);
		ret = get_vrregs((unsigned long __user *)data, child);
		break;

	case PTRACE_SETVRREGS:
		/* Set the child altivec register state. */
		flush_altivec_to_thread(child);
		ret = set_vrregs(child, (unsigned long __user *)data);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
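
Seen from the tracing side, PTRACE_PEEKDATA above services a round-trip like the following generic Linux sketch (not ppc64-specific; the address read is just an example):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {			/* tracee: opt in, then stop */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}
	waitpid(pid, NULL, 0);		/* wait for the SIGSTOP */
	errno = 0;
	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)main, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("first word of main: 0x%lx\n", (unsigned long)word);
	ptrace(PTRACE_KILL, pid, NULL, NULL);
	return 0;
}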
Example no. 13
0
/**
 * vxfs_readdir - read a directory
 * @fp:		the directory to read
 * @retp:	return buffer
 * @filler:	filldir callback
 *
 * Description:
 *   vxfs_readdir fills @retp with directory entries from @fp
 *   using the VFS supplied callback @filler.
 *
 * Returns:
 *   Zero.
 */
static int
vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
{
	struct inode		*ip = fp->f_path.dentry->d_inode;
	struct super_block	*sbp = ip->i_sb;
	u_long			bsize = sbp->s_blocksize;
	u_long			page, npages, block, pblocks, nblocks, offset;
	loff_t			pos;

	lock_kernel();

	switch ((long)fp->f_pos) {
	case 0:
		if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0)
			goto out;
		fp->f_pos++;
		/* fallthrough */
	case 1:
		if (filler(retp, "..", 2, fp->f_pos, VXFS_INO(ip)->vii_dotdot, DT_DIR) < 0)
			goto out;
		fp->f_pos++;
		/* fallthrough */
	}

	pos = fp->f_pos - 2;
	
	if (pos > VXFS_DIRROUND(ip->i_size)) {
		unlock_kernel();
		return 0;
	}

	npages = dir_pages(ip);
	nblocks = dir_blocks(ip);
	pblocks = VXFS_BLOCK_PER_PAGE(sbp);

	page = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;
	block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;

	for (; page < npages; page++, block = 0) {
		caddr_t			kaddr;
		struct page		*pp;

		pp = vxfs_get_page(ip->i_mapping, page);
		if (IS_ERR(pp))
			continue;
		kaddr = (caddr_t)page_address(pp);

		for (; block <= nblocks && block <= pblocks; block++) {
			caddr_t			baddr, limit;
			struct vxfs_dirblk	*dbp;
			struct vxfs_direct	*de;

			baddr = kaddr + (block * bsize);
			limit = baddr + bsize - VXFS_DIRLEN(1);
	
			dbp = (struct vxfs_dirblk *)baddr;
			de = (struct vxfs_direct *)
				(offset ?
				 (kaddr + offset) :
				 (baddr + VXFS_DIRBLKOV(dbp)));

			for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) {
				int	over;

				if (!de->d_reclen)
					break;
				if (!de->d_ino)
					continue;

				offset = (caddr_t)de - kaddr;
				over = filler(retp, de->d_name, de->d_namelen,
					((page << PAGE_CACHE_SHIFT) | offset) + 2,
					de->d_ino, DT_UNKNOWN);
				if (over) {
					vxfs_put_page(pp);
					goto done;
				}
			}
			offset = 0;
		}
		vxfs_put_page(pp);
		offset = 0;
	}

done:
	fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
out:
	unlock_kernel();
	return 0;
}
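
The directory position encoding used above, ((page << PAGE_CACHE_SHIFT) | offset) + 2, reserves positions 0 and 1 for "." and ".."; a minimal round-trip sketch, assuming a 4 KiB page cache:

#include <assert.h>

#define SHIFT 12			/* assume PAGE_CACHE_SHIFT == 12 */
#define MASK  ((1UL << SHIFT) - 1)	/* low bits: offset within the page */

static unsigned long pos_encode(unsigned long page, unsigned long offset)
{
	return ((page << SHIFT) | offset) + 2;	/* +2 skips "." and ".." */
}

int main(void)
{
	unsigned long pos = pos_encode(3, 0x40) - 2;

	assert((pos >> SHIFT) == 3 && (pos & MASK) == 0x40);
	return 0;
}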
Example no. 14
0
static void
kerext_process_shutdown(void *data) {
	struct kerargs_process_shutdown	*args = data;
	PROCESS			*prp = args->prp;
	THREAD			*act = actives[KERNCPU];
	SIGTABLE		*stp;
	int				tid;
	int				i;
	int				try_again;

	lock_kernel();
	if(prp == NULL) {
		prp = act->process;
	}

	if(prp->memory) {
		// Make sure no threads reference the aspace before it's removed
		for(tid = 0; tid < prp->threads.nentries; tid++) {
			THREAD				*thp;

			if(VECP(thp, &prp->threads, tid)) {
				thp->aspace_prp = 0;
			}
		}

		try_again = 0;
		for(i = 0; i < NUM_PROCESSORS; ++i) {
			PROCESS *prp0;

			// The code between interrupt disable and enable should be short,
			// so that other CPUs don't get a chance to take an interrupt and
			// switch address spaces during the check
			InterruptDisable();
			if(get_inkernel() & INKERNEL_INTRMASK) {
				try_again = 1;
			}
			prp0 = *((PROCESS* volatile *)&aspaces_prp[i]);
			if(get_inkernel() & INKERNEL_INTRMASK) {
				try_again = 1;
			}
			InterruptEnable();

			if(prp == prp0) {
				// Switch current address space
				if(i == KERNCPU) {
					set_safe_aspace(i);
				} else {
					args->tlb_safe_cpu = i;
					SENDIPI(i, IPI_TLB_SAFE);
					try_again = 1;
				}
			}
		}
		if(try_again) {
			//Another CPU is pointing at this address space. We've sent
			//an IPI to get it to point at a safe entry (the process manager's),
			//but we have to wait until it's been processed before we can
			//destroy this address space.
			kererr(act, EAGAIN);
			return;
		}

		if(prp->flags & _NTO_PF_VFORKED) {
			// We're shutting down a vfork'd process. Don't free the
			// aspace, the parent process is still using it.
			prp->memory = NULL; 
		} else {
			memmgr.mdestroy(prp);
		}

		if(prp->memory || aspaces_prp[KERNCPU] == prp) {
			crash();
		}
		SETKSTATUS(act, 0);
		return;
	}

	// Remove all signal tables
	while((stp = prp->sig_table)) {
		prp->sig_table = stp->next;
		object_free(NULL, &sigtable_souls, stp);
	}

	// Check for guardian
	if(prp->guardian && (prp->guardian->flags & (_NTO_PF_LOADING | _NTO_PF_TERMING | _NTO_PF_ZOMBIE)) == 0) {
		process_swap(prp->guardian);
		prp->guardian = 0;
	}		

	if(prp == act->process) {
		if(!args->exec) {
			// if we need to, send a sigchld to the parent
			if (!(prp->flags & _NTO_PF_NOZOMBIE)) {
				signal_kill_process(prp->parent, SIGCHLD, prp->siginfo.si_code,
					prp->siginfo.si_value.sival_int, prp->siginfo.si_pid, 0 );
			}
			prp->siginfo.si_signo = SIGCHLD;
		}
		actives_prp[KERNCPU] = sysmgr_prp;
		thread_destroyall(act);
	} else {
		SETKSTATUS(act, prp->siginfo.si_status);
	}
}
Example no. 15
0
static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	struct autofs_sb_info *sbi;
	int oz_mode;

	DPRINTK(("autofs_root_lookup: name = "));
	lock_kernel();
	autofs_say(dentry->d_name.name,dentry->d_name.len);

	if (dentry->d_name.len > NAME_MAX) {
		unlock_kernel();
		return ERR_PTR(-ENAMETOOLONG);/* File name too long to exist */
	}

	sbi = autofs_sbi(dir->i_sb);

	oz_mode = autofs_oz_mode(sbi);
	DPRINTK(("autofs_lookup: pid = %u, pgrp = %u, catatonic = %d, "
				"oz_mode = %d\n", task_pid_nr(current),
				task_pgrp_nr(current), sbi->catatonic,
				oz_mode));

	/*
	 * Mark the dentry incomplete, but add it. This is needed so
	 * that the VFS layer knows about the dentry, and we can count
	 * on catching any lookups through the revalidate.
	 *
	 * Let all the hard work be done by the revalidate function that
	 * needs to be able to do this anyway..
	 *
	 * We need to do this before we release the directory semaphore.
	 */
	dentry->d_op = &autofs_dentry_operations;
	dentry->d_flags |= DCACHE_AUTOFS_PENDING;
	d_add(dentry, NULL);

	mutex_unlock(&dir->i_mutex);
	autofs_revalidate(dentry, nd);
	mutex_lock(&dir->i_mutex);

	/*
	 * If we are still pending, check if we had to handle
	 * a signal. If so we can force a restart..
	 */
	if (dentry->d_flags & DCACHE_AUTOFS_PENDING) {
		/* See if we were interrupted */
		if (signal_pending(current)) {
			sigset_t *sigset = &current->pending.signal;
			if (sigismember (sigset, SIGKILL) ||
			    sigismember (sigset, SIGQUIT) ||
			    sigismember (sigset, SIGINT)) {
				unlock_kernel();
				return ERR_PTR(-ERESTARTNOINTR);
			}
		}
	}
	unlock_kernel();

	/*
	 * If this dentry is unhashed, then we shouldn't honour this
	 * lookup even if the dentry is positive.  Returning ENOENT here
	 * doesn't do the right thing for all system calls, but it should
	 * be OK for the operations we permit from an autofs.
	 */
	if (dentry->d_inode && d_unhashed(dentry))
		return ERR_PTR(-ENOENT);

	return NULL;
}
Example no. 16
0
int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int rval;

	lock_kernel();
	/* printk("sys_ptrace - PID:%u  ADDR:%08x  DATA:%08x\n",pid,addr,data); */

	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED) {
			rval = -EPERM;
			goto out;
		}
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		rval = 0;
		goto out;
	}
	rval = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	rval = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out;

	if (request == PTRACE_ATTACH) {
		rval = ptrace_attach(child);
		goto out_tsk;
	}
	rval = -ESRCH;
	if (!(child->ptrace & PT_PTRACED))
		goto out_tsk;
	if (child->state != TASK_STOPPED) {
		if (request != PTRACE_KILL)
			goto out_tsk;
	}
	if (child->p_pptr != current)
		goto out_tsk;

	switch (request) {
		unsigned long val, copied;

	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
	/*	printk("PEEKTEXT/PEEKDATA at %08X\n",addr); */
		copied = access_process_vm(child, addr, &val, sizeof(val), 0);
		rval = -EIO;
		if (copied != sizeof(val))
			break;
		rval = put_user(val, (unsigned long *)data);
		break;

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		/* printk("POKETEXT/POKEDATA to %08X\n",addr); */
		rval = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1)
		    == sizeof(data))
			break;
		rval = -EIO;
		break;

	/* Read/write the word at location ADDR in the registers.  */
	case PTRACE_PEEKUSR:
	case PTRACE_POKEUSR:
		rval = 0;
		if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) {
			/* Special requests that don't actually correspond
			   to offsets in struct pt_regs.  */
			if (addr == PT_TEXT_ADDR)
			{
				val = child->mm->start_code;
			}
			else if (addr == PT_DATA_ADDR)
			{
				val = child->mm->start_data;
			}
			else if (addr == PT_TEXT_LEN)
			{
				val = child->mm->end_code
					- child->mm->start_code;
			}
			else if (addr == PT_DATA_LEN)
			{
				val = child->mm->end_data
					- child->mm->start_data;
			}
			else
			{
				rval = -EIO;
			}
		} else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
			microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
			if (request == PTRACE_PEEKUSR)
			{
				val = *reg_addr;
			}
			else
			{
				*reg_addr = data;
			}
		} else
			rval = -EIO;

		if (rval == 0 && request == PTRACE_PEEKUSR)
		{
			rval = put_user (val, (unsigned long *)data);
		}
		break;

	/* Continue and stop at next (return from) syscall */
	case PTRACE_SYSCALL:
	/* Restart after a signal.  */
	case PTRACE_CONT:
		/* printk("PTRACE_CONT\n"); */
	/* Execute a single instruction. */
	case PTRACE_SINGLESTEP:
		/* printk("PTRACE_SINGLESTEP\n"); */
		rval = -EIO;
		if ((unsigned long) data > _NSIG)
			break;

		/* Turn CHILD's single-step flag on or off.  */
		/* printk("calling set_single_step\n"); */
		if (! set_single_step (child, request == PTRACE_SINGLESTEP))
			break;

		if (request == PTRACE_SYSCALL)
			child->ptrace |= PT_TRACESYS;
		else
			child->ptrace &= ~PT_TRACESYS;

		child->exit_code = data;
		/* printk("wakeup_process\n"); */
		wake_up_process(child);
		rval = 0;
		break;

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		/* printk("PTRACE_KILL\n"); */
		rval = 0;
		if (child->state == TASK_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		wake_up_process(child);
		break;

	case PTRACE_DETACH: /* detach a process that was attached. */
		/* printk("PTRACE_DETACH\n"); */
		set_single_step (child, 0);  /* Clear single-step flag */
		rval = ptrace_detach(child, data);
		break;

	default:
		rval = -EIO;
		break;
	}

out_tsk:
	free_task_struct(child);
out:
	unlock_kernel();
	return rval;
}
Example no. 17
0
static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
	struct autofs_dirhash *dh = &sbi->dirhash;
	struct autofs_dir_ent *ent;
	unsigned int n;
	int slsize;
	struct autofs_symlink *sl;
	struct inode *inode;

	DPRINTK(("autofs_root_symlink: %s <- ", symname));
	autofs_say(dentry->d_name.name,dentry->d_name.len);

	lock_kernel();
	if (!autofs_oz_mode(sbi)) {
		unlock_kernel();
		return -EACCES;
	}

	if (autofs_hash_lookup(dh, &dentry->d_name)) {
		unlock_kernel();
		return -EEXIST;
	}

	n = find_first_zero_bit(sbi->symlink_bitmap,AUTOFS_MAX_SYMLINKS);
	if (n >= AUTOFS_MAX_SYMLINKS) {
		unlock_kernel();
		return -ENOSPC;
	}

	set_bit(n,sbi->symlink_bitmap);
	sl = &sbi->symlink[n];
	sl->len = strlen(symname);
	sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
	if (!sl->data) {
		clear_bit(n,sbi->symlink_bitmap);
		unlock_kernel();
		return -ENOSPC;
	}

	ent = kmalloc(sizeof(struct autofs_dir_ent), GFP_KERNEL);
	if (!ent) {
		kfree(sl->data);
		clear_bit(n,sbi->symlink_bitmap);
		unlock_kernel();
		return -ENOSPC;
	}

	ent->name = kmalloc(dentry->d_name.len+1, GFP_KERNEL);
	if (!ent->name) {
		kfree(sl->data);
		kfree(ent);
		clear_bit(n,sbi->symlink_bitmap);
		unlock_kernel();
		return -ENOSPC;
	}

	memcpy(sl->data,symname,slsize);
	sl->mtime = get_seconds();

	ent->ino = AUTOFS_FIRST_SYMLINK + n;
	ent->hash = dentry->d_name.hash;
	memcpy(ent->name, dentry->d_name.name, 1+(ent->len = dentry->d_name.len));
	ent->dentry = NULL;	/* We don't keep the dentry for symlinks */

	autofs_hash_insert(dh,ent);

	inode = autofs_iget(dir->i_sb, ent->ino);
	if (IS_ERR(inode)) {
		unlock_kernel();
		return PTR_ERR(inode);
	}

	d_instantiate(dentry, inode);
	unlock_kernel();
	return 0;
}
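
The symlink table above is a fixed-size slot allocator driven by a bitmap (find_first_zero_bit / set_bit / clear_bit); the same pattern in miniature, as a user-space sketch:

#include <stdio.h>

#define MAX_SLOTS 32

static unsigned long slot_bitmap;	/* bit n set => slot n in use */

/* Claim the first free slot, or -1 if the table is full. */
static int slot_alloc(void)
{
	int n;

	for (n = 0; n < MAX_SLOTS; n++) {
		if (!(slot_bitmap & (1UL << n))) {
			slot_bitmap |= 1UL << n;
			return n;
		}
	}
	return -1;
}

static void slot_free(int n)
{
	slot_bitmap &= ~(1UL << n);
}

int main(void)
{
	int a = slot_alloc(), b = slot_alloc();

	slot_free(a);
	printf("a=%d b=%d next=%d\n", a, b, slot_alloc());	/* next == a */
	return 0;
}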
Example no. 18
0
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
		case PTRACE_PEEKTEXT: /* read word at location addr. */ 
		case PTRACE_PEEKDATA: {
			unsigned long tmp;

			ret = read_long(child, addr, &tmp);
			if (ret < 0)
				break ;
			ret = put_user(tmp, (unsigned long *) data);
			break ;
		}

	/* read the word at location addr in the USER area. */
		case PTRACE_PEEKUSR: {
			unsigned long tmp = 0;
			
			if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
				ret = -EIO;
				break ;
			}
			
		        ret = 0;  /* Default return condition */
			addr = addr >> 2; /* temporary hack. */

			if (addr < H8300_REGS_NO)
				tmp = h8300_get_reg(child, addr);
			else {
				switch(addr) {
				case 49:
					tmp = child->mm->start_code;
					break ;
				case 50:
					tmp = child->mm->start_data;
					break ;
				case 51:
					tmp = child->mm->end_code;
					break ;
				case 52:
					tmp = child->mm->end_data;
					break ;
				default:
					ret = -EIO;
				}
			}
			if (!ret)
				ret = put_user(tmp,(unsigned long *) data);
			break ;
		}

      /* when I and D space are separate, this will have to be fixed. */
		case PTRACE_POKETEXT: /* write the word at location addr. */
		case PTRACE_POKEDATA:
			ret = 0;
			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
				break;
			ret = -EIO;
			break;

		case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
			if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
				ret = -EIO;
				break ;
			}
			addr = addr >> 2; /* temporary hack. */
			    
			if (addr == PT_ORIG_ER0) {
				ret = -EIO;
				break ;
			}
			if (addr < H8300_REGS_NO) {
				ret = h8300_put_reg(child, addr, data);
				break ;
			}
			ret = -EIO;
			break ;
		case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
		case PTRACE_CONT: { /* restart after signal. */
			ret = -EIO;
			if ((unsigned long) data >= _NSIG)
				break ;
			if (request == PTRACE_SYSCALL)
				set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			else
				clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			child->exit_code = data;
			wake_up_process(child);
			/* make sure the single step bit is not set. */
			h8300_disable_trace(child);
			ret = 0;
			break;
		}

/*
 * make the child exit.  Best I can do is send it a sigkill. 
 * perhaps it should be put in the status that it wants to 
 * exit.
 */
		case PTRACE_KILL: {

			ret = 0;
			if (child->exit_state == EXIT_ZOMBIE) /* already dead */
				break;
			child->exit_code = SIGKILL;
			h8300_disable_trace(child);
			wake_up_process(child);
			break;
		}

		case PTRACE_SINGLESTEP: {  /* set the trap flag. */
			ret = -EIO;
			if ((unsigned long) data > _NSIG)
				break;
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			child->exit_code = data;
			h8300_enable_trace(child);
			wake_up_process(child);
			ret = 0;
			break;
		}

		case PTRACE_DETACH:	/* detach a process that was attached. */
			ret = ptrace_detach(child, data);
			break;

		case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		  	int i;
			unsigned long tmp;
			for (i = 0; i < H8300_REGS_NO; i++) {
			    tmp = h8300_get_reg(child, i);
			    if (put_user(tmp, (unsigned long *) data)) {
				ret = -EFAULT;
				break;
			    }
			    data += sizeof(long);
			}
			ret = 0;
			break;
		}

		case PTRACE_SETREGS: { /* Set all gp regs in the child. */
			int i;
			unsigned long tmp;
			for (i = 0; i < H8300_REGS_NO; i++) {
			    if (get_user(tmp, (unsigned long *) data)) {
				ret = -EFAULT;
				break;
			    }
			    h8300_put_reg(child, i, tmp);
			    data += sizeof(long);
			}
			ret = 0;
			break;
		}

		default:
			ret = -EIO;
			break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
Example no. 19
0
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret = -EPERM;
	unsigned long tmp;
	int copied;
	ptrace_area   parea; 

	lock_kernel();
	if (request == PTRACE_TRACEME) 
	{
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;
	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;
	if (request == PTRACE_ATTACH) 
	{
		ret = ptrace_attach(child);
		goto out_tsk;
	}
	ret = -ESRCH;
	// printk("child=%lX child->flags=%lX",child,child->flags);
	/* I added child!=current line so we can get the */
	/* ieee_instruction_pointer from the user structure DJB */
	if(child!=current)
	{
		if (!(child->ptrace & PT_PTRACED))
			goto out_tsk;
		if (child->state != TASK_STOPPED) 
		{
			if (request != PTRACE_KILL)
				goto out_tsk;
		}
		if (child->p_pptr != current)
			goto out_tsk;
	}
	switch (request) 
	{
		/* If I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
	case PTRACE_PEEKDATA: 
		copied = access_process_vm(child,ADDR_BITS_REMOVE(addr), &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long *) data);
		break;

		/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR:
		ret=copy_user(child,addr,data,sizeof(unsigned long),1,0);
		break;

		/* If I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child,ADDR_BITS_REMOVE(addr), &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret=copy_user(child,addr,(addr_t)&data,sizeof(unsigned long),0,1);
		break;

	case PTRACE_SYSCALL: 	/* continue and stop at next (return from) syscall */
	case PTRACE_CONT: 	 /* restart after signal. */
		ret = -EIO;
		if ((unsigned long) data >= _NSIG)
			break;
		if (request == PTRACE_SYSCALL)
			child->ptrace |= PT_TRACESYS;
		else
			child->ptrace &= ~PT_TRACESYS;
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		ret = 0;
		break;

/*
 * make the child exit.  Best I can do is send it a sigkill. 
 * perhaps it should be put in the status that it wants to 
 * exit.
 */
	case PTRACE_KILL:
		ret = 0;
		if (child->state == TASK_ZOMBIE) /* already dead */
			break;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		break;

	case PTRACE_SINGLESTEP:  /* set the trap flag. */
		ret = -EIO;
		if ((unsigned long) data >= _NSIG)
			break;
		child->ptrace &= ~PT_TRACESYS;
		child->exit_code = data;
		set_single_step(child);
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_DETACH:  /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;
	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if(copy_from_user(&parea,(void *)addr,sizeof(parea))==0)  
			ret=copy_user(child,parea.kernel_addr,parea.process_addr,
				      parea.len,1,(request==PTRACE_POKEUSR_AREA));
		else ret = -EFAULT;
		break;
	default:
		ret = -EIO;
		break;
	}
 out_tsk:
	free_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
Example no. 20
0
static int usb_dio_probe(struct usb_interface *intf, const struct usb_device_id *id) {
  struct usb_cdc_union_desc *union_header = NULL;
	unsigned char *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	struct usb_interface *control_interface;
	struct usb_interface *data_interface;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
  int retval = 0;
  
  usb_dio_dev *dev;
  
  /* prevent usb_dio_probe() from racing usb_dio_disconnect() */
  lock_kernel();
  
  FUNC_HI();
  dev = kmalloc(sizeof(usb_dio_dev), GFP_KERNEL);
  if (dev == NULL) {
    err("Out of Memory");
    unlock_kernel();
    FUNC_ERR();
    return -ENOMEM;
  }
	memset(dev, 0x00, sizeof (usb_dio_dev));
  dev->dev = usb_get_dev(interface_to_usbdev(intf));
	kref_init(&(dev->kref));
  usb_set_intfdata(intf, dev);
  
  printk(KERN_ALERT "%s: === Starting device probe ===\n", usb_dio_driver.name);
  
  if (!buflen) {
    printk(KERN_ALERT "%s: === Invalid / unwanted device ===\n", usb_dio_driver.name);
    kfree(dev);		/* don't leak the device struct on this path */
    unlock_kernel();
    FUNC_ERR();
    return -EINVAL;
  }

  DPRINTK(KERN_ALERT "%s: == Chunk size    = %2d ==\n", usb_dio_driver.name, buffer[0]);
  DPRINTK(KERN_ALERT "%s: == Buffer length = %2d ==\n", usb_dio_driver.name, buflen);
	while (buflen > 0) {
		switch (buffer[2]) {
      case USB_CDC_UNION_TYPE: /* we've found it */
        DPRINTK(KERN_ALERT "%s: ==== USB_CDC_UNION_TYPE ==============\n", usb_dio_driver.name);
        if (union_header) {
          DPRINTK(KERN_ALERT "%s: ===== More than one union header! =====\n", usb_dio_driver.name);
          break;
        }
        union_header = (struct usb_cdc_union_desc *)buffer;
        break;
      default:
        DPRINTK(KERN_ALERT "%s: ==== Unwanted default... =============\n", usb_dio_driver.name);
        break;
		}
    DPRINTK(KERN_ALERT "%s: === continuation with %2d remaining... ===\n", usb_dio_driver.name, buflen - buffer[0]);
		buflen -= buffer[0];
		buffer += buffer[0];
	}
  DPRINTK(KERN_ALERT "%s: == complete with %2d remaining ==\n", usb_dio_driver.name, buflen);

  if (!union_header) {
    printk(KERN_ALERT "%s: === no union descriptor found! ===\n", usb_dio_driver.name);
    kfree(dev);
    unlock_kernel();
    FUNC_ERR();
    return -ENODEV;
  }
  control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
  data_interface = usb_ifnum_to_if(usb_dev, union_header->bSlaveInterface0);
  if (!control_interface || !data_interface) {
    printk(KERN_ALERT "%s: === missing interface(s)! ===\n", usb_dio_driver.name);
    kfree(dev);
    unlock_kernel();
    FUNC_ERR();
    return -ENODEV;
  }

  sema_init(&dev->buffer_sem,1);
  sema_init(&dev->buffer_empty_sem,1);

  dev->bulk_in = &(data_interface->cur_altsetting->endpoint[0].desc);
  dev->bulk_in_urb = NULL;
  dev->bulk_in_size = dev->bulk_in->wMaxPacketSize;
  dev->bulk_in_endpointAddr = dev->bulk_in->bEndpointAddress & 0xF;
  dev->bulk_in_endpointPipe = usb_rcvbulkpipe(dev->dev,dev->bulk_in_endpointAddr);
  dev->bulk_in_buffer = kmalloc(dev->bulk_in->wMaxPacketSize, GFP_KERNEL);
  dev->bulk_in_workqueue = create_singlethread_workqueue("Rx");
  INIT_WORK(&(dev->bulk_in_work.work),(void(*)(struct work_struct *))diodev_rx_work);
  dev->bulk_in_work.arg = NULL;
  dev->bulk_in_work.dev = dev;
  
  dev->bulk_out = &(data_interface->cur_altsetting->endpoint[1].desc);
  dev->bulk_out_endpointAddr = dev->bulk_out->bEndpointAddress & 0xF;
  dev->bulk_out_endpointPipe = usb_sndbulkpipe(dev->dev,dev->bulk_out_endpointAddr);
  dev->bulk_out_cb_urb_chain = NULL;
  sema_init(&dev->bulk_out_cb_urb_chain_sem,1);
  dev->bulk_out_urb_chain = NULL;
  sema_init(&dev->bulk_out_urb_chain_sem,1);
  dev->bulk_out_workqueue = create_singlethread_workqueue("Tx");
  INIT_WORK(&(dev->bulk_out_work.work),(void(*)(struct work_struct *))diodev_write_work);
  dev->bulk_out_work.arg = NULL;
  dev->bulk_out_work.dev = dev;
  
  dev->bulk_ctrl = &(control_interface->cur_altsetting->endpoint[0].desc);
  dev->bulk_ctrl_endpointAddr = dev->bulk_ctrl->bEndpointAddress & 0xF;
  
  retval = usb_register_dev(intf, &usb_dio_class);
  if (retval) {
    printk(KERN_ALERT "%s: Not able to get a minor for this device...\n", usb_dio_driver.name);
    usb_set_intfdata(intf, NULL);
  } else {
    printk(KERN_ALERT "%s: Minor device %d\n", usb_dio_driver.name, intf->minor);
  }
  
  dev->running = 1;
  diodev_rx_setup(dev);
  
  unlock_kernel();
  
  FUNC_BYE();
  return retval;
}
Example no. 21
0
/*
 * FIXME: use linux/kthread.h
 */
static int dvb_frontend_thread (void *data)
{
	struct dvb_frontend *fe = (struct dvb_frontend *) data;
	unsigned long timeout;
	char name [15];
	int quality = 0, delay = 3*HZ;
	fe_status_t s;
	int check_wrapped = 0;

	dprintk ("%s\n", __FUNCTION__);

	snprintf (name, sizeof(name), "kdvb-fe-%i", fe->dvb->num);

	lock_kernel ();
	daemonize (name);
	sigfillset (&current->blocked);
	unlock_kernel ();

	fe->status = 0;
	dvb_frontend_init (fe);
	fe->wakeup = 0;

	while (1) {
		up (&fe->sem);      /* is locked when we enter the thread... */

		timeout = wait_event_interruptible_timeout(fe->wait_queue,
							   dvb_frontend_should_wakeup(fe),
							   delay);
		if (0 != dvb_frontend_is_exiting (fe)) {
			/* got signal or quitting */
			break;
		}

		if (current->flags & PF_FREEZE)
			refrigerator(PF_FREEZE);

		if (down_interruptible (&fe->sem))
			break;

		/* if we've got no parameters, just keep idling */
		if (fe->state & FESTATE_IDLE) {
			delay = 3*HZ;
			quality = 0;
			continue;
		}

retune:
		/* get the frontend status */
		if (fe->state & FESTATE_RETUNE) {
			s = 0;
		} else {
			if (fe->ops->read_status)
				fe->ops->read_status(fe, &s);
			if (s != fe->status) {
				dvb_frontend_add_event (fe, s);
				fe->status = s;
			}
		}
		/* if we're not tuned, and we have a lock, move to the TUNED state */
		if ((fe->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
			fe->state = FESTATE_TUNED;

			/* if we're tuned, then we have determined the correct inversion */
			if ((!(fe->ops->info.caps & FE_CAN_INVERSION_AUTO)) &&
			    (fe->parameters.inversion == INVERSION_AUTO)) {
				fe->parameters.inversion = fe->inversion;
			}
			continue;
		}

		/* if we are tuned already, check we're still locked */
		if (fe->state & FESTATE_TUNED) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);

			/* we're tuned, and the lock is still good... */
			if (s & FE_HAS_LOCK)
				continue;
			else {
				/* if we _WERE_ tuned, but now don't have a lock,
				 * need to zigzag */
				fe->state = FESTATE_ZIGZAG_FAST;
				fe->started_auto_step = fe->auto_step;
				check_wrapped = 0;
			}
		}

		/* don't actually do anything if we're in the LOSTLOCK state,
		 * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */
		if ((fe->state & FESTATE_LOSTLOCK) &&
		    (fe->ops->info.caps & FE_CAN_RECOVER) && (fe->max_drift == 0)) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
			continue;
		}

		/* don't do anything if we're in the DISEQC state, since this
		 * might be someone with a motorized dish controlled by DISEQC.
		 * If it's actually a re-tune, there will be a SET_FRONTEND soon enough. */
		if (fe->state & FESTATE_DISEQC) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
			continue;
		}

		/* if we're in the RETUNE state, set everything up for a brand
		 * new scan, keeping the current inversion setting, as the next
		 * tune is _very_ likely to require the same */
		if (fe->state & FESTATE_RETUNE) {
			fe->lnb_drift = 0;
			fe->auto_step = 0;
			fe->auto_sub_step = 0;
			fe->started_auto_step = 0;
			check_wrapped = 0;
		}

		/* fast zigzag. */
		if ((fe->state & FESTATE_SEARCHING_FAST) || (fe->state & FESTATE_RETUNE)) {
			delay = fe->min_delay;

			/* perform a tune */
			if (dvb_frontend_autotune(fe, check_wrapped)) {
				/* we've run out of trials at the fast speed;
				 * drop back to slow for the _next_ attempt */
				fe->state = FESTATE_SEARCHING_SLOW;
				fe->started_auto_step = fe->auto_step;
				continue;
			}
			check_wrapped = 1;

			/* if we've just retuned, enter the ZIGZAG_FAST state.
			 * This ensures we cannot return from an
			 * FE_SET_FRONTEND ioctl before the first frontend tune
			 * occurs */
			if (fe->state & FESTATE_RETUNE) {
				fe->state = FESTATE_TUNING_FAST;
				goto retune;
			}
		}

		/* slow zigzag */
		if (fe->state & FESTATE_SEARCHING_SLOW) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
		    
			/* Note: don't bother checking for wrapping; we stay in this
			 * state until we get a lock */
			dvb_frontend_autotune(fe, 0);
		}
	}

	if (dvb_shutdown_timeout) {
		if (dvb_powerdown_on_sleep)
			if (fe->ops->set_voltage)
				fe->ops->set_voltage(fe, SEC_VOLTAGE_OFF);
		if (fe->ops->sleep)
			fe->ops->sleep(fe);
	}

	fe->thread_pid = 0;
	mb();

	dvb_frontend_wakeup(fe);
	return 0;
}
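The FIXME at the top of this example is worth heeding: the daemonize()/sigfillset() preamble and the pid/wakeup handshake at the bottom are exactly what linux/kthread.h replaces. A minimal sketch of the same loop shape on that API (fe_thread is a hypothetical name and the tuning work is elided):

#include <linux/kthread.h>
#include <linux/delay.h>

static int fe_thread(void *data)
{
	/* no daemonize()/signal setup needed for a kthread */
	while (!kthread_should_stop()) {
		/* ... read status, retune, adjust the poll delay ... */
		msleep_interruptible(1000);
	}
	return 0;
}

/* creation:  task = kthread_run(fe_thread, fe, "kdvb-fe-%i", num);
 * teardown:  kthread_stop(task);  -- replaces fe->thread_pid + dvb_frontend_wakeup() */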
Example no. 22
/*
 * Read a directory, using filldir to fill the dirent memory.
 * smb_proc_readdir does the actual reading from the smb server.
 *
 * The cache code is almost directly taken from ncpfs
 */
static int 
smb_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *dir = dentry->d_inode;
	struct smb_sb_info *server = server_from_dentry(dentry);
	union  smb_dir_cache *cache = NULL;
	struct smb_cache_control ctl;
	struct page *page = NULL;
	int result;

	ctl.page  = NULL;
	ctl.cache = NULL;

	VERBOSE("reading %s/%s, f_pos=%d\n",
		DENTRY_PATH(dentry),  (int) filp->f_pos);

	result = 0;

	lock_kernel();

	switch ((unsigned int) filp->f_pos) {
	case 0:
		if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
			goto out;
		filp->f_pos = 1;
		/* fallthrough */
	case 1:
		if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR) < 0)
			goto out;
		filp->f_pos = 2;
	}

	/*
	 * Make sure our inode is up-to-date.
	 */
	result = smb_revalidate_inode(dentry);
	if (result)
		goto out;


	page = grab_cache_page(&dir->i_data, 0);
	if (!page)
		goto read_really;

	ctl.cache = cache = kmap(page);
	ctl.head  = cache->head;

	if (!PageUptodate(page) || !ctl.head.eof) {
		VERBOSE("%s/%s, page uptodate=%d, eof=%d\n",
			 DENTRY_PATH(dentry), PageUptodate(page),ctl.head.eof);
		goto init_cache;
	}

	if (filp->f_pos == 2) {
		if (jiffies - ctl.head.time >= SMB_MAX_AGE(server))
			goto init_cache;

		/*
		 * N.B. ncpfs checks mtime of dentry too here, we don't.
		 *   1. common smb servers do not update mtime on dir changes
		 *   2. it requires an extra smb request
		 *      (revalidate has the same timeout as ctl.head.time)
		 *
		 * Instead smbfs invalidates its own cache on local changes
		 * and remote changes are not seen until timeout.
		 */
	}

	if (filp->f_pos > ctl.head.end)
		goto finished;

	ctl.fpos = filp->f_pos + (SMB_DIRCACHE_START - 2);
	ctl.ofs  = ctl.fpos / SMB_DIRCACHE_SIZE;
	ctl.idx  = ctl.fpos % SMB_DIRCACHE_SIZE;

	for (;;) {
		if (ctl.ofs != 0) {
			ctl.page = find_lock_page(&dir->i_data, ctl.ofs);
			if (!ctl.page)
				goto invalid_cache;
			ctl.cache = kmap(ctl.page);
			if (!PageUptodate(ctl.page))
				goto invalid_cache;
		}
		while (ctl.idx < SMB_DIRCACHE_SIZE) {
			struct dentry *dent;
			int res;

			dent = smb_dget_fpos(ctl.cache->dentry[ctl.idx],
					     dentry, filp->f_pos);
			if (!dent)
				goto invalid_cache;

			res = filldir(dirent, dent->d_name.name,
				      dent->d_name.len, filp->f_pos,
				      dent->d_inode->i_ino, DT_UNKNOWN);
			dput(dent);
			if (res)
				goto finished;
			filp->f_pos += 1;
			ctl.idx += 1;
			if (filp->f_pos > ctl.head.end)
				goto finished;
		}
		if (ctl.page) {
			kunmap(ctl.page);
			SetPageUptodate(ctl.page);
			unlock_page(ctl.page);
			page_cache_release(ctl.page);
			ctl.page = NULL;
		}
		ctl.idx  = 0;
		ctl.ofs += 1;
	}
invalid_cache:
	if (ctl.page) {
		kunmap(ctl.page);
		unlock_page(ctl.page);
		page_cache_release(ctl.page);
		ctl.page = NULL;
	}
	ctl.cache = cache;
init_cache:
	smb_invalidate_dircache_entries(dentry);
	ctl.head.time = jiffies;
	ctl.head.eof = 0;
	ctl.fpos = 2;
	ctl.ofs = 0;
	ctl.idx = SMB_DIRCACHE_START;
	ctl.filled = 0;
	ctl.valid  = 1;
read_really:
	result = server->ops->readdir(filp, dirent, filldir, &ctl);
	if (result == -ERESTARTSYS && page)
		ClearPageUptodate(page);
	if (ctl.idx == -1)
		goto invalid_cache;	/* retry */
	ctl.head.end = ctl.fpos - 1;
	ctl.head.eof = ctl.valid;
finished:
	if (page) {
		cache->head = ctl.head;
		kunmap(page);
		if (result != -ERESTARTSYS)
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (ctl.page) {
		kunmap(ctl.page);
		SetPageUptodate(ctl.page);
		unlock_page(ctl.page);
		page_cache_release(ctl.page);
	}
out:
	unlock_kernel();
	return result;
}
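The cache logic above follows a fixed discipline for page-cache pages under the old (pre-folio) API: grab_cache_page() returns the page locked and referenced, kmap()/kunmap() bracket access to its contents, and SetPageUptodate()/unlock_page()/page_cache_release() must be issued on every exit path, as the finished/invalid_cache labels do. That discipline in isolation (touch_dir_cache is our name; error handling elided):

#include <linux/pagemap.h>
#include <linux/highmem.h>

static void touch_dir_cache(struct address_space *mapping)
{
	struct page *page = grab_cache_page(mapping, 0);	/* locked + referenced */
	char *p;

	if (!page)
		return;
	p = kmap(page);
	/* ... read or update the cached block through p ... */
	kunmap(page);
	SetPageUptodate(page);		/* contents are now valid */
	unlock_page(page);		/* drop the lock grab_cache_page took */
	page_cache_release(page);	/* drop the reference */
}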
Example no. 23
void
i386_init(void)
{
	extern char edata[], end[];

	// Before doing anything else, complete the ELF loading process.
	// Clear the uninitialized global data (BSS) section of our program.
	// This ensures that all static/global variables start out zero.
	memset(edata, 0, end - edata);

	// Initialize the console.
	// Can't call cprintf until after we do this!
	cons_init();

	cprintf("6828 decimal is %o octal!\n", 6828);

	// Lab 2 memory management initialization functions
	mem_init();

	// Lab 3 user environment initialization functions
	env_init();
	trap_init();

	// Lab 4 multiprocessor initialization functions
	mp_init();
	lapic_init();

	// Lab 4 multitasking initialization functions
	pic_init();

	// Acquire the big kernel lock before waking up APs
	// Your code here:
	//spin_initlock(&kernel_lock);
	lock_kernel();


	// Starting non-boot CPUs
	boot_aps();

#if defined(TEST)
	// Don't touch -- used by grading script!
	ENV_CREATE(TEST, ENV_TYPE_USER);
#else
	// Touch all you want.
	//ENV_CREATE(user_primes, ENV_TYPE_USER);
	//ENV_CREATE(user_idle, ENV_TYPE_USER);
	//ENV_CREATE(user_yield, ENV_TYPE_USER);
	ENV_CREATE(user_yield, ENV_TYPE_USER);
	ENV_CREATE(user_yield, ENV_TYPE_USER);
	ENV_CREATE(user_yield, ENV_TYPE_USER);

#endif // TEST*
/*
	cprintf("\n\nenvs[0] = %d, envs[1] = %d, envs[2]= %d\n",envs[0].env_status,
														envs[1].env_status,
														envs[2].env_status
														);
*/

	// Schedule and run the first user environment!
	sched_yield();
}
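lock_kernel() here is not the Linux BKL: in JOS (this is the lab 4 skeleton of MIT's 6.828), it is an inline wrapper around one global spinlock. A sketch matching the course's kern/spinlock.h; treat the details as indicative rather than authoritative:

extern struct spinlock kernel_lock;

static inline void
lock_kernel(void)
{
	spin_lock(&kernel_lock);
}

static inline void
unlock_kernel(void)
{
	spin_unlock(&kernel_lock);
	// Normally we wouldn't need to do this, but QEMU only runs
	// one CPU at a time and has long time slices.  Without the
	// pause, this CPU is likely to reacquire the lock before
	// another CPU has even been given a chance to acquire it.
	asm volatile("pause");
}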
Example no. 24
static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{				/* to ADG or ATO, depending on the case */
	int i;
	unsigned char IndexCard;
	void __iomem *pmem;
	int ret = 0;
	volatile unsigned char byte_reset_it;
	struct st_ram_io *adgl;
	void __user *argp = (void __user *)arg;

	/* In general, the device is only openable by root anyway, so we're not
	   particularly concerned that bogus ioctls can flood the console. */

	adgl = memdup_user(argp, sizeof(struct st_ram_io));
	if (IS_ERR(adgl))
		return PTR_ERR(adgl);

	lock_kernel();	
	IndexCard = adgl->num_card-1;
	 
	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
		static int warncount = 10;
		if (warncount) {
			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
			warncount--;
		}
		kfree(adgl);
		unlock_kernel();
		return -EINVAL;
	}

	switch (cmd) {
		
	case 0:
		pmem = apbs[IndexCard].RamIO;
		for (i = 0; i < sizeof(struct st_ram_io); i++)
			((unsigned char *)adgl)[i]=readb(pmem++);
		if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
			ret = -EFAULT;
		break;
	case 1:
		pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
		for (i = 0; i < 4; i++)
			adgl->conf_end_test[i] = readb(pmem++);
		for (i = 0; i < 2; i++)
			adgl->error_code[i] = readb(pmem++);
		for (i = 0; i < 4; i++)
			adgl->parameter_error[i] = readb(pmem++);
		pmem = apbs[IndexCard].RamIO + VERS;
		adgl->vers = readb(pmem);
		pmem = apbs[IndexCard].RamIO + TYPE_CARD;
		for (i = 0; i < 20; i++)
			adgl->reserv1[i] = readb(pmem++);
		*(int *)&adgl->reserv1[20] =  
			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) + 
			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) + 
			(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) );

		if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
			ret = -EFAULT;
		break;
	case 2:
		pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
		for (i = 0; i < 10; i++)
			writeb(0xff, pmem++);
		writeb(adgl->data_from_pc_ready, 
		       apbs[IndexCard].RamIO + DATA_FROM_PC_READY);

		writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
		
		for (i = 0; i < MAX_BOARD; i++) {
			if (apbs[i].RamIO) {
				byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC);
			}
		}
		break;
	case 3:
		pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
		writeb(adgl->tic_des_from_pc, pmem);
		break;
	case 4:
		pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
		adgl->tic_owner_to_pc     = readb(pmem++);
		adgl->numcard_owner_to_pc = readb(pmem);
		if (copy_to_user(argp, adgl,sizeof(struct st_ram_io)))
			ret = -EFAULT;
		break;
	case 5:
		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
		writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
		writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
		writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
		break;
	case 6:
		printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.1.1.1 $)\n");
		printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards);
		printk(KERN_INFO "Segment of board ........... %X\n", (int) mem);
		printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq);
		for (i = 0; i < MAX_BOARD; i++) {
			int serial;
			char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];

			if (!apbs[i].RamIO)
				continue;

			for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
				boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
			boardname[serial] = 0;

			printk(KERN_INFO "Prom version board %d ....... V%d.%d %s",
			       i+1,
			       (int)(readb(apbs[i].RamIO + VERS) >> 4),
			       (int)(readb(apbs[i].RamIO + VERS) & 0xF),
			       boardname);


			serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) + 
				(readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) + 
				(readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );

			if (serial != 0)
				printk(" S/N %d\n", serial);
			else
				printk("\n");
		}
		if (DeviceErrorCount != 0)
			printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
		if (ReadErrorCount != 0)
			printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
		if (WriteErrorCount != 0)
			printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
		if (waitqueue_active(&FlagSleepRec))
			printk(KERN_INFO "Process in read pending\n");
		for (i = 0; i < MAX_BOARD; i++) {
			if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend))
				printk(KERN_INFO "Process in write pending board %d\n",i+1);
		}
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	if (IndexCard < MAX_BOARD && apbs[IndexCard].RamIO)
		Dummy = readb(apbs[IndexCard].RamIO + VERS);
	kfree(adgl);
	unlock_kernel();
	return ret;
}
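Stripped of the board-specific cases, the handler is the usual ioctl copy-in/dispatch/copy-out shape; memdup_user() collapses the kmalloc()+copy_from_user() pair, and the buffer must be freed on every path. A skeleton of just that shape (my_ioctl and struct my_args are placeholders):

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct my_args { int x; };	/* placeholder payload */

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct my_args *buf;
	long ret = 0;

	buf = memdup_user(argp, sizeof(*buf));	/* kmalloc + copy_from_user in one call */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... dispatch on cmd, reading and writing *buf ... */

	if (copy_to_user(argp, buf, sizeof(*buf)))
		ret = -EFAULT;
	kfree(buf);
	return ret;
}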
Example no. 25
static int __init kernel_init(void * unused)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);
	lock_kernel();

	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_HIGH_MEMORY]);
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);
	/*
	 * Tell the world that we're going to be the grim
	 * reaper of innocent orphaned children.
	 *
	 * We don't want people to have to make incorrect
	 * assumptions about where in the task array this
	 * can be found.
	 */
	init_pid_ns.child_reaper = current;

	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	do_pre_smp_initcalls();
	start_boot_trace();

	smp_init();
	sched_init_smp();

	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		printk(KERN_WARNING "Warning: unable to open an initial console.\n");

	(void) sys_dup(0);
	(void) sys_dup(0);
	/*
	 * check if there is an early userspace init.  If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */

	init_post();
	return 0;
}
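Opening /dev/console followed by two sys_dup(0) calls is what gives init its descriptors 0, 1 and 2: open() returns the lowest free descriptor, which at this point is 0. The same trick expressed as userspace code (a sketch; it assumes no descriptors are open yet):

#include <fcntl.h>
#include <unistd.h>

static void setup_std_fds(void)
{
	int fd = open("/dev/console", O_RDWR);	/* becomes fd 0 if the table is empty */

	if (fd == 0) {
		dup(fd);	/* fd 1: stdout */
		dup(fd);	/* fd 2: stderr */
	}
}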
Example no. 26
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	MOD_INC_USE_COUNT;
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	up(&lockd_start);

	daemonize();
	reparent_to_init();
	sprintf(current->comm, "lockd");

	/* Process request with signals blocked.  */
	spin_lock_irq(&current->sighand->siglock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid)
	{
		long timeout = MAX_SCHEDULE_TIMEOUT;
		if (signalled()) {
			spin_lock_irq(&current->sighand->siglock);
			flush_signals(current);
			spin_unlock_irq(&current->sighand->siglock);
			if (nlmsvc_ops) {
				nlmsvc_ops->detach();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period)
			timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(serv, rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		/*
		 * Look up the NFS client handle. The handle is needed for
		 * all but the GRANTED callback RPCs.
		 */
		rqstp->rq_client = NULL;
		if (nlmsvc_ops) {
			nlmsvc_ops->exp_readlock();
			rqstp->rq_client =
				nlmsvc_ops->exp_getclient(&rqstp->rq_addr);
		}

		if (nlmsvc_grace_period &&
		    time_before(grace_period_expire, jiffies))
			nlmsvc_grace_period = 0;
		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		if (nlmsvc_ops)
			nlmsvc_ops->exp_unlock();
	}

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_ops->detach();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);
		
	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	MOD_DEC_USE_COUNT;
}
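Note how the grace period is tested: time_before(grace_period_expire, jiffies) rather than a plain '<', which keeps the comparison correct across a jiffies wraparound. The idiom in isolation (deadline_passed is our name):

#include <linux/jiffies.h>

/* True once `deadline` lies in the past, even across a jiffies wrap. */
static inline bool deadline_passed(unsigned long deadline)
{
	return time_before(deadline, jiffies);
}

/* typical setup:  unsigned long deadline = jiffies + 10 * HZ; */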
Example no. 27
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	unwind_init();
	lockdep_init();
	debug_objects_early_init();
	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE);
	printk(linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	unwind_setup();
	setup_per_cpu_areas();
	setup_nr_cpu_ids();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	build_all_zonelists();
	page_alloc_init();
	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	sort_main_extable();
	trap_init();
	rcu_init();
	init_IRQ();
	pidhash_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	sched_clock_init();
	profile_init();
	if (!irqs_disabled())
		printk("start_kernel(): bug: interrupts were enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	vmalloc_init();
	vfs_caches_init_early();
	cpuset_init_early();
	page_cgroup_init();
	mem_init();
	enable_debug_pagealloc();
	cpu_hotplug_init();
	kmem_cache_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	calibrate_delay();
	pidmap_init();
	pgtable_cache_init();
	prio_tree_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	vfs_caches_init(num_physpages);
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
Example no. 28
void
trap(struct Trapframe *tf)
{

	// The environment may have set DF and some versions
	// of GCC rely on DF being clear
	asm volatile("cld" ::: "cc");

	// Halt the CPU if some other CPU has called panic()
	extern char *panicstr;
	if (panicstr)
		asm volatile("hlt");

	// Re-acquire the big kernel lock if we were halted in
	// sched_yield()
	if (xchg(&thiscpu->cpu_status, CPU_STARTED) == CPU_HALTED)
		lock_kernel();
	// Check that interrupts are disabled.  If this assertion
	// fails, DO NOT be tempted to fix it by inserting a "cli" in
	// the interrupt path.
	assert(!(read_eflags() & FL_IF));

	if ((tf->tf_cs & 3) == 3) {
		// Trapped from user mode.
		// Acquire the big kernel lock before doing any
		// serious kernel work.
		// LAB 4: Your code here.
		lock_kernel();
		
		assert(curenv);

		// Garbage collect if the current environment is a zombie
		if (curenv->env_status == ENV_DYING) {
			env_free(curenv);
			curenv = NULL;
			sched_yield();
		}

		// Copy trap frame (which is currently on the stack)
		// into 'curenv->env_tf', so that running the environment
		// will restart at the trap point.
		curenv->env_tf = *tf;
		// The trapframe on the stack should be ignored from here on.
		tf = &curenv->env_tf;
	}

	// Record that tf is the last real trapframe so
	// print_trapframe can print some additional information.
	last_tf = tf;

	// Dispatch based on what type of trap occurred
	trap_dispatch(tf);

	// If we made it to this point, then no other environment was
	// scheduled, so we should return to the current environment
	// if doing so makes sense.
	if (curenv && curenv->env_status == ENV_RUNNING)
		env_run(curenv);
	else
		sched_yield();
}
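The xchg() on cpu_status reads the old state and writes CPU_STARTED in one atomic step, so exactly one path can observe CPU_HALTED and re-take the lock. The same idea in portable C11, with our own names:

#include <stdatomic.h>
#include <stdbool.h>

enum { CPU_STARTED, CPU_HALTED };

/* True exactly when this call moved the CPU out of CPU_HALTED,
 * i.e. when the caller is the one that must re-take the lock. */
static bool leave_halted(atomic_int *cpu_status)
{
	return atomic_exchange(cpu_status, CPU_STARTED) == CPU_HALTED;
}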
Example no. 29
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
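Seen from userspace, the magic-number check means a raw syscall must supply the constants itself (the glibc reboot() wrapper fills them in for you). A minimal usage example; it needs CAP_SYS_BOOT and really does restart the machine:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	sync();		/* as the comment above warns, reboot doesn't sync */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART, NULL);
}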
Example no. 30
static void
kerext_process_create(void *data) {
	struct kerargs_process_create *kap = data;
	pid_t							pid;
	PROCESS							*prp, *parent;
	THREAD							*act = actives[KERNCPU];
	int								status, i;
	struct _cred_info				info;

	if((parent = lookup_pid(kap->parent_pid)) == NULL) {
		kererr(act, ESRCH);
		return;
	}

	if (parent->num_processes >= parent->rlimit_vals_soft[RLIMIT_NPROC]) {
		kererr(act, EAGAIN);
		return;
	}

	lock_kernel();

	// Check that we haven't run out of process vector entries
	// The process index & PID_MASK should never be all zeros
	// or all ones. All ones could cause SYNC_*() to return
	// valid looking numbers from an uninitialized sync.
	if((process_vector.nentries - process_vector.nfree) >= PID_MASK - 1) {
		kererr(act, EAGAIN);
		return;
	}

	// Alloc a process entry.
	if((prp = object_alloc(NULL, &process_souls)) == NULL) {
		kererr(act, ENOMEM);
		return;
	}

	if(kap->parent_pid) {
		prp->flags = _NTO_PF_LOADING | _NTO_PF_NOZOMBIE | _NTO_PF_RING0;
		prp->lcp = kap->lcp;
	}
	snap_time(&prp->start_time, 1);

	MUTEX_INIT(prp, &prp->mpartlist_lock);
	MUTEX_INIT(prp, &prp->spartlist_lock);

	CRASHCHECK((kap->extra == NULL) || (kap->extra->mpart_list == NULL) || 
				((kap->extra->spart_list == NULL) && SCHEDPART_INSTALLED()));
	{
		part_list_t  *mpart_list = kap->extra->mpart_list;
		part_list_t  *spart_list = kap->extra->spart_list;

		/* first thing is to associate with all specified partitions */
		for (i=0; i<mpart_list->num_entries; i++)
		{
			if ((status = MEMPART_ASSOCIATE(prp, mpart_list->i[i].id, mpart_list->i[i].flags)) != EOK)
			{
				(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
				(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
				(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
				object_free(NULL, &process_souls, prp);
				kererr(act, status);
				return;
			}
		}
		if (SCHEDPART_INSTALLED())
		{
			for (i=0; i<spart_list->num_entries; i++)
			{
				if ((status = SCHEDPART_ASSOCIATE(prp, spart_list->i[i].id, spart_list->i[i].flags)) != EOK)
				{
					(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
					(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
					(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
					object_free(NULL, &process_souls, prp);
					kererr(act, status);
					return;
				}
			}
		}
	}

	// Allocate a vector for 1 thread but don't get a thread entry.
	if(vector_add(&prp->threads, NULL, 1) == -1) {
		(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
		(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
		object_free(NULL, &process_souls, prp);
		kererr(act, ENOMEM);
		return;
	}

	// Add process to the process table vector.
	if((pid = vector_add(&process_vector, prp, 0)) == -1) {
		(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
		(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
		vector_free(&prp->threads);
		object_free(NULL, &process_souls, prp);
		kererr(act, ENOMEM);
		return;
	}

	prp->boundry_addr = VM_KERN_SPACE_BOUNDRY;
	prp->pid = pid | pid_unique; 	// adjust pid_unique during process destroy
	SIGMASK_SPECIAL(&prp->sig_queue);

	// Call out to allow memory manager to initialize the address space
	if((status = memmgr.mcreate(prp)) != EOK) {
		(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
		(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
		(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
		vector_rem(&process_vector, PINDEX(pid));
		vector_free(&prp->threads);
		object_free(NULL, &process_souls, prp);
		kererr(act, status);
		return;
	}

	// Inherit the parent's information
	info = parent->cred->info;
	info.sgid = parent->cred->info.egid;
	info.suid = 0;			// The loader will set to euid after loading...
	cred_set(&prp->cred, &info);
	prp->seq = 1;

	// inherit setrlimit/getrlimit settings 
	for(i=0; i < RLIM_NLIMITS; i++) {
		prp->rlimit_vals_soft[i] = parent->rlimit_vals_soft[i];
		prp->rlimit_vals_hard[i] = parent->rlimit_vals_hard[i];
	}
	prp->max_cpu_time = parent->max_cpu_time;

	// stop core file generation if RLIMIT_CORE is 0
	if (prp->rlimit_vals_soft[RLIMIT_CORE] == 0) {
		prp->flags |= _NTO_PF_NOCOREDUMP;
	}

	// Inherit default scheduling partition
	// from creating thread.
	prp->default_dpp = SELECT_DPP(act, prp, schedpart_getid(prp));

	prp->pgrp = parent->pgrp;
	prp->umask = parent->umask;
	prp->sig_ignore = parent->sig_ignore;
	SIGMASK_NO_KILLSTOP(&prp->sig_ignore);

	if((prp->limits = lookup_limits(parent->cred->info.euid))  ||
	   (prp->limits = parent->limits)) {
		prp->limits->links++;
	}

	if((prp->session = parent->session)) {
		atomic_add(&prp->session->links, 1);
	}

	// Link the new process in as a child of its creator.
	prp->child = NULL;
	prp->parent = parent;
	prp->sibling = parent->child;
	parent->child = prp;
	++parent->num_processes;
	_TRACE_PR_EMIT_CREATE(prp);
	SETKSTATUS(act, prp);
}
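Each failure path in this function repeats the same disassociate/destroy/free ladder with one more step than the last. The conventional C alternative is a single goto-based unwind; a sketch of the shape only, with hypothetical labels, assuming (as the later paths already do) that the disassociate macros tolerate partitions that were never associated:

	// e.g.:  if((status = memmgr.mcreate(prp)) != EOK) goto fail_pid;
	//        if((pid = vector_add(&process_vector, prp, 0)) == -1) { status = ENOMEM; goto fail_threads; }
	//        if(vector_add(&prp->threads, NULL, 1) == -1) { status = ENOMEM; goto fail_parts; }

fail_pid:
	vector_rem(&process_vector, PINDEX(pid));
fail_threads:
	vector_free(&prp->threads);
fail_parts:
	(void)SCHEDPART_DISASSOCIATE(prp, part_id_t_INVALID);
	(void)MEMPART_DISASSOCIATE(prp, part_id_t_INVALID);
	(void)MUTEX_DESTROY(prp, &prp->mpartlist_lock);
	(void)MUTEX_DESTROY(prp, &prp->spartlist_lock);
	object_free(NULL, &process_souls, prp);
	kererr(act, status);
	return;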