Example #1
/**
 * vm_allocate - allocate zero-filled memory at the specified address
 *
 * If the "anywhere" argument is true, the "addr" argument is
 * ignored and the address of a free region is found
 * automatically.
 *
 * The allocated area is writable and user-accessible by
 * default.  The "addr" and "size" arguments are adjusted to
 * page boundaries.
 */
int
vm_allocate(task_t task, void **addr, size_t size, int anywhere)
{
	int err;
	void *uaddr;

	sched_lock();

	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (umem_copyin(addr, &uaddr, sizeof(*addr))) {
		err = EFAULT;
		goto out;
	}
	if (anywhere == 0 && !user_area(*addr)) {
		err = EACCES;
		goto out;
	}

	err = do_allocate(task->map, &uaddr, size, anywhere);
	if (err == 0) {
		if (umem_copyout(&uaddr, addr, sizeof(uaddr)))
			err = EFAULT;
	}
 out:
	sched_unlock();
	return err;
}
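
A minimal caller-side sketch of the interface documented above. The <sys/prex.h> header and task_self() are assumptions about the surrounding Prex user API, not part of the example:

#include <sys/prex.h>	/* assumed header for the vm_*() and task_self() stubs */

static int
alloc_page_buffer(void **buf)
{
	int err;

	*buf = NULL;	/* "anywhere" is set, so any prior value is ignored */
	err = vm_allocate(task_self(), buf, 4096, 1);
	if (err)
		return err;	/* ESRCH, EPERM, EFAULT, or an allocation failure */

	/* *buf now points to zero-filled, page-aligned, writable memory. */
	return 0;
}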
Example #2
/*
 * Change the attribute of the specified virtual address.
 *
 * The "addr" argument points to a memory region previously
 * allocated through a call to vm_allocate(). The new attribute
 * can be any combination of VMA_READ and VMA_WRITE.
 * Note: VMA_EXEC is not supported yet.
 */
int
vm_attribute(task_t task, void *addr, int attr)
{
	int err;

	sched_lock();
	if (attr == 0 || attr & ~(VMA_READ | VMA_WRITE)) {
		err = EINVAL;
		goto out;
	}
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}

	err = do_attribute(task->map, addr, attr);
 out:
	sched_unlock();
	return err;
}
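
For illustration, a hedged follow-up to the allocation sketch above: dropping write access on a region in the calling task (task_self() and <sys/prex.h> are again assumed, as before):

static int
make_read_only(void *buf)
{
	/* buf must lie inside a region obtained from vm_allocate();
	 * afterwards, writes to it will fault. */
	return vm_attribute(task_self(), buf, VMA_READ);
}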
Example #3
/**
 * vm_map - map another task's memory into the current task.
 *
 * Note: This routine does not support mapping to a specific
 * address.
 */
int
vm_map(task_t target, void *addr, size_t size, void **alloc)
{
	int err;

	sched_lock();
	if (!task_valid(target)) {
		err = ESRCH;
		goto out;
	}
	if (target == cur_task()) {
		err = EINVAL;
		goto out;
	}
	if (!task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}

	err = do_map(target->map, addr, size, alloc);
 out:
	sched_unlock();
	return err;
}
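
A caller-side sketch, assuming the peer task has already communicated the address and size of its region (that IPC step is omitted) and that the caller holds CAP_MEMORY:

static int
map_peer_buffer(task_t peer, void *peer_addr, size_t size, void **local)
{
	/* On success, *local receives the address at which the peer's
	 * region is visible in the current task. */
	return vm_map(peer, peer_addr, size, local);
}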
Example #4
static int net_ioctl(device_t dev, u_long cmd, void *args)
{
	int id = get_id_from_device(dev);
	struct net_softc *nc = net_softc;
	struct net_driver *nd = nc->net_drvs[id];
	dbuf_t dbuf;

	LOG_FUNCTION_NAME_ENTRY();
	if (!task_capable(CAP_NETWORK))
		return EPERM;
	switch (cmd) {

	case NETIO_QUERY_NR_IF:
		if (copyout(&nc->nrdevs, args, sizeof(nc->nrdevs)))
			return EFAULT;
		break;
	case NETIO_GET_IF_CAPS:	
		break;
	case NETIO_GET_STATUS:
		break;
	case NETIO_START:
		nd->ops->start(nd);
		break;
	case NETIO_STOP:
		nd->ops->stop(nd);
		break;
	case NETIO_TX_QBUF:
		if (copyin(args, &dbuf, sizeof(dbuf_t)))
			return EFAULT;
		nd->ops->transmit(nd, dbuf);
		break;
	case NETIO_RX_QBUF:
		if (copyin(args, &dbuf, sizeof(dbuf_t)))
			return EFAULT;
		netdrv_q_rxbuf(nd, dbuf);
		break;
	case NETIO_TX_DQBUF:
		if (netdrv_dq_txbuf(nd, &dbuf))
			return ENOMEM;
		if (copyout(&dbuf, args, sizeof(dbuf_t)))
			return EFAULT;
		break;
	case NETIO_RX_DQBUF:
		if (netdrv_dq_rxbuf(nd, &dbuf))
			return ENOMEM;
		if (copyout(&dbuf, args, sizeof(dbuf_t)))
			return EFAULT;
		break;
	default:
		return EINVAL;
	}
	LOG_FUNCTION_NAME_EXIT(0);
	return 0;
}
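
A user-space sketch of driving this handler. The device name "net", the device_open()/device_ioctl()/device_close() stubs, and the int width of nrdevs are assumptions:

#include <sys/prex.h>	/* assumed header for the device_*() stubs and NETIO_* */

static int
query_interface_count(int *count)
{
	device_t dev;
	int err;

	if ((err = device_open("net", 0, &dev)) != 0)
		return err;
	/* NETIO_QUERY_NR_IF copies the driver count back to "count". */
	err = device_ioctl(dev, NETIO_QUERY_NR_IF, count);
	device_close(dev);
	return err;
}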
Example #5
static int
pm_open(device_t dev, int mode)
{
    struct pm_softc *sc = pm_softc;

    if (!task_capable(CAP_POWERMGMT))
        return EPERM;

    if (sc->isopen > 0)
        return EBUSY;

    sc->isopen++;
    return 0;
}
Example #6
static int
pm_close(device_t dev)
{
    struct pm_softc *sc = pm_softc;

    if (!task_capable(CAP_POWERMGMT))
        return EPERM;

    if (sc->isopen != 1)
        return EINVAL;

    sc->isopen--;
    return 0;
}
Example #7
File: sem.c Project: AndrewD/prex
/*
 * sem_init - initialize a semaphore.
 *
 * sem_init() creates a new semaphore if the specified
 * semaphore does not exist yet. If the semaphore already
 * exists, it is re-initialized only if nobody is waiting for
 * it. The initial semaphore value is set to the requested
 * value.
 */
int
sem_init(sem_t *sem, u_int value)
{
	struct sem *s;
	int err = 0;

	if (value > MAXSEMVAL)
		return EINVAL;
	if (umem_copyin(sem, &s, sizeof(sem)))
		return EFAULT;

	/*
	 * An application can call sem_init() to reset the
	 * value of an existing semaphore, so we have to check
	 * whether the semaphore is already allocated.
	 */
	sched_lock();
	if (s && sem_valid(s)) {
		/*
		 * Semaphore already exists.
		 */
		if (s->task != cur_task() &&
		    !task_capable(CAP_SEMAPHORE))
			err = EPERM;
		else if (event_waiting(&s->event))
			err = EBUSY;
		else
			s->value = value;
	} else {
		/*
		 * Create new semaphore.
		 */
		if ((s = kmem_alloc(sizeof(*s))) == NULL)
			err = ENOSPC;
		else {
			event_init(&s->event, "semaphore");
			s->task = cur_task();
			s->value = value;
			s->magic = SEM_MAGIC;
			if (umem_copyout(&s, sem, sizeof(s))) {
				kmem_free(s);
				err = EFAULT;
			}
		}
	}
	sched_unlock();
	return err;
}
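
A caller-side sketch. Because the kernel checks "s && sem_valid(s)", initializing the handle to NULL guarantees that a fresh semaphore is created rather than an existing one being reset (the <sys/prex.h> header is assumed):

#include <sys/prex.h>	/* assumed header for the sem_*() stubs */

static int
make_binary_sem(sem_t *sem)
{
	*sem = NULL;	/* NULL never passes sem_valid(), so a new
			 * semaphore is created rather than reset */
	return sem_init(sem, 1);	/* initial count of 1 */
}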
Example #8
static int net_close(device_t dev)
{
	int id = get_id_from_device(dev);
	struct net_softc *nc = net_softc;

	if (!task_capable(CAP_NETWORK))
		return EPERM;
	if (id == 0xff) {
		nc->isopen = 0;
	} else {
		struct net_driver *drv =
			nc->net_drvs[id];
		drv->isopen = 0;
	}

	return 0;
}
Example #9
File: sem.c Project: AndrewD/prex
/*
 * sem_copyin - copy a semaphore from user space.
 *
 * It also checks whether the passed semaphore is valid.
 */
static int
sem_copyin(sem_t *usem, sem_t *ksem)
{
	sem_t s;

	if (umem_copyin(usem, &s, sizeof(usem)))
		return EFAULT;
	if (!sem_valid(s))
		return EINVAL;
	/*
	 * Need a capability to access semaphores created
	 * by another task.
	 */
	if (s->task != cur_task() && !task_capable(CAP_SEMAPHORE))
		return EPERM;
	*ksem = s;
	return 0;
}
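
A sketch of how a public semaphore entry point would use this helper; the body below is hypothetical (it is not the real sem_post()), and sched_wakeup() is assumed from the kernel's event API:

static int
sem_post_sketch(sem_t *sem)
{
	sem_t s;
	int err;

	sched_lock();
	if ((err = sem_copyin(sem, &s)) == 0) {
		/* Validate-then-operate: the user handle has been copied
		 * in and checked, so "s" is safe to dereference. */
		s->value++;
		sched_wakeup(&s->event);	/* assumed wakeup primitive */
	}
	sched_unlock();
	return err;
}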
Example #10
/*
 * Deallocate the memory region at the specified address.
 *
 * The "addr" argument points to a memory region previously
 * allocated through a call to vm_allocate() or vm_map(). The
 * number of bytes freed is the size of the allocated region.
 * If the previous or the next region is free, they are merged
 * into one larger free region.
 */
int
vm_free(task_t task, void *addr)
{
	int err;

	sched_lock();
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}

	err = do_free(task->map, addr);
 out:
	sched_unlock();
	return err;
}
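
Completing the allocation sketches above, a hedged example of releasing a region in the calling task (task_self() is assumed, as before):

static int
release_buffer(void *buf)
{
	/* buf must be the base of a region from vm_allocate() or
	 * vm_map(); the whole region is freed. */
	return vm_free(task_self(), buf);
}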
Example #11
static int net_open(device_t dev, int mode)
{
	int id = get_id_from_device(dev);
	struct net_softc *nc = net_softc;

	if (!task_capable(CAP_NETWORK))
		return EPERM;
	if (id == 0xff) {
		if (nc->isopen)
			return EBUSY;
		else {
			nc->isopen = 1;
			return 0;
		}
	} else {
		struct net_driver *drv =
			nc->net_drvs[id];
		if (drv->isopen)
			return EBUSY;
		drv->isopen = 1;
	}

	return 0;
}
Example #12
static int
pm_ioctl(device_t dev, u_long cmd, void *arg)
{
    struct pm_softc *sc = pm_softc;
    int error = 0;
    int policy, state, event;

    if (!task_capable(CAP_POWERMGMT))
        return EPERM;

    switch (cmd) {

    case PMIOC_CONNECT:
        /* Connection request from the power server */
        if (copyin(arg, &sc->powtask, sizeof(task_t)))
            return EFAULT;
        DPRINTF(("pm: connect power server\n"));
        break;

    case PMIOC_QUERY_EVENT:
        event = sc->lastevt;
        sc->lastevt = PME_NO_EVENT;
        if (copyout(&event, arg, sizeof(int)))
            return EFAULT;
        DPRINTF(("pm: query event=%d\n", event));
        break;

    case PMIOC_SET_POWER:
        if (copyin(arg, &state, sizeof(int)))
            return EFAULT;

        switch (state) {
        case PWR_SUSPEND:
        case PWR_OFF:
        case PWR_REBOOT:
            pm_set_power(state);
            break;
        default:
            error = EINVAL;
            break;
        }
        break;

    case PMIOC_GET_POLICY:
        if (copyout(&sc->policy, arg, sizeof(int)))
            return EFAULT;
        DPRINTF(("pm: get policy %d\n", sc->policy));
        break;

    case PMIOC_SET_POLICY:
        if (copyin(arg, &policy, sizeof(int)))
            return EFAULT;
        if (policy != PM_POWERSAVE && policy != PM_PERFORMANCE)
            return EINVAL;

        DPRINTF(("pm: set policy %d\n", policy));

        if (policy == sc->policy) {
            /* same policy */
            break;
        }
        /* Call devctl() routine for all devices */
        device_broadcast(DEVCTL_PM_CHGPOLICY, &policy, 1);

        sc->policy = policy;
        if (policy == PM_POWERSAVE)
            pm_update_timer();
        else
            pm_stop_timer();
        break;

    case PMIOC_GET_SUSTMR:
        if (copyout(&sc->sustime, arg, sizeof(u_long)))
            return EFAULT;
        break;

    case PMIOC_SET_SUSTMR:
        if (copyin(arg, &sc->sustime, sizeof(u_long)))
            return EFAULT;
        DPRINTF(("pm: set sustmr=%d\n", sc->sustime));
        pm_update_timer();
        break;

    case PMIOC_GET_DIMTMR:
        if (copyout(&sc->dimtime, arg, sizeof(u_long)))
            return EFAULT;
        break;

    case PMIOC_SET_DIMTMR:
        if (copyin(arg, &sc->dimtime, sizeof(u_long)))
            return EFAULT;
        DPRINTF(("pm: set dimtmr=%d\n", sc->dimtime));
        pm_update_timer();
        break;

    default:
        return EINVAL;
    }
    return error;
}
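
A user-space sketch of switching the power policy through this handler. The device name "pm" and the device_open()/device_ioctl()/device_close() stubs are assumptions:

#include <sys/prex.h>	/* assumed header for the device_*() stubs and PMIOC_* */

static int
set_powersave(void)
{
	device_t dev;
	int policy = PM_POWERSAVE;
	int err;

	if ((err = device_open("pm", 0, &dev)) != 0)
		return err;
	/* PMIOC_SET_POLICY copies the int in and, on a policy change,
	 * broadcasts DEVCTL_PM_CHGPOLICY to all devices. */
	err = device_ioctl(dev, PMIOC_SET_POLICY, &policy);
	device_close(dev);
	return err;
}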
Example #13
File: msg.c Project: AndrewD/prex
/*
 * Send a message.
 *
 * The current thread is blocked until another thread receives
 * the message and calls msg_reply() on the target object. When
 * a new message arrives at the object, it is received by the
 * highest-priority thread waiting for that message. A thread
 * can send a message to any object if it knows the object ID.
 */
int
msg_send(object_t obj, void *msg, size_t size, u_long timeout)
{
	struct msg_header *hdr;
	thread_t th;
	void *kmsg;
	int rc;

	if (!user_area(msg))
		return EFAULT;

	if (size < sizeof(struct msg_header))
		return EINVAL;

	sched_lock();

	if (!object_valid(obj)) {
		sched_unlock();
		return EINVAL;
	}
	if (obj->owner != cur_task() && !task_capable(CAP_IPC)) {
		sched_unlock();
		return EPERM;
	}
	/*
	 * A thread cannot send a message to an object that
	 * it is already receiving from; doing so would
	 * obviously cause a deadlock.
	 */
	if (obj == cur_thread->recvobj) {
		sched_unlock();
		return EDEADLK;
	}
	/*
	 * Translate the message address to a kernel linear
	 * address so that a receiver thread can access the
	 * message via a kernel pointer. Any page fault is
	 * caught here.
	 */
	if ((kmsg = kmem_map(msg, size)) == NULL) {
		/* Error - no physical address for the message */
		sched_unlock();
		return EFAULT;
	}
	/*
	 * The sender ID in the message header is filled in
	 * by the kernel, so the receiver can trust it.
	 */
	hdr = (struct msg_header *)kmsg;
	hdr->task = cur_task();

	/* Save information about the message block. */
	cur_thread->msgaddr = kmsg;
	cur_thread->msgsize = size;

	/*
	 * If a receiver is already waiting, wake it up.
	 * The highest-priority thread will get this message.
	 */
	if (!queue_empty(&obj->recvq)) {
		th = msg_dequeue(&obj->recvq);
		sched_unsleep(th, 0);
	}
	/*
	 * Sleep until we get a reply message.
	 * Note: Do not touch any data in the object
	 * structure after we wake up, because the target
	 * object may have been deleted while we were
	 * sleeping.
	 */
	cur_thread->sendobj = obj;
	msg_enqueue(&obj->sendq, cur_thread);
	rc = sched_tsleep(&ipc_event, timeout);
	if (rc == SLP_INTR)
		queue_remove(&cur_thread->ipc_link);
	cur_thread->sendobj = NULL;

	sched_unlock();

	/*
	 * Check sleep result.
	 */
	switch (rc) {
	case SLP_BREAK:
		return EAGAIN;	/* Receiver has been terminated */
	case SLP_INVAL:
		return EINVAL;	/* Object has been deleted */
	case SLP_INTR:
		return EINTR;	/* Exception */
	case SLP_TIMEOUT:
		return ETIMEDOUT;	/* Timeout */
	default:
		/* DO NOTHING */
		break;
	}
	return 0;
}
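
A caller-side sketch using the four-argument form shown above. The message must begin with a struct msg_header; object_lookup(), the object name, and the payload layout are illustrative assumptions:

#include <sys/prex.h>	/* assumed header for the IPC stubs and struct msg_header */

struct ping_msg {
	struct msg_header hdr;	/* hdr.task is filled in by the kernel */
	int payload;
};

static int
send_ping(void)
{
	struct ping_msg m;
	object_t obj;
	int err;

	if ((err = object_lookup("/example/server", &obj)) != 0)
		return err;

	m.payload = 42;
	/* Blocks until the receiver calls msg_reply(); a timeout of 0 is
	 * assumed to mean "no timeout".  Failures surface as EAGAIN,
	 * EINVAL, EINTR, or ETIMEDOUT, as listed above. */
	return msg_send(obj, &m, sizeof(m), 0);
}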