Example #1
int vmm_semaphore_down(struct vmm_semaphore *sem)
{
	int rc;
	u32 value;

	/* Sanity Check */
	BUG_ON(!sem, "%s: NULL pointer to semaphore\n", __func__);
	BUG_ON(!vmm_scheduler_orphan_context(), 
		"%s: Down allowed in Orphan VCPU (or Thread) context only\n",
		 __func__);

	/* Decrement the semaphore */
	rc = VMM_EFAIL;
	while (rc) {
		/* Sleep if semaphore not available */
		while (!(value = arch_cpu_atomic_read(&sem->value))) {
			vmm_waitqueue_sleep(&sem->wq);
		}

		/* Try to decrement the semaphore */
		rc = arch_cpu_atomic_testnset(&sem->value, value, value - 1);
	}

	return rc;
}
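A minimal usage sketch for the routine above: one thread blocks in vmm_semaphore_down() until another context raises the count. The counterpart helpers (vmm_semaphore_up() and the semaphore initializer) are assumed from the usual Xvisor API and are not part of the example itself.

static struct vmm_semaphore work_sem;	/* assumed to be initialized elsewhere with an initial count of 0 */

/* Consumer: must run in Orphan VCPU (thread) context,
 * exactly as the BUG_ON() in vmm_semaphore_down() demands. */
static void consume_one(void)
{
	vmm_semaphore_down(&work_sem);	/* sleeps on sem->wq until the count is non-zero */
	/* ... handle one unit of work ... */
}

/* Producer: raises the count and wakes a sleeper. */
static void produce_one(void)
{
	/* ... queue one unit of work ... */
	vmm_semaphore_up(&work_sem);	/* assumed counterpart of vmm_semaphore_down() */
}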
Example #2
int vmm_mutex_unlock(struct vmm_mutex *mut)
{
	int rc = VMM_EINVALID;
	irq_flags_t flags;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	if (mut->lock && mut->owner == current_vcpu) {
		mut->lock--;
		if (!mut->lock) {
			mut->owner = NULL;
			vmm_manager_vcpu_resource_remove(current_vcpu,
							 &mut->res);
			rc = __vmm_waitqueue_wakeall(&mut->wq);
			if (rc == VMM_ENOENT) {
				rc = VMM_OK;
			}
		} else {
			rc = VMM_OK;
		}
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
Example #3
int vmm_mutex_trylock(struct vmm_mutex *mut)
{
	int ret = 0;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irq(&mut->wq.lock);

	if (!mut->lock) {
		mut->lock++;
		vmm_manager_vcpu_resource_add(current_vcpu, &mut->res);
		mut->owner = current_vcpu;
		ret = 1;
	} else if (mut->owner == current_vcpu) {
		/*
		 * If the VCPU owning the lock tries to acquire it again,
		 * let it acquire the lock multiple times (as per the POSIX
		 * recursive mutex behaviour).
		 */
		mut->lock++;
		ret = 1;
	}

	vmm_spin_unlock_irq(&mut->wq.lock);

	return ret;
}
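A short, non-blocking usage sketch built on the trylock above. It only assumes the vmm_mutex_unlock() counterpart already shown in Example #2; the wrapper name try_update_stats() is made up for illustration.

/* Returns TRUE if the shared state was updated, FALSE if the mutex was
 * held by another VCPU (the owner itself would succeed recursively). */
static bool try_update_stats(struct vmm_mutex *lock)
{
	if (!vmm_mutex_trylock(lock)) {
		return FALSE;	/* contended: skip this round instead of sleeping */
	}
	/* ... touch the state protected by 'lock' ... */
	vmm_mutex_unlock(lock);
	return TRUE;
}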
Example #4
static int mutex_lock_common(struct vmm_mutex *mut, u64 *timeout)
{
    int rc = VMM_OK;

    BUG_ON(!mut);
    BUG_ON(!vmm_scheduler_orphan_context());

    vmm_spin_lock_irq(&mut->wq.lock);

    while (mut->lock) {
        rc = __vmm_waitqueue_sleep(&mut->wq, timeout);
        if (rc) {
            /* Timeout or some other failure */
            break;
        }
    }
    if (rc == VMM_OK) {
        mut->lock = 1;
        mut->owner = vmm_scheduler_current_vcpu();
    }

    vmm_spin_unlock_irq(&mut->wq.lock);

    return rc;
}
Example #5
int vmm_mutex_unlock(struct vmm_mutex *mut)
{
    int rc = VMM_OK;
    irq_flags_t flags;

    BUG_ON(!mut);
    BUG_ON(!vmm_scheduler_orphan_context());

    vmm_spin_lock_irqsave(&mut->wq.lock, flags);

    if (mut->lock && mut->owner == vmm_scheduler_current_vcpu()) {
        mut->lock = 0;
        mut->owner = NULL;
        rc = __vmm_waitqueue_wakeall(&mut->wq);
    }

    vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

    return rc;
}
Example #6
static int completion_wait_common(struct vmm_completion *cmpl, u64 *timeout)
{
	int rc = VMM_OK;

	BUG_ON(!cmpl);
	BUG_ON(arch_cpu_irq_disabled());
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irq(&cmpl->wq.lock);

	if (!cmpl->done) {
		rc = __vmm_waitqueue_sleep(&cmpl->wq, timeout);
	}
	if (cmpl->done) {
		cmpl->done--;
	}

	vmm_spin_unlock_irq(&cmpl->wq.lock);

	return rc;
}
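For context, a hedged sketch of how the wait helper above is typically paired with a signaller. The public wrappers vmm_completion_wait() and vmm_completion_complete() are assumed from the usual Xvisor API; only completion_wait_common() itself appears in the example.

static struct vmm_completion io_done;	/* assumed to be initialized elsewhere with done = 0 */

/* Waiter (Orphan VCPU context): sleeps until 'done' becomes non-zero,
 * then consumes one completion count. */
static void wait_for_io(void)
{
	vmm_completion_wait(&io_done);	/* assumed wrapper around completion_wait_common(cmpl, NULL) */
}

/* Signaller (another thread or deferred work): bumps 'done' and wakes a waiter. */
static void io_finished(void)
{
	vmm_completion_complete(&io_done);
}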
Example #7
static int mutex_lock_common(struct vmm_mutex *mut, u64 *timeout)
{
	int rc = VMM_OK;
	irq_flags_t flags;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	while (mut->lock) {
		/*
		 * If the VCPU owning the lock tries to acquire it again,
		 * let it acquire the lock multiple times (as per the POSIX
		 * recursive mutex behaviour).
		 */
		if (mut->owner == current_vcpu) {
			break;
		}
		rc = __vmm_waitqueue_sleep(&mut->wq, timeout);
		if (rc) {
			/* Timeout or some other failure */
			break;
		}
	}
	if (rc == VMM_OK) {
		if (!mut->lock) {
			mut->lock = 1;
			vmm_manager_vcpu_resource_add(current_vcpu,
						      &mut->res);
			mut->owner = current_vcpu;
		} else {
			mut->lock++;
		}
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
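To illustrate the recursive behaviour of the lock path above together with the unlock from Example #2: each nested lock bumps mut->lock, and only the final unlock clears the owner and wakes waiters. vmm_mutex_lock() is assumed here to be the public wrapper that calls mutex_lock_common(mut, NULL).

static void inner(struct vmm_mutex *m)
{
	vmm_mutex_lock(m);	/* owner re-enters: lock count goes 1 -> 2 */
	/* ... */
	vmm_mutex_unlock(m);	/* 2 -> 1, mutex is still owned */
}

static void outer(struct vmm_mutex *m)
{
	vmm_mutex_lock(m);	/* 0 -> 1, caller becomes mut->owner */
	inner(m);
	vmm_mutex_unlock(m);	/* 1 -> 0, owner cleared, waiters on mut->wq woken */
}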
Example #8
u32 vmm_chardev_dowrite(struct vmm_chardev *cdev,
			u8 *src, u32 offset, u32 len, bool block)
{
	u32 b;
	bool sleep;

	if (cdev && cdev->write) {
		if (block) {
			b = 0;
			sleep = vmm_scheduler_orphan_context() ? TRUE : FALSE;
			while (b < len) {
				b += cdev->write(cdev, &src[b],
						 offset + b, len - b, sleep);
			}
			return b;
		} else {
			return cdev->write(cdev, src, offset, len, FALSE);
		}
	} else {
		return 0;
	}
}
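A small usage sketch for the blocking path above: with block set to TRUE the helper loops until every byte has been accepted, and it only lets the driver sleep when called from Orphan (thread) context. The wrapper name below is hypothetical.

static void console_write_all(struct vmm_chardev *cdev, u8 *msg, u32 len)
{
	/* Blocking write: loops inside vmm_chardev_dowrite() until all bytes are taken */
	vmm_chardev_dowrite(cdev, msg, 0, len, TRUE);
}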
Example #9
u64 vmm_blockdev_rw(struct vmm_blockdev *bdev, 
			enum vmm_request_type type,
			u8 *buf, u64 off, u64 len)
{
	u8 *tbuf;
	u64 tmp, first_lba, first_off, first_len;
	u64 middle_lba, middle_len;
	u64 last_lba, last_len;

	BUG_ON(!vmm_scheduler_orphan_context());

	if (!buf || !bdev || !len) {
		return 0;
	}

	if ((type != VMM_REQUEST_READ) &&
	    (type != VMM_REQUEST_WRITE)) {
		return 0;
	}

	if ((type == VMM_REQUEST_WRITE) &&
	   !(bdev->flags & VMM_BLOCKDEV_RW)) {
		return 0;
	}

	tmp = bdev->num_blocks * bdev->block_size;
	if ((off >= tmp) || ((off + len) > tmp)) {
		return 0;
	}

	first_lba = udiv64(off, bdev->block_size);
	first_off = off - first_lba * bdev->block_size;
	if (first_off) {
		first_len = bdev->block_size - first_off;
		first_len = (first_len < len) ? first_len : len;
	} else {
		if (len < bdev->block_size) {
			first_len = len;
		} else {
			first_len = 0;
		}
	}

	off += first_len;
	len -= first_len;

	middle_lba = udiv64(off, bdev->block_size);
	middle_len = udiv64(len, bdev->block_size) * bdev->block_size;

	off += middle_len;
	len -= middle_len;

	last_lba = udiv64(off, bdev->block_size);
	last_len = len;

	if (first_len || last_len) {
		tbuf = vmm_malloc(bdev->block_size);
		if (!tbuf) {
			return 0;
		}
	}

	tmp = 0;

	if (first_len) {
		if (blockdev_rw_blocks(bdev, VMM_REQUEST_READ,
					tbuf, first_lba, 1)) {
			goto done;
		}

		if (type == VMM_REQUEST_WRITE) {
			memcpy(&tbuf[first_off], buf, first_len);
			if (blockdev_rw_blocks(bdev, VMM_REQUEST_WRITE,
					tbuf, first_lba, 1)) {
				goto done;
			}
		} else {
			memcpy(buf, &tbuf[first_off], first_len);
		}

		buf += first_len;
		tmp += first_len;
	}

	if (middle_len) {
		if (blockdev_rw_blocks(bdev, type, buf, middle_lba,
				       udiv64(middle_len, bdev->block_size))) {
			goto done;
		}

		buf += middle_len;
		tmp += middle_len;
	}

	if (last_len) {
		if (blockdev_rw_blocks(bdev, VMM_REQUEST_READ,
					tbuf, last_lba, 1)) {
			goto done;
		}

		if (type == VMM_REQUEST_WRITE) {
			memcpy(&tbuf[0], buf, last_len);
			if (blockdev_rw_blocks(bdev, VMM_REQUEST_WRITE,
					tbuf, last_lba, 1)) {
				goto done;
			}
		} else {
			memcpy(buf, &tbuf[0], last_len);
		}

		tmp += last_len;
	}

done:
	if (first_len || last_len) {
		vmm_free(tbuf);
	}

	return tmp;
}
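A worked example of the head/body/tail split computed above, for a hypothetical device with a 512-byte block size and an unaligned request; all numbers are illustrative only.

/*
 * Hypothetical request: block_size = 512, off = 700, len = 2000.
 *
 *   first_lba  = 700 / 512                 = 1
 *   first_off  = 700 - 1 * 512             = 188
 *   first_len  = 512 - 188                 = 324   (partial head block, via tbuf)
 *   middle_lba = (700 + 324) / 512         = 2
 *   middle_len = ((2000 - 324) / 512) * 512 = 1536 (3 whole blocks, direct to buf)
 *   last_lba   = (1024 + 1536) / 512       = 5
 *   last_len   = 2000 - 324 - 1536         = 140   (partial tail block, via tbuf)
 *
 * Total transferred on success: 324 + 1536 + 140 = 2000 bytes.
 */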