Example #1
/*
 * Init function for virtio
 * devices are in a single page above top of "normal" mem
 */
static int __init kvm_devices_init(void)
{
	int rc;

	if (!MACHINE_IS_KVM)
		return -ENODEV;

	kvm_root = root_device_register("kvm_s390");
	if (IS_ERR(kvm_root)) {
		rc = PTR_ERR(kvm_root);
		printk(KERN_ERR "Could not register kvm_s390 root device\n");
		return rc;
	}

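	/* Map the single page just above the end of guest memory where the host places the device descriptors */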
	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
	if (rc) {
		root_device_unregister(kvm_root);
		return rc;
	}

	kvm_devices = (void *) real_memory_size;

	INIT_WORK(&hotplug_work, hotplug_devices);

	service_subclass_irq_register();
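	/* 0x2603 is the external interrupt code the host raises for virtio notifications */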
	register_external_interrupt(0x2603, kvm_extint_handler);

	scan_devices();
	return 0;
}
Example #2
/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
	unsigned long size = resource_size(&crashk_res);

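	/* Both the start and the size of the crashkernel region must be aligned to KEXEC_CRASH_MEM_ALIGN */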
	BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
	       size % KEXEC_CRASH_MEM_ALIGN);
	if (enable)
		vmem_add_mapping(crashk_res.start, size);
	else
		vmem_remove_mapping(crashk_res.start, size);
}
Example #3
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
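	/* Map the new range in the kernel address space before registering its pages */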
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
Example #4
/*
 * This routine finds the first virtqueue described in the configuration of
 * this device and sets it up.
 */
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct kvm_device *kdev = to_kvmdev(vdev);
	struct kvm_vqconfig *config;
	struct virtqueue *vq;
	int err;

	if (index >= kdev->desc->num_vq)
		return ERR_PTR(-ENOENT);

	if (!name)
		return NULL;

	config = kvm_vq_config(kdev->desc) + index;

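	/* The ring lives above guest memory, so map it before setting up the virtqueue */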
	err = vmem_add_mapping(config->address,
			       vring_size(config->num,
					  KVM_S390_VIRTIO_RING_ALIGN));
	if (err)
		goto out;

	vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
				 vdev, true, (void *) config->address,
				 kvm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}

	/*
	 * Register a callback token.
	 * The host will send this via the external interrupt parameter.
	 */
	config->token = (u64) vq;

	vq->priv = config;
	return vq;
unmap:
	vmem_remove_mapping(config->address,
			    vring_size(config->num,
				       KVM_S390_VIRTIO_RING_ALIGN));
out:
	return ERR_PTR(err);
}
Example #5
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
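	/* Spread the new range across the existing zones; whatever is left over ends up in ZONE_MOVABLE */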
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
Example #6
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	unsigned long nr_pages;
	int rc, zone_enum;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

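	/* Split the range into chunks and add each chunk to ZONE_DMA, ZONE_NORMAL, or ZONE_MOVABLE based on its start pfn */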
	while (size_pages > 0) {
		if (start_pfn < dma_end_pfn) {
			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
				   dma_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_DMA;
		} else if (start_pfn < normal_end_pfn) {
			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
				   normal_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_NORMAL;
		} else {
			nr_pages = size_pages;
			zone_enum = ZONE_MOVABLE;
		}
		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
				 start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}