Example #1
static bool virtio_bln_do_io_request(struct kvm *kvm, struct bln_dev *bdev, struct virt_queue *queue)
{
	struct iovec iov[VIRTIO_BLN_QUEUE_SIZE];
	unsigned int len = 0;
	u16 out, in, head;
	u32 *ptrs, i;

	head	= virt_queue__get_iov(queue, iov, &out, &in, kvm);
	ptrs	= iov[0].iov_base;
	len	= iov[0].iov_len / sizeof(u32);

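	/* Each u32 in the request buffer is a guest page frame number: inflate
	 * returns the page to the host, deflate hands it back to the guest. */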
	for (i = 0 ; i < len ; i++) {
		void *guest_ptr;

		/* Cast to u64 so the shift cannot overflow for guest RAM above 4 GiB. */
		guest_ptr = guest_flat_to_host(kvm, (u64)ptrs[i] << VIRTIO_BALLOON_PFN_SHIFT);
		if (queue == &bdev->vqs[VIRTIO_BLN_INFLATE]) {
			madvise(guest_ptr, 1 << VIRTIO_BALLOON_PFN_SHIFT, MADV_DONTNEED);
			bdev->config.actual++;
		} else if (queue == &bdev->vqs[VIRTIO_BLN_DEFLATE]) {
			bdev->config.actual--;
		}
	}

	virt_queue__set_used_elem(queue, head, len);

	return true;
}
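The inflate path in Example #1 relies on madvise(MADV_DONTNEED) to hand ballooned guest pages back to the host kernel. Below is a minimal, self-contained sketch of that technique on an anonymous private mapping; it is illustrative only and independent of kvmtool.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/* Anonymous mapping standing in for a page of guest RAM. */
	void *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	memset(page, 0xaa, page_size);	/* Dirty the page so it is backed. */

	/*
	 * MADV_DONTNEED drops the backing storage; the next access sees a
	 * fresh zero-filled page. This is what balloon inflate exploits to
	 * return memory to the host.
	 */
	if (madvise(page, page_size, MADV_DONTNEED) < 0) {
		perror("madvise");
		return EXIT_FAILURE;
	}

	printf("first byte after MADV_DONTNEED: 0x%02x\n",
	       ((unsigned char *)page)[0]);	/* Prints 0x00. */

	munmap(page, page_size);
	return EXIT_SUCCESS;
}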
Example #2
u16 virtio_queue_get_head_iovec(struct virtio_queue *vq,
				u16 head, struct virtio_iovec *iov,
				u32 *ret_iov_cnt, u32 *ret_total_len)
{
	struct vring_desc *desc;
	u16 idx, max;
	int i = 0;

	if (!vq->addr) {
		*ret_iov_cnt = 0;
		*ret_total_len = 0;
		return 0;
	}

	idx = head;

	*ret_iov_cnt = 0;
	*ret_total_len = 0;
	max = vq->vring.num;
	desc = vq->vring.desc;

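	/* Indirect descriptor tables are not supported; the translation below is disabled. */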
	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
#if 0
		max = desc[idx].len / sizeof(struct vring_desc);
		desc = guest_flat_to_host(kvm, desc[idx].addr);
		idx = 0;
#endif
	}

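	/*
	 * Walk the descriptor chain, recording each buffer's guest address,
	 * length and direction, and accumulating the total length.
	 */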
	do {
		iov[i].addr = desc[idx].addr;
		iov[i].len = desc[idx].len;

		*ret_total_len += desc[idx].len;

		if (desc[idx].flags & VRING_DESC_F_WRITE) {
			iov[i].flags = 1;  /* Write */
		} else {
			iov[i].flags = 0; /* Read */
		}

		i++;

	} while ((idx = next_desc(desc, idx, max)) != max);

	*ret_iov_cnt = i;

	return head;
}
Example #3
static int smp_pen_init(struct kvm *kvm)
{
	unsigned long pen_size, pen_start, jump_offset;

	if (kvm->nrcpus <= 1)
		return 0;

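	/*
	 * Copy the secondary-CPU holding pen into guest memory and record
	 * where the jump address lives inside it so it can be patched later.
	 */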
	pen_size = &smp_pen_end - &smp_pen_start;
	pen_start = kvm->arch.smp_pen_guest_start;
	jump_offset = &smp_jump_addr - &smp_pen_start;

	kvm->arch.smp_jump_guest_start = pen_start + jump_offset;
	memcpy(guest_flat_to_host(kvm, pen_start), &smp_pen_start, pen_size);

	return 0;
}
Example #4
static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
		   u32 pfn)
{
	struct bln_dev *bdev = dev;
	struct virt_queue *queue;
	void *p;

	compat__remove_message(compat_id);

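	/* The guest supplies the vring's page frame number; map that page into the host. */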
	queue		= &bdev->vqs[vq];
	queue->pfn	= pfn;
	/* Promote to u64 so pfn * page_size cannot overflow 32 bits. */
	p		= guest_flat_to_host(kvm, (u64)queue->pfn * page_size);

	thread_pool__init_job(&bdev->jobs[vq], kvm, virtio_bln_do_io, queue);
	vring_init(&queue->vring, VIRTIO_BLN_QUEUE_SIZE, p, align);

	return 0;
}
Example #5
int load_flat_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *kernel_cmdline)
{
	void *p;
	void *k_start;
	int nr;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = k_start = guest_flat_to_host(kvm, KERNEL_LOAD_ADDR);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;
	if (nr < 0)
		die_perror("read");

	kvm->arch.is64bit = true;
	kvm->arch.entry_point = 0xffffffff81000000ull;

	pr_info("Loaded kernel to 0x%x (%ld bytes)", KERNEL_LOAD_ADDR, (long int)(p - k_start));

	return true;
}
Example #6
bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
				 const char *kernel_cmdline)
{
	void *pos, *kernel_end, *limit;
	unsigned long guest_addr;
	ssize_t file_size;

	/*
	 * Linux requires the initrd and dtb to be mapped inside lowmem,
	 * so we can't just place them at the top of memory.
	 */
	limit = kvm->ram_start + min(kvm->ram_size, (u64)SZ_256M) - 1;

	pos = kvm->ram_start + ARM_KERN_OFFSET(kvm);
	kvm->arch.kern_guest_start = host_to_guest_flat(kvm, pos);
	file_size = read_file(fd_kernel, pos, limit - pos);
	if (file_size < 0) {
		if (errno == ENOMEM)
			die("kernel image too big to contain in guest memory.");

		die_perror("kernel read");
	}
	kernel_end = pos + file_size;
	pr_info("Loaded kernel to 0x%llx (%zd bytes)",
		kvm->arch.kern_guest_start, file_size);

	/*
	 * Now load backwards from the end of memory so the kernel
	 * decompressor has plenty of space to work with. First up is
	 * the device tree blob...
	 */
	pos = limit;
	pos -= (FDT_MAX_SIZE + FDT_ALIGN);
	guest_addr = ALIGN(host_to_guest_flat(kvm, pos), FDT_ALIGN);
	pos = guest_flat_to_host(kvm, guest_addr);
	if (pos < kernel_end)
		die("fdt overlaps with kernel image.");

	kvm->arch.dtb_guest_start = guest_addr;
	pr_info("Placing fdt at 0x%llx - 0x%llx",
		kvm->arch.dtb_guest_start,
		host_to_guest_flat(kvm, limit));
	limit = pos;

	/* ... and finally the initrd, if we have one. */
	if (fd_initrd != -1) {
		struct stat sb;
		unsigned long initrd_start;

		if (fstat(fd_initrd, &sb))
			die_perror("fstat");

		pos -= (sb.st_size + INITRD_ALIGN);
		guest_addr = ALIGN(host_to_guest_flat(kvm, pos), INITRD_ALIGN);
		pos = guest_flat_to_host(kvm, guest_addr);
		if (pos < kernel_end)
			die("initrd overlaps with kernel image.");

		initrd_start = guest_addr;
		file_size = read_file(fd_initrd, pos, limit - pos);
		if (file_size < 0) {
			if (errno == ENOMEM)
				die("initrd too big to contain in guest memory.");

			die_perror("initrd read");
		}

		kvm->arch.initrd_guest_start = initrd_start;
		kvm->arch.initrd_size = file_size;
		pr_info("Loaded initrd to 0x%llx (%llu bytes)",
			kvm->arch.initrd_guest_start,
			kvm->arch.initrd_size);
	} else {
		kvm->arch.initrd_size = 0;
	}

	return true;
}
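Examples #5, #6 and #8 all copy a file image into guest memory; Example #6 does so through a read_file() helper whose body is not shown here. The following is a hypothetical sketch of such a helper, handling short reads and signalling ENOMEM when the image does not fit, matching the convention the caller above checks for. The name read_file_sketch and its exact behaviour are assumptions, not kvmtool's actual implementation.

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Read the whole of 'fd' into 'buf', which can hold at most 'max' bytes.
 * Returns the number of bytes read, or -1 with errno set; errno is ENOMEM
 * when the file does not fit in the buffer. (Hypothetical helper.)
 */
ssize_t read_file_sketch(int fd, void *buf, size_t max)
{
	size_t total = 0;

	while (total < max) {
		ssize_t nr = read(fd, (char *)buf + total, max - total);

		if (nr < 0)
			return -1;		/* errno set by read() */
		if (nr == 0)
			return (ssize_t)total;	/* EOF: whole file read */

		total += nr;
	}

	/* Buffer full: check whether the file really ended here. */
	{
		char dummy;
		ssize_t nr = read(fd, &dummy, 1);

		if (nr == 0)
			return (ssize_t)total;
		if (nr > 0)
			errno = ENOMEM;		/* File larger than 'max' */
		return -1;
	}
}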
Example #7
static int setup_fdt(struct kvm *kvm)
{
	struct device_header *dev_hdr;
	u8 staging_fdt[FDT_MAX_SIZE];
	u32 gic_phandle		= fdt__alloc_phandle();
	u64 mem_reg_prop[]	= {
		cpu_to_fdt64(kvm->arch.memory_guest_start),
		cpu_to_fdt64(kvm->ram_size),
	};
	void *fdt		= staging_fdt;
	void *fdt_dest		= guest_flat_to_host(kvm,
						     kvm->arch.dtb_guest_start);
	void (*generate_mmio_fdt_nodes)(void *, struct device_header *,
					void (*)(void *, u8));
	void (*generate_cpu_peripheral_fdt_nodes)(void *, struct kvm *, u32)
					= kvm->cpus[0]->generate_fdt_nodes;

	/* Create new tree without a reserve map */
	_FDT(fdt_create(fdt, FDT_MAX_SIZE));
	_FDT(fdt_finish_reservemap(fdt));

	/* Header */
	_FDT(fdt_begin_node(fdt, ""));
	_FDT(fdt_property_cell(fdt, "interrupt-parent", gic_phandle));
	_FDT(fdt_property_string(fdt, "compatible", "linux,dummy-virt"));
	_FDT(fdt_property_cell(fdt, "#address-cells", 0x2));
	_FDT(fdt_property_cell(fdt, "#size-cells", 0x2));

	/* /chosen */
	_FDT(fdt_begin_node(fdt, "chosen"));
	_FDT(fdt_property_cell(fdt, "linux,pci-probe-only", 1));
	_FDT(fdt_property_string(fdt, "bootargs", kern_cmdline));

	/* Initrd */
	if (kvm->arch.initrd_size != 0) {
		u64 ird_st_prop = cpu_to_fdt64(kvm->arch.initrd_guest_start);
		u64 ird_end_prop = cpu_to_fdt64(kvm->arch.initrd_guest_start +
					       kvm->arch.initrd_size);

		_FDT(fdt_property(fdt, "linux,initrd-start",
				   &ird_st_prop, sizeof(ird_st_prop)));
		_FDT(fdt_property(fdt, "linux,initrd-end",
				   &ird_end_prop, sizeof(ird_end_prop)));
	}
	_FDT(fdt_end_node(fdt));

	/* Memory */
	_FDT(fdt_begin_node(fdt, "memory"));
	_FDT(fdt_property_string(fdt, "device_type", "memory"));
	_FDT(fdt_property(fdt, "reg", mem_reg_prop, sizeof(mem_reg_prop)));
	_FDT(fdt_end_node(fdt));

	/* CPU and peripherals (interrupt controller, timers, etc) */
	generate_cpu_nodes(fdt, kvm);
	if (generate_cpu_peripheral_fdt_nodes)
		generate_cpu_peripheral_fdt_nodes(fdt, kvm, gic_phandle);

	/* Virtio MMIO devices */
	dev_hdr = device__first_dev(DEVICE_BUS_MMIO);
	while (dev_hdr) {
		generate_mmio_fdt_nodes = dev_hdr->data;
		generate_mmio_fdt_nodes(fdt, dev_hdr, generate_irq_prop);
		dev_hdr = device__next_dev(dev_hdr);
	}

	/* IOPORT devices (!) */
	dev_hdr = device__first_dev(DEVICE_BUS_IOPORT);
	while (dev_hdr) {
		generate_mmio_fdt_nodes = dev_hdr->data;
		generate_mmio_fdt_nodes(fdt, dev_hdr, generate_irq_prop);
		dev_hdr = device__next_dev(dev_hdr);
	}

	/* PCI host controller */
	pci__generate_fdt_nodes(fdt, gic_phandle);

	/* PSCI firmware */
	_FDT(fdt_begin_node(fdt, "psci"));
	_FDT(fdt_property_string(fdt, "compatible", "arm,psci"));
	_FDT(fdt_property_string(fdt, "method", "hvc"));
	_FDT(fdt_property_cell(fdt, "cpu_suspend", KVM_PSCI_FN_CPU_SUSPEND));
	_FDT(fdt_property_cell(fdt, "cpu_off", KVM_PSCI_FN_CPU_OFF));
	_FDT(fdt_property_cell(fdt, "cpu_on", KVM_PSCI_FN_CPU_ON));
	_FDT(fdt_property_cell(fdt, "migrate", KVM_PSCI_FN_MIGRATE));
	_FDT(fdt_end_node(fdt));

	/* Finalise. */
	_FDT(fdt_end_node(fdt));
	_FDT(fdt_finish(fdt));

	_FDT(fdt_open_into(fdt, fdt_dest, FDT_MAX_SIZE));
	_FDT(fdt_pack(fdt_dest));

	if (kvm->cfg.arch.dump_dtb_filename)
		dump_fdt(kvm->cfg.arch.dump_dtb_filename, fdt_dest);
	return 0;
}
Example #8
int load_elf_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *kernel_cmdline)
{
	union {
		Elf64_Ehdr ehdr;
		Elf32_Ehdr ehdr32;
	} eh;

	ssize_t nr;
	char *p;
	struct kvm__arch_elf_info ei;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	nr = read(fd_kernel, &eh, sizeof(eh));
	if (nr != sizeof(eh)) {
		pr_info("Couldn't read %d bytes for ELF header.", (int)sizeof(eh));
		return false;
	}

	if (eh.ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
	    eh.ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
	    eh.ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
	    eh.ehdr.e_ident[EI_MAG3] != ELFMAG3 ||
	    (eh.ehdr.e_ident[EI_CLASS] != ELFCLASS64 && eh.ehdr.e_ident[EI_CLASS] != ELFCLASS32) ||
	    eh.ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
		pr_info("Incompatible ELF header.");
		return false;
	}
	if (eh.ehdr.e_type != ET_EXEC || eh.ehdr.e_machine != EM_MIPS) {
		pr_info("Incompatible ELF: not a MIPS executable.");
		return false;
	}

	if (eh.ehdr.e_ident[EI_CLASS] == ELFCLASS64) {
		if (!kvm__arch_get_elf_64_info(&eh.ehdr, fd_kernel, &ei))
			return false;
		kvm->arch.is64bit = true;
	} else {
		if (!kvm__arch_get_elf_32_info(&eh.ehdr32, fd_kernel, &ei))
			return false;
		kvm->arch.is64bit = false;
	}

	kvm->arch.entry_point = ei.entry_point;

	if (lseek(fd_kernel, ei.offset, SEEK_SET) < 0)
		die_perror("lseek");

	p = guest_flat_to_host(kvm, ei.load_addr);

	pr_info("ELF Loading 0x%lx bytes from 0x%llx to 0x%llx",
		(unsigned long)ei.len, (unsigned long long)ei.offset, (unsigned long long)ei.load_addr);
	do {
		nr = read(fd_kernel, p, ei.len);
		if (nr < 0)
			die_perror("read");
		p += nr;
		ei.len -= nr;
	} while (ei.len);

	kvm__mips_install_cmdline(kvm);

	return true;
}