Example No. 1
void kvm_cpu__setup_cpuid(struct kvm_cpu *vcpu)
{
	struct kvm_cpuid2 *kvm_cpuid;

	kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) +
			   MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));
	if (!kvm_cpuid)
		die("out of memory");

	kvm_cpuid->nent = MAX_KVM_CPUID_ENTRIES;
	if (ioctl(vcpu->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
		die_perror("KVM_GET_SUPPORTED_CPUID failed");

	filter_cpuid(kvm_cpuid);

	if (ioctl(vcpu->vcpu_fd, KVM_SET_CPUID2, kvm_cpuid) < 0)
		die_perror("KVM_SET_CPUID2 failed");

	free(kvm_cpuid);
}
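die_perror() is never defined in these listings; kvmtool and packetdrill each provide their own fatal-error helper. A minimal sketch of the assumed behavior (report the message plus errno, then exit):

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: the real helpers also prefix the program name and
 * accept printf-style formatting. */
static void die_perror(const char *msg)
{
	perror(msg);		/* prints "msg: <strerror(errno)>" */
	exit(EXIT_FAILURE);
}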
Example No. 2
void drain_notify(int fd) {
  /* Room for at least one inotify_event plus the longest possible name. */
  char buffer[ sizeof(struct inotify_event) + NAME_MAX + 1 + sizeof(int) ];
  int len;

  len = read(fd, buffer, sizeof(buffer));
  if (-1 == len) die_perror("read(notify_fd)");

  return;
}
Example No. 3
void die_perror_args(const char* s, ...) {
  char buffer[BUFSIZ];
  va_list ap;
  int save = errno;
  va_start(ap, s);
  if (vsnprintf(buffer, sizeof(buffer), s, ap) < 0) {
    /* Formatting failed: fall back to the raw format string. */
    strncpy(buffer, s, sizeof(buffer) - 1);
    buffer[sizeof(buffer) - 1] = '\0'; /* strncpy need not terminate */
  }
  va_end(ap);
  errno = save;
  die_perror(buffer);
}
Example No. 4
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	struct kvm *kvm	= vcpu->kvm;
	struct kvm_one_reg reg;
	u32 data;

	/* Who said future-proofing was a good idea? */
	reg.addr = (u64)(unsigned long)&data;

	/* cpsr = IRQs/FIQs masked */
	data	= PSR_I_BIT | PSR_F_BIT | SVC_MODE;
	reg.id	= ARM_CORE_REG(usr_regs.ARM_cpsr);
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die_perror("KVM_SET_ONE_REG failed (cpsr)");

	/* Secondary cores are stopped awaiting PSCI wakeup */
	if (vcpu->cpu_id != 0)
		return;

	/* r0 = 0 */
	data	= 0;
	reg.id	= ARM_CORE_REG(usr_regs.ARM_r0);
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die_perror("KVM_SET_ONE_REG failed (r0)");

	/* r1 = machine type (-1) */
	data	= -1;
	reg.id	= ARM_CORE_REG(usr_regs.ARM_r1);
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die_perror("KVM_SET_ONE_REG failed (r1)");

	/* r2 = physical address of the device tree blob */
	data	= kvm->arch.dtb_guest_start;
	reg.id	= ARM_CORE_REG(usr_regs.ARM_r2);
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die_perror("KVM_SET_ONE_REG failed (r2)");

	/* pc = start of kernel image */
	data	= kvm->arch.kern_guest_start;
	reg.id	= ARM_CORE_REG(usr_regs.ARM_pc);
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die_perror("KVM_SET_ONE_REG failed (pc)");
}
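ARM_CORE_REG() is defined elsewhere in kvmtool. Conceptually it encodes a 32-bit core register id for KVM_SET_ONE_REG from the field's offset inside struct kvm_regs; a sketch, assuming the kernel's KVM ABI macros:

/* Sketch: build a one-reg id for a 32-bit ARM core register.
 * KVM_REG_ARM_CORE_REG(x) is the kernel's offsetof-based index. */
#define ARM_CORE_REG(x)	(KVM_REG_ARM | KVM_REG_SIZE_U32 | \
			 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))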
Example No. 5
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
{
	struct kvm_cpu *vcpu;
	int mmap_size;
	struct kvm_enable_cap papr_cap = { .cap = KVM_CAP_PPC_PAPR };

	vcpu		= kvm_cpu__new(kvm);
	if (!vcpu)
		return NULL;

	vcpu->cpu_id	= cpu_id;

	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
	if (vcpu->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
	if (vcpu->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

	if (ioctl(vcpu->vcpu_fd, KVM_ENABLE_CAP, &papr_cap) < 0)
		die("unable to enable PAPR capability");

	/*
	 * We start all CPUs, directing non-primary threads into the kernel's
	 * secondary start point.  When we come to support SLOF, we will start
	 * only one and SLOF will RTAS call us to ask for others to be
	 * started.  (FIXME: make more generic & interface with whichever
	 * firmware a platform may be using.)
	 */
	vcpu->is_running = true;

	/* Register with IRQ controller (FIXME, assumes XICS) */
	xics_cpu_register(vcpu);

	return vcpu;
}
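PROT_RW is not a standard mmap() constant; kvmtool defines it as shorthand, presumably:

#define PROT_RW (PROT_READ | PROT_WRITE)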
Example No. 6
static int read_image(int fd, void **pos, void *limit)
{
	int count;

	while (((count = xread(fd, *pos, SZ_64K)) > 0) && *pos <= limit)
		*pos += count;

	if (count < 0)
		die_perror("xread");

	return *pos < limit ? 0 : -ENOMEM;
}
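xread() is a project wrapper around read(); judging by its use above, it retries on transient errors. A minimal sketch under that assumption:

#include <errno.h>
#include <unistd.h>

/* Sketch: read() that transparently restarts on EINTR/EAGAIN,
 * returning the byte count, 0 at EOF, or a negative error. */
static ssize_t xread(int fd, void *buf, size_t count)
{
	ssize_t nr;

	do {
		nr = read(fd, buf, count);
	} while (nr < 0 && (errno == EINTR || errno == EAGAIN));

	return nr;
}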
Example No. 7
void kvm__irq_trigger(struct kvm *kvm, int irq)
{
	struct kvm_irq_level irq_level;
	int ret;

	irq_level.irq = irq;
	irq_level.level = 1;

	ret = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level);
	if (ret < 0)
		die_perror("KVM_IRQ_LINE ioctl");
}
Example No. 8
void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	struct kvm_irq_level irq_level;
	int ret;

	irq_level.irq = irq;
	irq_level.level = level ? 1 : 0;

	ret = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level);
	if (ret < 0)
		die_perror("KVM_IRQ_LINE ioctl");
}
Example No. 9
int setup_signalfd(int sig, ...) {
  sigset_t signals;
  va_list ap;
  int signal_fd;

  if (-1 == sigemptyset(&signals)) die_perror("sigemptyset");
  va_start(ap, sig);

  while (0 != sig) {
    if (-1 == sigaddset(&signals, sig)) die_perror_args("sigaddset(%d)", sig);
    sig = va_arg(ap, int);
  }

  va_end(ap);

  if (-1 == sigprocmask(SIG_SETMASK, &signals, NULL)) die_perror("sigprocmask");

  signal_fd = signalfd(-1, &signals, SFD_NONBLOCK|SFD_CLOEXEC);
  if (-1 == signal_fd) die_perror("signalfd");

  return signal_fd;
}
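The variadic signal list is scanned until a 0 argument, so callers must terminate it explicitly. A typical call (signal choice illustrative):

/* Block SIGINT and SIGTERM; deliver them through the returned fd.
 * The trailing 0 is the required sentinel. */
int sfd = setup_signalfd(SIGINT, SIGTERM, 0);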
Example No. 10
void ds_code_gen_index_gen(const char *filename) {
    if (!(index_gen_file = fopen(filename, "w")))
        die_perror("fopen");

    write_generated_file_comment(index_gen_file, __FILE__);
    write_code(index_gen_file, "#include <limits.h>");
    for (uint32_t i = 0; i < sizeof(ds_types) / sizeof(ds_types[0]); i++) {
        struct_fw_index(ds_types[i]);
        struct_index_key_rec_o2o(ds_types[i]);
        struct_index_key_rec_o2m(ds_types[i]);
        index_gen_fw_compar(ds_types[i]);
        index_gen_generate_mem_o2o(ds_types[i]);
        index_gen_generate_mem_o2m(ds_types[i]);
        _index_gen_generate_ds_file(ds_types[i]);
        index_gen_generate_ds_file_o2o(ds_types[i]);
        index_gen_generate_ds_file_o2m(ds_types[i]);
        index_gen_generate_ds_file_o2o_ht(ds_types[i]);
    }

    if (0 != fclose(index_gen_file))
        die_perror("fclose");
}
Example No. 11
/* Write all the code fragments to a newly-chosen temporary file and
 * store the name of the file in code->path.
 */
static void write_code_file(struct code_state *code)
{
	/* mkstemp will fill this in with the actual unique path name. */
	char path_template[] = "/tmp/code_XXXXXX";
	int code_fd = mkstemp(path_template);
	if (code_fd < 0)
		die_perror("error making temp output file for code: mkstemp");

	assert(code->path == NULL);
	code->path = strdup(path_template);

	code->file = fdopen(code_fd, "w");
	if (code->file == NULL)
		die_perror("error opening temp output file for code: fdopen");

	write_preamble(code);
	write_all_fragments(code);

	if (fclose(code->file) != 0)
		die_perror("error closing temp output file for code: fclose");

	code->file = NULL;
}
Example No. 12
/* To avoid issues with TIME_WAIT, FIN_WAIT1, and FIN_WAIT2 we use
 * dynamically-chosen, unique 4-tuples for each test. We implement the
 * picking of unique ports by binding a socket to port 0 and seeing
 * what port we are assigned. Note that we keep the socket fd open for
 * the lifetime of our process to ensure that the port is not
 * reused by a later test.
 */
static u16 ephemeral_port(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (fd < 0)
		die_perror("socket");

	struct sockaddr_in addr;
	socklen_t addrlen = sizeof(addr);
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = 0;		/* let the OS pick the port */
	if (bind(fd, (struct sockaddr *)&addr, addrlen) < 0)
		die_perror("bind");

	memset(&addr, 0, sizeof(addr));
	if (getsockname(fd, (struct sockaddr *)&addr, &addrlen) < 0)
		die_perror("getsockname");
	assert(addr.sin_family == AF_INET);

	if (listen(fd, 1) < 0)
		die_perror("listen");

	return ntohs(addr.sin_port);
}
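A caller would embed the returned port in the address for its test connection, along these lines (illustrative, not from the project):

/* Claim a unique local port for this test run. */
struct sockaddr_in local = { .sin_family = AF_INET,
                             .sin_port = htons(ephemeral_port()) };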
Example No. 13
static void dump_fdt(const char *dtb_file, void *fdt)
{
	int count, fd;

	fd = open(dtb_file, O_CREAT | O_TRUNC | O_RDWR, 0666);
	if (fd < 0)
		die("Failed to write dtb to %s", dtb_file);

	count = write(fd, fdt, FDT_MAX_SIZE);
	if (count < 0)
		die_perror("Failed to dump dtb");

	pr_info("Wrote %d bytes to dtb %s\n", count, dtb_file);
	close(fd);
}
Example No. 14
/* Architecture-specific KVM init */
void kvm__arch_init(struct kvm *kvm, const char *hugetlbfs_path, u64 ram_size)
{
	int ret;

	kvm->ram_start = mmap_anon_or_hugetlbfs(kvm, hugetlbfs_path, ram_size);
	kvm->ram_size = ram_size;

	if (kvm->ram_start == MAP_FAILED)
		die("out of memory");

	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);

	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");
}
Example No. 15
void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
	struct kvm_guest_debug debug = {
		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
		pr_warning("KVM_SET_GUEST_DEBUG failed");
}

static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	struct kvm_msrs *vcpu = calloc(1, sizeof(*vcpu) + (sizeof(struct kvm_msr_entry) * nmsrs));

	if (!vcpu)
		die("out of memory");

	return vcpu;
}

#define KVM_MSR_ENTRY(_index, _data)	\
	(struct kvm_msr_entry) { .index = _index, .data = _data }

static void kvm_cpu__setup_msrs(struct kvm_cpu *vcpu)
{
	unsigned long ndx = 0;

	vcpu->msrs = kvm_msrs__new(100);

	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,	0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,	0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,	0x0);
#ifdef CONFIG_X86_64
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR,			0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR,			0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE,		0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK,		0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR,			0x0);
#endif
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC,		0x0);

	vcpu->msrs->nmsrs	= ndx;

	if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
		die_perror("KVM_SET_MSRS failed");
}
Example No. 16
static bool kvm__arch_get_elf_64_info(Elf64_Ehdr *ehdr, int fd_kernel,
				      struct kvm__arch_elf_info *ei)
{
	int i;
	size_t nr;
	Elf64_Phdr phdr;

	if (ehdr->e_phentsize != sizeof(phdr)) {
		pr_info("Incompatible ELF PHENTSIZE %d", ehdr->e_phentsize);
		return false;
	}

	ei->entry_point = ehdr->e_entry;

	if (lseek(fd_kernel, ehdr->e_phoff, SEEK_SET) < 0)
		die_perror("lseek");

	phdr.p_type = PT_NULL;
	for (i = 0; i < ehdr->e_phnum; i++) {
		nr = read(fd_kernel, &phdr, sizeof(phdr));
		if (nr != sizeof(phdr)) {
			pr_info("Couldn't read %d bytes for ELF PHDR.", (int)sizeof(phdr));
			return false;
		}
		if (phdr.p_type == PT_LOAD)
			break;
	}
	if (phdr.p_type != PT_LOAD) {
		pr_info("No PT_LOAD Program Header found.");
		return false;
	}

	ei->load_addr = phdr.p_paddr;

	if ((ei->load_addr & 0xffffffffc0000000ull) == 0xffffffff80000000ull)
		ei->load_addr &= 0x1ffffffful; /* Convert KSEG{0,1} to physical. */
	if ((ei->load_addr & 0xc000000000000000ull) == 0x8000000000000000ull)
		ei->load_addr &= 0x07ffffffffffffffull; /* Convert XKPHYS to physical. */

	ei->len = phdr.p_filesz;
	ei->offset = phdr.p_offset;

	return true;
}
Example No. 17
struct state *state_new(struct config *config,
                        struct script *script,
                        struct netdev *netdev)
{
	struct state *state = calloc(1, sizeof(struct state));

	if (state == NULL)
		die_perror("calloc");

	if (pthread_mutex_init(&state->mutex, NULL) != 0)
		die_perror("pthread_mutex_init");

	run_lock(state);

	state->config = config;
	state->script = script;
	state->netdev = netdev;
	state->packets = packets_new();
	state->syscalls = syscalls_new(state);
	state->code = code_new(config);
	state->sockets = NULL;
	return state;
}
Example No. 18
void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
	struct kvm_guest_debug debug = {
		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
		pr_warning("KVM_SET_GUEST_DEBUG failed");
}

void kvm_cpu__run(struct kvm_cpu *vcpu)
{
	int err;

	if (!vcpu->is_running)
		return;

	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
	if (err < 0 && (errno != EINTR && errno != EAGAIN))
		die_perror("KVM_RUN failed");
}
Example No. 19
int load_flat_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *kernel_cmdline)
{
	void *p;
	void *k_start;
	int nr;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = k_start = guest_flat_to_host(kvm, KERNEL_LOAD_ADDR);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	if (nr < 0)
		die_perror("read");

	kvm->arch.is64bit = true;
	kvm->arch.entry_point = 0xffffffff81000000ull;

	pr_info("Loaded kernel to 0x%x (%ld bytes)", KERNEL_LOAD_ADDR, (long int)(p - k_start));

	return true;
}
Example No. 20
static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
	/*
	 * FIXME: This assumes PPC64 and Linux guest.  It doesn't use the
	 * OpenFirmware entry method, but instead the "embedded" entry which
	 * passes the FDT address directly.
	 */
	struct kvm_regs *r = &vcpu->regs;

	if (vcpu->cpu_id == 0) {
		r->pc = KERNEL_START_ADDR;
		r->gpr[3] = vcpu->kvm->arch.fdt_gra;
		r->gpr[5] = 0;
	} else {
		r->pc = KERNEL_SECONDARY_START_ADDR;
		r->gpr[3] = vcpu->cpu_id;
	}
	r->msr = 0x8000000000001000UL; /* 64bit, non-HV, ME */

	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
		die_perror("KVM_SET_REGS failed");
}
Example No. 21
void state_free(struct state *state)
{
	/* We have to stop the system call thread first, since it's using
	 * sockets that we want to close and reset.
	 */
	syscalls_free(state, state->syscalls);

	/* Then we close the sockets and reset the connections, while
	 * we still have a netdev for injecting reset packets to free
	 * per-connection kernel state.
	 */
	close_all_sockets(state);

	netdev_free(state->netdev);
	packets_free(state->packets);
	code_free(state->code);

	run_unlock(state);
	if (pthread_mutex_destroy(&state->mutex) != 0)
		die_perror("pthread_mutex_destroy");

	memset(state, 0, sizeof(*state));  /* paranoia to help catch bugs */
	free(state);
}
Example No. 22
bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
				 const char *kernel_cmdline)
{
	void *pos, *kernel_end, *limit;
	unsigned long guest_addr;
	ssize_t file_size;

	/*
	 * Linux requires the initrd and dtb to be mapped inside lowmem,
	 * so we can't just place them at the top of memory.
	 */
	limit = kvm->ram_start + min(kvm->ram_size, (u64)SZ_256M) - 1;

	pos = kvm->ram_start + ARM_KERN_OFFSET(kvm);
	kvm->arch.kern_guest_start = host_to_guest_flat(kvm, pos);
	file_size = read_file(fd_kernel, pos, limit - pos);
	if (file_size < 0) {
		if (errno == ENOMEM)
			die("kernel image too big to contain in guest memory.");

		die_perror("kernel read");
	}
	kernel_end = pos + file_size;
	pr_info("Loaded kernel to 0x%llx (%zd bytes)",
		kvm->arch.kern_guest_start, file_size);

	/*
	 * Now load backwards from the end of memory so the kernel
	 * decompressor has plenty of space to work with. First up is
	 * the device tree blob...
	 */
	pos = limit;
	pos -= (FDT_MAX_SIZE + FDT_ALIGN);
	guest_addr = ALIGN(host_to_guest_flat(kvm, pos), FDT_ALIGN);
	pos = guest_flat_to_host(kvm, guest_addr);
	if (pos < kernel_end)
		die("fdt overlaps with kernel image.");

	kvm->arch.dtb_guest_start = guest_addr;
	pr_info("Placing fdt at 0x%llx - 0x%llx",
		kvm->arch.dtb_guest_start,
		host_to_guest_flat(kvm, limit));
	limit = pos;

	/* ... and finally the initrd, if we have one. */
	if (fd_initrd != -1) {
		struct stat sb;
		unsigned long initrd_start;

		if (fstat(fd_initrd, &sb))
			die_perror("fstat");

		pos -= (sb.st_size + INITRD_ALIGN);
		guest_addr = ALIGN(host_to_guest_flat(kvm, pos), INITRD_ALIGN);
		pos = guest_flat_to_host(kvm, guest_addr);
		if (pos < kernel_end)
			die("initrd overlaps with kernel image.");

		initrd_start = guest_addr;
		file_size = read_file(fd_initrd, pos, limit - pos);
		if (file_size == -1) {
			if (errno == ENOMEM)
				die("initrd too big to contain in guest memory.");

			die_perror("initrd read");
		}

		kvm->arch.initrd_guest_start = initrd_start;
		kvm->arch.initrd_size = file_size;
		pr_info("Loaded initrd to 0x%llx (%llu bytes)",
			kvm->arch.initrd_guest_start,
			kvm->arch.initrd_size);
	} else {
		kvm->arch.initrd_size = 0;
	}

	return true;
}
Example No. 23
File: mkwav.c Project: blucz/tvon
int main() {
    output_file = fopen("output.wav", "wb");
    if (!output_file)
        die_perror("failed to open file");

    int samplerate    = SAMPLE_RATE;
    int channels      = 2;
    int bitspersample = 16;

    int datarate = samplerate * channels * (bitspersample / 8);
    int blockalign = channels * (bitspersample / 8);

    // write wav header
    output_write("RIFF", 4);
    long riff_len_pos = ftell(output_file);     // need to write 36+datalen to riff_len_pos
    write_le32(0);      
    output_write("WAVE", 4);
    output_write("fmt ", 4);
    write_le32(16);
    write_le16(1);
    write_le16(channels);
    write_le32(samplerate);
    write_le32(datarate);
    write_le16(blockalign);
    write_le16(bitspersample);
    output_write("data", 4);
    long wav_data_pos = ftell(output_file);     // need to write datalen to wav_data_pos
    write_le32(0);      

    int i;
    for (i = 0; i < 22050; i++) output_sample(0, 0);    // write 500ms of silence

    write_pulse(1, 4909, 1000000);         // command start
    write_pulse(0, 4320, 1000000);

    i = 0;
    unsigned code = 0xe0e040bf;
    for (i = 0; i < 32; i++) {
        int bit = (code >> (31 - i)) & 0x1;

        if (bit) {
            write_pulse(1, 818, 1000000);         // 1 bit
            write_pulse(0, 1425, 1000000);
        } else {
            write_pulse(1, 818, 1000000);         // 0 bit
            write_pulse(0, 325, 1000000);
        }
    }

    write_pulse(1, 717, 1000000);          // command stop
    write_pulse(0, 717, 1000000);

    for (i = 0; i < 22050; i++) output_sample(0, 0);    // write 500ms of silence

    fseek(output_file, riff_len_pos, SEEK_SET);
    write_le32(36 + datalen);

    fseek(output_file, wav_data_pos, SEEK_SET);
    write_le32(datalen);

    fclose(output_file);

    return 0;
}
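output_write(), write_le16(), and write_le32() come from the same file and are not shown. Plausible sketches of the little-endian writers over the global output_file (bodies assumed, not from the project):

// Sketch: RIFF/WAVE headers are little-endian regardless of host
// byte order, so emit integers one byte at a time.
static void write_le16(int v) {
    unsigned char b[2] = { v & 0xff, (v >> 8) & 0xff };
    fwrite(b, 1, sizeof(b), output_file);
}

static void write_le32(int v) {
    unsigned char b[4] = { v & 0xff, (v >> 8) & 0xff,
                           (v >> 16) & 0xff, (v >> 24) & 0xff };
    fwrite(b, 1, sizeof(b), output_file);
}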
Example No. 24
int
main(int argc, char **argv)
{
	int		i, j;
	int		howfar = 0;
	int		open_flags;
	xfs_off_t	pos, end_pos;
	size_t		length;
	int		c, first_residue, tmp_residue;
	__uint64_t	size, sizeb;
	__uint64_t	numblocks = 0;
	int		wblocks = 0;
	int		num_threads = 0;
	struct dioattr	d;
	int		wbuf_size;
	int		wbuf_align;
	int		wbuf_miniosize;
	int		source_is_file = 0;
	int		buffered_output = 0;
	int		duplicate = 0;
	uint		btree_levels, current_level;
	ag_header_t	ag_hdr;
	xfs_mount_t	*mp;
	xfs_mount_t	mbuf;
	xfs_buf_t	*sbp;
	xfs_sb_t	*sb;
	xfs_agnumber_t	num_ags, agno;
	xfs_agblock_t	bno;
	xfs_daddr_t	begin, next_begin, ag_begin, new_begin, ag_end;
	struct xfs_btree_block *block;
	xfs_alloc_ptr_t	*ptr;
	xfs_alloc_rec_t	*rec_ptr;
	extern char	*optarg;
	extern int	optind;
	libxfs_init_t	xargs;
	thread_args	*tcarg;
	struct stat64	statbuf;

	progname = basename(argv[0]);

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);

	while ((c = getopt(argc, argv, "bdL:V")) != EOF)  {
		switch (c) {
		case 'b':
			buffered_output = 1;
			break;
		case 'd':
			duplicate = 1;
			break;
		case 'L':
			logfile_name = optarg;
			break;
		case 'V':
			printf(_("%s version %s\n"), progname, VERSION);
			exit(0);
		case '?':
			usage();
		}
	}

	if (argc - optind < 2)
		usage();

	if (logfile_name)  {
		logfd = open(logfile_name, O_CREAT|O_WRONLY|O_EXCL, 0600);
	} else  {
		logfile_name = LOGFILE_NAME;
		logfd = mkstemp(logfile_name);
	}

	if (logfd < 0)  {
		fprintf(stderr, _("%s: couldn't open log file \"%s\"\n"),
			progname, logfile_name);
		perror(_("Aborting XFS copy - reason"));
		exit(1);
	}

	if ((logerr = fdopen(logfd, "w")) == NULL)  {
		fprintf(stderr, _("%s: couldn't set up logfile stream\n"),
			progname);
		perror(_("Aborting XFS copy - reason"));
		exit(1);
	}

	source_name = argv[optind];
	source_fd = -1;
	optind++;

	num_targets = argc - optind;
	if ((target = malloc(sizeof(target_control) * num_targets)) == NULL)  {
		do_log(_("Couldn't allocate target array\n"));
		die_perror();
	}
	for (i = 0; optind < argc; i++, optind++)  {
		target[i].name = argv[optind];
		target[i].fd = -1;
		target[i].position = -1;
		target[i].state = INACTIVE;
		target[i].error = 0;
		target[i].err_type = 0;
	}

	parent_pid = getpid();

	if (atexit(killall))  {
		do_log(_("%s: couldn't register atexit function.\n"), progname);
		die_perror();
	}

	/* open up source -- is it a file? */

	open_flags = O_RDONLY;

	if ((source_fd = open(source_name, open_flags)) < 0)  {
		do_log(_("%s:  couldn't open source \"%s\"\n"),
			progname, source_name);
		die_perror();
	}

	if (fstat64(source_fd, &statbuf) < 0)  {
		do_log(_("%s:  couldn't stat source \"%s\"\n"),
			progname, source_name);
		die_perror();
	}

	if (S_ISREG(statbuf.st_mode))
		source_is_file = 1;

	if (source_is_file && platform_test_xfs_fd(source_fd))  {
		if (fcntl(source_fd, F_SETFL, open_flags | O_DIRECT) < 0)  {
			do_log(_("%s: Cannot set direct I/O flag on \"%s\".\n"),
				progname, source_name);
			die_perror();
		}
		if (xfsctl(source_name, source_fd, XFS_IOC_DIOINFO, &d) < 0)  {
			do_log(_("%s: xfsctl on file \"%s\" failed.\n"),
				progname, source_name);
			die_perror();
		}

		wbuf_align = d.d_mem;
		wbuf_size = MIN(d.d_maxiosz, 1 * 1024 * 1024);
		wbuf_miniosize = d.d_miniosz;
	} else  {
		/* set arbitrary I/O params, miniosize at least 1 disk block */

		wbuf_align = getpagesize();
		wbuf_size = 1 * 1024 * 1024;
		wbuf_miniosize = -1;	/* set after mounting source fs */
	}

	if (!source_is_file)  {
		/*
		 * check to make sure a filesystem isn't mounted
		 * on the device
		 */
		if (platform_check_ismounted(source_name, NULL, &statbuf, 0))  {
			do_log(
	_("%s:  Warning -- a filesystem is mounted on the source device.\n"),
				progname);
			do_log(
	_("\t\tGenerated copies may be corrupt unless the source is\n"));
			do_log(
	_("\t\tunmounted or mounted read-only.  Copy proceeding...\n"));
		}
	}

	/* prepare the libxfs_init structure */

	memset(&xargs, 0, sizeof(xargs));
	xargs.isdirect = LIBXFS_DIRECT;
	xargs.isreadonly = LIBXFS_ISREADONLY;

	if (source_is_file)  {
		xargs.dname = source_name;
		xargs.disfile = 1;
	} else
		xargs.volname = source_name;

	if (!libxfs_init(&xargs))  {
		do_log(_("%s: couldn't initialize XFS library\n"
			"%s: Aborting.\n"), progname, progname);
		exit(1);
	}

	/* prepare the mount structure */

	sbp = libxfs_readbuf(xargs.ddev, XFS_SB_DADDR, 1, 0);
	memset(&mbuf, 0, sizeof(xfs_mount_t));
	sb = &mbuf.m_sb;
	libxfs_sb_from_disk(sb, XFS_BUF_TO_SBP(sbp));

	mp = libxfs_mount(&mbuf, sb, xargs.ddev, xargs.logdev, xargs.rtdev, 1);
	if (mp == NULL) {
		do_log(_("%s: %s filesystem failed to initialize\n"
			"%s: Aborting.\n"), progname, source_name, progname);
		exit(1);
	} else if (mp->m_sb.sb_inprogress)  {
		do_log(_("%s %s filesystem failed to initialize\n"
			"%s: Aborting.\n"), progname, source_name, progname);
		exit(1);
	} else if (mp->m_sb.sb_logstart == 0)  {
		do_log(_("%s: %s has an external log.\n%s: Aborting.\n"),
			progname, source_name, progname);
		exit(1);
	} else if (mp->m_sb.sb_rextents != 0)  {
		do_log(_("%s: %s has a real-time section.\n"
			"%s: Aborting.\n"), progname, source_name, progname);
		exit(1);
	}

	source_blocksize = mp->m_sb.sb_blocksize;
	source_sectorsize = mp->m_sb.sb_sectsize;

	if (wbuf_miniosize == -1)
		wbuf_miniosize = source_sectorsize;

	ASSERT(source_blocksize % source_sectorsize == 0);
	ASSERT(source_sectorsize % BBSIZE == 0);

	if (source_blocksize > source_sectorsize)  {
		/* get number of leftover sectors in last block of ag header */

		tmp_residue = ((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize)
					% source_blocksize;
		first_residue = (tmp_residue == 0) ? 0 :
			source_blocksize - tmp_residue;
		ASSERT(first_residue % source_sectorsize == 0);
	} else if (source_blocksize == source_sectorsize)  {
		first_residue = 0;
	} else  {
		do_log(_("Error:  filesystem block size is smaller than the"
			" disk sectorsize.\nAborting XFS copy now.\n"));
		exit(1);
	}

	first_agbno = (((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize)
				+ first_residue) / source_blocksize;
	ASSERT(first_agbno != 0);
	ASSERT( ((((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize)
				+ first_residue) % source_blocksize) == 0);

	/* now open targets */

	open_flags = O_RDWR;

	for (i = 0; i < num_targets; i++)  {
		int	write_last_block = 0;

		if (stat64(target[i].name, &statbuf) < 0)  {
			/* ok, assume it's a file and create it */

			do_out(_("Creating file %s\n"), target[i].name);

			open_flags |= O_CREAT;
			if (!buffered_output)
				open_flags |= O_DIRECT;
			write_last_block = 1;
		} else if (S_ISREG(statbuf.st_mode))  {
			open_flags |= O_TRUNC;
			if (!buffered_output)
				open_flags |= O_DIRECT;
			write_last_block = 1;
		} else  {
			/*
			 * check to make sure a filesystem isn't mounted
			 * on the device
			 */
			if (platform_check_ismounted(target[i].name,
							NULL, &statbuf, 0))  {
				do_log(_("%s:  a filesystem is mounted "
					"on target device \"%s\".\n"
					"%s cannot copy to mounted filesystems."
					"  Aborting\n"),
					progname, target[i].name, progname);
				exit(1);
			}
		}

		target[i].fd = open(target[i].name, open_flags, 0644);
		if (target[i].fd < 0)  {
			do_log(_("%s:  couldn't open target \"%s\"\n"),
				progname, target[i].name);
			die_perror();
		}

		if (write_last_block)  {
			/* ensure regular files are correctly sized */

			if (ftruncate64(target[i].fd, mp->m_sb.sb_dblocks *
						source_blocksize))  {
				do_log(_("%s:  cannot grow data section.\n"),
					progname);
				die_perror();
			}
			if (platform_test_xfs_fd(target[i].fd))  {
				if (xfsctl(target[i].name, target[i].fd,
						XFS_IOC_DIOINFO, &d) < 0)  {
					do_log(
				_("%s:  xfsctl on \"%s\" failed.\n"),
						progname, target[i].name);
					die_perror();
				} else {
					wbuf_align = MAX(wbuf_align, d.d_mem);
					wbuf_size = MIN(d.d_maxiosz, wbuf_size);
					wbuf_miniosize = MAX(d.d_miniosz,
								wbuf_miniosize);
				}
			}
		} else  {
			/* zero-filled block used to size the target device */
			char	lb[XFS_MAX_SECTORSIZE] = { 0 };
			off64_t	off;

			/* ensure device files are sufficiently large */

			off = mp->m_sb.sb_dblocks * source_blocksize;
			off -= sizeof(lb);
			if (pwrite64(target[i].fd, lb, sizeof(lb), off) < 0)  {
				do_log(_("%s:  failed to write last block\n"),
					progname);
				do_log(_("\tIs target \"%s\" too small?\n"),
					target[i].name);
				die_perror();
			}
		}
	}

	/* initialize locks and bufs */

	if (pthread_mutex_init(&glob_masks.mutex, NULL) != 0)  {
		do_log(_("Couldn't initialize global thread mask\n"));
		die_perror();
	}
	glob_masks.num_working = 0;

	if (wbuf_init(&w_buf, wbuf_size, wbuf_align,
					wbuf_miniosize, 0) == NULL)  {
		do_log(_("Error initializing wbuf 0\n"));
		die_perror();
	}

	wblocks = wbuf_size / BBSIZE;

	if (wbuf_init(&btree_buf, MAX(source_blocksize, wbuf_miniosize),
				wbuf_align, wbuf_miniosize, 1) == NULL)  {
		do_log(_("Error initializing btree buf 1\n"));
		die_perror();
	}

	if (pthread_mutex_init(&mainwait,NULL) != 0)  {
		do_log(_("Error creating first semaphore.\n"));
		die_perror();
		exit(1);
	}
	/* need to start out blocking */
	pthread_mutex_lock(&mainwait);

	/* set up sigchild signal handler */

	signal(SIGCHLD, handler);
	signal_maskfunc(SIGCHLD, SIG_BLOCK);

	/* make children */

	if ((targ = malloc(num_targets * sizeof(thread_args))) == NULL)  {
		do_log(_("Couldn't malloc space for thread args\n"));
		die_perror();
		exit(1);
	}

	for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++)  {
		if (!duplicate)
			platform_uuid_generate(&tcarg->uuid);
		else
			platform_uuid_copy(&tcarg->uuid, &mp->m_sb.sb_uuid);

		if (pthread_mutex_init(&tcarg->wait, NULL) != 0)  {
			do_log(_("Error creating thread mutex %d\n"), i);
			die_perror();
			exit(1);
		}
		/* need to start out blocking */
		pthread_mutex_lock(&tcarg->wait);
	}

	for (i = 0, tcarg = targ; i < num_targets; i++, tcarg++)  {
		tcarg->id = i;
		tcarg->fd = target[i].fd;

		target[i].state = ACTIVE;
		num_threads++;

		if (pthread_create(&target[i].pid, NULL,
					begin_reader, (void *)tcarg))  {
			do_log(_("Error creating thread for target %d\n"), i);
			die_perror();
		}
	}

	ASSERT(num_targets == num_threads);

	/* set up statistics */

	num_ags = mp->m_sb.sb_agcount;

	init_bar(mp->m_sb.sb_blocksize / BBSIZE
			* ((__uint64_t)mp->m_sb.sb_dblocks
			    - (__uint64_t)mp->m_sb.sb_fdblocks + 10 * num_ags));

	kids = num_targets;
	block = (struct xfs_btree_block *) btree_buf.data;

	for (agno = 0; agno < num_ags && kids > 0; agno++)  {
		/* read in first blocks of the ag */

		read_ag_header(source_fd, agno, &w_buf, &ag_hdr, mp,
			source_blocksize, source_sectorsize);

		/* set the in_progress bit for the first AG */

		if (agno == 0)
			ag_hdr.xfs_sb->sb_inprogress = 1;

		/* save what we need (agf) in the btree buffer */

		memmove(btree_buf.data, ag_hdr.xfs_agf, source_sectorsize);
		ag_hdr.xfs_agf = (xfs_agf_t *) btree_buf.data;
		btree_buf.length = source_blocksize;

		/* write the ag header out */

		write_wbuf();

		/* traverse btree until we get to the leftmost leaf node */

		bno = be32_to_cpu(ag_hdr.xfs_agf->agf_roots[XFS_BTNUM_BNOi]);
		current_level = 0;
		btree_levels = be32_to_cpu(ag_hdr.xfs_agf->
						agf_levels[XFS_BTNUM_BNOi]);

		ag_end = XFS_AGB_TO_DADDR(mp, agno,
				be32_to_cpu(ag_hdr.xfs_agf->agf_length) - 1)
				+ source_blocksize / BBSIZE;

		for (;;) {
			/* none of this touches the w_buf buffer */

			ASSERT(current_level < btree_levels);

			current_level++;

			btree_buf.position = pos = (xfs_off_t)
				XFS_AGB_TO_DADDR(mp,agno,bno) << BBSHIFT;
			btree_buf.length = source_blocksize;

			read_wbuf(source_fd, &btree_buf, mp);
			block = (struct xfs_btree_block *)
				 ((char *)btree_buf.data +
				  pos - btree_buf.position);

			ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC);

			if (be16_to_cpu(block->bb_level) == 0)
				break;

			ptr = XFS_ALLOC_PTR_ADDR(mp, block, 1,
							mp->m_alloc_mxr[1]);
			bno = be32_to_cpu(ptr[0]);
		}

		/* align first data copy but don't overwrite ag header */

		pos = w_buf.position >> BBSHIFT;
		length = w_buf.length >> BBSHIFT;
		next_begin = pos + length;
		ag_begin = next_begin;

		ASSERT(w_buf.position % source_sectorsize == 0);

		/* handle the rest of the ag */

		for (;;) {
			if (be16_to_cpu(block->bb_level) != 0)  {
				do_log(
			_("WARNING:  source filesystem inconsistent.\n"));
				do_log(
			_("  A leaf btree rec isn't a leaf.  Aborting now.\n"));
				exit(1);
			}

			rec_ptr = XFS_ALLOC_REC_ADDR(mp, block, 1);
			for (i = 0; i < be16_to_cpu(block->bb_numrecs);
							i++, rec_ptr++)  {
				/* calculate in daddr's */

				begin = next_begin;

				/*
				 * protect against pathological case of a
				 * hole right after the ag header in a
				 * mis-aligned case
				 */

				if (begin < ag_begin)
					begin = ag_begin;

				/*
				 * round size up to ensure we copy a
				 * range bigger than required
				 */

				sizeb = XFS_AGB_TO_DADDR(mp, agno, 
					be32_to_cpu(rec_ptr->ar_startblock)) - 
						begin;
				size = roundup(sizeb <<BBSHIFT, wbuf_miniosize);
				if (size > 0)  {
					/* copy extent */

					w_buf.position = (xfs_off_t)
						begin << BBSHIFT;

					while (size > 0)  {
						/*
						 * let lower layer do alignment
						 */
						if (size > w_buf.size)  {
							w_buf.length = w_buf.size;
							size -= w_buf.size;
							sizeb -= wblocks;
							numblocks += wblocks;
						} else  {
							w_buf.length = size;
							numblocks += sizeb;
							size = 0;
						}

						read_wbuf(source_fd, &w_buf, mp);
						write_wbuf();

						w_buf.position += w_buf.length;

						howfar = bump_bar(
							howfar, numblocks);
					}
				}

				/* round next starting point down */

				new_begin = XFS_AGB_TO_DADDR(mp, agno,
						be32_to_cpu(rec_ptr->ar_startblock) +
					 	be32_to_cpu(rec_ptr->ar_blockcount));
				next_begin = rounddown(new_begin,
						w_buf.min_io_size >> BBSHIFT);
			}

			if (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK)
				break;

			/* read in next btree record block */

			btree_buf.position = pos = (xfs_off_t)
				XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(
						block->bb_u.s.bb_rightsib)) << BBSHIFT;
			btree_buf.length = source_blocksize;

			/* let read_wbuf handle alignment */

			read_wbuf(source_fd, &btree_buf, mp);

			block = (struct xfs_btree_block *)
				 ((char *) btree_buf.data +
				  pos - btree_buf.position);

			ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC);
		}

		/*
		 * write out range of used blocks after last range
		 * of free blocks in AG
		 */
		if (next_begin < ag_end)  {
			begin = next_begin;

			sizeb = ag_end - begin;
			size = roundup(sizeb << BBSHIFT, wbuf_miniosize);

			if (size > 0)  {
				/* copy extent */

				w_buf.position = (xfs_off_t) begin << BBSHIFT;

				while (size > 0)  {
					/*
					 * let lower layer do alignment
					 */
					if (size > w_buf.size)  {
						w_buf.length = w_buf.size;
						size -= w_buf.size;
						sizeb -= wblocks;
						numblocks += wblocks;
					} else  {
						w_buf.length = size;
						numblocks += sizeb;
						size = 0;
					}

					read_wbuf(source_fd, &w_buf, mp);
					write_wbuf();

					w_buf.position += w_buf.length;

					howfar = bump_bar(howfar, numblocks);
				}
			}
		}
	}

	if (kids > 0)  {
		if (!duplicate)  {

			/* write a clean log using the specified UUID */
			for (j = 0, tcarg = targ; j < num_targets; j++)  {
				w_buf.owner = tcarg;
				w_buf.length = rounddown(w_buf.size,
							 w_buf.min_io_size);
				pos = write_log_header(
							source_fd, &w_buf, mp);
				end_pos = write_log_trailer(
							source_fd, &w_buf, mp);
				w_buf.position = pos;
				memset(w_buf.data, 0, w_buf.length);

				while (w_buf.position < end_pos)  {
					do_write(tcarg);
					w_buf.position += w_buf.length;
				}
				tcarg++;
			}
		} else {
			num_ags = 1;
		}

		/* reread and rewrite superblocks (UUID and in-progress) */
		/* [backwards, so inprogress bit only updated when done] */

		for (i = num_ags - 1; i >= 0; i--)  {
			read_ag_header(source_fd, i, &w_buf, &ag_hdr, mp,
				source_blocksize, source_sectorsize);
			if (i == 0)
				ag_hdr.xfs_sb->sb_inprogress = 0;

			/* do each thread in turn, each has its own UUID */

			for (j = 0, tcarg = targ; j < num_targets; j++)  {
				platform_uuid_copy(&ag_hdr.xfs_sb->sb_uuid,
							&tcarg->uuid);
				do_write(tcarg);
				tcarg++;
			}
		}

		bump_bar(100, 0);
	}

	check_errors();
	killall();
	pthread_exit(NULL);
	/*NOTREACHED*/
	return 0;
}
Example No. 25
void
handler(int sig)
{
	pid_t	pid;
	int	status, i;

	pid = wait(&status);

	kids--;

	for (i = 0; i < num_targets; i++)  {
		if (target[i].pid == pid)  {
			if (target[i].state == INACTIVE)  {
				/* thread got an I/O error */

				if (target[i].err_type == 0)  {
					do_warn(
		_("%s:  write error on target %d \"%s\" at offset %lld\n"),
						progname, i, target[i].name,
						target[i].position);
				} else  {
					do_warn(
		_("%s:  lseek64 error on target %d \"%s\" at offset %lld\n"),
						progname, i, target[i].name,
						target[i].position);
				}

				do_vfatal(target[i].error,
					_("Aborting target %d - reason"), i);

				if (kids == 0)  {
					do_log(
				_("Aborting XFS copy - no more targets.\n"));
					check_errors();
					pthread_exit(NULL);
				}

				signal(SIGCHLD, handler);
				return;
			} else  {
				/* it just croaked it bigtime, log it */

				do_warn(
	_("%s:  thread %d died unexpectedly, target \"%s\" incomplete\n"),
					progname, i, target[i].name);
				do_warn(_("%s:  offset was probably %lld\n"),
					progname, target[i].position);
				do_fatal(target[i].error,
					_("Aborting XFS copy - reason"));
				pthread_exit(NULL);
			}
		}
	}

	/* unknown child -- something very wrong */

	do_warn(_("%s: Unknown child died (should never happen!)\n"), progname);
	die_perror();
	pthread_exit(NULL);
	signal(SIGCHLD, handler);
}
Example No. 26
/* Delete the temporary file at code->path. */
static void delete_code_file(struct code_state *code)
{
	if ((code->path != NULL) && (unlink(code->path) != 0))
		die_perror("error deleting code file: unlink");
}
Example No. 27
int load_elf_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *kernel_cmdline)
{
	union {
		Elf64_Ehdr ehdr;
		Elf32_Ehdr ehdr32;
	} eh;

	ssize_t nr;
	char *p;
	struct kvm__arch_elf_info ei;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	nr = read(fd_kernel, &eh, sizeof(eh));
	if (nr != sizeof(eh)) {
		pr_info("Couldn't read %d bytes for ELF header.", (int)sizeof(eh));
		return false;
	}

	if (eh.ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
	    eh.ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
	    eh.ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
	    eh.ehdr.e_ident[EI_MAG3] != ELFMAG3 ||
	    (eh.ehdr.e_ident[EI_CLASS] != ELFCLASS64 && eh.ehdr.e_ident[EI_CLASS] != ELFCLASS32) ||
	    eh.ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
		pr_info("Incompatible ELF header.");
		return false;
	}
	if (eh.ehdr.e_type != ET_EXEC || eh.ehdr.e_machine != EM_MIPS) {
		pr_info("Incompatible ELF not MIPS EXEC.");
		return false;
	}

	if (eh.ehdr.e_ident[EI_CLASS] == ELFCLASS64) {
		if (!kvm__arch_get_elf_64_info(&eh.ehdr, fd_kernel, &ei))
			return false;
		kvm->arch.is64bit = true;
	} else {
		if (!kvm__arch_get_elf_32_info(&eh.ehdr32, fd_kernel, &ei))
			return false;
		kvm->arch.is64bit = false;
	}

	kvm->arch.entry_point = ei.entry_point;

	if (lseek(fd_kernel, ei.offset, SEEK_SET) < 0)
		die_perror("lseek");

	p = guest_flat_to_host(kvm, ei.load_addr);

	pr_info("ELF Loading 0x%lx bytes from 0x%llx to 0x%llx",
		(unsigned long)ei.len, (unsigned long long)ei.offset, (unsigned long long)ei.load_addr);
	do {
		nr = read(fd_kernel, p, ei.len);
		if (nr < 0)
			die_perror("read");
		p += nr;
		ei.len -= nr;
	} while (ei.len);

	kvm__mips_install_cmdline(kvm);

	return true;
}
Example No. 28
struct kvm_cpu *kvm_cpu__arch_init(struct kvm *kvm, unsigned long cpu_id)
{
	struct kvm_arm_target *target;
	struct kvm_cpu *vcpu;
	int coalesced_offset, mmap_size, err = -1;
	unsigned int i;
	struct kvm_vcpu_init preferred_init;
	struct kvm_vcpu_init vcpu_init = {
		.features = ARM_VCPU_FEATURE_FLAGS(kvm, cpu_id)
	};

	vcpu = calloc(1, sizeof(struct kvm_cpu));
	if (!vcpu)
		return NULL;

	vcpu->vcpu_fd = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
	if (vcpu->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	mmap_size = ioctl(kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED,
			     vcpu->vcpu_fd, 0);
	if (vcpu->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

	/* Set KVM_ARM_VCPU_PSCI_0_2 if available */
	if (kvm__supports_extension(kvm, KVM_CAP_ARM_PSCI_0_2)) {
		vcpu_init.features[0] |= (1UL << KVM_ARM_VCPU_PSCI_0_2);
	}

	/*
	 * If the preferred target ioctl is successful then
	 * use preferred target else try each and every target type
	 */
	err = ioctl(kvm->vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	if (!err) {
		/* Match preferred target CPU type. */
		target = NULL;
		for (i = 0; i < ARRAY_SIZE(kvm_arm_targets); ++i) {
			if (!kvm_arm_targets[i])
				continue;
			if (kvm_arm_targets[i]->id == preferred_init.target) {
				target = kvm_arm_targets[i];
				break;
			}
		}
		if (!target) {
			target = kvm_arm_generic_target;
			vcpu_init.target = preferred_init.target;
		} else {
			vcpu_init.target = target->id;
		}
		err = ioctl(vcpu->vcpu_fd, KVM_ARM_VCPU_INIT, &vcpu_init);
	} else {
		/* Find an appropriate target CPU type. */
		for (i = 0; i < ARRAY_SIZE(kvm_arm_targets); ++i) {
			if (!kvm_arm_targets[i])
				continue;
			target = kvm_arm_targets[i];
			vcpu_init.target = target->id;
			err = ioctl(vcpu->vcpu_fd, KVM_ARM_VCPU_INIT, &vcpu_init);
			if (!err)
				break;
		}
		if (err)
			die("Unable to find matching target");
	}

	if (err || target->init(vcpu))
		die("Unable to initialise vcpu");

	coalesced_offset = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION,
				 KVM_CAP_COALESCED_MMIO);
	if (coalesced_offset)
		vcpu->ring = (void *)vcpu->kvm_run +
			     (coalesced_offset * PAGE_SIZE);

	/* Populate the vcpu structure. */
	vcpu->kvm		= kvm;
	vcpu->cpu_id		= cpu_id;
	vcpu->cpu_type		= vcpu_init.target;
	vcpu->cpu_compatible	= target->compatible;
	vcpu->is_running	= true;

	return vcpu;
}

void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
}

void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	free(vcpu);
}
Example No. 29
/* To ensure timing that's as consistent as possible, pull all our
 * pages to RAM and pin them there.
 */
void lock_memory(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE))
		die_perror("mlockall(MCL_CURRENT | MCL_FUTURE)");
}
Example No. 30
static void packet_socket_setup(struct packet_socket *psock)
{
	int bpf_fd = -1, val = -1;

	DEBUGP("calling pcap_create() with %s\n", psock->name);
	psock->pcap_in = pcap_create(psock->name, psock->pcap_error);
	if (psock->pcap_in == NULL)
		die_pcap_perror(psock->pcap_in, "pcap_create");
	psock->pcap_out = pcap_create(psock->name, psock->pcap_error);
	if (psock->pcap_out == NULL)
		die_pcap_perror(psock->pcap_out, "pcap_create");

	if (pcap_set_snaplen(psock->pcap_in, PACKET_READ_BYTES) != 0)
		die_pcap_perror(psock->pcap_in, "pcap_set_snaplen");
	if (pcap_set_snaplen(psock->pcap_out, PACKET_READ_BYTES) != 0)
		die_pcap_perror(psock->pcap_out, "pcap_set_snaplen");

	if (pcap_activate(psock->pcap_in) != 0)
		die_pcap_perror(psock->pcap_in,
				"pcap_activate "
				"(OpenBSD: another process (tcpdump?) "
				"using bpf0?)");
	if (pcap_activate(psock->pcap_out) != 0)
		die_pcap_perror(psock->pcap_out,
				"pcap_activate "
				"(OpenBSD: another process (tcpdump?) "
				"using bpf0?)");

	if (pcap_setdirection(psock->pcap_in, PCAP_D_IN) != 0)
		die_pcap_perror(psock->pcap_in, "pcap_setdirection");
	if (pcap_setdirection(psock->pcap_out, PCAP_D_OUT) != 0)
		die_pcap_perror(psock->pcap_out, "pcap_setdirection");

	bpf_fd = pcap_get_selectable_fd(psock->pcap_in);
	if (bpf_fd < 0)
		die_pcap_perror(psock->pcap_in, "pcap_get_selectable_fd");

	/* By default libpcap with BPF waits until a read buffer fills
	 * up before returning any packets. We use BIOCIMMEDIATE to
	 * force the BPF device to return the first packet
	 * immediately.
	 */
	val = 1;
	if (ioctl(bpf_fd, BIOCIMMEDIATE, &val) < 0)
		die_perror("ioctl BIOCIMMEDIATE on bpf fd");
	bpf_fd = pcap_get_selectable_fd(psock->pcap_out);
	if (bpf_fd < 0)
		die_pcap_perror(psock->pcap_out, "pcap_get_selectable_fd");

	/* By default libpcap with BPF waits until a read buffer fills
	 * up before returning any packets. We use BIOCIMMEDIATE to
	 * force the BPF device to return the first packet
	 * immediately.
	 */
	val = 1;
	if (ioctl(bpf_fd, BIOCIMMEDIATE, &val) < 0)
		die_perror("ioctl BIOCIMMEDIATE on bpf fd");

	/* Find data link type. */
	psock->data_link = pcap_datalink(psock->pcap_in);
	DEBUGP("data_link: %d\n", psock->data_link);

	/* Based on the data_link type, calculate the offset of the
	 * packet data in the buffer.
	 */
	switch (psock->data_link) {
	case DLT_EN10MB:
		psock->pcap_offset = sizeof(struct ether_header);
		break;
	case DLT_LOOP:
	case DLT_NULL:
		psock->pcap_offset = sizeof(u32);
		break;
	default:
		die("Unknown data_link type %d\n", psock->data_link);
		break;
	}
}