/*
 * Map a blkif device id (vdev) back to the guest-visible local device
 * name, "xvd<letters>[<partition>]".
 *
 * Classic vdevs keep disk/partition in the low 8 bits (16 minors per
 * disk); extended vdevs (VDEV_IS_EXTENDED) carry the minor in
 * BLKIF_MINOR_EXT and allow 256 partitions per disk.
 * encode_disk_name() (a sibling helper in this file) writes the
 * multi-letter disk suffix after "xvd" and returns a pointer just past
 * what it wrote.
 *
 * Returns a gc-allocated, NUL-terminated string.
 */
char *libxl__devid_to_localdev(libxl__gc *gc, int devid)
{
    unsigned int minor;
    int offset;
    int nr_parts;
    char *ptr = NULL;
    char *ret = libxl__zalloc(gc, BUFFER_SIZE);

    if (!VDEV_IS_EXTENDED(devid)) {
        minor = devid & 0xff;
        nr_parts = 16;
    } else {
        minor = BLKIF_MINOR_EXT(devid);
        nr_parts = 256;
    }
    /* Disk index = minor / partitions-per-disk; partition = remainder. */
    offset = minor / nr_parts;

    strcpy(ret, "xvd");
    ptr = encode_disk_name(ret + 3, offset);
    if (minor % nr_parts == 0)
        /* Whole disk: no partition suffix. */
        *ptr = 0;
    else
        /* overflow cannot happen, thanks to the upper bound */
        /* NOTE(review): the literal 32 presumably equals (or is below)
         * BUFFER_SIZE -- confirm against the macro definition. */
        snprintf(ptr, ret + 32 - ptr, "%d", minor & (nr_parts - 1));
    return ret;
}
/*
 * Compute the guest-physical address and size of each ACPI table for an
 * ARM guest, laying them out consecutively from GUEST_ACPI_BASE, and
 * allocate a single zeroed module buffer large enough for all of them.
 *
 * Each table is placed at the current running length of
 * dom->acpi_modules[0], and the running length is advanced by the table
 * size rounded up to an 8-byte boundary (ROUNDUP(x, 3) rounds to
 * 1 << 3 bytes).
 *
 * Returns 0 on success, or the (negative) error from
 * libxl__estimate_madt_size.
 */
static int libxl__allocate_acpi_tables(libxl__gc *gc,
                                       libxl_domain_build_info *info,
                                       struct xc_dom_image *dom,
                                       struct acpitable acpitables[])
{
    int rc;
    size_t size;

    /* RSDP sits at the very base of the guest ACPI region. */
    acpitables[RSDP].addr = GUEST_ACPI_BASE;
    acpitables[RSDP].size = sizeof(struct acpi_table_rsdp);
    dom->acpi_modules[0].length += ROUNDUP(acpitables[RSDP].size, 3);

    acpitables[XSDT].addr = GUEST_ACPI_BASE + dom->acpi_modules[0].length;
    /*
     * Currently only 3 tables(GTDT, FADT, MADT) are pointed by XSDT. Alloc
     * entries for them.
     *
     * NOTE(review): only two extra uint64_t entries are added for three
     * tables -- presumably struct acpi_table_xsdt already embeds one
     * entry slot; confirm against the struct definition.
     */
    acpitables[XSDT].size = sizeof(struct acpi_table_xsdt) +
                            sizeof(uint64_t) * 2;
    dom->acpi_modules[0].length += ROUNDUP(acpitables[XSDT].size, 3);

    acpitables[GTDT].addr = GUEST_ACPI_BASE + dom->acpi_modules[0].length;
    acpitables[GTDT].size = sizeof(struct acpi_table_gtdt);
    dom->acpi_modules[0].length += ROUNDUP(acpitables[GTDT].size, 3);

    acpitables[MADT].addr = GUEST_ACPI_BASE + dom->acpi_modules[0].length;
    /* MADT size depends on the vcpu/interrupt configuration in info. */
    rc = libxl__estimate_madt_size(gc, info, &size);
    if (rc < 0)
        goto out;
    acpitables[MADT].size = size;
    dom->acpi_modules[0].length += ROUNDUP(acpitables[MADT].size, 3);

    acpitables[FADT].addr = GUEST_ACPI_BASE + dom->acpi_modules[0].length;
    acpitables[FADT].size = sizeof(struct acpi_table_fadt);
    dom->acpi_modules[0].length += ROUNDUP(acpitables[FADT].size, 3);

    acpitables[DSDT].addr = GUEST_ACPI_BASE + dom->acpi_modules[0].length;
    /* The DSDT is a prebuilt AML blob; dsdt_anycpu_arm_len is its size. */
    acpitables[DSDT].size = dsdt_anycpu_arm_len;
    dom->acpi_modules[0].length += ROUNDUP(acpitables[DSDT].size, 3);

    /* The whole layout must fit inside the fixed guest ACPI window. */
    assert(dom->acpi_modules[0].length <= GUEST_ACPI_SIZE);
    dom->acpi_modules[0].data = libxl__zalloc(gc, dom->acpi_modules[0].length);

    rc = 0;
out:
    return rc;
}
/*
 * Format fmt into a freshly gc-allocated buffer sized exactly for the
 * result.  Uses a first, measuring vsnprintf pass to learn the length;
 * formatting failures are fatal (assert).
 */
char *libxl__sprintf(libxl__gc *gc, const char *fmt, ...)
{
    va_list args;
    int needed;
    char *buf;

    /* Measuring pass: formatted length, excluding the trailing NUL. */
    va_start(args, fmt);
    needed = vsnprintf(NULL, 0, fmt, args);
    va_end(args);

    assert(needed >= 0);

    buf = libxl__zalloc(gc, needed + 1);

    /* Rendering pass into the exactly-sized buffer. */
    va_start(args, fmt);
    needed = vsnprintf(buf, needed + 1, fmt, args);
    va_end(args);

    return buf;
}
/*
 * va_list analogue of libxl__sprintf: format fmt/ap into a
 * gc-allocated, exactly-sized buffer.  The caller's va_list is never
 * consumed directly -- we only ever traverse va_copy'd duplicates.
 * Formatting failures are fatal (assert).
 */
char *libxl__vsprintf(libxl__gc *gc, const char *fmt, va_list ap)
{
    va_list dup;
    int len;
    char *out;

    /* Measuring pass: how many bytes (excluding NUL) fmt will produce. */
    va_copy(dup, ap);
    len = vsnprintf(NULL, 0, fmt, dup);
    va_end(dup);

    assert(len >= 0);

    out = libxl__zalloc(gc, len + 1);

    /* Rendering pass. */
    va_copy(dup, ap);
    len = vsnprintf(out, len + 1, fmt, dup);
    va_end(dup);

    return out;
}
/*
 * Begin reading the next migration-stream record: allocate a fresh
 * record buffer, attach it to the stream, and start an asynchronous
 * read of the fixed-size record header.  record_header_done is invoked
 * when the header read completes.  On setup failure the stream is
 * completed with the error.
 */
static void setup_read_record(libxl__egc *egc,
                              libxl__stream_read_state *stream)
{
    libxl__sr_record_buf *rec = NULL;
    STATE_AO_GC(stream->ao);
    int rc;

    /* Only one record may be in flight at a time. */
    assert(stream->incoming_record == NULL);
    /* NOTE(review): NOGC allocation -- presumably freed by the stream
     * code once the record is consumed; confirm ownership. */
    stream->incoming_record = rec = libxl__zalloc(NOGC, sizeof(*rec));

    rc = setup_read(stream, "record header",
                    &rec->hdr, sizeof(rec->hdr),
                    record_header_done);
    if (rc)
        goto err;
    return;

 err:
    assert(rc);
    stream_complete(egc, stream, rc);
}
/*
 * Format fmt into a freshly gc-allocated buffer sized exactly for the
 * result.  Returns NULL if vsnprintf reports a formatting/encoding
 * error; otherwise the NUL-terminated string.
 *
 * NOTE(review): this file contains another definition of
 * libxl__sprintf (an assert-on-error variant); they should be unified
 * -- confirm which error contract callers rely on.
 */
char *libxl__sprintf(libxl__gc *gc, const char *fmt, ...)
{
    char *s;
    va_list ap;
    int ret;

    /* Measuring pass: formatted length, excluding the trailing NUL. */
    va_start(ap, fmt);
    ret = vsnprintf(NULL, 0, fmt, ap);
    va_end(ap);

    if (ret < 0) {
        return NULL;
    }

    /* libxl__zalloc does not return NULL on failure (every other caller
     * in this file uses it unchecked), so no NULL guard is needed. */
    s = libxl__zalloc(gc, ret + 1);

    /* Rendering pass.  A failure here leaves the buffer in an undefined
     * state, so report it as NULL instead of returning garbage
     * (previously this error was silently ignored). */
    va_start(ap, fmt);
    ret = vsnprintf(s, ret + 1, fmt, ap);
    va_end(ap);

    if (ret < 0) {
        return NULL;
    }

    return s;
}
/*
 * Queue len bytes of caller-supplied data at the head of the
 * datacopier's buffer list, so they are written out before anything
 * read from the copier's input fd.
 */
void libxl__datacopier_prefixdata(libxl__egc *egc, libxl__datacopier_state *dc,
                                  const void *data, size_t len)
{
    EGC_GC;
    libxl__datacopier_buf *buf;

    /*
     * It is safe for this to be called immediately after _start, as
     * is documented in the public comment. _start's caller must have
     * the ctx locked, so other threads don't get to mess with the
     * contents, and the fd events cannot happen reentrantly. So we
     * are guaranteed to beat the first data from the read fd.
     */

    /* Caller must respect the copier's overall size budget.
     * NOTE(review): if dc->used could exceed dc->maxsz this unsigned
     * subtraction would wrap -- presumably the invariant used <= maxsz
     * holds; confirm. */
    assert(len < dc->maxsz - dc->used);

    /* NOTE(review): buf->buf appears to be a fixed-size array inside
     * libxl__datacopier_buf; confirm len can never exceed its capacity,
     * since there is no explicit check before this memcpy. */
    buf = libxl__zalloc(NOGC, sizeof(*buf));
    buf->used = len;
    memcpy(buf->buf, data, len);

    dc->used += len;
    LIBXL_TAILQ_INSERT_TAIL(&dc->bufs, buf, entry);
}
/* Portability note: this lock utilises flock(2) so a proper implementation of
 * flock(2) is required.
 */
/*
 * Acquire the per-domain userdata lock.  Blocks until the lock is
 * obtained.  Returns a heap-allocated (NOGC) lock handle which the
 * caller releases with libxl__unlock_domain_userdata, or NULL on
 * failure (in which case any partial state has been cleaned up).
 *
 * The open/flock/fstat-vs-stat dance guards against the classic
 * lockfile race: another process may unlink and recreate the lockfile
 * between our open() and flock(), leaving us holding a lock on a
 * deleted inode.  We only accept the lock when the file we locked
 * (fstat on fd) is still the file at the path (stat on lockfile).
 */
libxl__domain_userdata_lock *libxl__lock_domain_userdata(libxl__gc *gc,
                                                         uint32_t domid)
{
    libxl__domain_userdata_lock *lock = NULL;
    const char *lockfile;
    int fd;
    struct stat stab, fstab;

    lockfile = libxl__userdata_path(gc, domid, "domain-userdata-lock", "l");
    if (!lockfile) goto out;

    /* NOGC: the lock handle outlives this function's gc scope; it is
     * freed by libxl__unlock_domain_userdata. */
    lock = libxl__zalloc(NOGC, sizeof(libxl__domain_userdata_lock));
    lock->path = libxl__strdup(NOGC, lockfile);

    while (true) {
        libxl__carefd_begin();
        fd = open(lockfile, O_RDWR|O_CREAT, 0666);
        if (fd < 0)
            LOGE(ERROR, "cannot open lockfile %s, errno=%d", lockfile, errno);
        /* Register the fd with the carefd machinery even on failure, so
         * the carefd_begin/opened pairing stays balanced. */
        lock->lock_carefd = libxl__carefd_opened(CTX, fd);
        if (fd < 0) goto out;

        /* Lock the file in exclusive mode, wait indefinitely to
         * acquire the lock
         */
        while (flock(fd, LOCK_EX)) {
            switch (errno) {
            case EINTR:
                /* Signal received, retry */
                continue;
            default:
                /* All other errno: EBADF, EINVAL, ENOLCK, EWOULDBLOCK */
                LOGE(ERROR,
                     "unexpected error while trying to lock %s, fd=%d, errno=%d",
                     lockfile, fd, errno);
                goto out;
            }
        }

        if (fstat(fd, &fstab)) {
            LOGE(ERROR, "cannot fstat %s, fd=%d, errno=%d",
                 lockfile, fd, errno);
            goto out;
        }
        if (stat(lockfile, &stab)) {
            /* ENOENT: the file was unlinked while we waited -- loop and
             * reopen; any other stat failure is fatal. */
            if (errno != ENOENT) {
                LOGE(ERROR, "cannot stat %s, errno=%d", lockfile, errno);
                goto out;
            }
        } else {
            /* Same device and inode: the file we locked is still the
             * one at the path, so the lock is valid. */
            if (stab.st_dev == fstab.st_dev && stab.st_ino == fstab.st_ino)
                break;
        }

        /* Stale lock (file replaced underneath us): drop it and retry. */
        libxl__carefd_close(lock->lock_carefd);
    }

    /* Check the domain is still there, if not we should release the
     * lock and clean up.
     * NOTE(review): libxl_domain_info returning nonzero is treated as
     * "domain gone" -- presumably 0 means success/exists; confirm. */
    if (libxl_domain_info(CTX, NULL, domid))
        goto out;

    return lock;

out:
    if (lock) libxl__unlock_domain_userdata(lock);
    return NULL;
}