static void hostmem_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset)
{
    HostMem *hostmem = container_of(client, HostMem, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    size_t s = offsetof(struct vhost_memory, regions) +
               (hostmem->mem->nregions + 1) * sizeof hostmem->mem->regions[0];

    /* TODO: this is a hack.
     * At least one vga card (cirrus) changes the gpa to hva
     * memory maps on data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    qemu_mutex_lock(&hostmem->mem_lock);

    hostmem->mem = qemu_realloc(hostmem->mem, s);

    assert(size);
    vhost_mem_unassign_memory(hostmem->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(hostmem->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    qemu_mutex_unlock(&hostmem->mem_lock);
}
static void goldfish_audio_buff_ensure(struct goldfish_audio_buff *b,
                                       uint32_t size)
{
    if (b->capacity < size) {
        b->data = qemu_realloc(b->data, size);
        b->capacity = size;
    }
}
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = qemu_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}
void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                          target_phys_addr_t virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
                               (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                (entry->addr <= virt_base &&
                 entry->addr + entry->u.mem.size > virt_base)) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                        " collides with RAM region at " TARGET_FMT_lx
                        "-" TARGET_FMT_lx "\n", __FUNCTION__,
                        (target_ulong) virt_base,
                        (target_ulong) (virt_base + size),
                        (target_ulong) entry->addr,
                        (target_ulong) (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry++;
        } else {
            if (entry->addr >= virt_base && entry->addr < virt_base + size) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                        " collides with FIFO at " TARGET_FMT_lx "\n",
                        __FUNCTION__,
                        (target_ulong) virt_base,
                        (target_ulong) (virt_base + size),
                        (target_ulong) entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                   entry->addr <= virt_base)
                entry++;
        }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}
static void buffer_append(struct XenConsole *con)
{
    struct buffer *buffer = &con->buffer;
    XENCONS_RING_IDX cons, prod, size;
    struct xencons_interface *intf = con->sring;

    cons = intf->out_cons;
    prod = intf->out_prod;
    xen_mb();

    size = prod - cons;
    if ((size == 0) || (size > sizeof(intf->out)))
        return;

    if ((buffer->capacity - buffer->size) < size) {
        buffer->capacity += (size + 1024);
        buffer->data = qemu_realloc(buffer->data, buffer->capacity);
    }

    while (cons != prod)
        buffer->data[buffer->size++] =
            intf->out[MASK_XENCONS_IDX(cons++, intf->out)];

    xen_mb();
    intf->out_cons = cons;
    xen_be_send_notify(&con->xendev);

    if (buffer->max_capacity &&
        buffer->size > buffer->max_capacity) {
        /* Discard the middle of the data. */
        size_t over = buffer->size - buffer->max_capacity;
        uint8_t *maxpos = buffer->data + buffer->max_capacity;

        memmove(maxpos - over, maxpos, over);
        buffer->data = qemu_realloc(buffer->data, buffer->max_capacity);
        buffer->size = buffer->capacity = buffer->max_capacity;

        if (buffer->consumed > buffer->max_capacity - over)
            buffer->consumed = buffer->max_capacity - over;
    }
}
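/* A minimal standalone sketch (names invented for illustration, not QEMU
 * code) of the free-running ring-index arithmetic buffer_append() relies
 * on above: out_prod and out_cons only ever increase, prod - cons is the
 * pending byte count even across integer wraparound, and an index is
 * masked into the power-of-two ring only on access, as MASK_XENCONS_IDX
 * does. */
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE 1024u                      /* must be a power of two */
#define RING_MASK(idx) ((idx) & (RING_SIZE - 1))

struct demo_ring {
    uint8_t buf[RING_SIZE];
    uint32_t prod;                           /* advanced by producer only */
    uint32_t cons;                           /* advanced by consumer only */
};

/* Drain everything currently published. A real cross-domain ring such as
 * xencons additionally needs memory barriers around the index accesses,
 * which is what the xen_mb() calls above provide. */
static size_t demo_ring_drain(struct demo_ring *r, uint8_t *out, size_t max)
{
    uint32_t cons = r->cons;
    uint32_t prod = r->prod;
    size_t n = 0;

    while (cons != prod && n < max) {
        out[n++] = r->buf[RING_MASK(cons++)];
    }
    r->cons = cons;                          /* publish what we consumed */
    return n;
}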
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;

    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    /* Remove any old mapping for this memory first. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }

    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log, to reduce the number of
     * reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
{
    if (qiov->niov == qiov->nalloc) {
        qiov->nalloc = 2 * qiov->nalloc + 1;
        qiov->iov = qemu_realloc(qiov->iov,
                                 qiov->nalloc * sizeof(struct iovec));
    }
    qiov->iov[qiov->niov].iov_base = base;
    qiov->iov[qiov->niov].iov_len = len;
    qiov->size += len;
    ++qiov->niov;
}
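/* A standalone sketch of the growth rule qemu_iovec_add() above (and
 * qemu_sglist_add() just below) uses: nalloc = 2 * nalloc + 1 doubles the
 * allocation, and the +1 lets a zero-initialized vector grow at all
 * (0 -> 1 -> 3 -> 7 -> ...), giving amortized O(1) appends. Plain
 * realloc() stands in for qemu_realloc(); the names here are invented. */
#include <stdlib.h>
#include <sys/uio.h>

struct iovec_list {
    struct iovec *iov;
    int niov;
    int nalloc;
};

static void iovec_list_add(struct iovec_list *l, void *base, size_t len)
{
    if (l->niov == l->nalloc) {
        l->nalloc = 2 * l->nalloc + 1;
        l->iov = realloc(l->iov, l->nalloc * sizeof(struct iovec));
        if (!l->iov) {
            abort();    /* qemu_realloc() likewise aborts on failure */
        }
    }
    l->iov[l->niov].iov_base = base;
    l->iov[l->niov].iov_len = len;
    l->niov++;
}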
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg,
                               qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
/* Add RAM.  */
static void create_ram(const void *dt)
{
    int node = -1;
    const struct fdt_property *p;
    int len;
    uint32_t base;
    uint32_t size;
    uint32_t *data;
    ram_addr_t offset;

    while (1) {
        node = fdt_node_offset_by_prop_value(dt, node, "device_type",
                                             "memory", 7);
        if (node < 0)
            break;
        check_cells(dt, node, 1, 1);
        p = fdt_get_property(dt, node, "reg", &len);
        if (!p || (len % 8) != 0) {
            fprintf(stderr, "bad memory section %s\n",
                    fdt_get_name(dt, node, NULL));
            exit(1);
        }
        data = (uint32_t *)p->data;
        while (len) {
            base = fdt32_to_cpu(data[0]);
            size = fdt32_to_cpu(data[1]);
            data += 2;
            len -= 8;
            /* Ignore zero size regions.  */
            if (size == 0)
                continue;
            offset = qemu_ram_alloc(size);
            cpu_register_physical_memory(base, size, offset | IO_MEM_RAM);
            devtree_ram_map_size++;
            devtree_ram_map = qemu_realloc(devtree_ram_map,
                                           devtree_ram_map_size
                                           * sizeof(devtree_ram_region));
            devtree_ram_map[devtree_ram_map_size - 1].base = base;
            devtree_ram_map[devtree_ram_map_size - 1].size = size;
        }
    }
    /* FIXME: Merge and sort memory map entries.  */
    /* Technically there's no reason we have to have RAM.  However in
       practice it indicates a busted machine description.  */
    if (!devtree_ram_map) {
        fprintf(stderr, "No memory regions found\n");
        exit(1);
    }
}
void soc_dma_port_add_fifo(struct soc_dma_s *soc, target_phys_addr_t virt_base,
                           soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
                               (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                entry->addr + entry->u.mem.size > virt_base) {
                fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                        " collides with RAM region at " TARGET_FMT_lx
                        "-" TARGET_FMT_lx "\n", __FUNCTION__,
                        (target_ulong) virt_base,
                        (target_ulong) entry->addr,
                        (target_ulong) (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                   entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                            " collides with FIFO at " TARGET_FMT_lx "\n",
                            __FUNCTION__,
                            (target_ulong) virt_base,
                            (target_ulong) entry->addr);
                    exit(-1);
                }

                entry++;
            }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}
/* qstring_append(): Append a C string to a QString */
void qstring_append(QString *qstring, const char *str)
{
    size_t len = strlen(str);

    if (qstring->capacity < (qstring->length + len)) {
        qstring->capacity += len;
        qstring->capacity *= 2; /* use exponential growth */
        qstring->string = qemu_realloc(qstring->string,
                                       qstring->capacity + 1);
    }

    memcpy(qstring->string + qstring->length, str, len);
    qstring->length += len;
    qstring->string[qstring->length] = 0;
}
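/* A hypothetical usage sketch. Assuming qstring_from_str() sizes the
 * initial capacity to the initial length (as qstring.c of this era does),
 * repeated appends hit qemu_realloc() only O(log n) times, because the
 * capacity at least doubles whenever it is exceeded and one extra byte is
 * always reserved for the terminating NUL:
 *
 *     QString *qs = qstring_from_str("vnc:");   // length 4, capacity 4
 *     qstring_append(qs, "127.0.0.1");          // grows: (4 + 9) * 2 = 26
 *     qstring_append(qs, ":5900");              // 13 + 5 <= 26, no realloc
 */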
static void vmc_have_data(VirtIOSerialPort *port, const uint8_t *buf,
                          size_t len)
{
    SpiceVirtualChannel *svc = DO_UPCAST(SpiceVirtualChannel, port, port);

    dprintf(svc, 2, "%s: %zu\n", __func__, len);
    assert(svc->datalen == 0);
    if (svc->bufsize < len) {
        svc->bufsize = len;
        svc->buffer = qemu_realloc(svc->buffer, svc->bufsize);
    }
    memcpy(svc->buffer, buf, len);
    svc->datapos = svc->buffer;
    svc->datalen = len;
    virtio_serial_throttle_port(&svc->port, true);
    spice_server_char_device_wakeup(&svc->sin);
}
/* This function increases the dynamic storage needed to store data about
 * other guests. */
static void increase_dynamic_storage(IVShmemState *s, int new_min_size)
{
    int j, old_nb_alloc;

    old_nb_alloc = s->nb_peers;

    while (new_min_size >= s->nb_peers)
        s->nb_peers = s->nb_peers * 2;

    IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
    s->peers = qemu_realloc(s->peers, s->nb_peers * sizeof(Peer));

    /* zero out new pointers */
    for (j = old_nb_alloc; j < s->nb_peers; j++) {
        s->peers[j].eventfds = NULL;
        s->peers[j].nb_eventfds = 0;
    }
}
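/* The doubling loop above terminates only if s->nb_peers starts out
 * positive (0 * 2 stays 0 forever); ivshmem relies on its initial
 * allocation to guarantee that. A standalone sketch of the same rounding
 * rule with the guard made explicit (function name invented): */
static int grow_capacity_pow2(int cap, int min_size)
{
    if (cap < 1) {
        cap = 1;                /* guard against the 0 * 2 == 0 trap */
    }
    while (min_size >= cap) {
        cap *= 2;
    }
    return cap;
}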
static void buffered_append(QEMUFileBuffered *s,
                            const uint8_t *buf, size_t size)
{
    if (size > (s->buffer_capacity - s->buffer_size)) {
        void *tmp;

        DPRINTF("increasing buffer capacity from %zu by %zu\n",
                s->buffer_capacity, size + 1024);

        s->buffer_capacity += size + 1024;

        tmp = qemu_realloc(s->buffer, s->buffer_capacity);
        if (tmp == NULL) {
            fprintf(stderr, "qemu file buffer expansion failed\n");
            exit(1);
        }

        s->buffer = tmp;
    }

    memcpy(s->buffer + s->buffer_size, buf, size);
    s->buffer_size += size;
}
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;

    /* TODO: this is a hack.
     * At least one vga card (cirrus) changes the gpa to hva
     * memory maps on data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    /* Remove any old mapping for this memory first. */
    vhost_mem_unassign_memory(dev->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(dev->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }

    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log, to reduce the number of
     * reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
static int dmg_open(BlockDriverState *bs, int flags)
{
    BDRVDMGState *s = bs->opaque;
    off_t info_begin, info_end, last_in_offset, last_out_offset;
    uint32_t count;
    uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
    int64_t offset;

    bs->read_only = 1;
    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;

    /* read offset of info blocks */
    offset = bdrv_getlength(bs->file);
    if (offset < 0) {
        goto fail;
    }
    offset -= 0x1d8;

    info_begin = read_off(bs, offset);
    if (info_begin == 0) {
        goto fail;
    }

    if (read_uint32(bs, info_begin) != 0x100) {
        goto fail;
    }

    count = read_uint32(bs, info_begin + 4);
    if (count == 0) {
        goto fail;
    }
    info_end = info_begin + count;

    offset = info_begin + 0x100;

    /* read offsets */
    last_in_offset = last_out_offset = 0;
    while (offset < info_end) {
        uint32_t type;

        count = read_uint32(bs, offset);
        if (count == 0) {
            goto fail;
        }
        offset += 4;

        type = read_uint32(bs, offset);
        if (type == 0x6d697368 && count >= 244) {
            int new_size, chunk_count;

            offset += 4;
            offset += 200;

            chunk_count = (count - 204) / 40;
            new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
            s->types = qemu_realloc(s->types, new_size / 2);
            s->offsets = qemu_realloc(s->offsets, new_size);
            s->lengths = qemu_realloc(s->lengths, new_size);
            s->sectors = qemu_realloc(s->sectors, new_size);
            s->sectorcounts = qemu_realloc(s->sectorcounts, new_size);

            for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
                s->types[i] = read_uint32(bs, offset);
                offset += 4;
                if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
                    s->types[i] != 2) {
                    if (s->types[i] == 0xffffffff) {
                        last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
                        last_out_offset = s->sectors[i - 1] +
                                          s->sectorcounts[i - 1];
                    }
                    chunk_count--;
                    i--;
                    offset += 36;
                    continue;
                }
                offset += 4;

                s->sectors[i] = last_out_offset + read_off(bs, offset);
                offset += 8;

                s->sectorcounts[i] = read_off(bs, offset);
                offset += 8;

                s->offsets[i] = last_in_offset + read_off(bs, offset);
                offset += 8;

                s->lengths[i] = read_off(bs, offset);
                offset += 8;

                if (s->lengths[i] > max_compressed_size)
                    max_compressed_size = s->lengths[i];
                if (s->sectorcounts[i] > max_sectors_per_chunk)
                    max_sectors_per_chunk = s->sectorcounts[i];
            }
            s->n_chunks += chunk_count;
        }
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_malloc(max_compressed_size + 1);
    s->uncompressed_chunk = qemu_malloc(512 * max_sectors_per_chunk);
    if (inflateInit(&s->zstream) != Z_OK)
        goto fail;

    s->current_chunk = s->n_chunks;

    return 0;
fail:
    return -1;
}
int vnc_tls_validate_certificate(struct VncState *vs)
{
    int ret;
    unsigned int status;
    const gnutls_datum_t *certs;
    unsigned int nCerts, i;
    time_t now;

    VNC_DEBUG("Validating client certificate\n");
    if ((ret = gnutls_certificate_verify_peers2(vs->tls.session,
                                                &status)) < 0) {
        VNC_DEBUG("Verify failed %s\n", gnutls_strerror(ret));
        return -1;
    }

    if ((now = time(NULL)) == ((time_t)-1)) {
        return -1;
    }

    if (status != 0) {
        if (status & GNUTLS_CERT_INVALID)
            VNC_DEBUG("The certificate is not trusted.\n");

        if (status & GNUTLS_CERT_SIGNER_NOT_FOUND)
            VNC_DEBUG("The certificate hasn't got a known issuer.\n");

        if (status & GNUTLS_CERT_REVOKED)
            VNC_DEBUG("The certificate has been revoked.\n");

        if (status & GNUTLS_CERT_INSECURE_ALGORITHM)
            VNC_DEBUG("The certificate uses an insecure algorithm\n");

        return -1;
    } else {
        VNC_DEBUG("Certificate is valid!\n");
    }

    /* Only support x509 for now */
    if (gnutls_certificate_type_get(vs->tls.session) != GNUTLS_CRT_X509)
        return -1;

    if (!(certs = gnutls_certificate_get_peers(vs->tls.session, &nCerts)))
        return -1;

    for (i = 0; i < nCerts; i++) {
        gnutls_x509_crt_t cert;
        VNC_DEBUG("Checking certificate chain %d\n", i);
        if (gnutls_x509_crt_init(&cert) < 0)
            return -1;

        if (gnutls_x509_crt_import(cert, &certs[i],
                                   GNUTLS_X509_FMT_DER) < 0) {
            gnutls_x509_crt_deinit(cert);
            return -1;
        }

        if (gnutls_x509_crt_get_expiration_time(cert) < now) {
            VNC_DEBUG("The certificate has expired\n");
            gnutls_x509_crt_deinit(cert);
            return -1;
        }

        if (gnutls_x509_crt_get_activation_time(cert) > now) {
            VNC_DEBUG("The certificate is not yet activated\n");
            gnutls_x509_crt_deinit(cert);
            return -1;
        }

        if (i == 0) {
            size_t dnameSize = 1024;
            vs->tls.dname = qemu_malloc(dnameSize);
        requery:
            if ((ret = gnutls_x509_crt_get_dn(cert, vs->tls.dname,
                                              &dnameSize)) != 0) {
                if (ret == GNUTLS_E_SHORT_MEMORY_BUFFER) {
                    vs->tls.dname = qemu_realloc(vs->tls.dname, dnameSize);
                    goto requery;
                }
                gnutls_x509_crt_deinit(cert);
                VNC_DEBUG("Cannot get client distinguished name: %s",
                          gnutls_strerror(ret));
                return -1;
            }

            if (vs->vd->tls.x509verify) {
                int allow;
                if (!vs->vd->tls.acl) {
                    VNC_DEBUG("no ACL activated, allowing access\n");
                    gnutls_x509_crt_deinit(cert);
                    continue;
                }

                allow = qemu_acl_party_is_allowed(vs->vd->tls.acl,
                                                  vs->tls.dname);

                VNC_DEBUG("TLS x509 ACL check for %s is %s\n",
                          vs->tls.dname, allow ? "allowed" : "denied");
                if (!allow) {
                    gnutls_x509_crt_deinit(cert);
                    return -1;
                }
            }
        }

        gnutls_x509_crt_deinit(cert);
    }

    return 0;
}
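/* A standalone sketch of the grow-and-retry idiom used for the DN above:
 * gnutls_x509_crt_get_dn() returns GNUTLS_E_SHORT_MEMORY_BUFFER and writes
 * the required size back through its size argument, so a single retry with
 * the updated size suffices. The helper name is invented; malloc failure
 * handling is trimmed for brevity. */
#include <stdlib.h>
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>

static char *crt_get_dn_alloc(gnutls_x509_crt_t cert)
{
    size_t size = 1024;
    char *dn = malloc(size);
    int ret = gnutls_x509_crt_get_dn(cert, dn, &size);

    if (ret == GNUTLS_E_SHORT_MEMORY_BUFFER) {
        dn = realloc(dn, size);     /* size now holds the needed length */
        ret = gnutls_x509_crt_get_dn(cert, dn, &size);
    }
    if (ret != 0) {
        free(dn);
        return NULL;
    }
    return dn;
}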
static int dmg_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVDMGState *s = bs->opaque;
    off_t info_begin, info_end, last_in_offset, last_out_offset;
    uint32_t count;
    uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;

    s->fd = open(filename, O_RDONLY | O_BINARY);
    if (s->fd < 0)
        return -errno;
    bs->read_only = 1;
    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;

    /* read offset of info blocks */
    if (lseek(s->fd, -0x1d8, SEEK_END) < 0) {
dmg_close:
        close(s->fd);
        /* open raw instead */
        bs->drv = bdrv_find_format("raw");
        return bs->drv->bdrv_open(bs, filename, flags);
    }
    info_begin = read_off(s->fd);
    if (info_begin == 0)
        goto dmg_close;
    if (lseek(s->fd, info_begin, SEEK_SET) < 0)
        goto dmg_close;
    if (read_uint32(s->fd) != 0x100)
        goto dmg_close;
    if ((count = read_uint32(s->fd)) == 0)
        goto dmg_close;
    info_end = info_begin + count;
    if (lseek(s->fd, 0xf8, SEEK_CUR) < 0)
        goto dmg_close;

    /* read offsets */
    last_in_offset = last_out_offset = 0;
    while (lseek(s->fd, 0, SEEK_CUR) < info_end) {
        uint32_t type;

        count = read_uint32(s->fd);
        if (count == 0)
            goto dmg_close;
        type = read_uint32(s->fd);
        if (type != 0x6d697368 || count < 244)
            lseek(s->fd, count - 4, SEEK_CUR);
        else {
            int new_size, chunk_count;

            if (lseek(s->fd, 200, SEEK_CUR) < 0)
                goto dmg_close;
            chunk_count = (count - 204) / 40;
            new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
            s->types = qemu_realloc(s->types, new_size / 2);
            s->offsets = qemu_realloc(s->offsets, new_size);
            s->lengths = qemu_realloc(s->lengths, new_size);
            s->sectors = qemu_realloc(s->sectors, new_size);
            s->sectorcounts = qemu_realloc(s->sectorcounts, new_size);

            for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
                s->types[i] = read_uint32(s->fd);
                if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
                    s->types[i] != 2) {
                    if (s->types[i] == 0xffffffff) {
                        last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
                        last_out_offset = s->sectors[i - 1] +
                                          s->sectorcounts[i - 1];
                    }
                    chunk_count--;
                    i--;
                    if (lseek(s->fd, 36, SEEK_CUR) < 0)
                        goto dmg_close;
                    continue;
                }
                read_uint32(s->fd);
                s->sectors[i] = last_out_offset + read_off(s->fd);
                s->sectorcounts[i] = read_off(s->fd);
                s->offsets[i] = last_in_offset + read_off(s->fd);
                s->lengths[i] = read_off(s->fd);
                if (s->lengths[i] > max_compressed_size)
                    max_compressed_size = s->lengths[i];
                if (s->sectorcounts[i] > max_sectors_per_chunk)
                    max_sectors_per_chunk = s->sectorcounts[i];
            }
            s->n_chunks += chunk_count;
        }
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_malloc(max_compressed_size + 1);
    s->uncompressed_chunk = qemu_malloc(512 * max_sectors_per_chunk);
    if (inflateInit(&s->zstream) != Z_OK)
        goto dmg_close;

    s->current_chunk = s->n_chunks;

    return 0;
}
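/* The magic numbers in both dmg_open() variants decode as ASCII: block
 * type 0x6d697368 is "mish", the DMG block-list resource. Each chunk
 * descriptor is 40 bytes (a 4-byte type, 4 bytes skipped, then four
 * 8-byte fields), which is where (count - 204) / 40 comes from. A quick
 * sanity check (helper name invented): */
#include <assert.h>

static void mish_magic_check(void)
{
    assert(0x6d697368 == (('m' << 24) | ('i' << 16) | ('s' << 8) | 'h'));
}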
int acpi_table_add(const char *t)
{
    static const char *dfl_id = "QEMUQEMU";
    char buf[1024], *p, *f;
    struct acpi_table_header acpi_hdr;
    unsigned long val;
    size_t off;

    memset(&acpi_hdr, 0, sizeof(acpi_hdr));

    if (get_param_value(buf, sizeof(buf), "sig", t)) {
        strncpy(acpi_hdr.signature, buf, 4);
    } else {
        strncpy(acpi_hdr.signature, dfl_id, 4);
    }

    if (get_param_value(buf, sizeof(buf), "rev", t)) {
        val = strtoul(buf, &p, 10);
        if (val > 255 || *p != '\0')
            goto out;
    } else {
        val = 1;
    }
    acpi_hdr.revision = (int8_t)val;

    if (get_param_value(buf, sizeof(buf), "oem_id", t)) {
        strncpy(acpi_hdr.oem_id, buf, 6);
    } else {
        strncpy(acpi_hdr.oem_id, dfl_id, 6);
    }

    if (get_param_value(buf, sizeof(buf), "oem_table_id", t)) {
        strncpy(acpi_hdr.oem_table_id, buf, 8);
    } else {
        strncpy(acpi_hdr.oem_table_id, dfl_id, 8);
    }

    if (get_param_value(buf, sizeof(buf), "oem_rev", t)) {
        val = strtol(buf, &p, 10);
        if (*p != '\0')
            goto out;
    } else {
        val = 1;
    }
    acpi_hdr.oem_revision = cpu_to_le32(val);

    if (get_param_value(buf, sizeof(buf), "asl_compiler_id", t)) {
        strncpy(acpi_hdr.asl_compiler_id, buf, 4);
    } else {
        strncpy(acpi_hdr.asl_compiler_id, dfl_id, 4);
    }

    if (get_param_value(buf, sizeof(buf), "asl_compiler_rev", t)) {
        val = strtol(buf, &p, 10);
        if (*p != '\0')
            goto out;
    } else {
        val = 1;
    }
    acpi_hdr.asl_compiler_revision = cpu_to_le32(val);

    if (!get_param_value(buf, sizeof(buf), "data", t)) {
        buf[0] = '\0';
    }

    acpi_hdr.length = sizeof(acpi_hdr);

    f = buf;
    while (buf[0]) {
        struct stat s;
        char *n = strchr(f, ':');
        if (n)
            *n = '\0';
        if (stat(f, &s) < 0) {
            fprintf(stderr, "Can't stat file '%s': %s\n", f, strerror(errno));
            goto out;
        }
        acpi_hdr.length += s.st_size;
        if (!n)
            break;
        *n = ':';
        f = n + 1;
    }

    if (!acpi_tables) {
        acpi_tables_len = sizeof(uint16_t);
        acpi_tables = qemu_mallocz(acpi_tables_len);
    }
    /* Grow the buffer first: qemu_realloc() may move it, so the insertion
     * pointer must be computed only afterwards. */
    acpi_tables = qemu_realloc(acpi_tables, acpi_tables_len +
                               sizeof(uint16_t) + acpi_hdr.length);
    p = acpi_tables + acpi_tables_len;
    acpi_tables_len += sizeof(uint16_t) + acpi_hdr.length;

    *(uint16_t*)p = cpu_to_le16(acpi_hdr.length);
    p += sizeof(uint16_t);

    acpi_hdr.length = cpu_to_le32(acpi_hdr.length);
    memcpy(p, &acpi_hdr, sizeof(acpi_hdr));

    off = sizeof(acpi_hdr);

    f = buf;
    while (buf[0]) {
        struct stat s;
        int fd;
        char *n = strchr(f, ':');
        if (n)
            *n = '\0';

        fd = open(f, O_RDONLY);
        if (fd < 0)
            goto out;
        if (fstat(fd, &s) < 0) {
            close(fd);
            goto out;
        }

        do {
            int r;
            r = read(fd, p + off, s.st_size);
            if (r > 0) {
                off += r;
                s.st_size -= r;
            } else if ((r < 0 && errno != EINTR) || r == 0) {
                close(fd);
                goto out;
            }
        } while (s.st_size);

        close(fd);
        if (!n)
            break;
        f = n + 1;
    }

    ((struct acpi_table_header*)p)->checksum =
        acpi_checksum((uint8_t*)p, off);
    /* increase number of tables */
    (*(uint16_t*)acpi_tables) =
        cpu_to_le16(le16_to_cpu(*(uint16_t*)acpi_tables) + 1);
    return 0;
out:
    if (acpi_tables) {
        qemu_free(acpi_tables);
        acpi_tables = NULL;
    }
    return -1;
}
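/* Example of the command-line syntax this parser accepts; the keys match
 * the get_param_value() lookups above, and the file names are
 * placeholders:
 *
 *   -acpitable sig=SSDT,rev=2,oem_id=MYOEM,oem_table_id=MYTABLE,\
 *              oem_rev=3,asl_compiler_id=INTL,asl_compiler_rev=1,\
 *              data=part1.aml:part2.aml
 *
 * "data" names a colon-separated list of files whose contents are
 * appended, in order, after the constructed table header. */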
int load_multiboot(void *fw_cfg,
                   FILE *f,
                   const char *kernel_filename,
                   const char *initrd_filename,
                   const char *kernel_cmdline,
                   int kernel_file_size,
                   uint8_t *header)
{
    int i, is_multiboot = 0;
    uint32_t flags = 0;
    uint32_t mh_entry_addr;
    uint32_t mh_load_addr;
    uint32_t mb_kernel_size;
    MultibootState mbs;
    uint8_t bootinfo[MBI_SIZE];
    uint8_t *mb_bootinfo_data;

    /* Ok, let's see if it is a multiboot image.
       The header is 12x32bit long, so the latest entry may be 8192 - 48. */
    for (i = 0; i < (8192 - 48); i += 4) {
        if (ldl_p(header+i) == 0x1BADB002) {
            uint32_t checksum = ldl_p(header+i+8);
            flags = ldl_p(header+i+4);
            checksum += flags;
            checksum += (uint32_t)0x1BADB002;
            if (!checksum) {
                is_multiboot = 1;
                break;
            }
        }
    }

    if (!is_multiboot)
        return 0; /* no multiboot */

    mb_debug("qemu: I believe we found a multiboot image!\n");
    memset(bootinfo, 0, sizeof(bootinfo));
    memset(&mbs, 0, sizeof(mbs));

    if (flags & 0x00000004) { /* MULTIBOOT_HEADER_HAS_VBE */
        fprintf(stderr, "qemu: multiboot knows VBE. we don't.\n");
    }
    if (!(flags & 0x00010000)) { /* MULTIBOOT_HEADER_HAS_ADDR */
        uint64_t elf_entry;
        uint64_t elf_low, elf_high;
        int kernel_size;
        fclose(f);

        if (((struct elf64_hdr*)header)->e_machine == EM_X86_64) {
            fprintf(stderr, "Cannot load x86-64 image, give a 32bit one.\n");
            exit(1);
        }

        kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry,
                               &elf_low, &elf_high, 0, ELF_MACHINE, 0);
        if (kernel_size < 0) {
            fprintf(stderr, "Error while loading elf kernel\n");
            exit(1);
        }
        mh_load_addr = elf_low;
        mb_kernel_size = elf_high - elf_low;
        mh_entry_addr = elf_entry;

        mbs.mb_buf = qemu_malloc(mb_kernel_size);
        if (rom_copy(mbs.mb_buf, mh_load_addr,
                     mb_kernel_size) != mb_kernel_size) {
            fprintf(stderr, "Error while fetching elf kernel from rom\n");
            exit(1);
        }

        mb_debug("qemu: loading multiboot-elf kernel (%#x bytes) with entry %#zx\n",
                 mb_kernel_size, (size_t)mh_entry_addr);
    } else {
        /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */
        uint32_t mh_header_addr = ldl_p(header+i+12);
        mh_load_addr = ldl_p(header+i+16);
        uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr);

        mh_entry_addr = ldl_p(header+i+28);
        mb_kernel_size = kernel_file_size - mb_kernel_text_offset;

        /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_VBE.
        uint32_t mh_mode_type = ldl_p(header+i+32);
        uint32_t mh_width = ldl_p(header+i+36);
        uint32_t mh_height = ldl_p(header+i+40);
        uint32_t mh_depth = ldl_p(header+i+44); */

        mb_debug("multiboot: mh_header_addr = %#x\n", mh_header_addr);
        mb_debug("multiboot: mh_load_addr = %#x\n", mh_load_addr);
        mb_debug("multiboot: mh_load_end_addr = %#x\n", ldl_p(header+i+20));
        mb_debug("multiboot: mh_bss_end_addr = %#x\n", ldl_p(header+i+24));
        mb_debug("qemu: loading multiboot kernel (%#x bytes) at %#x\n",
                 mb_kernel_size, mh_load_addr);

        mbs.mb_buf = qemu_malloc(mb_kernel_size);
        fseek(f, mb_kernel_text_offset, SEEK_SET);
        if (fread(mbs.mb_buf, 1, mb_kernel_size, f) != mb_kernel_size) {
            fprintf(stderr, "fread() failed\n");
            exit(1);
        }
        fclose(f);
    }

    mbs.mb_buf_phys = mh_load_addr;

    mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_kernel_size);
    mbs.offset_mbinfo = mbs.mb_buf_size;

    /* Calculate space for cmdlines and mb_mods */
    mbs.mb_buf_size += strlen(kernel_filename) + 1;
    mbs.mb_buf_size += strlen(kernel_cmdline) + 1;
    if (initrd_filename) {
        const char *r = initrd_filename;
        mbs.mb_buf_size += strlen(r) + 1;
        mbs.mb_mods_avail = 1;
        while ((r = strchr(r, ','))) {
            mbs.mb_mods_avail++;
            r++;
        }
        mbs.mb_buf_size += MB_MOD_SIZE * mbs.mb_mods_avail;
    }

    mbs.mb_buf_size = TARGET_PAGE_ALIGN(mbs.mb_buf_size);

    /* enlarge mb_buf to hold cmdlines and mb-info structs */
    mbs.mb_buf = qemu_realloc(mbs.mb_buf, mbs.mb_buf_size);
    mbs.offset_cmdlines = mbs.offset_mbinfo + mbs.mb_mods_avail * MB_MOD_SIZE;

    if (initrd_filename) {
        char *next_initrd;

        mbs.offset_mods = mbs.mb_buf_size;

        do {
            char *next_space;
            int mb_mod_length;
            uint32_t offs = mbs.mb_buf_size;

            next_initrd = strchr(initrd_filename, ',');
            if (next_initrd)
                *next_initrd = '\0';
            /* if a space comes after the module filename, treat everything
               after that as parameters */
            target_phys_addr_t c = mb_add_cmdline(&mbs, initrd_filename);
            if ((next_space = strchr(initrd_filename, ' ')))
                *next_space = '\0';
            mb_debug("multiboot loading module: %s\n", initrd_filename);
            mb_mod_length = get_image_size(initrd_filename);
            if (mb_mod_length < 0) {
                fprintf(stderr, "failed to get %s image size\n",
                        initrd_filename);
                exit(1);
            }

            mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_mod_length +
                                                mbs.mb_buf_size);
            mbs.mb_buf = qemu_realloc(mbs.mb_buf, mbs.mb_buf_size);

            load_image(initrd_filename, (unsigned char *)mbs.mb_buf + offs);
            mb_add_mod(&mbs, mbs.mb_buf_phys + offs,
                       mbs.mb_buf_phys + offs + mb_mod_length, c);

            mb_debug("mod_start: %p\nmod_end: %p\n cmdline: "TARGET_FMT_plx"\n",
                     (char *)mbs.mb_buf + offs,
                     (char *)mbs.mb_buf + offs + mb_mod_length, c);
            initrd_filename = next_initrd+1;
        } while (next_initrd);
    }

    /* Commandline support */
    char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2];
    snprintf(kcmdline, sizeof(kcmdline), "%s %s",
             kernel_filename, kernel_cmdline);
    stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));

    stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo);
    stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */

    /* the kernel is where we want it to be now */
    stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY
                                | MULTIBOOT_FLAGS_BOOT_DEVICE
                                | MULTIBOOT_FLAGS_CMDLINE
                                | MULTIBOOT_FLAGS_MODULES
                                | MULTIBOOT_FLAGS_MMAP);
    stl_p(bootinfo + MBI_MEM_LOWER, 640);
    stl_p(bootinfo + MBI_MEM_UPPER, ram_size / 1024);
    stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8001ffff); /* XXX: use the -boot switch? */
    stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);

    mb_debug("multiboot: mh_entry_addr = %#x\n", mh_entry_addr);
    mb_debug("           mb_buf_phys   = "TARGET_FMT_plx"\n", mbs.mb_buf_phys);
    mb_debug("           mod_start     = "TARGET_FMT_plx"\n",
             mbs.mb_buf_phys + mbs.offset_mods);
    mb_debug("           mb_mods_count = %d\n", mbs.mb_mods_count);

    /* save bootinfo off the stack */
    mb_bootinfo_data = qemu_malloc(sizeof(bootinfo));
    memcpy(mb_bootinfo_data, bootinfo, sizeof(bootinfo));

    /* Pass variables to option rom */
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, mh_entry_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, mbs.mb_buf_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, mbs.mb_buf, mbs.mb_buf_size);

    fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, ADDR_MBI);
    fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, sizeof(bootinfo));
    fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, mb_bootinfo_data,
                     sizeof(bootinfo));

    option_rom[nb_option_roms] = "multiboot.bin";
    nb_option_roms++;

    return 1; /* yes, we are multiboot */
}
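/* A standalone sketch of the header scan at the top of load_multiboot():
 * the Multiboot header must sit 4-byte aligned within the first 8192 bytes
 * of the image, and magic + flags + checksum must wrap to zero (mod 2^32).
 * ldl_p() is replaced here by a plain memcpy load, which assumes a
 * little-endian host; the function name is invented for illustration. */
#include <stdint.h>
#include <string.h>

static int find_multiboot_header(const uint8_t *buf, size_t len)
{
    size_t i;

    for (i = 0; i + 12 <= len && i < 8192 - 48; i += 4) {
        uint32_t magic, flags, checksum;

        memcpy(&magic, buf + i, 4);
        if (magic != 0x1BADB002) {
            continue;
        }
        memcpy(&flags, buf + i + 4, 4);
        memcpy(&checksum, buf + i + 8, 4);
        if (magic + flags + checksum == 0) {    /* unsigned wraparound */
            return (int)i;                      /* header offset */
        }
    }
    return -1;                                  /* not a multiboot image */
}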