/* * This routine finds the first virtqueue described in the configuration of * this device and sets it up. */ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqconfig *config; struct virtqueue *vq; int err; if (index >= kdev->desc->num_vq) return ERR_PTR(-ENOENT); config = kvm_vq_config(kdev->desc)+index; err = vmem_add_mapping(config->address, vring_size(config->num, KVM_S390_VIRTIO_RING_ALIGN)); if (err) goto out; vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, vdev, (void *) config->address, kvm_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; } /* * register a callback token * The host will sent this via the external interrupt parameter */ config->token = (u64) vq; vq->priv = config; return vq; unmap: vmem_remove_mapping(config->address, vring_size(config->num, KVM_S390_VIRTIO_RING_ALIGN)); out: return ERR_PTR(err); }
/*
 * Hot-add a memory range.  First create the kernel mapping, then walk
 * all zones and register each part of the range with the zone that
 * covers it: pages inside an existing (non-movable) zone span go to
 * that zone, anything left over goes to ZONE_MOVABLE.
 *
 * Returns 0 on success or a negative error code; on failure from
 * __add_pages() the mapping is torn down again.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long first_pfn = PFN_DOWN(start);
	unsigned long pages_left = PFN_DOWN(size);
	unsigned long chunk, zstart, zend;
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Only the part within this zone's current span. */
			zstart = zone->zone_start_pfn;
			zend = zone->zone_start_pfn + zone->spanned_pages;
		} else {
			/* Whatever remains is added to ZONE_MOVABLE. */
			zstart = first_pfn;
			zend = first_pfn + pages_left;
		}
		if (first_pfn < zstart || first_pfn >= zend)
			continue;
		/* Clamp the chunk to the end of this zone. */
		chunk = (first_pfn + pages_left > zend) ?
			zend - first_pfn : pages_left;
		rc = __add_pages(nid, zone, first_pfn, chunk);
		if (rc)
			break;
		first_pfn += chunk;
		pages_left -= chunk;
		if (!pages_left)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
/*
 * Hot-add a memory range.  First create the kernel mapping, then walk
 * the range and add each chunk to the zone it belongs to (ZONE_DMA
 * below MAX_DMA_ADDRESS, ZONE_NORMAL up to the end of DRAM known at
 * boot, ZONE_MOVABLE above that), splitting at the zone boundaries.
 *
 * Returns 0 on success or a negative error code; on failure from
 * __add_pages() the mapping is torn down again.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	unsigned long nr_pages;
	int rc, zone_enum;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	while (size_pages > 0) {
		if (start_pfn < dma_end_pfn) {
			/* Clamp the chunk to the DMA zone boundary. */
			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
				   dma_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_DMA;
		} else if (start_pfn < normal_end_pfn) {
			/* Clamp the chunk to the end of boot-time DRAM. */
			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
				   normal_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_NORMAL;
		} else {
			nr_pages = size_pages;
			zone_enum = ZONE_MOVABLE;
		}
		/*
		 * Add only the clamped chunk (nr_pages, not size_pages):
		 * passing the full remaining count here would register
		 * pages beyond the zone boundary with the wrong zone and
		 * then add them a second time on the next iteration.
		 */
		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
				 start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}