/*
 * Publish a completed descriptor chain to the guest-visible used ring.
 *
 * @vq:   virtio queue whose used ring is updated
 * @head: index of the head descriptor of the completed chain
 * @len:  number of bytes written into the chain's buffers
 *
 * Returns a pointer to the used-ring slot that was filled, or NULL when
 * the queue has no guest-mapped address yet.
 *
 * Ordering is critical here: the element body must be globally visible
 * before the used index advances, and the index must be visible before
 * any subsequent guest notification.
 */
struct vring_used_elem *virtio_queue_set_used_elem(struct virtio_queue *vq,
						   u32 head, u32 len)
{
	struct vring_used_elem *elem;

	/* Queue not yet configured/mapped by the guest. */
	if (!vq->addr) {
		return NULL;
	}

	/* Pick the next free slot; the used index wraps modulo ring size. */
	elem = &vq->vring.used->ring[umod32(vq->vring.used->idx,
					    vq->vring.num)];
	elem->id = head;
	elem->len = len;

	/*
	 * Make the element's id/len visible before advancing idx; the
	 * guest must never observe an incremented idx that still points
	 * at a stale entry.
	 */
	arch_wmb();

	vq->vring.used->idx++;

	/*
	 * Order the idx update ahead of any notification the caller
	 * issues next; without this the guest could take the interrupt
	 * and still read the old idx, ignoring the queue.
	 */
	arch_wmb();

	return elem;
}
/*
 * Raise a software-generated interrupt (SGI) on the CPUs named in @mask.
 *
 * @d:    host IRQ descriptor; d->hwirq selects the SGI number
 * @mask: set of target CPUs for the SGI
 */
static void gic_raise(struct vmm_host_irq *d, const struct vmm_cpumask *mask)
{
	unsigned long cpu_map = *vmm_cpumask_bits(mask);

	/*
	 * Make all prior Normal-memory stores visible to the target
	 * CPUs before the IPI is triggered, so the receivers see the
	 * data the IPI is about.
	 */
	arch_wmb();

	/* SGIs are always routed through the first GIC instance. */
	gic_write((cpu_map << 16) | d->hwirq,
		  gic_data[0].dist_base + GICD_SOFTINT);
}
/*
 * Prepare a secondary CPU for release via SCU + boot mailboxes.
 *
 * @cpu: logical index of the secondary CPU being brought up
 *
 * Resolves the physical address of the secondary startup stub, enables
 * SCU snooping when an SCU is present, then (if the platform defines
 * them) pokes the per-CPU clear and release mailbox addresses.
 *
 * Returns VMM_OK on success or the vmm_host_va2pa() error code.
 */
static int __init scu_cpu_prepare(unsigned int cpu)
{
	int err;
	u32 word = 0;
	physical_addr_t start_pa;

	/* Resolve the physical address of the secondary startup stub. */
	err = vmm_host_va2pa((virtual_addr_t)&_start_secondary_nopen,
			     &start_pa);
	if (err) {
		return err;
	}

	/* Turn on cache coherency (snooping) via the SCU, if present. */
	if (scu_base) {
		scu_enable((void *)scu_base);
	}

	/* Clear mailbox: write all-ones, ordered after prior stores. */
	if (clear_addr[cpu]) {
		arch_wmb();
		word = ~0x0;
		vmm_host_memory_write(clear_addr[cpu], &word,
				      sizeof(u32), FALSE);
	}

	/* Release mailbox: hand the secondary CPU its entry point. */
	if (release_addr[cpu]) {
		arch_wmb();
		/*
		 * NOTE(review): physical_addr_t may be wider than u32 on
		 * LPAE/64-bit builds; the narrowing store assumes the stub
		 * lives below 4GB — confirm against platform constraints.
		 */
		word = start_pa;
		vmm_host_memory_write(release_addr[cpu], &word,
				      sizeof(u32), FALSE);
	}

	return VMM_OK;
}
/*
 * Store @value into @atom->counter and then issue a write barrier so
 * the new value is ordered before any subsequent stores from this CPU.
 *
 * NOTE(review): the store itself is a plain (non-atomic) assignment;
 * presumably natural alignment of the counter makes the store
 * single-copy atomic on this architecture — confirm for each port.
 */
void __lock arch_atomic_write(atomic_t *atom, long value)
{
	atom->counter = value;
	/* Order the counter update before later stores become visible. */
	arch_wmb();
}