int domain_vgic_init(struct domain *d, unsigned int nr_spis) { int i; int ret; d->arch.vgic.ctlr = 0; /* Limit the number of virtual SPIs supported to (1020 - 32) = 988 */ if ( nr_spis > (1020 - NR_LOCAL_IRQS) ) return -EINVAL; d->arch.vgic.nr_spis = nr_spis; ret = domain_vgic_register(d); if ( ret < 0 ) return ret; spin_lock_init(&d->arch.vgic.lock); d->arch.vgic.shared_irqs = xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d)); if ( d->arch.vgic.shared_irqs == NULL ) return -ENOMEM; d->arch.vgic.pending_irqs = xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis); if ( d->arch.vgic.pending_irqs == NULL ) return -ENOMEM; for (i=0; i<d->arch.vgic.nr_spis; i++) vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32); /* SPIs are routed to VCPU0 by default */ for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ ) vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1, 0); ret = d->arch.vgic.handler->domain_init(d); if ( ret ) return ret; d->arch.vgic.allocated_irqs = xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d))); if ( !d->arch.vgic.allocated_irqs ) return -ENOMEM; /* vIRQ0-15 (SGIs) are reserved */ for ( i = 0; i < NR_GIC_SGI; i++ ) set_bit(i, d->arch.vgic.allocated_irqs); return 0; }
int vcpu_vgic_init(struct vcpu *v) { int i; v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank); if ( v->arch.vgic.private_irqs == NULL ) return -ENOMEM; /* SGIs/PPIs are always routed to this VCPU */ vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id); v->domain->arch.vgic.handler->vcpu_init(v); memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs)); for (i = 0; i < 32; i++) vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i); INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs); INIT_LIST_HEAD(&v->arch.vgic.lr_pending); spin_lock_init(&v->arch.vgic.lock); return 0; }
/*
 * Initialise the per-domain virtual GIC state, selecting the vGIC
 * backend (GICv2/GICv3) inline via a switch on the emulated GIC
 * version.
 *
 * NOTE(review): this file contains two definitions of
 * domain_vgic_init() — the earlier one delegates backend selection to
 * domain_vgic_register() instead of switching here.  Two same-named
 * external definitions cannot both link; presumably one variant is
 * meant to be removed — confirm which.
 *
 * Returns 0 on success, -EINVAL if nr_spis exceeds the limit, -ENODEV
 * for an unknown/unsupported GIC version, -ENOMEM on allocation
 * failure, or a negative error from the handler's domain_init().
 */
int domain_vgic_init(struct domain *d, unsigned int nr_spis)
{
    int i;
    int ret;

    d->arch.vgic.ctlr = 0;

    /* Limit the number of virtual SPIs supported to (1020 - 32) = 988 */
    if ( nr_spis > (1020 - NR_LOCAL_IRQS) )
        return -EINVAL;

    d->arch.vgic.nr_spis = nr_spis;

    /* Pick the vGIC backend matching the emulated GIC version. */
    switch ( d->arch.vgic.version )
    {
#ifdef CONFIG_HAS_GICV3
    case GIC_V3:
        if ( vgic_v3_init(d) )
            return -ENODEV;
        break;
#endif
    case GIC_V2:
        if ( vgic_v2_init(d) )
            return -ENODEV;
        break;
    default:
        printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n",
               d->domain_id, d->arch.vgic.version);
        return -ENODEV;
    }

    spin_lock_init(&d->arch.vgic.lock);

    d->arch.vgic.shared_irqs =
        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
    if ( d->arch.vgic.shared_irqs == NULL )
        return -ENOMEM;

    d->arch.vgic.pending_irqs =
        xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
    if ( d->arch.vgic.pending_irqs == NULL )
        return -ENOMEM;

    /* SPIs occupy vIRQ 32 upwards (after the 32 local SGIs/PPIs). */
    for (i=0; i<d->arch.vgic.nr_spis; i++)
        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);

    /* SPIs are routed to VCPU0 by default */
    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
        vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1, 0);

    ret = d->arch.vgic.handler->domain_init(d);
    if ( ret )
        return ret;

    d->arch.vgic.allocated_irqs =
        xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d)));
    if ( !d->arch.vgic.allocated_irqs )
        return -ENOMEM;

    /* vIRQ0-15 (SGIs) are reserved */
    for ( i = 0; i < NR_GIC_SGI; i++ )
        set_bit(i, d->arch.vgic.allocated_irqs);

    return 0;
}