/** * \fn init_gic_distributor * * Initialise GIC Dirstributor. * * Configure all IRQs to be active low, level sensitive, target cpu0, * priority 0xa0 and disable all interrupts. */ void init_gic_distributor() { GICD[GICD_CTLR] = 0x0; // disable GIC unsigned int typer = GICD[GICD_TYPER]; unsigned int lines = 32 * ((typer & 0x1F) + 1); unsigned int i; /* set global interrupts to active low, level sensitive */ for (i = 32; i < lines; i += 16) { GICD[GICD_ICFGR(i / 16)] = 0x0; } for (i = 32; i < lines; i += 4) { GICD[GICD_ITARGETSR(i / 4)] = 0x01010101; } for (i = 32; i < lines; i += 4) { GICD[GICD_IPRIORITYR(i / 4)] = 0xa0a0a0a0; } for (i = 32; i < lines; i += 32) { GICD[GICD_ICENABLER(i / 32)] = 0xFFFFFFFF; } for (i = 32; i < lines; i += 32) { GICD[GICD_IGROUPR(i / 32)] = 0x0; } GICD[GICD_CTLR] = 0x0; }
/* Return the CPU-target byte programmed for interrupt @it in GICD_ITARGETSR. */
uint32_t gic_it_get_target(size_t it)
{
    size_t reg = it / NUM_TARGETS_PER_REG;
    uint32_t shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
    uint32_t word = read32(gic.gicd_base + GICD_ITARGETSR(reg));

    /* Shift the interrupt's field down, then mask off the other fields. */
    return (word >> shift) & ITARGETSR_FIELD_MASK;
}
/**
 * Enable an interrupt line in the GIC distributor.
 *
 * Sets the enable bit, configures the interrupt handling model and routes
 * the interrupt to every CPU.
 */
void irqctrl_enable(unsigned int irq) {
	int n = irq / BITS_PER_REGISTER;
	int m = irq % BITS_PER_REGISTER;
	uint32_t tmp;

	/* Writing zeroes to this register has no
	 * effect, so we just write single "1".
	 * 1U: shifting signed 1 into bit 31 would be undefined behaviour. */
	REG_STORE(GICD_ISENABLER(n), 1U << m);

	/* N-N irq model: all CPUs receive this IRQ.
	 * BUGFIX: GICD_ICFGR holds 2 configuration bits per interrupt
	 * (16 fields per register), so it must be indexed by irq/16 with
	 * the field at bit 2*(irq%16) — the old irq/32, 1<<(irq%32)
	 * addressing was only correct for irq 0. Read-modify-write so the
	 * neighbouring interrupts' configuration is preserved. */
	n = irq / 16;
	m = irq % 16;
	tmp = REG_LOAD(GICD_ICFGR(n));
	tmp |= 1U << (2 * m);
	REG_STORE(GICD_ICFGR(n), tmp);

	/* All CPUs do listen to this IRQ (one target byte per interrupt). */
	n = irq / 4;
	m = irq % 4;
	tmp = REG_LOAD(GICD_ITARGETSR(n));
	tmp |= 0xFFU << (8 * m);
	REG_STORE(GICD_ITARGETSR(n), tmp);
}
void gic_it_set_cpu_mask(size_t it, uint8_t cpu_mask) { size_t idx = it / 32; uint32_t mask = 1 << (it % 32); uint32_t target; assert(it <= gic.max_it); /* Not too large */ /* Assigned to group0 */ assert(!(read32(gic.gicd_base + GICD_IGROUPR(idx)) & mask)); /* Route it to selected CPUs */ target = read32(gic.gicd_base + GICD_ITARGETSR(it / 4)); target &= ~(0xff << ((it % 4) * 8)); target |= cpu_mask << ((it % 4) * 8); DMSG("cpu_mask: writing 0x%x to 0x%x\n", target, gic.gicd_base + GICD_ITARGETSR(it / 4)); write32(target, gic.gicd_base + GICD_ITARGETSR(it / 4)); DMSG("cpu_mask: 0x%x\n", read32(gic.gicd_base + GICD_ITARGETSR(it / 4))); }
/*
 * Read back the CPU-target byte for interrupt @it from the distributor
 * described by @gd (one ITARGETSR_FIELD_BITS-wide field per interrupt).
 */
static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg = it / NUM_TARGETS_PER_REG;
	uint32_t shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	uint32_t word = read32(gd->gicd_base + GICD_ITARGETSR(reg));

	/* Shift the field down first, then mask — same result as mask-then-shift. */
	return (word >> shift) & ITARGETSR_FIELD_MASK;
}
void gic_it_set_cpu_mask(size_t it, uint8_t cpu_mask) { size_t idx = it / NUM_INTS_PER_REG; uint32_t mask = 1 << (it % NUM_INTS_PER_REG); uint32_t target, target_shift; assert(it <= gic.max_it); /* Not too large */ /* Assigned to group0 */ assert(!(read32(gic.gicd_base + GICD_IGROUPR(idx)) & mask)); /* Route it to selected CPUs */ target = read32(gic.gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)); target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS; target &= ~(ITARGETSR_FIELD_MASK << target_shift); target |= cpu_mask << target_shift; DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, gic.gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)); write32(target, gic.gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)); DMSG("cpu_mask: 0x%x\n", read32(gic.gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG))); }
static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it, uint8_t cpu_mask) { size_t idx __maybe_unused = it / NUM_INTS_PER_REG; uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG); uint32_t target, target_shift; /* Assigned to group0 */ assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); /* Route it to selected CPUs */ target = read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)); target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS; target &= ~(ITARGETSR_FIELD_MASK << target_shift); target |= cpu_mask << target_shift; DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)); write32(target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)); DMSG("cpu_mask: 0x%x\n", read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG))); }
/*
 * Initialize the GIC distributor.
 *
 * Disables the distributor, masks/clears every interrupt, programs default
 * priority, routing and trigger mode, then re-enables the distributor.
 */
void gicd_initialize(void)
{
	int i;

	/*
	 * Disable the distributor while configuring.
	 */
	sil_wrw_mem(GICD_CTLR, GICD_CTLR_DISABLE);

#ifdef TOPPERS_SAFEG_SECURE
	/*
	 * Assign every interrupt to group 1 (IRQ).
	 */
	for (i = 0; i < (GIC_TNUM_INTNO + 31) / 32; i++) {
		sil_wrw_mem(GICD_IGROUPR(i), 0xffffffffU);
	}
#endif /* TOPPERS_SAFEG_SECURE */

	/*
	 * Disable all interrupts (ICENABLER is write-one-to-clear).
	 */
	for (i = 0; i < (GIC_TNUM_INTNO + 31) / 32; i++) {
		sil_wrw_mem(GICD_ICENABLER(i), 0xffffffffU);
	}

	/*
	 * Clear all pending interrupts.
	 */
	for (i = 0; i < (GIC_TNUM_INTNO + 31) / 32; i++) {
		sil_wrw_mem(GICD_ICPENDR(i), 0xffffffffU);
	}

	/*
	 * Set all interrupts to the lowest priority (one byte per interrupt).
	 */
	for (i = 0; i < (GIC_TNUM_INTNO + 3) / 4; i++){
		sil_wrw_mem(GICD_IPRIORITYR(i), 0xffffffffU);
	}

	/*
	 * Route all shared peripheral interrupts to processor 0.
	 */
	for (i = GIC_INTNO_SPI0 / 4; i < (GIC_TNUM_INTNO + 3) / 4; i++) {
		sil_wrw_mem(GICD_ITARGETSR(i), 0x01010101U);
	}

	/*
	 * Configure all peripheral interrupts as level-triggered.
	 * (The ARM11 MPCore GIC uses a different ICFGR field encoding.)
	 */
	for (i = GIC_INTNO_PPI0 / 16; i < (GIC_TNUM_INTNO + 15) / 16; i++) {
#ifdef GIC_ARM11MPCORE
		sil_wrw_mem(GICD_ICFGR(i), 0x55555555U);
#else /* GIC_ARM11MPCORE */
		sil_wrw_mem(GICD_ICFGR(i), 0x00000000U);
#endif /* GIC_ARM11MPCORE */
	}

	/*
	 * Enable the distributor.
	 */
	sil_wrw_mem(GICD_CTLR, GICD_CTLR_ENABLE);
}
/**
 * \fn enable_irq(unsigned int irqn)
 *
 * Enable IRQ in GICD, setting target CPU to cpu0 and priority to 0xa0.
 */
void enable_irq(unsigned int irqn)
{
    /* Write-one-to-set enable bit.
     * 1U: shifting signed 1 into bit 31 would be undefined behaviour. */
    GICD[GICD_ISENABLER(irqn / 32)] = 1U << (irqn % 32);

    /* Add cpu0 to the target set (one target byte per interrupt);
     * OR-ing is correct here because targets accumulate. */
    GICD[GICD_ITARGETSR(irqn / 4)] |= 0x01U << ((irqn % 4) * 8);

    /* BUGFIX: set the priority byte to exactly 0xa0. The old |= could
     * not lower/replace a previously programmed priority — OR-ing merges
     * with whatever bits were already set in the field. */
    unsigned int prio = GICD[GICD_IPRIORITYR(irqn / 4)];
    prio &= ~(0xffU << ((irqn % 4) * 8));
    prio |= 0xa0U << ((irqn % 4) * 8);
    GICD[GICD_IPRIORITYR(irqn / 4)] = prio;
}