/*
 * Per-AP startup hook (Cavium Octeon): bring a secondary core to the
 * point where it can take clock, IPI and CIU interrupts.
 *
 * cpuid selects this core's CIU mailbox-clear register; presumably the
 * hardware core number -- confirm against the caller.
 */
void platform_init_ap(int cpuid)
{
	unsigned ciu_int_mask, clock_int_mask, ipi_int_mask;

	/*
	 * Set the exception base.
	 */
	mips_wr_ebase(0x80000000);

	/*
	 * Clear any pending IPIs: write all-ones to this core's CIU
	 * mailbox-clear register so stale mailbox bits do not fire the
	 * moment interrupts are unmasked below.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cpuid), 0xffffffff);

	/*
	 * Set up interrupts (reset the CIU state for this core).
	 */
	octeon_ciu_reset();

	/*
	 * Unmask the clock (hard int 5), IPI and CIU (hard int 0)
	 * interrupt lines.
	 */
	ciu_int_mask = hard_int_mask(0);
	clock_int_mask = hard_int_mask(5);
	ipi_int_mask = hard_int_mask(platform_ipi_intrnum());
	set_intr_mask(ciu_int_mask | clock_int_mask | ipi_int_mask);

	/* Make sure all the above register writes have been posted. */
	mips_wbflush();
}
/*
 * Per-AP startup hook: enable coprocessor access, expose selected
 * hardware registers to userland via RDHWR, and unmask the per-thread
 * clock interrupt on a secondary core.
 */
void platform_init_ap(int cpuid)
{
	uint32_t status;
	register_t hwrena;
	u_int clock_int_mask;

	KASSERT(cpuid < MAXCPU, ("%s: invalid CPU id %d", __func__, cpuid));

	/* Make sure coprocessors (CP0, FPU) are enabled. */
	status = mips_rd_status();
	status |= (MIPS_SR_COP_0_BIT | MIPS_SR_COP_1_BIT);
#if defined(CPU_CHERI)
	/* CHERI capability coprocessor (CP2) must be usable as well. */
	status |= MIPS_SR_COP_2_BIT;
#endif
	mips_wr_status(status);

	/*
	 * Enable the RDHWR instruction in userspace for the cycle
	 * counter, counter resolution and CPU number registers.
	 */
	hwrena = mips_rd_hwrena();
	hwrena |= (MIPS_HWRENA_CC | MIPS_HWRENA_CCRES | MIPS_HWRENA_CPUNUM);
	mips_wr_hwrena(hwrena);

	/*
	 * Enable per-thread timer (CP0 count/compare on hard int 5).
	 */
	clock_int_mask = hard_int_mask(5);
	set_intr_mask(clock_int_mask);
}
// Mask a single IRQ line in the cached PIC mask and push the result to
// the hardware. When every slave-PIC line (IRQ 8-15) ends up masked,
// the master's cascade line (IRQ 2) is masked too, since nothing can
// arrive through it anymore.
inline static void disable_irq(const uint8_t irq) noexcept {
  irq_mask_ |= (1 << irq);

  const bool slave_fully_masked = ((irq_mask_ & 0xFF00) == 0xFF00);
  if (slave_fully_masked) {
    irq_mask_ |= (1 << 2);
  }

  set_intr_mask(irq_mask_);
  INFO2("- Disabling IRQ %i, mask: 0x%x", irq, irq_mask_);
}
/*
 * Initialize the hardware exception vectors, and the jump table used to
 * call locore cache and TLB management functions, based on the kind
 * of CPU the kernel is running on.
 */
void mips_vector_init(void)
{
	/*
	 * Make sure that the Wait region logic is not been
	 * changed: the idle-wait code must be exactly 16 bytes so the
	 * exception return fixup that skips it stays valid.
	 */
	if (MipsWaitEnd - MipsWaitStart != 16)
		panic("startup: MIPS wait region not correct");

	/*
	 * Copy down exception vector code. Each vector slot is 0x80
	 * bytes, so the handler stubs must fit in that.
	 */
	if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
		panic("startup: UTLB code too large");
	if (MipsCacheEnd - MipsCache > 0x80)
		panic("startup: Cache error code too large");

	bcopy(MipsTLBMiss, (void *)MIPS_UTLB_MISS_EXC_VEC,
	    MipsTLBMissEnd - MipsTLBMiss);

	/*
	 * XXXRW: Why don't we install the XTLB handler for all 64-bit
	 * architectures?
	 */
#if defined(__mips_n64) || defined(CPU_RMI) || defined(CPU_NLM) || defined(CPU_BERI) || defined(CPU_CHERI)
/* Fake, but sufficient, for the 32-bit with 64-bit hardware addresses */
	bcopy(MipsTLBMiss, (void *)MIPS_XTLB_MISS_EXC_VEC,
	    MipsTLBMissEnd - MipsTLBMiss);
#endif

	bcopy(MipsException, (void *)MIPS_GEN_EXC_VEC,
	    MipsExceptionEnd - MipsException);
	bcopy(MipsCache, (void *)MIPS_CACHE_ERR_EXC_VEC,
	    MipsCacheEnd - MipsCache);
#ifdef CPU_CHERI
	/* CHERI CCall/CReturn exception vector. */
	bcopy(CHERICCallVector, (void *)CHERI_CCALL_EXC_VEC,
	    CHERICCallVectorEnd - CHERICCallVector);
#endif

	/*
	 * Clear out the I and D caches: the vectors were just written
	 * as data, so flush/invalidate before the CPU fetches them as
	 * instructions.
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/*
	 * Mask all interrupts. Each interrupt will be enabled
	 * when handler is installed for it
	 */
	set_intr_mask(0);

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
}
/*
 * Unmask one IRQ line in the cached PIC mask and program the hardware.
 * A slave-PIC line (irq >= 8) also requires the master's cascade line
 * (IRQ 2) to be open, or the slave can never deliver.
 */
void enable_irq(unsigned int irq)
{
	printf(">>> Enabling IRQ %i, old mask: 0x%x ", irq, irq_mask);

	irq_mask &= ~(1 << irq);
	if (irq >= 8) {
		/* Slave interrupt: open the cascade as well. */
		irq_mask &= ~(1 << 2);
	}

	set_intr_mask(irq_mask);
	printf(" new mask: 0x%x \n", irq_mask);
}
/*
 * Per-AP startup hook: unmask the clock and IPI hard interrupts so this
 * secondary core can receive scheduling ticks and cross-CPU signals.
 * cpuid is accepted for interface compatibility but unused here.
 */
void platform_init_ap(int cpuid)
{
	/* Hard int 5 is the CP0 count/compare clock line. */
	int clk_mask = hard_int_mask(5);
	int ipi_mask = hard_int_mask(platform_ipi_intrnum());

	set_intr_mask(ipi_mask | clk_mask);
}
/*
 * Initialize the legacy 8259A master/slave PIC pair.
 *
 * The initialization command words must be written in exactly this
 * order: ICW1 to each control port starts the sequence, then
 * ICW2..ICW4 go to the mask (data) port. ICW2 sets the vector offset,
 * ICW3 the cascade wiring, ICW4 the operating mode -- see the named
 * constants for the actual values. Finally the cached irq_mask is
 * programmed so only the intended lines are unmasked.
 */
void init_pic() {
  OS::outb(PIC_MSTR_CTRL, PIC_MSTR_ICW1);  // ICW1: begin init (master)
  OS::outb(PIC_SLV_CTRL, PIC_SLV_ICW1);    // ICW1: begin init (slave)
  OS::outb(PIC_MSTR_MASK, PIC_MSTR_ICW2);  // ICW2: interrupt vector offset
  OS::outb(PIC_SLV_MASK, PIC_SLV_ICW2);
  OS::outb(PIC_MSTR_MASK, PIC_MSTR_ICW3);  // ICW3: master/slave cascade wiring
  OS::outb(PIC_SLV_MASK, PIC_SLV_ICW3);
  OS::outb(PIC_MSTR_MASK, PIC_MSTR_ICW4);  // ICW4: operating mode
  OS::outb(PIC_SLV_MASK, PIC_SLV_ICW4);
  set_intr_mask(irq_mask);                 // restore cached line mask
}
/*
 * Initialize the hardware exception vectors, and the jump table used to
 * call locore cache and TLB management functions, based on the kind
 * of CPU the kernel is running on.
 */
void mips_vector_init(void)
{
	/*
	 * Make sure that the Wait region logic is not been
	 * changed: the idle-wait code must be exactly 16 bytes so the
	 * exception return fixup that skips it stays valid.
	 */
	if (MipsWaitEnd - MipsWaitStart != 16)
		panic("startup: MIPS wait region not correct");

	/*
	 * Copy down exception vector code. Each vector slot is 0x80
	 * bytes, so the handler stubs must fit in that.
	 */
	if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
		panic("startup: UTLB code too large");
	if (MipsCacheEnd - MipsCache > 0x80)
		panic("startup: Cache error code too large");

	bcopy(MipsTLBMiss, (void *)MIPS_UTLB_MISS_EXC_VEC,
	    MipsTLBMissEnd - MipsTLBMiss);
#ifdef __mips_n64
	/* 64-bit ABI: install the same stub at the XTLB miss vector. */
	bcopy(MipsTLBMiss, (void *)MIPS_XTLB_MISS_EXC_VEC,
	    MipsTLBMissEnd - MipsTLBMiss);
#endif

	bcopy(MipsException, (void *)MIPS_GEN_EXC_VEC,
	    MipsExceptionEnd - MipsException);
	bcopy(MipsCache, (void *)MIPS_CACHE_ERR_EXC_VEC,
	    MipsCacheEnd - MipsCache);

	/*
	 * Clear out the I and D caches: the vectors were just written
	 * as data, so flush/invalidate before the CPU fetches them as
	 * instructions.
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/*
	 * Mask all interrupts. Each interrupt will be enabled
	 * when handler is installed for it
	 */
	set_intr_mask(0);

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
}
/*
 * Initialize the hardware exception vectors, and the jump table used to
 * call locore cache and TLB management functions, based on the kind
 * of CPU the kernel is running on.
 */
void mips_vector_init(void)
{
	/*
	 * Copy down exception vector code. Each vector slot is 0x80
	 * bytes, so the handler stubs must fit in that.
	 */
	if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
		panic("startup: UTLB code too large");
	if (MipsCacheEnd - MipsCache > 0x80)
		panic("startup: Cache error code too large");

	bcopy(MipsTLBMiss, (void *)MIPS_UTLB_MISS_EXC_VEC,
	    MipsTLBMissEnd - MipsTLBMiss);
#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
/* Fake, but sufficient, for the 32-bit with 64-bit hardware addresses */
	bcopy(MipsTLBMiss, (void *)MIPS3_XTLB_MISS_EXC_VEC,
	    MipsTLBMissEnd - MipsTLBMiss);
#endif

	bcopy(MipsException, (void *)MIPS3_GEN_EXC_VEC,
	    MipsExceptionEnd - MipsException);
	bcopy(MipsCache, (void *)MIPS3_CACHE_ERR_EXC_VEC,
	    MipsCacheEnd - MipsCache);

	/*
	 * Clear out the I and D caches: the vectors were just written
	 * as data, so flush/invalidate before the CPU fetches them as
	 * instructions.
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/*
	 * Mask all interrupts. Each interrupt will be enabled
	 * when handler is installed for it
	 */
	set_intr_mask(0);

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
}
/*
 * Per-AP startup hook (Sibyte): make kseg0 cacheable-coherent, set up
 * the per-core interrupt mapper, then unmask the clock and IPI hard
 * interrupt lines.
 */
void platform_init_ap(int cpuid)
{
	int clk_mask, ipi_mask;

	/* This platform has exactly one application processor. */
	KASSERT(cpuid == 1, ("AP has an invalid cpu id %d", cpuid));

	/*
	 * Make sure that kseg0 is mapped cacheable-coherent before this
	 * core starts touching shared kernel data.
	 */
	kseg0_map_coherent();

	/* Per-core interrupt controller setup. */
	sb_intr_init(cpuid);

	/* Accept scheduling ticks and cross-CPU signals from now on. */
	clk_mask = hard_int_mask(5);
	ipi_mask = hard_int_mask(platform_ipi_intrnum());
	set_intr_mask(ipi_mask | clk_mask);
}
/*
 * Per-AP startup hook (Ingenic XBurst): acknowledge any IPI posted
 * while the core was starting, enable mailbox-0 interrupts for this
 * core, then unmask the IPI hard interrupt line.
 */
void platform_init_ap(int cpuid)
{
	unsigned reg;

	/*
	 * Clear any pending IPIs by writing 0 to this core's MIRQ0
	 * pending bit in the core status register (write-0-to-clear --
	 * TODO confirm against the SoC manual).
	 */
	mips_wr_xburst_core_sts(~(JZ_CORESTS_MIRQ0P << cpuid));

	/* Allow IPI mbox for this core (read-modify-write of REIM). */
	reg = mips_rd_xburst_reim();
	reg |= (JZ_REIM_MIRQ0M << cpuid);
	mips_wr_xburst_reim(reg);

	/*
	 * Unmask the ipi interrupts.
	 */
	reg = hard_int_mask(platform_ipi_hardintr_num());
	set_intr_mask(reg);
}
/*
 * Mask one IRQ line in the cached PIC mask and program the hardware.
 * Once every slave line (IRQ 8-15) is masked, the master's cascade
 * line (IRQ 2) is masked too, since nothing can arrive through it.
 */
void disable_irq(unsigned int irq)
{
	const unsigned int slave_lines = 0xFF00;

	irq_mask |= (1 << irq);

	/* All slave lines masked? Then close the cascade as well. */
	if ((irq_mask & slave_lines) == slave_lines) {
		irq_mask |= (1 << 2);
	}

	set_intr_mask(irq_mask);
}