/*
 * Disable an EN1-sourced CIU interrupt.
 *
 * SMP: clears the interrupt's enable bit in every online core's EN1
 * register under the ciu1 rwlock.  UP: clears it only in the local
 * core's EN1 with interrupts masked.
 */
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		/* Translate the Linux CPU number to the hardware core id. */
		int coreid = cpu_logical_map(cpu);
		/* Read-modify-write: clear only this irq's bit in EN1 (IP3 copy). */
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	/* Read back so the write is known to have completed. */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	local_irq_restore(flags);
#endif
}
/*
 * octeon_ciu_reset
 *
 * Shutdown all CIU to IP2, IP3 mappings
 */
void octeon_ciu_reset(void)
{
	uint64_t cvmctl;

	/* Disable all CIU interrupts by default */
	/* EN0/EN1 each have two per-core copies: index core*2 (IP2) and core*2+1 (IP3). */
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2+1), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2+1), 0);

/* NOTE(review): guard is spelled `SMP`; confirm this is the intended option
 * macro for this build system (a Linux-style tree would use CONFIG_SMP). */
#ifdef SMP
	/* Enable the MBOX interrupts. */
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2+1),
	    (1ull << (OCTEON_IRQ_MBOX0 - 8)) |
	    (1ull << (OCTEON_IRQ_MBOX1 - 8)));
#endif

	/*
	 * Move the Performance Counter interrupt to OCTEON_PMC_IRQ
	 */
	/* Presumably bits 9:7 of CvmCtl select the perf-counter interrupt
	 * line; confirm against the COP0 CvmCtl register specification. */
	cvmctl = mips_rd_cvmctl();
	cvmctl &= ~(7 << 7);
	cvmctl |= (OCTEON_PMC_IRQ + 2) << 7;
	mips_wr_cvmctl(cvmctl);
}
/*
 * Unmask one EN1-bank CIU interrupt on the local core.  The irq number
 * is smuggled through the void* argument.
 */
static void
ciu_en1_intr_unmask(void *arg)
{
	int irq = (uintptr_t)arg;
	uint64_t en1;

	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2));
	en1 |= 1ull << (irq - CIU_IRQ_EN1_BEGIN);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2), en1);
}
static int ciu_intr(void *arg) { struct ciu_softc *sc; uint64_t en0_sum, en1_sum; uint64_t en0_mask, en1_mask; int irq_index; int error; sc = arg; (void)sc; en0_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(cvmx_get_core_num()*2)); en1_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); en0_mask = cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2)); en1_mask = cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2)); en0_sum &= en0_mask; en1_sum &= en1_mask; if (en0_sum == 0 && en1_sum == 0) return (FILTER_STRAY); irq_index = 0; for (irq_index = 0; en0_sum != 0; irq_index++, en0_sum >>= 1) { if ((en0_sum & 1) == 0) continue; mips_intrcnt_inc(ciu_en0_intrcnt[irq_index]); error = intr_event_handle(ciu_en0_intr_events[irq_index], NULL); if (error != 0) printf("%s: stray en0 irq%d\n", __func__, irq_index); } irq_index = 0; for (irq_index = 0; en1_sum != 0; irq_index++, en1_sum >>= 1) { if ((en1_sum & 1) == 0) continue; mips_intrcnt_inc(ciu_en1_intrcnt[irq_index]); error = intr_event_handle(ciu_en1_intr_events[irq_index], NULL); if (error != 0) printf("%s: stray en1 irq%d\n", __func__, irq_index); } return (FILTER_HANDLED); }
/*
 * Per-core initialization for the data-plane cores: bring up local
 * packet I/O, then the IP/TCP/UDP/raw/socket layers in order.
 *
 * Returns 0 on success, -1 if packet I/O setup fails.
 * Note: core_id is not declared locally here, so it is presumably a
 * file/global variable shared with other routines — confirm.
 */
int inic_data_local_init(void)
{
	core_id = 0;

	CVM_COMMON_DBG_MSG(CVM_COMMON_DBG_LVL_INFO, "inic_data_local_init\n");

	if ( (cvmx_helper_initialize_packet_io_local()) == -1) {
		printf("inic_data_local_init : Failed to initialize/setup input ports\n");
		return (-1);
	}

	core_id = cvmx_get_core_num();
	/* Stagger the cores so they don't all proceed at once. */
	cvmx_wait(core_id * 500);	/* this is only for pass 1 */

	/*cvm_common_rand8_init();*/

	cvm_ip_local_init();
#ifdef INET6
	cvm_ip6_local_init();
#endif
	cvm_tcp_local_init();
	cvm_so_stack_socket_local_init();
	cvm_udp_local_init();
	cvm_raw_local_init();

	return 0;
}
/*
 * Ack an EN1 interrupt on CN58XX-and-newer parts: a single store to the
 * write-one-to-clear alias disables the bit on the local core (IP3 copy).
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	u64 bitmask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(cvmx_get_core_num() * 2 + 1), bitmask);
}
/*
 * Steer an EN0-sourced interrupt: for every online core, set the irq's
 * enable bit if the core is in the destination mask, clear it otherwise.
 * Always returns 0.
 */
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	uint64_t bitmask = 1ull << (irq - OCTEON_IRQ_WORKQ0); /* EN0 bit 0-63 */
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));

		if (cpumask_test_cpu(cpu, dest))
			en0 |= bitmask;
		else
			en0 &= ~bitmask;
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}
/*
 * Enable an EN0 interrupt on CN58XX-and-newer parts: a single store to
 * the write-one-to-set alias enables the bit on the local core.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	u64 bitmask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(cvmx_get_core_num() * 2), bitmask);
}
/**
 * Tell whether the running core has a pending unplug request.
 *
 * @return 1 if this core's bit is set in unplug_cores, 0 otherwise.
 */
int is_core_being_unplugged(void)
{
	uint64_t self_bit = 1ull << cvmx_get_core_num();

	return (cvmx_app_hotplug_info_ptr->unplug_cores & self_bit) ? 1 : 0;
}
/*
 * Steer an EN1-sourced interrupt: for every online core, set or clear
 * the irq's enable bit according to the destination cpumask, under the
 * ciu1 rwlock.  Always returns 0.
 */
static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1 (coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

	return 0;
}
/*
 * Per-core initialization for the application cores: seed the PRNG from
 * the core number, bring up local packet I/O, then the app socket layer.
 * Returns 0 on success, -1 if packet I/O setup fails.
 */
int inic_app_local_init(void)
{
	int core = -1;

	CVM_COMMON_DBG_MSG(CVM_COMMON_DBG_LVL_INFO, "inic_app_local_init\n");

	core = cvmx_get_core_num();

	/* Give each application core its own random seed. */
	srand((core + 1) * RAND_VAL);

	if (cvmx_helper_initialize_packet_io_local() == -1) {
		printf("inic_app_local_init : Failed to initialize/setup input ports\n");
		return (-1);
	}

	cvm_so_app_socket_local_init();

	return (0);
}
/*
 * Map a Linux CPU number to its Octeon hardware core id.  On UP builds
 * there is only the running core, so the cpu argument is ignored.
 */
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
/*
 * ISR for the incoming shutdown request interrupt.
 */
/* NOTE(review): this definition is truncated in the visible chunk — the
 * function body continues beyond the end of this span. */
static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32], void *user_arg)
{
	cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
	uint64_t mbox;
	cvmx_app_hotplug_info_t *ai = cvmx_app_hotplug_info_ptr;
	int dbg = 0;

#ifdef DEBUG
	dbg = 1;
#endif
	/* Mask further mailbox interrupts while we handle this one. */
	cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);

	mbox = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
	/* Clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), mbox);
	/* Make sure the write above completes */
	cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));

	if (!cvmx_app_hotplug_info_ptr) {
		printf("ERROR: Application is not registered for hotplug!\n");
		return;
	}

	/* Refuse to shut down unless every application core activated hotplug. */
	if (ai->hotplug_activated_coremask != sys_info_ptr->core_mask) {
		printf("ERROR: Shutdown requested when not all app cores have "
		       "activated hotplug\n" "Application coremask: 0x%x Hotplug "
		       "coremask: 0x%x\n", (unsigned int)sys_info_ptr->core_mask,
		       (unsigned int)ai->hotplug_activated_coremask);
		return;
	}

	/* Mailbox bit 0 carries the shutdown request. */
	if (mbox & 1ull) {
		int core = cvmx_get_core_num();

		if (dbg)
			printf("Shutting down application .\n");

		/* Call the application's own callback function */
		if (ai->shutdown_callback) {
			((void(*)(void*))(long)ai->shutdown_callback)(CASTPTR(void *, ai->data));
		}
/** * Activate the current application core for receiving hotplug shutdown requests. * * This routine makes sure that each core belonging to the application is enabled * to receive the shutdown notification and also provides a barrier sync to make * sure that all cores are ready. */ int cvmx_app_hotplug_activate(void) { uint64_t cnt = 0; uint64_t cnt_interval = 10000000; while (!cvmx_app_hotplug_info_ptr) { cnt++; if ((cnt % cnt_interval) == 0) printf("waiting for cnt=%lld\n", (unsigned long long)cnt); } if (cvmx_app_hotplug_info_ptr->hplugged_cores & (1ull << cvmx_get_core_num())) { #ifdef DEBUG printf("core=%d : is being hotplugged \n", cvmx_get_core_num()); #endif cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get(); sys_info_ptr->core_mask |= 1ull << cvmx_get_core_num(); } else { __cvmx_app_hotplug_sync(); } cvmx_spinlock_lock(&cvmx_app_hotplug_lock); if (!cvmx_app_hotplug_info_ptr) { cvmx_spinlock_unlock(&cvmx_app_hotplug_lock); printf("ERROR: This application is not registered for hotplug\n"); return -1; } /* Enable the interrupt before we mark the core as activated */ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0); cvmx_app_hotplug_info_ptr->hotplug_activated_coremask |= (1ull<<cvmx_get_core_num()); #ifdef DEBUG printf("cvmx_app_hotplug_activate(): coremask 0x%x valid %d sizeof %d\n", cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid, sizeof(*cvmx_app_hotplug_info_ptr)); #endif cvmx_spinlock_unlock(&cvmx_app_hotplug_lock); return 0; }
/*
 * End-of-interrupt for EN1 sources on CN58XX-and-newer parts: re-arm the
 * bit via the write-one-to-set alias, but only if the irq has not been
 * disabled in the meantime.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u64 bitmask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if (!(desc->status & IRQ_DISABLED))
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(cvmx_get_core_num() * 2 + 1), bitmask);
}
/*
 * Remove the running core's bit from the shared sysinfo core mask,
 * under the state lock.
 */
void cvmx_sysinfo_remove_self_from_core_mask(void)
{
	int core = cvmx_get_core_num();
	/* 1u, not 1: shifting a signed 1 by 31 is undefined behavior. */
	uint32_t core_mask = 1u << core;

	cvmx_spinlock_lock(&state.lock);
	state.sysinfo.core_mask = state.sysinfo.core_mask & ~core_mask;
	cvmx_spinlock_unlock(&state.lock);
}
/*
 * Add the running core's bit to the shared sysinfo core mask,
 * under the state lock.
 */
void cvmx_sysinfo_add_self_to_core_mask(void)
{
	int core = cvmx_get_core_num();
	/* 1u, not 1: shifting a signed 1 by 31 is undefined behavior. */
	uint32_t core_mask = 1u << core;

	cvmx_spinlock_lock(&state.lock);
	state.sysinfo.core_mask = state.sysinfo.core_mask | core_mask;
	cvmx_spinlock_unlock(&state.lock);
}
/*
 * Throttle the power limit of the core executing this call.
 *
 * @param percentage  power limit to apply to this core.
 * @return 0 on success, -1 if throttling is unavailable or the set fails.
 */
int cvmx_power_throttle_self(uint8_t percentage)
{
	if (!CVMX_PTH_AVAILABLE)
		return -1;

	/*
	 * cvmx_power_throttle_set_powlim() follows the usual cvmx convention
	 * of returning 0 on success.  The original test (`== 0` -> return -1)
	 * reported failure on success and success on failure; inverted here.
	 */
	if (cvmx_power_throttle_set_powlim(cvmx_get_core_num(), percentage) != 0)
		return -1;

	return 0;
}
/* Bump g1 on any non-zero core, g0 on core 0. */
void foo (void)
{
	if (cvmx_get_core_num () != 0)
		g1 += 3;
	else
		g0 += 9;
}
/*
 * Remove the running core from the hotplug coremask and the
 * hotplug-activated coremask, under the hotplug lock.
 */
void cvmx_app_hotplug_remove_self_from_core_mask(void)
{
	int core = cvmx_get_core_num();
	/*
	 * NOTE(review): the mask is built with 1ull but stored in a uint32_t,
	 * so for core >= 32 it truncates to 0 and this function silently does
	 * nothing — confirm the supported core range and the coremask field
	 * width before widening.
	 */
	uint32_t core_mask = 1ull << core;

	cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
	cvmx_app_hotplug_info_ptr->coremask = cvmx_app_hotplug_info_ptr->coremask & ~core_mask ;
	cvmx_app_hotplug_info_ptr->hotplug_activated_coremask = cvmx_app_hotplug_info_ptr->hotplug_activated_coremask & ~core_mask ;
	cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
}
/** * Simulator magic is not supported in user mode under Linux. * This version of simprintf simply calls the underlying C * library printf for output. It also makes sure that two * calls to simprintf provide atomic output. * * @param fmt Format string in the same format as printf. */ void simprintf(const char *fmt, ...) { CVMX_SHARED static cvmx_spinlock_t simprintf_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER; va_list ap; cvmx_spinlock_lock(&simprintf_lock); printf("SIMPRINTF(%d): ", (int)cvmx_get_core_num()); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); cvmx_spinlock_unlock(&simprintf_lock); }
/*
 * Build the Linux CPU <-> Octeon core maps at boot.  The boot core
 * becomes CPU 0; cores present in the sysinfo coremask get the next
 * CPU numbers; with CPU hotplug, cores absent from the boot coremask
 * are additionally marked possible so they can be brought up later.
 */
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	/* Boot core maps to CPU 0 in both directions. */
	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numberd from 0.
	 */
	/* Only when SMP boot is allowed and the bootloader entry is known. */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
	     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			/* Not booted yet: possible but not present. */
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
/*
 * Final resting place of an offlined CPU: tear down the idle task,
 * publish CPU_DEAD, then spin until the hardware resets the core.
 */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;
	mb();

	for (;;)
		;	/* core will be reset here */
}
/**
 * Callout to firmware before smp_init
 *
 */
static void __init octeon_prepare_cpus(unsigned int max_cpus)
{
	int rv;

	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	rv = request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			 IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			 mailbox_interrupt);
	if (rv)
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
}
/*
 * Disable a CIU interrupt on the local core only.  The per-cpu enable
 * mirror is updated first, then flushed to the matching EN0/EN1 CSR,
 * all under the per-line lock.  cd.s.line selects EN0 vs EN1; cd.s.bit
 * is the bit within that register.
 */
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		clear_bit(cd.s.bit, pen);
		/* EN0 is written from its IP2 copy (index core*2). */
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		clear_bit(cd.s.bit, pen);
		/* EN1 is written from its IP3 copy (index core*2+1). */
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
/*
 * Test whether 'irq' is enabled on the given cpu: extract the irq's bit
 * from that core's EN0 (irq < 64) or, presumably, EN1 (irq >= 64).
 *
 * NOTE(review): this definition is truncated in the visible chunk — the
 * else branch and the return continue beyond the end of this span.
 */
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
#ifdef CONFIG_SMP
	int coreid = cpu_logical_map(cpu);
#else
	int coreid = cvmx_get_core_num();
#endif
	/* EN0 bits are offset from WORKQ0, EN1 bits from WDOG0. */
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 :
		irq - OCTEON_IRQ_WDOG0;

	if (irq < 64) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
/*
 * Enable an EN0-sourced interrupt on the local core via a locked
 * read-modify-write of its EN0 register, with a read-back to make sure
 * the write has completed.
 */
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int core = cvmx_get_core_num();
	uint64_t bitmask = 1ull << (irq - OCTEON_IRQ_WORKQ0); /* EN0 bit 0-63 */
	uint64_t en0;
	unsigned long flags;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core * 2));
	cvmx_write_csr(CVMX_CIU_INTX_EN0(core * 2), en0 | bitmask);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(core * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
/*
 * Enable an EN1-sourced interrupt on the local core via a locked
 * read-modify-write of its EN1 register (IP3 copy), with a read-back to
 * make sure the write has completed.
 */
static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int core = cvmx_get_core_num();
	uint64_t bitmask = 1ull << (irq - OCTEON_IRQ_WDOG0); /* EN1 bit 0-63 */
	uint64_t en1;
	unsigned long flags;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(core * 2 + 1));
	cvmx_write_csr(CVMX_CIU_INTX_EN1(core * 2 + 1), en1 | bitmask);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(core * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Disable an EN1-sourced interrupt on every online core: clear the bit
 * in each core's EN1 register under the ciu1 lock, then read one EN1
 * back so all writes are known to have completed.
 */
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	uint64_t bitmask = 1ull << (irq - OCTEON_IRQ_WDOG0); /* EN1 bit 0-63 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~bitmask;
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Watchdog NMI handler: dump the faulting core's PC and status/cause,
 * then spin forever waiting for the watchdog's Soft Reset to take the
 * core down.
 */
void octeon_wdog_nmi(void)
{
	int core = cvmx_get_core_num();

	printf("cpu%u: NMI detected\n", core);
	printf("cpu%u: Exception PC: %p\n", core, (void *)mips_rd_excpc());
	printf("cpu%u: status %#x cause %#x\n", core, mips_rd_status(),
	    mips_rd_cause());

	/* Nothing left to do but wait for the reset. */
	for (;;)
		continue;
}