/* Entry point: run a fixed number of serialized iterations so that
   multi-core GDB hardware watchpoint/breakpoint tests behave
   deterministically, then spin forever at a known location.  */
int
main (void)
{
  CVMX_SHARED static cvmx_spinlock_t core_lock
    = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
  cvmx_sysinfo_t *sysinfo = cvmx_sysinfo_get ();
  int iter;

  for (iter = 0; iter < 4; iter++)
    {
      /* Sync up the cores; otherwise the same core hits the hardware
	 watchpoint again and again on continue commands.  */
      cvmx_coremask_barrier_sync (&sysinfo->core_mask);

      /* Serialize the calls to foo: there is otherwise a chance of
	 both cores hitting the hardware breakpoint at the same
	 time.  */
      cvmx_spinlock_lock (&core_lock);
      foo ();
      cvmx_spinlock_unlock (&core_lock);
    }

  while (1); /* set common breakpoint here */
}
/** * Main entrypoint of the application. Here we setup shared * memory and fork processes for each cpu. This simulates the * normal simple executive environment of one process per * cpu core. * * @param argc Number of command line arguments * @param argv The command line arguments * @return Return value for the process */ int main(int argc, const char *argv[]) { CVMX_SHARED static cvmx_spinlock_t mask_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER; CVMX_SHARED static int32_t pending_fork; unsigned long cpumask; unsigned long cpu; setup_system_info(); if (sizeof(void*) == 4) { if (linux_mem32_min) setup_reserve32(); else { printf("\nFailed to access 32bit shared memory region. Most likely the Kernel\n" "has not been configured for 32bit shared memory access. Check the\n" "kernel configuration.\n" "Aborting...\n\n"); exit(-1); } } setup_cvmx_shared(); /* Check to make sure the Chip version matches the configured version */ octeon_model_version_check(cvmx_app_init_processor_id); /* Get the list of logical cpus we should run on */ if (sched_getaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask)) { perror("sched_getaffinity failed"); exit(errno); } cvmx_sysinfo_t *system_info = cvmx_sysinfo_get(); cvmx_atomic_set32(&pending_fork, 1); for (cpu=0; cpu<16; cpu++) { if (cpumask & (1<<cpu)) { /* Turn off the bit for this CPU number. 
We've counted him */ cpumask ^= (1<<cpu); /* If this is the last CPU to run on, use this process instead of forking another one */ if (cpumask == 0) break; /* Increment the number of CPUs running this app */ cvmx_atomic_add32(&pending_fork, 1); /* Fork a process for the new CPU */ int pid = fork(); if (pid == 0) { break; } else if (pid == -1) { perror("Fork failed"); exit(errno); } } } /* Set affinity to lock me to the correct CPU */ cpumask = (1<<cpu); if (sched_setaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask)) { perror("sched_setaffinity failed"); exit(errno); } cvmx_spinlock_lock(&mask_lock); system_info->core_mask |= 1<<cvmx_get_core_num(); cvmx_atomic_add32(&pending_fork, -1); if (cvmx_atomic_get32(&pending_fork) == 0) cvmx_dprintf("Active coremask = 0x%x\n", system_info->core_mask); cvmx_spinlock_unlock(&mask_lock); /* Spinning waiting for forks to complete */ while (cvmx_atomic_get32(&pending_fork)) {} cvmx_coremask_barrier_sync(system_info->core_mask); int result = appmain(argc, argv); shutdown_cvmx_shared(); return result; }