Example no. 1
int maping_allocate(int commsize, char *g, int *ranks, char *mpipgo_algo)
{
    /* pweights, npart, new_mapp, old_mapp, subset_nodes and subsystem are
       assumed to be file-scope variables defined elsewhere in this module. */
    int i = 0;
    csrgraph_t *graph;

    /* Load the communication graph in CSR form. */
    graph = csrgraph_load(g);
    if (graph == NULL) {
        fprintf(stderr, "gpart load error\n");
        return -1;
    }
    /*    for (i = 0; i < commsize; i++) {
            printf("subset_nodes[%d] = %d\n", old_mapp[i],
                   subset_nodes[old_mapp[i]]);
        }
        for (i = 0; i < npart; i++) {
            printf("pweights[%d] = %d\n", i, pweights[i]);
        }
    */
    /* Map processes to partitions using the selected algorithm. */
    if (strcmp(mpipgo_algo, "gpart") == 0) {
        if (gpart_partition_recursive(graph, pweights, npart, new_mapp) > 0) {
            fprintf(stderr, "gpart partition error\n");
            return -1;
        }
    } else if (strcmp(mpipgo_algo, "linear") == 0) {
        linear(npart, pweights, new_mapp, commsize);
    } else if (strcmp(mpipgo_algo, "rr") == 0) {
        rr(npart, pweights, new_mapp, commsize);
    }
    /*
        for (i = 0; i < commsize; i++) {
            printf("old[%d] = %d || new[%d] = %d \n", i, old_mapp[i], i,
                   new_mapp[i]);
        }
    */
    /* Build the subsystem description from the old mapping and the partition
       weights. */
    subsystem = subsystem_init(old_mapp, commsize, npart, pweights,
                               subset_nodes);
    if (subsystem == NULL) {
        return -1;
    }
//    printf("stage 1\n");
    /* Record each original rank i at the slot returned for its new partition. */
    for (i = 0; i < commsize; i++) {
        ranks[subsystem_getproc(subsystem, new_mapp[i])] = i;
    }
    /*    for (i = 0; i < commsize; i++) {
            printf("ranks[%d] = %d\n", i, ranks[i]);
        }
    */
    subsystem_free(subsystem, npart);

    return 0;
}
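
For orientation, here is a minimal sketch of how maping_allocate might be invoked. The communicator size, graph file name, and algorithm string are placeholder assumptions, and the ranks buffer is assumed to hold one entry per process.

/*
 * Hypothetical caller sketch: the arguments below are assumptions for
 * illustration, not values taken from the original project.
 */
#include <stdio.h>
#include <stdlib.h>

int maping_allocate(int commsize, char *g, int *ranks, char *mpipgo_algo);

int main(void)
{
    int commsize = 16;                       /* assumed number of processes */
    int *ranks = malloc(commsize * sizeof(*ranks));

    if (ranks == NULL)
        return EXIT_FAILURE;

    /* "comm_graph.csr" and "gpart" are placeholder arguments. */
    if (maping_allocate(commsize, "comm_graph.csr", ranks, "gpart") != 0) {
        fprintf(stderr, "mapping failed\n");
        free(ranks);
        return EXIT_FAILURE;
    }

    free(ranks);
    return EXIT_SUCCESS;
}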
Example no. 2
static int subsystem_setup(void)
{
	splat_cfg_t *cfg;
	int i, rc, size, cfg_size;
	subsystem_t *sub;
	splat_user_t *desc;

	/* Acquire the number of registered subsystems */
	cfg_size = sizeof(*cfg);
	cfg = (splat_cfg_t *)malloc(cfg_size);
	if (cfg == NULL)
		return -ENOMEM;

	memset(cfg, 0, cfg_size);
	cfg->cfg_magic = SPLAT_CFG_MAGIC;
	cfg->cfg_cmd   = SPLAT_CFG_SUBSYSTEM_COUNT;

	rc = ioctl(splatctl_fd, SPLAT_CFG, cfg);
	if (rc) {
		fprintf(stderr, "Ioctl() error 0x%lx / %d: %d\n",
		        (unsigned long)SPLAT_CFG, cfg->cfg_cmd, errno);
		free(cfg);
		return rc;
	}

	size = cfg->cfg_rc1;
	free(cfg);

	/* Based on the newly acquired number of subsystems allocate
	 * memory to get the descriptive information for them all. */
	cfg_size = sizeof(*cfg) + size * sizeof(splat_user_t);
	cfg = (splat_cfg_t *)malloc(cfg_size);
	if (cfg == NULL)
		return -ENOMEM;

	memset(cfg, 0, cfg_size);
	cfg->cfg_magic = SPLAT_CFG_MAGIC;
	cfg->cfg_cmd   = SPLAT_CFG_SUBSYSTEM_LIST;
	cfg->cfg_data.splat_subsystems.size = size;

	rc = ioctl(splatctl_fd, SPLAT_CFG, cfg);
	if (rc) {
		fprintf(stderr, "Ioctl() error %lu / %d: %d\n",
		        (unsigned long) SPLAT_CFG, cfg->cfg_cmd, errno);
		free(cfg);
		return rc;
	}

	/* Add the new subsystems into the global list */
	size = cfg->cfg_rc1;
	for (i = 0; i < size; i++) {
		desc = &(cfg->cfg_data.splat_subsystems.descs[i]);

		sub = subsystem_init(desc);
		if (sub == NULL) {
			fprintf(stderr, "Error initializing subsystem: %s\n",
			        desc->name);
			free(cfg);
			return -ENOMEM;
		}

		list_append(subsystems, sub);
	}

	free(cfg);
	return 0;
}
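
subsystem_setup() follows a common two-phase ioctl idiom: first ask the driver how many entries exist, then allocate a buffer sized for that many and issue a second call to fetch the full list. The sketch below shows only the bare pattern; FOO_COUNT, FOO_LIST, and struct foo_query are invented names for illustration and are not part of the SPLAT interface.

/*
 * Generic two-phase query sketch. The command codes and payload layout are
 * hypothetical; only the count-then-list shape mirrors subsystem_setup().
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

struct foo_query {
	int count;          /* in/out: number of entries */
	int entries[];      /* out: filled in by the list command */
};

static int query_all(int fd, unsigned long count_cmd, unsigned long list_cmd)
{
	struct foo_query hdr;
	struct foo_query *buf;
	size_t buf_size;
	int rc;

	/* Phase 1: ask how many entries the driver knows about. */
	memset(&hdr, 0, sizeof(hdr));
	rc = ioctl(fd, count_cmd, &hdr);
	if (rc)
		return rc;

	/* Phase 2: allocate room for that many entries and fetch them. */
	buf_size = sizeof(*buf) + hdr.count * sizeof(int);
	buf = calloc(1, buf_size);
	if (buf == NULL)
		return -ENOMEM;

	buf->count = hdr.count;
	rc = ioctl(fd, list_cmd, buf);

	/* On success, buf->entries[0..count-1] would be consumed here. */
	free(buf);
	return rc;
}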
Example no. 3
/*
 *	Running in virtual memory, on the interrupt stack.
 *	Does not return.  Dispatches initial thread.
 *
 *	Assumes that master_cpu is set.
 */
void
setup_main(void)
{
	thread_t		startup_thread;

	printf_init();
	panic_init();

	sched_init();
	vm_mem_bootstrap();
	ipc_bootstrap();
	vm_mem_init();
	ipc_init();

	/*
	 * As soon as the virtual memory system is up, we record
	 * that this CPU is using the kernel pmap.
	 */
	PMAP_ACTIVATE_KERNEL(master_cpu);

	init_timers();
	timeout_init();

#if	CDLI > 0
	ns_init();	/* Initialize CDLI */
#endif	/* CDLI > 0 */

	dev_lookup_init();
	timeout_init();
	machine_init();

	machine_info.max_cpus = NCPUS;
	machine_info.memory_size = mem_size;
	machine_info.avail_cpus = 0;
	machine_info.major_version = KERNEL_MAJOR_VERSION;
	machine_info.minor_version = KERNEL_MINOR_VERSION;

#if	XPR_DEBUG
	xprbootstrap();
#endif	/* XPR_DEBUG */

	/*
	 *	Initialize the IPC, task, and thread subsystems.
	 */
	clock_init();
	utime_init();
	ledger_init();
#if	THREAD_SWAPPER
	thread_swapper_init();
#endif	/* THREAD_SWAPPER */
#if	TASK_SWAPPER
	task_swapper_init();
#endif	/* TASK_SWAPPER */
	task_init();
	act_init();
	thread_init();
	subsystem_init();
#if	TASK_SWAPPER
	task_swappable(&realhost, kernel_task, FALSE);
#endif	/* TASK_SWAPPER */
#if	MACH_HOST
	pset_sys_init();
#endif	/* MACH_HOST */

	/*
	 *	Kick off the time-out driven routines by calling
	 *	them the first time.
	 */
	recompute_priorities();
	compute_mach_factor();

	/*
	 *	Initialize the Event Trace Analysis Package.
	 * 	Dynamic Phase: 2 of 2
	 */
	etap_init_phase2();
	
	/*
	 *	Create a kernel thread to start the other kernel
	 *	threads.  Thread_resume (from kernel_thread) calls
	 *	thread_setrun, which may look at current thread;
	 *	we must avoid this, since there is no current thread.
	 */

	/*
	 * Create the thread, and point it at the routine.
	 */
	(void) thread_create_at(kernel_task, &startup_thread,
							start_kernel_threads);
#if	NCPUS > 1 && PARAGON860
	thread_bind(startup_thread, cpu_to_processor(master_cpu));
#endif
	/*
	 * Pretend it is already running, and resume it.
	 * Since it looks as if it is running, thread_resume
	 * will not try to put it on the run queues.
	 *
	 * We can do all of this without locking, because nothing
	 * else is running yet.
	 */
	startup_thread->state |= TH_RUN;
	(void) thread_resume(startup_thread->top_act);

	/*
	 * Start the thread.
	 */
	cpu_launch_first_thread(startup_thread);
	/*NOTREACHED*/
	panic("cpu_launch_first_thread returns!");
}