Code example #1
File: agent_helper.c  Project: carriercomm/zetascale
/*
 * @brief Pthread-safe pre-initialization of SDF agent engine.
 *
 * Order of initialization (change accordingly): <br>
 * 1. sdf_msg_init_mpi (find out who we are in the cluster?) <br>
 * 2. shmem initialization <br>
 * 3. fthInit <br>
 * 4. sdf_msg_init <br>
 * 5. sdf_msg_startmsg <br>   // FIXME: merge sdf_msg_xxx calls?
 *
 * @return status, SDF_TRUE on success
 */
static SDF_boolean_t
agent_engine_pre_init_internal(struct sdf_agent_state *state,
                               int argc, char *argv[]) {
    int numScheds;
    uint32_t sdf_msg_numprocs;

    sdf_msg_init(argc, argv);
    state->rank = sdf_msg_myrank();
    sdf_msg_numprocs = sdf_msg_numranks();

    /*
     * Allow the number of nodes to differ from the MPI process count so
     * that node failures can be simulated for multi-node operation (e.g.
     * with replication), as was possible prior to the adoption of
     * non-MPI-based messaging.
     */
    if (!state->config.nnodes)
        state->config.nnodes = sdf_msg_numprocs;

    if (!init_agent_sm_config(&state->config.shmem, state->rank))
        return SDF_FALSE;

    if (state->config.ffdc_disable == 0) {
        if (ffdc_initialize(0, BLD_VERSION,
                            state->config.ffdc_buffer_len) == 0) {
            plat_log_msg(21788, LOG_CAT, PLAT_LOG_LEVEL_DEBUG,
                         "Initialized FFDC logging "
                         "(max buffers=%d, thread bufsize=0x%lx)",
                         FFDC_MAX_BUFFERS,
                         (long)state->config.ffdc_buffer_len);
        } else {
            plat_log_msg(21789, LOG_CAT, PLAT_LOG_LEVEL_WARN,
                         "Unable to initialize FFDC logging "
                         "(max buffers=%d, thread bufsize=0x%lx)",
                         FFDC_MAX_BUFFERS,
                         (long)state->config.ffdc_buffer_len);
        }
    } else {
        plat_log_msg(10005, LOG_CAT, PLAT_LOG_LEVEL_DEBUG,
                     "FFDC logging disabled");
    }

    numScheds = getProperty_Int("SDF_FTHREAD_SCHEDULERS", 1);
    fthInitMultiQ(1, numScheds);
    return SDF_TRUE;
}
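
The docstring above advertises the pre-init as pthread-safe, but the listing does not show how callers are serialized. A minimal sketch of one common way to provide that guarantee is to funnel every caller through pthread_once; everything below (the pre_init_* statics and the agent_engine_pre_init wrapper) is a hypothetical illustration, not code taken from the zetascale source.

#include <pthread.h>

static pthread_once_t pre_init_once = PTHREAD_ONCE_INIT;
static SDF_boolean_t pre_init_status;          /* result of the single run */
static struct sdf_agent_state *pre_init_state; /* stashed wrapper arguments */
static int pre_init_argc;
static char **pre_init_argv;

static void
pre_init_once_fn(void) {
    /* executed at most once, no matter how many threads race in */
    pre_init_status = agent_engine_pre_init_internal(pre_init_state,
                                                     pre_init_argc,
                                                     pre_init_argv);
}

SDF_boolean_t
agent_engine_pre_init(struct sdf_agent_state *state, int argc, char *argv[]) {
    pre_init_state = state;    /* hand the arguments to the once-function */
    pre_init_argc = argc;
    pre_init_argv = argv;
    pthread_once(&pre_init_once, pre_init_once_fn);
    return pre_init_status;
}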
Code example #2
int
main(int argc, char *argv[]) {
	
    info = (struct test_info *)malloc(sizeof(struct test_info));
    test_info_init(info);
    info->test_type = 0;
    info->msg_count = 50;
    
    struct plat_opts_config_mpilogme config;
    SDF_boolean_t success = SDF_TRUE;
    uint32_t numprocs;
    int tmp, namelen, mpiv = 0, mpisubv = 0, i;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int msg_init_flags = SDF_MSG_MPI_INIT;
    config.inputarg = 0;
    config.msgtstnum = 500;

    /* We may not need to gather anything from here but what the heck */
    loadProperties("/opt/schooner/config/schooner-med.properties"); // TODO get filename from command line

    /* make sure this is first in order to get the MPI init args */
    success = plat_opts_parse_mpilogme(&config, argc, argv) ? SDF_FALSE : SDF_TRUE;

    printf("input arg %d msgnum %d success %d\n", config.inputarg, config.msgtstnum, success);
    fflush(stdout);
    myid = sdf_msg_init_mpi(argc, argv, &numprocs, &success, msg_init_flags);
    info->myid = myid;
    if ((!success) || (myid < 0)) {
        printf("Node %d: MPI Init failure... exiting - errornum %d\n", myid, success);
        fflush(stdout);
        MPI_Finalize();
        return (EXIT_FAILURE);
    }

    /* debugger hook: attach a debugger and set debug nonzero to spin here */
    int debug = 0;
    while (debug);

    tmp = init_msgtest_sm((uint32_t)myid);

    /* Enable this process to run threads across multiple CPUs. MPI defaults
     * to running all threads on a single core, which is not what we want:
     * it forces the msg thread to time-slice with the fth threads that send
     * and receive messages. The first arg is the processor to start on; the
     * second is the number of sequential processors from there.
     */
    //lock_processor(0, 7);
    lock_processor(myid * 4, 4);
    info->lock_cpu = 4;
    sleep(1);
    msg_init_flags |= SDF_MSG_RTF_DISABLE_MNGMT;

    /* Start up the SDF messaging engine. FIXME: still dual-node mode;
     * pnodeid is passed in and determined from the number of processes
     * mpirun sees.
     */
    sdf_msg_init(myid, &pnodeid, msg_init_flags);

    MPI_Get_version(&mpiv, &mpisubv);
    MPI_Get_processor_name(processor_name, &namelen);

    printf("Node %d: MPI Version: %d.%d Name %s \n", myid, mpiv, mpisubv, processor_name);
    fflush(stdout);

    plat_log_msg(
            PLAT_LOG_ID_INITIAL,
            LOG_CAT,
            PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: Completed Msg Init.. numprocs %d pnodeid %d Starting Test\n",
            myid, numprocs, pnodeid);
    info->pnodeid = pnodeid;
    for (i = 0; i < 2; i++) {
        sleep(2);
        plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                "\nNode %d: Number of sleeps %d\n", myid, i);
    }

    fthInit();

    /* SAVE THIS: may need to play with the priority later */
#if 0
    struct sched_param param;
    int newprio = 60;
    pthread_attr_t hi_prior_attr;

    pthread_attr_init(&hi_prior_attr);
    pthread_attr_setschedpolicy(&hi_prior_attr, SCHED_FIFO);
    pthread_attr_getschedparam(&hi_prior_attr, &param);
    param.sched_priority = newprio;
    pthread_attr_setschedparam(&hi_prior_attr, &param);
    pthread_create(&fthPthread, &hi_prior_attr, &fthPthreadRoutine, NULL);
#endif

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_create(&fthPthread, &attr, &SinglePtlSequentialPressPthreadRoutine, NULL);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                 "\nNode %d: Created pthread for FTH %d\n", myid, i);
    info->pthread_info = 1;
    info->fth_info = 2;
    pthread_join(fthPthread, NULL);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                 "\nNode %d: SDF Messaging Test Complete - i %d\n", myid, i);

    /* Let's stop the messaging engine; this will block until it completes */
    /* FIXME: arg is the threadlvl */
    sdf_msg_stopmsg(myid, SYS_SHUTDOWN_SELF);
    
    if (numprocs > 1) {
        sdf_msg_nsync(myid, (myid == 0 ? 1 : 0));
    }

    plat_shmem_detach();
    info->success++;
    if (myid == 0) {
        sched_yield();
        printf("Node %d: Exiting message test after yielding... Calling MPI_Finalize\n", myid);
        fflush(stdout);
        sched_yield();
        MPI_Finalize();
        print_test_info(info);
        test_info_final(info);
    }
    else {
        printf("Node %d: Exiting message test... Calling MPI_Finalize\n", myid);
        fflush(stdout);
        sched_yield();
        MPI_Finalize();
    }
    printf("Successfully ends\n");
    return (EXIT_SUCCESS);

}
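
Both tests pin themselves to CPUs with lock_processor(first_cpu, count) before starting the messaging engine. The project's own implementation is not shown in this listing; as a rough sketch under the assumption of a Linux target, such a helper can be built on sched_setaffinity (the name lock_processor_sketch is ours):

#define _GNU_SOURCE             /* for cpu_set_t and sched_setaffinity */
#include <sched.h>
#include <stdio.h>

static int
lock_processor_sketch(int first_cpu, int count) {
    cpu_set_t mask;
    int cpu;

    CPU_ZERO(&mask);
    for (cpu = first_cpu; cpu < first_cpu + count; cpu++)
        CPU_SET(cpu, &mask);                   /* allow this CPU */

    /* pid 0 applies the mask to the calling process */
    if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
        perror("sched_setaffinity");
        return -1;
    }
    return 0;
}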
Code example #3
int main(int argc, char *argv[]) {
    info = (struct test_info *)malloc(sizeof(struct test_info));
    test_info_init(info);
    info->test_type = 0;
    msgCount = 50;
    info->msg_count = msgCount;
    struct plat_opts_config_mpilogme config;
    SDF_boolean_t success = SDF_TRUE;
    uint32_t numprocs;
    int tmp, namelen, mpiv = 0, mpisubv = 0;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int msg_init_flags = SDF_MSG_MPI_INIT;

    config.inputarg = 0;
    config.msgtstnum = 50;

    /* We may not need to gather anything from here but what the heck */
    loadProperties("/opt/schooner/config/schooner-med.properties"); // TODO get filename from command line

    /* make sure this is first in order to get the MPI init args */
    success = plat_opts_parse_mpilogme(&config, argc, argv) ? SDF_FALSE : SDF_TRUE;

    printf("input arg %d msgnum %d success %d\n", config.inputarg, config.msgtstnum, success);
    fflush(stdout);
    myid = sdf_msg_init_mpi(argc, argv, &numprocs, &success, msg_init_flags);
    info->myid = myid;
    if ((!success) || (myid < 0)) {
        printf("Node %d: MPI Init failure... exiting - errornum %d\n", myid,
                success);
        fflush(stdout);
        MPI_Finalize();
        return (EXIT_FAILURE);
    }
    tmp = init_msgtest_sm((uint32_t)myid);

    /* Enable this process to run threads across 2 CPUs. MPI defaults to
     * running all threads on a single core, which is not what we want: it
     * forces the msg thread to time-slice with the fth threads that send
     * and receive messages. The first arg is the processor to start on; the
     * second is the number of sequential processors from there.
     */
    lock_processor(0, 2);
    info->lock_cpu = 2;
    /* Start up the SDF messaging engine. FIXME: still dual-node mode;
     * pnodeid is passed in and determined from the number of processes
     * mpirun sees.
     */
    sleep(1);

    msg_init_flags |= SDF_MSG_RTF_DISABLE_MNGMT;

    sdf_msg_init(myid, &pnodeid, msg_init_flags);
    MPI_Get_version(&mpiv, &mpisubv);
    MPI_Get_processor_name(processor_name, &namelen);

    printf("Node %d: MPI Version: %d.%d Name %s \n", myid, mpiv, mpisubv,
            processor_name);
    fflush(stdout);

    plat_log_msg(
            PLAT_LOG_ID_INITIAL,
            LOG_CAT,
            PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: Completed Msg Init.. numprocs %d pnodeid %d Starting Test\n",
            myid, numprocs, pnodeid);
    info->pnodeid = pnodeid;
    /* note: msgCount is reused here as a plain loop counter */
    for (msgCount = 0; msgCount < 2; msgCount++) {
        sleep(2);
        plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                "\nNode %d: Number of sleeps %d\n", myid, msgCount);
    }

    /* create the fth test threads */

    fthInit();                     /* initialize the fth scheduler */
    /* start the message engine */
    sdf_msg_startmsg(myid, 0, NULL);

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_create(&fthPthread[0], &attr, &MixedthreadTestfthUniptlRoutine, &myid);
    pthread_create(&fthPthread[1], &attr, &MixedthreadTestpthreadUniptlRoutine, &myid);
    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: Created pthread for mixed thread test2\n", myid);
    info->pthread_info = 2;
    info->fth_info = 6;
    pthread_join(fthPthread[0], NULL);
    pthread_join(fthPthread[1], NULL);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: SDF Messaging Test Complete\n", myid);

    sdf_msg_stopmsg(myid, SYS_SHUTDOWN_SELF);

    plat_shmem_detach();
    info->success++;
    if (myid == 0) {
        sched_yield();
        printf("Node %d: Exiting message test after yielding... Calling MPI_Finalize\n", myid);
        fflush(stdout);
        sched_yield();
        MPI_Finalize();
        print_test_info(info);
        test_info_final(info);
    }
    else {
        printf("Node %d: Exiting message test... Calling MPI_Finalize\n", myid);
        fflush(stdout);
        sched_yield();
        MPI_Finalize();
    }
    printf("Successfully ends\n");
    return (EXIT_SUCCESS);
}