static void cmd_server_status(struct http_channel *c) { int sessions = sessions_count(); int clients = clients_count(); int resultsets = resultsets_count(); response_open(c, "server-status"); wrbuf_printf(c->wrbuf, "\n <sessions>%u</sessions>\n", sessions); wrbuf_printf(c->wrbuf, " <clients>%u</clients>\n", clients); /* Only works if yaz has been compiled with enabling of this */ wrbuf_printf(c->wrbuf, " <resultsets>%u</resultsets>\n",resultsets); print_meminfo(c->wrbuf); /* TODO add all sessions status */ /* http_sessions_t http_sessions = c->http_sessions; */ /* struct http_session *p; */ /* yaz_mutex_enter(http_sessions->mutex); for (p = http_sessions->session_list; p; p = p->next) { p->activity_counter++; wrbuf_puts(c->wrbuf, "<session-status>\n"); wrbuf_printf(c->wrbuf, "<id>%s</id>\n", p->session_id); yaz_mutex_leave(http_sessions->mutex); session_status(c, p); wrbuf_puts(c->wrbuf, "</session-status>\n"); yaz_mutex_enter(http_sessions->mutex); p->activity_counter--; } yaz_mutex_leave(http_sessions->mutex); */ response_close(c, "server-status"); xmalloc_trav(0); }
/**
 * Executes the issued command.
 *
 * Commands that carry arguments after the command word (cd, start,
 * read, write, touch, delete) are matched by prefix; all other
 * commands must match the whole buffer exactly. Unknown input falls
 * through to an error message.
 */
void console_exec(char *buf)
{
    /* Argument-taking commands: match on the leading command word only. */
    if (strncmp(buf, "cd", 2) == 0) {
        console_cd(dir, buf);
        return;
    }
    if (strncmp(buf, "start", 5) == 0) {
        console_start(dir, buf);
        return;
    }
    if (strncmp(buf, "read", 4) == 0) {
        console_read(dir, buf);
        return;
    }
    if (strncmp(buf, "write", 5) == 0) {
        console_write(dir, buf);
        return;
    }
    if (strncmp(buf, "touch", 5) == 0) {
        console_touch(dir, buf);
        return;
    }
    if (strncmp(buf, "delete", 6) == 0) {
        console_delete(dir, buf);
        return;
    }

    /* Argument-less commands: exact match required. */
    if (strcmp(buf, "hoho") == 0) {
        printk("hoho\n");
    } else if (strcmp(buf, "help") == 0) {
        printk("Help:\nhoho - prints hoho\nhelp - shows help\nmeminfo - prints RAM info\ncpuinfo - shows CPU info\nls - shows filesystem devices\nread - reads a file\nstart - starts a program\nclear - clears the screen\nhalt - shuts down\nreboot - reboots the pc\n");
    } else if (strcmp(buf, "meminfo") == 0) {
        print_meminfo();
    } else if (strcmp(buf, "cpuinfo") == 0) {
        printk("%s\n", get_cpu_vendor(0));
    } else if (strcmp(buf, "ls") == 0) {
        /* An empty dir string means "list the filesystem roots". */
        if (dir[0] == 0) {
            vfs_ls();
        } else {
            vfs_ls_dir(dir);
        }
    } else if (strcmp(buf, "clear") == 0) {
        clear();
    } else if (strcmp(buf, "proc") == 0) {
        print_procs();
    } else if (strcmp(buf, "halt") == 0) {
        printk("Shutting down\n");
        halt();
        while (1); /* halt() should not return; spin if it does. */
    } else if (strcmp(buf, "reboot") == 0) {
        printk("Rebooting\n");
        reboot();
    } else {
        printk("Command not found\n");
    }
}
int main(int argc, char* argv[]) { pami_result_t result = PAMI_ERROR; if (Kernel_GetRank()==0) print_meminfo(stdout, "before PAMI_Client_create"); /* initialize the client */ char * clientname = ""; pami_client_t client; result = PAMI_Client_create( clientname, &client, NULL, 0 ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Client_create"); /* query properties of the client */ pami_configuration_t config; size_t num_contexts; config.name = PAMI_CLIENT_TASK_ID; result = PAMI_Client_query( client, &config, 1); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query"); world_rank = config.value.intval; config.name = PAMI_CLIENT_NUM_TASKS; result = PAMI_Client_query( client, &config, 1); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query"); world_size = config.value.intval; if ( world_rank == 0 ) { printf("starting test on %ld ranks \n", world_size); fflush(stdout); } config.name = PAMI_CLIENT_PROCESSOR_NAME; result = PAMI_Client_query( client, &config, 1); assert(result == PAMI_SUCCESS); //printf("rank %ld is processor %s \n", world_rank, config.value.chararray); //fflush(stdout); config.name = PAMI_CLIENT_NUM_CONTEXTS; result = PAMI_Client_query( client, &config, 1); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query"); num_contexts = config.value.intval; /* initialize the contexts */ pami_context_t * contexts = NULL; contexts = (pami_context_t *) malloc( num_contexts * sizeof(pami_context_t) ); assert(contexts!=NULL); if (Kernel_GetRank()==0) fprintf(stdout, "num_contexts = %ld \n", (long)num_contexts); result = PAMI_Context_createv( client, &config, 0, contexts, num_contexts ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Context_createv"); /* setup the world geometry */ pami_geometry_t world_geometry; pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER; size_t num_alg[2]; pami_algorithm_t * safe_barrier_algs = NULL; 
pami_metadata_t * safe_barrier_meta = NULL; pami_algorithm_t * fast_barrier_algs = NULL; pami_metadata_t * fast_barrier_meta = NULL; result = PAMI_Geometry_world( client, &world_geometry ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Geometry_world"); result = PAMI_Geometry_algorithms_num( world_geometry, barrier_xfer, num_alg ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_algorithms_num"); if ( world_rank == 0 ) printf("number of barrier algorithms = {%ld,%ld} \n", num_alg[0], num_alg[1] ); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Geometry_algorithms_num"); safe_barrier_algs = (pami_algorithm_t *) malloc( num_alg[0] * sizeof(pami_algorithm_t) ); assert(safe_barrier_algs!=NULL); safe_barrier_meta = (pami_metadata_t *) malloc( num_alg[0] * sizeof(pami_metadata_t) ); assert(safe_barrier_meta!=NULL); fast_barrier_algs = (pami_algorithm_t *) malloc( num_alg[1] * sizeof(pami_algorithm_t) ); assert(fast_barrier_algs!=NULL); fast_barrier_meta = (pami_metadata_t *) malloc( num_alg[1] * sizeof(pami_metadata_t) ); assert(fast_barrier_meta!=NULL); result = PAMI_Geometry_algorithms_query( world_geometry, barrier_xfer, safe_barrier_algs, safe_barrier_meta, num_alg[0], fast_barrier_algs, fast_barrier_meta, num_alg[1] ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_algorithms_query"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Geometry_algorithms_query"); /* perform a barrier */ size_t b; pami_xfer_t barrier; volatile int active = 0; for ( b = 0 ; b < num_alg[0] ; b++ ) { barrier.cb_done = cb_done; barrier.cookie = (void*) &active; barrier.algorithm = safe_barrier_algs[b]; uint64_t t0 = GetTimeBase(); active = 1; result = PAMI_Collective( contexts[0], &barrier ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Collective - barrier"); while (active) result = PAMI_Context_advance( contexts[0], 1 ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance - barrier"); 
uint64_t t1 = GetTimeBase(); if ( world_rank == 0 ) printf("safe barrier algorithm %ld (%s) - took %llu cycles \n", b, safe_barrier_meta[b].name, (long long unsigned int)t1-t0 ); fflush(stdout); } for ( b = 0 ; b < num_alg[1] ; b++ ) { barrier.cb_done = cb_done; barrier.cookie = (void*) &active; barrier.algorithm = fast_barrier_algs[b]; uint64_t t0 = GetTimeBase(); active = 1; result = PAMI_Collective( contexts[0], &barrier ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Collective - barrier"); while (active) result = PAMI_Context_advance( contexts[0], 1 ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance - barrier"); uint64_t t1 = GetTimeBase(); if ( world_rank == 0 ) printf("fast barrier algorithm %ld (%s) - took %llu cycles \n", b, fast_barrier_meta[b].name, (long long unsigned int)t1-t0 ); fflush(stdout); } if (Kernel_GetRank()==0) print_meminfo(stdout, "after barrier tests"); /* finalize the contexts */ result = PAMI_Context_destroyv( contexts, num_contexts ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv"); free(contexts); if (Kernel_GetRank()==0) print_meminfo(stdout, "before PAMI_Client_destroy"); /* finalize the client */ result = PAMI_Client_destroy( &client ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Client_destroy"); if ( world_rank == 0 ) { printf("end of test \n"); fflush(stdout); } return 0; }
int main(int argc, char *argv[]) { /********************************************************************************* * INITIALIZE MPI *********************************************************************************/ int world_size = 0, world_rank = -1; int provided = -1; #if defined(USE_MPI_INIT) MPI_Init( &argc, &argv ); MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); if (world_rank==0) print_meminfo(stdout, "after MPI_Init"); #else int requested = -1; # if defined(USE_MPI_INIT_THREAD_MULTIPLE) requested = MPI_THREAD_MULTIPLE; # elif defined(USE_MPI_INIT_THREAD_SERIALIZED) requested = MPI_THREAD_SERIALIZED; # elif defined(USE_MPI_INIT_THREAD_FUNNELED) requested = MPI_THREAD_FUNNELED; # else requested = MPI_THREAD_SINGLE; # endif MPI_Init_thread( &argc, &argv, requested, &provided ); MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); if (world_rank==0) print_meminfo(stdout, "after MPI_Init_thread"); if (provided>requested) { if (world_rank==0) printf("MPI_Init_thread returned %s instead of %s, but this is okay. \n", MPI_THREAD_STRING(provided), MPI_THREAD_STRING(requested) ); } if (provided<requested) { if (world_rank==0) printf("MPI_Init_thread returned %s instead of %s so the test will exit. \n", MPI_THREAD_STRING(provided), MPI_THREAD_STRING(requested) ); MPI_Abort(MPI_COMM_WORLD, 1); } #endif double t0 = MPI_Wtime(); int is_init = 0; MPI_Initialized(&is_init); if (world_rank==0) printf("MPI %s initialized. \n", (is_init==1 ? "was" : "was not") ); MPI_Query_thread(&provided); if (world_rank==0) printf("MPI thread support is %s. \n", MPI_THREAD_STRING(provided) ); MPI_Comm_size( MPI_COMM_WORLD, &world_size ); if (world_rank==0) printf("MPI test program running on %d ranks. 
\n", world_size); char procname[MPI_MAX_PROCESSOR_NAME]; int pnlen; MPI_Get_processor_name(procname,&pnlen); printf("%d: processor name = %s\n", world_rank, procname); /********************************************************************************* * SETUP MPI COMMUNICATORS *********************************************************************************/ if (world_rank==0) printf("MPI_Barrier on MPI_COMM_WORLD 1 \n"); MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("MPI_Comm_dup of MPI_COMM_WORLD \n"); MPI_Comm comm_world_dup; MPI_Comm_dup(MPI_COMM_WORLD, &comm_world_dup); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_dup"); if (world_rank==0) printf("MPI_Barrier on comm_world_dup \n"); MPI_Barrier( comm_world_dup ); if (world_rank==0) printf("MPI_Comm_split of MPI_COMM_WORLD into world_reordered \n"); MPI_Comm comm_world_reordered; MPI_Comm_split(MPI_COMM_WORLD, 0, world_size-world_rank, &comm_world_reordered); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_split"); if (world_rank==0) printf("MPI_Comm_split of MPI_COMM_WORLD into left-right \n"); MPI_Comm comm_world_leftright; int leftright = (world_rank<(world_size/2)); MPI_Comm_split(MPI_COMM_WORLD, leftright, world_rank, &comm_world_leftright); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_split"); if (world_rank==0) printf("MPI_Barrier on comm_world_leftright \n"); MPI_Barrier( comm_world_leftright ); if (world_rank==0) printf("MPI_Comm_split of MPI_COMM_WORLD into odd-even \n"); MPI_Comm comm_world_oddeven; int oddeven = (world_rank%2); MPI_Comm_split(MPI_COMM_WORLD, oddeven, world_rank, &comm_world_oddeven); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_split"); if (world_rank==0) printf("MPI_Barrier on comm_world_oddeven \n"); MPI_Barrier( comm_world_oddeven ); if (world_rank==0) printf("MPI_Comm_split MPI_COMM_WORLD into (world-1) \n"); MPI_Comm comm_world_minus_one; int left_out = world_rank==(world_size/2); MPI_Comm_split(MPI_COMM_WORLD, left_out, 
world_rank, &comm_world_minus_one); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_split"); if (world_rank==0) printf("MPI_Barrier on comm_world_minus_one \n"); MPI_Barrier( comm_world_minus_one ); if (world_rank==0) printf("MPI_Comm_group of group_world from MPI_COMM_WORLD \n"); MPI_Group group_world; MPI_Comm_group(MPI_COMM_WORLD, &group_world); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_group"); int geomprog_size = (world_size==1) ? 1 : ceil(log2(world_size)); int * geomprog_list = NULL; geomprog_list = (int *) safemalloc( geomprog_size * sizeof(int) ); for (int i=0; i<geomprog_size; i++) geomprog_list[i] = pow(2,i)-1; if (world_rank==0) for (int i=0; i<geomprog_size; i++) if (world_rank==0) printf("geomprog_list[%d] = %d \n", i, geomprog_list[i]); if (world_rank==0) printf("MPI_Group_incl of group_geomprog (geometric progression) from group_world \n"); MPI_Group group_geomprog; MPI_Group_incl(group_world, geomprog_size, geomprog_list, &group_geomprog); MPI_Group_free(&group_world); if (world_rank==0) printf("MPI_Comm_create of comm_geomprog from group_geomprog on MPI_COMM_WORLD \n"); MPI_Comm comm_geomprog; MPI_Comm_create(MPI_COMM_WORLD, group_geomprog, &comm_geomprog); MPI_Group_free(&group_geomprog); if (world_rank==0) print_meminfo(stdout, "after MPI_Comm_create"); if (world_rank==0) printf("MPI_Barrier on comm_geomprog \n"); for (int i=0; i<geomprog_size; i++) if (geomprog_list[i]==world_rank) MPI_Barrier( comm_geomprog ); if (world_rank==0) printf("MPI_Barrier on MPI_COMM_WORLD 2 \n"); MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) print_meminfo(stdout, "after MPI communicator creation"); /********************************************************************************* * COLLECTIVES *********************************************************************************/ int max_mem = (argc>1 ? 
atoi(argv[1]) : 32*1024*1024); MPI_Comm test_comm; #if defined(DO_COMM_WORLD) test_comm = MPI_COMM_WORLD; MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "MPI_COMM_WORLD - pass 1" ); { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "MPI_COMM_WORLD - pass 2" ); { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #ifdef DO_COMM_WORLD_JITTER test_comm = MPI_COMM_WORLD; MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "COMM_WORLD_JITTER" ); { int jitter = 0; if ((world_rank%10)==0) jitter++; if ((world_rank%100)==0) jitter++; if ((world_rank%1000)==0) jitter++; if ((world_rank%10000)==0) jitter++; if ((world_rank%100000)==0) jitter++; MPI_Barrier( test_comm ); sleep(jitter); bcast_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); sleep(jitter); gather_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); sleep(jitter); allgather_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); sleep(jitter); scatter_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); sleep(jitter); alltoall_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); 
sleep(jitter); reduce_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); sleep(jitter); allreduce_only(stdout, test_comm, max_mem); MPI_Barrier( test_comm ); sleep(jitter); reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #ifdef DO_COMM_WORLD_DUP test_comm = comm_world_dup; MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "COMM_WORLD_DUP" ); { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #ifdef DO_WORLD_REORDERED test_comm = comm_world_reordered; MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "WORLD_REORDERED" ); { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #ifdef DO_WORLD_MINUS_ONE test_comm = comm_world_minus_one; MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "WORLD_MINUS_ONE" ); if (left_out==0) { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, 
test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #if DO_LEFT_RIGHT test_comm = comm_world_leftright; for (int i=0; i<2; i++) { MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==i) printf("############## %s ##############\n", (i==0 ? "LEFT" : "RIGHT") ); if (leftright==i) { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #if DO_ODD_EVEN test_comm = comm_world_oddeven; for (int i=0; i<2; i++) { MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==i) printf("############## %s ##############\n", (i==0 ? "EVEN" : "ODD") ); if (oddeven==i) { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); reducescatterblock_only(stdout, test_comm, max_mem); } } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif #ifdef DO_GEOM_PROG test_comm = comm_geomprog; MPI_Barrier( MPI_COMM_WORLD ); if (world_rank==0) printf("############## %s ##############\n", "GEOM_PROG" ); for (int i=0; i<geomprog_size; i++) if (geomprog_list[i]==world_rank) { MPI_Barrier( test_comm ); bcast_only(stdout, test_comm, max_mem); gather_only(stdout, test_comm, max_mem); allgather_only(stdout, test_comm, max_mem); scatter_only(stdout, test_comm, max_mem); alltoall_only(stdout, test_comm, max_mem); reduce_only(stdout, test_comm, max_mem); allreduce_only(stdout, test_comm, max_mem); 
reducescatterblock_only(stdout, test_comm, max_mem); } fflush(stdout); MPI_Barrier( MPI_COMM_WORLD ); #endif if (world_rank==0) print_meminfo(stdout, "after MPI collective tests"); /********************************************************************************* * CLEAN UP AND FINALIZE *********************************************************************************/ for (int i=0; i<geomprog_size; i++) if (geomprog_list[i]==world_rank) MPI_Comm_free(&comm_geomprog); free(geomprog_list); MPI_Comm_free(&comm_world_minus_one); MPI_Comm_free(&comm_world_oddeven); MPI_Comm_free(&comm_world_leftright); MPI_Comm_free(&comm_world_reordered); MPI_Comm_free(&comm_world_dup); MPI_Barrier( MPI_COMM_WORLD ); double t1 = MPI_Wtime(); double dt = t1-t0; if (world_rank==0) printf("TEST FINISHED SUCCESSFULLY IN %lf SECONDS \n", dt); fflush(stdout); if (world_rank==0) print_meminfo(stdout, "before MPI_Finalize"); MPI_Finalize(); return 0; }